diff --git a/.dockerignore b/.dockerignore
deleted file mode 100644
index 78de5153..00000000
--- a/.dockerignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/logs
-.ruff_cache
\ No newline at end of file
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..9030923a
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+*.ipynb linguist-vendored
\ No newline at end of file
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 0d07b90f..0c8e2ab4 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -39,13 +39,13 @@ jobs:
- name: Generate Docs [Sphinx]
run: |
sphinx-build -b html -D version=latest -D release=latest docs docs/_build
- # - name: Deploy Docs
- # uses: JamesIves/github-pages-deploy-action@v4
- # if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' && github.repository == 'geometric-intelligence/TopoBenchmark' }}
- # with:
- # branch: main
- # folder: docs/_build
- # token: ${{ secrets.DOCUMENTATION_KEY }}
- # repository-name: TBD
- # target-folder: topobenchmarkx
- # clean: true
\ No newline at end of file
+ - name: Deploy Docs
+ uses: JamesIves/github-pages-deploy-action@v4
+ if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' && github.repository == 'geometric-intelligence/TopoBenchmark' }}
+ with:
+ branch: main
+ folder: docs/_build
+ token: ${{ secrets.DOCUMENTATION_KEY }}
+ repository-name: geometric-intelligence/geometric-intelligence.github.io
+ target-folder: topobenchmark
+ clean: true
\ No newline at end of file
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 973612d2..e4ac5caf 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -13,4 +13,4 @@ jobs:
- uses: actions/checkout@v3
- uses: chartboost/ruff-action@v1
with:
- src: './topobenchmarkx'
\ No newline at end of file
+ src: './topobenchmark'
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index 1477c893..00000000
--- a/Dockerfile
+++ /dev/null
@@ -1,19 +0,0 @@
-FROM python:3.11.3
-
-WORKDIR /TopoBenchmarkX
-
-COPY . .
-
-RUN pip install --upgrade pip
-
-RUN pip install -e '.[all]'
-
-# Note that not all combinations of torch and CUDA are available
-# See https://github.com/pyg-team/pyg-lib to check the configuration that works for you
-RUN TORCH="2.3.0"
-    # available options: 1.12.0, 1.13.0, 2.0.0, 2.1.0, 2.2.0, or 2.3.0
-RUN CUDA="cu121"
-    # if available, select the CUDA version suitable for your system
-    # available options: cpu, cu102, cu113, cu116, cu117, cu118, or cu121
-RUN pip install torch==${TORCH} --extra-index-url https://download.pytorch.org/whl/${CUDA}
-RUN pip install pyg-lib torch-scatter torch-sparse torch-cluster -f https://data.pyg.org/whl/torch-${TORCH}+${CUDA}.html
\ No newline at end of file
diff --git a/README.md b/README.md
index 080b6f1c..e596d9c2 100755
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ Assess how your model compares against state-of-the-art topological neural netwo
[![Lint](https://github.com/geometric-intelligence/TopoBenchmark/actions/workflows/lint.yml/badge.svg)](https://github.com/geometric-intelligence/TopoBenchmark/actions/workflows/lint.yml)
[![Test](https://github.com/geometric-intelligence/TopoBenchmark/actions/workflows/test.yml/badge.svg)](https://github.com/geometric-intelligence/TopoBenchmark/actions/workflows/test.yml)
[![Codecov](https://codecov.io/gh/geometric-intelligence/TopoBenchmark/branch/main/graph/badge.svg)](https://app.codecov.io/gh/geometric-intelligence/TopoBenchmark)
-[![Docs](https://img.shields.io/badge/docs-website-brightgreen)](https://geometric-intelligence.github.io/topobenchmarkx/index.html)
+[![Docs](https://img.shields.io/badge/docs-website-brightgreen)](https://geometric-intelligence.github.io/topobenchmark/index.html)
[![Python](https://img.shields.io/badge/python-3.10+-blue?logo=python)](https://www.python.org/)
[![license](https://badgen.net/github/license/geometric-intelligence/TopoBenchmark?color=green)](https://github.com/geometric-intelligence/TopoBenchmark/blob/main/LICENSE)
[![slack](https://img.shields.io/badge/chat-on%20slack-purple?logo=slack)](https://join.slack.com/t/geometric-intelligenceworkspace/shared_invite/zt-2k63sv99s-jbFMLtwzUCc8nt3sIRWjEw)
@@ -53,12 +53,12 @@ Additionally, the library offers the ability to transform, i.e. _lift_, each dat
If you do not have conda on your machine, please follow [their guide](https://docs.anaconda.com/free/miniconda/miniconda-install/) to install it.
-First, clone the `TopoBenchmark` repository and set up a conda environment `tbx` with python 3.11.3.
+First, clone the `TopoBenchmark` repository and set up a conda environment `tb` with python 3.11.3.
```
-git clone git@github.com:geometric-intelligence/topobenchmarkx.git
+git clone git@github.com:geometric-intelligence/topobenchmark.git
cd TopoBenchmark
-conda create -n tbx python=3.11.3
+conda create -n tb python=3.11.3
```
Next, check the CUDA version of your machine:
@@ -79,19 +79,21 @@ This command installs the `TopoBenchmark` library and its dependencies.
Next, train the neural networks by running the following command:
```
-python -m topobenchmarkx
+python -m topobenchmark
```
Thanks to the `hydra` implementation, one can easily override the default experiment configuration through the command line. For instance, the model and dataset can be selected as:
```
-python -m topobenchmarkx model=cell/cwn dataset=graph/MUTAG
+python -m topobenchmark model=cell/cwn dataset=graph/MUTAG
```
**Remark:** By default, our pipeline identifies the source and destination topological domains, and applies a default lifting between them if required.
The same CLI override mechanism also applies when modifying finer configurations within a `CONFIG GROUP`. Please refer to the official [`hydra` documentation](https://hydra.cc/docs/intro/) for further details.
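+
+For example, a finer override touching both the `model` and `dataset` config groups (parameter names as defined in the configs shipped with this repository) looks like:
+```
+python -m topobenchmark model=cell/cwn dataset=graph/MUTAG model.backbone.n_layers=4 model.feature_encoder.out_channels=32 dataset.split_params.data_seed=1
+```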
+
+
## :bike: Experiments Reproducibility
To reproduce Table 1 from the [`TopoBenchmark: A Framework for Benchmarking Topological Deep Learning`](https://arxiv.org/pdf/2406.06642) paper, please run the following command:
@@ -116,6 +118,7 @@ We list the neural networks trained and evaluated by `TopoBenchmark`, organized
| GAT | [Graph Attention Networks](https://openreview.net/pdf?id=rJXMpikCZ) |
| GIN | [How Powerful are Graph Neural Networks?](https://openreview.net/pdf?id=ryGs6iA5Km) |
| GCN | [Semi-Supervised Classification with Graph Convolutional Networks](https://arxiv.org/pdf/1609.02907v4) |
+| GraphMLP | [Graph-MLP: Node Classification without Message Passing in Graph](https://arxiv.org/pdf/2106.04051) |
### Simplicial complexes
| Model | Reference |
@@ -145,7 +148,7 @@ We list the neural networks trained and evaluated by `TopoBenchmark`, organized
### Combinatorial complexes
| Model | Reference |
| --- | --- |
-| GCCN | [Generalized Combinatorial Complex Neural Networks](https://arxiv.org/pdf/2410.06530) |
+| GCCN | [TopoTune: A Framework for Generalized Combinatorial Complex Neural Networks](https://arxiv.org/pdf/2410.06530) |
## :bulb: TopoTune
@@ -160,13 +163,13 @@ To implement and train a GCCN, run the following command line with the desired c
```
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/PROTEINS \
dataset.split_params.data_seed=1 \
model=cell/topotune\
model.tune_gnn=GCN \
model.backbone.GNN.num_layers=2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=4 \
model.feature_encoder.out_channels=32 \
model.feature_encoder.proj_dropout=0.3 \
@@ -178,12 +181,17 @@ python -m topobenchmarkx \
To use a single augmented Hasse graph expansion, use `model={domain}/topotune_onehasse` instead of `model={domain}/topotune`.
-To specify a set of neighborhoods (routes) on the complex, use a list of neighborhoods each specified as `\[\[{source_rank}, {destination_rank}\], {neighborhood}\]`. Currently, the following options for `{neighborhood}` are supported:
-- `up_laplacian`, from rank $r$ to $r$
-- `down_laplacian`, from rank $r$ to $r$
-- `boundary`, from rank $r$ to $r-1$
-- `coboundary`, from rank $r$ to $r+1$
-- `adjacency`, from rank $r$ to $r$ (stand-in for `up_adjacency`, as `down_adjacency` not yet supported in TopoBenchmark)
+To specify a set of neighborhoods on the complex, use a list of neighborhoods, each specified as a string of the form
+`r-{neighborhood}-k`, where $k$ is the source cell rank and $r$ is the number of ranks up or down that the selected `{neighborhood}` considers. Currently, the following options for `{neighborhood}` are supported:
+- `up_laplacian`, between cells of rank $k$, via cells of rank $k+r$.
+- `down_laplacian`, between cells of rank $k$, via cells of rank $k-r$.
+- `hodge_laplacian`, between cells of rank $k$, via cells of both rank $k-r$ and rank $k+r$.
+- `up_adjacency`, between cells of rank $k$, via cells of rank $k+r$.
+- `down_adjacency`, between cells of rank $k$, via cells of rank $k-r$.
+- `up_incidence`, from rank $k$ to rank $k+r$.
+- `down_incidence`, from rank $k$ to rank $k-r$.
+
+The number $r$ can be omitted, in which case $r=1$ by default (e.g. `up_incidence-k` represents the incidence from rank $k$ to $k+1$).
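+
+For example, the override used in the command above, `model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_incidence-2\]`, requests the up-Laplacian on rank $0$ cells (via rank $1$ cells) together with the incidence from rank $2$ down to rank $1$.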
### Using backbone models from any package
@@ -235,16 +243,18 @@ We list the liftings used in `TopoBenchmark` to transform datasets. Here, a _lif
-## Data Transformations
+
+ Data Transformations
| Transform | Description | Reference |
| --- | --- | --- |
| Message Passing Homophily | Higher-order homophily measure for hypergraphs | [Source](https://arxiv.org/abs/2310.07684) |
| Group Homophily | Higher-order homophily measure for hypergraphs that considers groups of predefined sizes | [Source](https://arxiv.org/abs/2103.11818) |
+
## :books: Datasets
-
+### Graphs
| Dataset | Task | Description | Reference |
| --- | --- | --- | --- |
| Cora | Classification | Cocitation dataset. | [Source](https://link.springer.com/article/10.1023/A:1009953814988) |
@@ -264,50 +274,17 @@ We list the liftings used in `TopoBenchmark` to transform datasets. Here, a _lif
| US-county-demos | Regression | In turn each node attribute is used as the target label. | [Source](https://arxiv.org/pdf/2002.08274) |
| ZINC | Regression | Graph-level regression. | [Source](https://pubs.acs.org/doi/10.1021/ci3001277) |
+### Hypergraphs
+| Dataset | Task | Description | Reference |
+| --- | --- | --- | --- |
+| Cora-Cocitation | Classification | Cocitation dataset. | [Source](https://proceedings.neurips.cc/paper_files/paper/2019/file/1efa39bcaec6f3900149160693694536-Paper.pdf) |
+| Citeseer-Cocitation | Classification | Cocitation dataset. | [Source](https://proceedings.neurips.cc/paper_files/paper/2019/file/1efa39bcaec6f3900149160693694536-Paper.pdf) |
+| PubMed-Cocitation | Classification | Cocitation dataset. | [Source](https://proceedings.neurips.cc/paper_files/paper/2019/file/1efa39bcaec6f3900149160693694536-Paper.pdf) |
+| Cora-Coauthorship | Classification | Coauthorship dataset. | [Source](https://proceedings.neurips.cc/paper_files/paper/2019/file/1efa39bcaec6f3900149160693694536-Paper.pdf) |
+| DBLP-Coauthorship | Classification | Coauthorship dataset. | [Source](https://proceedings.neurips.cc/paper_files/paper/2019/file/1efa39bcaec6f3900149160693694536-Paper.pdf) |
-## :hammer_and_wrench: Development
-
-To join the development of `TopoBenchmark`, you should install the library in dev mode.
-
-For this, you can create an environment using either conda or docker. Both options are detailed below.
-
-### :snake: Using Conda Environment
-
-Follow the steps in :jigsaw: Get Started.
-
-
-### :whale: Using Docker
-
-For ease of use, TopoBenchmark employs [Docker](https://www.docker.com/). To set it up on your system you can follow [their guide](https://docs.docker.com/get-docker/). once installed, please follow the next steps:
-
-First, clone the repository and navigate to the correct folder.
-```
-git clone git@github.com:geometric-intelligence/topobenchmarkx.git
-cd TopoBenchmark
-```
-
-Then, build the Docker image.
-```
-docker build -t topobenchmarkx:new .
-```
-
-Depending if you want to use GPUs or not, these are the commands to run the Docker image and mount the current directory.
-
-With GPUs
-```
-docker run -it -d --gpus all --volume $(pwd):/TopoBenchmark topobenchmarkx:new
-```
-
-With CPU
-```
-docker run -it -d --volume $(pwd):/TopoBenchmark topobenchmarkx:new
-```
-
-Happy development!
-
-
## :mag: References ##
To learn more about `TopoBenchmark`, we invite you to read the paper:
diff --git a/__init__.py b/__init__.py
index 579801aa..2728f5a0 100644
--- a/__init__.py
+++ b/__init__.py
@@ -1,12 +1,12 @@
# numpydoc ignore=GL08
import configs
import test
-import topobenchmarkx
+import topobenchmark
__all__ = [
- "topobenchmarkx",
"configs",
"test",
+ "topobenchmark",
]
__version__ = "0.0.1"
diff --git a/codecov.yml b/codecov.yml
index 85ba6f8b..ac4c5a9c 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -4,4 +4,5 @@ coverage:
round: down
precision: 2
ignore:
- - "test/"
\ No newline at end of file
+ - "test/"
+ - "topobenchmark/run.py"
\ No newline at end of file
diff --git a/configs/dataset/graph/AQSOL.yaml b/configs/dataset/graph/AQSOL.yaml
index 0fefc55d..c802fcba 100644
--- a/configs/dataset/graph/AQSOL.yaml
+++ b/configs/dataset/graph/AQSOL.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.MoleculeDatasetLoader
+ _target_: topobenchmark.data.loaders.MoleculeDatasetLoader
parameters:
data_domain: graph
data_type: AQSOL
diff --git a/configs/dataset/graph/IMDB-BINARY.yaml b/configs/dataset/graph/IMDB-BINARY.yaml
index fae4b0c8..2e151f7a 100755
--- a/configs/dataset/graph/IMDB-BINARY.yaml
+++ b/configs/dataset/graph/IMDB-BINARY.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.TUDatasetLoader
+ _target_: topobenchmark.data.loaders.TUDatasetLoader
parameters:
data_domain: graph
data_type: TUDataset
diff --git a/configs/dataset/graph/IMDB-MULTI.yaml b/configs/dataset/graph/IMDB-MULTI.yaml
index c485040e..27fb0c23 100755
--- a/configs/dataset/graph/IMDB-MULTI.yaml
+++ b/configs/dataset/graph/IMDB-MULTI.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.TUDatasetLoader
+ _target_: topobenchmark.data.loaders.TUDatasetLoader
parameters:
data_domain: graph
data_type: TUDataset
diff --git a/configs/dataset/graph/MUTAG.yaml b/configs/dataset/graph/MUTAG.yaml
index 2a4f99e0..c0af46a7 100755
--- a/configs/dataset/graph/MUTAG.yaml
+++ b/configs/dataset/graph/MUTAG.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.TUDatasetLoader
+ _target_: topobenchmark.data.loaders.TUDatasetLoader
parameters:
data_domain: graph
data_type: TUDataset
diff --git a/configs/dataset/graph/NCI1.yaml b/configs/dataset/graph/NCI1.yaml
index ea7f4c1e..d21d807e 100755
--- a/configs/dataset/graph/NCI1.yaml
+++ b/configs/dataset/graph/NCI1.yaml
@@ -1,5 +1,5 @@
loader:
- _target_: topobenchmarkx.data.loaders.TUDatasetLoader
+ _target_: topobenchmark.data.loaders.TUDatasetLoader
parameters:
data_domain: graph
data_type: TUDataset
diff --git a/configs/dataset/graph/NCI109.yaml b/configs/dataset/graph/NCI109.yaml
index 94f71ab9..44a20a83 100755
--- a/configs/dataset/graph/NCI109.yaml
+++ b/configs/dataset/graph/NCI109.yaml
@@ -1,5 +1,5 @@
loader:
- _target_: topobenchmarkx.data.loaders.TUDatasetLoader
+ _target_: topobenchmark.data.loaders.TUDatasetLoader
parameters:
data_domain: graph
data_type: TUDataset
diff --git a/configs/dataset/graph/PROTEINS.yaml b/configs/dataset/graph/PROTEINS.yaml
index 58dcaac8..762f362b 100755
--- a/configs/dataset/graph/PROTEINS.yaml
+++ b/configs/dataset/graph/PROTEINS.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.TUDatasetLoader
+ _target_: topobenchmark.data.loaders.TUDatasetLoader
parameters:
data_domain: graph
data_type: TUDataset
diff --git a/configs/dataset/graph/REDDIT-BINARY.yaml b/configs/dataset/graph/REDDIT-BINARY.yaml
index 0513e51c..07f9554b 100755
--- a/configs/dataset/graph/REDDIT-BINARY.yaml
+++ b/configs/dataset/graph/REDDIT-BINARY.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.TUDatasetLoader
+ _target_: topobenchmark.data.loaders.TUDatasetLoader
parameters:
data_domain: graph
data_type: TUDataset
diff --git a/configs/dataset/graph/US-county-demos.yaml b/configs/dataset/graph/US-county-demos.yaml
index dcb5fbf3..6e21a4a9 100755
--- a/configs/dataset/graph/US-county-demos.yaml
+++ b/configs/dataset/graph/US-county-demos.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.USCountyDemosDatasetLoader
+ _target_: topobenchmark.data.loaders.USCountyDemosDatasetLoader
parameters:
data_domain: graph
data_type: cornel
diff --git a/configs/dataset/graph/ZINC.yaml b/configs/dataset/graph/ZINC.yaml
index 7b277d5f..25b70860 100644
--- a/configs/dataset/graph/ZINC.yaml
+++ b/configs/dataset/graph/ZINC.yaml
@@ -2,7 +2,7 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.MoleculeDatasetLoader
+ _target_: topobenchmark.data.loaders.MoleculeDatasetLoader
parameters:
data_domain: graph
data_type: ZINC
diff --git a/configs/dataset/graph/amazon_ratings.yaml b/configs/dataset/graph/amazon_ratings.yaml
index 217b9bae..3e5a9dae 100755
--- a/configs/dataset/graph/amazon_ratings.yaml
+++ b/configs/dataset/graph/amazon_ratings.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.HeterophilousGraphDatasetLoader
+ _target_: topobenchmark.data.loaders.HeterophilousGraphDatasetLoader
parameters:
data_domain: graph
data_type: heterophilic
diff --git a/configs/dataset/graph/cocitation_citeseer.yaml b/configs/dataset/graph/cocitation_citeseer.yaml
index 5d96cdc9..cfb1b6fe 100755
--- a/configs/dataset/graph/cocitation_citeseer.yaml
+++ b/configs/dataset/graph/cocitation_citeseer.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.PlanetoidDatasetLoader
+ _target_: topobenchmark.data.loaders.PlanetoidDatasetLoader
parameters:
data_domain: graph
data_type: cocitation
diff --git a/configs/dataset/graph/cocitation_cora.yaml b/configs/dataset/graph/cocitation_cora.yaml
index c6a60bba..d2b9fa3b 100755
--- a/configs/dataset/graph/cocitation_cora.yaml
+++ b/configs/dataset/graph/cocitation_cora.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.PlanetoidDatasetLoader
+ _target_: topobenchmark.data.loaders.PlanetoidDatasetLoader
parameters:
data_domain: graph
data_type: cocitation
diff --git a/configs/dataset/graph/cocitation_pubmed.yaml b/configs/dataset/graph/cocitation_pubmed.yaml
index e8fb957d..7d901437 100755
--- a/configs/dataset/graph/cocitation_pubmed.yaml
+++ b/configs/dataset/graph/cocitation_pubmed.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.PlanetoidDatasetLoader
+ _target_: topobenchmark.data.loaders.PlanetoidDatasetLoader
parameters:
data_domain: graph
data_type: cocitation
diff --git a/configs/dataset/graph/manual_dataset.yaml b/configs/dataset/graph/manual_dataset.yaml
index bd391324..e0357d2b 100755
--- a/configs/dataset/graph/manual_dataset.yaml
+++ b/configs/dataset/graph/manual_dataset.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.ManualGraphDatasetLoader
+ _target_: topobenchmark.data.loaders.ManualGraphDatasetLoader
parameters:
data_domain: graph
data_type: manual_dataset
diff --git a/configs/dataset/graph/minesweeper.yaml b/configs/dataset/graph/minesweeper.yaml
index efdfb423..19119e78 100755
--- a/configs/dataset/graph/minesweeper.yaml
+++ b/configs/dataset/graph/minesweeper.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.HeterophilousGraphDatasetLoader
+ _target_: topobenchmark.data.loaders.HeterophilousGraphDatasetLoader
parameters:
data_domain: graph
data_type: heterophilic
diff --git a/configs/dataset/graph/questions.yaml b/configs/dataset/graph/questions.yaml
index a50d3337..25333b75 100755
--- a/configs/dataset/graph/questions.yaml
+++ b/configs/dataset/graph/questions.yaml
@@ -1,5 +1,5 @@
loader:
- _target_: topobenchmarkx.data.loaders.HeterophilousGraphDatasetLoader
+ _target_: topobenchmark.data.loaders.HeterophilousGraphDatasetLoader
parameters:
data_domain: graph
data_type: heterophilic
diff --git a/configs/dataset/graph/roman_empire.yaml b/configs/dataset/graph/roman_empire.yaml
index 8b9322e8..37adfb4b 100755
--- a/configs/dataset/graph/roman_empire.yaml
+++ b/configs/dataset/graph/roman_empire.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.HeterophilousGraphDatasetLoader
+ _target_: topobenchmark.data.loaders.HeterophilousGraphDatasetLoader
parameters:
data_domain: graph
data_type: heterophilic
diff --git a/configs/dataset/graph/tolokers.yaml b/configs/dataset/graph/tolokers.yaml
index 4921cea9..f1657f16 100755
--- a/configs/dataset/graph/tolokers.yaml
+++ b/configs/dataset/graph/tolokers.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.HeterophilousGraphDatasetLoader
+ _target_: topobenchmark.data.loaders.HeterophilousGraphDatasetLoader
parameters:
data_domain: graph
data_type: heterophilic
diff --git a/configs/dataset/hypergraph/coauthorship_cora.yaml b/configs/dataset/hypergraph/coauthorship_cora.yaml
index d864623e..80699bbd 100755
--- a/configs/dataset/hypergraph/coauthorship_cora.yaml
+++ b/configs/dataset/hypergraph/coauthorship_cora.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.CitationHypergraphDatasetLoader
+ _target_: topobenchmark.data.loaders.CitationHypergraphDatasetLoader
parameters:
data_domain: hypergraph
data_type: coauthorship
diff --git a/configs/dataset/hypergraph/coauthorship_dblp.yaml b/configs/dataset/hypergraph/coauthorship_dblp.yaml
index 9d9fc62a..5f4c4e25 100755
--- a/configs/dataset/hypergraph/coauthorship_dblp.yaml
+++ b/configs/dataset/hypergraph/coauthorship_dblp.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.CitationHypergraphDatasetLoader
+ _target_: topobenchmark.data.loaders.CitationHypergraphDatasetLoader
parameters:
data_domain: hypergraph
data_type: coauthorship
diff --git a/configs/dataset/hypergraph/cocitation_citeseer.yaml b/configs/dataset/hypergraph/cocitation_citeseer.yaml
index 3ab48224..d51b884f 100755
--- a/configs/dataset/hypergraph/cocitation_citeseer.yaml
+++ b/configs/dataset/hypergraph/cocitation_citeseer.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.CitationHypergraphDatasetLoader
+ _target_: topobenchmark.data.loaders.CitationHypergraphDatasetLoader
parameters:
data_domain: hypergraph
data_type: cocitation
diff --git a/configs/dataset/hypergraph/cocitation_cora.yaml b/configs/dataset/hypergraph/cocitation_cora.yaml
index c157bbde..557b0a14 100755
--- a/configs/dataset/hypergraph/cocitation_cora.yaml
+++ b/configs/dataset/hypergraph/cocitation_cora.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.CitationHypergraphDatasetLoader
+ _target_: topobenchmark.data.loaders.CitationHypergraphDatasetLoader
parameters:
data_domain: hypergraph
data_type: cocitation
diff --git a/configs/dataset/hypergraph/cocitation_pubmed.yaml b/configs/dataset/hypergraph/cocitation_pubmed.yaml
index 5204e26a..8aa19826 100755
--- a/configs/dataset/hypergraph/cocitation_pubmed.yaml
+++ b/configs/dataset/hypergraph/cocitation_pubmed.yaml
@@ -1,6 +1,6 @@
# Dataset loader config
loader:
- _target_: topobenchmarkx.data.loaders.CitationHypergraphDatasetLoader
+ _target_: topobenchmark.data.loaders.CitationHypergraphDatasetLoader
parameters:
data_domain: hypergraph
data_type: cocitation
diff --git a/configs/dataset/simplicial/karate_club.yaml b/configs/dataset/simplicial/karate_club.yaml
deleted file mode 100755
index e0e38cbd..00000000
--- a/configs/dataset/simplicial/karate_club.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-_target_: topobenchmarkx.data.loaders.SimplicialLoader
-
-# Data definition
-parameters:
- data_domain: simplicial
- data_type: social
- data_name: KarateClub
- data_dir: ${paths.data_dir}/${dataset.parameters.data_domain}/${dataset.parameters.data_type}/${dataset.parameters.data_name}
- data_split_dir: ${paths.data_dir}/data_splits/${dataset.parameters.data_name}
-
- # Dataset parameters
- num_features: 2
- num_classes: 2
- task: classification
- loss_type: cross_entropy
- monitor_metric: accuracy
- task_level: node
- data_seed: 0
-
- # Dataloader parameters
- batch_size: 128 # Needs to be divisible by the number of devices (e.g., if in a distributed setup)
- # train_val_test_split: [55_000, 5_000, 10_000]
- num_workers: 0
- pin_memory: False
diff --git a/configs/dataset/simplicial/mantra_betti_numbers.yaml b/configs/dataset/simplicial/mantra_betti_numbers.yaml
new file mode 100755
index 00000000..64a43713
--- /dev/null
+++ b/configs/dataset/simplicial/mantra_betti_numbers.yaml
@@ -0,0 +1,40 @@
+loader:
+ _target_: topobenchmark.data.loaders.MantraSimplicialDatasetLoader
+ parameters:
+ data_domain: simplicial
+ data_type: topological
+ data_name: MANTRA
+ data_dir: ${paths.data_dir}/${dataset.loader.parameters.data_domain}/${dataset.loader.parameters.data_type}
+ manifold_dim: 3
+ version: "v0.0.5"
+ task_variable: "betti_numbers" # Options: ['name', 'genus', 'orientable'] To use 'torsion_coefficients', 'betti_numbers' fix multilabel multiclass issue
+ model_domain: ${model.model_domain}
+
+# Data definition
+parameters:
+  # In the case of higher-order datasets we have multiple feature dimensions
+  num_features: [1,1,1]
+  #num_classes: 2 # Number of classes depends on the task_variable
+
+ # Dataset parameters
+ # task: classification # TODO: adapt pipeline to support multilabel classification
+ # loss_type: cross_entropy # TODO: adapt pipeline to support multilabel classification
+ # monitor_metric: accuracy # TODO: adapt pipeline to support multilabel classification
+ task_level: graph
+ data_seed: 0
+
+#splits
+split_params:
+ learning_setting: inductive
+ data_split_dir: ${paths.data_dir}/data_splits/${dataset.loader.parameters.data_name}
+ data_seed: 0
+ split_type: random #'k-fold' # either "k-fold" or "random" strategies
+ k: 10 # for "k-fold" Cross-Validation
+ train_prop: 0.5 # for "random" strategy splitting
+
+# Dataloader parameters
+dataloader_params:
+ batch_size: 5
+ num_workers: 0
+ pin_memory: False
+ persistent_workers: False
\ No newline at end of file
diff --git a/configs/dataset/simplicial/mantra_genus.yaml b/configs/dataset/simplicial/mantra_genus.yaml
new file mode 100755
index 00000000..d33f32d3
--- /dev/null
+++ b/configs/dataset/simplicial/mantra_genus.yaml
@@ -0,0 +1,40 @@
+loader:
+ _target_: topobenchmark.data.loaders.MantraSimplicialDatasetLoader
+ parameters:
+ data_domain: simplicial
+ data_type: topological
+ data_name: MANTRA
+ data_dir: ${paths.data_dir}/${dataset.loader.parameters.data_domain}/${dataset.loader.parameters.data_type}
+ manifold_dim: 2
+ version: "v0.0.5"
+ task_variable: "genus" # Options: ['name', 'genus', 'orientable'] To use 'torsion_coefficients', 'betti_numbers' fix multilabel multiclass issue
+ model_domain: ${model.model_domain}
+
+# Data definition
+parameters:
+  # In the case of higher-order datasets we have multiple feature dimensions
+  num_features: [1,1,1]
+  num_classes: 8 # Number of classes depends on the task_variable
+
+ # Dataset parameters
+ task: classification
+ loss_type: cross_entropy
+ monitor_metric: accuracy
+ task_level: graph
+ data_seed: 0
+
+#splits
+split_params:
+ learning_setting: inductive
+ data_split_dir: ${paths.data_dir}/data_splits/${dataset.loader.parameters.data_name}
+ data_seed: 0
+ split_type: random #'k-fold' # either "k-fold" or "random" strategies
+ k: 10 # for "k-fold" Cross-Validation
+ train_prop: 0.5 # for "random" strategy splitting
+
+# Dataloader parameters
+dataloader_params:
+ batch_size: 5
+ num_workers: 0
+ pin_memory: False
+ persistent_workers: False
\ No newline at end of file
diff --git a/configs/dataset/simplicial/mantra_name.yaml b/configs/dataset/simplicial/mantra_name.yaml
new file mode 100755
index 00000000..67e8f599
--- /dev/null
+++ b/configs/dataset/simplicial/mantra_name.yaml
@@ -0,0 +1,40 @@
+loader:
+ _target_: topobenchmark.data.loaders.MantraSimplicialDatasetLoader
+ parameters:
+ data_domain: simplicial
+ data_type: topological
+ data_name: MANTRA
+ data_dir: ${paths.data_dir}/${dataset.loader.parameters.data_domain}/${dataset.loader.parameters.data_type}
+ manifold_dim: 2
+ version: "v0.0.5"
+ task_variable: "name" # Options: ['name', 'genus', 'orientable'] To use 'torsion_coefficients', 'betti_numbers' fix multilabel multiclass issue
+ model_domain: ${model.model_domain}
+
+# Data definition
+parameters:
+  # In the case of higher-order datasets we have multiple feature dimensions
+  num_features: [1,1,1]
+  num_classes: 8 # Number of classes depends on the task_variable
+
+ # Dataset parameters
+ task: classification
+ loss_type: cross_entropy
+ monitor_metric: accuracy
+ task_level: graph
+ data_seed: 0
+
+#splits
+split_params:
+ learning_setting: inductive
+ data_split_dir: ${paths.data_dir}/data_splits/${dataset.loader.parameters.data_name}
+ data_seed: 0
+ split_type: random #'k-fold' # either "k-fold" or "random" strategies
+ k: 10 # for "k-fold" Cross-Validation
+ train_prop: 0.5 # for "random" strategy splitting
+
+# Dataloader parameters
+dataloader_params:
+ batch_size: 5
+ num_workers: 0
+ pin_memory: False
+ persistent_workers: False
\ No newline at end of file
diff --git a/configs/dataset/simplicial/mantra_orientation.yaml b/configs/dataset/simplicial/mantra_orientation.yaml
new file mode 100755
index 00000000..3bf0297b
--- /dev/null
+++ b/configs/dataset/simplicial/mantra_orientation.yaml
@@ -0,0 +1,40 @@
+loader:
+ _target_: topobenchmark.data.loaders.MantraSimplicialDatasetLoader
+ parameters:
+ data_domain: simplicial
+ data_type: topological
+ data_name: MANTRA
+ data_dir: ${paths.data_dir}/${dataset.loader.parameters.data_domain}/${dataset.loader.parameters.data_type}
+ manifold_dim: 2
+ version: "v0.0.5"
+ task_variable: "orientable" # Options: ['name', 'genus', 'orientable'] To use 'torsion_coefficients', 'betti_numbers' fix multilabel multiclass issue
+ model_domain: ${model.model_domain}
+
+# Data definition
+parameters:
+  # In the case of higher-order datasets we have multiple feature dimensions
+  num_features: [1,1,1]
+  num_classes: 2 # Number of classes depends on the task_variable
+
+ # Dataset parameters
+ task: classification
+ loss_type: cross_entropy
+ monitor_metric: accuracy
+ task_level: graph
+ data_seed: 0
+
+#splits
+split_params:
+ learning_setting: inductive
+ data_split_dir: ${paths.data_dir}/data_splits/${dataset.loader.parameters.data_name}
+ data_seed: 0
+ split_type: random #'k-fold' # either "k-fold" or "random" strategies
+ k: 10 # for "k-fold" Cross-Validation
+ train_prop: 0.5 # for "random" strategy splitting
+
+# Dataloader parameters
+dataloader_params:
+ batch_size: 5
+ num_workers: 0
+ pin_memory: False
+ persistent_workers: False
\ No newline at end of file
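A quick way to smoke-test one of the new MANTRA configs from the command line is sketched below; the dataset config name follows the file paths added above, while the simplicial model name (`model=simplicial/scn`) is only an assumption about what is available under `configs/model/simplicial/`:

```
python -m topobenchmark dataset=simplicial/mantra_orientation model=simplicial/scn dataset.dataloader_params.batch_size=5
```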
diff --git a/configs/evaluator/classification.yaml b/configs/evaluator/classification.yaml
index 8febe71b..a3deb8f9 100755
--- a/configs/evaluator/classification.yaml
+++ b/configs/evaluator/classification.yaml
@@ -1,7 +1,7 @@
-_target_: topobenchmarkx.evaluator.evaluator.TBXEvaluator
+_target_: topobenchmark.evaluator.evaluator.TBEvaluator
task: ${dataset.parameters.task}
task_level: ${dataset.parameters.task_level}
num_classes: ${dataset.parameters.num_classes}
# Metrics
-metrics: [accuracy, precision, recall, auroc] #Available options: accuracy, auroc, precision, recall
\ No newline at end of file
+metrics: [accuracy, precision, recall, auroc] # Available options: accuracy, auroc, precision, recall
\ No newline at end of file
diff --git a/configs/evaluator/default.yaml b/configs/evaluator/default.yaml
index 9b4766a6..8095f97c 100755
--- a/configs/evaluator/default.yaml
+++ b/configs/evaluator/default.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.evaluator.evaluator.TBXEvaluator
+_target_: topobenchmark.evaluator.evaluator.TBEvaluator
task: ${dataset.parameters.task}
task_level: ${dataset.parameters.task_level}
num_classes: ${dataset.parameters.num_classes}
@@ -6,5 +6,5 @@ num_classes: ${dataset.parameters.num_classes}
# Automatically selects the default metrics depending on the task
# Classification: [accuracy, precision, recall, auroc]
# Regression: [mae, mse]
-metrics: ${get_default_metrics:${evaluator.task}}
+metrics: ${get_default_metrics:${evaluator.task},${oc.select:dataset.parameters.metrics,null}}
# Select classification/regression config files to manually define the metrics
\ No newline at end of file
diff --git a/configs/evaluator/regression.yaml b/configs/evaluator/regression.yaml
index aa2dfd61..4c77fd07 100755
--- a/configs/evaluator/regression.yaml
+++ b/configs/evaluator/regression.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.evaluator.evaluator.TBXEvaluator
+_target_: topobenchmark.evaluator.evaluator.TBEvaluator
task: ${dataset.parameters.task}
task_level: ${dataset.parameters.task_level}
num_classes: ${dataset.parameters.num_classes}
diff --git a/configs/loss/default.yaml b/configs/loss/default.yaml
index ca1742d7..c97f5c8b 100644
--- a/configs/loss/default.yaml
+++ b/configs/loss/default.yaml
@@ -1,7 +1,9 @@
-_target_: topobenchmarkx.loss.TBXLoss
+_target_: topobenchmark.loss.TBLoss
+
dataset_loss:
task: ${dataset.parameters.task}
loss_type: ${dataset.parameters.loss_type}
+
modules_losses: # Collect model losses
feature_encoder: ${oc.select:model.feature_encoder.loss,null}
backbone: ${oc.select:model.backbone.loss,null}
diff --git a/configs/model/cell/can.yaml b/configs/model/cell/can.yaml
index 2767ff42..3722a2b5 100755
--- a/configs/model/cell/can.yaml
+++ b/configs/model/cell/can.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: can
model_domain: cell
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 128
@@ -26,16 +26,16 @@ backbone:
att_lift: False
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.CANWrapper
+ _target_: topobenchmark.nn.wrappers.CANWrapper
_partial_: true
wrapper_name: CANWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/cell/cccn.yaml b/configs/model/cell/cccn.yaml
index da6e7dd7..96ac9058 100755
--- a/configs/model/cell/cccn.yaml
+++ b/configs/model/cell/cccn.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: cccn
model_domain: cell
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -14,22 +14,22 @@ feature_encoder:
- 1
backbone:
- _target_: topobenchmarkx.nn.backbones.cell.cccn.CCCN
+ _target_: topobenchmark.nn.backbones.cell.cccn.CCCN
in_channels: ${model.feature_encoder.out_channels}
n_layers: 4
dropout: 0.0
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.CCCNWrapper
+ _target_: topobenchmark.nn.wrappers.CCCNWrapper
_partial_: true
wrapper_name: CCCNWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/cell/ccxn.yaml b/configs/model/cell/ccxn.yaml
index cdf45b1c..40a289cf 100755
--- a/configs/model/cell/ccxn.yaml
+++ b/configs/model/cell/ccxn.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: ccxn
model_domain: cell
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -22,16 +22,16 @@ backbone_additional_params:
hidden_channels: ${model.feature_encoder.out_channels}
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.CCXNWrapper
+ _target_: topobenchmark.nn.wrappers.CCXNWrapper
_partial_: true
wrapper_name: CCXNWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/cell/cwn.yaml b/configs/model/cell/cwn.yaml
index e8a64f3c..86062bb0 100755
--- a/configs/model/cell/cwn.yaml
+++ b/configs/model/cell/cwn.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: cwn
model_domain: cell
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 64
@@ -19,16 +19,16 @@ backbone:
n_layers: 4
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.CWNWrapper
+ _target_: topobenchmark.nn.wrappers.CWNWrapper
_partial_: true
wrapper_name: CWNWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/cell/topotune.yaml b/configs/model/cell/topotune.yaml
index f9f2f0ad..d395390d 100755
--- a/configs/model/cell/topotune.yaml
+++ b/configs/model/cell/topotune.yaml
@@ -1,11 +1,11 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: topotune
model_domain: cell
tune_gnn: IdentityGCN
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -16,9 +16,9 @@ feature_encoder:
- 2
backbone:
- _target_: topobenchmarkx.nn.backbones.combinatorial.gccn.TopoTune
+ _target_: topobenchmark.nn.backbones.combinatorial.gccn.TopoTune
GNN:
- _target_: topobenchmarkx.nn.backbones.graph.${model.tune_gnn}
+ _target_: topobenchmark.nn.backbones.graph.${model.tune_gnn}
in_channels: ${model.feature_encoder.out_channels}
out_channels: ${model.feature_encoder.out_channels}
hidden_channels: ${model.feature_encoder.out_channels}
@@ -35,16 +35,16 @@ backbone:
activation: relu
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.combinatorial.TuneWrapper
+ _target_: topobenchmark.nn.wrappers.combinatorial.TuneWrapper
_partial_: true
wrapper_name: TuneWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/cell/topotune_onehasse.yaml b/configs/model/cell/topotune_onehasse.yaml
index ed4324d7..a2d41177 100644
--- a/configs/model/cell/topotune_onehasse.yaml
+++ b/configs/model/cell/topotune_onehasse.yaml
@@ -1,11 +1,11 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: topotune_onehasse
model_domain: cell
tune_gnn: IdentityGCN
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -16,9 +16,9 @@ feature_encoder:
- 2
backbone:
- _target_: topobenchmarkx.nn.backbones.combinatorial.gccn_onehasse.TopoTune_OneHasse
+ _target_: topobenchmark.nn.backbones.combinatorial.gccn_onehasse.TopoTune_OneHasse
GNN:
- _target_: topobenchmarkx.nn.backbones.graph.${model.tune_gnn}
+ _target_: topobenchmark.nn.backbones.graph.${model.tune_gnn}
in_channels: ${model.feature_encoder.out_channels}
out_channels: ${model.feature_encoder.out_channels}
hidden_channels: ${model.feature_encoder.out_channels}
@@ -34,16 +34,16 @@ backbone:
activation: relu
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.combinatorial.TuneWrapper
+ _target_: topobenchmark.nn.wrappers.combinatorial.TuneWrapper
_partial_: true
wrapper_name: TuneWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/graph/gat.yaml b/configs/model/graph/gat.yaml
index 1841d3c2..e2bf7709 100755
--- a/configs/model/graph/gat.yaml
+++ b/configs/model/graph/gat.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: gat
model_domain: graph
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -22,16 +22,16 @@ backbone:
concat: true
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.GNNWrapper
+ _target_: topobenchmark.nn.wrappers.GNNWrapper
_partial_: true
wrapper_name: GNNWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: NoReadOut # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/graph/gcn.yaml b/configs/model/graph/gcn.yaml
index 36bf8ca4..a200c8e6 100755
--- a/configs/model/graph/gcn.yaml
+++ b/configs/model/graph/gcn.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: gcn
model_domain: graph
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 64
@@ -19,16 +19,16 @@ backbone:
act: relu
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.GNNWrapper
+ _target_: topobenchmark.nn.wrappers.GNNWrapper
_partial_: true
wrapper_name: GNNWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: NoReadOut # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/graph/gcn_dgm.yaml b/configs/model/graph/gcn_dgm.yaml
new file mode 100755
index 00000000..e5ed7ac9
--- /dev/null
+++ b/configs/model/graph/gcn_dgm.yaml
@@ -0,0 +1,41 @@
+_target_: topobenchmark.model.TBModel
+
+model_name: gcn
+model_domain: graph
+
+feature_encoder:
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
+ encoder_name: DGMStructureFeatureEncoder
+ in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
+ out_channels: 64
+ proj_dropout: 0.0
+ loss:
+ _target_: topobenchmark.loss.model.DGMLoss
+ loss_weight: 10
+
+backbone:
+ _target_: torch_geometric.nn.models.GCN
+ in_channels: ${model.feature_encoder.out_channels}
+ hidden_channels: ${model.feature_encoder.out_channels}
+ num_layers: 1
+ dropout: 0.0
+ act: relu
+
+backbone_wrapper:
+ _target_: topobenchmark.nn.wrappers.GNNWrapper
+ _partial_: true
+ wrapper_name: GNNWrapper
+ out_channels: ${model.feature_encoder.out_channels}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+
+readout:
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
+ readout_name: NoReadOut # Use in case readout is not needed Options: PropagateSignalDown
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ hidden_dim: ${model.feature_encoder.out_channels}
+ out_channels: ${dataset.parameters.num_classes}
+ task_level: ${dataset.parameters.task_level}
+ pooling_type: sum
+
+# compile model for faster training with pytorch 2.0
+compile: false
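The new `gcn_dgm` config pairs a plain `torch_geometric` GCN backbone with the `DGMStructureFeatureEncoder` and an auxiliary `DGMLoss` (loss_weight 10). A minimal invocation, assuming any of the graph dataset configs above, would be:

```
python -m topobenchmark model=graph/gcn_dgm dataset=graph/cocitation_cora
```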
diff --git a/configs/model/graph/gin.yaml b/configs/model/graph/gin.yaml
index 6f941c95..c05336e6 100755
--- a/configs/model/graph/gin.yaml
+++ b/configs/model/graph/gin.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: gin
model_domain: graph
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -19,16 +19,16 @@ backbone:
act: relu
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.GNNWrapper
+ _target_: topobenchmark.nn.wrappers.GNNWrapper
_partial_: true
wrapper_name: GNNWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: NoReadOut # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/graph/graph_mlp.yaml b/configs/model/graph/graph_mlp.yaml
index 34fe5072..20374c0f 100755
--- a/configs/model/graph/graph_mlp.yaml
+++ b/configs/model/graph/graph_mlp.yaml
@@ -1,38 +1,38 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: GraphMLP
model_domain: graph
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
proj_dropout: 0.0
backbone:
- _target_: topobenchmarkx.nn.backbones.GraphMLP
+ _target_: topobenchmark.nn.backbones.GraphMLP
in_channels: ${model.feature_encoder.out_channels}
hidden_channels: ${model.feature_encoder.out_channels}
order: 2
dropout: 0.0
loss:
- _target_: topobenchmarkx.loss.model.GraphMLPLoss
+ _target_: topobenchmark.loss.model.GraphMLPLoss
r_adj_power: 2
tau: 1.
loss_weight: 0.5
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.GraphMLPWrapper
+ _target_: topobenchmark.nn.wrappers.GraphMLPWrapper
_partial_: true
wrapper_name: GraphMLPWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: NoReadOut # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
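GraphMLP, newly listed in the README model table, trains with the auxiliary `GraphMLPLoss` configured here (`r_adj_power`, `tau`, `loss_weight`). It can be run like any other graph model, for example:

```
python -m topobenchmark model=graph/graph_mlp dataset=graph/cocitation_cora
```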
diff --git a/configs/model/hypergraph/alldeepset.yaml b/configs/model/hypergraph/alldeepset.yaml
index 8e251a16..4c9b6cbc 100755
--- a/configs/model/hypergraph/alldeepset.yaml
+++ b/configs/model/hypergraph/alldeepset.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: alldeepset
model_domain: hypergraph
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -27,16 +27,16 @@ backbone:
#num_features: ${model.backbone.hidden_channels}
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.HypergraphWrapper
+ _target_: topobenchmark.nn.wrappers.HypergraphWrapper
_partial_: true
wrapper_name: HypergraphWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/hypergraph/allsettransformer.yaml b/configs/model/hypergraph/allsettransformer.yaml
index c23133f4..70b1c66f 100755
--- a/configs/model/hypergraph/allsettransformer.yaml
+++ b/configs/model/hypergraph/allsettransformer.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: allsettransformer
model_domain: hypergraph
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 128
@@ -21,16 +21,16 @@ backbone:
mlp_dropout: 0.
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.HypergraphWrapper
+ _target_: topobenchmark.nn.wrappers.HypergraphWrapper
_partial_: true
wrapper_name: HypergraphWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/hypergraph/edgnn.yaml b/configs/model/hypergraph/edgnn.yaml
index 02e575be..2156beb1 100755
--- a/configs/model/hypergraph/edgnn.yaml
+++ b/configs/model/hypergraph/edgnn.yaml
@@ -1,17 +1,17 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: edgnn
model_domain: hypergraph
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 128
proj_dropout: 0.0
backbone:
- _target_: topobenchmarkx.nn.backbones.hypergraph.edgnn.EDGNN
+ _target_: topobenchmark.nn.backbones.hypergraph.edgnn.EDGNN
num_features: ${model.feature_encoder.out_channels}
input_dropout: 0.
dropout: 0.
@@ -22,16 +22,16 @@ backbone:
aggregate: 'add'
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.HypergraphWrapper
+ _target_: topobenchmark.nn.wrappers.HypergraphWrapper
_partial_: true
wrapper_name: HypergraphWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/hypergraph/unignn.yaml b/configs/model/hypergraph/unignn.yaml
index cb1d279a..d3126794 100755
--- a/configs/model/hypergraph/unignn.yaml
+++ b/configs/model/hypergraph/unignn.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: unignn2
model_domain: hypergraph
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -17,16 +17,16 @@ backbone:
n_layers: 1
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.HypergraphWrapper
+ _target_: topobenchmark.nn.wrappers.HypergraphWrapper
_partial_: true
wrapper_name: HypergraphWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/hypergraph/unignn2.yaml b/configs/model/hypergraph/unignn2.yaml
index f3f8dc4b..e99b0c6f 100755
--- a/configs/model/hypergraph/unignn2.yaml
+++ b/configs/model/hypergraph/unignn2.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: unignn2
model_domain: hypergraph
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 128
@@ -21,16 +21,16 @@ backbone:
layer_drop: 0.0
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.HypergraphWrapper
+ _target_: topobenchmark.nn.wrappers.HypergraphWrapper
_partial_: true
wrapper_name: HypergraphWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/simplicial/san.yaml b/configs/model/simplicial/san.yaml
index 338a634f..67c45d62 100755
--- a/configs/model/simplicial/san.yaml
+++ b/configs/model/simplicial/san.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: san
model_domain: simplicial
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 64
@@ -23,16 +23,16 @@ backbone:
epsilon_harmonic: 1e-1
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.SANWrapper
+ _target_: topobenchmark.nn.wrappers.SANWrapper
_partial_: true
wrapper_name: SANWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/simplicial/sccn.yaml b/configs/model/simplicial/sccn.yaml
index 91144079..7c34a6f1 100755
--- a/configs/model/simplicial/sccn.yaml
+++ b/configs/model/simplicial/sccn.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: sccn
model_domain: simplicial
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -18,16 +18,16 @@ backbone:
update_func: "sigmoid"
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.SCCNWrapper
+ _target_: topobenchmark.nn.wrappers.SCCNWrapper
_partial_: true
wrapper_name: SCCNWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/simplicial/sccnn.yaml b/configs/model/simplicial/sccnn.yaml
index e631c7e2..3b11ea34 100755
--- a/configs/model/simplicial/sccnn.yaml
+++ b/configs/model/simplicial/sccnn.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: sccnn
model_domain: simplicial
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -31,16 +31,16 @@ backbone:
n_layers: 1
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.SCCNNWrapper
+ _target_: topobenchmark.nn.wrappers.SCCNNWrapper
_partial_: true
wrapper_name: SCCNNWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/simplicial/sccnn_custom.yaml b/configs/model/simplicial/sccnn_custom.yaml
index 09697984..1617670d 100755
--- a/configs/model/simplicial/sccnn_custom.yaml
+++ b/configs/model/simplicial/sccnn_custom.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: sccnn
model_domain: simplicial
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -15,7 +15,7 @@ feature_encoder:
- 2
backbone:
- _target_: topobenchmarkx.nn.backbones.simplicial.sccnn.SCCNNCustom
+ _target_: topobenchmark.nn.backbones.simplicial.sccnn.SCCNNCustom
in_channels_all:
- ${model.feature_encoder.out_channels}
- ${model.feature_encoder.out_channels}
@@ -31,16 +31,16 @@ backbone:
n_layers: 1
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.SCCNNWrapper
+ _target_: topobenchmark.nn.wrappers.SCCNNWrapper
_partial_: true
wrapper_name: SCCNNWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/simplicial/scn.yaml b/configs/model/simplicial/scn.yaml
index c6a0f0a8..1cfce74e 100755
--- a/configs/model/simplicial/scn.yaml
+++ b/configs/model/simplicial/scn.yaml
@@ -1,10 +1,10 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: scn
model_domain: simplicial
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -22,16 +22,16 @@ backbone:
n_layers: 1
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.SCNWrapper
+ _target_: topobenchmark.nn.wrappers.SCNWrapper
_partial_: true
wrapper_name: SCNWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/simplicial/topotune.yaml b/configs/model/simplicial/topotune.yaml
index 4e7bf859..7ad639d7 100755
--- a/configs/model/simplicial/topotune.yaml
+++ b/configs/model/simplicial/topotune.yaml
@@ -1,11 +1,11 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: topotune
model_domain: simplicial
tune_gnn: GIN
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -16,9 +16,9 @@ feature_encoder:
- 2
backbone:
- _target_: topobenchmarkx.nn.backbones.combinatorial.gccn.TopoTune
+ _target_: topobenchmark.nn.backbones.combinatorial.gccn.TopoTune
GNN:
- _target_: topobenchmarkx.nn.backbones.graph.${model.tune_gnn}
+ _target_: topobenchmark.nn.backbones.graph.${model.tune_gnn}
in_channels: ${model.feature_encoder.out_channels}
out_channels: ${model.feature_encoder.out_channels}
hidden_channels: ${model.feature_encoder.out_channels}
@@ -35,16 +35,16 @@ backbone:
activation: relu
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.combinatorial.TuneWrapper
+ _target_: topobenchmark.nn.wrappers.combinatorial.TuneWrapper
_partial_: true
wrapper_name: TuneWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/model/simplicial/topotune_onehasse.yaml b/configs/model/simplicial/topotune_onehasse.yaml
index 4bf21276..e903adbd 100644
--- a/configs/model/simplicial/topotune_onehasse.yaml
+++ b/configs/model/simplicial/topotune_onehasse.yaml
@@ -1,11 +1,11 @@
-_target_: topobenchmarkx.model.TBXModel
+_target_: topobenchmark.model.TBModel
model_name: topotune_onehasse
model_domain: simplicial
tune_gnn: GCN
feature_encoder:
- _target_: topobenchmarkx.nn.encoders.${model.feature_encoder.encoder_name}
+ _target_: topobenchmark.nn.encoders.${model.feature_encoder.encoder_name}
encoder_name: AllCellFeatureEncoder
in_channels: ${infer_in_channels:${dataset},${oc.select:transforms,null}}
out_channels: 32
@@ -16,9 +16,9 @@ feature_encoder:
- 2
backbone:
- _target_: topobenchmarkx.nn.backbones.combinatorial.gccn_onehasse.TopoTune_OneHasse
+ _target_: topobenchmark.nn.backbones.combinatorial.gccn_onehasse.TopoTune_OneHasse
GNN:
- _target_: topobenchmarkx.nn.backbones.graph.${model.tune_gnn}
+ _target_: topobenchmark.nn.backbones.graph.${model.tune_gnn}
in_channels: ${model.feature_encoder.out_channels}
out_channels: ${model.feature_encoder.out_channels}
hidden_channels: ${model.feature_encoder.out_channels}
@@ -34,16 +34,16 @@ backbone:
activation: relu
backbone_wrapper:
- _target_: topobenchmarkx.nn.wrappers.combinatorial.TuneWrapper
+ _target_: topobenchmark.nn.wrappers.combinatorial.TuneWrapper
_partial_: true
wrapper_name: TuneWrapper
out_channels: ${model.feature_encoder.out_channels}
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}}
readout:
- _target_: topobenchmarkx.nn.readouts.${model.readout.readout_name}
+ _target_: topobenchmark.nn.readouts.${model.readout.readout_name}
readout_name: PropagateSignalDown # Use in case readout is not needed Options: PropagateSignalDown
- num_cell_dimensions: ${infere_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
+ num_cell_dimensions: ${infer_num_cell_dimensions:${oc.select:model.feature_encoder.selected_dimensions,null},${model.feature_encoder.in_channels}} # The highest order of cell dimensions to consider
hidden_dim: ${model.feature_encoder.out_channels}
out_channels: ${dataset.parameters.num_classes}
task_level: ${dataset.parameters.task_level}
diff --git a/configs/optimizer/default.yaml b/configs/optimizer/default.yaml
index cb76ab94..80372487 100644
--- a/configs/optimizer/default.yaml
+++ b/configs/optimizer/default.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.optimizer.TBXOptimizer
+_target_: topobenchmark.optimizer.TBOptimizer
# Full compatibility with all available torch optimizers and schedulers
optimizer_id: Adam # torch id of the optimizer
diff --git a/configs/run.yaml b/configs/run.yaml
index 8ec582f4..c4dc1f77 100755
--- a/configs/run.yaml
+++ b/configs/run.yaml
@@ -4,8 +4,8 @@
# order of defaults determines the order in which configs override each other
defaults:
- _self_
- - dataset: graph/cocitation_cora
- - model: cell/topotune
+ - dataset: simplicial/mantra_orientation
+ - model: simplicial/scn
- transforms: ${get_default_transform:${dataset},${model}} #no_transform
- optimizer: default
- loss: default
diff --git a/configs/transforms/data_manipulations/data_fields_to_dense.yaml b/configs/transforms/data_manipulations/data_fields_to_dense.yaml
index 40fa2e71..0c5da8e0 100755
--- a/configs/transforms/data_manipulations/data_fields_to_dense.yaml
+++ b/configs/transforms/data_manipulations/data_fields_to_dense.yaml
@@ -1,3 +1,3 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "DataFieldsToDense"
transform_type: "data manipulation"
\ No newline at end of file
diff --git a/configs/transforms/data_manipulations/equal_gaus_features.yaml b/configs/transforms/data_manipulations/equal_gaus_features.yaml
index c671ea7a..f918552c 100755
--- a/configs/transforms/data_manipulations/equal_gaus_features.yaml
+++ b/configs/transforms/data_manipulations/equal_gaus_features.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "EqualGausFeatures"
transform_type: "data manipulation"
diff --git a/configs/transforms/data_manipulations/group_homophily.yaml b/configs/transforms/data_manipulations/group_homophily.yaml
index d07de392..0699e1a5 100755
--- a/configs/transforms/data_manipulations/group_homophily.yaml
+++ b/configs/transforms/data_manipulations/group_homophily.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "GroupCombinatorialHomophily"
transform_type: "data manipulation"
top_k: 10
diff --git a/configs/transforms/data_manipulations/identity.yaml b/configs/transforms/data_manipulations/identity.yaml
index c5deadbe..422be56f 100755
--- a/configs/transforms/data_manipulations/identity.yaml
+++ b/configs/transforms/data_manipulations/identity.yaml
@@ -1,3 +1,3 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "Identity"
transform_type: null
\ No newline at end of file
diff --git a/configs/transforms/data_manipulations/infere_knn_connectivity.yaml b/configs/transforms/data_manipulations/infere_knn_connectivity.yaml
index 43dc8680..a403f028 100755
--- a/configs/transforms/data_manipulations/infere_knn_connectivity.yaml
+++ b/configs/transforms/data_manipulations/infere_knn_connectivity.yaml
@@ -1,8 +1,7 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "InfereKNNConnectivity"
transform_type: "data manipulation"
args:
- k: 5 # Number of nearest neighbors to consider
+ k: 40 # Number of nearest neighbors to consider
 cosine: false # If true, use the cosine distance instead of the Euclidean distance to find nearest neighbors. (Note: setting this option to true currently gives an error)
 loop: false # If true, the graph will contain self-loops. Note: using true and then applying a simplicial lifting leads to an error, because the resulting self-edges cause a simplex to contain duplicated nodes.
-
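For context, a hedged sketch of how Hydra turns a config block like the one above into an object: hydra.utils.instantiate reads the _target_ entry and passes the remaining keys as keyword arguments. The values below mirror the updated file verbatim; that the package is installed and that DataTransform accepts these keyword arguments are assumptions.

    from hydra.utils import instantiate
    from omegaconf import OmegaConf

    # Mirrors configs/transforms/data_manipulations/infere_knn_connectivity.yaml
    knn_cfg = OmegaConf.create(
        {
            "_target_": "topobenchmark.transforms.data_transform.DataTransform",
            "transform_name": "InfereKNNConnectivity",
            "transform_type": "data manipulation",
            "args": {"k": 40, "cosine": False, "loop": False},
        }
    )
    knn_transform = instantiate(knn_cfg)  # transform object applied to each data sample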
diff --git a/configs/transforms/data_manipulations/infere_radius_connectivity.yaml b/configs/transforms/data_manipulations/infere_radius_connectivity.yaml
index d96fe764..bf4936e4 100755
--- a/configs/transforms/data_manipulations/infere_radius_connectivity.yaml
+++ b/configs/transforms/data_manipulations/infere_radius_connectivity.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "InfereRadiusConnectivity"
transform_type: "data manipulation"
args:
diff --git a/configs/transforms/data_manipulations/infere_tree.yaml b/configs/transforms/data_manipulations/infere_tree.yaml
new file mode 100755
index 00000000..23944323
--- /dev/null
+++ b/configs/transforms/data_manipulations/infere_tree.yaml
@@ -0,0 +1,3 @@
+_target_: topobenchmark.transforms.data_transform.DataTransform
+transform_name: "InferTreeConnectivity"
+#split_params: ${dataset.split_params}
diff --git a/configs/transforms/data_manipulations/keep_connected_component.yaml b/configs/transforms/data_manipulations/keep_connected_component.yaml
index 57515139..b0fee212 100644
--- a/configs/transforms/data_manipulations/keep_connected_component.yaml
+++ b/configs/transforms/data_manipulations/keep_connected_component.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "KeepOnlyConnectedComponent"
transform_type: "data manipulation"
num_components: 1
\ No newline at end of file
diff --git a/configs/transforms/data_manipulations/keep_selected_fields.yaml b/configs/transforms/data_manipulations/keep_selected_fields.yaml
index 864f80d3..8997cc12 100644
--- a/configs/transforms/data_manipulations/keep_selected_fields.yaml
+++ b/configs/transforms/data_manipulations/keep_selected_fields.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "KeepSelectedDataFields"
transform_type: "data manipulation"
# Fields that must be kept for the pipeline
diff --git a/configs/transforms/data_manipulations/mp_homophily.yaml b/configs/transforms/data_manipulations/mp_homophily.yaml
index 431b5371..b02b1dcd 100755
--- a/configs/transforms/data_manipulations/mp_homophily.yaml
+++ b/configs/transforms/data_manipulations/mp_homophily.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "MessagePassingHomophily"
transform_type: "data manipulation"
num_steps: 10
diff --git a/configs/transforms/data_manipulations/node_degrees.yaml b/configs/transforms/data_manipulations/node_degrees.yaml
index 1d666d32..14b6cb34 100755
--- a/configs/transforms/data_manipulations/node_degrees.yaml
+++ b/configs/transforms/data_manipulations/node_degrees.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "NodeDegrees"
transform_type: "data manipulation"
selected_fields: ["edge_index"] # "incidence"
diff --git a/configs/transforms/data_manipulations/node_feat_to_float.yaml b/configs/transforms/data_manipulations/node_feat_to_float.yaml
index 53686954..e66be399 100755
--- a/configs/transforms/data_manipulations/node_feat_to_float.yaml
+++ b/configs/transforms/data_manipulations/node_feat_to_float.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "NodeFeaturesToFloat"
transform_type: "data manipulation"
diff --git a/configs/transforms/data_manipulations/one_hot_node_degree_features.yaml b/configs/transforms/data_manipulations/one_hot_node_degree_features.yaml
index 573d5248..9e14c022 100755
--- a/configs/transforms/data_manipulations/one_hot_node_degree_features.yaml
+++ b/configs/transforms/data_manipulations/one_hot_node_degree_features.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "OneHotDegreeFeatures"
transform_type: "data manipulation"
diff --git a/configs/transforms/data_manipulations/remove_extra_feature.yaml b/configs/transforms/data_manipulations/remove_extra_feature.yaml
index 9bca7003..b0cb693a 100755
--- a/configs/transforms/data_manipulations/remove_extra_feature.yaml
+++ b/configs/transforms/data_manipulations/remove_extra_feature.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "RemoveExtraFeatureFromProteins"
transform_type: "data manipulation"
remove_first_n_features: 1
diff --git a/configs/transforms/data_manipulations/simplicial_curvature.yaml b/configs/transforms/data_manipulations/simplicial_curvature.yaml
index 2fb00b26..75aee95f 100755
--- a/configs/transforms/data_manipulations/simplicial_curvature.yaml
+++ b/configs/transforms/data_manipulations/simplicial_curvature.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "CalculateSimplicialCurvature"
transform_type: "data manipulation"
diff --git a/configs/transforms/dataset_defaults/REDDIT-BINARY.yaml b/configs/transforms/dataset_defaults/REDDIT-BINARY.yaml
index 12b670e3..18fbcd21 100755
--- a/configs/transforms/dataset_defaults/REDDIT-BINARY.yaml
+++ b/configs/transforms/dataset_defaults/REDDIT-BINARY.yaml
@@ -1,3 +1,3 @@
defaults:
- - data_manipulations: equal_gaus_features
+ - data_manipulations@equal_gaus_features: equal_gaus_features
- liftings@_here_: ${get_required_lifting:graph,${model}}
diff --git a/configs/transforms/dataset_defaults/ZINC.yaml b/configs/transforms/dataset_defaults/ZINC.yaml
index 1c759853..d73b8c9b 100644
--- a/configs/transforms/dataset_defaults/ZINC.yaml
+++ b/configs/transforms/dataset_defaults/ZINC.yaml
@@ -1,4 +1,4 @@
-# USE python -m topobenchmarkx transforms.one_hot_node_degree_features.degrees_fields=x to run this config
+# Use python -m topobenchmark transforms.one_hot_node_degree_features.degrees_fields=x to run this config
defaults:
- data_manipulations: node_degrees
- data_manipulations@one_hot_node_degree_features: one_hot_node_degree_features
diff --git a/configs/transforms/feature_liftings/base_lifting.yaml b/configs/transforms/feature_liftings/base_lifting.yaml
index 12036020..d7e038be 100755
--- a/configs/transforms/feature_liftings/base_lifting.yaml
+++ b/configs/transforms/feature_liftings/base_lifting.yaml
@@ -1,3 +1,3 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "ProjectionSum"
transform_type: "feature_lifting"
\ No newline at end of file
diff --git a/configs/transforms/feature_liftings/concatenate.yaml b/configs/transforms/feature_liftings/concatenate.yaml
index 6c621489..13e5e9b3 100755
--- a/configs/transforms/feature_liftings/concatenate.yaml
+++ b/configs/transforms/feature_liftings/concatenate.yaml
@@ -1,3 +1,3 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_name: "ConcatentionLifting"
transform_type: null
\ No newline at end of file
diff --git a/configs/transforms/knn.yaml b/configs/transforms/knn.yaml
new file mode 100644
index 00000000..bab757b9
--- /dev/null
+++ b/configs/transforms/knn.yaml
@@ -0,0 +1,2 @@
+defaults:
+ - /transforms/data_manipulations@knn: infer_knn_connectivity
\ No newline at end of file
diff --git a/configs/transforms/liftings/cell2hypergraph_default.yaml b/configs/transforms/liftings/cell2hypergraph_default.yaml
new file mode 100644
index 00000000..92ec7dff
--- /dev/null
+++ b/configs/transforms/liftings/cell2hypergraph_default.yaml
@@ -0,0 +1,2 @@
+defaults:
+ - /transforms/liftings: null
\ No newline at end of file
diff --git a/configs/transforms/liftings/cell2simplicial_default.yaml b/configs/transforms/liftings/cell2simplicial_default.yaml
new file mode 100644
index 00000000..92ec7dff
--- /dev/null
+++ b/configs/transforms/liftings/cell2simplicial_default.yaml
@@ -0,0 +1,2 @@
+defaults:
+ - /transforms/liftings: null
\ No newline at end of file
diff --git a/configs/transforms/liftings/graph2cell/cycle.yaml b/configs/transforms/liftings/graph2cell/cycle.yaml
index 23244043..d0d4b0fc 100644
--- a/configs/transforms/liftings/graph2cell/cycle.yaml
+++ b/configs/transforms/liftings/graph2cell/cycle.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_type: 'lifting'
transform_name: "CellCycleLifting"
complex_dim: ${oc.select:dataset.parameters.max_dim_if_lifted,3}
diff --git a/configs/transforms/liftings/graph2hypergraph/khop.yaml b/configs/transforms/liftings/graph2hypergraph/khop.yaml
index 9fc6d185..8b2dfe30 100755
--- a/configs/transforms/liftings/graph2hypergraph/khop.yaml
+++ b/configs/transforms/liftings/graph2hypergraph/khop.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_type: 'lifting'
transform_name: "HypergraphKHopLifting"
k_value: 1
diff --git a/configs/transforms/liftings/graph2simplicial/clique.yaml b/configs/transforms/liftings/graph2simplicial/clique.yaml
index a3419278..3a16c357 100755
--- a/configs/transforms/liftings/graph2simplicial/clique.yaml
+++ b/configs/transforms/liftings/graph2simplicial/clique.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_type: 'lifting'
transform_name: "SimplicialCliqueLifting"
complex_dim: ${oc.select:dataset.parameters.max_dim_if_lifted,3}
diff --git a/configs/transforms/liftings/graph2simplicial/khop.yaml b/configs/transforms/liftings/graph2simplicial/khop.yaml
index 02f86a9a..4330771e 100755
--- a/configs/transforms/liftings/graph2simplicial/khop.yaml
+++ b/configs/transforms/liftings/graph2simplicial/khop.yaml
@@ -1,4 +1,4 @@
-_target_: topobenchmarkx.transforms.data_transform.DataTransform
+_target_: topobenchmark.transforms.data_transform.DataTransform
transform_type: 'lifting'
transform_name: "SimplicialKHopLifting"
max_k_simplices: 5000
diff --git a/configs/transforms/liftings/hypergraph2cell_default.yaml b/configs/transforms/liftings/hypergraph2cell_default.yaml
new file mode 100644
index 00000000..92ec7dff
--- /dev/null
+++ b/configs/transforms/liftings/hypergraph2cell_default.yaml
@@ -0,0 +1,2 @@
+defaults:
+ - /transforms/liftings: null
\ No newline at end of file
diff --git a/configs/transforms/liftings/hypergraph2simplicial_default.yaml b/configs/transforms/liftings/hypergraph2simplicial_default.yaml
new file mode 100644
index 00000000..92ec7dff
--- /dev/null
+++ b/configs/transforms/liftings/hypergraph2simplicial_default.yaml
@@ -0,0 +1,2 @@
+defaults:
+ - /transforms/liftings: null
\ No newline at end of file
diff --git a/configs/transforms/liftings/simplicial2graph_default.yaml b/configs/transforms/liftings/simplicial2graph_default.yaml
new file mode 100644
index 00000000..92ec7dff
--- /dev/null
+++ b/configs/transforms/liftings/simplicial2graph_default.yaml
@@ -0,0 +1,2 @@
+defaults:
+ - /transforms/liftings: null
\ No newline at end of file
diff --git a/configs/transforms/tree.yaml b/configs/transforms/tree.yaml
new file mode 100644
index 00000000..0c691d48
--- /dev/null
+++ b/configs/transforms/tree.yaml
@@ -0,0 +1,2 @@
+defaults:
+ - /transforms/data_manipulations@tree: infer_tree
\ No newline at end of file
diff --git a/docs/api/data/index.rst b/docs/api/data/index.rst
index 06e32475..5f3eb2c7 100644
--- a/docs/api/data/index.rst
+++ b/docs/api/data/index.rst
@@ -12,35 +12,35 @@ The `data` module of `TopoBenchmarkX` consists of several submodules:
Datasets
--------
-.. automodule:: topobenchmarkx.data.datasets.us_county_demos_dataset
+.. automodule:: topobenchmark.data.datasets.us_county_demos_dataset
:members:
Load
----
-.. automodule:: topobenchmarkx.data.loaders.base
+.. automodule:: topobenchmark.data.loaders.base
:members:
-.. automodule:: topobenchmarkx.data.loaders.loaders
+.. automodule:: topobenchmark.data.loaders.loaders
:members:
Preprocess
----------
-.. automodule:: topobenchmarkx.data.preprocessor.preprocessor
+.. automodule:: topobenchmark.data.preprocessor.preprocessor
:members:
Utils
-----
-.. automodule:: topobenchmarkx.data.utils.io_utils
+.. automodule:: topobenchmark.data.utils.io_utils
:members:
-.. automodule:: topobenchmarkx.data.utils.split_utils
+.. automodule:: topobenchmark.data.utils.split_utils
:members:
-.. automodule:: topobenchmarkx.data.utils.utils
+.. automodule:: topobenchmark.data.utils.utils
:members:
\ No newline at end of file
diff --git a/docs/api/dataloader/index.rst b/docs/api/dataloader/index.rst
index 6d30dc25..e3421add 100644
--- a/docs/api/dataloader/index.rst
+++ b/docs/api/dataloader/index.rst
@@ -4,11 +4,11 @@ DataLoader
The `dataloader` module implements custom dataloaders for training.
-.. automodule:: topobenchmarkx.dataloader.dataload_dataset
+.. automodule:: topobenchmark.dataloader.dataload_dataset
:members:
-.. automodule:: topobenchmarkx.dataloader.dataloader
+.. automodule:: topobenchmark.dataloader.dataloader
:members:
-.. automodule:: topobenchmarkx.dataloader.utils
+.. automodule:: topobenchmark.dataloader.utils
:members:
\ No newline at end of file
diff --git a/docs/api/evaluator/index.rst b/docs/api/evaluator/index.rst
index c6eb7db5..167688bb 100644
--- a/docs/api/evaluator/index.rst
+++ b/docs/api/evaluator/index.rst
@@ -4,9 +4,9 @@ Evaluator
This module implements custom Python classes to evaluate performances of models in `TopoBenchmarkX`.
-.. automodule:: topobenchmarkx.evaluator.base
+.. automodule:: topobenchmark.evaluator.base
:members:
-.. automodule:: topobenchmarkx.evaluator.evaluator
+.. automodule:: topobenchmark.evaluator.evaluator
:members:
diff --git a/docs/api/loss/index.rst b/docs/api/loss/index.rst
index 4f0d195d..56ce1796 100644
--- a/docs/api/loss/index.rst
+++ b/docs/api/loss/index.rst
@@ -4,8 +4,8 @@ Loss
This module implements custom Python classes to compute losses in `TopoBenchmarkX`.
-.. automodule:: topobenchmarkx.loss.base
+.. automodule:: topobenchmark.loss.base
:members:
-.. automodule:: topobenchmarkx.loss.loss
+.. automodule:: topobenchmark.loss.loss
:members:
diff --git a/docs/api/model/index.rst b/docs/api/model/index.rst
index 839f5738..f7338935 100644
--- a/docs/api/model/index.rst
+++ b/docs/api/model/index.rst
@@ -4,5 +4,5 @@ Model
This module implements custom Python classes to represent models leveraging pytorch-lightning within `TopoBenchmarkX`.
-.. automodule:: topobenchmarkx.model.model
+.. automodule:: topobenchmark.model.model
:members:
\ No newline at end of file
diff --git a/docs/api/nn/backbones/index.rst b/docs/api/nn/backbones/index.rst
index 01909597..0dff2b96 100644
--- a/docs/api/nn/backbones/index.rst
+++ b/docs/api/nn/backbones/index.rst
@@ -2,11 +2,11 @@
Backbones
*********
-.. automodule:: topobenchmarkx.nn.backbones.cell.cccn
+.. automodule:: topobenchmark.nn.backbones.cell.cccn
:members:
-.. automodule:: topobenchmarkx.nn.backbones.hypergraph.edgnn
+.. automodule:: topobenchmark.nn.backbones.hypergraph.edgnn
:members:
-.. automodule:: topobenchmarkx.nn.backbones.simplicial.sccnn
+.. automodule:: topobenchmark.nn.backbones.simplicial.sccnn
:members:
\ No newline at end of file
diff --git a/docs/api/nn/encoders/index.rst b/docs/api/nn/encoders/index.rst
index 93f05e23..36f0ce77 100644
--- a/docs/api/nn/encoders/index.rst
+++ b/docs/api/nn/encoders/index.rst
@@ -2,8 +2,8 @@
Encoders
********
-.. automodule:: topobenchmarkx.nn.encoders.base
+.. automodule:: topobenchmark.nn.encoders.base
:members:
-.. automodule:: topobenchmarkx.nn.encoders.all_cell_encoder
+.. automodule:: topobenchmark.nn.encoders.all_cell_encoder
:members:
\ No newline at end of file
diff --git a/docs/api/nn/readouts/index.rst b/docs/api/nn/readouts/index.rst
index 3ff705c7..d2bab1c0 100644
--- a/docs/api/nn/readouts/index.rst
+++ b/docs/api/nn/readouts/index.rst
@@ -2,11 +2,11 @@
Readouts
********
-.. automodule:: topobenchmarkx.nn.readouts.base
+.. automodule:: topobenchmark.nn.readouts.base
:members:
-.. automodule:: topobenchmarkx.nn.readouts.identical
+.. automodule:: topobenchmark.nn.readouts.identical
:members:
-.. automodule:: topobenchmarkx.nn.readouts.propagate_signal_down
+.. automodule:: topobenchmark.nn.readouts.propagate_signal_down
:members:
\ No newline at end of file
diff --git a/docs/api/nn/wrappers/index.rst b/docs/api/nn/wrappers/index.rst
index b915848a..2c2d72da 100644
--- a/docs/api/nn/wrappers/index.rst
+++ b/docs/api/nn/wrappers/index.rst
@@ -2,35 +2,35 @@
Wrappers
********
-.. automodule:: topobenchmarkx.nn.wrappers.base
+.. automodule:: topobenchmark.nn.wrappers.base
:members:
-.. automodule:: topobenchmarkx.nn.wrappers.cell.can_wrapper
+.. automodule:: topobenchmark.nn.wrappers.cell.can_wrapper
:members:
-.. automodule:: topobenchmarkx.nn.wrappers.cell.cccn_wrapper
+.. automodule:: topobenchmark.nn.wrappers.cell.cccn_wrapper
:members:
-.. automodule:: topobenchmarkx.nn.wrappers.cell.ccxn_wrapper
+.. automodule:: topobenchmark.nn.wrappers.cell.ccxn_wrapper
:members:
-.. automodule:: topobenchmarkx.nn.wrappers.cell.cwn_wrapper
+.. automodule:: topobenchmark.nn.wrappers.cell.cwn_wrapper
:members:
-.. automodule:: topobenchmarkx.nn.wrappers.graph.gnn_wrapper
+.. automodule:: topobenchmark.nn.wrappers.graph.gnn_wrapper
:members:
-.. automodule:: topobenchmarkx.nn.wrappers.hypergraph.hypergraph_wrapper
+.. automodule:: topobenchmark.nn.wrappers.hypergraph.hypergraph_wrapper
:members:
-.. automodule:: topobenchmarkx.nn.wrappers.simplicial.san_wrapper
+.. automodule:: topobenchmark.nn.wrappers.simplicial.san_wrapper
:members:
-.. automodule:: topobenchmarkx.nn.wrappers.simplicial.sccn_wrapper
+.. automodule:: topobenchmark.nn.wrappers.simplicial.sccn_wrapper
:members:
-.. automodule:: topobenchmarkx.nn.wrappers.simplicial.sccnn_wrapper
+.. automodule:: topobenchmark.nn.wrappers.simplicial.sccnn_wrapper
:members:
-.. automodule:: topobenchmarkx.nn.wrappers.simplicial.scn_wrapper
+.. automodule:: topobenchmark.nn.wrappers.simplicial.scn_wrapper
:members:
\ No newline at end of file
diff --git a/docs/api/optimizer/index.rst b/docs/api/optimizer/index.rst
index 28f49b85..9e69f4bf 100644
--- a/docs/api/optimizer/index.rst
+++ b/docs/api/optimizer/index.rst
@@ -4,8 +4,8 @@ Optimizer
This module implements a custom Python class to manage `PyTorch` optimizers and learning rate schedulers in `TopoBenchmarkX`.
-.. automodule:: topobenchmarkx.optimizer.base
+.. automodule:: topobenchmark.optimizer.base
:members:
-.. automodule:: topobenchmarkx.optimizer.optimizer
+.. automodule:: topobenchmark.optimizer.optimizer
:members:
diff --git a/docs/api/transforms/data_manipulations/index.rst b/docs/api/transforms/data_manipulations/index.rst
index 3c238f72..794d0658 100644
--- a/docs/api/transforms/data_manipulations/index.rst
+++ b/docs/api/transforms/data_manipulations/index.rst
@@ -2,32 +2,32 @@
Data Manipulations
******************
-.. automodule:: topobenchmarkx.transforms.data_manipulations.calculate_simplicial_curvature
+.. automodule:: topobenchmark.transforms.data_manipulations.calculate_simplicial_curvature
:members:
-.. automodule:: topobenchmarkx.transforms.data_manipulations.equal_gaus_features
+.. automodule:: topobenchmark.transforms.data_manipulations.equal_gaus_features
:members:
-.. automodule:: topobenchmarkx.transforms.data_manipulations.identity_transform
+.. automodule:: topobenchmark.transforms.data_manipulations.identity_transform
:members:
-.. automodule:: topobenchmarkx.transforms.data_manipulations.infere_knn_connectivity
+.. automodule:: topobenchmark.transforms.data_manipulations.infer_knn_connectivity
:members:
-.. automodule:: topobenchmarkx.transforms.data_manipulations.infere_radius_connectivity
+.. automodule:: topobenchmark.transforms.data_manipulations.infer_radius_connectivity
:members:
-.. automodule:: topobenchmarkx.transforms.data_manipulations.keep_only_connected_component
+.. automodule:: topobenchmark.transforms.data_manipulations.keep_only_connected_component
:members:
-.. automodule:: topobenchmarkx.transforms.data_manipulations.keep_selected_data_fields
+.. automodule:: topobenchmark.transforms.data_manipulations.keep_selected_data_fields
:members:
-.. automodule:: topobenchmarkx.transforms.data_manipulations.node_degrees
+.. automodule:: topobenchmark.transforms.data_manipulations.node_degrees
:members:
-.. automodule:: topobenchmarkx.transforms.data_manipulations.node_features_to_float
+.. automodule:: topobenchmark.transforms.data_manipulations.node_features_to_float
:members:
-.. automodule:: topobenchmarkx.transforms.data_manipulations.one_hot_degree_features
+.. automodule:: topobenchmark.transforms.data_manipulations.one_hot_degree_features
:members:
\ No newline at end of file
diff --git a/docs/api/transforms/data_transform/index.rst b/docs/api/transforms/data_transform/index.rst
index 046d2271..8550d47f 100644
--- a/docs/api/transforms/data_transform/index.rst
+++ b/docs/api/transforms/data_transform/index.rst
@@ -2,5 +2,5 @@
Data Transform
**************
-.. automodule:: topobenchmarkx.transforms.data_transform
+.. automodule:: topobenchmark.transforms.data_transform
:members:
\ No newline at end of file
diff --git a/docs/api/transforms/feature_liftings/index.rst b/docs/api/transforms/feature_liftings/index.rst
index f79244a7..b71f1c13 100644
--- a/docs/api/transforms/feature_liftings/index.rst
+++ b/docs/api/transforms/feature_liftings/index.rst
@@ -2,14 +2,14 @@
Feature Liftings
****************
-.. automodule:: topobenchmarkx.transforms.feature_liftings.concatenation
+.. automodule:: topobenchmark.transforms.feature_liftings.concatenation
:members:
-.. automodule:: topobenchmarkx.transforms.feature_liftings.identity
+.. automodule:: topobenchmark.transforms.feature_liftings.identity
:members:
-.. automodule:: topobenchmarkx.transforms.feature_liftings.projection_sum
+.. automodule:: topobenchmark.transforms.feature_liftings.projection_sum
:members:
-.. automodule:: topobenchmarkx.transforms.feature_liftings.set
+.. automodule:: topobenchmark.transforms.feature_liftings.set
:members:
\ No newline at end of file
diff --git a/docs/api/transforms/liftings/index.rst b/docs/api/transforms/liftings/index.rst
index 8ec4228d..5fe244e6 100644
--- a/docs/api/transforms/liftings/index.rst
+++ b/docs/api/transforms/liftings/index.rst
@@ -2,32 +2,32 @@
Liftings
********
-.. automodule:: topobenchmarkx.transforms.liftings.base
+.. automodule:: topobenchmark.transforms.liftings.base
:members:
-.. automodule:: topobenchmarkx.transforms.liftings
+.. automodule:: topobenchmark.transforms.liftings
:members:
-.. automodule:: topobenchmarkx.transforms.liftings.graph2cell.base
+.. automodule:: topobenchmark.transforms.liftings.graph2cell.base
:members:
-.. automodule:: topobenchmarkx.transforms.liftings.graph2cell.cycle
+.. automodule:: topobenchmark.transforms.liftings.graph2cell.cycle
:members:
-.. automodule:: topobenchmarkx.transforms.liftings.graph2hypergraph.base
+.. automodule:: topobenchmark.transforms.liftings.graph2hypergraph.base
:members:
-.. automodule:: topobenchmarkx.transforms.liftings.graph2hypergraph.khop
+.. automodule:: topobenchmark.transforms.liftings.graph2hypergraph.khop
:members:
-.. automodule:: topobenchmarkx.transforms.liftings.graph2hypergraph.knn
+.. automodule:: topobenchmark.transforms.liftings.graph2hypergraph.knn
:members:
-.. automodule:: topobenchmarkx.transforms.liftings.graph2simplicial.base
+.. automodule:: topobenchmark.transforms.liftings.graph2simplicial.base
:members:
-.. automodule:: topobenchmarkx.transforms.liftings.graph2simplicial.clique
+.. automodule:: topobenchmark.transforms.liftings.graph2simplicial.clique
:members:
-.. automodule:: topobenchmarkx.transforms.liftings.graph2simplicial.khop
+.. automodule:: topobenchmark.transforms.liftings.graph2simplicial.khop
:members:
\ No newline at end of file
diff --git a/docs/api/utils/index.rst b/docs/api/utils/index.rst
index 6af9d046..3df42dda 100644
--- a/docs/api/utils/index.rst
+++ b/docs/api/utils/index.rst
@@ -4,20 +4,20 @@ Utils
This module implements additional utilities to handle the training process.
-.. automodule:: topobenchmarkx.utils.config_resolvers
+.. automodule:: topobenchmark.utils.config_resolvers
:members:
-.. automodule:: topobenchmarkx.utils.instantiators
+.. automodule:: topobenchmark.utils.instantiators
:members:
-.. automodule:: topobenchmarkx.utils.logging_utils
+.. automodule:: topobenchmark.utils.logging_utils
:members:
-.. automodule:: topobenchmarkx.utils.pylogger
+.. automodule:: topobenchmark.utils.pylogger
:members:
-.. automodule:: topobenchmarkx.utils.rich_utils
+.. automodule:: topobenchmark.utils.rich_utils
:members:
-.. automodule:: topobenchmarkx.utils.utils
+.. automodule:: topobenchmark.utils.utils
:members:
\ No newline at end of file
diff --git a/docs/conf.py b/docs/conf.py
index 7527ecdb..80a102a8 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -3,7 +3,7 @@
import os
import shutil
-project = "TopoBenchmarkX"
+project = "TopoBenchmark"
copyright = "2022-2023, PyT-Team, Inc."
author = "PyT-Team Authors"
@@ -57,8 +57,8 @@
pygments_style = None
html_theme = "pydata_sphinx_theme"
-html_baseurl = "pyt-team.github.io"
-htmlhelp_basename = "pyt-teamdoc"
+html_baseurl = "https://geometric-intelligence.github.io/topobenchmark"
+htmlhelp_basename = "topobenchmarkdoc"
html_last_updated_fmt = "%c"
latex_elements = {}
@@ -67,7 +67,7 @@
latex_documents = [
(
master_doc,
- "topobenchmarkx.tex",
+ "topobenchmark.tex",
"TopoBenchmarkX Documentation",
"PyT-Team",
"manual",
@@ -75,16 +75,16 @@
]
man_pages = [
- (master_doc, "topobenchmarkx", "TopoBenchmarkX Documentation", [author], 1)
+ (master_doc, "topobenchmark", "TopoBenchmarkX Documentation", [author], 1)
]
texinfo_documents = [
(
master_doc,
- "topobenchmarkx",
+ "topobenchmark",
"TopoBenchmarkX Documentation",
author,
- "topobenchmarkx",
+ "topobenchmark",
"One line description of project.",
"Miscellaneous",
),
diff --git a/docs/contributing/index.rst b/docs/contributing/index.rst
index 2d58b05b..c26aecda 100644
--- a/docs/contributing/index.rst
+++ b/docs/contributing/index.rst
@@ -13,7 +13,7 @@ community effort, and everyone is welcome to contribute.
Making Changes
--------------
-The preferred way to contribute to topobenchmarkx is to fork the `upstream
+The preferred way to contribute to topobenchmark is to fork the `upstream
repository `__ and submit a "pull request" (PR).
Follow these steps before submitting a PR:
@@ -107,7 +107,7 @@ A docstring is a well-formatted description of your function/class/module which
its purpose, usage, and other information.
There are different markdown languages/formats used for docstrings in Python. The most common
-three are reStructuredText, numpy, and google docstring styles. For topobenchmarkx, we are
+three are reStructuredText, numpy, and google docstring styles. For topobenchmark, we are
using the numpy docstring standard.
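As a quick illustration, here is a minimal sketch of a numpy-style docstring; the function, its parameters, and the example values are made up for this guide and are not part of the library.

.. code-block:: python

    def k_hop_neighbors(graph, node, k=1):
        """Return the nodes reachable from ``node`` within ``k`` hops.

        Parameters
        ----------
        graph : dict
            Adjacency mapping from each node to an iterable of its neighbors.
        node : hashable
            Node whose neighborhood is expanded.
        k : int, optional
            Number of hops to expand, by default 1.

        Returns
        -------
        set
            Nodes reachable from ``node`` in at most ``k`` hops, excluding ``node``.
        """
        frontier, seen = {node}, {node}
        for _ in range(k):
            frontier = {m for n in frontier for m in graph.get(n, ())} - seen
            seen |= frontier
        return seen - {node}

    print(k_hop_neighbors({0: [1], 1: [0, 2], 2: [1]}, 0, k=2))  # {1, 2}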
When writing up your docstrings, please review the `NumPy docstring guide `_
to understand the role and syntax of each section. Following this syntax is important not only for readability,
diff --git a/docs/index.rst b/docs/index.rst
index 8121d750..9d5e3f97 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,16 +1,16 @@
-🌐 TopoBenchmarkX (TBX) 🍩
+🌐 TopoBenchmark (TB) 🍩
==========================
-.. figure:: https://github.com/pyt-team/TopoBenchmarkX/raw/main/resources/logo.jpeg
- :alt: topobenchmarkx
+.. figure:: https://github.com/geometric-intelligence/TopoBenchmark/raw/main/resources/logo.jpg
+ :alt: topobenchmark
:class: with-shadow
:width: 1000px
-`TopoBenchmarkX` (TBX) is a modular Python library designed to standardize benchmarking and accelerate research in Topological Deep Learning (TDL).
-In particular, TBX allows to train and compare the performances of all sorts of Topological Neural Networks (TNNs) across the different topological domains,
+`TopoBenchmark` (TB) is a modular Python library designed to standardize benchmarking and accelerate research in Topological Deep Learning (TDL).
+In particular, TB allows training and comparing the performance of all sorts of Topological Neural Networks (TNNs) across the different topological domains,
where by *topological domain* we refer to a graph, a simplicial complex, a cellular complex, or a hypergraph.
-.. figure:: https://github.com/pyt-team/TopoBenchmarkX/raw/main/resources/workflow.jpg
+.. figure:: https://github.com/geometric-intelligence/TopoBenchmark/raw/main/resources/workflow.jpg
:alt: workflow
:class: with-shadow
:width: 1000px
@@ -29,7 +29,7 @@ Additionally, the library offers the ability to transform, i.e., *lift*, each da
⚙ Neural Networks
-----------------
-We list the neural networks trained and evaluated by `TopoBenchmarkX`, organized by the topological domain over which they operate: graph, simplicial complex, cellular complex or hypergraph. Many of these neural networks were originally implemented in `TopoModelX `_.
+We list the neural networks trained and evaluated by `TopoBenchmark`, organized by the topological domain over which they operate: graph, simplicial complex, cellular complex or hypergraph. Many of these neural networks were originally implemented in `TopoModelX `_.
Graphs
@@ -106,7 +106,7 @@ Hypergraphs
🚀 Liftings
-----------
-We list the liftings used in `TopoBenchmarkX` to transform datasets. Here, a *lifting* refers to a function that transforms a dataset defined on a topological domain (*e.g.*, on a graph) into the same dataset but supported on a different topological domain (*e.g.*, on a simplicial complex).
+We list the liftings used in `TopoBenchmark` to transform datasets. Here, a *lifting* refers to a function that transforms a dataset defined on a topological domain (*e.g.*, on a graph) into the same dataset but supported on a different topological domain (*e.g.*, on a simplicial complex).
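
For intuition, the following toy sketch (not the library's implementation) lifts a graph to a simplicial complex by promoting cliques to simplices; the helper name and data layout are illustrative only.

.. code-block:: python

    from itertools import combinations

    def clique_lifting(edges, max_dim=2):
        """Toy clique lifting: a k-simplex is a (k+1)-clique of the input graph."""
        adjacency = {}
        for u, v in edges:
            adjacency.setdefault(u, set()).add(v)
            adjacency.setdefault(v, set()).add(u)
        nodes = sorted(adjacency)
        simplices = {0: [(n,) for n in nodes], 1: [tuple(sorted(e)) for e in edges]}
        for dim in range(2, max_dim + 1):
            # Keep a candidate node set only if every pair of its nodes is adjacent.
            simplices[dim] = [
                c for c in combinations(nodes, dim + 1)
                if all(b in adjacency[a] for a, b in combinations(c, 2))
            ]
        return simplices

    # Example: a triangle plus a pendant edge yields exactly one 2-simplex.
    print(clique_lifting([(0, 1), (1, 2), (0, 2), (2, 3)])[2])  # [(0, 1, 2)]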
Graph2Simplicial
****************
@@ -237,12 +237,12 @@ Graph2Hypergraph
🔍 References
-------------
-To learn more about `TopoBenchmarkX`, we invite you to read the paper:
+To learn more about `TopoBenchmark`, we invite you to read the paper:
.. code-block:: BibTeX
- @misc{topobenchmarkx2024,
- title={TopoBenchmarkX},
+ @misc{topobenchmark2024,
+ title={TopoBenchmark},
author={PyT-Team},
year={2024},
eprint={TBD},
@@ -250,12 +250,12 @@ To learn more about `TopoBenchmarkX`, we invite you to read the paper:
primaryClass={cs.LG}
}
-If you find `TopoBenchmarkX` useful, we would appreciate if you cite us!
+If you find `TopoBenchmark` useful, we would appreciate it if you cite us!
🦾 Getting Started
------------------
-Check out our `tutorials `_ to get started!
+Check out our `tutorials `_ to get started!
.. toctree::
diff --git a/index.html b/index.html
deleted file mode 100644
index a3b4b091..00000000
--- a/index.html
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-
-
-
-
- Your Website Title
-
-
-
-
-
-
-
diff --git a/pyproject.toml b/pyproject.toml
index 5b122550..b63dda02 100755
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ build-backend = "setuptools.build_meta"
name = "TopoBenchmark"
dynamic = ["version"]
authors = [
- {name = "PyT-Team Authors", email = "tlscabinet@gmail.com"}
+ {name = "Topological Intelligence Team Authors", email = "tlscabinet@gmail.com"}
]
readme = "README.md"
description = "Topological Deep Learning"
@@ -48,8 +48,8 @@ dependencies=[
"jupyterlab",
"rich",
"rootutils",
- "toponetx @ git+https://github.com/pyt-team/TopoNetX.git",
"topomodelx @ git+https://github.com/pyt-team/TopoModelX.git",
+ "toponetx @ git+https://github.com/pyt-team/TopoNetX.git",
"lightning==2.4.0",
]
@@ -132,14 +132,15 @@ convention = "numpy"
[tool.ruff.lint.per-file-ignores]
"__init__.py" = ["F403"]
+"tests/*" = ["D"]
[tool.setuptools.dynamic]
-version = {attr = "topobenchmarkx.__version__"}
+version = {attr = "topobenchmark.__version__"}
[tool.setuptools.packages.find]
include = [
- "topobenchmarkx",
- "topobenchmarkx.*"
+ "topobenchmark",
+ "topobenchmark.*"
]
[tool.mypy]
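The dynamic version declared above is resolved by setuptools from the package attribute named in ``version = {attr = "topobenchmark.__version__"}``. A minimal sketch of what that attribute could look like follows; the version string itself is an assumption, not the package's actual value.

.. code-block:: python

    # topobenchmark/__init__.py (sketch): the attribute setuptools reads
    # when building the distribution with the dynamic "version" field.
    __version__ = "0.0.1"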
diff --git a/resources/workflow.jpg b/resources/workflow.jpg
index 321008b5..ad589cd2 100644
Binary files a/resources/workflow.jpg and b/resources/workflow.jpg differ
diff --git a/scripts/reproduce.sh b/scripts/reproduce.sh
index ebab3f84..e06a4565 100644
--- a/scripts/reproduce.sh
+++ b/scripts/reproduce.sh
@@ -26,264 +26,264 @@ run_command() {
# List of commands to execute
commands=(
- 'python -m topobenchmarkx model=cell/cccn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=32 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/NCI1 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/NCI109 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=cell/cccn dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=64 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/NCI1 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/ZINC optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/ccxn dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=32 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=cell/cwn dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/MUTAG optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=32 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/NCI1 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/NCI109 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gat dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=32 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/NCI1 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/NCI109 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/amazon_ratings optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gcn dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=64 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/minesweeper optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=graph/gin dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/MUTAG optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=32 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/minesweeper optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/roman_empire optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.All_num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=64 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/PROTEINS optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.All_num_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/minesweeper optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=64 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/NCI1 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/NCI109 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/ZINC optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/minesweeper optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=64 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/cocitation_cora optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-# 'python -m topobenchmarkx model=simplicial/sccn dataset=graph/roman_empire optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/MUTAG optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=32 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/cocitation_cora optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/minesweeper optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/MUTAG optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=64 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/NCI109 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/ZINC optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/cocitation_cora optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=simplicial/scn dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=cell/cccn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=cell/cccn dataset=graph/tolokers optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=cell/ccxn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=cell/ccxn dataset=graph/tolokers optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=cell/cwn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=cell/cwn dataset=graph/tolokers optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=graph/gat dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=graph/gat dataset=graph/tolokers optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=graph/gcn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=graph/gcn dataset=graph/tolokers optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=graph/gin dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=graph/gin dataset=graph/tolokers optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=hypergraph/allsettransformer dataset=graph/tolokers optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.All_num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=hypergraph/edgnn dataset=graph/tolokers optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=16 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-'python -m topobenchmarkx model=hypergraph/unignn2 dataset=graph/tolokers optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=simplicial/sccn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=simplicial/sccn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=simplicial/sccn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=simplicial/sccnn_custom dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=simplicial/scn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=simplicial/scn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
-#'python -m topobenchmarkx model=simplicial/scn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=32 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/NCI1 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/NCI109 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=cell/cccn dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=64 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/NCI1 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/ZINC optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/ccxn dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=32 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 transforms.graph2cell_lifting.max_cell_length=10 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=cell/cwn dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/MUTAG optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=32 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/NCI1 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/NCI109 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gat dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=32 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/NCI1 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/NCI109 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/amazon_ratings optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gcn dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=64 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/minesweeper optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=graph/gin dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/MUTAG optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=32 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/minesweeper optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/roman_empire optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.All_num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=64 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/PROTEINS optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.All_num_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/minesweeper optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/edgnn dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=64 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/NCI1 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/NCI109 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/ZINC optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/amazon_ratings optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/cocitation_cora optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/minesweeper optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/MUTAG optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=64 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/cocitation_cora optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+# 'python -m topobenchmark model=simplicial/sccn dataset=graph/roman_empire optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/MUTAG optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=32 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/NCI109 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/ZINC optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/cocitation_cora optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/minesweeper optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/MUTAG optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=64 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/NCI1 optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/NCI109 optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/PROTEINS optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BirthRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=DeathRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=Election dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MigraRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=UnemploymentRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=BachelorRate dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/US-county-demos optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.loader.parameters.task_variable=MedianIncome dataset.loader.parameters.year=2012 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/ZINC optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=4 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 callbacks.early_stopping.min_delta=0.005 transforms.one_hot_node_degree_features.degrees_fields=x seed=42,3,5,23,150 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/cocitation_citeseer optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/cocitation_cora optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/cocitation_pubmed optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/minesweeper optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=simplicial/scn dataset=graph/roman_empire optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=cell/cccn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=cell/cccn dataset=graph/tolokers optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=cell/ccxn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=2 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=cell/ccxn dataset=graph/tolokers optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=cell/cwn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=cell/cwn dataset=graph/tolokers optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 transforms.graph2cell_lifting.max_cell_length=10 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=graph/gat dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=3 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=graph/gat dataset=graph/tolokers optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=graph/gcn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.num_layers=2 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=graph/gcn dataset=graph/tolokers optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=graph/gin dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.num_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=graph/gin dataset=graph/tolokers optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=4 model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=hypergraph/allsettransformer dataset=graph/tolokers optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=hypergraph/edgnn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.All_num_layers=4 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=256 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=hypergraph/edgnn dataset=graph/tolokers optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.All_num_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=16 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+'python -m topobenchmark model=hypergraph/unignn2 dataset=graph/tolokers optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=1 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=1000 trainer.min_epochs=50 trainer.check_val_every_n_epoch=1 callbacks.early_stopping.patience=50 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=simplicial/sccn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.25 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=simplicial/sccn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=128 model.backbone.n_layers=4 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=simplicial/sccn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=64 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=128 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=simplicial/sccnn_custom dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=64 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=simplicial/scn dataset=graph/IMDB-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=128 model.backbone.n_layers=2 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=simplicial/scn dataset=graph/IMDB-MULTI optimizer.parameters.lr=0.01 model.feature_encoder.out_channels=32 model.backbone.n_layers=3 model.readout.readout_name=PropagateSignalDown transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
+#'python -m topobenchmark model=simplicial/scn dataset=graph/REDDIT-BINARY optimizer.parameters.lr=0.001 model.feature_encoder.out_channels=32 model.backbone.n_layers=1 model.readout.readout_name=NoReadOut transforms.graph2simplicial_lifting.signed=True model.feature_encoder.proj_dropout=0.5 dataset.dataloader_params.batch_size=16 dataset.split_params.data_seed=0,3,5,7,9 trainer.max_epochs=500 trainer.min_epochs=50 trainer.check_val_every_n_epoch=5 callbacks.early_stopping.patience=10 logger.wandb.project=TopoBenchmarkX_main --multirun'
)
# Iterate over the commands and run them
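# A minimal sketch of that iteration step, hedged: the hunk ends before the loop body,
# and the array name `commands` is an assumption, not shown in this hunk.
# for command in "${commands[@]}"; do
#     echo "Running: ${command}"
#     eval "${command}"
# done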
diff --git a/scripts/topotune/existing_models/tune_cwn.sh b/scripts/topotune/existing_models/tune_cwn.sh
index e2bd341c..f571cc86 100644
--- a/scripts/topotune/existing_models/tune_cwn.sh
+++ b/scripts/topotune/existing_models/tune_cwn.sh
@@ -1,8 +1,8 @@
-python -m topobenchmarkx \
+python -m topobenchmark \
model=cell/topotune_onehasse,cell/topotune \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1 \
- model.backbone.routes=\[\[\[0,1\],coboundary\],\[\[1,1\],adjacency\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_incidence-0,1-up_adjacency-1,1-down_incidence-2\] \
logger.wandb.project=TopoTune_CWN \
dataset=graph/MUTAG \
optimizer.parameters.lr=0.001 \
@@ -20,11 +20,11 @@ python -m topobenchmarkx \
trainer.devices=\[1\] \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
model=cell/topotune_onehasse,cell/topotune \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1 \
- model.backbone.routes=\[\[\[0,1\],coboundary\],\[\[1,1\],adjacency\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_incidence-0,1-up_adjacency-1,1-down_incidence-2\] \
logger.wandb.project=TopoTune_CWN \
dataset=graph/NCI1 \
optimizer.parameters.lr=0.001 \
@@ -41,11 +41,11 @@ python -m topobenchmarkx \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
model=cell/topotune_onehasse,cell/topotune \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1 \
- model.backbone.routes=\[\[\[0,1\],coboundary\],\[\[1,1\],adjacency\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_incidence-0,1-up_adjacency-1,1-down_incidence-2\] \
logger.wandb.project=TopoTune_CWN \
dataset=graph/NCI109 \
optimizer.parameters.lr=0.001 \
@@ -61,11 +61,11 @@ python -m topobenchmarkx \
trainer.devices=\[2\] \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
model=cell/topotune_onehasse,cell/topotune \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1 \
- model.backbone.routes=\[\[\[0,1\],coboundary\],\[\[1,1\],adjacency\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_incidence-0,1-up_adjacency-1,1-down_incidence-2\] \
logger.wandb.project=TopoTune_CWN \
dataset=graph/ZINC \
optimizer.parameters.lr=0.001 \
@@ -86,11 +86,11 @@ python -m topobenchmarkx \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
model=cell/topotune_onehasse,cell/topotune \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1 \
- model.backbone.routes=\[\[\[0,1\],coboundary\],\[\[1,1\],adjacency\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_incidence-0,1-up_adjacency-1,1-down_incidence-2\] \
logger.wandb.project=TopoTune_CWN \
dataset=graph/cocitation_citeseer \
optimizer.parameters.lr=0.001 \
@@ -107,11 +107,11 @@ python -m topobenchmarkx \
trainer.devices=\[3\] \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
model=cell/topotune_onehasse,cell/topotune \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1 \
- model.backbone.routes=\[\[\[0,1\],coboundary\],\[\[1,1\],adjacency\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_incidence-0,1-up_adjacency-1,1-down_incidence-2\] \
logger.wandb.project=TopoTune_CWN \
dataset=graph/cocitation_pubmed \
optimizer.parameters.lr=0.01 \
@@ -130,11 +130,11 @@ python -m topobenchmarkx \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
model=cell/topotune_onehasse,cell/topotune \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1 \
- model.backbone.routes=\[\[\[0,1\],coboundary\],\[\[1,1\],adjacency\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_incidence-0,1-up_adjacency-1,1-down_incidence-2\] \
logger.wandb.project=TopoTune_CWN \
dataset=graph/PROTEINS,graph/cocitation_cora \
optimizer.parameters.lr=0.001 \
diff --git a/scripts/topotune/existing_models/tune_sccn.sh b/scripts/topotune/existing_models/tune_sccn.sh
index b925f562..6330b5c9 100644
--- a/scripts/topotune/existing_models/tune_sccn.sh
+++ b/scripts/topotune/existing_models/tune_sccn.sh
@@ -1,11 +1,11 @@
# SCCN
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/MUTAG \
model=simplicial/topotune_onehasse,simplicial/topotune \
model.feature_encoder.out_channels=128 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=3 \
dataset.split_params.data_seed=1,3,5,7,9 \
model.readout.readout_name=NoReadOut \
@@ -22,13 +22,13 @@ python -m topobenchmarkx \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/NCI1 \
model=simplicial/topotune_onehasse,simplicial/topotune \
model.feature_encoder.out_channels=64 \
model.backbone.GNN.num_layers=1 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=3 \
model.feature_encoder.proj_dropout=0.5 \
model.readout.readout_name=PropagateSignalDown \
@@ -45,13 +45,13 @@ python -m topobenchmarkx \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/NCI109 \
model=simplicial/topotune_onehasse,simplicial/topotune \
model.feature_encoder.out_channels=64 \
model.backbone.GNN.num_layers=1 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=4 \
model.readout.readout_name=NoReadOut \
transforms.graph2simplicial_lifting.signed=True \
@@ -69,10 +69,10 @@ python -m topobenchmarkx \
-python -m topobenchmarkx \
+python -m topobenchmark \
model=simplicial/topotune_onehasse,simplicial/topotune \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\] \
dataset=graph/PROTEINS \
optimizer.parameters.lr=0.01 \
model.feature_encoder.out_channels=128 \
@@ -91,11 +91,11 @@ python -m topobenchmarkx \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
model=simplicial/topotune_onehasse,simplicial/topotune \
dataset=graph/ZINC \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\] \
optimizer.parameters.lr=0.001 \
model.feature_encoder.out_channels=128 \
model.backbone.layers=4 \
@@ -114,10 +114,10 @@ python -m topobenchmarkx \
trainer.devices=\[0\] \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
model=simplicial/topotune_onehasse,simplicial/topotune \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\] \
dataset=graph/cocitation_citeseer \
optimizer.parameters.lr=0.01 \
model.feature_encoder.out_channels=64 \
@@ -135,11 +135,11 @@ python -m topobenchmarkx \
trainer.devices=\[0\] \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
model=simplicial/topotune_onehasse,simplicial/topotune \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
- model.backbone.GNN._target_=topobenchmarkx.nn.backbones.graph.IdentityGCN \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.GNN._target_=topobenchmark.nn.backbones.graph.IdentityGCN \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\] \
dataset=graph/cocitation_cora \
optimizer.parameters.lr=0.01 \
model.feature_encoder.out_channels=32 \
@@ -157,10 +157,10 @@ python -m topobenchmarkx \
trainer.devices=\[1\] \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
model=simplicial/topotune_onehasse,simplicial/topotune \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\] \
dataset=graph/cocitation_pubmed \
optimizer.parameters.lr=0.01 \
model.feature_encoder.out_channels=64 \
diff --git a/scripts/topotune/search_gccn_cell.sh b/scripts/topotune/search_gccn_cell.sh
index 2a006935..463fb41c 100644
--- a/scripts/topotune/search_gccn_cell.sh
+++ b/scripts/topotune/search_gccn_cell.sh
@@ -1,10 +1,10 @@
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/NCI109 \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -18,13 +18,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/cocitation_cora \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -38,13 +38,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/PROTEINS \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -58,13 +58,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/MUTAG \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -78,13 +78,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/ZINC \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -99,13 +99,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/cocitation_citeseer \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -119,13 +119,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/NCI1 \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -139,13 +139,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/cocitation_pubmed \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -159,13 +159,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/NCI109 \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -179,13 +179,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/cocitation_cora \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -199,13 +199,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/PROTEINS \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -219,13 +219,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/MUTAG \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -239,13 +239,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/cocitation_citeseer \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -259,13 +259,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/NCI1 \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -279,13 +279,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/cocitation_pubmed \
model=cell/topotune,cell/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
diff --git a/scripts/topotune/search_gccn_simplicial.sh b/scripts/topotune/search_gccn_simplicial.sh
index c83dc861..2da5b1b0 100644
--- a/scripts/topotune/search_gccn_simplicial.sh
+++ b/scripts/topotune/search_gccn_simplicial.sh
@@ -1,10 +1,10 @@
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/NCI109 \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -18,13 +18,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/ZINC \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -39,13 +39,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/cocitation_cora \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -59,13 +59,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/PROTEINS \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -79,13 +79,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/MUTAG \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -99,13 +99,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/cocitation_citeseer \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -119,13 +119,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/amazon_ratings \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -139,13 +139,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/NCI1 \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -159,13 +159,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/cocitation_pubmed \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\]\],\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\]\],\[\[\[0,0\],up_laplacian\],\[\[1,0\],boundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[2,1\],boundary\],\[\[2,2\],down_laplacian\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-down_laplacian-1,1-up_laplacian-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-up_incidence-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_incidence-2,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_incidence-1,1-up_laplacian-1,1-down_incidence-2\],\[1-up_laplacian-0,1-up_incidence-0,1-up_laplacian-1,1-up_incidence-1\],\[1-up_laplacian-0,1-down_incidence-1,1-down_laplacian-1,1-up_laplacian-1,1-down_incidence-2,1-down_laplacian-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -179,13 +179,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/NCI109 \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -199,13 +199,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/cocitation_cora \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -219,13 +219,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/PROTEINS \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -239,13 +239,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/MUTAG \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -259,13 +259,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/cocitation_citeseer \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -279,13 +279,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/amazon_ratings \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -299,13 +299,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/NCI1 \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
@@ -319,13 +319,13 @@ python -m topobenchmarkx \
tags="[FirstExperiments]" \
--multirun &
-python -m topobenchmarkx \
+python -m topobenchmark \
dataset=graph/cocitation_pubmed \
model=simplicial/topotune,simplicial/topotune_onehasse \
model.feature_encoder.out_channels=32 \
model.tune_gnn=GCN,GIN,GAT,GraphSAGE \
model.backbone.GNN.num_layers=1,2 \
- model.backbone.routes=\[\[\[0,0\],up_laplacian\],\[\[0,1\],coboundary\],\[\[1,1\],down_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,2\],coboundary\],\[\[2,2\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[1,1\],up_laplacian\],\[\[1,1\],down_laplacian\]\],\[\[\[0,0\],up_laplacian\],\[\[2,1\],boundary\]\] \
+ model.backbone.neighborhoods=\[1-up_laplacian-0,1-up_incidence-0,1-down_laplacian-1,1-up_laplacian-1,1-up_incidence-1,1-down_laplacian-2\],\[1-up_laplacian-0,1-down_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1\],\[1-up_laplacian-0,1-up_laplacian-1,1-down_laplacian-1\],\[1-up_laplacian-0,1-down_incidence-2\] \
model.backbone.layers=2,4,8 \
model.feature_encoder.proj_dropout=0.3 \
dataset.split_params.data_seed=1,3,5,7,9 \
diff --git a/test/conftest.py b/test/conftest.py
index 026c110c..c84a1b72 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -3,10 +3,10 @@
import pytest
import torch
import torch_geometric
-from topobenchmarkx.transforms.liftings.graph2simplicial import (
+from topobenchmark.transforms.liftings.graph2simplicial import (
SimplicialCliqueLifting
)
-from topobenchmarkx.transforms.liftings.graph2cell import (
+from topobenchmark.transforms.liftings.graph2cell import (
CellCycleLifting
)
diff --git a/test/data/dataload/test_Dataloaders.py b/test/data/dataload/test_Dataloaders.py
index 36cfd279..35770d68 100644
--- a/test/data/dataload/test_Dataloaders.py
+++ b/test/data/dataload/test_Dataloaders.py
@@ -4,13 +4,13 @@
import rootutils
import torch
-from topobenchmarkx.data.preprocessor import PreProcessor
-from topobenchmarkx.dataloader import TBXDataloader
-from topobenchmarkx.dataloader.utils import to_data_list
+from topobenchmark.data.preprocessor import PreProcessor
+from topobenchmark.dataloader import TBDataloader
+from topobenchmark.dataloader.utils import to_data_list
from omegaconf import OmegaConf
import os
-from topobenchmarkx.run import initialize_hydra
+from topobenchmark.run import initialize_hydra
# rootutils.setup_root("./", indicator=".project-root", pythonpath=True)
@@ -35,7 +35,7 @@ def setup_method(self):
)
self.batch_size = 2
- datamodule = TBXDataloader(
+ datamodule = TBDataloader(
dataset_train=dataset_train,
dataset_val=dataset_val,
dataset_test=dataset_test,
@@ -47,7 +47,7 @@ def setup_method(self):
def test_lift_features(self):
"""Test the collate funciton.
- To test the collate function we use the TBXDataloader class to create a dataloader that uses the collate function.
+ To test the collate function we use the TBDataloader class to create a dataloader that uses the collate function.
We then first check that the batched data has the expected shape. We then convert the batched data back to a list and check that the data in the list is the same as the original data.
"""
diff --git a/test/data/dataload/test_dataload_dataset.py b/test/data/dataload/test_dataload_dataset.py
index 463ecd8f..6a1ff336 100644
--- a/test/data/dataload/test_dataload_dataset.py
+++ b/test/data/dataload/test_dataload_dataset.py
@@ -1,7 +1,7 @@
import torch
from torch_geometric.data import Data
-from topobenchmarkx.dataloader import DataloadDataset
+from topobenchmark.dataloader import DataloadDataset
class TestDataloadDataset:
diff --git a/test/data/load/test_datasetloaders.py b/test/data/load/test_datasetloaders.py
index 82790a94..058d5001 100644
--- a/test/data/load/test_datasetloaders.py
+++ b/test/data/load/test_datasetloaders.py
@@ -36,11 +36,15 @@ def _gather_config_files(self, base_dir: Path) -> List[str]:
"""
config_files = []
config_base_dir = base_dir / "configs/dataset"
+        # Below are the datasets whose default transforms we manually override with no_transform.
exclude_datasets = {"karate_club.yaml",
                             # Below are the datasets whose default transforms we manually override with no_transform,
# due to lack of default transform for domain2domain
"REDDIT-BINARY.yaml", "IMDB-MULTI.yaml", "IMDB-BINARY.yaml", #"ZINC.yaml"
}
+
+ # Below are the datasets that take quite some time to load and process
+ self.long_running_datasets = {"mantra_name.yaml", "mantra_orientation.yaml", "mantra_genus.yaml", "mantra_betti_numbers.yaml",}
for dir_path in config_base_dir.iterdir():
@@ -76,12 +80,16 @@ def _load_dataset(self, data_domain: str, config_file: str) -> Tuple[Any, Dict]:
parameters = hydra.compose(
config_name="run.yaml",
overrides=[f"dataset={data_domain}/{config_file}", f"model=graph/gat"],
- return_hydra_config=True
-
+ return_hydra_config=True,
)
dataset_loader = hydra.utils.instantiate(parameters.dataset.loader)
print(repr(dataset_loader))
- return dataset_loader.load()
+
+ if config_file in self.long_running_datasets:
+ dataset, data_dir = dataset_loader.load(slice=100)
+ else:
+ dataset, data_dir = dataset_loader.load()
+ return dataset, data_dir
def test_dataset_loading_states(self):
"""Test different states and scenarios during dataset loading."""
diff --git a/test/data/preprocess/test_preprocessor.py b/test/data/preprocess/test_preprocessor.py
index 2c17545b..8e25536f 100644
--- a/test/data/preprocess/test_preprocessor.py
+++ b/test/data/preprocess/test_preprocessor.py
@@ -6,7 +6,7 @@
import torch_geometric
from omegaconf import DictConfig
-from topobenchmarkx.data.preprocessor import PreProcessor
+from topobenchmark.data.preprocessor import PreProcessor
from ..._utils.flow_mocker import FlowMocker
@@ -115,7 +115,7 @@ def test_init_with_transform(self, mocker_fixture):
)
self.flow_mocker.assert_all(self.preprocessor_with_tranform)
- @patch("topobenchmarkx.data.preprocessor.preprocessor.load_inductive_splits")
+ @patch("topobenchmark.data.preprocessor.preprocessor.load_inductive_splits")
def test_load_dataset_splits_inductive(self, mock_load_inductive_splits):
"""Test loading dataset splits for inductive learning.
@@ -131,7 +131,7 @@ def test_load_dataset_splits_inductive(self, mock_load_inductive_splits):
)
@patch(
- "topobenchmarkx.data.preprocessor.preprocessor.load_transductive_splits"
+ "topobenchmark.data.preprocessor.preprocessor.load_transductive_splits"
)
def test_load_dataset_splits_transductive(
self, mock_load_transductive_splits
diff --git a/test/data/utils/test_data_utils.py b/test/data/utils/test_data_utils.py
index 9e31ee3d..4e08ead2 100644
--- a/test/data/utils/test_data_utils.py
+++ b/test/data/utils/test_data_utils.py
@@ -4,7 +4,7 @@
import pytest
import torch_geometric
import torch
-from topobenchmarkx.data.utils import *
+from topobenchmark.data.utils import *
import toponetx as tnx
from toponetx.classes import CellComplex
diff --git a/test/data/utils/test_io_utils.py b/test/data/utils/test_io_utils.py
index be75ae79..883a49aa 100644
--- a/test/data/utils/test_io_utils.py
+++ b/test/data/utils/test_io_utils.py
@@ -1,6 +1,6 @@
import pytest
-from topobenchmarkx.data.utils.io_utils import *
+from topobenchmark.data.utils.io_utils import *
def test_get_file_id_from_url():
diff --git a/test/evaluator/test_TBXEvaluator.py b/test/evaluator/test_TBXEvaluator.py
deleted file mode 100644
index 93d79af5..00000000
--- a/test/evaluator/test_TBXEvaluator.py
+++ /dev/null
@@ -1,15 +0,0 @@
-""" Test the TBXEvaluator class."""
-import pytest
-
-from topobenchmarkx.evaluator import TBXEvaluator
-
-class TestTBXEvaluator:
- """ Test the TBXEvaluator class."""
-
- def setup_method(self):
- """ Setup the test."""
- self.evaluator_multilable = TBXEvaluator(task="multilabel classification")
- self.evaluator_regression = TBXEvaluator(task="regression")
- with pytest.raises(ValueError):
- TBXEvaluator(task="wrong")
- repr = self.evaluator_multilable.__repr__()
\ No newline at end of file
diff --git a/test/evaluator/test_evaluator.py b/test/evaluator/test_evaluator.py
new file mode 100644
index 00000000..eecc59a0
--- /dev/null
+++ b/test/evaluator/test_evaluator.py
@@ -0,0 +1,43 @@
+""" Test the TBEvaluator class."""
+import pytest
+import torch
+from topobenchmark.evaluator import TBEvaluator
+
+class TestTBEvaluator:
+ """ Test the TBXEvaluator class."""
+
+ def setup_method(self):
+ """ Setup the test."""
+ self.classification_metrics = ["accuracy", "precision", "recall", "auroc"]
+ self.evaluator_classification = TBEvaluator(task="classification", num_classes=3, metrics=self.classification_metrics)
+ self.evaluator_multilabel = TBEvaluator(task="multilabel classification", num_classes=2, metrics=self.classification_metrics)
+ self.regression_metrics = ["example", "mae"]
+ self.evaluator_regression = TBEvaluator(task="regression", num_classes=1, metrics=self.regression_metrics)
+ with pytest.raises(ValueError):
+ TBEvaluator(task="wrong", num_classes=2, metrics=self.classification_metrics)
+
+ def test_repr(self):
+ """Test the __repr__ method."""
+ assert "TBEvaluator" in self.evaluator_classification.__repr__()
+ assert "TBEvaluator" in self.evaluator_multilabel.__repr__()
+ assert "TBEvaluator" in self.evaluator_regression.__repr__()
+
+ def test_update_and_compute(self):
+ """Test the update and compute methods."""
+ self.evaluator_classification.update({"logits": torch.randn(10, 3), "labels": torch.randint(0, 3, (10,))})
+ out = self.evaluator_classification.compute()
+ for metric in self.classification_metrics:
+ assert metric in out
+ self.evaluator_multilabel.update({"logits": torch.randn(10, 2), "labels": torch.randint(0, 2, (10, 2))})
+ out = self.evaluator_multilabel.compute()
+ for metric in self.classification_metrics:
+ assert metric in out
+ self.evaluator_regression.update({"logits": torch.randn(10, 1), "labels": torch.randn(10,)})
+ out = self.evaluator_regression.compute()
+ for metric in self.regression_metrics:
+ assert metric in out
+
+ def test_reset(self):
+ """Test the reset method."""
+ self.evaluator_multilabel.reset()
+ self.evaluator_regression.reset()
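The new test above doubles as a usage reference; a minimal standalone sketch of the same evaluator flow (metric names taken from the test):

    import torch
    from topobenchmark.evaluator import TBEvaluator

    evaluator = TBEvaluator(task="classification", num_classes=3,
                            metrics=["accuracy", "precision", "recall", "auroc"])
    evaluator.update({"logits": torch.randn(10, 3),
                      "labels": torch.randint(0, 3, (10,))})
    scores = evaluator.compute()   # per the test, metric names appear as keys in the result
    evaluator.reset()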
diff --git a/test/loss/test_dataset_loss.py b/test/loss/test_dataset_loss.py
index 2097eba6..5572304d 100644
--- a/test/loss/test_dataset_loss.py
+++ b/test/loss/test_dataset_loss.py
@@ -1,12 +1,12 @@
-""" Test the TBXEvaluator class."""
+""" Test the TBEvaluator class."""
import pytest
import torch
import torch_geometric
-from topobenchmarkx.loss.dataset import DatasetLoss
+from topobenchmark.loss.dataset import DatasetLoss
class TestDatasetLoss:
- """ Test the TBXEvaluator class."""
+ """ Test the TBEvaluator class."""
def setup_method(self):
""" Setup the test."""
diff --git a/test/nn/backbones/cell/test_cccn.py b/test/nn/backbones/cell/test_cccn.py
index e7665698..791b4e0f 100644
--- a/test/nn/backbones/cell/test_cccn.py
+++ b/test/nn/backbones/cell/test_cccn.py
@@ -2,7 +2,7 @@
import torch
from ...._utils.nn_module_auto_test import NNModuleAutoTest
-from topobenchmarkx.nn.backbones.cell.cccn import CCCN
+from topobenchmark.nn.backbones.cell.cccn import CCCN
def test_cccn(random_graph_input):
diff --git a/test/nn/backbones/combinatorial/test_gccn.py b/test/nn/backbones/combinatorial/test_gccn.py
index 22ca594e..8b382e21 100644
--- a/test/nn/backbones/combinatorial/test_gccn.py
+++ b/test/nn/backbones/combinatorial/test_gccn.py
@@ -4,7 +4,7 @@
import torch
from torch_geometric.data import Data
from test._utils.nn_module_auto_test import NNModuleAutoTest
-from topobenchmarkx.nn.backbones.combinatorial.gccn import TopoTune, interrank_boundary_index, get_activation
+from topobenchmark.nn.backbones.combinatorial.gccn import TopoTune, interrank_boundary_index, get_activation
from torch_geometric.nn import GCNConv
from omegaconf import OmegaConf
diff --git a/test/nn/backbones/combinatorial/test_gccn_onehasse.py b/test/nn/backbones/combinatorial/test_gccn_onehasse.py
index 67b6911e..fa898927 100644
--- a/test/nn/backbones/combinatorial/test_gccn_onehasse.py
+++ b/test/nn/backbones/combinatorial/test_gccn_onehasse.py
@@ -4,7 +4,7 @@
import torch
from torch_geometric.data import Data
from test._utils.nn_module_auto_test import NNModuleAutoTest
-from topobenchmarkx.nn.backbones.combinatorial.gccn_onehasse import TopoTune_OneHasse, get_activation
+from topobenchmark.nn.backbones.combinatorial.gccn_onehasse import TopoTune_OneHasse, get_activation
from torch_geometric.nn import GCNConv
from omegaconf import OmegaConf
diff --git a/test/nn/backbones/graph/test_graph_dgm.py b/test/nn/backbones/graph/test_graph_dgm.py
new file mode 100644
index 00000000..5119a57c
--- /dev/null
+++ b/test/nn/backbones/graph/test_graph_dgm.py
@@ -0,0 +1,37 @@
+"""Unit tests for GraphMLP."""
+
+import torch
+import torch_geometric
+from topobenchmark.nn.backbones.graph import GraphMLP
+from topobenchmark.nn.wrappers.graph import GraphMLPWrapper
+from topobenchmark.loss.model import GraphMLPLoss
+
+def testGraphMLP(random_graph_input):
+ """ Unit test for GraphMLP.
+
+ Parameters
+ ----------
+ random_graph_input : Tuple[torch.Tensor, torch.Tensor, torch.Tensor, Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]
+ A tuple of input tensors for testing GraphMLP.
+ """
+ x, x_1, x_2, edges_1, edges_2 = random_graph_input
+ batch = torch_geometric.data.Data(x_0=x, y=x, edge_index=edges_1, batch_0=torch.zeros(x.shape[0], dtype=torch.long))
+ model = GraphMLP(x.shape[1], x.shape[1])
+ wrapper = GraphMLPWrapper(model, **{"out_channels": x.shape[1], "num_cell_dimensions": 1})
+ loss_fn = GraphMLPLoss()
+
+ _ = wrapper.__repr__()
+ _ = loss_fn.__repr__()
+
+ model_out = wrapper(batch)
+ assert model_out["x_0"].shape == x.shape
+ assert list(model_out["x_dis"].shape) == [8,8]
+
+ loss = loss_fn(model_out, batch)
+ assert loss.item() >= 0
+
+ model_out["x_dis"] = None
+ loss = loss_fn(model_out, batch)
+ assert loss == torch.tensor(0.0)
+
+
diff --git a/test/nn/backbones/graph/test_graphmlp.py b/test/nn/backbones/graph/test_graphmlp.py
index 5810414d..5119a57c 100644
--- a/test/nn/backbones/graph/test_graphmlp.py
+++ b/test/nn/backbones/graph/test_graphmlp.py
@@ -2,9 +2,9 @@
import torch
import torch_geometric
-from topobenchmarkx.nn.backbones.graph import GraphMLP
-from topobenchmarkx.nn.wrappers.graph import GraphMLPWrapper
-from topobenchmarkx.loss.model import GraphMLPLoss
+from topobenchmark.nn.backbones.graph import GraphMLP
+from topobenchmark.nn.wrappers.graph import GraphMLPWrapper
+from topobenchmark.loss.model import GraphMLPLoss
def testGraphMLP(random_graph_input):
""" Unit test for GraphMLP.
diff --git a/test/nn/backbones/hypergraph/test_edgnn.py b/test/nn/backbones/hypergraph/test_edgnn.py
index 7dd77587..a06590f8 100644
--- a/test/nn/backbones/hypergraph/test_edgnn.py
+++ b/test/nn/backbones/hypergraph/test_edgnn.py
@@ -4,7 +4,7 @@
import torch
from ...._utils.nn_module_auto_test import NNModuleAutoTest
-from topobenchmarkx.nn.backbones.hypergraph.edgnn import (
+from topobenchmark.nn.backbones.hypergraph.edgnn import (
EDGNN,
MLP as edgnn_MLP,
PlainMLP,
diff --git a/test/nn/backbones/simplicial/test_sccnn.py b/test/nn/backbones/simplicial/test_sccnn.py
index b68fa727..19e2b774 100644
--- a/test/nn/backbones/simplicial/test_sccnn.py
+++ b/test/nn/backbones/simplicial/test_sccnn.py
@@ -3,8 +3,8 @@
import torch
from torch_geometric.utils import get_laplacian
from ...._utils.nn_module_auto_test import NNModuleAutoTest
-from topobenchmarkx.nn.backbones.simplicial import SCCNNCustom
-from topobenchmarkx.transforms.liftings.graph2simplicial import (
+from topobenchmark.nn.backbones.simplicial import SCCNNCustom
+from topobenchmark.transforms.liftings.graph2simplicial import (
SimplicialCliqueLifting,
)
diff --git a/test/nn/encoders/test_dgm.py b/test/nn/encoders/test_dgm.py
new file mode 100644
index 00000000..2ac0b381
--- /dev/null
+++ b/test/nn/encoders/test_dgm.py
@@ -0,0 +1,172 @@
+"""Unit tests for the DGMStructureFeatureEncoder module."""
+
+import pytest
+import torch
+import torch_geometric
+import numpy as np
+
+from topobenchmark.nn.encoders import DGMStructureFeatureEncoder
+from topobenchmark.nn.encoders.kdgm import DGM_d
+
+class TestDGMStructureFeatureEncoder:
+ """Test suite for the DGMStructureFeatureEncoder class.
+
+ This test class covers various aspects of the DGMStructureFeatureEncoder,
+ including initialization, forward pass, selective encoding, and
+ configuration settings.
+ """
+
+ @pytest.fixture
+ def sample_data(self):
+ """Create a sample PyG Data object for testing.
+
+ Returns
+ -------
+ torch_geometric.data.Data
+ A data object with simulated multi-dimensional features and batch information.
+ """
+ data = torch_geometric.data.Data()
+
+ # Simulate multi-dimensional features
+ data.x_0 = torch.randn(10, 5) # 10 nodes, 5 features
+ data.x_1 = torch.randn(10, 7) # 10 nodes, 7 features
+ data.x_2 = torch.randn(10, 9) # 10 nodes, 9 features
+
+ # Add batch information
+ data.batch_0 = torch.zeros(10, dtype=torch.long)
+ data.batch_1 = torch.zeros(10, dtype=torch.long)
+ data.batch_2 = torch.zeros(10, dtype=torch.long)
+
+ return data
+
+ def test_initialization(self, sample_data):
+ """Test encoder initialization with different configurations.
+
+ Parameters
+ ----------
+ sample_data : torch_geometric.data.Data
+ Fixture providing sample graph data for testing.
+ """
+ # Test with default settings
+ encoder = DGMStructureFeatureEncoder(
+ in_channels=[5, 7, 9],
+ out_channels=64
+ )
+
+ # Test __repr__ method
+ repr_str = encoder.__repr__()
+
+ # Check basic attributes
+ assert encoder.in_channels == [5, 7, 9]
+ assert encoder.out_channels == 64
+ assert len(encoder.dimensions) == 3
+
+ def test_forward_pass(self, sample_data):
+ """Test forward pass of the encoder.
+
+ Parameters
+ ----------
+ sample_data : torch_geometric.data.Data
+ Fixture providing sample graph data for testing.
+ """
+ encoder = DGMStructureFeatureEncoder(
+ in_channels=[5, 7, 9],
+ out_channels=64,
+ selected_dimensions=[0, 1, 2]
+ )
+
+ # Perform forward pass
+ output_data = encoder(sample_data)
+
+ # Check output attributes
+ for i in [0, 1, 2]:
+ # Check encoded features exist
+ assert hasattr(output_data, f'x_{i}')
+ assert output_data[f'x_{i}'].shape[1] == 64
+
+ # Check auxiliary attributes
+ assert hasattr(output_data, f'x_aux_{i}')
+ assert hasattr(output_data, f'logprobs_{i}')
+
+ # Check edges index exists
+ assert 'edges_index' in output_data
+
+ def test_selective_encoding(self, sample_data):
+ """Test encoding only specific dimensions.
+
+ Parameters
+ ----------
+ sample_data : torch_geometric.data.Data
+ Fixture providing sample graph data for testing.
+ """
+ encoder = DGMStructureFeatureEncoder(
+ in_channels=[5, 7, 9],
+ out_channels=64,
+ selected_dimensions=[0, 1] # Only encode the first two dimensions
+ )
+
+ # Perform forward pass
+ output_data = encoder(sample_data)
+
+ # Verify encoding for selected dimensions
+ assert hasattr(output_data, 'x_1')
+ assert output_data['x_0'].shape[1] == 64
+ assert output_data['x_1'].shape[1] == 64
+ assert output_data['x_2'].shape[1] == 9
+
+ def test_dropout_configuration(self):
+ """Test dropout configuration for the encoder."""
+ # Test with non-zero dropout
+ encoder = DGMStructureFeatureEncoder(
+ in_channels=[5, 7, 9],
+ out_channels=64,
+ proj_dropout=0.5
+ )
+
+ # Check dropout value
+ for i in encoder.dimensions:
+ encoder_module = getattr(encoder, f'encoder_{i}')
+ assert encoder_module.base_enc.dropout.p == 0.5
+ assert encoder_module.embed_f.dropout.p == 0.5
+
+ @pytest.mark.parametrize("in_channels", [
+ [5], # Single dimension
+ [5, 7, 9], # Multiple dimensions
+ [10, 20, 30, 40] # More dimensions
+ ])
+ def test_variable_input_dimensions(self, sample_data, in_channels):
+ """Test encoder with varying input dimensions.
+
+ Parameters
+ ----------
+ sample_data : torch_geometric.data.Data
+ Fixture providing sample graph data for testing.
+ in_channels : list
+ List of input channel dimensions to test.
+ """
+ encoder = DGMStructureFeatureEncoder(
+ in_channels=in_channels,
+ out_channels=64
+ )
+
+ # Prepare data dynamically
+ data = torch_geometric.data.Data()
+ for i, channel in enumerate(in_channels):
+ setattr(data, f'x_{i}', torch.randn(10, channel))
+ setattr(data, f'batch_{i}', torch.zeros(10, dtype=torch.long))
+
+ # Perform forward pass
+ output_data = encoder(data)
+
+ # Verify encoding for each dimension
+ for i in range(len(in_channels)):
+ assert hasattr(output_data, f'x_{i}')
+ assert output_data[f'x_{i}'].shape[1] == 64
+
+def pytest_configure():
+ """Custom pytest configuration.
+
+ Sets up default configuration values for testing.
+ """
+ pytest.in_channels = [5, 7, 9]
+ pytest.out_channels = 64
\ No newline at end of file
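For context, a minimal sketch of the encoder exercised by the test above, built directly from the constructor arguments the test uses:

    import torch
    import torch_geometric
    from topobenchmark.nn.encoders import DGMStructureFeatureEncoder

    # One rank (x_0) with 5 input features, as in the single-dimension test case
    data = torch_geometric.data.Data()
    data.x_0 = torch.randn(10, 5)
    data.batch_0 = torch.zeros(10, dtype=torch.long)

    encoder = DGMStructureFeatureEncoder(in_channels=[5], out_channels=64)
    out = encoder(data)   # out.x_0 now has 64 channels, matching the test's shape assertion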
diff --git a/test/nn/wrappers/cell/test_cell_wrappers.py b/test/nn/wrappers/cell/test_cell_wrappers.py
index 74019925..45b69888 100644
--- a/test/nn/wrappers/cell/test_cell_wrappers.py
+++ b/test/nn/wrappers/cell/test_cell_wrappers.py
@@ -6,7 +6,7 @@
from ...._utils.flow_mocker import FlowMocker
from unittest.mock import MagicMock
-from topobenchmarkx.nn.wrappers import (
+from topobenchmark.nn.wrappers import (
AbstractWrapper,
CCCNWrapper,
CANWrapper,
@@ -16,7 +16,7 @@
from topomodelx.nn.cell.can import CAN
from topomodelx.nn.cell.ccxn import CCXN
from topomodelx.nn.cell.cwn import CWN
-from topobenchmarkx.nn.backbones.cell.cccn import CCCN
+from topobenchmark.nn.backbones.cell.cccn import CCCN
from unittest.mock import MagicMock
diff --git a/test/nn/wrappers/simplicial/test_SCCNNWrapper.py b/test/nn/wrappers/simplicial/test_SCCNNWrapper.py
index 35fd5d84..f3614a7b 100644
--- a/test/nn/wrappers/simplicial/test_SCCNNWrapper.py
+++ b/test/nn/wrappers/simplicial/test_SCCNNWrapper.py
@@ -4,11 +4,11 @@
from torch_geometric.utils import get_laplacian
from ...._utils.nn_module_auto_test import NNModuleAutoTest
from ...._utils.flow_mocker import FlowMocker
-from topobenchmarkx.nn.backbones.simplicial import SCCNNCustom
+from topobenchmark.nn.backbones.simplicial import SCCNNCustom
from topomodelx.nn.simplicial.san import SAN
from topomodelx.nn.simplicial.scn2 import SCN2
from topomodelx.nn.simplicial.sccn import SCCN
-from topobenchmarkx.nn.wrappers import (
+from topobenchmark.nn.wrappers import (
SCCNWrapper,
SCCNNWrapper,
SANWrapper,
diff --git a/test/optimizer/test_optimizer.py b/test/optimizer/test_optimizer.py
index acc45711..450b4b6f 100644
--- a/test/optimizer/test_optimizer.py
+++ b/test/optimizer/test_optimizer.py
@@ -3,11 +3,11 @@
import pytest
import torch
-from topobenchmarkx.optimizer import TBXOptimizer
+from topobenchmark.optimizer import TBOptimizer
-class TestTBXOptimizer:
- """Test the TBXOptimizer class."""
+class TestTBOptimizer:
+ """Test the TBOptimizer class."""
def setup_method(self):
"""Setup method."""
@@ -25,13 +25,13 @@ def setup_method(self):
def test_configure_optimizer(self):
"""Test the configure_optimizer method."""
# Check with scheduler
- optimizer = TBXOptimizer(**self.optimizer_config_with_scheduler)
+ optimizer = TBOptimizer(**self.optimizer_config_with_scheduler)
out = optimizer.configure_optimizer(self.params)
assert "optimizer" in out
assert "lr_scheduler" in out
# Check without scheduler
- optimizer = TBXOptimizer(**self.optimizer_config_without_scheduler)
+ optimizer = TBOptimizer(**self.optimizer_config_without_scheduler)
out = optimizer.configure_optimizer(self.params)
assert "optimizer" in out
assert "lr_scheduler" not in out
diff --git a/test/transforms/data_manipulations/test_ConnectivityTransforms.py b/test/transforms/data_manipulations/test_ConnectivityTransforms.py
index 127e0a43..4d95be5b 100644
--- a/test/transforms/data_manipulations/test_ConnectivityTransforms.py
+++ b/test/transforms/data_manipulations/test_ConnectivityTransforms.py
@@ -2,7 +2,7 @@
import torch
from torch_geometric.data import Data
-from topobenchmarkx.transforms.data_manipulations import (
+from topobenchmark.transforms.data_manipulations import (
InfereKNNConnectivity,
InfereRadiusConnectivity,
)
@@ -28,19 +28,19 @@ def setup_method(self):
)
# Initialize transforms
- self.infere_by_knn = InfereKNNConnectivity(args={"k": 3})
- self.infere_by_radius = InfereRadiusConnectivity(args={"r": 1.0})
+ self.infer_by_knn = InfereKNNConnectivity(args={"k": 3})
+ self.infer_by_radius = InfereRadiusConnectivity(args={"r": 1.0})
- def test_infere_knn_connectivity(self):
+ def test_infer_knn_connectivity(self):
"""Test inferring connectivity using k-nearest neighbors."""
- data = self.infere_by_knn(self.data.clone())
+ data = self.infer_by_knn(self.data.clone())
assert "edge_index" in data, "No edges in Data object"
assert data.edge_index.size(0) == 2
assert data.edge_index.size(1) > 0
def test_radius_connectivity(self):
"""Test inferring connectivity by radius."""
- data = self.infere_by_radius(self.data.clone())
+ data = self.infer_by_radius(self.data.clone())
assert "edge_index" in data, "No edges in Data object"
assert data.edge_index.size(0) == 2
assert data.edge_index.size(1) > 0
\ No newline at end of file
diff --git a/test/transforms/data_manipulations/test_DataFieldTransforms.py b/test/transforms/data_manipulations/test_DataFieldTransforms.py
index 9c177512..c9af0ca0 100644
--- a/test/transforms/data_manipulations/test_DataFieldTransforms.py
+++ b/test/transforms/data_manipulations/test_DataFieldTransforms.py
@@ -2,7 +2,7 @@
import torch
from torch_geometric.data import Data
-from topobenchmarkx.transforms.data_manipulations import KeepSelectedDataFields
+from topobenchmark.transforms.data_manipulations import KeepSelectedDataFields
class TestDataFieldTransforms:
diff --git a/test/transforms/data_manipulations/test_EqualGausFeatures.py b/test/transforms/data_manipulations/test_EqualGausFeatures.py
index dbff7459..15c681fb 100644
--- a/test/transforms/data_manipulations/test_EqualGausFeatures.py
+++ b/test/transforms/data_manipulations/test_EqualGausFeatures.py
@@ -3,7 +3,7 @@
import pytest
import torch
from torch_geometric.data import Data
-from topobenchmarkx.transforms.data_manipulations import EqualGausFeatures
+from topobenchmark.transforms.data_manipulations import EqualGausFeatures
class TestEqualGausFeatures:
diff --git a/test/transforms/data_manipulations/test_FeatureTransforms.py b/test/transforms/data_manipulations/test_FeatureTransforms.py
index 7c80397c..872a164e 100644
--- a/test/transforms/data_manipulations/test_FeatureTransforms.py
+++ b/test/transforms/data_manipulations/test_FeatureTransforms.py
@@ -2,7 +2,7 @@
import torch
from torch_geometric.data import Data
-from topobenchmarkx.transforms.data_manipulations import (
+from topobenchmark.transforms.data_manipulations import (
NodeFeaturesToFloat,
OneHotDegreeFeatures,
NodeDegrees,
diff --git a/test/transforms/data_manipulations/test_GroupHomophily.py b/test/transforms/data_manipulations/test_GroupHomophily.py
index b39d123a..2a83da69 100644
--- a/test/transforms/data_manipulations/test_GroupHomophily.py
+++ b/test/transforms/data_manipulations/test_GroupHomophily.py
@@ -3,7 +3,7 @@
import pytest
import torch
from torch_geometric.data import Data
-from topobenchmarkx.transforms.data_manipulations import GroupCombinatorialHomophily
+from topobenchmark.transforms.data_manipulations import GroupCombinatorialHomophily
class TestGroupCombinatorialHomophily:
diff --git a/test/transforms/data_manipulations/test_IdentityTransform.py b/test/transforms/data_manipulations/test_IdentityTransform.py
index 50c841b8..a362d427 100644
--- a/test/transforms/data_manipulations/test_IdentityTransform.py
+++ b/test/transforms/data_manipulations/test_IdentityTransform.py
@@ -3,7 +3,7 @@
import pytest
import torch
from torch_geometric.data import Data
-from topobenchmarkx.transforms.data_manipulations import IdentityTransform
+from topobenchmark.transforms.data_manipulations import IdentityTransform
class TestIdentityTransform:
diff --git a/test/transforms/data_manipulations/test_MessagePassingHomophily.py b/test/transforms/data_manipulations/test_MessagePassingHomophily.py
index 9411e389..8d58ee4f 100644
--- a/test/transforms/data_manipulations/test_MessagePassingHomophily.py
+++ b/test/transforms/data_manipulations/test_MessagePassingHomophily.py
@@ -4,7 +4,7 @@
import pytest
import torch
from torch_geometric.data import Data
-from topobenchmarkx.transforms.data_manipulations import MessagePassingHomophily
+from topobenchmark.transforms.data_manipulations import MessagePassingHomophily
class TestMessagePassingHomophily:
diff --git a/test/transforms/data_manipulations/test_OnlyConnectedComponent.py b/test/transforms/data_manipulations/test_OnlyConnectedComponent.py
index 64b58ef6..bd3b2efe 100644
--- a/test/transforms/data_manipulations/test_OnlyConnectedComponent.py
+++ b/test/transforms/data_manipulations/test_OnlyConnectedComponent.py
@@ -3,7 +3,7 @@
import pytest
import torch
from torch_geometric.data import Data
-from topobenchmarkx.transforms.data_manipulations import KeepOnlyConnectedComponent
+from topobenchmark.transforms.data_manipulations import KeepOnlyConnectedComponent
class TestKeepOnlyConnectedComponent:
diff --git a/test/transforms/data_manipulations/test_SimplicialCurvature.py b/test/transforms/data_manipulations/test_SimplicialCurvature.py
index 4d556c96..e4cb517b 100644
--- a/test/transforms/data_manipulations/test_SimplicialCurvature.py
+++ b/test/transforms/data_manipulations/test_SimplicialCurvature.py
@@ -2,8 +2,8 @@
import torch
from torch_geometric.data import Data
-from topobenchmarkx.transforms.data_manipulations import CalculateSimplicialCurvature
-from topobenchmarkx.transforms.liftings.graph2simplicial import SimplicialCliqueLifting
+from topobenchmark.transforms.data_manipulations import CalculateSimplicialCurvature
+from topobenchmark.transforms.liftings.graph2simplicial import SimplicialCliqueLifting
class TestSimplicialCurvature:
diff --git a/test/transforms/feature_liftings/test_Concatenation.py b/test/transforms/feature_liftings/test_Concatenation.py
index ffd92819..a8f83d78 100644
--- a/test/transforms/feature_liftings/test_Concatenation.py
+++ b/test/transforms/feature_liftings/test_Concatenation.py
@@ -2,7 +2,7 @@
import torch
-from topobenchmarkx.transforms.liftings.graph2simplicial import (
+from topobenchmark.transforms.liftings.graph2simplicial import (
SimplicialCliqueLifting,
)
diff --git a/test/transforms/feature_liftings/test_ProjectionSum.py b/test/transforms/feature_liftings/test_ProjectionSum.py
index e598e4a6..935a5148 100644
--- a/test/transforms/feature_liftings/test_ProjectionSum.py
+++ b/test/transforms/feature_liftings/test_ProjectionSum.py
@@ -2,7 +2,7 @@
import torch
-from topobenchmarkx.transforms.liftings.graph2simplicial import (
+from topobenchmark.transforms.liftings.graph2simplicial import (
SimplicialCliqueLifting,
)
diff --git a/test/transforms/feature_liftings/test_SetLifting.py b/test/transforms/feature_liftings/test_SetLifting.py
index 9f73260a..9b71816f 100644
--- a/test/transforms/feature_liftings/test_SetLifting.py
+++ b/test/transforms/feature_liftings/test_SetLifting.py
@@ -2,7 +2,7 @@
import torch
-from topobenchmarkx.transforms.liftings.graph2simplicial import (
+from topobenchmark.transforms.liftings.graph2simplicial import (
SimplicialCliqueLifting,
)
diff --git a/test/transforms/liftings/cell/test_CellCyclesLifting.py b/test/transforms/liftings/cell/test_CellCyclesLifting.py
index 36e2e5d4..54fd276f 100644
--- a/test/transforms/liftings/cell/test_CellCyclesLifting.py
+++ b/test/transforms/liftings/cell/test_CellCyclesLifting.py
@@ -2,7 +2,7 @@
import torch
-from topobenchmarkx.transforms.liftings.graph2cell import CellCycleLifting
+from topobenchmark.transforms.liftings.graph2cell import CellCycleLifting
class TestCellCycleLifting:
diff --git a/test/transforms/liftings/hypergraph/test_HypergraphKHopLifting.py b/test/transforms/liftings/hypergraph/test_HypergraphKHopLifting.py
index 0358b6eb..13285fc1 100644
--- a/test/transforms/liftings/hypergraph/test_HypergraphKHopLifting.py
+++ b/test/transforms/liftings/hypergraph/test_HypergraphKHopLifting.py
@@ -2,7 +2,7 @@
import torch
-from topobenchmarkx.transforms.liftings.graph2hypergraph import (
+from topobenchmark.transforms.liftings.graph2hypergraph import (
HypergraphKHopLifting,
)
diff --git a/test/transforms/liftings/hypergraph/test_HypergraphKNearestNeighborsLifting.py b/test/transforms/liftings/hypergraph/test_HypergraphKNearestNeighborsLifting.py
index f70b087d..7e9d1216 100644
--- a/test/transforms/liftings/hypergraph/test_HypergraphKNearestNeighborsLifting.py
+++ b/test/transforms/liftings/hypergraph/test_HypergraphKNearestNeighborsLifting.py
@@ -3,7 +3,7 @@
import pytest
import torch
from torch_geometric.data import Data
-from topobenchmarkx.transforms.liftings.graph2hypergraph import (
+from topobenchmark.transforms.liftings.graph2hypergraph import (
HypergraphKNNLifting,
)
diff --git a/test/transforms/liftings/simplicial/test_SimplicialCliqueLifting.py b/test/transforms/liftings/simplicial/test_SimplicialCliqueLifting.py
index fc2f89f8..7d85b19e 100644
--- a/test/transforms/liftings/simplicial/test_SimplicialCliqueLifting.py
+++ b/test/transforms/liftings/simplicial/test_SimplicialCliqueLifting.py
@@ -2,11 +2,11 @@
import torch
-from topobenchmarkx.transforms.liftings.graph2simplicial import (
+from topobenchmark.transforms.liftings.graph2simplicial import (
SimplicialCliqueLifting
)
-from topobenchmarkx.transforms.converters import Data2NxGraph, Complex2Dict
-from topobenchmarkx.transforms.liftings.base import LiftingTransform
+from topobenchmark.transforms.converters import Data2NxGraph, Complex2Dict
+from topobenchmark.transforms.liftings.base import LiftingTransform
class TestSimplicialCliqueLifting:
"""Test the SimplicialCliqueLifting class."""
diff --git a/test/transforms/liftings/simplicial/test_SimplicialNeighborhoodLifting.py b/test/transforms/liftings/simplicial/test_SimplicialNeighborhoodLifting.py
index 2cf01ac4..5a03f67e 100644
--- a/test/transforms/liftings/simplicial/test_SimplicialNeighborhoodLifting.py
+++ b/test/transforms/liftings/simplicial/test_SimplicialNeighborhoodLifting.py
@@ -2,7 +2,7 @@
import torch
-from topobenchmarkx.transforms.liftings.graph2simplicial import (
+from topobenchmark.transforms.liftings.graph2simplicial import (
SimplicialKHopLifting,
)
diff --git a/test/transforms/liftings/test_AbstractLifting.py b/test/transforms/liftings/test_AbstractLifting.py
index 0d2d6ad1..49167cb1 100644
--- a/test/transforms/liftings/test_AbstractLifting.py
+++ b/test/transforms/liftings/test_AbstractLifting.py
@@ -3,7 +3,7 @@
import pytest
import torch
from torch_geometric.data import Data
-from topobenchmarkx.transforms.liftings import AbstractLifting
+from topobenchmark.transforms.liftings import AbstractLifting
class TestAbstractLifting:
"""Test the AbstractLifting class."""
diff --git a/test/transforms/liftings/test_GraphLifting.py b/test/transforms/liftings/test_GraphLifting.py
index fa02d332..c7acf454 100644
--- a/test/transforms/liftings/test_GraphLifting.py
+++ b/test/transforms/liftings/test_GraphLifting.py
@@ -2,7 +2,7 @@
import pytest
import torch
from torch_geometric.data import Data
-from topobenchmarkx.transforms.liftings import GraphLifting
+from topobenchmark.transforms.liftings import GraphLifting
class ConcreteGraphLifting(GraphLifting):
diff --git a/test/utils/test_config_resolvers.py b/test/utils/test_config_resolvers.py
index f9b3c007..5ebcd6e0 100644
--- a/test/utils/test_config_resolvers.py
+++ b/test/utils/test_config_resolvers.py
@@ -3,9 +3,9 @@
import pytest
from omegaconf import OmegaConf
import hydra
-from topobenchmarkx.utils.config_resolvers import (
+from topobenchmark.utils.config_resolvers import (
infer_in_channels,
- infere_num_cell_dimensions,
+ infer_num_cell_dimensions,
get_default_metrics,
get_default_transform,
get_monitor_metric,
@@ -47,6 +47,7 @@ def test_get_default_transform(self):
out = get_default_transform("graph/ZINC", "cell/can")
assert out == "dataset_defaults/ZINC"
+
def test_get_required_lifting(self):
"""Test get_required_lifting."""
out = get_required_lifting("graph", "graph/gat")
@@ -106,17 +107,29 @@ def test_infer_in_channels(self):
in_channels = infer_in_channels(cfg.dataset, cfg.transforms)
assert in_channels == [1433,1433,1433]
+ cfg = hydra.compose(config_name="run.yaml", overrides=["model=graph/gcn", "dataset=simplicial/mantra_orientation"], return_hydra_config=True)
+ in_channels = infer_in_channels(cfg.dataset, cfg.transforms)
+ assert in_channels == [1]
+
+ cfg = hydra.compose(config_name="run.yaml", overrides=["model=simplicial/scn", "dataset=graph/cocitation_cora"], return_hydra_config=True)
+ in_channels = infer_in_channels(cfg.dataset, cfg.transforms)
+ assert in_channels == [1433,1433,1433]
+
+
def test_infer_num_cell_dimensions(self):
"""Test infer_num_cell_dimensions."""
- out = infere_num_cell_dimensions(None, [7, 7, 7])
+ out = infer_num_cell_dimensions(None, [7, 7, 7])
assert out == 3
- out = infere_num_cell_dimensions([1, 2, 3], [7, 7])
+ out = infer_num_cell_dimensions([1, 2, 3], [7, 7])
assert out == 3
def test_get_default_metrics(self):
"""Test get_default_metrics."""
+ out = get_default_metrics("classification", ["accuracy", "precision"])
+ assert out == ["accuracy", "precision"]
+
out = get_default_metrics("classification")
assert out == ["accuracy", "precision", "recall", "auroc"]
diff --git a/test/utils/test_instantiators.py b/test/utils/test_instantiators.py
index 3eb8c8ed..a4a8c700 100644
--- a/test/utils/test_instantiators.py
+++ b/test/utils/test_instantiators.py
@@ -2,7 +2,7 @@
import pytest
from omegaconf import OmegaConf, DictConfig
-from topobenchmarkx.utils.instantiators import (
+from topobenchmark.utils.instantiators import (
instantiate_callbacks,
instantiate_loggers
)
diff --git a/test/utils/test_logging_utils.py b/test/utils/test_logging_utils.py
index 142dcfce..f21631d3 100644
--- a/test/utils/test_logging_utils.py
+++ b/test/utils/test_logging_utils.py
@@ -1,10 +1,10 @@
"""Unit tests for logging utils."""
import pytest
from unittest.mock import MagicMock, patch
-from topobenchmarkx.utils import log_hyperparameters
+from topobenchmark.utils import log_hyperparameters
-@patch("topobenchmarkx.utils.logging_utils.pylogger.RankedLogger.warning")
-@patch("topobenchmarkx.utils.logging_utils.OmegaConf.to_container")
+@patch("topobenchmark.utils.logging_utils.pylogger.RankedLogger.warning")
+@patch("topobenchmark.utils.logging_utils.OmegaConf.to_container")
def test_log_hyperparameters(mock_to_container, mock_warning):
"""Test the log_hyperparameters function.
diff --git a/test/utils/test_rich_utils.py b/test/utils/test_rich_utils.py
index a9f221d3..20060409 100644
--- a/test/utils/test_rich_utils.py
+++ b/test/utils/test_rich_utils.py
@@ -1,15 +1,15 @@
"""Unit tests for rich_utils."""
import pytest
from unittest.mock import MagicMock, patch
-from topobenchmarkx.utils.rich_utils import enforce_tags, print_config_tree
+from topobenchmark.utils.rich_utils import enforce_tags, print_config_tree
from omegaconf import DictConfig
-@patch("topobenchmarkx.utils.rich_utils.pylogger.RankedLogger.info")
-@patch("topobenchmarkx.utils.rich_utils.rich.tree.Tree")
-@patch("topobenchmarkx.utils.rich_utils.rich.syntax.Syntax")
-@patch("topobenchmarkx.utils.rich_utils.rich.print")
-@patch("topobenchmarkx.utils.rich_utils.Path.write_text")
-@patch("topobenchmarkx.utils.rich_utils.HydraConfig.get")
+@patch("topobenchmark.utils.rich_utils.pylogger.RankedLogger.info")
+@patch("topobenchmark.utils.rich_utils.rich.tree.Tree")
+@patch("topobenchmark.utils.rich_utils.rich.syntax.Syntax")
+@patch("topobenchmark.utils.rich_utils.rich.print")
+@patch("topobenchmark.utils.rich_utils.Path.write_text")
+@patch("topobenchmark.utils.rich_utils.HydraConfig.get")
def test_print_config_tree(mock_hydra_config_get, mock_write_text, mock_rich_print, mock_syntax, mock_tree, mock_info):
'''Test the print_config_tree function.
@@ -56,11 +56,11 @@ def test_print_config_tree(mock_hydra_config_get, mock_write_text, mock_rich_pri
print_config_tree(mock_cfg, save_to_file=True)
-@patch("topobenchmarkx.utils.rich_utils.HydraConfig")
-@patch("topobenchmarkx.utils.rich_utils.Prompt.ask")
-@patch("topobenchmarkx.utils.rich_utils.pylogger.RankedLogger.warning")
-@patch("topobenchmarkx.utils.rich_utils.pylogger.RankedLogger.info")
-@patch("topobenchmarkx.utils.rich_utils.rich.print")
+@patch("topobenchmark.utils.rich_utils.HydraConfig")
+@patch("topobenchmark.utils.rich_utils.Prompt.ask")
+@patch("topobenchmark.utils.rich_utils.pylogger.RankedLogger.warning")
+@patch("topobenchmark.utils.rich_utils.pylogger.RankedLogger.info")
+@patch("topobenchmark.utils.rich_utils.rich.print")
def test_enforce_tags_no_tags(mock_rich_print, mock_info, mock_warning, mock_prompt_ask, mock_hydra_config):
"""Test the enforce_tags function when no tags are provided in the config.
diff --git a/test/utils/test_utils.py b/test/utils/test_utils.py
index cafb9bee..02985868 100644
--- a/test/utils/test_utils.py
+++ b/test/utils/test_utils.py
@@ -5,7 +5,7 @@
from omegaconf import OmegaConf, DictConfig
import torch
from unittest.mock import MagicMock
-from topobenchmarkx.utils.utils import extras, get_metric_value, task_wrapper
+from topobenchmark.utils.utils import extras, get_metric_value, task_wrapper
# initialize(config_path="../../configs", job_name="job")
diff --git a/topobenchmarkx/__init__.py b/topobenchmark/__init__.py
similarity index 100%
rename from topobenchmarkx/__init__.py
rename to topobenchmark/__init__.py
index d91c2f9a..38434159 100755
--- a/topobenchmarkx/__init__.py
+++ b/topobenchmark/__init__.py
@@ -15,14 +15,14 @@
__all__ = [
"data",
+ "dataloader",
"evaluator",
+ "initialize_hydra",
"loss",
+ "model",
"nn",
"transforms",
"utils",
- "dataloader",
- "model",
- "initialize_hydra",
]
diff --git a/topobenchmarkx/__main__.py b/topobenchmark/__main__.py
similarity index 100%
rename from topobenchmarkx/__main__.py
rename to topobenchmark/__main__.py
diff --git a/topobenchmarkx/data/__init__.py b/topobenchmark/data/__init__.py
similarity index 100%
rename from topobenchmarkx/data/__init__.py
rename to topobenchmark/data/__init__.py
diff --git a/topobenchmarkx/data/datasets/__init__.py b/topobenchmark/data/datasets/__init__.py
similarity index 100%
rename from topobenchmarkx/data/datasets/__init__.py
rename to topobenchmark/data/datasets/__init__.py
diff --git a/topobenchmarkx/data/datasets/citation_hypergaph_dataset.py b/topobenchmark/data/datasets/citation_hypergaph_dataset.py
similarity index 99%
rename from topobenchmarkx/data/datasets/citation_hypergaph_dataset.py
rename to topobenchmark/data/datasets/citation_hypergaph_dataset.py
index 3854a924..8710a967 100644
--- a/topobenchmarkx/data/datasets/citation_hypergaph_dataset.py
+++ b/topobenchmark/data/datasets/citation_hypergaph_dataset.py
@@ -9,7 +9,7 @@
from torch_geometric.data import Data, InMemoryDataset, extract_zip
from torch_geometric.io import fs
-from topobenchmarkx.data.utils import (
+from topobenchmark.data.utils import (
download_file_from_drive,
load_hypergraph_pickle_dataset,
)
diff --git a/topobenchmark/data/datasets/mantra_dataset.py b/topobenchmark/data/datasets/mantra_dataset.py
new file mode 100644
index 00000000..897174c4
--- /dev/null
+++ b/topobenchmark/data/datasets/mantra_dataset.py
@@ -0,0 +1,201 @@
+"""Dataset class MANTRA dataset."""
+
+import os
+import os.path as osp
+from typing import ClassVar
+
+from omegaconf import DictConfig
+from torch_geometric.data import Data, InMemoryDataset, extract_gz
+from torch_geometric.io import fs
+
+from topobenchmark.data.utils import (
+ download_file_from_link,
+ read_ndim_manifolds,
+)
+
+
+class MantraDataset(InMemoryDataset):
+ r"""Dataset class for MANTRA manifold dataset.
+
+ Parameters
+ ----------
+ root : str
+ Root directory where the dataset will be saved.
+ name : str
+ Name of the dataset.
+ parameters : DictConfig
+ Configuration parameters for the dataset.
+ **kwargs : dict
+ Additional keyword arguments.
+
+ Attributes
+ ----------
+ URLS (dict): Dictionary containing the URLs for downloading the dataset.
+ FILE_FORMAT (dict): Dictionary containing the file formats for the dataset.
+ RAW_FILE_NAMES (dict): Dictionary containing the raw file names for the dataset.
+ """
+
+ URLS: ClassVar = {
+ "2_manifolds": "https://github.com/aidos-lab/mantra/releases/download/{version}/2_manifolds.json.gz",
+ "3_manifolds": "https://github.com/aidos-lab/mantra/releases/download/{version}/3_manifolds.json.gz",
+ }
+
+ FILE_FORMAT: ClassVar = {
+ "2_manifolds": "json.gz",
+ "3_manifolds": "json.gz",
+ }
+
+ RAW_FILE_NAMES: ClassVar = {}
+
+ def __init__(
+ self,
+ root: str,
+ name: str,
+ parameters: DictConfig,
+ **kwargs,
+ ) -> None:
+ self.parameters = parameters
+ self.manifold_dim = parameters.manifold_dim
+ self.version = parameters.version
+ self.task_variable = parameters.task_variable
+ self.name = "_".join(
+ [name, str(self.version), f"manifold_dim_{self.manifold_dim}"]
+ )
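+ # NOTE: any truthy "slice" kwarg caps processing at the first 100 manifolds
+ # (self.slice is forwarded to read_ndim_manifolds in process()).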
+ if kwargs.get("slice"):
+ self.slice = 100
+ else:
+ self.slice = None
+ super().__init__(
+ root,
+ )
+
+ out = fs.torch_load(self.processed_paths[0])
+ assert len(out) == 3 or len(out) == 4
+
+ if len(out) == 3: # Backward compatibility.
+ data, self.slices, self.sizes = out
+ data_cls = Data
+ else:
+ data, self.slices, self.sizes, data_cls = out
+
+ if not isinstance(data, dict): # Backward compatibility.
+ self.data = data
+ else:
+ self.data = data_cls.from_dict(data)
+
+ assert isinstance(self._data, Data)
+
+ def __repr__(self) -> str:
+ return f"{self.name}(self.root={self.root}, self.name={self.name}, self.parameters={self.parameters}, self.force_reload={self.force_reload})"
+
+ @property
+ def raw_dir(self) -> str:
+ """Return the path to the raw directory of the dataset.
+
+ Returns
+ -------
+ str
+ Path to the raw directory.
+ """
+ return osp.join(
+ self.root,
+ self.name,
+ "raw",
+ )
+
+ @property
+ def processed_dir(self) -> str:
+ """Return the path to the processed directory of the dataset.
+
+ Returns
+ -------
+ str
+ Path to the processed directory.
+ """
+ self.processed_root = osp.join(
+ self.root,
+ self.name,
+ self.task_variable,
+ )
+ return osp.join(self.processed_root, "processed")
+
+ @property
+ def raw_file_names(self) -> list[str]:
+ """Return the raw file names for the dataset.
+
+ Returns
+ -------
+ list[str]
+ List of raw file names.
+ """
+ return [f"{self.manifold_dim}_manifolds.json"]
+
+ @property
+ def processed_file_names(self) -> str:
+ """Return the processed file name for the dataset.
+
+ Returns
+ -------
+ str
+ Processed file name.
+ """
+ return "data.pt"
+
+ def download(self) -> None:
+ r"""Download the dataset from a URL and saves it to the raw directory.
+
+ Raises:
+ FileNotFoundError: If the dataset URL is not found.
+ """
+ # Step 1: Download data from the source
+ self.url = self.URLS[f"{self.manifold_dim}_manifolds"].format(
+ version=self.version
+ )
+ self.file_format = self.FILE_FORMAT[f"{self.manifold_dim}_manifolds"]
+ dataset_name = f"{self.manifold_dim}_manifolds"
+
+ download_file_from_link(
+ file_link=self.url,
+ path_to_save=self.raw_dir,
+ dataset_name=dataset_name,
+ file_format=self.file_format,
+ )
+
+ # Extract zip file
+ folder = self.raw_dir
+ filename = f"{dataset_name}.{self.file_format}"
+ path = osp.join(folder, filename)
+ extract_gz(path, folder)
+
+ # Delete zip file
+ os.unlink(path)
+
+ # # Move files from osp.join(folder, name_download) to folder
+ # for file in os.listdir(osp.join(folder, self.name)):
+ # shutil.move(osp.join(folder, self.name, file), folder)
+ # # Delete osp.join(folder, self.name) dir
+ # shutil.rmtree(osp.join(folder, self.name))
+
+ def process(self) -> None:
+ r"""Handle the data for the dataset.
+
+ This method loads the MANTRA JSON file for the specified manifold
+ dimension, applies the respective preprocessing if specified, and saves
+ the preprocessed data to the appropriate location.
+ """
+
+ data = read_ndim_manifolds(
+ # TODO Fix this
+ osp.join(self.raw_dir, self.raw_file_names[0]),
+ self.manifold_dim,
+ self.task_variable,
+ self.slice,
+ )
+
+ data_list = data
+ self.data, self.slices = self.collate(data_list)
+ self._data_list = None # Reset cache.
+ fs.torch_save(
+ (self._data.to_dict(), self.slices, {}, self._data.__class__),
+ self.processed_paths[0],
+ )
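A minimal sketch of instantiating the new dataset class directly; the parameter values below (version tag, task variable) are illustrative assumptions, not defaults:

    from omegaconf import OmegaConf
    from topobenchmark.data.datasets import MantraDataset

    params = OmegaConf.create({
        "manifold_dim": 2,               # 2- or 3-manifolds, per the URLS mapping above
        "version": "v1.0.0",             # assumed MANTRA release tag
        "task_variable": "orientation",  # assumed task, mirroring configs such as mantra_orientation.yaml
    })
    dataset = MantraDataset(root="datasets/simplicial", name="MANTRA", parameters=params)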
diff --git a/topobenchmarkx/data/datasets/us_county_demos_dataset.py b/topobenchmark/data/datasets/us_county_demos_dataset.py
similarity index 95%
rename from topobenchmarkx/data/datasets/us_county_demos_dataset.py
rename to topobenchmark/data/datasets/us_county_demos_dataset.py
index 3bf5d40e..ea383819 100644
--- a/topobenchmarkx/data/datasets/us_county_demos_dataset.py
+++ b/topobenchmark/data/datasets/us_county_demos_dataset.py
@@ -9,7 +9,7 @@
from torch_geometric.data import Data, InMemoryDataset, extract_zip
from torch_geometric.io import fs
-from topobenchmarkx.data.utils import (
+from topobenchmark.data.utils import (
download_file_from_drive,
read_us_county_demos,
)
@@ -141,13 +141,16 @@ def download(self) -> None:
dataset_name=self.name,
file_format=self.file_format,
)
- # Extract zip file
+
+ # Step 2: extract zip file
folder = self.raw_dir
filename = f"{self.name}.{self.file_format}"
path = osp.join(folder, filename)
extract_zip(path, folder)
# Delete zip file
os.unlink(path)
+
+ # Step 3: organize files
# Move files from osp.join(folder, name_download) to folder
for file in os.listdir(osp.join(folder, self.name)):
shutil.move(osp.join(folder, self.name, file), folder)
@@ -161,12 +164,17 @@ def process(self) -> None:
processing transformations if specified, and saves the processed data
to the appropriate location.
"""
+ # Step 1: extract the data
data = read_us_county_demos(
self.raw_dir, self.year, self.task_variable
)
data_list = [data]
+
+ # Step 2: collate the graphs
self.data, self.slices = self.collate(data_list)
self._data_list = None # Reset cache.
+
+ # Step 3: save processed data
fs.torch_save(
(self._data.to_dict(), self.slices, {}, self._data.__class__),
self.processed_paths[0],
diff --git a/topobenchmarkx/data/loaders/__init__.py b/topobenchmark/data/loaders/__init__.py
similarity index 74%
rename from topobenchmarkx/data/loaders/__init__.py
rename to topobenchmark/data/loaders/__init__.py
index 24da93c2..9d243507 100755
--- a/topobenchmarkx/data/loaders/__init__.py
+++ b/topobenchmark/data/loaders/__init__.py
@@ -5,9 +5,12 @@
from .graph import __all__ as graph_all
from .hypergraph import *
from .hypergraph import __all__ as hypergraph_all
+from .simplicial import *
+from .simplicial import __all__ as simplicial_all
__all__ = [
"AbstractLoader",
*graph_all,
*hypergraph_all,
+ *simplicial_all,
]
diff --git a/topobenchmarkx/data/loaders/base.py b/topobenchmark/data/loaders/base.py
similarity index 86%
rename from topobenchmarkx/data/loaders/base.py
rename to topobenchmark/data/loaders/base.py
index 7f4446fe..66b08cb0 100755
--- a/topobenchmarkx/data/loaders/base.py
+++ b/topobenchmark/data/loaders/base.py
@@ -45,15 +45,20 @@ def load_dataset(self) -> torch_geometric.data.Data:
"""
raise NotImplementedError
- def load(self) -> tuple[torch_geometric.data.Data, str]:
+ def load(self, **kwargs) -> tuple[torch_geometric.data.Data, str]:
"""Load data.
+ Parameters
+ ----------
+ **kwargs : dict
+ Additional keyword arguments.
+
Returns
-------
tuple[torch_geometric.data.Data, str]
Tuple containing the loaded data and the data directory.
"""
- dataset = self.load_dataset()
+ dataset = self.load_dataset(**kwargs)
data_dir = self.get_data_dir()
return dataset, data_dir
diff --git a/topobenchmarkx/data/loaders/graph/__init__.py b/topobenchmark/data/loaders/graph/__init__.py
similarity index 100%
rename from topobenchmarkx/data/loaders/graph/__init__.py
rename to topobenchmark/data/loaders/graph/__init__.py
diff --git a/topobenchmarkx/data/loaders/graph/hetero_datasets.py b/topobenchmark/data/loaders/graph/hetero_datasets.py
similarity index 94%
rename from topobenchmarkx/data/loaders/graph/hetero_datasets.py
rename to topobenchmark/data/loaders/graph/hetero_datasets.py
index 0f0809cd..d4426483 100644
--- a/topobenchmarkx/data/loaders/graph/hetero_datasets.py
+++ b/topobenchmark/data/loaders/graph/hetero_datasets.py
@@ -4,7 +4,7 @@
from torch_geometric.data import Dataset
from torch_geometric.datasets import HeterophilousGraphDataset
-from topobenchmarkx.data.loaders.base import AbstractLoader
+from topobenchmark.data.loaders.base import AbstractLoader
class HeterophilousGraphDatasetLoader(AbstractLoader):
diff --git a/topobenchmarkx/data/loaders/graph/manual_graph_dataset_loader.py b/topobenchmark/data/loaders/graph/manual_graph_dataset_loader.py
similarity index 87%
rename from topobenchmarkx/data/loaders/graph/manual_graph_dataset_loader.py
rename to topobenchmark/data/loaders/graph/manual_graph_dataset_loader.py
index 07ef182d..71f9a0f5 100644
--- a/topobenchmarkx/data/loaders/graph/manual_graph_dataset_loader.py
+++ b/topobenchmark/data/loaders/graph/manual_graph_dataset_loader.py
@@ -5,9 +5,9 @@
from omegaconf import DictConfig
-from topobenchmarkx.data.loaders.base import AbstractLoader
-from topobenchmarkx.data.utils import load_manual_graph
-from topobenchmarkx.dataloader import DataloadDataset
+from topobenchmark.data.loaders.base import AbstractLoader
+from topobenchmark.data.utils import load_manual_graph
+from topobenchmark.dataloader import DataloadDataset
class ManualGraphDatasetLoader(AbstractLoader):
@@ -38,6 +38,7 @@ def load_dataset(self) -> Any:
# Load the graph data using the manual graph loader function
data = load_manual_graph()
+ data["num_classes"] = 2
# Create and return the dataset object
dataset = DataloadDataset([data])
diff --git a/topobenchmarkx/data/loaders/graph/modecule_datasets.py b/topobenchmark/data/loaders/graph/modecule_datasets.py
similarity index 98%
rename from topobenchmarkx/data/loaders/graph/modecule_datasets.py
rename to topobenchmark/data/loaders/graph/modecule_datasets.py
index d089dd2f..c2b150f6 100644
--- a/topobenchmarkx/data/loaders/graph/modecule_datasets.py
+++ b/topobenchmark/data/loaders/graph/modecule_datasets.py
@@ -8,7 +8,7 @@
from torch_geometric.data import Dataset
from torch_geometric.datasets import AQSOL, ZINC
-from topobenchmarkx.data.loaders.base import AbstractLoader
+from topobenchmark.data.loaders.base import AbstractLoader
class MoleculeDatasetLoader(AbstractLoader):
diff --git a/topobenchmarkx/data/loaders/graph/planetoid_datasets.py b/topobenchmark/data/loaders/graph/planetoid_datasets.py
similarity index 94%
rename from topobenchmarkx/data/loaders/graph/planetoid_datasets.py
rename to topobenchmark/data/loaders/graph/planetoid_datasets.py
index d51a5842..77884eea 100644
--- a/topobenchmarkx/data/loaders/graph/planetoid_datasets.py
+++ b/topobenchmark/data/loaders/graph/planetoid_datasets.py
@@ -4,7 +4,7 @@
from torch_geometric.data import Dataset
from torch_geometric.datasets import Planetoid
-from topobenchmarkx.data.loaders.base import AbstractLoader
+from topobenchmark.data.loaders.base import AbstractLoader
class PlanetoidDatasetLoader(AbstractLoader):
diff --git a/topobenchmarkx/data/loaders/graph/tu_datasets.py b/topobenchmark/data/loaders/graph/tu_datasets.py
similarity index 94%
rename from topobenchmarkx/data/loaders/graph/tu_datasets.py
rename to topobenchmark/data/loaders/graph/tu_datasets.py
index 60cb4f28..137aeca5 100644
--- a/topobenchmarkx/data/loaders/graph/tu_datasets.py
+++ b/topobenchmark/data/loaders/graph/tu_datasets.py
@@ -4,7 +4,7 @@
from torch_geometric.data import Dataset
from torch_geometric.datasets import TUDataset
-from topobenchmarkx.data.loaders.base import AbstractLoader
+from topobenchmark.data.loaders.base import AbstractLoader
class TUDatasetLoader(AbstractLoader):
diff --git a/topobenchmarkx/data/loaders/graph/us_county_demos_dataset_loader.py b/topobenchmark/data/loaders/graph/us_county_demos_dataset_loader.py
similarity index 94%
rename from topobenchmarkx/data/loaders/graph/us_county_demos_dataset_loader.py
rename to topobenchmark/data/loaders/graph/us_county_demos_dataset_loader.py
index 7fd928a5..f017926f 100644
--- a/topobenchmarkx/data/loaders/graph/us_county_demos_dataset_loader.py
+++ b/topobenchmark/data/loaders/graph/us_county_demos_dataset_loader.py
@@ -4,8 +4,8 @@
from omegaconf import DictConfig
-from topobenchmarkx.data.datasets import USCountyDemosDataset
-from topobenchmarkx.data.loaders.base import AbstractLoader
+from topobenchmark.data.datasets import USCountyDemosDataset
+from topobenchmark.data.loaders.base import AbstractLoader
class USCountyDemosDatasetLoader(AbstractLoader):
diff --git a/topobenchmarkx/data/loaders/hypergraph/__init__.py b/topobenchmark/data/loaders/hypergraph/__init__.py
similarity index 95%
rename from topobenchmarkx/data/loaders/hypergraph/__init__.py
rename to topobenchmark/data/loaders/hypergraph/__init__.py
index 2ca51e62..5015a41c 100644
--- a/topobenchmarkx/data/loaders/hypergraph/__init__.py
+++ b/topobenchmark/data/loaders/hypergraph/__init__.py
@@ -26,12 +26,12 @@ def is_loader_class(obj: Any) -> bool:
-------
bool
True if the object is a valid hypergraph loader class (non-private class
- with 'HypergraphDatasetLoader' in name), False otherwise.
+ with 'DatasetLoader' in name), False otherwise.
"""
return (
inspect.isclass(obj)
and not obj.__name__.startswith("_")
- and "HypergraphDatasetLoader" in obj.__name__
+ and "DatasetLoader" in obj.__name__
)
@classmethod
diff --git a/topobenchmarkx/data/loaders/hypergraph/citation_hypergraph_dataset_loader.py b/topobenchmark/data/loaders/hypergraph/citation_hypergraph_dataset_loader.py
similarity index 91%
rename from topobenchmarkx/data/loaders/hypergraph/citation_hypergraph_dataset_loader.py
rename to topobenchmark/data/loaders/hypergraph/citation_hypergraph_dataset_loader.py
index 6a9ed63d..9eeced13 100644
--- a/topobenchmarkx/data/loaders/hypergraph/citation_hypergraph_dataset_loader.py
+++ b/topobenchmark/data/loaders/hypergraph/citation_hypergraph_dataset_loader.py
@@ -2,8 +2,8 @@
from omegaconf import DictConfig
-from topobenchmarkx.data.datasets import CitationHypergraphDataset
-from topobenchmarkx.data.loaders.base import AbstractLoader
+from topobenchmark.data.datasets import CitationHypergraphDataset
+from topobenchmark.data.loaders.base import AbstractLoader
class CitationHypergraphDatasetLoader(AbstractLoader):
diff --git a/topobenchmark/data/loaders/simplicial/__init__.py b/topobenchmark/data/loaders/simplicial/__init__.py
new file mode 100644
index 00000000..e6d55701
--- /dev/null
+++ b/topobenchmark/data/loaders/simplicial/__init__.py
@@ -0,0 +1,96 @@
+"""Init file for simplicial dataset load module with automated loader discovery."""
+
+import inspect
+from importlib import util
+from pathlib import Path
+from typing import Any, ClassVar
+
+
+class SimplicialLoaderManager:
+ """Manages automatic discovery and registration of simplicial dataset loader classes."""
+
+ # Base class that all simplicial loaders are expected to inherit from
+ BASE_LOADER_CLASS: ClassVar[type] = object
+
+ @staticmethod
+ def is_loader_class(obj: Any) -> bool:
+ """Check if an object is a valid simplicial dataset loader class.
+
+ Parameters
+ ----------
+ obj : Any
+ The object to check if it's a valid simplicial dataset loader class.
+
+ Returns
+ -------
+ bool
+ True if the object is a valid simplicial dataset loader class (non-private class
+ with 'DatasetLoader' in name), False otherwise.
+ """
+ return (
+ inspect.isclass(obj)
+ and not obj.__name__.startswith("_")
+ and "DatasetLoader" in obj.__name__
+ )
+
+ @classmethod
+ def discover_loaders(cls, package_path: str) -> dict[str, type[Any]]:
+ """Dynamically discover all simplicial dataset loader classes in the package.
+
+ Parameters
+ ----------
+ package_path : str
+ Path to the package's __init__.py file.
+
+ Returns
+ -------
+ Dict[str, Type[Any]]
+ Dictionary mapping loader class names to their corresponding class objects.
+ """
+ loaders = {}
+
+ # Get the directory containing the loader modules
+ package_dir = Path(package_path).parent
+
+ # Iterate through all .py files in the directory
+ for file_path in package_dir.glob("*.py"):
+ if file_path.stem == "__init__":
+ continue
+
+ # Import the module
+ module_name = f"{Path(package_path).stem}.{file_path.stem}"
+ spec = util.spec_from_file_location(module_name, file_path)
+ if spec and spec.loader:
+ module = util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+
+ # Find all simplicial dataset loader classes in the module
+ for name, obj in inspect.getmembers(module):
+ if (
+ cls.is_loader_class(obj)
+ and obj.__module__ == module.__name__
+ ):
+ loaders[name] = obj # noqa: PERF403
+
+ return loaders
+
+
+# Create the loader manager
+manager = SimplicialLoaderManager()
+
+# Automatically discover and populate loaders
+SIMPLICIAL_LOADERS = manager.discover_loaders(__file__)
+
+SIMPLICIAL_LOADERS_list = list(SIMPLICIAL_LOADERS.keys())
+
+# Automatically generate __all__
+__all__ = [
+ # Loader collections
+ "SIMPLICIAL_LOADERS",
+ "SIMPLICIAL_LOADERS_list",
+ # Individual loader classes
+ *SIMPLICIAL_LOADERS.keys(),
+]
+
+# For backwards compatibility, create individual imports
+locals().update(**SIMPLICIAL_LOADERS)
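For orientation, a minimal usage sketch of the discovery registry defined above (an illustration, not a confirmed API beyond the names exported by this __init__.py): the registry is name-based, so only classes whose names contain "DatasetLoader" show up in it.

# Hedged sketch: assumes the package is installed and the MANTRA loader added later
# in this diff is present on disk.
from topobenchmark.data.loaders.simplicial import (
    SIMPLICIAL_LOADERS,
    SIMPLICIAL_LOADERS_list,
)

print(SIMPLICIAL_LOADERS_list)  # e.g. ['MantraSimplicialDatasetLoader']

# Classes can be looked up by name or imported directly thanks to locals().update(...)
loader_cls = SIMPLICIAL_LOADERS["MantraSimplicialDatasetLoader"]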
diff --git a/topobenchmark/data/loaders/simplicial/mantra_dataset_loader.py b/topobenchmark/data/loaders/simplicial/mantra_dataset_loader.py
new file mode 100644
index 00000000..b264b611
--- /dev/null
+++ b/topobenchmark/data/loaders/simplicial/mantra_dataset_loader.py
@@ -0,0 +1,70 @@
+"""Loaders for Citation Hypergraph dataset."""
+
+from omegaconf import DictConfig
+
+from topobenchmark.data.datasets import MantraDataset
+from topobenchmark.data.loaders.base import AbstractLoader
+
+
+class MantraSimplicialDatasetLoader(AbstractLoader):
+ """Load Mantra dataset with configurable parameters.
+
+    Note: for simplicial datasets the class name must contain "DatasetLoader" so that the automated loader discovery registers it.
+
+ Parameters
+ ----------
+ parameters : DictConfig
+ Configuration parameters containing:
+ - data_dir: Root directory for data
+ - data_name: Name of the dataset
+ - other relevant parameters
+
+ **kwargs : dict
+ Additional keyword arguments.
+ """
+
+ def __init__(self, parameters: DictConfig, **kwargs) -> None:
+ super().__init__(parameters, **kwargs)
+
+ def load_dataset(self, **kwargs) -> MantraDataset:
+ """Load the Citation Hypergraph dataset.
+
+ Parameters
+ ----------
+ **kwargs : dict
+ Additional keyword arguments for dataset initialization.
+
+ Returns
+ -------
+        MantraDataset
+            The loaded MANTRA dataset with the appropriate `data_dir`.
+
+ Raises
+ ------
+ RuntimeError
+ If dataset loading fails.
+ """
+
+ dataset = self._initialize_dataset(**kwargs)
+ self.data_dir = self.get_data_dir()
+ return dataset
+
+ def _initialize_dataset(self, **kwargs) -> MantraDataset:
+ """Initialize the Citation Hypergraph dataset.
+
+ Parameters
+ ----------
+ **kwargs : dict
+ Additional keyword arguments for dataset initialization.
+
+ Returns
+ -------
+        MantraDataset
+ The initialized dataset instance.
+ """
+ return MantraDataset(
+ root=str(self.root_data_dir),
+ name=self.parameters.data_name,
+ parameters=self.parameters,
+ **kwargs,
+ )
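A hedged instantiation sketch for the loader above; the DictConfig keys shown (data_dir, data_name) come from its docstring, and any further keys required by AbstractLoader are not confirmed here.

from omegaconf import DictConfig

from topobenchmark.data.loaders.simplicial import MantraSimplicialDatasetLoader

# Keys below are assumptions based on the docstring; the real config schema may differ.
params = DictConfig({"data_dir": "datasets/simplicial", "data_name": "mantra"})
loader = MantraSimplicialDatasetLoader(params)
dataset = loader.load_dataset()  # MantraDataset rooted at loader.root_data_dir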
diff --git a/topobenchmarkx/data/preprocessor/__init__.py b/topobenchmark/data/preprocessor/__init__.py
similarity index 100%
rename from topobenchmarkx/data/preprocessor/__init__.py
rename to topobenchmark/data/preprocessor/__init__.py
diff --git a/topobenchmarkx/data/preprocessor/preprocessor.py b/topobenchmark/data/preprocessor/preprocessor.py
similarity index 98%
rename from topobenchmarkx/data/preprocessor/preprocessor.py
rename to topobenchmark/data/preprocessor/preprocessor.py
index 27a92176..751c6274 100644
--- a/topobenchmarkx/data/preprocessor/preprocessor.py
+++ b/topobenchmark/data/preprocessor/preprocessor.py
@@ -8,14 +8,14 @@
import torch_geometric
from torch_geometric.io import fs
-from topobenchmarkx.data.utils import (
+from topobenchmark.data.utils import (
ensure_serializable,
load_inductive_splits,
load_transductive_splits,
make_hash,
)
-from topobenchmarkx.dataloader import DataloadDataset
-from topobenchmarkx.transforms.data_transform import DataTransform
+from topobenchmark.dataloader import DataloadDataset
+from topobenchmark.transforms.data_transform import DataTransform
class PreProcessor(torch_geometric.data.InMemoryDataset):
diff --git a/topobenchmarkx/data/utils/__init__.py b/topobenchmark/data/utils/__init__.py
similarity index 94%
rename from topobenchmarkx/data/utils/__init__.py
rename to topobenchmark/data/utils/__init__.py
index 01e77220..d7010c2b 100644
--- a/topobenchmarkx/data/utils/__init__.py
+++ b/topobenchmark/data/utils/__init__.py
@@ -40,7 +40,9 @@
from .io_utils import ( # noqa: E402
download_file_from_drive, # noqa: F401
+ download_file_from_link, # noqa: F401
load_hypergraph_pickle_dataset, # noqa: F401
+ read_ndim_manifolds, # noqa: F401
read_us_county_demos, # noqa: F401
)
diff --git a/topobenchmarkx/data/utils/adapters.py b/topobenchmark/data/utils/adapters.py
similarity index 98%
rename from topobenchmarkx/data/utils/adapters.py
rename to topobenchmark/data/utils/adapters.py
index 6aa33fac..9aa8355a 100644
--- a/topobenchmarkx/data/utils/adapters.py
+++ b/topobenchmark/data/utils/adapters.py
@@ -7,8 +7,8 @@
from topomodelx.utils.sparse import from_sparse
from torch_geometric.utils.undirected import is_undirected, to_undirected
-from topobenchmarkx.data.utils.domain import Complex
-from topobenchmarkx.data.utils.utils import (
+from topobenchmark.data.utils.domain import Complex
+from topobenchmark.data.utils.utils import (
generate_zero_sparse_connectivity,
select_neighborhoods_of_interest,
)
diff --git a/topobenchmarkx/data/utils/domain.py b/topobenchmark/data/utils/domain.py
similarity index 100%
rename from topobenchmarkx/data/utils/domain.py
rename to topobenchmark/data/utils/domain.py
diff --git a/topobenchmarkx/data/utils/io_utils.py b/topobenchmark/data/utils/io_utils.py
similarity index 68%
rename from topobenchmarkx/data/utils/io_utils.py
rename to topobenchmark/data/utils/io_utils.py
index f49c5cc8..7e33d31b 100644
--- a/topobenchmarkx/data/utils/io_utils.py
+++ b/topobenchmark/data/utils/io_utils.py
@@ -1,5 +1,6 @@
"""Data IO utilities."""
+import json
import os.path as osp
import pickle
from urllib.parse import parse_qs, urlparse
@@ -9,11 +10,13 @@
import requests
import torch
import torch_geometric
+from toponetx.classes import SimplicialComplex
from torch_geometric.data import Data
from torch_sparse import coalesce
+from topobenchmark.data.utils import get_complex_connectivity
+
-# Function to extract file ID from Google Drive URL
def get_file_id_from_url(url):
"""Extract the file ID from a Google Drive file URL.
@@ -47,7 +50,6 @@ def get_file_id_from_url(url):
return file_id
-# Function to download file from Google Drive
def download_file_from_drive(
file_link, path_to_save, dataset_name, file_format="tar.gz"
):
@@ -82,6 +84,137 @@ def download_file_from_drive(
print("Failed to download the file.")
+def download_file_from_link(
+ file_link, path_to_save, dataset_name, file_format="tar.gz"
+):
+ """Download a file from a link and saves it to the specified path.
+
+ Parameters
+ ----------
+ file_link : str
+ The link of the file to download.
+ path_to_save : str
+ The path where the downloaded file will be saved.
+ dataset_name : str
+ The name of the dataset.
+ file_format : str, optional
+ The format of the downloaded file. Defaults to "tar.gz".
+
+ Raises
+ ------
+ None
+ """
+ response = requests.get(file_link)
+
+ output_path = f"{path_to_save}/{dataset_name}.{file_format}"
+ if response.status_code == 200:
+ with open(output_path, "wb") as f:
+ f.write(response.content)
+ print("Download complete.")
+ else:
+ print("Failed to download the file.")
+
+
+def read_ndim_manifolds(path, dim, y_val="betti_numbers", slice=None):
+ """Load MANTRA dataset.
+
+ Parameters
+ ----------
+ path : str
+ Path to the dataset.
+ dim : int
+ Dimension of the manifolds to load, required to make sanity checks.
+ y_val : str, optional
+ The triangulation information to use as label. Can be one of ['betti_numbers', 'torsion_coefficients',
+        'name', 'genus', 'orientable'] (default: "betti_numbers").
+ slice : int, optional
+ Slice of the dataset to load. If None, load the entire dataset (default: None). Used for testing.
+
+ Returns
+ -------
+    list of torch_geometric.data.Data
+        List of Data objects, one per triangulated manifold in the MANTRA dataset.
+ """
+ # Assert that y_val is one of the valid options
+ # for each surface
+ if dim == 2:
+ assert y_val in [
+ "betti_numbers",
+ "torsion_coefficients",
+ "name",
+ "genus",
+ "orientable",
+ ]
+ elif dim == 3:
+ assert y_val in ["betti_numbers", "torsion_coefficients", "name"]
+ else:
+ raise ValueError("Invalid dimension. Only 2 and 3 are supported.")
+
+ TORSION_COEF_NAMES = ["", "Z_2"]
+ HOMEO_NAMES = [
+ "",
+ "Klein bottle",
+ "RP^2",
+ "S^2",
+ "T^2",
+ "S^2 twist S^1",
+ "S^2 x S^1",
+ "S^3",
+ ]
+
+ TORSION_COEF_NAME_TO_IDX = {
+ name: i for i, name in enumerate(TORSION_COEF_NAMES)
+ }
+ HOMEO_NAME_TO_IDX = {name: i for i, name in enumerate(HOMEO_NAMES)}
+
+ # Load file
+ with open(path) as f:
+ manifold_list = json.load(f)
+
+ data_list = []
+ # For each manifold
+ for manifold in manifold_list[:slice]:
+ n_vertices = manifold["n_vertices"]
+ x = torch.ones(n_vertices, 1)
+ y_value = manifold[y_val]
+
+ if y_val == "betti_numbers":
+ y = torch.tensor(y_value, dtype=torch.long).unsqueeze(dim=0)
+ elif y_val == "genus":
+ y = torch.tensor([y_value], dtype=torch.long).squeeze()
+ elif y_val == "torsion_coefficients":
+ y = torch.tensor(
+ [TORSION_COEF_NAME_TO_IDX[coef] for coef in y_value],
+ dtype=torch.long,
+ ).unsqueeze(dim=0)
+ elif y_val == "name":
+ y = torch.tensor(
+ [HOMEO_NAME_TO_IDX[y_value]], dtype=torch.long
+ ).unsqueeze(0)
+ elif y_val == "orientable":
+ y = torch.tensor([y_value], dtype=torch.long).squeeze()
+ else:
+ raise ValueError(f"Invalid y_val: {y_val}")
+
+ sc = SimplicialComplex()
+
+ # Insert all simplices
+ sc.add_simplices_from(manifold["triangulation"])
+
+ # Build the simplex tensors for features, having only a one
+ x_i = {
+ f"x_{i}": torch.ones(len(sc.skeleton(i)), 1)
+ for i in range(dim + 1)
+ }
+
+ # Construct the connectivity matrices
+ inc_dict = get_complex_connectivity(sc, dim, signed=False)
+
+ data = Data(x=x, y=y, **x_i, **inc_dict)
+ data_list.append(data)
+ return data_list
+
+
def read_us_county_demos(path, year=2012, y_col="Election"):
"""Load US County Demos dataset.
@@ -212,8 +345,10 @@ def load_hypergraph_pickle_dataset(data_dir, data_name):
Parameters
----------
- cfg : DictConfig
- Configuration parameters.
+ data_dir : str
+ Path to data.
+ data_name : str
+ Name of the dataset.
Returns
-------
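A small sketch of calling the new read_ndim_manifolds helper added above; the JSON path is hypothetical and the slice argument keeps the example cheap.

from topobenchmark.data.utils.io_utils import read_ndim_manifolds

data_list = read_ndim_manifolds(
    path="datasets/mantra/2_manifolds.json",  # hypothetical path to the MANTRA JSON file
    dim=2,
    y_val="betti_numbers",
    slice=10,  # parse only the first 10 triangulations
)
print(len(data_list), data_list[0].y)  # one Data object per manifold, Betti numbers as labels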
diff --git a/topobenchmarkx/data/utils/split_utils.py b/topobenchmark/data/utils/split_utils.py
similarity index 99%
rename from topobenchmarkx/data/utils/split_utils.py
rename to topobenchmark/data/utils/split_utils.py
index 68d33c39..dccfa1ec 100644
--- a/topobenchmarkx/data/utils/split_utils.py
+++ b/topobenchmark/data/utils/split_utils.py
@@ -6,7 +6,7 @@
import torch
from sklearn.model_selection import StratifiedKFold
-from topobenchmarkx.dataloader import DataloadDataset
+from topobenchmark.dataloader import DataloadDataset
# Generate splits in different fasions
diff --git a/topobenchmarkx/data/utils/utils.py b/topobenchmark/data/utils/utils.py
similarity index 97%
rename from topobenchmarkx/data/utils/utils.py
rename to topobenchmark/data/utils/utils.py
index cf4c6c16..9580d59e 100755
--- a/topobenchmarkx/data/utils/utils.py
+++ b/topobenchmark/data/utils/utils.py
@@ -153,9 +153,16 @@ def generate_adjacency_from_laplacian(sparse_tensor):
for neighborhood in neighborhoods:
src_rank = int(neighborhood.split("-")[-1])
try:
- if len(neighborhood.split("-")) == 2:
+ if (
+ len(neighborhood.split("-")) == 2
+ or neighborhood.split("-")[0] == "1"
+ ):
r = 1
- neighborhood_type = neighborhood.split("-")[0]
+ neighborhood_type = (
+ neighborhood.split("-")[0]
+ if neighborhood.split("-")[0] != "1"
+ else neighborhood.split("-")[1]
+ )
if "adjacency" in neighborhood_type:
useful_connectivity[neighborhood] = (
connectivity[f"adjacency_{src_rank}"]
diff --git a/topobenchmark/dataloader/__init__.py b/topobenchmark/dataloader/__init__.py
new file mode 100644
index 00000000..ed51644e
--- /dev/null
+++ b/topobenchmark/dataloader/__init__.py
@@ -0,0 +1,6 @@
+"""This module implements the dataloader for the topobenchmark package."""
+
+from .dataload_dataset import DataloadDataset
+from .dataloader import TBDataloader
+
+__all__ = ["DataloadDataset", "TBDataloader"]
diff --git a/topobenchmarkx/dataloader/dataload_dataset.py b/topobenchmark/dataloader/dataload_dataset.py
similarity index 95%
rename from topobenchmarkx/dataloader/dataload_dataset.py
rename to topobenchmark/dataloader/dataload_dataset.py
index 7e95d3c7..ec2ec9ce 100644
--- a/topobenchmarkx/dataloader/dataload_dataset.py
+++ b/topobenchmark/dataloader/dataload_dataset.py
@@ -1,4 +1,4 @@
-"""Dataset class compatible with TBXDataloader."""
+"""Dataset class compatible with TBDataloader."""
import torch_geometric
diff --git a/topobenchmarkx/dataloader/dataloader.py b/topobenchmark/dataloader/dataloader.py
similarity index 96%
rename from topobenchmarkx/dataloader/dataloader.py
rename to topobenchmark/dataloader/dataloader.py
index e033cbde..30c42689 100755
--- a/topobenchmarkx/dataloader/dataloader.py
+++ b/topobenchmark/dataloader/dataloader.py
@@ -1,15 +1,15 @@
-"TBXDataloader class."
+"TBDataloader class."
from typing import Any
from lightning import LightningDataModule
from torch.utils.data import DataLoader
-from topobenchmarkx.dataloader.dataload_dataset import DataloadDataset
-from topobenchmarkx.dataloader.utils import collate_fn
+from topobenchmark.dataloader.dataload_dataset import DataloadDataset
+from topobenchmark.dataloader.utils import collate_fn
-class TBXDataloader(LightningDataModule):
+class TBDataloader(LightningDataModule):
r"""This class takes care of returning the dataloaders for the training, validation, and test datasets.
It also handles the collate function. The class is designed to work with the `torch` dataloaders.
diff --git a/topobenchmarkx/dataloader/utils.py b/topobenchmark/dataloader/utils.py
similarity index 100%
rename from topobenchmarkx/dataloader/utils.py
rename to topobenchmark/dataloader/utils.py
diff --git a/topobenchmarkx/evaluator/__init__.py b/topobenchmark/evaluator/__init__.py
similarity index 75%
rename from topobenchmarkx/evaluator/__init__.py
rename to topobenchmark/evaluator/__init__.py
index 1634e2dc..f03c6c9d 100755
--- a/topobenchmarkx/evaluator/__init__.py
+++ b/topobenchmark/evaluator/__init__.py
@@ -3,6 +3,8 @@
from torchmetrics.classification import AUROC, Accuracy, Precision, Recall
from torchmetrics.regression import MeanAbsoluteError, MeanSquaredError
+from .metrics import ExampleRegressionMetric
+
# Define metrics
METRICS = {
"accuracy": Accuracy,
@@ -11,13 +13,14 @@
"auroc": AUROC,
"mae": MeanAbsoluteError,
"mse": MeanSquaredError,
+ "example": ExampleRegressionMetric,
}
from .base import AbstractEvaluator # noqa: E402
-from .evaluator import TBXEvaluator # noqa: E402
+from .evaluator import TBEvaluator # noqa: E402
__all__ = [
- "AbstractEvaluator",
- "TBXEvaluator",
"METRICS",
+ "AbstractEvaluator",
+ "TBEvaluator",
]
diff --git a/topobenchmarkx/evaluator/base.py b/topobenchmark/evaluator/base.py
similarity index 100%
rename from topobenchmarkx/evaluator/base.py
rename to topobenchmark/evaluator/base.py
diff --git a/topobenchmarkx/evaluator/evaluator.py b/topobenchmark/evaluator/evaluator.py
similarity index 90%
rename from topobenchmarkx/evaluator/evaluator.py
rename to topobenchmark/evaluator/evaluator.py
index 7ac0240d..c091ca62 100755
--- a/topobenchmarkx/evaluator/evaluator.py
+++ b/topobenchmark/evaluator/evaluator.py
@@ -2,10 +2,10 @@
from torchmetrics import MetricCollection
-from topobenchmarkx.evaluator import METRICS, AbstractEvaluator
+from topobenchmark.evaluator import METRICS, AbstractEvaluator
-class TBXEvaluator(AbstractEvaluator):
+class TBEvaluator(AbstractEvaluator):
r"""Evaluator class that is responsible for computing the metrics.
Parameters
@@ -37,6 +37,7 @@ def __init__(self, task, **kwargs):
elif self.task == "multilabel classification":
parameters = {"num_classes": kwargs["num_classes"]}
parameters["task"] = "multilabel"
+ parameters["num_labels"] = kwargs["num_classes"]
metric_names = kwargs["metrics"]
elif self.task == "regression":
@@ -44,7 +45,7 @@ def __init__(self, task, **kwargs):
metric_names = kwargs["metrics"]
else:
- raise ValueError(f"Invalid task {kwargs['task']}")
+ raise ValueError(f"Invalid task {task}")
metrics = {}
for name in metric_names:
@@ -83,7 +84,10 @@ def update(self, model_out: dict):
if self.task == "regression":
self.metrics.update(preds, target.unsqueeze(1))
- elif self.task == "classification":
+ elif (
+ self.task == "classification"
+ or self.task == "multilabel classification"
+ ):
self.metrics.update(preds, target)
else:
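The hunk above wires num_labels for multilabel metrics and routes both classification flavours through the same update path. A hedged construction sketch:

from topobenchmark.evaluator import TBEvaluator

# `metrics` names must exist in the METRICS registry; num_classes doubles as num_labels
# for the multilabel case, mirroring the change above.
evaluator = TBEvaluator(
    task="multilabel classification",
    num_classes=5,
    metrics=["accuracy", "auroc"],
)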
diff --git a/topobenchmark/evaluator/metrics/__init__.py b/topobenchmark/evaluator/metrics/__init__.py
new file mode 100644
index 00000000..7250366f
--- /dev/null
+++ b/topobenchmark/evaluator/metrics/__init__.py
@@ -0,0 +1,108 @@
+"""Init file for custom metrics in evaluator module."""
+
+import importlib
+import inspect
+import sys
+from pathlib import Path
+from typing import Any
+
+
+class LoadManager:
+ """Manages automatic discovery and registration of loss classes."""
+
+ @staticmethod
+ def is_metric_class(obj: Any) -> bool:
+ """Check if an object is a valid metric class.
+
+ Parameters
+ ----------
+ obj : Any
+            The object to check if it's a valid metric class.
+
+ Returns
+ -------
+ bool
+            True if the object is a valid metric class (a non-private
+            subclass of torchmetrics.Metric), False otherwise.
+ """
+ try:
+ from torchmetrics import Metric
+
+ return (
+ inspect.isclass(obj)
+ and not obj.__name__.startswith("_")
+ and issubclass(obj, Metric)
+ and obj is not Metric
+ )
+ except ImportError:
+ return False
+
+ @classmethod
+ def discover_metrics(cls, package_path: str) -> dict[str, type]:
+ """Dynamically discover all metric classes in the package.
+
+ Parameters
+ ----------
+ package_path : str
+ Path to the package's __init__.py file.
+
+ Returns
+ -------
+ Dict[str, Type]
+            Dictionary mapping metric class names to their corresponding class objects.
+ """
+ metrics = {}
+ package_dir = Path(package_path).parent
+
+ # Add parent directory to sys.path to ensure imports work
+ parent_dir = str(package_dir.parent)
+ if parent_dir not in sys.path:
+ sys.path.insert(0, parent_dir)
+
+ # Iterate through all .py files in the directory
+ for file_path in package_dir.glob("*.py"):
+ if file_path.stem == "__init__":
+ continue
+
+ try:
+ # Use importlib to safely import the module
+ module_name = f"{package_dir.stem}.{file_path.stem}"
+ module = importlib.import_module(module_name)
+
+                # Find all metric classes in the module
+ for name, obj in inspect.getmembers(module):
+ if (
+ cls.is_metric_class(obj)
+ and obj.__module__ == module.__name__
+ ):
+ metrics[name] = obj # noqa: PERF403
+
+ except ImportError as e:
+ print(f"Could not import module {module_name}: {e}")
+
+ return metrics
+
+
+# Dynamically create the metric manager and discover metrics
+manager = LoadManager()
+CUSTOM_METRICS = manager.discover_metrics(__file__)
+CUSTOM_METRICS_list = list(CUSTOM_METRICS.keys())
+
+# Combine manual and discovered metrics
+all_metrics = {**CUSTOM_METRICS}
+
+# Generate __all__
+__all__ = [
+ "CUSTOM_METRICS",
+ "CUSTOM_METRICS_list",
+ *list(all_metrics.keys()),
+]
+
+# Update locals for direct import
+locals().update(all_metrics)
+
+# from .example import ExampleRegressionMetric
+
+# __all__ = [
+# "ExampleRegressionMetric",
+# ]
diff --git a/topobenchmark/evaluator/metrics/example.py b/topobenchmark/evaluator/metrics/example.py
new file mode 100644
index 00000000..97ce9b57
--- /dev/null
+++ b/topobenchmark/evaluator/metrics/example.py
@@ -0,0 +1,87 @@
+"""Loss module for the topobenchmark package."""
+
+from typing import Any
+
+import torch
+from torchmetrics import Metric
+from torchmetrics.functional.regression.mse import (
+ _mean_squared_error_compute,
+ _mean_squared_error_update,
+)
+
+
+class ExampleRegressionMetric(Metric):
+ r"""Example metric.
+
+ Parameters
+ ----------
+ squared : bool
+ Whether to compute the squared error (default: True).
+ num_outputs : int
+ The number of outputs.
+ **kwargs : Any
+ Additional keyword arguments.
+ """
+
+ is_differentiable = True
+ higher_is_better = False
+ full_state_update = False
+
+ sum_squared_error: torch.Tensor
+ total: torch.Tensor
+
+ def __init__(
+ self,
+ squared: bool = True,
+ num_outputs: int = 1,
+ **kwargs: Any,
+ ) -> None:
+ super().__init__(**kwargs)
+
+ if not isinstance(squared, bool):
+ raise ValueError(
+ f"Expected argument `squared` to be a boolean but got {squared}"
+ )
+ self.squared = squared
+
+ if not (isinstance(num_outputs, int) and num_outputs > 0):
+ raise ValueError(
+ f"Expected num_outputs to be a positive integer but got {num_outputs}"
+ )
+ self.num_outputs = num_outputs
+
+ self.add_state(
+ "sum_squared_error",
+ default=torch.zeros(num_outputs),
+ dist_reduce_fx="sum",
+ )
+ self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
+
+ def update(self, preds: torch.Tensor, target: torch.Tensor) -> None:
+ """Update state with predictions and targets.
+
+ Parameters
+ ----------
+ preds : torch.Tensor
+ Predictions from model.
+ target : torch.Tensor
+ Ground truth values.
+ """
+ sum_squared_error, num_obs = _mean_squared_error_update(
+ preds, target, num_outputs=self.num_outputs
+ )
+
+ self.sum_squared_error += sum_squared_error
+ self.total += num_obs
+
+ def compute(self) -> torch.Tensor:
+ """Compute mean squared error over state.
+
+ Returns
+ -------
+ torch.Tensor
+ Mean squared error.
+ """
+ return _mean_squared_error_compute(
+ self.sum_squared_error, self.total, squared=self.squared
+ )
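Usage sketch for the metric above; it mirrors torchmetrics' MeanSquaredError and is exposed in the METRICS registry under the "example" key (see the evaluator __init__ change earlier in this diff).

import torch

from topobenchmark.evaluator.metrics import ExampleRegressionMetric

metric = ExampleRegressionMetric(squared=True)
metric.update(torch.tensor([2.5, 0.0, 2.0]), torch.tensor([3.0, -0.5, 2.0]))
print(metric.compute())  # tensor(0.1667): mean squared error over all updates so far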
diff --git a/topobenchmark/loss/__init__.py b/topobenchmark/loss/__init__.py
new file mode 100755
index 00000000..c4193f3a
--- /dev/null
+++ b/topobenchmark/loss/__init__.py
@@ -0,0 +1,102 @@
+"""This module implements the loss functions for the topobenchmark package."""
+
+import importlib
+import inspect
+import sys
+from pathlib import Path
+from typing import Any
+
+
+class LoadManager:
+ """Manages automatic discovery and registration of loss classes."""
+
+ @staticmethod
+ def is_encoder_class(obj: Any) -> bool:
+ """Check if an object is a valid loss class.
+
+ Parameters
+ ----------
+ obj : Any
+ The object to check if it's a valid loss class.
+
+ Returns
+ -------
+ bool
+            True if the object is a valid loss class (a non-private
+            subclass of AbstractLoss), False otherwise.
+ """
+ try:
+ from .base import AbstractLoss
+
+ return (
+ inspect.isclass(obj)
+ and not obj.__name__.startswith("_")
+ and issubclass(obj, AbstractLoss)
+ and obj is not AbstractLoss
+ )
+ except ImportError:
+ return False
+
+ @classmethod
+ def discover_losses(cls, package_path: str) -> dict[str, type]:
+ """Dynamically discover all loss classes in the package.
+
+ Parameters
+ ----------
+ package_path : str
+ Path to the package's __init__.py file.
+
+ Returns
+ -------
+ Dict[str, Type]
+ Dictionary mapping loss class names to their corresponding class objects.
+ """
+ losses = {}
+ package_dir = Path(package_path).parent
+
+ # Add parent directory to sys.path to ensure imports work
+ parent_dir = str(package_dir.parent)
+ if parent_dir not in sys.path:
+ sys.path.insert(0, parent_dir)
+
+ # Iterate through all .py files in the directory
+ for file_path in package_dir.glob("*.py"):
+ if file_path.stem == "__init__":
+ continue
+
+ try:
+ # Use importlib to safely import the module
+ module_name = f"{package_dir.stem}.{file_path.stem}"
+ module = importlib.import_module(module_name)
+
+ # Find all loss classes in the module
+ for name, obj in inspect.getmembers(module):
+ if (
+ cls.is_encoder_class(obj)
+ and obj.__module__ == module.__name__
+ ):
+ losses[name] = obj # noqa: PERF403
+
+ except ImportError as e:
+ print(f"Could not import module {module_name}: {e}")
+
+ return losses
+
+
+# Dynamically create the loss manager and discover losses
+manager = LoadManager()
+LOSSES = manager.discover_losses(__file__)
+LOSSES_list = list(LOSSES.keys())
+
+# Combine manual and discovered losses
+all_encoders = {**LOSSES}
+
+# Generate __all__
+__all__ = [
+ "LOSSES",
+ "LOSSES_list",
+ *list(all_encoders.keys()),
+]
+
+# Update locals for direct import
+locals().update(all_encoders)
diff --git a/topobenchmarkx/loss/base.py b/topobenchmark/loss/base.py
similarity index 100%
rename from topobenchmarkx/loss/base.py
rename to topobenchmark/loss/base.py
diff --git a/topobenchmarkx/loss/dataset/DatasetLoss.py b/topobenchmark/loss/dataset/DatasetLoss.py
similarity index 94%
rename from topobenchmarkx/loss/dataset/DatasetLoss.py
rename to topobenchmark/loss/dataset/DatasetLoss.py
index 0d05a9bc..01f3b413 100644
--- a/topobenchmarkx/loss/dataset/DatasetLoss.py
+++ b/topobenchmark/loss/dataset/DatasetLoss.py
@@ -1,9 +1,9 @@
-"""Loss module for the topobenchmarkx package."""
+"""Loss module for the topobenchmark package."""
import torch
import torch_geometric
-from topobenchmarkx.loss.base import AbstractLoss
+from topobenchmark.loss.base import AbstractLoss
class DatasetLoss(AbstractLoss):
diff --git a/topobenchmark/loss/dataset/__init__.py b/topobenchmark/loss/dataset/__init__.py
new file mode 100644
index 00000000..59291852
--- /dev/null
+++ b/topobenchmark/loss/dataset/__init__.py
@@ -0,0 +1,111 @@
+"""This module implements the loss functions for the topobenchmark package."""
+
+import importlib
+import inspect
+import sys
+from pathlib import Path
+from typing import Any
+
+
+class LoadManager:
+ """Manages automatic discovery and registration of loss classes."""
+
+ @staticmethod
+ def is_encoder_class(obj: Any) -> bool:
+ """Check if an object is a valid loss class.
+
+ Parameters
+ ----------
+ obj : Any
+ The object to check if it's a valid loss class.
+
+ Returns
+ -------
+ bool
+            True if the object is a valid loss class (a non-private
+            subclass of AbstractLoss), False otherwise.
+ """
+ try:
+ from ..base import AbstractLoss
+
+ return (
+ inspect.isclass(obj)
+ and not obj.__name__.startswith("_")
+ and issubclass(obj, AbstractLoss)
+ and obj is not AbstractLoss
+ )
+ except ImportError:
+ return False
+
+ @classmethod
+ def discover_losses(cls, package_path: str) -> dict[str, type]:
+ """Dynamically discover all loss classes in the package.
+
+ Parameters
+ ----------
+ package_path : str
+ Path to the package's __init__.py file.
+
+ Returns
+ -------
+ Dict[str, Type]
+ Dictionary mapping loss class names to their corresponding class objects.
+ """
+ losses = {}
+ package_dir = Path(package_path).parent
+
+ # Add parent directory to sys.path to ensure imports work
+ parent_dir = str(package_dir.parent)
+ if parent_dir not in sys.path:
+ sys.path.insert(0, parent_dir)
+
+ # Iterate through all .py files in the directory
+ for file_path in package_dir.glob("*.py"):
+ if file_path.stem == "__init__":
+ continue
+
+ try:
+ # Use importlib to safely import the module
+ module_name = f"{package_dir.stem}.{file_path.stem}"
+ module = importlib.import_module(module_name)
+
+ # Find all loss classes in the module
+ for name, obj in inspect.getmembers(module):
+ if (
+ cls.is_encoder_class(obj)
+ and obj.__module__ == module.__name__
+ ):
+ losses[name] = obj # noqa: PERF403
+
+ except ImportError as e:
+ print(f"Could not import module {module_name}: {e}")
+
+ return losses
+
+
+# Dynamically create the loss manager and discover losses
+manager = LoadManager()
+LOSSES = manager.discover_losses(__file__)
+LOSSES_list = list(LOSSES.keys())
+
+# Combine manual and discovered losses
+all_encoders = {**LOSSES}
+
+# Generate __all__
+__all__ = [
+ "LOSSES",
+ "LOSSES_list",
+ *list(all_encoders.keys()),
+]
+
+# Update locals for direct import
+locals().update(all_encoders)
+
+
+# """Init file for custom loss module."""
+
+# from .DatasetLoss import DatasetLoss
+
+# __all__ = [
+# "DatasetLoss",
+# ]
diff --git a/topobenchmarkx/loss/loss.py b/topobenchmark/loss/loss.py
similarity index 88%
rename from topobenchmarkx/loss/loss.py
rename to topobenchmark/loss/loss.py
index 95c68b23..bb5a3ed4 100644
--- a/topobenchmarkx/loss/loss.py
+++ b/topobenchmark/loss/loss.py
@@ -1,13 +1,13 @@
-"""Loss module for the topobenchmarkx package."""
+"""Loss module for the topobenchmark package."""
import torch
import torch_geometric
-from topobenchmarkx.loss.base import AbstractLoss
-from topobenchmarkx.loss.dataset import DatasetLoss
+from topobenchmark.loss.base import AbstractLoss
+from topobenchmark.loss.dataset import DatasetLoss
-class TBXLoss(AbstractLoss):
+class TBLoss(AbstractLoss):
r"""Defines the default model loss for the given task.
Parameters
diff --git a/topobenchmark/loss/model/DGMLoss.py b/topobenchmark/loss/model/DGMLoss.py
new file mode 100644
index 00000000..708151dd
--- /dev/null
+++ b/topobenchmark/loss/model/DGMLoss.py
@@ -0,0 +1,77 @@
+"""Differentiable Graph Module loss function."""
+
+import torch
+import torch_geometric
+
+from topobenchmark.loss.base import AbstractLoss
+
+
+class DGMLoss(AbstractLoss):
+ r"""DGM loss function.
+
+ Original implementation https://github.com/lcosmo/DGM_pytorch/blob/main/DGMlib/model_dDGM_old.py
+
+ Parameters
+ ----------
+ loss_weight : float, optional
+ Loss weight (default: 0.5).
+ """
+
+ def __init__(self, loss_weight=0.5):
+ super().__init__()
+ self.loss_weight = loss_weight
+ self.avg_accuracy = None
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}()"
+
+ def forward(
+ self, model_out: dict, batch: torch_geometric.data.Data
+ ) -> torch.Tensor:
+ r"""Forward pass of the loss function.
+
+ Parameters
+ ----------
+ model_out : dict
+ Dictionary containing the model output.
+ batch : torch_geometric.data.Data
+ Batch object containing the batched domain data.
+
+ Returns
+ -------
+ dict
+ Dictionary containing the model output with the loss.
+ """
+ batch_keys = batch.keys()
+ logprobs_keys = sorted(
+ [key for key in batch_keys if "logprobs_" in key]
+ )
+
+ # Filter out the logprobs based on the model phase (Training, test)
+ logprobs = []
+ for key in logprobs_keys:
+ # Get the correct mask
+ if batch.model_state == "Training":
+ mask = batch.train_mask
+ elif batch.model_state == "Validation":
+ mask = batch.val_mask
+ elif batch.model_state == "Test":
+ mask = batch.test_mask
+ logprobs.append(batch[key][mask])
+ logprobs = torch.stack(logprobs)
+
+ corr_pred = (
+ (model_out["logits"].argmax(-1) == model_out["labels"])
+ .float()
+ .detach()
+ )
+ if (
+ self.avg_accuracy is None
+ or self.avg_accuracy.shape[-1] != corr_pred.shape[-1]
+ ):
+ self.avg_accuracy = torch.ones_like(corr_pred) * 0.5
+
+ point_w = self.avg_accuracy - corr_pred
+ loss = (point_w * logprobs.exp().mean(-1)).mean()
+
+ return self.loss_weight * loss
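To make the phase-dependent weighting above easier to follow, here is a toy rendering of the scheme with placeholder tensors; shapes are illustrative only.

import torch

logprobs = torch.randn(1, 8, 5)                 # log-probs of k=5 sampled edges for 8 masked nodes
corr_pred = torch.randint(0, 2, (8,)).float()   # 1 where the node was classified correctly
avg_accuracy = torch.full_like(corr_pred, 0.5)  # running average accuracy, initialised to 0.5

point_w = avg_accuracy - corr_pred              # negative for correct nodes, positive for wrong ones
loss = (point_w * logprobs.exp().mean(-1)).mean()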
diff --git a/topobenchmarkx/loss/model/GraphMLPLoss.py b/topobenchmark/loss/model/GraphMLPLoss.py
similarity index 98%
rename from topobenchmarkx/loss/model/GraphMLPLoss.py
rename to topobenchmark/loss/model/GraphMLPLoss.py
index 3ee35575..50232812 100644
--- a/topobenchmarkx/loss/model/GraphMLPLoss.py
+++ b/topobenchmark/loss/model/GraphMLPLoss.py
@@ -3,7 +3,7 @@
import torch
import torch_geometric
-from topobenchmarkx.loss.base import AbstractLoss
+from topobenchmark.loss.base import AbstractLoss
class GraphMLPLoss(AbstractLoss):
diff --git a/topobenchmark/loss/model/__init__.py b/topobenchmark/loss/model/__init__.py
new file mode 100644
index 00000000..530fb068
--- /dev/null
+++ b/topobenchmark/loss/model/__init__.py
@@ -0,0 +1,113 @@
+"""This module implements the loss functions for the topobenchmark package."""
+
+import importlib
+import inspect
+import sys
+from pathlib import Path
+from typing import Any
+
+
+class LoadManager:
+ """Manages automatic discovery and registration of loss classes."""
+
+ @staticmethod
+ def is_encoder_class(obj: Any) -> bool:
+ """Check if an object is a valid loss class.
+
+ Parameters
+ ----------
+ obj : Any
+ The object to check if it's a valid loss class.
+
+ Returns
+ -------
+ bool
+            True if the object is a valid loss class (a non-private
+            subclass of AbstractLoss), False otherwise.
+ """
+ try:
+ from ..base import AbstractLoss
+
+ return (
+ inspect.isclass(obj)
+ and not obj.__name__.startswith("_")
+ and issubclass(obj, AbstractLoss)
+ and obj is not AbstractLoss
+ )
+ except ImportError:
+ return False
+
+ @classmethod
+ def discover_losses(cls, package_path: str) -> dict[str, type]:
+ """Dynamically discover all loss classes in the package.
+
+ Parameters
+ ----------
+ package_path : str
+ Path to the package's __init__.py file.
+
+ Returns
+ -------
+ Dict[str, Type]
+ Dictionary mapping loss class names to their corresponding class objects.
+ """
+ losses = {}
+ package_dir = Path(package_path).parent
+
+ # Add parent directory to sys.path to ensure imports work
+ parent_dir = str(package_dir.parent)
+ if parent_dir not in sys.path:
+ sys.path.insert(0, parent_dir)
+
+ # Iterate through all .py files in the directory
+ for file_path in package_dir.glob("*.py"):
+ if file_path.stem == "__init__":
+ continue
+
+ try:
+ # Use importlib to safely import the module
+ module_name = f"{package_dir.stem}.{file_path.stem}"
+ module = importlib.import_module(module_name)
+
+ # Find all loss classes in the module
+ for name, obj in inspect.getmembers(module):
+ if (
+ cls.is_encoder_class(obj)
+ and obj.__module__ == module.__name__
+ ):
+ losses[name] = obj # noqa: PERF403
+
+ except ImportError as e:
+ print(f"Could not import module {module_name}: {e}")
+
+ return losses
+
+
+# Dynamically create the loss manager and discover losses
+manager = LoadManager()
+LOSSES = manager.discover_losses(__file__)
+LOSSES_list = list(LOSSES.keys())
+
+# Combine manual and discovered losses
+all_encoders = {**LOSSES}
+
+# Generate __all__
+__all__ = [
+ "LOSSES",
+ "LOSSES_list",
+ *list(all_encoders.keys()),
+]
+
+# Update locals for direct import
+locals().update(all_encoders)
+
+
+# """Init file for custom loss module."""
+
+# from .GraphMLPLoss import GraphMLPLoss
+# from .DGMLoss import DGMLoss
+
+# __all__ = [
+# "GraphMLPLoss",
+# "DGMLoss",
+# ]
diff --git a/topobenchmark/model/__init__.py b/topobenchmark/model/__init__.py
new file mode 100644
index 00000000..371f19ba
--- /dev/null
+++ b/topobenchmark/model/__init__.py
@@ -0,0 +1,7 @@
+"""TB model module."""
+
+from .model import TBModel
+
+__all__ = [
+ "TBModel",
+]
diff --git a/topobenchmarkx/model/model.py b/topobenchmark/model/model.py
similarity index 98%
rename from topobenchmarkx/model/model.py
rename to topobenchmark/model/model.py
index 189c794a..d3760355 100755
--- a/topobenchmarkx/model/model.py
+++ b/topobenchmark/model/model.py
@@ -1,4 +1,4 @@
-"""This module defines the `TBXModel` class."""
+"""This module defines the `TBModel` class."""
from typing import Any
@@ -8,7 +8,7 @@
from torchmetrics import MeanMetric
-class TBXModel(LightningModule):
+class TBModel(LightningModule):
r"""A `LightningModule` to define a network.
Parameters
@@ -105,6 +105,9 @@ def model_step(self, batch: Data) -> dict:
dict
Dictionary containing the model output and the loss.
"""
+ # Allow batch object to know the phase of the training
+ batch["model_state"] = self.state_str
+
# Feature Encoder
model_out = self.feature_encoder(batch)
diff --git a/topobenchmarkx/nn/__init__.py b/topobenchmark/nn/__init__.py
similarity index 100%
rename from topobenchmarkx/nn/__init__.py
rename to topobenchmark/nn/__init__.py
diff --git a/topobenchmarkx/nn/backbones/__init__.py b/topobenchmark/nn/backbones/__init__.py
similarity index 100%
rename from topobenchmarkx/nn/backbones/__init__.py
rename to topobenchmark/nn/backbones/__init__.py
index c2ad4b25..0d5c0b1d 100644
--- a/topobenchmarkx/nn/backbones/__init__.py
+++ b/topobenchmark/nn/backbones/__init__.py
@@ -18,11 +18,11 @@
"CCCN",
"EDGNN",
"GraphMLP",
- "SCCNNCustom",
- "TopoTune",
- "TopoTune_OneHasse",
+ "IdentityGAT",
"IdentityGCN",
"IdentityGIN",
- "IdentityGAT",
"IdentitySAGE",
+ "SCCNNCustom",
+ "TopoTune",
+ "TopoTune_OneHasse",
]
diff --git a/topobenchmarkx/nn/backbones/cell/__init__.py b/topobenchmark/nn/backbones/cell/__init__.py
similarity index 100%
rename from topobenchmarkx/nn/backbones/cell/__init__.py
rename to topobenchmark/nn/backbones/cell/__init__.py
diff --git a/topobenchmarkx/nn/backbones/cell/cccn.py b/topobenchmark/nn/backbones/cell/cccn.py
similarity index 100%
rename from topobenchmarkx/nn/backbones/cell/cccn.py
rename to topobenchmark/nn/backbones/cell/cccn.py
diff --git a/topobenchmarkx/nn/backbones/combinatorial/__init__.py b/topobenchmark/nn/backbones/combinatorial/__init__.py
similarity index 100%
rename from topobenchmarkx/nn/backbones/combinatorial/__init__.py
rename to topobenchmark/nn/backbones/combinatorial/__init__.py
diff --git a/topobenchmarkx/nn/backbones/combinatorial/gccn.py b/topobenchmark/nn/backbones/combinatorial/gccn.py
similarity index 99%
rename from topobenchmarkx/nn/backbones/combinatorial/gccn.py
rename to topobenchmark/nn/backbones/combinatorial/gccn.py
index 3c67f737..fda093e3 100644
--- a/topobenchmarkx/nn/backbones/combinatorial/gccn.py
+++ b/topobenchmark/nn/backbones/combinatorial/gccn.py
@@ -6,7 +6,7 @@
import torch.nn.functional as F
from torch_geometric.data import Data
-from topobenchmarkx.data.utils import get_routes_from_neighborhoods
+from topobenchmark.data.utils import get_routes_from_neighborhoods
class TopoTune(torch.nn.Module):
diff --git a/topobenchmarkx/nn/backbones/combinatorial/gccn_onehasse.py b/topobenchmark/nn/backbones/combinatorial/gccn_onehasse.py
similarity index 99%
rename from topobenchmarkx/nn/backbones/combinatorial/gccn_onehasse.py
rename to topobenchmark/nn/backbones/combinatorial/gccn_onehasse.py
index acadabf9..e8299d63 100644
--- a/topobenchmarkx/nn/backbones/combinatorial/gccn_onehasse.py
+++ b/topobenchmark/nn/backbones/combinatorial/gccn_onehasse.py
@@ -6,7 +6,7 @@
import torch.nn.functional as F
from torch_geometric.data import Data
-from topobenchmarkx.data.utils import get_routes_from_neighborhoods
+from topobenchmark.data.utils import get_routes_from_neighborhoods
class TopoTune_OneHasse(torch.nn.Module):
diff --git a/topobenchmarkx/nn/backbones/graph/__init__.py b/topobenchmark/nn/backbones/graph/__init__.py
similarity index 100%
rename from topobenchmarkx/nn/backbones/graph/__init__.py
rename to topobenchmark/nn/backbones/graph/__init__.py
index 59e3996f..ee9b98af 100644
--- a/topobenchmarkx/nn/backbones/graph/__init__.py
+++ b/topobenchmark/nn/backbones/graph/__init__.py
@@ -22,19 +22,19 @@
)
__all__ = [
- "IdentityGCN",
- "IdentityGIN",
- "IdentityGAT",
- "IdentitySAGE",
- "GraphMLP",
- "MLP",
+ "GAT",
"GCN",
- "GraphSAGE",
"GIN",
- "GAT",
+ "MLP",
"PNA",
+ "DeepGraphInfomax",
"EdgeCNN",
+ "GraphMLP",
+ "GraphSAGE",
+ "IdentityGAT",
+ "IdentityGCN",
+ "IdentityGIN",
+ "IdentitySAGE",
"MetaLayer",
"Node2Vec",
- "DeepGraphInfomax",
]
diff --git a/topobenchmarkx/nn/backbones/graph/graph_mlp.py b/topobenchmark/nn/backbones/graph/graph_mlp.py
similarity index 100%
rename from topobenchmarkx/nn/backbones/graph/graph_mlp.py
rename to topobenchmark/nn/backbones/graph/graph_mlp.py
diff --git a/topobenchmarkx/nn/backbones/graph/identity_gnn.py b/topobenchmark/nn/backbones/graph/identity_gnn.py
similarity index 100%
rename from topobenchmarkx/nn/backbones/graph/identity_gnn.py
rename to topobenchmark/nn/backbones/graph/identity_gnn.py
diff --git a/topobenchmarkx/nn/backbones/hypergraph/__init__.py b/topobenchmark/nn/backbones/hypergraph/__init__.py
similarity index 100%
rename from topobenchmarkx/nn/backbones/hypergraph/__init__.py
rename to topobenchmark/nn/backbones/hypergraph/__init__.py
diff --git a/topobenchmarkx/nn/backbones/hypergraph/edgnn.py b/topobenchmark/nn/backbones/hypergraph/edgnn.py
similarity index 100%
rename from topobenchmarkx/nn/backbones/hypergraph/edgnn.py
rename to topobenchmark/nn/backbones/hypergraph/edgnn.py
diff --git a/topobenchmarkx/nn/backbones/simplicial/__init__.py b/topobenchmark/nn/backbones/simplicial/__init__.py
similarity index 100%
rename from topobenchmarkx/nn/backbones/simplicial/__init__.py
rename to topobenchmark/nn/backbones/simplicial/__init__.py
diff --git a/topobenchmarkx/nn/backbones/simplicial/sccnn.py b/topobenchmark/nn/backbones/simplicial/sccnn.py
similarity index 100%
rename from topobenchmarkx/nn/backbones/simplicial/sccnn.py
rename to topobenchmark/nn/backbones/simplicial/sccnn.py
diff --git a/topobenchmark/nn/encoders/__init__.py b/topobenchmark/nn/encoders/__init__.py
new file mode 100644
index 00000000..2260b018
--- /dev/null
+++ b/topobenchmark/nn/encoders/__init__.py
@@ -0,0 +1,103 @@
+"""Init file for encoder module with automated encoder discovery."""
+
+import importlib
+import inspect
+import sys
+from pathlib import Path
+from typing import Any
+
+
+class LoadManager:
+ """Manages automatic discovery and registration of encoder classes."""
+
+ @staticmethod
+ def is_encoder_class(obj: Any) -> bool:
+ """Check if an object is a valid encoder class.
+
+ Parameters
+ ----------
+ obj : Any
+ The object to check if it's a valid encoder class.
+
+ Returns
+ -------
+ bool
+            True if the object is a valid encoder class (a non-private
+            subclass of AbstractFeatureEncoder), False otherwise.
+ """
+ try:
+ from .base import AbstractFeatureEncoder
+
+ return (
+ inspect.isclass(obj)
+ and not obj.__name__.startswith("_")
+ and issubclass(obj, AbstractFeatureEncoder)
+ and obj is not AbstractFeatureEncoder
+ )
+ except ImportError:
+ return False
+
+ @classmethod
+ def discover_encoders(cls, package_path: str) -> dict[str, type]:
+ """Dynamically discover all encoder classes in the package.
+
+ Parameters
+ ----------
+ package_path : str
+ Path to the package's __init__.py file.
+
+ Returns
+ -------
+ Dict[str, Type]
+ Dictionary mapping encoder class names to their corresponding class objects.
+ """
+ encoders = {}
+ package_dir = Path(package_path).parent
+
+ # Add parent directory to sys.path to ensure imports work
+ parent_dir = str(package_dir.parent)
+ if parent_dir not in sys.path:
+ sys.path.insert(0, parent_dir)
+
+ # Iterate through all .py files in the directory
+ for file_path in package_dir.glob("*.py"):
+ if file_path.stem == "__init__":
+ continue
+
+ try:
+ # Use importlib to safely import the module
+ module_name = f"{package_dir.stem}.{file_path.stem}"
+ module = importlib.import_module(module_name)
+
+ # Find all encoder classes in the module
+ for name, obj in inspect.getmembers(module):
+ if (
+ cls.is_encoder_class(obj)
+ and obj.__module__ == module.__name__
+ ):
+ encoders[name] = obj # noqa: PERF403
+
+ except ImportError as e:
+ print(f"Could not import module {module_name}: {e}")
+
+ return encoders
+
+
+# Dynamically create the encoder manager and discover encoders
+manager = LoadManager()
+FEATURE_ENCODERS = manager.discover_encoders(__file__)
+FEATURE_ENCODERS_list = list(FEATURE_ENCODERS.keys())
+
+
+# Combine manual and discovered encoders
+all_encoders = {**FEATURE_ENCODERS}
+
+# Generate __all__
+__all__ = [
+ "FEATURE_ENCODERS",
+ "FEATURE_ENCODERS_list",
+ *list(all_encoders.keys()),
+]
+
+# Update locals for direct import
+locals().update(all_encoders)
diff --git a/topobenchmarkx/nn/encoders/all_cell_encoder.py b/topobenchmark/nn/encoders/all_cell_encoder.py
similarity index 98%
rename from topobenchmarkx/nn/encoders/all_cell_encoder.py
rename to topobenchmark/nn/encoders/all_cell_encoder.py
index ae3a7039..df4b8d21 100644
--- a/topobenchmarkx/nn/encoders/all_cell_encoder.py
+++ b/topobenchmark/nn/encoders/all_cell_encoder.py
@@ -4,7 +4,7 @@
import torch_geometric
from torch_geometric.nn.norm import GraphNorm
-from topobenchmarkx.nn.encoders.base import AbstractFeatureEncoder
+from topobenchmark.nn.encoders.base import AbstractFeatureEncoder
class AllCellFeatureEncoder(AbstractFeatureEncoder):
diff --git a/topobenchmarkx/nn/encoders/base.py b/topobenchmark/nn/encoders/base.py
similarity index 100%
rename from topobenchmarkx/nn/encoders/base.py
rename to topobenchmark/nn/encoders/base.py
diff --git a/topobenchmark/nn/encoders/dgm_encoder.py b/topobenchmark/nn/encoders/dgm_encoder.py
new file mode 100644
index 00000000..c0f81c9f
--- /dev/null
+++ b/topobenchmark/nn/encoders/dgm_encoder.py
@@ -0,0 +1,104 @@
+"""Encoder class to apply BaseEncoder."""
+
+import torch_geometric
+
+from topobenchmark.nn.encoders.all_cell_encoder import BaseEncoder
+from topobenchmark.nn.encoders.base import AbstractFeatureEncoder
+
+from .kdgm import DGM_d
+
+
+class DGMStructureFeatureEncoder(AbstractFeatureEncoder):
+ r"""Encoder class to apply BaseEncoder.
+
+ The BaseEncoder is applied to the features of higher order
+ structures. The class creates a BaseEncoder for each dimension specified in
+ selected_dimensions. Then during the forward pass, the BaseEncoders are
+ applied to the features of the corresponding dimensions.
+
+ Parameters
+ ----------
+ in_channels : list[int]
+ Input dimensions for the features.
+ out_channels : list[int]
+ Output dimensions for the features.
+ proj_dropout : float, optional
+ Dropout for the BaseEncoders (default: 0).
+ selected_dimensions : list[int], optional
+ List of indexes to apply the BaseEncoders to (default: None).
+ **kwargs : dict, optional
+ Additional keyword arguments.
+ """
+
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ proj_dropout=0,
+ selected_dimensions=None,
+ **kwargs,
+ ):
+ super().__init__()
+
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+
+ self.dimensions = (
+ selected_dimensions
+ if (
+ selected_dimensions is not None
+ ) # and len(selected_dimensions) <= len(self.in_channels))
+ else range(len(self.in_channels))
+ )
+ for i in self.dimensions:
+ base_enc = BaseEncoder(
+ self.in_channels[i],
+ self.out_channels,
+ dropout=proj_dropout,
+ )
+ embed_f = BaseEncoder(
+ self.in_channels[i],
+ self.out_channels,
+ dropout=proj_dropout,
+ )
+
+ setattr(
+ self,
+ f"encoder_{i}",
+ DGM_d(base_enc=base_enc, embed_f=embed_f),
+ )
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}(in_channels={self.in_channels}, out_channels={self.out_channels}, dimensions={self.dimensions})"
+
+ def forward(
+ self, data: torch_geometric.data.Data
+ ) -> torch_geometric.data.Data:
+ r"""Forward pass.
+
+ The method applies the BaseEncoders to the features of the selected_dimensions.
+
+ Parameters
+ ----------
+ data : torch_geometric.data.Data
+ Input data object which should contain x_{i} features for each i in the selected_dimensions.
+
+ Returns
+ -------
+ torch_geometric.data.Data
+ Output data object with updated x_{i} features.
+ """
+ if not hasattr(data, "x_0"):
+ data.x_0 = data.x
+
+ for i in self.dimensions:
+ if hasattr(data, f"x_{i}") and hasattr(self, f"encoder_{i}"):
+ batch = getattr(data, f"batch_{i}")
+ x_, x_aux, edges_dgm, logprobs = getattr(self, f"encoder_{i}")(
+ data[f"x_{i}"], batch
+ )
+ data[f"x_{i}"] = x_
+ data[f"x_aux_{i}"] = x_aux
+ data["edges_index"] = edges_dgm
+ data[f"logprobs_{i}"] = logprobs
+ return data
diff --git a/topobenchmark/nn/encoders/kdgm.py b/topobenchmark/nn/encoders/kdgm.py
new file mode 100644
index 00000000..d49f4cd8
--- /dev/null
+++ b/topobenchmark/nn/encoders/kdgm.py
@@ -0,0 +1,214 @@
+"""KDGM module."""
+
+import torch
+from torch import nn
+
+
+def pairwise_euclidean_distances(x: torch.Tensor, dim: int = -1) -> tuple:
+ r"""Compute pairwise Euclidean distances between points in a tensor.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor of points. Each row represents a point in a multidimensional space.
+ dim : int, optional
+ Dimension along which to compute the squared distances.
+ Defaults to -1 (last dimension).
+
+ Returns
+ -------
+ tuple
+ A tuple containing two elements:
+ - dist (torch.Tensor): Squared pairwise Euclidean distances matrix
+ - x (torch.Tensor): The original input tensor
+ """
+ dist = torch.cdist(x, x) ** 2
+ return dist, x
+
+
+def pairwise_poincare_distances(x: torch.Tensor, dim: int = -1) -> tuple:
+ r"""Compute pairwise distances in the Poincarè disk model (Hyperbolic space).
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor of points. Each row represents a point in a multidimensional space.
+ dim : int, optional
+ Dimension along which to compute the squared distances.
+ Defaults to -1 (last dimension).
+
+ Returns
+ -------
+ tuple
+ A tuple containing two elements:
+ - dist (torch.Tensor): Squared pairwise hyperbolic distances matrix
+        - x (torch.Tensor): Normalized input tensor in the Poincaré disk
+ """
+ x_norm = (x**2).sum(dim, keepdim=True)
+ x_norm = (x_norm.sqrt() - 1).relu() + 1
+ x = x / (x_norm * (1 + 1e-2))
+ x_norm = (x**2).sum(dim, keepdim=True)
+
+ pq = torch.cdist(x, x) ** 2
+ dist = (
+ torch.arccosh(
+ 1e-6 + 1 + 2 * pq / ((1 - x_norm) * (1 - x_norm.transpose(-1, -2)))
+ )
+ ** 2
+ )
+ return dist, x
+
+
+class DGM_d(nn.Module):
+ r"""Distance Graph Matching (DGM) neural network module.
+
+ This class implements a graph matching technique that learns to sample
+ edges based on distance metrics in either Euclidean or Hyperbolic space.
+
+ Parameters
+ ----------
+ base_enc : nn.Module
+ Base encoder for transforming input features.
+ embed_f : nn.Module
+ Embedding function for further feature transformation.
+ k : int, optional
+ Number of edges to sample in each graph. Defaults to 5.
+ distance : str, optional
+ Distance metric to use for edge sampling.
+ Choices are 'euclidean' or 'hyperbolic'.
+ Defaults to 'euclidean'.
+ sparse : bool, optional
+ Flag to indicate sparse sampling strategy.
+ Defaults to True.
+ """
+
+ def __init__(
+ self, base_enc, embed_f, k=5, distance="euclidean", sparse=True
+ ):
+ super().__init__()
+
+ self.sparse = sparse
+ self.temperature = nn.Parameter(
+ torch.tensor(1.0 if distance == "hyperbolic" else 4.0).float()
+ )
+ self.base_enc = base_enc
+ self.embed_f = embed_f
+ self.centroid = None
+ self.scale = None
+ self.k = k
+
+ self.debug = False
+ if distance == "euclidean":
+ self.distance = pairwise_euclidean_distances
+ else:
+ self.distance = pairwise_poincare_distances
+
+ def forward(
+ self, x: torch.Tensor, batch: torch.Tensor, fixedges=None
+ ) -> tuple:
+ r"""Forward pass of the Distance Graph Matching module.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor containing node features.
+ batch : torch.Tensor
+ Batch information for graph-level processing.
+ fixedges : torch.Tensor, optional
+ Predefined edges to use instead of sampling.
+ Defaults to None.
+
+ Returns
+ -------
+ tuple
+ A tuple containing four elements:
+ - base_encoded_features (torch.Tensor)
+ - embedded_features (torch.Tensor)
+ - sampled_edges (torch.Tensor)
+ - edge_sampling_log_probabilities (torch.Tensor)
+ """
+ # Input embedding
+ x_ = self.base_enc(x, batch)
+ x = self.embed_f(x, batch)
+
+ if self.training:
+            if fixedges is not None:
+                # Return four values to match the (x_, x, edges, logprobs) contract below
+                return (
+                    x_,
+                    x,
+                    fixedges,
+ torch.zeros(
+ fixedges.shape[0],
+ fixedges.shape[-1] // self.k,
+ self.k,
+ dtype=torch.float,
+ device=x.device,
+ ),
+ )
+
+ D, _x = self.distance(x)
+
+ # sampling here
+ edges_hat, logprobs = self.sample_without_replacement(D)
+ else:
+ with torch.no_grad():
+                if fixedges is not None:
+                    # Return four values to match the (x_, x, edges, logprobs) contract below
+                    return (
+                        x_,
+                        x,
+                        fixedges,
+ torch.zeros(
+ fixedges.shape[0],
+ fixedges.shape[-1] // self.k,
+ self.k,
+ dtype=torch.float,
+ device=x.device,
+ ),
+ )
+ D, _x = self.distance(x)
+
+ # sampling here
+ edges_hat, logprobs = self.sample_without_replacement(D)
+
+ if self.debug:
+ self.D = D
+ self.edges_hat = edges_hat
+ self.logprobs = logprobs
+ self.x = x
+
+ return x_, x, edges_hat, logprobs
+
+ def sample_without_replacement(self, logits: torch.Tensor) -> tuple:
+ r"""Sample edges without replacement using a temperature-scaled Gumbel-top-k method.
+
+ Parameters
+ ----------
+ logits : torch.Tensor
+ Input logits representing edge weights or distances.
+ Shape should be (n, n) where n is the number of nodes.
+
+ Returns
+ -------
+ tuple
+ A tuple containing two elements:
+ - edges (torch.Tensor): Sampled edges without replacement
+ - logprobs (torch.Tensor): Log probabilities of the sampled edges
+ """
+ b = 1
+ n, _ = logits.shape
+ logits = logits * torch.exp(torch.clamp(self.temperature, -5, 5))
+
+ q = torch.rand_like(logits) + 1e-8
+ lq = logits - torch.log(-torch.log(q))
+ logprobs, indices = torch.topk(-lq, self.k)
+
+ rows = (
+ torch.arange(n)
+ .view(1, n, 1)
+ .to(logits.device)
+ .repeat(b, 1, self.k)
+ )
+ edges = torch.stack((indices.view(b, -1), rows.view(b, -1)), -2)
+
+        if b == 1:
+            edges = edges.squeeze(0)  # drop the singleton batch dimension
+
+ return edges, logprobs
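A standalone sketch of the Gumbel-top-k trick used by sample_without_replacement: perturbing the (temperature-scaled) distances with Gumbel noise and taking a per-row top-k yields k distinct neighbours per node without replacement.

import torch

n, k = 6, 3
logits = torch.rand(n, n)                              # pairwise squared distances play this role
q = torch.rand_like(logits) + 1e-8
perturbed = logits - torch.log(-torch.log(q))          # Gumbel(0, 1) perturbation
logprobs, indices = torch.topk(-perturbed, k)          # smallest perturbed distances win
rows = torch.arange(n).view(n, 1).repeat(1, k)
edges = torch.stack((indices.reshape(-1), rows.reshape(-1)), dim=0)  # (2, n*k) edge index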
diff --git a/topobenchmarkx/nn/readouts/__init__.py b/topobenchmark/nn/readouts/__init__.py
similarity index 100%
rename from topobenchmarkx/nn/readouts/__init__.py
rename to topobenchmark/nn/readouts/__init__.py
index a33066ed..b493d826 100644
--- a/topobenchmarkx/nn/readouts/__init__.py
+++ b/topobenchmark/nn/readouts/__init__.py
@@ -12,8 +12,8 @@
# Export all readouts and the dictionary
__all__ = [
"AbstractZeroCellReadOut",
- "PropagateSignalDown",
"NoReadOut",
+ "PropagateSignalDown",
# "OtherReadout1",
# "OtherReadout2",
# ... add other readout classes here
diff --git a/topobenchmarkx/nn/readouts/base.py b/topobenchmark/nn/readouts/base.py
similarity index 100%
rename from topobenchmarkx/nn/readouts/base.py
rename to topobenchmark/nn/readouts/base.py
diff --git a/topobenchmarkx/nn/readouts/identical.py b/topobenchmark/nn/readouts/identical.py
similarity index 94%
rename from topobenchmarkx/nn/readouts/identical.py
rename to topobenchmark/nn/readouts/identical.py
index ffbaa065..2401e627 100644
--- a/topobenchmarkx/nn/readouts/identical.py
+++ b/topobenchmark/nn/readouts/identical.py
@@ -2,7 +2,7 @@
import torch_geometric
-from topobenchmarkx.nn.readouts.base import AbstractZeroCellReadOut
+from topobenchmark.nn.readouts.base import AbstractZeroCellReadOut
class NoReadOut(AbstractZeroCellReadOut):
diff --git a/topobenchmarkx/nn/readouts/propagate_signal_down.py b/topobenchmark/nn/readouts/propagate_signal_down.py
similarity index 97%
rename from topobenchmarkx/nn/readouts/propagate_signal_down.py
rename to topobenchmark/nn/readouts/propagate_signal_down.py
index 1d1bf658..1eafe325 100644
--- a/topobenchmarkx/nn/readouts/propagate_signal_down.py
+++ b/topobenchmark/nn/readouts/propagate_signal_down.py
@@ -4,7 +4,7 @@
import torch
import torch_geometric
-from topobenchmarkx.nn.readouts.base import AbstractZeroCellReadOut
+from topobenchmark.nn.readouts.base import AbstractZeroCellReadOut
class PropagateSignalDown(AbstractZeroCellReadOut):
diff --git a/topobenchmarkx/nn/wrappers/__init__.py b/topobenchmark/nn/wrappers/__init__.py
similarity index 55%
rename from topobenchmarkx/nn/wrappers/__init__.py
rename to topobenchmark/nn/wrappers/__init__.py
index f1d26851..566375a1 100755
--- a/topobenchmarkx/nn/wrappers/__init__.py
+++ b/topobenchmark/nn/wrappers/__init__.py
@@ -1,16 +1,16 @@
"""This module implements the wrappers for the neural networks."""
-from topobenchmarkx.nn.wrappers.base import AbstractWrapper
-from topobenchmarkx.nn.wrappers.cell import (
+from topobenchmark.nn.wrappers.base import AbstractWrapper
+from topobenchmark.nn.wrappers.cell import (
CANWrapper,
CCCNWrapper,
CCXNWrapper,
CWNWrapper,
)
-from topobenchmarkx.nn.wrappers.combinatorial import TuneWrapper
-from topobenchmarkx.nn.wrappers.graph import GNNWrapper, GraphMLPWrapper
-from topobenchmarkx.nn.wrappers.hypergraph import HypergraphWrapper
-from topobenchmarkx.nn.wrappers.simplicial import (
+from topobenchmark.nn.wrappers.combinatorial import TuneWrapper
+from topobenchmark.nn.wrappers.graph import GNNWrapper, GraphMLPWrapper
+from topobenchmark.nn.wrappers.hypergraph import HypergraphWrapper
+from topobenchmark.nn.wrappers.simplicial import (
SANWrapper,
SCCNNWrapper,
SCCNWrapper,
@@ -19,24 +19,24 @@
# ... import other readout classes here
# For example:
-# from topobenchmarkx.nn.wrappers.other_wrapper_1 import OtherWrapper1
-# from topobenchmarkx.nn.wrappers.other_wrapper_2 import OtherWrapper2
+# from topobenchmark.nn.wrappers.other_wrapper_1 import OtherWrapper1
+# from topobenchmark.nn.wrappers.other_wrapper_2 import OtherWrapper2
# Export all wrappers
__all__ = [
"AbstractWrapper",
- "GraphMLPWrapper",
+ "CANWrapper",
+ "CCCNWrapper",
+ "CCXNWrapper",
+ "CWNWrapper",
"GNNWrapper",
+ "GraphMLPWrapper",
"HypergraphWrapper",
"SANWrapper",
- "SCNWrapper",
"SCCNNWrapper",
"SCCNWrapper",
- "CANWrapper",
- "CCCNWrapper",
- "CWNWrapper",
- "CCXNWrapper",
+ "SCNWrapper",
"TuneWrapper",
# "OtherWrapper1",
# "OtherWrapper2",
diff --git a/topobenchmarkx/nn/wrappers/base.py b/topobenchmark/nn/wrappers/base.py
similarity index 100%
rename from topobenchmarkx/nn/wrappers/base.py
rename to topobenchmark/nn/wrappers/base.py
diff --git a/topobenchmarkx/nn/wrappers/cell/__init__.py b/topobenchmark/nn/wrappers/cell/__init__.py
similarity index 100%
rename from topobenchmarkx/nn/wrappers/cell/__init__.py
rename to topobenchmark/nn/wrappers/cell/__init__.py
index 37f5a651..1422834e 100644
--- a/topobenchmarkx/nn/wrappers/cell/__init__.py
+++ b/topobenchmark/nn/wrappers/cell/__init__.py
@@ -8,8 +8,8 @@
__all__ = [
"CANWrapper",
"CCCNWrapper",
- "CWNWrapper",
"CCXNWrapper",
+ "CWNWrapper",
# "OtherWrapper1",
# "OtherWrapper2",
# ... add other readout classes here
diff --git a/topobenchmarkx/nn/wrappers/cell/can_wrapper.py b/topobenchmark/nn/wrappers/cell/can_wrapper.py
similarity index 95%
rename from topobenchmarkx/nn/wrappers/cell/can_wrapper.py
rename to topobenchmark/nn/wrappers/cell/can_wrapper.py
index 872b0d49..5d1945fd 100644
--- a/topobenchmarkx/nn/wrappers/cell/can_wrapper.py
+++ b/topobenchmark/nn/wrappers/cell/can_wrapper.py
@@ -2,7 +2,7 @@
import torch
-from topobenchmarkx.nn.wrappers.base import AbstractWrapper
+from topobenchmark.nn.wrappers.base import AbstractWrapper
class CANWrapper(AbstractWrapper):
diff --git a/topobenchmarkx/nn/wrappers/cell/cccn_wrapper.py b/topobenchmark/nn/wrappers/cell/cccn_wrapper.py
similarity index 94%
rename from topobenchmarkx/nn/wrappers/cell/cccn_wrapper.py
rename to topobenchmark/nn/wrappers/cell/cccn_wrapper.py
index 89d84008..327609aa 100644
--- a/topobenchmarkx/nn/wrappers/cell/cccn_wrapper.py
+++ b/topobenchmark/nn/wrappers/cell/cccn_wrapper.py
@@ -2,7 +2,7 @@
import torch
-from topobenchmarkx.nn.wrappers.base import AbstractWrapper
+from topobenchmark.nn.wrappers.base import AbstractWrapper
class CCCNWrapper(AbstractWrapper):
diff --git a/topobenchmarkx/nn/wrappers/cell/ccxn_wrapper.py b/topobenchmark/nn/wrappers/cell/ccxn_wrapper.py
similarity index 94%
rename from topobenchmarkx/nn/wrappers/cell/ccxn_wrapper.py
rename to topobenchmark/nn/wrappers/cell/ccxn_wrapper.py
index 1ffedba5..f640a562 100644
--- a/topobenchmarkx/nn/wrappers/cell/ccxn_wrapper.py
+++ b/topobenchmark/nn/wrappers/cell/ccxn_wrapper.py
@@ -1,6 +1,6 @@
"""Wrapper for the CCXN model."""
-from topobenchmarkx.nn.wrappers.base import AbstractWrapper
+from topobenchmark.nn.wrappers.base import AbstractWrapper
class CCXNWrapper(AbstractWrapper):
diff --git a/topobenchmarkx/nn/wrappers/cell/cwn_wrapper.py b/topobenchmark/nn/wrappers/cell/cwn_wrapper.py
similarity index 94%
rename from topobenchmarkx/nn/wrappers/cell/cwn_wrapper.py
rename to topobenchmark/nn/wrappers/cell/cwn_wrapper.py
index 3d9f1f7b..d845195c 100644
--- a/topobenchmarkx/nn/wrappers/cell/cwn_wrapper.py
+++ b/topobenchmark/nn/wrappers/cell/cwn_wrapper.py
@@ -1,6 +1,6 @@
"""Wrapper for the CWN model."""
-from topobenchmarkx.nn.wrappers.base import AbstractWrapper
+from topobenchmark.nn.wrappers.base import AbstractWrapper
class CWNWrapper(AbstractWrapper):
diff --git a/topobenchmarkx/nn/wrappers/combinatorial/__init__.py b/topobenchmark/nn/wrappers/combinatorial/__init__.py
similarity index 100%
rename from topobenchmarkx/nn/wrappers/combinatorial/__init__.py
rename to topobenchmark/nn/wrappers/combinatorial/__init__.py
diff --git a/topobenchmarkx/nn/wrappers/combinatorial/tune_wrapper.py b/topobenchmark/nn/wrappers/combinatorial/tune_wrapper.py
similarity index 92%
rename from topobenchmarkx/nn/wrappers/combinatorial/tune_wrapper.py
rename to topobenchmark/nn/wrappers/combinatorial/tune_wrapper.py
index 79025210..184baf26 100644
--- a/topobenchmarkx/nn/wrappers/combinatorial/tune_wrapper.py
+++ b/topobenchmark/nn/wrappers/combinatorial/tune_wrapper.py
@@ -1,6 +1,6 @@
"""Wrapper for the TopoTune model."""
-from topobenchmarkx.nn.wrappers.base import AbstractWrapper
+from topobenchmark.nn.wrappers.base import AbstractWrapper
class TuneWrapper(AbstractWrapper):
diff --git a/topobenchmarkx/nn/wrappers/graph/__init__.py b/topobenchmark/nn/wrappers/graph/__init__.py
similarity index 100%
rename from topobenchmarkx/nn/wrappers/graph/__init__.py
rename to topobenchmark/nn/wrappers/graph/__init__.py
diff --git a/topobenchmarkx/nn/wrappers/graph/gnn_wrapper.py b/topobenchmark/nn/wrappers/graph/gnn_wrapper.py
similarity index 76%
rename from topobenchmarkx/nn/wrappers/graph/gnn_wrapper.py
rename to topobenchmark/nn/wrappers/graph/gnn_wrapper.py
index fe8732bc..c0576823 100644
--- a/topobenchmarkx/nn/wrappers/graph/gnn_wrapper.py
+++ b/topobenchmark/nn/wrappers/graph/gnn_wrapper.py
@@ -1,6 +1,6 @@
"""Wrapper for the GNN models."""
-from topobenchmarkx.nn.wrappers.base import AbstractWrapper
+from topobenchmark.nn.wrappers.base import AbstractWrapper
class GNNWrapper(AbstractWrapper):
@@ -23,7 +23,12 @@ def forward(self, batch):
dict
Dictionary containing the updated model output.
"""
- x_0 = self.backbone(batch.x_0, batch.edge_index)
+
+ x_0 = self.backbone(
+ batch.x_0,
+ batch.edge_index,
+ edge_weight=batch.get("edge_weight", None),
+ )
model_out = {"labels": batch.y, "batch_0": batch.batch_0}
model_out["x_0"] = x_0
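The hunk above makes `GNNWrapper` forward an optional `edge_weight` to its backbone. A minimal sketch of that call pattern, assuming a `torch_geometric.nn.GCN` backbone and a toy `Data` object that reuses the repository's `x_0` field name; the graph, feature sizes, and hyperparameters are made up for illustration:

```python
import torch
from torch_geometric.data import Data
from torch_geometric.nn import GCN

# Toy 4-node graph with symmetric edges; x_0 mimics the 0-cell features used above.
edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
                           [1, 0, 2, 1, 3, 2]])
batch = Data(x_0=torch.randn(4, 8), edge_index=edge_index)

backbone = GCN(in_channels=8, hidden_channels=16, num_layers=2)

# Mirrors the wrapper: edge_weight is passed through only if the batch carries it.
x_0 = backbone(batch.x_0, batch.edge_index,
               edge_weight=batch.get("edge_weight", None))
print(x_0.shape)  # torch.Size([4, 16])
```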
diff --git a/topobenchmarkx/nn/wrappers/graph/graph_mlp_wrapper.py b/topobenchmark/nn/wrappers/graph/graph_mlp_wrapper.py
similarity index 92%
rename from topobenchmarkx/nn/wrappers/graph/graph_mlp_wrapper.py
rename to topobenchmark/nn/wrappers/graph/graph_mlp_wrapper.py
index 8896da75..a70fe9e7 100644
--- a/topobenchmarkx/nn/wrappers/graph/graph_mlp_wrapper.py
+++ b/topobenchmark/nn/wrappers/graph/graph_mlp_wrapper.py
@@ -1,6 +1,6 @@
"""Wrapper for the GNN models."""
-from topobenchmarkx.nn.wrappers.base import AbstractWrapper
+from topobenchmark.nn.wrappers.base import AbstractWrapper
class GraphMLPWrapper(AbstractWrapper):
diff --git a/topobenchmarkx/nn/wrappers/hypergraph/__init__.py b/topobenchmark/nn/wrappers/hypergraph/__init__.py
similarity index 100%
rename from topobenchmarkx/nn/wrappers/hypergraph/__init__.py
rename to topobenchmark/nn/wrappers/hypergraph/__init__.py
diff --git a/topobenchmarkx/nn/wrappers/hypergraph/hypergraph_wrapper.py b/topobenchmark/nn/wrappers/hypergraph/hypergraph_wrapper.py
similarity index 93%
rename from topobenchmarkx/nn/wrappers/hypergraph/hypergraph_wrapper.py
rename to topobenchmark/nn/wrappers/hypergraph/hypergraph_wrapper.py
index 4c891152..dc2a7e00 100644
--- a/topobenchmarkx/nn/wrappers/hypergraph/hypergraph_wrapper.py
+++ b/topobenchmark/nn/wrappers/hypergraph/hypergraph_wrapper.py
@@ -1,6 +1,6 @@
"""Wrapper for the hypergraph models."""
-from topobenchmarkx.nn.wrappers.base import AbstractWrapper
+from topobenchmark.nn.wrappers.base import AbstractWrapper
class HypergraphWrapper(AbstractWrapper):
diff --git a/topobenchmarkx/nn/wrappers/simplicial/__init__.py b/topobenchmark/nn/wrappers/simplicial/__init__.py
similarity index 100%
rename from topobenchmarkx/nn/wrappers/simplicial/__init__.py
rename to topobenchmark/nn/wrappers/simplicial/__init__.py
index 49c64cae..c4c02331 100644
--- a/topobenchmarkx/nn/wrappers/simplicial/__init__.py
+++ b/topobenchmark/nn/wrappers/simplicial/__init__.py
@@ -8,7 +8,7 @@
# Export all wrappers
__all__ = [
"SANWrapper",
- "SCNWrapper",
"SCCNNWrapper",
"SCCNWrapper",
+ "SCNWrapper",
]
diff --git a/topobenchmarkx/nn/wrappers/simplicial/san_wrapper.py b/topobenchmark/nn/wrappers/simplicial/san_wrapper.py
similarity index 94%
rename from topobenchmarkx/nn/wrappers/simplicial/san_wrapper.py
rename to topobenchmark/nn/wrappers/simplicial/san_wrapper.py
index e70a77f0..acfd8279 100644
--- a/topobenchmarkx/nn/wrappers/simplicial/san_wrapper.py
+++ b/topobenchmark/nn/wrappers/simplicial/san_wrapper.py
@@ -2,7 +2,7 @@
import torch
-from topobenchmarkx.nn.wrappers.base import AbstractWrapper
+from topobenchmark.nn.wrappers.base import AbstractWrapper
class SANWrapper(AbstractWrapper):
diff --git a/topobenchmarkx/nn/wrappers/simplicial/sccn_wrapper.py b/topobenchmark/nn/wrappers/simplicial/sccn_wrapper.py
similarity index 96%
rename from topobenchmarkx/nn/wrappers/simplicial/sccn_wrapper.py
rename to topobenchmark/nn/wrappers/simplicial/sccn_wrapper.py
index b0ac75c2..41dd23e1 100644
--- a/topobenchmarkx/nn/wrappers/simplicial/sccn_wrapper.py
+++ b/topobenchmark/nn/wrappers/simplicial/sccn_wrapper.py
@@ -1,6 +1,6 @@
"""Wrapper for the SCCN model."""
-from topobenchmarkx.nn.wrappers.base import AbstractWrapper
+from topobenchmark.nn.wrappers.base import AbstractWrapper
class SCCNWrapper(AbstractWrapper):
diff --git a/topobenchmarkx/nn/wrappers/simplicial/sccnn_wrapper.py b/topobenchmark/nn/wrappers/simplicial/sccnn_wrapper.py
similarity index 95%
rename from topobenchmarkx/nn/wrappers/simplicial/sccnn_wrapper.py
rename to topobenchmark/nn/wrappers/simplicial/sccnn_wrapper.py
index 1890dc5f..a1d48665 100644
--- a/topobenchmarkx/nn/wrappers/simplicial/sccnn_wrapper.py
+++ b/topobenchmark/nn/wrappers/simplicial/sccnn_wrapper.py
@@ -1,6 +1,6 @@
"""Wrapper for the SCCNN model."""
-from topobenchmarkx.nn.wrappers.base import AbstractWrapper
+from topobenchmark.nn.wrappers.base import AbstractWrapper
class SCCNNWrapper(AbstractWrapper):
diff --git a/topobenchmarkx/nn/wrappers/simplicial/scn_wrapper.py b/topobenchmark/nn/wrappers/simplicial/scn_wrapper.py
similarity index 97%
rename from topobenchmarkx/nn/wrappers/simplicial/scn_wrapper.py
rename to topobenchmark/nn/wrappers/simplicial/scn_wrapper.py
index a2e8773d..0948e22d 100644
--- a/topobenchmarkx/nn/wrappers/simplicial/scn_wrapper.py
+++ b/topobenchmark/nn/wrappers/simplicial/scn_wrapper.py
@@ -2,7 +2,7 @@
import torch
-from topobenchmarkx.nn.wrappers.base import AbstractWrapper
+from topobenchmark.nn.wrappers.base import AbstractWrapper
class SCNWrapper(AbstractWrapper):
diff --git a/topobenchmark/optimizer/__init__.py b/topobenchmark/optimizer/__init__.py
new file mode 100644
index 00000000..3a583e70
--- /dev/null
+++ b/topobenchmark/optimizer/__init__.py
@@ -0,0 +1,7 @@
+"""Init file for optimizer module."""
+
+from .optimizer import TBOptimizer
+
+__all__ = [
+ "TBOptimizer",
+]
diff --git a/topobenchmarkx/optimizer/base.py b/topobenchmark/optimizer/base.py
similarity index 100%
rename from topobenchmarkx/optimizer/base.py
rename to topobenchmark/optimizer/base.py
diff --git a/topobenchmarkx/optimizer/optimizer.py b/topobenchmark/optimizer/optimizer.py
similarity index 95%
rename from topobenchmarkx/optimizer/optimizer.py
rename to topobenchmark/optimizer/optimizer.py
index f7802c67..9ca8efdc 100644
--- a/topobenchmarkx/optimizer/optimizer.py
+++ b/topobenchmark/optimizer/optimizer.py
@@ -11,7 +11,7 @@
TORCH_SCHEDULERS = torch.optim.lr_scheduler.__dict__
-class TBXOptimizer(AbstractOptimizer):
+class TBOptimizer(AbstractOptimizer):
"""Optimizer class that manage both optimizer and scheduler, fully compatible with `torch.optim` classes.
Parameters
@@ -48,7 +48,7 @@ def configure_optimizer(self, model_parameters) -> dict[str:Any]:
"""Configure the optimizer and scheduler.
Act as a wrapper to provide the LightningTrainer module the required config dict
- when it calls `TBXModel`'s `configure_optimizers()` method.
+ when it calls `TBModel`'s `configure_optimizers()` method.
Parameters
----------
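For context on the renamed `TBOptimizer`: it resolves scheduler classes by name (`TORCH_SCHEDULERS = torch.optim.lr_scheduler.__dict__` in the hunk above) and hands Lightning the usual `configure_optimizers()` dict. A rough, self-contained sketch of that flow, assuming the optimizer is looked up from `torch.optim.__dict__` in the same way; the class names and parameter values are placeholders:

```python
import torch
from torch import nn

TORCH_OPTIMIZERS = torch.optim.__dict__                # assumed symmetric lookup table
TORCH_SCHEDULERS = torch.optim.lr_scheduler.__dict__   # as in the hunk above

model = nn.Linear(4, 2)  # stand-in for the model whose parameters get optimized

optimizer = TORCH_OPTIMIZERS["Adam"](model.parameters(), lr=1e-3)
scheduler = TORCH_SCHEDULERS["StepLR"](optimizer, step_size=10, gamma=0.5)

# Shape of the dict Lightning expects back from configure_optimizers().
optimizer_config = {
    "optimizer": optimizer,
    "lr_scheduler": {"scheduler": scheduler, "interval": "epoch"},
}
print(type(optimizer_config["lr_scheduler"]["scheduler"]).__name__)  # StepLR
```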
diff --git a/topobenchmarkx/run.py b/topobenchmark/run.py
similarity index 96%
rename from topobenchmarkx/run.py
rename to topobenchmark/run.py
index 6e01c29c..6dc72154 100755
--- a/topobenchmarkx/run.py
+++ b/topobenchmark/run.py
@@ -12,9 +12,9 @@
from lightning.pytorch.loggers import Logger
from omegaconf import DictConfig, OmegaConf
-from topobenchmarkx.data.preprocessor import PreProcessor
-from topobenchmarkx.dataloader import TBXDataloader
-from topobenchmarkx.utils import (
+from topobenchmark.data.preprocessor import PreProcessor
+from topobenchmark.dataloader import TBDataloader
+from topobenchmark.utils import (
RankedLogger,
extras,
get_metric_value,
@@ -23,14 +23,14 @@
log_hyperparameters,
task_wrapper,
)
-from topobenchmarkx.utils.config_resolvers import (
+from topobenchmark.utils.config_resolvers import (
get_default_metrics,
get_default_transform,
get_monitor_metric,
get_monitor_mode,
get_required_lifting,
infer_in_channels,
- infere_num_cell_dimensions,
+ infer_num_cell_dimensions,
)
rootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
@@ -71,7 +71,7 @@
"infer_in_channels", infer_in_channels, replace=True
)
OmegaConf.register_new_resolver(
- "infere_num_cell_dimensions", infere_num_cell_dimensions, replace=True
+ "infer_num_cell_dimensions", infer_num_cell_dimensions, replace=True
)
OmegaConf.register_new_resolver(
"parameter_multiplication", lambda x, y: int(int(x) * int(y)), replace=True
@@ -141,7 +141,7 @@ def run(cfg: DictConfig) -> tuple[dict[str, Any], dict[str, Any]]:
# Prepare datamodule
log.info("Instantiating datamodule...")
if cfg.dataset.parameters.task_level in ["node", "graph"]:
- datamodule = TBXDataloader(
+ datamodule = TBDataloader(
dataset_train=dataset_train,
dataset_val=dataset_val,
dataset_test=dataset_test,
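The resolver registration in `run.py` above is what lets Hydra/OmegaConf configs reference the renamed `infer_num_cell_dimensions` in interpolations. A small sketch of the mechanism, using a simplified stand-in for the resolver body (the real one lives in `topobenchmark/utils/config_resolvers.py`); the config keys and values here are illustrative:

```python
from omegaconf import OmegaConf

def infer_num_cell_dimensions(selected_dimensions, in_channels):
    # Simplified stand-in: prefer the explicit selection, else fall back to in_channels.
    if selected_dimensions is not None:
        return len(selected_dimensions)
    return len(in_channels)

OmegaConf.register_new_resolver(
    "infer_num_cell_dimensions", infer_num_cell_dimensions, replace=True
)

cfg = OmegaConf.create({
    "in_channels": [7, 7, 7],
    "num_cell_dimensions": "${infer_num_cell_dimensions:null,${in_channels}}",
})
print(cfg.num_cell_dimensions)  # 3
```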
diff --git a/topobenchmark/transforms/__init__.py b/topobenchmark/transforms/__init__.py
new file mode 100755
index 00000000..3f568814
--- /dev/null
+++ b/topobenchmark/transforms/__init__.py
@@ -0,0 +1,32 @@
+"""This module contains the transforms for the topobenchmark package."""
+
+from typing import Any
+
+from topobenchmark.transforms.data_manipulations import DATA_MANIPULATIONS
+from topobenchmark.transforms.feature_liftings import FEATURE_LIFTINGS
+from topobenchmark.transforms.liftings.graph2cell import GRAPH2CELL_LIFTINGS
+from topobenchmark.transforms.liftings.graph2hypergraph import (
+ GRAPH2HYPERGRAPH_LIFTINGS,
+)
+from topobenchmark.transforms.liftings.graph2simplicial import (
+ GRAPH2SIMPLICIAL_LIFTINGS,
+)
+
+LIFTINGS = {
+ **GRAPH2CELL_LIFTINGS,
+ **GRAPH2HYPERGRAPH_LIFTINGS,
+ **GRAPH2SIMPLICIAL_LIFTINGS,
+}
+
+TRANSFORMS: dict[Any, Any] = {
+ **LIFTINGS,
+ **FEATURE_LIFTINGS,
+ **DATA_MANIPULATIONS,
+}
+
+__all__ = [
+ "DATA_MANIPULATIONS",
+ "FEATURE_LIFTINGS",
+ "LIFTINGS",
+ "TRANSFORMS",
+]
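The new `TRANSFORMS` dict is a name-to-class registry that `DataTransform` (next file in this diff) indexes by transform name. A tiny sketch of that lookup pattern with a made-up entry, just to show how merging the sub-registries yields a single dispatch table:

```python
from typing import Any

class IdentityTransform:
    """Hypothetical registered transform: returns its input unchanged."""
    def __init__(self, **kwargs):
        self.parameters = kwargs
    def __call__(self, data: Any) -> Any:
        return data

DATA_MANIPULATIONS = {"Identity": IdentityTransform}
FEATURE_LIFTINGS: dict[Any, Any] = {}
LIFTINGS: dict[Any, Any] = {}

# Same merge as in the new __init__.py above.
TRANSFORMS: dict[Any, Any] = {**LIFTINGS, **FEATURE_LIFTINGS, **DATA_MANIPULATIONS}

transform = TRANSFORMS["Identity"](some_option=True)  # look up by name, then instantiate
print(transform("unchanged"))  # unchanged
```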
diff --git a/topobenchmarkx/transforms/data_manipulations/__init__.py b/topobenchmark/transforms/data_manipulations/__init__.py
similarity index 100%
rename from topobenchmarkx/transforms/data_manipulations/__init__.py
rename to topobenchmark/transforms/data_manipulations/__init__.py
diff --git a/topobenchmarkx/transforms/data_manipulations/calculate_simplicial_curvature.py b/topobenchmark/transforms/data_manipulations/calculate_simplicial_curvature.py
similarity index 100%
rename from topobenchmarkx/transforms/data_manipulations/calculate_simplicial_curvature.py
rename to topobenchmark/transforms/data_manipulations/calculate_simplicial_curvature.py
diff --git a/topobenchmarkx/transforms/data_manipulations/equal_gaus_features.py b/topobenchmark/transforms/data_manipulations/equal_gaus_features.py
similarity index 100%
rename from topobenchmarkx/transforms/data_manipulations/equal_gaus_features.py
rename to topobenchmark/transforms/data_manipulations/equal_gaus_features.py
diff --git a/topobenchmarkx/transforms/data_manipulations/group_homophily.py b/topobenchmark/transforms/data_manipulations/group_homophily.py
similarity index 100%
rename from topobenchmarkx/transforms/data_manipulations/group_homophily.py
rename to topobenchmark/transforms/data_manipulations/group_homophily.py
diff --git a/topobenchmarkx/transforms/data_manipulations/identity_transform.py b/topobenchmark/transforms/data_manipulations/identity_transform.py
similarity index 100%
rename from topobenchmarkx/transforms/data_manipulations/identity_transform.py
rename to topobenchmark/transforms/data_manipulations/identity_transform.py
diff --git a/topobenchmarkx/transforms/data_manipulations/infere_knn_connectivity.py b/topobenchmark/transforms/data_manipulations/infere_knn_connectivity.py
similarity index 96%
rename from topobenchmarkx/transforms/data_manipulations/infere_knn_connectivity.py
rename to topobenchmark/transforms/data_manipulations/infere_knn_connectivity.py
index 4c7ddc66..70fd87e1 100644
--- a/topobenchmarkx/transforms/data_manipulations/infere_knn_connectivity.py
+++ b/topobenchmark/transforms/data_manipulations/infere_knn_connectivity.py
@@ -17,7 +17,7 @@ class InfereKNNConnectivity(torch_geometric.transforms.BaseTransform):
def __init__(self, **kwargs):
super().__init__()
- self.type = "infere_knn_connectivity"
+ self.type = "infer_knn_connectivity"
self.parameters = kwargs
def __repr__(self) -> str:
diff --git a/topobenchmarkx/transforms/data_manipulations/infere_radius_connectivity.py b/topobenchmark/transforms/data_manipulations/infere_radius_connectivity.py
similarity index 95%
rename from topobenchmarkx/transforms/data_manipulations/infere_radius_connectivity.py
rename to topobenchmark/transforms/data_manipulations/infere_radius_connectivity.py
index 5dc2471c..0e5d9bf3 100644
--- a/topobenchmarkx/transforms/data_manipulations/infere_radius_connectivity.py
+++ b/topobenchmark/transforms/data_manipulations/infere_radius_connectivity.py
@@ -17,7 +17,7 @@ class InfereRadiusConnectivity(torch_geometric.transforms.BaseTransform):
def __init__(self, **kwargs):
super().__init__()
- self.type = "infere_radius_connectivity"
+ self.type = "infer_radius_connectivity"
self.parameters = kwargs
def __repr__(self) -> str:
diff --git a/topobenchmarkx/transforms/data_manipulations/keep_only_connected_component.py b/topobenchmark/transforms/data_manipulations/keep_only_connected_component.py
similarity index 100%
rename from topobenchmarkx/transforms/data_manipulations/keep_only_connected_component.py
rename to topobenchmark/transforms/data_manipulations/keep_only_connected_component.py
diff --git a/topobenchmarkx/transforms/data_manipulations/keep_selected_data_fields.py b/topobenchmark/transforms/data_manipulations/keep_selected_data_fields.py
similarity index 100%
rename from topobenchmarkx/transforms/data_manipulations/keep_selected_data_fields.py
rename to topobenchmark/transforms/data_manipulations/keep_selected_data_fields.py
diff --git a/topobenchmarkx/transforms/data_manipulations/mp_homophily.py b/topobenchmark/transforms/data_manipulations/mp_homophily.py
similarity index 100%
rename from topobenchmarkx/transforms/data_manipulations/mp_homophily.py
rename to topobenchmark/transforms/data_manipulations/mp_homophily.py
diff --git a/topobenchmarkx/transforms/data_manipulations/node_degrees.py b/topobenchmark/transforms/data_manipulations/node_degrees.py
similarity index 100%
rename from topobenchmarkx/transforms/data_manipulations/node_degrees.py
rename to topobenchmark/transforms/data_manipulations/node_degrees.py
diff --git a/topobenchmarkx/transforms/data_manipulations/node_features_to_float.py b/topobenchmark/transforms/data_manipulations/node_features_to_float.py
similarity index 100%
rename from topobenchmarkx/transforms/data_manipulations/node_features_to_float.py
rename to topobenchmark/transforms/data_manipulations/node_features_to_float.py
diff --git a/topobenchmarkx/transforms/data_manipulations/one_hot_degree_features.py b/topobenchmark/transforms/data_manipulations/one_hot_degree_features.py
similarity index 100%
rename from topobenchmarkx/transforms/data_manipulations/one_hot_degree_features.py
rename to topobenchmark/transforms/data_manipulations/one_hot_degree_features.py
diff --git a/topobenchmarkx/transforms/data_transform.py b/topobenchmark/transforms/data_transform.py
similarity index 95%
rename from topobenchmarkx/transforms/data_transform.py
rename to topobenchmark/transforms/data_transform.py
index 0471557e..da9e883b 100755
--- a/topobenchmarkx/transforms/data_transform.py
+++ b/topobenchmark/transforms/data_transform.py
@@ -2,7 +2,7 @@
import torch_geometric
-from topobenchmarkx.transforms import TRANSFORMS
+from topobenchmark.transforms import TRANSFORMS
class DataTransform(torch_geometric.transforms.BaseTransform):
diff --git a/topobenchmarkx/transforms/feature_liftings/__init__.py b/topobenchmark/transforms/feature_liftings/__init__.py
similarity index 100%
rename from topobenchmarkx/transforms/feature_liftings/__init__.py
rename to topobenchmark/transforms/feature_liftings/__init__.py
diff --git a/topobenchmarkx/transforms/feature_liftings/base.py b/topobenchmark/transforms/feature_liftings/base.py
similarity index 100%
rename from topobenchmarkx/transforms/feature_liftings/base.py
rename to topobenchmark/transforms/feature_liftings/base.py
diff --git a/topobenchmarkx/transforms/feature_liftings/concatenation.py b/topobenchmark/transforms/feature_liftings/concatenation.py
similarity index 100%
rename from topobenchmarkx/transforms/feature_liftings/concatenation.py
rename to topobenchmark/transforms/feature_liftings/concatenation.py
diff --git a/topobenchmarkx/transforms/feature_liftings/identity.py b/topobenchmark/transforms/feature_liftings/identity.py
similarity index 100%
rename from topobenchmarkx/transforms/feature_liftings/identity.py
rename to topobenchmark/transforms/feature_liftings/identity.py
diff --git a/topobenchmarkx/transforms/feature_liftings/projection_sum.py b/topobenchmark/transforms/feature_liftings/projection_sum.py
similarity index 100%
rename from topobenchmarkx/transforms/feature_liftings/projection_sum.py
rename to topobenchmark/transforms/feature_liftings/projection_sum.py
diff --git a/topobenchmarkx/transforms/feature_liftings/set.py b/topobenchmark/transforms/feature_liftings/set.py
similarity index 100%
rename from topobenchmarkx/transforms/feature_liftings/set.py
rename to topobenchmark/transforms/feature_liftings/set.py
diff --git a/topobenchmarkx/transforms/liftings/__init__.py b/topobenchmark/transforms/liftings/__init__.py
similarity index 100%
rename from topobenchmarkx/transforms/liftings/__init__.py
rename to topobenchmark/transforms/liftings/__init__.py
index bed124a1..4692ceaf 100755
--- a/topobenchmarkx/transforms/liftings/__init__.py
+++ b/topobenchmark/transforms/liftings/__init__.py
@@ -12,10 +12,10 @@
__all__ = [
"AbstractLifting",
+ "CellComplexLifting",
+ "CombinatorialLifting",
"GraphLifting",
+ "HypergraphLifting",
"PointCloudLifting",
"SimplicialLifting",
- "CellComplexLifting",
- "HypergraphLifting",
- "CombinatorialLifting",
]
diff --git a/topobenchmarkx/transforms/liftings/base.py b/topobenchmark/transforms/liftings/base.py
similarity index 95%
rename from topobenchmarkx/transforms/liftings/base.py
rename to topobenchmark/transforms/liftings/base.py
index 0a436585..6f5f35a7 100644
--- a/topobenchmarkx/transforms/liftings/base.py
+++ b/topobenchmark/transforms/liftings/base.py
@@ -4,11 +4,9 @@
import torch_geometric
-from topobenchmarkx.data.utils import IdentityAdapter
-from topobenchmarkx.transforms.feature_liftings import FEATURE_LIFTINGS
-from topobenchmarkx.transforms.feature_liftings.identity import (
- Identity,
-)
+from topobenchmark.data.utils import IdentityAdapter
+from topobenchmark.transforms.feature_liftings import FEATURE_LIFTINGS
+from topobenchmark.transforms.feature_liftings.identity import Identity
class LiftingTransform(torch_geometric.transforms.BaseTransform):
diff --git a/topobenchmarkx/transforms/liftings/graph2cell/__init__.py b/topobenchmark/transforms/liftings/graph2cell/__init__.py
similarity index 100%
rename from topobenchmarkx/transforms/liftings/graph2cell/__init__.py
rename to topobenchmark/transforms/liftings/graph2cell/__init__.py
diff --git a/topobenchmarkx/transforms/liftings/graph2cell/base.py b/topobenchmark/transforms/liftings/graph2cell/base.py
similarity index 92%
rename from topobenchmarkx/transforms/liftings/graph2cell/base.py
rename to topobenchmark/transforms/liftings/graph2cell/base.py
index 80b120c5..aeff3646 100755
--- a/topobenchmarkx/transforms/liftings/graph2cell/base.py
+++ b/topobenchmark/transforms/liftings/graph2cell/base.py
@@ -4,8 +4,8 @@
import torch
from toponetx.classes import CellComplex
-from topobenchmarkx.data.utils.utils import get_complex_connectivity
-from topobenchmarkx.transforms.liftings import GraphLifting
+from topobenchmark.data.utils.utils import get_complex_connectivity
+from topobenchmark.transforms.liftings import GraphLifting
class Graph2CellLifting(GraphLifting):
diff --git a/topobenchmarkx/transforms/liftings/graph2cell/cycle.py b/topobenchmark/transforms/liftings/graph2cell/cycle.py
similarity index 96%
rename from topobenchmarkx/transforms/liftings/graph2cell/cycle.py
rename to topobenchmark/transforms/liftings/graph2cell/cycle.py
index 4071bd75..31e94d8b 100755
--- a/topobenchmarkx/transforms/liftings/graph2cell/cycle.py
+++ b/topobenchmark/transforms/liftings/graph2cell/cycle.py
@@ -4,7 +4,7 @@
import torch_geometric
from toponetx.classes import CellComplex
-from topobenchmarkx.transforms.liftings.graph2cell.base import (
+from topobenchmark.transforms.liftings.graph2cell.base import (
Graph2CellLifting,
)
diff --git a/topobenchmarkx/transforms/liftings/graph2hypergraph/__init__.py b/topobenchmark/transforms/liftings/graph2hypergraph/__init__.py
similarity index 100%
rename from topobenchmarkx/transforms/liftings/graph2hypergraph/__init__.py
rename to topobenchmark/transforms/liftings/graph2hypergraph/__init__.py
diff --git a/topobenchmarkx/transforms/liftings/graph2hypergraph/base.py b/topobenchmark/transforms/liftings/graph2hypergraph/base.py
similarity index 86%
rename from topobenchmarkx/transforms/liftings/graph2hypergraph/base.py
rename to topobenchmark/transforms/liftings/graph2hypergraph/base.py
index a7a51520..e060e30e 100755
--- a/topobenchmarkx/transforms/liftings/graph2hypergraph/base.py
+++ b/topobenchmark/transforms/liftings/graph2hypergraph/base.py
@@ -1,6 +1,6 @@
"""Abstract class for lifting graphs to hypergraphs."""
-from topobenchmarkx.transforms.liftings import GraphLifting
+from topobenchmark.transforms.liftings import GraphLifting
class Graph2HypergraphLifting(GraphLifting):
diff --git a/topobenchmarkx/transforms/liftings/graph2hypergraph/khop.py b/topobenchmark/transforms/liftings/graph2hypergraph/khop.py
similarity index 97%
rename from topobenchmarkx/transforms/liftings/graph2hypergraph/khop.py
rename to topobenchmark/transforms/liftings/graph2hypergraph/khop.py
index b3d3552e..298fa135 100755
--- a/topobenchmarkx/transforms/liftings/graph2hypergraph/khop.py
+++ b/topobenchmark/transforms/liftings/graph2hypergraph/khop.py
@@ -3,7 +3,7 @@
import torch
import torch_geometric
-from topobenchmarkx.transforms.liftings.graph2hypergraph import (
+from topobenchmark.transforms.liftings.graph2hypergraph import (
Graph2HypergraphLifting,
)
diff --git a/topobenchmarkx/transforms/liftings/graph2hypergraph/knn.py b/topobenchmark/transforms/liftings/graph2hypergraph/knn.py
similarity index 98%
rename from topobenchmarkx/transforms/liftings/graph2hypergraph/knn.py
rename to topobenchmark/transforms/liftings/graph2hypergraph/knn.py
index 91114576..03d0a13a 100755
--- a/topobenchmarkx/transforms/liftings/graph2hypergraph/knn.py
+++ b/topobenchmark/transforms/liftings/graph2hypergraph/knn.py
@@ -3,7 +3,7 @@
import torch
import torch_geometric
-from topobenchmarkx.transforms.liftings.graph2hypergraph import (
+from topobenchmark.transforms.liftings.graph2hypergraph import (
Graph2HypergraphLifting,
)
diff --git a/topobenchmarkx/transforms/liftings/graph2simplicial/__init__.py b/topobenchmark/transforms/liftings/graph2simplicial/__init__.py
similarity index 100%
rename from topobenchmarkx/transforms/liftings/graph2simplicial/__init__.py
rename to topobenchmark/transforms/liftings/graph2simplicial/__init__.py
diff --git a/topobenchmarkx/transforms/liftings/graph2simplicial/base.py b/topobenchmark/transforms/liftings/graph2simplicial/base.py
similarity index 93%
rename from topobenchmarkx/transforms/liftings/graph2simplicial/base.py
rename to topobenchmark/transforms/liftings/graph2simplicial/base.py
index 7d5be886..e52449dc 100755
--- a/topobenchmarkx/transforms/liftings/graph2simplicial/base.py
+++ b/topobenchmark/transforms/liftings/graph2simplicial/base.py
@@ -4,8 +4,8 @@
import torch
from toponetx.classes import SimplicialComplex
-from topobenchmarkx.data.utils.utils import get_complex_connectivity
-from topobenchmarkx.transforms.liftings import GraphLifting
+from topobenchmark.data.utils.utils import get_complex_connectivity
+from topobenchmark.transforms.liftings import GraphLifting
class Graph2SimplicialLifting(GraphLifting):
diff --git a/topobenchmarkx/transforms/liftings/graph2simplicial/clique.py b/topobenchmark/transforms/liftings/graph2simplicial/clique.py
similarity index 96%
rename from topobenchmarkx/transforms/liftings/graph2simplicial/clique.py
rename to topobenchmark/transforms/liftings/graph2simplicial/clique.py
index 990d2e6e..2bb8c405 100755
--- a/topobenchmarkx/transforms/liftings/graph2simplicial/clique.py
+++ b/topobenchmark/transforms/liftings/graph2simplicial/clique.py
@@ -5,7 +5,7 @@
import networkx as nx
from toponetx.classes import SimplicialComplex
-from topobenchmarkx.transforms.liftings.base import LiftingMap
+from topobenchmark.transforms.liftings.base import LiftingMap
class SimplicialCliqueLifting(LiftingMap):
diff --git a/topobenchmarkx/transforms/liftings/graph2simplicial/khop.py b/topobenchmark/transforms/liftings/graph2simplicial/khop.py
similarity index 97%
rename from topobenchmarkx/transforms/liftings/graph2simplicial/khop.py
rename to topobenchmark/transforms/liftings/graph2simplicial/khop.py
index 80b09f95..50239f18 100755
--- a/topobenchmarkx/transforms/liftings/graph2simplicial/khop.py
+++ b/topobenchmark/transforms/liftings/graph2simplicial/khop.py
@@ -7,7 +7,7 @@
import torch_geometric
from toponetx.classes import SimplicialComplex
-from topobenchmarkx.transforms.liftings.graph2simplicial.base import (
+from topobenchmark.transforms.liftings.graph2simplicial.base import (
Graph2SimplicialLifting,
)
diff --git a/topobenchmarkx/transforms/liftings/liftings.py b/topobenchmark/transforms/liftings/liftings.py
similarity index 98%
rename from topobenchmarkx/transforms/liftings/liftings.py
rename to topobenchmark/transforms/liftings/liftings.py
index 807b5765..9453eaa3 100644
--- a/topobenchmarkx/transforms/liftings/liftings.py
+++ b/topobenchmark/transforms/liftings/liftings.py
@@ -4,7 +4,7 @@
import torch_geometric
from torch_geometric.utils.undirected import is_undirected, to_undirected
-from topobenchmarkx.transforms.liftings import AbstractLifting
+from topobenchmark.transforms.liftings import AbstractLifting
class GraphLifting(AbstractLifting):
diff --git a/topobenchmarkx/utils/__init__.py b/topobenchmark/utils/__init__.py
similarity index 63%
rename from topobenchmarkx/utils/__init__.py
rename to topobenchmark/utils/__init__.py
index 7cc2a457..406ae3be 100755
--- a/topobenchmarkx/utils/__init__.py
+++ b/topobenchmark/utils/__init__.py
@@ -1,30 +1,30 @@
# numpydoc ignore=GL08
-from topobenchmarkx.utils.instantiators import (
+from topobenchmark.utils.instantiators import (
instantiate_callbacks,
instantiate_loggers,
)
-from topobenchmarkx.utils.logging_utils import (
+from topobenchmark.utils.logging_utils import (
log_hyperparameters,
)
-from topobenchmarkx.utils.pylogger import RankedLogger
-from topobenchmarkx.utils.rich_utils import (
+from topobenchmark.utils.pylogger import RankedLogger
+from topobenchmark.utils.rich_utils import (
enforce_tags,
print_config_tree,
)
-from topobenchmarkx.utils.utils import (
+from topobenchmark.utils.utils import (
extras,
get_metric_value,
task_wrapper,
)
__all__ = [
- "instantiate_callbacks",
- "instantiate_loggers",
- "log_hyperparameters",
"RankedLogger",
"enforce_tags",
- "print_config_tree",
"extras",
"get_metric_value",
+ "instantiate_callbacks",
+ "instantiate_loggers",
+ "log_hyperparameters",
+ "print_config_tree",
"task_wrapper",
]
diff --git a/topobenchmarkx/utils/config_resolvers.py b/topobenchmark/utils/config_resolvers.py
similarity index 80%
rename from topobenchmarkx/utils/config_resolvers.py
rename to topobenchmark/utils/config_resolvers.py
index 9fc46d22..8d59a825 100644
--- a/topobenchmarkx/utils/config_resolvers.py
+++ b/topobenchmark/utils/config_resolvers.py
@@ -1,7 +1,9 @@
-"""Configuration resolvers for the topobenchmarkx package."""
+"""Configuration resolvers for the topobenchmark package."""
import os
+import omegaconf
+
def get_default_transform(dataset, model):
r"""Get default transform for a given data domain and model.
@@ -227,14 +229,43 @@ def check_for_type_feature_lifting(transforms, lifting):
transforms[lifting].complex_dim
- len(dataset.parameters.num_features)
)
- else:
- if isinstance(dataset.parameters.num_features, int):
+
+ # Case when there is no lifting
+ elif not there_is_complex_lifting:
+ # Check if dataset and model are from the same domain and data_domain is higher-order
+
+ # TODO: Does this if statement ever execute? model_domain == data_domain and data_domain in ["simplicial", "cell", "combinatorial", "hypergraph"]
+ # BUT get_default_transform() returns "no_transform" when model_domain == data_domain
+ if (
+ dataset.loader.parameters.get("model_domain", "graph")
+ == dataset.loader.parameters.data_domain
+ and dataset.loader.parameters.data_domain
+ in ["simplicial", "cell", "combinatorial", "hypergraph"]
+ ):
+ if isinstance(
+ dataset.parameters.num_features,
+ omegaconf.listconfig.ListConfig,
+ ):
+ return list(dataset.parameters.num_features)
+ else:
+ raise ValueError(
+ "The dataset and model are from the same domain but the data_domain is not higher-order."
+ )
+
+ elif isinstance(dataset.parameters.num_features, int):
return [dataset.parameters.num_features]
+
else:
return [dataset.parameters.num_features[0]]
+ # This else is never executed
+ else:
+ raise ValueError(
+ "There is a problem with the complex lifting. Please check the configuration file."
+ )
+
-def infere_num_cell_dimensions(selected_dimensions, in_channels):
+def infer_num_cell_dimensions(selected_dimensions, in_channels):
r"""Infer the length of a list.
Parameters
@@ -255,13 +286,15 @@ def infere_num_cell_dimensions(selected_dimensions, in_channels):
return len(in_channels)
-def get_default_metrics(task):
+def get_default_metrics(task, metrics=None):
r"""Get default metrics for a given task.
Parameters
----------
task : str
Task, either "classification" or "regression".
+ metrics : list, optional
+ List of metrics to be used. If None, the default metrics will be used.
Returns
-------
@@ -273,9 +306,12 @@ def get_default_metrics(task):
ValueError
If the task is invalid.
"""
- if "classification" in task:
- return ["accuracy", "precision", "recall", "auroc"]
- elif "regression" in task:
- return ["mse", "mae"]
+ if metrics is not None:
+ return metrics
else:
- raise ValueError(f"Invalid task {task}")
+ if "classification" in task:
+ return ["accuracy", "precision", "recall", "auroc"]
+ elif "regression" in task:
+ return ["mse", "mae"]
+ else:
+ raise ValueError(f"Invalid task {task}")
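In plain terms, the new `metrics` argument lets a config override the per-task defaults while keeping the old behaviour when it is omitted. A condensed restatement of the resolver as it now reads, with two example calls:

```python
def get_default_metrics(task, metrics=None):
    """Condensed version of the resolver shown in the hunk above."""
    if metrics is not None:
        return metrics  # an explicit list from the config wins
    if "classification" in task:
        return ["accuracy", "precision", "recall", "auroc"]
    if "regression" in task:
        return ["mse", "mae"]
    raise ValueError(f"Invalid task {task}")

print(get_default_metrics("graph classification"))          # task defaults
print(get_default_metrics("regression", metrics=["rmse"]))  # ['rmse']
```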
diff --git a/topobenchmarkx/utils/instantiators.py b/topobenchmark/utils/instantiators.py
similarity index 97%
rename from topobenchmarkx/utils/instantiators.py
rename to topobenchmark/utils/instantiators.py
index 83963183..35bbed24 100755
--- a/topobenchmarkx/utils/instantiators.py
+++ b/topobenchmark/utils/instantiators.py
@@ -5,7 +5,7 @@
from lightning.pytorch.loggers import Logger
from omegaconf import DictConfig
-from topobenchmarkx.utils import pylogger
+from topobenchmark.utils import pylogger
log = pylogger.RankedLogger(__name__, rank_zero_only=True)
diff --git a/topobenchmarkx/utils/logging_utils.py b/topobenchmark/utils/logging_utils.py
similarity index 97%
rename from topobenchmarkx/utils/logging_utils.py
rename to topobenchmark/utils/logging_utils.py
index aea6a78c..37735c06 100755
--- a/topobenchmarkx/utils/logging_utils.py
+++ b/topobenchmark/utils/logging_utils.py
@@ -5,7 +5,7 @@
from lightning_utilities.core.rank_zero import rank_zero_only
from omegaconf import OmegaConf
-from topobenchmarkx.utils import pylogger
+from topobenchmark.utils import pylogger
log = pylogger.RankedLogger(__name__, rank_zero_only=True)
diff --git a/topobenchmarkx/utils/pylogger.py b/topobenchmark/utils/pylogger.py
similarity index 100%
rename from topobenchmarkx/utils/pylogger.py
rename to topobenchmark/utils/pylogger.py
diff --git a/topobenchmarkx/utils/rich_utils.py b/topobenchmark/utils/rich_utils.py
similarity index 98%
rename from topobenchmarkx/utils/rich_utils.py
rename to topobenchmark/utils/rich_utils.py
index 5ef7afc4..8074ae03 100755
--- a/topobenchmarkx/utils/rich_utils.py
+++ b/topobenchmark/utils/rich_utils.py
@@ -11,7 +11,7 @@
from omegaconf import DictConfig, OmegaConf, open_dict
from rich.prompt import Prompt
-from topobenchmarkx.utils import pylogger
+from topobenchmark.utils import pylogger
log = pylogger.RankedLogger(__name__, rank_zero_only=True)
diff --git a/topobenchmarkx/utils/utils.py b/topobenchmark/utils/utils.py
similarity index 98%
rename from topobenchmarkx/utils/utils.py
rename to topobenchmark/utils/utils.py
index 1fa3e3ac..978df2c9 100755
--- a/topobenchmarkx/utils/utils.py
+++ b/topobenchmark/utils/utils.py
@@ -7,7 +7,7 @@
from omegaconf import DictConfig
-from topobenchmarkx.utils import pylogger, rich_utils
+from topobenchmark.utils import pylogger, rich_utils
log = pylogger.RankedLogger(__name__, rank_zero_only=True)
diff --git a/topobenchmarkx/dataloader/__init__.py b/topobenchmarkx/dataloader/__init__.py
deleted file mode 100644
index 2397cbb2..00000000
--- a/topobenchmarkx/dataloader/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-"""This module implements the dataloader for the topobenchmarkx package."""
-
-from .dataload_dataset import DataloadDataset
-from .dataloader import TBXDataloader
-
-__all__ = ["TBXDataloader", "DataloadDataset"]
diff --git a/topobenchmarkx/loss/__init__.py b/topobenchmarkx/loss/__init__.py
deleted file mode 100755
index 8549e855..00000000
--- a/topobenchmarkx/loss/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""This module implements the loss functions for the topobenchmarkx package."""
-
-from .base import AbstractLoss
-from .loss import TBXLoss
-
-# ... import other readout classes here
-# For example:
-# from topobenchmarkx.loss.other_loss_1 import OtherLoss1
-# from topobenchmarkx.loss.other_loss_2 import OtherLoss2
-
-__all__ = [
- "AbstractLoss",
- "TBXLoss",
- # "OtherLoss1",
- # "OtherLoss2",
- # ... add other loss classes here
-]
diff --git a/topobenchmarkx/loss/dataset/__init__.py b/topobenchmarkx/loss/dataset/__init__.py
deleted file mode 100644
index 3cfc3b83..00000000
--- a/topobenchmarkx/loss/dataset/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""Init file for custom loss module."""
-
-from .DatasetLoss import DatasetLoss
-
-__all__ = [
- "DatasetLoss",
-]
diff --git a/topobenchmarkx/loss/model/__init__.py b/topobenchmarkx/loss/model/__init__.py
deleted file mode 100644
index 4943a8d9..00000000
--- a/topobenchmarkx/loss/model/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""Init file for custom loss module."""
-
-from .GraphMLPLoss import GraphMLPLoss
-
-__all__ = [
- "GraphMLPLoss",
-]
diff --git a/topobenchmarkx/model/__init__.py b/topobenchmarkx/model/__init__.py
deleted file mode 100644
index 81ce9bd2..00000000
--- a/topobenchmarkx/model/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""TBX model module."""
-
-from .model import TBXModel
-
-__all__ = [
- "TBXModel",
-]
diff --git a/topobenchmarkx/nn/encoders/__init__.py b/topobenchmarkx/nn/encoders/__init__.py
deleted file mode 100644
index 912439f0..00000000
--- a/topobenchmarkx/nn/encoders/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""This module implements the encoders for the neural networks."""
-
-from .all_cell_encoder import AllCellFeatureEncoder
-from .base import AbstractFeatureEncoder
-
-# ... import other encoders classes here
-# For example:
-# from topobenchmarkx.nn.encoders.other_encoder_1 import OtherEncoder1
-# from topobenchmarkx.nn.encoders.other_encoder_2 import OtherEncoder2
-
-__all__ = [
- "AbstractFeatureEncoder",
- "AllCellFeatureEncoder",
- # "OtherEncoder1",
- # "OtherEncoder2",
- # ... add other readout classes here
-]
diff --git a/topobenchmarkx/optimizer/__init__.py b/topobenchmarkx/optimizer/__init__.py
deleted file mode 100644
index 31a82ca5..00000000
--- a/topobenchmarkx/optimizer/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""Init file for optimizer module."""
-
-from .optimizer import TBXOptimizer
-
-__all__ = [
- "TBXOptimizer",
-]
diff --git a/topobenchmarkx/transforms/__init__.py b/topobenchmarkx/transforms/__init__.py
deleted file mode 100755
index 46c9690f..00000000
--- a/topobenchmarkx/transforms/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""This module contains the transforms for the topobenchmarkx package."""
-
-from typing import Any
-
-from topobenchmarkx.transforms.data_manipulations import DATA_MANIPULATIONS
-from topobenchmarkx.transforms.feature_liftings import FEATURE_LIFTINGS
-from topobenchmarkx.transforms.liftings.graph2cell import GRAPH2CELL_LIFTINGS
-from topobenchmarkx.transforms.liftings.graph2hypergraph import (
- GRAPH2HYPERGRAPH_LIFTINGS,
-)
-from topobenchmarkx.transforms.liftings.graph2simplicial import (
- GRAPH2SIMPLICIAL_LIFTINGS,
-)
-
-LIFTINGS = {
- **GRAPH2CELL_LIFTINGS,
- **GRAPH2HYPERGRAPH_LIFTINGS,
- **GRAPH2SIMPLICIAL_LIFTINGS,
-}
-
-TRANSFORMS: dict[Any, Any] = {
- **LIFTINGS,
- **FEATURE_LIFTINGS,
- **DATA_MANIPULATIONS,
-}
-
-__all__ = [
- "DATA_MANIPULATIONS",
- "FEATURE_LIFTINGS",
- "LIFTINGS",
- "TRANSFORMS",
-]
diff --git a/tutorials/homophily_tutorial.ipynb b/tutorials/homophily_tutorial.ipynb
index cd0fd3af..44e737b7 100644
--- a/tutorials/homophily_tutorial.ipynb
+++ b/tutorials/homophily_tutorial.ipynb
@@ -1,32 +1,17 @@
{
"cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Homophily Tutorial"
+ ]
+ },
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/tmp/ipykernel_97245/3190777372.py:20: UserWarning: \n",
- "The version_base parameter is not specified.\n",
- "Please specify a compatability version level, or None.\n",
- "Will assume defaults for version 1.1\n",
- " hydra.initialize(config_path=\"../configs\", job_name=\"job\")\n"
- ]
- },
- {
- "data": {
- "text/plain": [
- "hydra.initialize()"
- ]
- },
- "execution_count": 1,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
+ "outputs": [],
"source": [
"import rootutils\n",
"\n",
@@ -34,10 +19,10 @@
"\n",
"import torch\n",
"import hydra\n",
- "from topobenchmarkx.data.loaders.graph import *\n",
- "from topobenchmarkx.data.loaders.hypergraph import *\n",
- "from topobenchmarkx.data.preprocessor import PreProcessor\n",
- "from topobenchmarkx.utils.config_resolvers import (\n",
+ "from topobenchmark.data.loaders.graph import *\n",
+ "from topobenchmark.data.loaders.hypergraph import *\n",
+ "from topobenchmark.data.preprocessor import PreProcessor\n",
+ "from topobenchmark.utils.config_resolvers import (\n",
" get_default_transform,\n",
" get_monitor_metric,\n",
" get_monitor_mode,\n",
@@ -48,8 +33,7 @@
"%load_ext autoreload\n",
"%autoreload 2\n",
"\n",
- "hydra.initialize(config_path=\"../configs\", job_name=\"job\")\n",
- "\n"
+ "hydra.initialize(config_path=\"../configs\", job_name=\"job\")\n"
]
},
{
@@ -90,7 +74,7 @@
}
],
"source": [
- "cfg = hydra.compose(config_name=\"run.yaml\", overrides=[\"model=hypergraph/unignn2\",\"dataset=hypergraph/coauthorship_cora\",\"transforms=data_manipulations/group_homophily\" ], return_hydra_config=True)\n",
+ "cfg = hydra.compose(config_name=\"run.yaml\", overrides=[\"model=hypergraph/unignn2\", \"dataset=hypergraph/coauthorship_cora\"], return_hydra_config=True)\n",
"loader = hydra.utils.instantiate(cfg.dataset.loader)\n",
"\n",
"dataset, dataset_dir = loader.load()\n",
@@ -99,7 +83,7 @@
"\n",
"transform_config = {\"group_homophily\" :\n",
" {\n",
- " '_target_': 'topobenchmarkx.transforms.data_transform.DataTransform',\n",
+ " '_target_': 'topobenchmark.transforms.data_transform.DataTransform',\n",
" 'transform_name': 'GroupCombinatorialHomophily',\n",
" 'transform_type': 'data manipulation',\n",
" 'top_k': 5,\n",
@@ -418,7 +402,7 @@
"# Create transform config\n",
"transform_config = {\"mp_homophily\" :\n",
" {\n",
- " '_target_': 'topobenchmarkx.transforms.data_transform.DataTransform',\n",
+ " '_target_': 'topobenchmark.transforms.data_transform.DataTransform',\n",
" 'transform_name': 'MessagePassingHomophily',\n",
" 'transform_type': 'data manipulation',\n",
" 'num_steps': 3,\n",
@@ -660,7 +644,7 @@
"# Add one more transform into Omegaconf dict\n",
"\n",
"new_transform = {\n",
- " '_target_': 'topobenchmarkx.transforms.data_transform.DataTransform',\n",
+ " '_target_': 'topobenchmark.transforms.data_transform.DataTransform',\n",
" 'transform_name': 'MessagePassingHomophily',\n",
" 'transform_type': 'data manipulation',\n",
" 'num_steps': 3,\n",
@@ -790,7 +774,7 @@
"# Add one more transform into Omegaconf dict\n",
"\n",
"new_transform = {\n",
- " '_target_': 'topobenchmarkx.transforms.data_transform.DataTransform',\n",
+ " '_target_': 'topobenchmark.transforms.data_transform.DataTransform',\n",
" 'transform_name': 'MessagePassingHomophily',\n",
" 'transform_type': 'data manipulation',\n",
" 'num_steps': 3,\n",
@@ -814,7 +798,7 @@
"outputs": [
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAA/MAAAJjCAYAAABA7UFUAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd1xT5/cH8M/NYk9Zgjhw46ir2taqX7XDXevCUZx11r1a66+11qodal1t3aNoFVfdo62jtWodOHBPEARlCQTITu7vj0ggJECAJJck5/168fLemzsOqMDJ8zznMCzLsiCEEEIIIYQQQojN4HEdACGEEEIIIYQQQsqGknlCCCGEEEIIIcTGUDJPCCGEEEIIIYTYGErmCSGEEEIIIYQQG0PJPCGEEEIIIYQQYmMomSeEEEIIIYQQQmwMJfOEEEIIIYQQQoiNEXAdADEvjUaD5ORkeHh4gGEYrsMhhBBCCCGEEFIGLMsiJycHwcHB4PGKH3+nZN7OJCcnIzQ0lOswCCGEEEIIIYRUQGJiIqpVq1bs65TM2xkPDw8A2r94T09PjqMhhBBCCCGEEFIWYrEYoaGhutyuOJTM25n8qfWenp6UzBNCCCGEEEKIjSpt2TQVwCOEEEIIIYQQQmwMJfOEEEIIIYQQQoiNoWSeEEIIIYQQQgixMZTME0IIIYQQQgghNoaSeUIIIYQQQgghxMZQMk8IIYQQQgghhNgYSuYJIYQQQgghhBAbQ8k8IYQQQgghhBBiYyiZJ4QQQgghhBBCbAwl84QQQgghhBBCiI2hZJ4QQgghhBBCCLExlMwTQgghhBBCCCE2hpJ5QgghhBBCCCHExjhsMp+WloaIiAgwDAOGYXDmzBmLP1OlUmHHjh3o3r07atSoAWdnZ4SEhKBTp05Yt24dJBKJxWMghBBCCCGEEGL7HDKZ37FjB8LDw7Fr1y6rPTMuLg5vv/02Bg8ejKNHj8LX1xcffPABqlevjtOnT2Ps2LFo0aIFrl27ZrWYCCGEEEIIIYTYJgHXAVjT8+fPMW7cOBw8eBACgfU+9dTUVHTs2BFPnz6Fm5sbdu3ahW7duulev3TpEnr27In79++jc+fOuHDhAurXr2+1+AghhBBCCCGE2BaHGZnfsmULwsPDcfDgQbRo0QKXL1+22rMHDx6Mp0+fAgDWr1+vl8gDQOvWrbFv3z4wDIPMzEz07t0bKpXKavERQgghhBBCCLEtDpPMT506FVKpFIsWLcLFixfRrFkzqzz3yJEjOHnyJACgefPmGDRokNHz2rZti169egEA7t27h/Xr11slPkIIIYQQQgghtsdhkvm3334b169fx5w5c6w6xX7JkiW67cjIyBLPHTp0qNHrCCGEEEIIIYSQwhwmmT98+DAaNGhg1Wempqbin3/+0e137ty5xPM7duyo237y5AmuXr1qsdgIIYQQQgghhNguh0nmufDnn39Co9EAAJydndGoUaMSz/fx8UHt2rV1+8ePH7dofIQQQgghhBBCbBMl8xZ08+ZN3Xb16tXB5/NLvaZWrVpGryeEEEIIIYQQQvJRMm9Bd+7c0W2HhISYdE3h8wpfTwghhBBCCCGE5HOoPvPWlpaWptv29vY26ZrC56Wnp5s5ImKL7jzPRrcV/5rlXo2CPfHb6Dfg5SI0y/1szoubwJq39Y8FNQWGHQJcvDkJyZ6IFWKMPjEad16a/41IN6Eb/hv8H9747Q3kKfPKdQ8ew8OuHrtQ37e+maMjBMiNiUHikI+4DsNm8dzcUOvgAYhMHPywZ3KJEr8vvYqMpPJ9r7MlH85sjuA6PlyHYXXP7t1G9LxPrfrMiPnfoVqDkpf8ck2VKcOLpZeBCnbo9hvXBM41vc0SU2VHI/MWlJOTo9t2cnIy6RpnZ2ej1xdHLpdDLBbrfRD70nPVObPd63ayGGOjrpjtfjZnXQfDYy9igWj6Bdwcpp6eapFE3lw0rAYRhyO4DoPYKUrkK0aTl4e43h9yHUalcGzNTYdI5AHg9yXXuA6BE9ZO5Ll6ZlmlrLha4UQeANLXOM5SZUrmLUgqleq2RSKRSdcUPk8ikZR6/uLFi+Hl5aX7CA0NLXughBBiBiqNGX4CE0IIIYQQk1Ayb0EuLi66bYVCYdI1hc9zdXUt9fw5c+YgOztb95GYmFj2QEmllS1Vwrn0uokmaxTsibWRrcx3Q1sz5m/DY0FNgYht1o/FziTlJuF66nWuwygRj+Ehukc012EQO6QWiyGycvtbe8Nzc0Ot/b9zHUal0GlYQ/AcZDXchzObcx0CJyLmf+cQzyyrwCktzLII3G9ck4rfxEbQmnkL8vDw0G3L5XKTrpHJZEavL46Tk5PJU/iJ7Rm19TJkaoDPY+Aq4qNRsCd2jnmT67BsV1AT4KtsrqOwO2KFGN32dQMLVneMz/Bxfeh1sz/rv8H/mf2ehFRU4vjxUCUlgefhAad6dVFz+3auQyI27NTWu6hay1u333t6C+6CIRZRrUEjzIg+zHUYlY7AxxnVvmnHdRg2hZJ5C/L399dtZ2VlmXRNdnZBouHn52fukIgNScyU4Ep8pm5folA79qg6qbSmnp4KDavRO+YicCnmbELsj/zefWjy8nTbhBBCiDXQNHsLCg8P120nJSWZdE3h8wpfTxxLtlSJ/31/Wu+Yq5DnuFXoSaWVlJuEmJQYg+N7eu3hIBpCrE8tFusSeQDQFKqXQ0h5dBrWEGmJuUhLzEWnYQ25DocQUolRMm9BTZoUrNdISEiAWq0u9ZonT54YvZ44lsiNF6Fm9Y/VDyp92QUh1pKUm4TWUa3RZW8Xg1H5432PI8Sd2ksRx5AweozePmNCvRtCiiOXKBG94BKUcjXAsvhrU+XtEEII4R5Ns7egd999FzweDxqNBjKZDLdv30bTpk2LPT8zMxOPHz/W7Xfp0sUaYZJKJluqROwz/XXdfAbYOLw1RxHZiRc3gbXtAVYDBDcHIvdTb/kyuP/yPvod6lfqeR4iD0rkiUNQi8WIHzoMinv39I6HHdjPTUDELhxadQMKmXbwR6lQIyPZMVrUORJZXi4OLlkIAOg1cy6c3dw5jojYMhqZt6CAgAC0b99et3/y5MkSzz99umBadVhYGFq0oIInjshYH/gzszvSFPuKWtdBm8gDQPI16i1vovwReFMSeQDY3XO3hSMipHJIHD/eIJFn3N0hCqE3s0j5iDOkSIkT6/ZZDVAlmGZ62Jv8RL7oNiHlQSPzFjZz5kycOXMGABAVFYVp06YVe+6vv/6q254xY4alQyOVlEShvxzj6JS3EepDP8yJ5YkVYow8NhL3s8pewMtV4Ip9H+yjUXniENRiMaTXrhscp1F5Ul7pz3IQ/c1lg+PdP3mNg2iIJWk0aqQnPAUA+FWvwXE0xNbRyHw5ZGVloXv37nB3d0f79u1L7O3evXt3dOrUCQBw7do17Ny50+h5586dw8GDBwEADRo0wJgxY4yeR+xbYqZEb4o9jwHCq3pxGJEdGfM3wLz6lhfcnHrLGzH+r/FlTuRdBa443vc4Lg65SIk8cQhqsRiPOnY
CNPq1IkK3b6NReVJuuxYZJvKBNT3g5Eqz8uyNSqGAXJIHuSQPKoWC63CIjaOR+XJYtmwZjh49CgA4e/Ys5s6dqzeqXtRvv/2GNm3a4OnTp/j444/h6emJbt266V6/dOkS+vTpA5Zl4e3tjf3790MgoL8aR5OYKUG77/Qr2DMMw1E0diioCTAvs/TzHJRYIUZsWmyp523tshUtAmkJEHEsuTExSBxS/NIcxt0d7i1bWjEiYm+K1BGF0JmPnpObcRILsZzstBSkPHkEAGB4PKTGPynlCseikaqQEaUt+lglMhw8F8qHSuMwX6F79+7h22+/Lfb1b7/9Flu2bNHt9+7dG7179zbp3qUlXIGBgTh16hQGDRqES5cuoXv37mjevDnq1q2LxMRE/Pfff2BZFvXq1cOOHTtQv359k55L7Eu3FWcNjh2a1JaDSIijMaW4HU2jJ45IkZSExz16AqW0m6Pp9aQixBn6/76ETnwMW/QWjcrboV9nT9JtsxoNRC60jDKf4nkuUldcAwAIq7kjI+oO/McUXzicaDlMMv/ixQts3bq12NdPnDiht1+zZs1ik/np06cjJiYGZ86cQcuWLfHNN9+U+vywsDCcO3cOu3btQlRUFG7duoXbt2+jSpUqaN++PQYNGoTIyEi4Uksbh1V0rXyzUE+aYk/MQqwQY/SJ0bjz0vQWR9Rejjg6tViMx+++ZzCdvqjaJ/+i6fWk3OQSJbZ9cUG3z/CAYYspkbdXSplMb3/oD6s4iqTySV11TbetfJYLpzD6HdgUDpPM/+9//wPLsqWfaAJvb28cOXKkzNcJBAIMHjwYgwcPNkscxH5kS5XQaAr+ffIZYOvINziMiNiTMX+MKVMizwOPEnnisIprOWeAx0PN3/dRIk/KTS5RYuvn5/Wm2LMaUCJvp57duw321ZuDDI+HoLC68PIP5DiqSoYB8OrX4SqR4ZyGYiscJpknpDIbvvlS/vcu8BkGzat7USs6YhZJuUm4nXG7TNfs6rnLQtEQUnlJ799H/Ae9SzwndPs2WhtPzObYmptQyvRn5QmcqDa1vYqe96lum9Vo0Ofz+RxGU7moMmXasuwq7b7fuCa0Xt5E9FUipBIoXMFezbLYOLw1h9EQe9LvoGn94QGgUZVGWPfeOniKPC0YESGVhykJfD6aTk/MTaM2nDE66Ms2HERCLC07LcXgmLObOweRVE4pK64CGuj6rOX8kQDnMd5chmQzKJknhGOJmRKoC02x5zGgUXlzk2YBvw0E0u4AAQ2BQdGAizfXUVmFVKVfWGlPzz2o70tFNgkxNZFnXF0RduggJfLEAlgInHhQyTVgeMBHC96EZxUXroMiFlC48B0ARMz/jqNIiL2huTyEcChbqsT/vj+jd6xpNRoVNbvt/YDEC4AsG0i5DUQX32LK3jjznXXbbgI3SuQJgbZKfamJPI+Hmgf2o8HVGErkiYUw4PF4ELkIEFTLixJ5OyXLy4VCItHtMzweqjVoxGFElU/glBaAUJuWiqp50Hr5MqBknhAORW68CHWRwoxU+M4Ckq8WbMtzuIvDzIYMGQKGYfQ+/ve//+leT8pN0o3MuwndUM+3nt71//77Lxo1amRwj7K4e/cu5s6diw4dOqBq1apwdXWFUCiEr68vmjVrhhEjRuDQoUNmK0BKSEUpkpLwuPM7xb7u1LAh6l26iIZ3bsOFWsUSC9KoWSjlaijlaqNT7ol92Ld4nt6+QOTEUSSVl8DHGdXmt0W1Re0QMKEZrZcvA/pKEcKhwmvlAW07OppibwECV0DxKoln+EDENm7jMYMzZ87gt99+M/qasVZ0UpUUqzuvBgC8fPkSs2fPxqZNm8qdZKvVakyfPh2rV6+G5lV1XoFAgF69esHb2xsnTpzAjRs3cOPGDWzZsgUtW7bE3r17UaNGjXI9jxBzKC6RZ1xdUffMafA9aWYUsazkR5n4fck1vWNCJz4yUyTFXEFsWerTODx/eF/vWECNWhxFQ+wRJfOEcORSfIbBMRqVN7PMp8DPbwHKXO0+wweCW9j8enmVSoWJEycW+/rEkxMNWtFpWA08RZ6IiorCjBkzkJaWhqCgILx48aJcMcyaNQsrV67UO7ZhwwYMGzYMAJCamorw8HBkZGj/ncfExKBTp06IjY2Fm5tbuZ5JSHkpkpLwuEdPQCo1eI3n5oY6p09RIk8sKv1ZDqK/uWz0NaVcjaq16d+fPdr22RS9fYbHQ+9Pv+QomspLlSnTFsGDdsq9wMe5lCtIPppmTwgHsqVKDFjzn96xo1PeplF5c1vTtiCRBwBWDQht/wfE8uXLcfv2bfB4xr+FP8x8aHDMTeCGI0eOYOjQocjJycFXX32FM2fOlOv5aWlpWLVqld4xPp+PiIgI3X5AQADee+89vXOePHmCTZs2leuZhJSXbjTeSCIPHo8SeWJxcomy2EQ+X/dPXrNSNMSa8vvK5xu1cj1VsTciP5Evuk1KR8k8IRwYG3XF4Fh4VS8OIrFjmU+Nr4+38Sn2ycnJmD9/PhiGwahRo4yeU3TqvKvAFXs/2AulUon3338ft27dwrx58+DkVL51excvXoRKpdI75u/vD2dn/TdKqlevbnDtP//8U65nElIeuTExJa6Pr/3nH5TIE4s7uOJ6ia9/OLM5nFzpzXx7I8vLhcCp4OdiYK068PIP5DCiykmVKQMrU4OVqQEqHVFmNM2eEI40rOqOu8+1o8a7xtH0erNb87bhsXH/2vwU++nTpyM3NxfDhg3DW2+9hfXr1+u9nqvIhVwlBwDwGB5cBa64MPgCAMD3fV/07t27wjHk5Bi+SSIUGv4iKhKJDI6JxeIKP5+Q4qjFYsQPHQbFvXslnkft5oi1yCVKpD41/J754czmCK7jw0FExFoOLlmIwJphSE98CgDo98U3HEdUOaWsuAqIGEDBgpWrEfTp61yHZFNoZJ4QDnzZMxwPUvLA5zE4OuVttK5ZheuQ7I8iT39/SiwQ1ISbWMzk1KlTiI6OhpeXF777zniP2rsv7+q2NawGdX3q6vZdXMzT9qi+kQrfxhJ8Y4l7nTp1zBIDIcYkjB5TciLP46HepYvUbo5YzbE1N/UPMMAnazpRIu8geHw+AmqGIaBmGE2vLwHD44Fx5oNx5tN6+TKiZJ4QK0vMlKDbin+h1rBwEfIxcO1/pV9EyubFTe36+HwiD8DHtquoK5VKXdG7+fPnIzDQtKl6+RXszalFixZ4+239mQ9ZWVlITEzUO3bzpv4vsQKBAKNHjzZ7PMSxKZKTAQD3W7aC7MaNYs9jXF1pWj2xOpVSf810xFwadXQUb0V8hMQ7N5F45ybeiviI63AqJY1UBX4VF+00e4Ua/mObch2SzaFknhAr677irG47V64q4UxSbus66O8HNuYmDjP68ccfcffuXTRp0gSffPKJSdds7bIVniLLJC67d+9GmzZt9I5NmDAB8fHxyM7Oxtq1a3H69Gnday4uLti0aROaNWtmkXiI41EkJeFu8xaI69mr5BN5PNQ8sJ9G4wknXj7PA8NjwPAYCJ358KvmwXVIxEp2zZ+jHXHm8bBr/h
yuw6mUMqLuQJ0h1Y7Ki/jIPvSE65BsDq2ZJ8TKWAAMCmp8HJnSjsNo7FDmU0Cj1j82eCc3sZjJs2fPsGDBAgDA6tWrIRCU/q17T889qO9rOB3eXIKCgnD+/HkcOHAACxcuRExMDA4fPozDhw/rneft7Y2xY8diwoQJRgviEVJecR/01laoL6bVYe2Tf1HyTjjHMAyETnyuwyCE2CkamSfEiu48z0aOTKVL5BuHeCLUx5XTmOxO0cJ3dlD0btq0acjNzcXgwYPRvn37Us/f2mWrRRN5ANBoNFizZg1mzJiBmJgYAICXlxf69u2LUaNGoUGDBgC00+83btyIlStXIjMz06IxEcchvX8fmtzcYl8P3b6NEnnCufRnOVBIVVBIVVDKVPhwRnOuQyJWkPo0DksjeoDVaHSt6T76dgXHUVVOPv3rgWVZsAo1hAGuqBIZznVINodG5gmxop6rzuntJ2RIOIrETkmzAHmhomsM3+aL3v3555/Ys2cPPDw88MMPP5R6PgMGLQJbWDQmlmUxePBgREdH6475+Pjg0qVLugJ3SqUS/fr1w8GDB5Geno6lS5di3759OHPmDI3QkwpRJCUh/oPeBsdF9euj+ppfaE08qTR2LSpoQ8uywL+7HqL3dMt+fybc2/bZFINjATVqcRBJ5Ze5+wGcQgqWnvBcKDUtKxqZJ8SK1Br9Bpr1AqmyqVlFfwTtIoZXhLY960GhUGDSpEkAgC+//BLBwcF6r0tVUoNreDzLf1vfsmWLXiIPAP369dOrVC8UCjFr1iy9c+Li4vDxxx9bPD5i3+KMJPIAUOu37ZTIk0pDLlGC1VDTbEeTnZaiG40nJdNIVVAk5UL+JBvy+GywKvq6lQcl84RYSbZUCTdhQaLpJuJj4/DWHEZkp5w8AB5f+2Hjhe+WLl2K+/fvo2HDhpgyxfCd/l9v/2pwrFGVRhaPa8OGDYbPbWT43MaNDb/+f/75J+Li4iwSF7F/iqQkg+n1jKttv2lH7NOxNTfBFxX8zBc68dF1nG3PFCOli5o92eAYTbE3LiPqDljFqxpHGkCRklfyBcQomstAiJWMjbqChsFeeJCi/UX07Ked4OUi5DgqOyLN0p9mH9TU5gvf5SfNd+/ehUgkMuma//79DwxT8AtkXFwcatasada4YmNjDY55e3sbHPPy8jJ6/fXr11GrFk05JGUX1+sDvX2emxtqHzlczNmEcIvP54Pvot32D3WHkyv9zLc32Wkp2Dx9AtQKucFrDI+HCRt+o/7yxdAo1AXVoBno/e5CTEfJPCFWJODzEB6snQZKibyZbe8HpBTqa5562+YL340aNQovX740OC5Xy7Hj3g7Ik+TIvak/SlmtWjVERETo9otLqCtCpTJsqciypk8nVSgU5gyHOIjcmBho8vRHbuqcPkVT60ml1HVcExz56QYykiWoEuxGo/J26tfZk4wm8gAgEDlRIl8CZVKh319YIHAK1ZMoD0rmCbGSH/q/pusxT+3oLCD5KtcRmN3nn3+uty9WiDHy2Ejcz7qPqs2qIvNspkEyX7t2bSxZssSicdWoUQP379/XO2asUn1x1etr1KhhkbiI/VGLxYgfOgyKe/cMX3Rzo0SeVGo8Pg/+oe7oOq4JjcrbodSncVBIii9kbI0aNrZKI1UVjMq/+jIJfJw5i8eW0b8yQqwgMVOC/31/GmKZChoWmBZ9neuQ7Is0S1squLAxf3MSiiVNPDkR97Pul36ihfXr18/g2M2bNw2O3bp1y+BYtWrV0Lo11YogpkkYPcZ4Ig+g9sEDVo6GENMdW3PT6DaxD7K8XETNnlTs60JnZ0R+v9KKEdmWjKg7EIS8mrWgAQImUdvG8qJknhAr6L7iLNSvcs1cuQoPXuRwG5A9kWYBPzYC2FdVUBk+MCXW5lvSGfMo6xHXIQAAPvvsM9StW1fv2N69e3GvUNKlUCjw/fff653D5/Px888/02gFMYkiKQmyGzeMvkZ95AkhXNr//dcGx0Lqh2NG9GHMiD6MyVv3wMs/kIPIbAOrYaHOkIJx5kNU0xOiqrQcobxomj0hFnYpPgNimf4aY2pJZ0Y7IgBF4XVXasDHPqdxV/eojitXriDrYhYAQJVsuHb98ePHmDlzJgDtlPvx48frvb5o0SLdOnyxWGz0OfnXA4Cvr6/BdH93d3ecPXsWw4cPx/Hjx3X3atOmDd577z14eXnh3Llzesl9cHAw1qxZg549e5bxsyaOymgLOh4PNX/fB5f69a0eDyFl0XVcE92IPK2Xtz/pCU8hcHKGSi4DAASG1UXvT7/kOCobwhazTcqMknlCLChbqsSANf/pHeMzoJZ05pRyW3+fsc9R36TcJNzOuA3ZMxkyjmcUe96zZ8+wdOlSAECHDh0Mkvl169bh6dOnJT4r/3pAu769aDIPAIGBgTh27Bj+++8/7N69GxcvXsSjR49w+PBhKJVKuLu7Izw8HE2bNkW3bt3Qt29fuFILMWKi3JgYgxZ0zk2botauaI4iIsR0colSr/gdsT9+oTWQnvgUfFc3+IXWwMCvvy/9IlKAKWablBkl84RY0Kitlw2OuTkJqJK9uWQ+BeRFliyM/YebWCys30HtOnWfdj7waecDD5EHzg86X+b7xMfHmzWuN954A2+88YZZ70lI4pCP9PZ5bm6ovmE9R9EQUjaHVt1ASpx25lN6Ui6OrbmJ3tOpUrc96f3plzi4ZCEAoNfMuRxHY1s0UhUUz3MBpQaMiE8j8xVEyTwhFnTvuX6i6SriUyV7c1rTVn/fydMu18oDgFQlBQMG7Kufert77uY4IkIsQ21k+Qe1oCO2JPVpwc9+pUzNYSTEUpzd3DFg3mKuw7BJGVF3tD3lRXwAAMOnofmKsM/5qIRUEhJFwZpmHgPc+boLQn1oqrFZSLMMR+XH/ctJKNZQ36e+LpEP9w1HiDsV/yL2KWHkKL390O3bKJEnNkUg4ummDjM8WjNPSFHCwILlJ1UiwzmMxPY5ZDJ/+PBh9O/fH2FhYXBxcUFQUBDeeust/Pjjj7rCUJZw/fp1TJ06Fc2bN4efnx+cnJwQEhKC9u3bW/zZxPoSMyXQvJo6xGcYuL16B5KYyY4I/X2Rh8UK36VJ0vDz9Z+RJkmzyP1Lk5SbhHsvtcXk3IRucBI4cRKHLeP675CYRi0WQ1akpaF7y5YcRUNI+Qz8ojVEzgKIXAT4aMGb1GPeDmWnpWD1iAisHhGB7LQUrsOxKT7960GZkgcACJzSAjwXmiheEQ6VzKenp6N79+7o2bMn9uzZA5FIhB49eqBBgwa4dOkSpk+fjsaNG+PkyZNmfW5OTg4GDx6M5s2bY8WKFXj48CFatWqFvn37ombNmjh37hymT5+O8PBwHDx40KzPJtzptuKsrqaHmmVxdGp7TuOxK9IsILFIPYLx5yz2uDRpGn658QvSpNZPBJNyk9BlbxdooG29J1PJIODRD76y4vLvkJRMev8+7jZoiLsNGuJB6zb6L1IbQ2JD5BIldi64iKi5F6CQquDp5wwnSlTsiiwvF7/OmoQNE0dBLsmDRqNG1OzJXIdlMzRSF
t9z/7otMye6JgV+c98/U9N76uYmNjcfnyZe6ZryN/2TOftiIPO38qliZ8lQKw/6W7Eanhf4h9BhvgEUnMRUXS7HynnA1QtW0rc0ZElJO53+l6zOy+MmVCRORZ7tZ03AhBsrDaRUSoFCg126FUCNj5lztZyPuaqA5sekf0G1Xbtui2N0/uNIiIiIgkddoz3759e8TFxXkrFyft2rVD+/bta7+R/JMoQqFQQBsWgj5xkYiLCpc7IyIiIvIjyY8luYyJqP7sRiuKlx1C8bJDsButtT9AsqrTzHxhYaGX0vi9vXv3NtpY1PhClAoktvHdbQBERJXZ9Hqcm/EkAKDdooVQ+vA2JiKiQMWj6byv4li6ijhmam8Zs6HaePVoOqLqzH+ojxQvTekvYyZEtdOb9Uj/Ih3pX6RDb9bLnQ7JoKKQrxoTkXwqmt9VjSlw8Wg67xPtIsznS2E+XwrRzgbVvi4oi/nNmzdj/PjxiI+Ph0ajQevWrTF48GC89957uHr1qsfHM5vN+OqrrzBnzhyMGDECHTp0QEREBFQqFaKjo3Hrrbdi9uzZOHjwoMfH9lWRmlCsmToIa6YO4l558nmzdsxyGRMRkXzsNhHFZ0tRfLYUdhuLDiKP4NHEfiWoivkrV67g3nvvxX333YdPPvkEKpUKo0aNQvfu3ZGXl4fZs2ejV69e2L59u8fGzMjIQKtWrTB8+HC88cYb2LFjB1q0aIERI0ZgzJgxaNu2Lf73v//hvffeQ58+fTBlyhSUl5d7bHwiImq4dosWuoyJSE6sOoJN5ePoeDSddwhKAao2TaBq0wSCUpA7HapF0HSzNxgMGDFiBPLz86FUKrFs2TKkp6dLPz958iRGjRqFEydOYOTIkdi2bRuGDh3a4HFzc3Nx/fp1AMBDDz2EefPm/a6J4MGDB/HII4/g6NGjWLVqFYqLi/H55583eGwi8oz5w+ZLM/Lzh82XNRciInJQKBWIieOe6WASFtEEE16eK3caAa1FSqK0b75FSqLM2VBt6nTOvD/705/+hI8++ggA8NZbbyEjI+N39xQUFKBnz54oLy9HdHQ0Tp48iWbNmjVo3Jtvvhk//PAD7rzzTmzbtg1KpdLlfWfOnEG3bt2kWfmcnByMHj26zuP5yznzRET+wqbX4+fhdwEA1F0TIChD0CFrlcxZEZHJYJH2yic/lgR1OLftEVFgcLemC4pl9ocPH8aKFSsAAK1atcIzzzzj8r74+HhMmzYNgGNJ/ty5nvvm79lnn622kAccx/7de++90vWmTZs8NjYREdVf5YZ3phMnZcyEiCpTh4dizOy+GDO7Lwt5IgpKQVHMZ2Zmwm63AwAmTpwIlUpV7b2TJ0+W4sWLF8NoNDZo7PHjx2PatGm44447ar03ISFBis+dO9egcYmIyHPUlX5/5p55IiIi8gUBX8xbLBZs3LhRuh4+fHiN9/fp00daWl9WVobc3NwGjf/iiy9iyZIlaNKk9j1dlRvfNXR5v6/TGS14aNkePLRsD3RGi9zpEBFVK3buWzCddMzId8rZwDPmiYiIyCcEfDGfl5eHa9euSdf9+vWr8X5BEJzu2bp1q9dyqyovL0+Ka/vSwd/NWnNAiqdl75MxE3LJeB1YOcrxMV6XOxsiWRXNfgZ2gwF2gwFFs11v0yIiIiJqbAFfzB8+fFiK1Wo12rZtW+sznTp1cvm8N+Xm5mL37t0AgK5duzot9ydqdGsnuY6DlN6sR/oX6Uj/Ih16s17udKiRlf/4o8uYiIiISE4BX8wfPXpUitu0aePWM5UL/srPe4PBYMCiRYswfvx4AEC3bt2Qm5uLsLAwr44rt/kP9ZHipSn9ZcyEqHYzts/A8avHcfzqcczYPkPudKiRCRqNy5iIiBpPeVkp1r2agXWvZqC8rFTudIh8QsCfM19cXCzF7u5Dr3yfXq+HxWJBaKhnuqTqdDo89dRTMBqNOHfuHA4ePAiDwYCkpCSkp6dj+vTpUKvVHhnLl0VqQrFm6iC506DqTFx9Y0Z+4mp5c/EBP1//2WVMgc+m10MdH4/yI0eg0GjQaWOO3CkREQWlTe++6RTzvHnvsButTufMKzQBXy76tYD/f6ekpESK3S2Sq86Kl5SUoHnz5h7Jx2g0YtUq5/OJmzVrhi5duqB58+YQRbFO7zOZTDCZTNK1Xs8lwOQBmmbAlM1yZ+EzujTrIhXxXZp1kTkbakznZjwJRVgYwn/rpaJyY6sWERGRv6oo5CvimKm9ZcyGahPwy+wrHy1X05F0lVW9z2AweCyf1q1bQxRFWK1WFBcX48svv8SoUaOQk5OD1NRU9OjRA998843b75s7dy4iIyOlT1xcnMdy9SZ2syd/smj4InRv3h3dm3fHouGL5E6HiIgo6Nz/7IsuY6JgFvDFvKbS/kaz2ezWM1XvCw8P92hOAKBUKhEdHY277roL2dnZ2LBhA5RKJQoLC/HHP/4RO3bscOs9GRkZ0Ol00ufs2bMez9Ub2M2e/IlWpcXye5Zj+T3LoVXxWLJgUvlMeZ4vT0REga5FSqLLmHxTwBfzTZs2leLKy9FrUvm896rv8JbRo0fj2WefBeD4MmHSpEm/y8MVtVoNrVbr9CEiIs9QarXokLUKHbJW8Xx5IiIZ5cx7DZcLC3C5sAA5816TO52ApdCEIGZqb8RM7c398n4g4Iv5mJgYKb5+/bpbz+h0OinWarUea35Xm5kzZ0rx+fPn8fHHHzfKuHKw2uw4el4Pq11kN3si8lnmoiL8NGAgfhowEOaiIrnTIaLfmAwW5GTuR07mfpgM3K4XDK6cOe0yJs+xG60oXnYIxcsOwW60yp0OuSHgi/nExBvLQ86fP+/WM0WV/sBW+Xlva9OmDTp27Chd79y5s9HGbmw/F984UiRS0zhflhAR1YVNr8cvd98Du8EAiCJOjXlA7pSI6De5Sw67jClwRcd1cBmT51Rtfke+L+CL+aSkJCk2mUxOhXp1CgoKXD7fGFq3bi3F7n754M9OXCyp/SYiIhmcm/GkFNsrNVMlIvnZbSKKz5ai+Gwp7La6nQRE/mnMcy+hZcd4tOwYjzHPvSR3OkQ+IeA3QgwcOBBRUVG4du0aACA/Px9tazhaSBRF5OfnS9cjRoyo99i7d+/G7t27cc8997j9pYDFcmOpmLvd9/1Z11ZN5E6BiMgl0WYFwsKAsjJAqUSnnA1yp0REErGamAJVWEQTni3vZS1SEp3OmCffF/Az86GhoRg9erR0vX379hrvP3DggLS3PiIiAsnJyfUe+//+7//wl7/8BVu3bnXrfrvdjl9++UW69pdj5uqje2stEtto8dGUgXKnQq4YrwMrRzk+xutyZyMrvVmP9C/Skf5FOvRmvdzpUGMSAYVCAUXTptDcdBPPmCfyIQqlAjFxTRAT1wQKZcD/cZaoUbD5nf8Jit/9nn76aSgUjl/q2rVrazyiLisrS4off/xxp6Pt6svdve9ffvmlU5O+e+65p8Fj+6oVaQOxZuog7pf3VWsnuY6D0Kwds1zGFPiEkBCE9eiBsB49IITwDzVEviT5sSSXMRFRMAmKYr5379
5IS0sDAFy6dAmZmZku7ysoKMDSpUsBANHR0cjIyHB5n8ViQUpKCpo2bYo+ffrg0KFDNY6fm5uLr7/+usZ7SktLMXv2bKecR44cWeMzRETkPTxjnsh3qcNDMWZ2X4yZ3RfqcE4MEFFwCopiHgAWLFiAvn37AgDmzJmDFStWOP385MmTSE5ORnl5OVQqFdavX4+oqCiX78rOzsbq1atRWlqKgwcPYsaMGTWOLYoi7r//fnz00UcuVwXk5+dj6NChOHrUsUclOjoa//73v6FUKuvzSyVquNGLgYuHHJ/Ri+XORlbzh813GVPg4xnzRES+QVd8CYvSJmJR2kToii/JnQ6RzxBEUQyariHFxcVITU1Fbm4uAKB79+5ISkpCcXExdu3aBavVitjYWGRlZeGuu+6q9j3Lly/Ho48+Kl3ffvvtLmfe9+zZg4yMDKefNWvWDP3790fLli1hMplw9OhRHDt2zOld//rXv5CQkFCvX6Ner0dkZCR0Oh20PvyHz7QVeTBabFia0p9L7X3RylHO11M2y5MHERERBb1FaROdrmesWCtTJkSNw92aLqg2AcbExGDLli347LPPsHLlSuzfvx+bNm2CVqtF//79MW7cOKSlpaF58+Y1viclJQVfffUVcnJykJCQgIULXS+/HDRoEHbu3InCwkJ8/vnn+Pbbb3H06FHk5+ejpKQEISEhiIyMxJAhQzBgwABMnDgRt956qzd+6T5rWvY+rJk6SO40qCq7Bbj825dMLXvImwsREVEVJoNFOl8++bEkLrUnoqAUVDPzwcDfZuYBsJj3NcbrwPwkwFIGhEYALXsCj7p3IgMREVFjyMnc73Q9ZnZfmTKhxnD59Cmsfv4pAMCkt99Hyw6dZM6IyLvcremCZs88+aalKf3lToGqWjsJEARA1cTxv8qgWsBDJDEXFeGnAQPx04CBMBcVyZ0OEVHQ2rlyGdp174l23Xti58plcqdD5DNYzJMseDSdj4tJvBFPXC1fHkQysen1+OXue2A3GABRxKkxD8idEhFV8ofUHig+W4ris6X4Qyq3gxFRcGIxT0TOJq52zMa37g08dQjQNJM7I6JGd27Gk1JsNxplzISIXPlq1THExDVBTFwTfLXqWO0PkF+7/9kXXcZEwY7rZ0kW7GbvwzTN2L2eCEBYz54oP3IEANApZ4PM2RARBa+wiCaY8PJcudMg8jmcmSdZTcveJ3cKRES/027RQijCwhDerx+67tkNVdu2cqdERJUkP5bkMiYiCiacmSciIqpCqdWiQ9YqudMgIhf0vxqx9o29AICJfxvAY+mIPMRutOLX7KMAgBYpiVBoWCr6Os7Mk6zYzZ6IfJFNr8fpyak4PTkVNr1e7nSIqJKKQr5qTIGrvKwU617NwLpXM1BeVip3OgGropCvGpPvYjFPsmA3eyLyZZUb4FWOiYiocZWXleJfMx7F5cIC2O02bHr3TblTIvIZLOaJiIiIyG9M/NsAlzEFpsrF+5Uzp2XMJPC1SEl0GZPvEkRRFOVOgjxHr9cjMjISOp0OWq1W7nSIiPySuahIOlu+U84GNsAjIpLJulczYLfZcOWso5D/06KPEBbRROasiLzL3ZqOxXyAYTFPRNRwpyenOl2zGR6R7zAZLMhdchiAo5M9G+AFtvKyUml2/v5nX2QhT0HB3ZqOLQqJiIiIyG98vvgQfj1fJsVj/9JP5ozIm3jGPFH1uGeeiIioinaLFrqMiUh+FYV81ZiIKNhwZp6IiKgKnjNP5LtatAnHr+cNUkxEFKw4M0+NSme0AADSVuRJMREREZG77n3iJsTENUFMXBPc+8RNcqdDRCQbFvPUqGb8Zz8A4PhFPR5dtVfmbIiIiMjfqMNDMWZ2X4yZ3ZfN74g8yG60onjZIRQvOwS70Sp3OuQGFvPUqH6+XCLFJy6W1HAnEREREQW78rJSrHs1A+tezUB5Wanc6QS0X7OPuozJd7GYp0bVJebGcSJdW/FoESIiIiKqXsWxdFVjImIxT41s0SOO42O6t9bioykDZc6GiIiIiIgAoEVKosuYfJcgiqIodxLkOXq9HpGRkdDpdNBqtXKnQ/7IeB1YO8kRT1wNaJrJmQ0REREFsfKyUmlG/v5nX0RYBFd2UuBzt6ZjMR9gfL2Y1xktiNSEIm1FHuY/1AeRGjau8TkrRzlfT9ksTx5ERERVmAwW5C45DABIfiyJDfCIKCC5W9NxmT01qllrDkjxtOx9MmZCRERE/qaikK8aU+BiAzyi6rGYJyJnE1e7joOU3qxH+hfpSP8iHXqzXu50iIiIggob4BFVj8U8Nar5D/WR4qUp/WXMhKqlaeZYWj9lM/fLA5i1Y5bLmIiIGl/yY0kuYyKiYMRinhpVxR75FWkDuV+eiIiI6kQdHooxs/tizOy+3C8fJO5/9kWXMRGxAV7A8fUGeET+Rm/WSzPy84fNh1bFf6+IiOTCBnhEFAzYzT5IsZgnIiKiQJWTud/peszsvjJlQkTkPe7WdCGNmBMR+QOeM09ERD7KbhPx6/kyAECLNhEyZ0NEJC/umSciZxWFfNWYKEjY9HoUPvwIfhowEIWPPAKbnqcYEPkOsZqYiCj4BGUxv3nzZowfPx7x8fHQaDRo3bo1Bg8ejPfeew9Xr171+Hjl5eVYv349pk6dij59+qBFixYIDQ1FVFQUevbsiSlTpuDzzz+H3W73+NhERFQ352Y8CdPJkwAA04mTODfjSZkzIqIKCqUCMXFNEBPXBAplUP4xlohIElS/C165cgX33nsv7rvvPnzyySdQqVQYNWoUunfvjry8PMyePRu9evXC9u3bPTLehQsX8Oyzz6Jly5Z48MEH8eGHH+Ly5csYMmQIJkyYgL59++LUqVNYtWoVRo0ahX79+uHw4cMeGdsX6YwWpK3Ik2LyUTxnnoiIfBSPpgs+5WWlWPdqBta9moHyslK50yHyKUHTAM9gMOD2229Hfn4+lEolli1bhvT0dOnnJ0+exKhRo3DixAmoVCps27YNQ4cObdCYr7zyCl599VUAQFRUFJYuXYpx48ZBEATpnqtXr2LWrFnIzs4GAERGRuKrr75C3771a+jiyw3wHlq2B5pQJVakDUTaijysSBsod0pERL9jLipCwegxEI1GhPXqhfYfLoPSx34/JSIKFutezXC6nvDyXJkyIWo87tZ0QTMzP3PmTOTn5wMAXn/9dadCHgASEhKQm5uLsLAwmM1mjB07FtevX/fY+OvXr8f48eOdCnkAaN68ObKysnD//fcDAHQ6HR5++GFYLJy5JiKSw4WMF6BJTER4v35QqNUs5ImIiMgnBUUxf/jwYaxYsQIA0KpVKzzzzDMu74uPj8e0adMAOJbkz53rmW/+7rrrLtx555013lN5rJ9++gkbN270yNi+ZGlKfyme/1AfGTMhIqqeaLOi/NgxlB87BtFmlTsdIqKgdv+zL7qMiShIivnMzEypudzEiROhUqmqvXfy5MlSvHjxYhiNxgaPf88999R6T2JiItq2bStdf/nllw0e19dEakKlpfWRmlCZsyEiqgabZRMR+YywiCaY8PJcT
Hh5LsIimsidDpFPCfhi3mKxOM1yDx8+vMb7+/Tpg2bNmgEAysrKkJubW++xJ02ahNzcXDzyyCNu3R8XFyfF586dq/e4RERUP+aiIhh/+AF2gwGq+HgIISFyp0RERETkUsAX83l5ebh27Zp03a9fvxrvFwTB6Z6tW7fWe+wuXbpgxIgRiI2Ndev+ykfThfAPkEREje7U6DGAKAI2G8p//BHtFi2UOyUioqDFTvZENQv4Yr7yUW9qtdppKXt1OnXq5PJ5bztz5owU9+kTeHvKeTQdEfk6u9EICAKgVAIAm98REclo07tvuoyJyCHgi/mjR49KcZs2bdx6pnLBX/l5bzp16hQuXrwoXU+cOLFRxm1M07L3SfGsNQdkzISIyLWwnj1dxkRERES+JuCL+eLiYimu2Atfm8r36fX6Rjkm7r///a8Ujx07Fj169PD6mERE5Kz9vz5EeL9+CO/XD+3/9aHc6RBRJSaDBTmZ+5GTuR8mA1f4BQN2sieqWcBvzC4pKZFitVrt1jNhYWG/e0fz5s09mldlpaWlWLjQsS8zIiIC//jHP9x+1mQywWQySdd6vd7j+XnK0pT+0ow8j6YjIl+k1GrRIWuV3GkQkQu5Sw47xWNm95UxG2oMFZ3sici1gJ+Zr3y0XE1H0lVW9T6DweDRnKqaM2eOtMR+8eLF6Nixo9vPzp07F5GRkdKnckd8X8Oj6YiIiIiIiDwj4It5jUYjxWaz2a1nqt4XHh7u0Zwq27JlC95//30AwBNPPIHU1NQ6PZ+RkQGdTid9zp496400iYiIiGSV/FiSy5iIKFgF/DL7pk2bSnHl5eg1KS8vr/YdnvTjjz/i//2//wdRFPHAAw9IRX1dqNVqt7cPEBEREfkrdXgol9YTEVUS8DPzMTExUnz9+nW3ntHpdFKs1WoRGur5JeEFBQW4++67odfrkZycjDVr1kD521FIRERERERERDUJ+GI+MTFRis+fP+/WM0VFRS6f95RTp05h2LBhuHDhAu69915s2LDB7f38RERERERERAFfzCcl3dhTZTKZnAr16hQUFLh83hNOnTqFO++8E2fOnMHIkSPx6aefcpk8ERERERHJym60onjZIRQvOwS70Sp3OuSGgC/mBw4ciKioKOk6Pz+/xvtFUXS6Z8SIER7LpbCwEMOGDZMK+fXr17OQJyIiIiIi2f2afdRlTL4r4Iv50NBQjB49Wrrevn17jfcfOHBA2lsfERGB5ORkj+RRWFiIO++8E6dPn0ZycnKNhfykSZNw1113eWRcIiIiIiIiCjwBX8wDwNNPPw2FwvFLXbt2bY1H1GVlZUnx448/7nS0XX2dPn0aw4YNw+nTpzFixAhs2LChxhn5Xbt21fqlAxERERERkae0SEl0GZPvCopivnfv3khLSwMAXLp0CZmZmS7vKygowNKlSwEA0dHRyMjIcHmfxWJBSkoKmjZtij59+uDQoUPVjn369GnceeedKCwsxIgRI5CTk8Ol9URERERE5FMUmhDETO2NmKm9odAE/AnmASFo/l9asGABDhw4gP3792POnDlo1aqVVOADwMmTJzFq1CiUl5dDpVJh/fr1TnvtK8vOzsbq1asBAAcPHsSMGTPwzTff/O6+M2fOYNiwYSgsLAQAWK1WPPjgg7Xmevny5Xr8ComIiIiIiChYBE0xHx4ejq1btyI1NRW5ublIT0/HvHnzkJSUhOLiYuzatQtWqxWxsbHIysrC0KFD3X63IAgu//qzzz6LU6dOSdfbtm1r8K+DiIiIiIjIk+xGq9T0rkVKImfm/URQLLOvEBMTgy1btmDTpk0YO3YsysvLsWnTJhw5cgT9+/fHu+++ix9//LHW5nMpKSl45JFHEBERgZtvvhkLFy50eV9Ne/OJiIiIiIh8ATvZ+ydBFEVR7iTIc/R6PSIjI6HT6aDVauVOh4iIiIiIfFzxMuceYDFTe8uUCQHu13RBNTNPRLUwXgdWjnJ8jNflzoaIiIiIGgE72fsnzswHGM7MU4OsHOV8PWWzPHkQycSm1+PcjCcBAO0WLYSSv48S+QyTwYLcJYcBAMmPJUEdHipzRkRE3sGZeSIiojqqKOSrxkQkv4pCvmpMRBSsWMwT0Q0TV7uOiYiIiIjIp7CYJ6IbNM0cS+unbHbEREGm3aKFLmMikl/yY0kuYyKiYMU98wGGe+aJiIiIiIj8F/fME1HdsZs9EREREZFfYDFPRDesneQ6JiIiIiIin8JinoiIiIiIiMjPsJgnohvYzZ6IiHyUyWBBTuZ+5GTuh8lgkTsdIiLZsZgnohvYzZ6IiHwUz5knInLGYp6IiIiIiIjIz7CYJyIiIiKfx3PmiYichcidABERERFRbdThoRgzu6/caRAR+QzOzBMRERERERH5GRbzRERERERERH6Gy+yJiIiIyOeZDBapi33yY0lQh4fKnBERkbw4M09EREREPo9H0xEROePMPBERERER+ZzyslJsevdNAMD9z76IsIgmMmdE5Fs4M09EREREPs1ksMBus6P4bCnsNpFH0wWJikK+akxEDpyZJyIiIiKflrvkMBRKBWLiHDOz3C9PRMSZeSIiIiIi8kH3P/uiy5iIHARRFEW5kyDP0ev1iIyMhE6ng1arlTsdIiIiogZjJ3siCibu1nRcZk9EREREPk0dHooxs/vKnQYRkU/hMnsiIiIiIiIiP8NinhqNzmhB2oo8KSYiIiIiIqL6YTFPjWZa9j4pnrXmgIyZEBERERER+TfumSciIiIiIgpidqMVv2YfBQC0SEmEQsMy0R8E5cz85s2bMX78eMTHx0Oj0aB169YYPHgw3nvvPVy9etXr4xcXF2PixIkQBAGCIGDnzp1eH9MXLE3pL8XzH+ojYyZERERERFShopCvGpNvC6pi/sqVK7j33ntx33334ZNPPoFKpcKoUaPQvXt35OXlYfbs2ejVqxe2b9/utRz++9//IjExEevWrfPaGL4qUhOKFWkDpZiIiIiIiIjqJ2iKeYPBgBEjRmDLli1QKpX46KOPcPz4cXz88cfYuXMnjh07hq5du+LChQsYOXIkvv32W4+Of+HCBYwePRoPP/wwrl+/7tF3ExERERER1VeLlESXMfm2oCnmZ86cifz8fADA66+/jvT0dKefJyQkIDc3F2FhYTCbzRg7dqzHiu6VK1ciMTERmzZtQt++fbF3716PvJeIiIiIiKihFJoQxEztjZipvblf3o8ERTF/+PBhrFixAgDQqlUrPPPMMy7vi4+Px7Rp0wA4luTPnTvXI+PPmjULRqMRb731Fv73v//h5ptv9sh7iYiIiIiIKDgFRTGfmZkJu90OAJg4cSJUKlW1906ePFmKFy9eDKPR2ODxb7vtNhw8eBAZGRkICeE3XURERERERNQwAV/MWywWbNy4UboePnx4jff36dMHzZo1AwCUlZUhNze3wTls3rwZ3bt3b/B7iIiIiIiIiIAgKObz8vJw7do16bpfv3413i8IgtM9W7du9VpuRERERERERPUR8MX84cOHpVitVqNt27a1PtOpUyeXzxMRERERERH5goAv5o8e
PSrFbdq0ceuZygV/5eeJiIiIiIiIfEHAF/PFxcVSXLEXvjaV79Pr9bBYLB7OioiIiIiIiKj+Ar61eklJiRSr1Wq3ngkLC/vdO5o3b+7RvDzFZDLBZDJJ13q9XsZsiIiIiIiIqDEE/Mx85aPlajqSrrKq9xkMBo/m5Elz585FZGSk9ImLi5M7JSIiIiIiIvKygC/mNRqNFJvNZreeqXpfeHi4R3PypIyMDOh0Oulz9uxZuVMiIiIiIiIiLwv4ZfZNmzaV4srL0WtSXl5e7Tt8jVqtdnv7ABEREREREQWGgJ+Zj4mJkeLr16+79YxOp5NirVaL0NBQT6dFREREREREVG8BX8wnJiZK8fnz5916pqioyOXzRERERERERL4g4Iv5pKQkKTaZTE6FenUKCgpcPk9ERERERETkCwK+mB84cCCioqKk6/z8/BrvF0XR6Z4RI0Z4LTciIiIiIiKi+gj4Yj40NBSjR4+Wrrdv317j/QcOHJD21kdERCA5Odmb6RERERERERHVWcAX8wDw9NNPQ6Fw/FLXrl1b4xF1WVlZUvz44487HW1HRERERERE5AuCopjv3bs30tLSAACXLl1CZmamy/sKCgqwdOlSAEB0dDQyMjJc3mexWJCSkoKmTZuiT58+OHTokHcSJyIiIiIiInIhKIp5AFiwYAH69u0LAJgzZw5WrFjh9POTJ08iOTkZ5eXlUKlUWL9+vdNe+8qys7OxevVqlJaW4uDBg5gxY4bX8yciIiIiIiKqECJ3Ao0lPDwcW7duRWpqKnJzc5Geno558+YhKSkJxcXF2LVrF6xWK2JjY5GVlYWhQ4e6/W5BEGr8+fHjx/H2229X+/O3334bK1eulK7HjBmDMWPGuD0+ERERERERBZegKeYBICYmBlu2bMFnn32GlStXYv/+/di0aRO0Wi369++PcePGIS0tDc2bN6/xPSkpKfjqq6+Qk5ODhIQELFy4sMb7L168iFWrVlX78y+++MLpumPHjizmiYiIiIiIqFqCKIqi3EmQ5+j1ekRGRkKn00Gr1cqdDhEREREREdWBuzVd0OyZJyIiIiIiIgoULOaJiIiIiIiI/AyLeSIiIiIiIiI/w2KeiIiIiIiIyM+wmCciIiIiIiLyMyzmiYiIiIiIiPwMi3kiIiIiIiIiP8NinoiIiIiIiMjPsJgnIiIiIiIi8jMs5omIiIiIiIj8DIt5IiIiIiIiIj/DYp6IiIiIiIjIz7CYJyIiIiIiIvIzLOaJiIiIiIiI/AyLeSIiIiIiIiI/w2KeiIiIiIiIyM+wmCciIiIiIiLyMyzmiYiIiIiIiPwMi3kiIiIiIiIiP8NinoiIiIiIiMjPsJgnIiIiIiIi8jMs5omIiIiIiIj8DIt5IiIiIiIiIj/DYp6IiIiIiIjIz7CYJyIiIiIiIvIzLOaJiIiIiIiI/AyLeSIiIiIiIiI/w2KeiIiIiIiIyM+wmCciIiIiIiLyM0FZzG/evBnjx49HfHw8NBoNWrdujcGDB+O9997D1atXA3ZsIiIiIiIiCgyCKIqi3Ek0litXriA1NRVbtmwBAHTr1g1JSUkoLi7Grl27YLPZEBsbi+zsbAwfPtwvx9br9YiMjIROp4NWq/VU+kRERERERNQI3K3pgqaYNxgMuP3225Gfnw+lUolly5YhPT1d+vnJkycxatQonDhxAiqVCtu2bcPQoUP9bmwW80RERERERP7L3ZouaJbZz5w5E/n5+QCA119/3amYBoCEhATk5uYiLCwMZrMZY8eOxfXr1/1+bCIiIiIiIgo8QVHMHz58GCtWrAAAtGrVCs8884zL++Lj4zFt2jQAjmXxc+fO9euxiYiIiIiIKDAFRTGfmZkJu90OAJg4cSJUKlW1906ePFmKFy9eDKPR6LdjExERERERUWAK+GLeYrFg48aN0nVtzeX69OmDZs2aAQDKysqQm5vrl2MTERERERFR4Ar4Yj4vLw/Xrl2Trvv161fj/YIgON2zdetWvxybiIiIiIiIAlfAF/OHDx+WYrVajbZt29b6TKdOnVw+709jExERERERUeAK+GL+6NGjUtymTRu3nqlcdFd+3p/GJiIiIiIiosAV8MV8cXGxFFfsR69N5fv0ej0sFovfjU1ERERERESBK0TuBLytpKREitVqtVvPhIWF/e4dzZs398mxTSYTTCaTdK3T6QA4vgggIiIiIiIi/1JRy4miWON9AV/MVz7eraZj4Sqrep/BYKhXMd8YY8+dOxevvvrqWVZETAAAAyVJREFU7/56XFycm1kSERERERGRrykpKUFkZGS1Pw/4Yl6j0Uix2Wx265mq94WHh/vs2BkZGZg9e7Z0bbfbcfXqVbRo0QKCINQhWyIiIiIiIpKbKIooKSmpte9awBfzTZs2leLKy9FrUl5eXu07fG1stVr9uyX87u7PJyIiIiIiIt9T04x8hYBvgBcTEyPF169fd+uZin3nAKDVahEaGup3YxMREREREVHgCvhiPjExUYrPnz/v1jNFRUUun/ensYmIiIiIiChwBXwxn5SUJMUmk8mpWK5OQUGBy+f9aWwiIiIiIiIKXAFfzA8cOBBRUVHSdX5+fo33i6LodM+IESP8cmwiIiIiIiIKXAFfzIeGhmL06NHS9fbt22u8/8CBA9L+9oiICCQnJ/vl2ERERERERBS4Ar6YB4Cnn34aCoXjl7p27doaj4nLysqS4scff9zpeDl/G5uIiIiIiIgCU1AU871790ZaWhoA4NKlS8jMzHR5X0FBAZYuXQoAiI6ORkZGhsv7LBYLUlJS0LRpU/Tp0weHDh1qtLGJiIiIiIiIBFEURbmTaAwGgwFDhw7F/v37ERISgmXLlklFNgCcPHkSo0aNwokTJ6BSqbBt2zYMHTrU5buWL1+ORx99VLoeOnQovvnmm0YZm4iIiIiIiCgoZuYBIDw8HFu3bkVycjKsVivS09PRo0cPTJgwAcOGDUNiYiJOnDiB2NhYfP7553UqpgVBkG1sIiIiIiIiCj5BMzNf2WeffYaVK1di//79uHDhArRaLTp37oxx48YhLS0NzZs3r/F5i8WCtLQ05OTkICEhAatWrULv3r0bZWwiIiIiIiKioCzmiYiIiIiIiPxZ0CyzJyIiIiIiIgoULOaJiIiIiIiI/AyLeSIiIiIiIiI/w2KeiIiIiIiIyM+wmCciIiIiIiLyMyzmiYiIiIiIiPwMi3kiIiIiIiIiP8NinoiIiIiIiMjPsJgnIiIiIiIi8jMs5omIiIiIiIj8DIt5IiIiIiIiIj/DYp6IiIiIiIjIz7CYJyIiIiIiIvIzLOaJiIiIiIiI/AyLeSIiIiIiIiI/8/8BRo5tJKqIneEAAAAASUVORK5CYII=",
+ "image/png": "iVBORw0KGgo... [base64-encoded PNG figure data truncated; Matplotlib 3.9.2 plot output]
Vdy89zwHjUNoqmqFpd4FGB7AagBWDfg7Vp/R5R2X6wrfLe+4nNNYiH2SP36sHZkHwLi4QBQSwnFEhBBCHJ0w0A3KFO0SMOoxzw2zJ/Px8fFmuU9SUhLUajWqV69ulvsRAmhH5a8lZOn2c+Uq6jFvDv7hQOptQJmnLYI3eCfXEVmVp8iTptYTi2IYhqbWE2IBcokSx9bcBAB0HdcETq609M5R9Zo5l+sQbEqVyHBkRN2BKNgdVSLDqcc8RyxaAK8iWrVqhbCwMK7DIHZm1NbLevseTnzqMW8Og3cCVV8Dqr8FTL3pUMXvxAoxRp4YiZEnRkKsEHMdDrFD0vv3ocnJ0X7k5aH6tiiuQyLEbhz5KRZpiblIS8zFkZ/K1mqY2La0hHgAQPLDu/jf8DG0Xp7YpEqbzAPQ61FPiDk8eJEDd1HBP/ujU6mjgFm4eAPDD2s/HCiRB6jHPLEstViM+A96FxzQaJC6cBFn8RBibzKS84xuE/sX/dWnuu1tn03hMBLbVHjNfOFtYl2VOpknxNxq+LkiV6GtBt2kmidCfWjaqllIs4AtPbQf0iyuo7EquVqOqylXcTXlKuRqOdfhEDvzbOIkrkMgxK5VCXY1uk0IIbagTIsbrDntPS0tzWrPIo4hW6rE7STtNGg+j0F8Gr0DbxYvbgJr3tZuBzcHoj/SjtA7iCdZT4xuE2IOGolEb9+5cWNUW21aO1RCSOneGdkI0d9c1m0TxyDLy4V/aE3d/kffruAuGBuVv2Y+f5two0zJfHx8PBiGsfj09/xnMAxj0ecQxzI26goYhgH/1T8r+vdlJus6FGwnXwNqtuMuFg4wDANXIY3mEPNTi8WQ3bpVcIDHQ609u7kLiBA7k/4sR5fIB9TwwKmtd9F7eguOoyLWcHDJQgidXQAAwXUbIqBGLY4jsj08FwH1la8EKuU0e1orTyylSYiHbvvIFMdKOi2KKfStJGIbd3FwYHfP3Ua3Cakogyn29AYkIWa1a9EV3Xbq0xwOIyHEtmikKqT8dB3PPj+LpK/OQ5Up4zokh1XmHgLOzs4YMGCAJWLREx0dDbmc1p8S8/mh/2vovuIs3ER8HJnSjtbLm8uYv7Wj8wxfu+1gBfBC3ENwftB5rsMgdohVqwBXV+DVVPua+/ZyHBEhdogB8GoMqeu4JpyGQqzn/QlTsfNLbQG8zqMncByN7cmIugNlkvYNMFahRsqKqwj56i2Oo3JMZU7mvby8sHnzZkvEouf48eNITU21+HOI45gWfV1ve884+qZjFkFNgC9fch0FZ8QKsa6K/fKOy+Ep8uQ2IGI3NHKFLpF3atwYLvXrcxwRIfZlwOettKPzjHabesw7jhM/L4d/9ZoAgJPrf0afOV9xGg8h5VUpp9kTYgkPXuQY3SYV5MCV7AFqTUcsQ5GUBHn+enkeD/K7d7kNiBA75FfNAxN+7ogJP3eEXzWP0i8ghADQFrwTvlq6yoj4CJxCtSa4UmmTeVo3T8ytXqC70W1SQdEfGd8mhJRbXJHe8oyLC2exEEKIvek1c65uu9vkmRxGYpt4LgIEftIM1Ra1Q8hXb0Hg48x1SA6rTMm8RqNBcnKypWLR8+LFC6jVaqs8iziGHwc2N7pNSEUs77jc6DYh5aUWi6HJzdU7FnZgPzfBEEKIHXJ2c9dNrXd2owGeslJlypD01XkqflcJVNqReULMbdbuGwgP9kR4sCdm7b7BdTj2QZoFqFXAi1hAo3S4Sva0Xp5YQsLoMfoH3NwgCgnhJhhC7JRcosT+ZVexf9lVyCVKrsMhxKakrLgKVqHWFb8j3Km0yXzVqlUhEJS5Ph8hxJqiPwL4AiCoKcATOlwl+4knJ+Ley3u49/IeJp6cyHU4xE7o9ZYHqPAdIRZwbM1No9vEMWSnpWDt+OHa7XQquF0WqkwZWJka0ABgtdXsCXcqbTIP0Lp5Yl5rI1sZ3SakvB5lPTK6TUhFMC4uAO/Vj2c+H6FrfuE2IEIIsTNRsyfrtnd+MZvDSGyP3kg8C10hPMKNSp3ME2IuiZkStPvuFO4ki/FD/9fg5ULtZ8yi8LR6B5tiDwB1vOsY3SakIsIO7AfPzQ08Dw/U/uME+J60fIMQcyvcU576yxNSRsyrP3mA/8jGnIbi6Mo0j/3u3bs4ceIEOnfujCZNjH/j69Spk1kCe/nScftWE/PrtuIsJK+mAXVbcRY3v3qf44iIPVjdebXemnlCKkqRlIS43h8CAGrt/53WyhNiIU6uQvSeTu20HJEsLxe+wdXw8vkzAMDABd9zHJHt0EhVYDUagAXA047K81xoWTSXTP7q37t3D61atYJMJoNIJEJMTAzCw8MNzjtz5gwYhjFyh7JhWdYs9yEkW6pEjkwFAODzGF1ST8ygaFu64Ye5i4UDniJPbHp/E9dhEDuSn8jnb9e/fInDaAghxP4cXLIQApEIwXUbAgC8/AI4jsh2ZETdAcPjAa860fGENMmbayb/DRw9ehRSqRQsy0KhUOD48eMlns+ybIU+CDGXsVFX4C7S/lNXa1g0obU9hJDKimWhkUigkUgA+llICCGkkhEGuum2q0QaDuwS6zJ5ZL55c/2+3K+99lqx5wqFQrz55pvljwrA+fPnoVKpKnQPQvI1qOqJBynavs1bR77BcTR2JGJbwei8A66ZJ8TchDVrQH7rtm6bEGIZcolSV8W+67gmcHKlWjqOotfMuTi4ZCHXYdikKpHhyIi6A1GwO6pEhtMU+0rA5L+Bjh07Yv369Th48CC6du2Kzp07F3uur68vTp8+XaHAqlatitRUahVBKm5tZCuMjbqC8GBPrI1sRcXvzMnF2+Gm1hNiScqnCeB5eOi2CSGWUbQ1Ha2fJ6R0PBcB/Mc05ToMUkiZ3k4ZNWoURo0aZalYCLEILxchdo6p2EwRQgixNEVSEjR5eYBGA56bG5yovzwhFqNRs8hIzgMAVAl2K+VsYk/2f/810hOeQujswnUohFRYpZ0bQevmCSGEOJK43h+CcXUFK5VCI5NRf3lCLEijZqGUq3XbxHGkJcRDKZMVtFcjxIaZPZmPi4sDn8+v8H2uXLkCtZqqjpOKy5YqMTbqCgDQNHtzy3wKrG2n3R57FvChNb6EVATDMGBcXQGA+ssTYiFyiRKpCTkAAKGIh8wUCccREWuR5eVCIZVSgVFiN8zeT6BGjRqoVq1ahe9TrVo11KhBiQGpmGypEm2/PYn/nrzE5fhMDN9MbZ7MKj+RL7pNCCmzkDW/QJOTA01ODkJoVJ4Qizm25iYEr7rcKBUaVAl25TgiYi37Fs+jRJ7YlUrbHHD37t349ddfuQ6D2LjIjReR+2oanVrDIvZZNscREXsiVogx8sRIjDwxEmKFmOtwiA1Ti8VIHDoM4PPBc3dH0rjxXIdEiN0qPK1eKOKh+yfFd2gi9uXF44cAo51fz2o0HEdjezRSFdLWxSJtXSw0Uuo6VhlU2mR+8uTJGDlyJNdhEBtXNHl3FVbaf/K2aexZ49sOYurpqUa3CSmrxPHjAbUaUKuhkUq5DocQu6ZRs1AptImcT5AbtaVzELK8XG1NLpYFw+N
B6EwzMsoqI+qO0W3Cnf9v797joqrz/4G/zgzMMKCDKKioeEHxgmJ5LTUr17bELM289CsRoV3NMjNrt6i1e9laS+ZlU9u8gLurVopmYt80rUxbFDXNS1qIF7xh6gwww1zP74+JI0MDDDDDmcvr+XjMY98nzjmft22Z7/l8Pu+PzzbAA9gEjxpOIQD2Sv8YbZl1u3zJBKKoDsDzPD6LqKFMP51wzBaJImCzoVPOBrlTIgpY1y4ZEKpWSjEFh03vvonYzl1xseAkAOD/vfGOzBkRNVyjFPOFhYW4evUqysrK3C7QzWaz1/LZvHkzVq1ahfz8fFy4cAGRkZGIj4/H+PHjkZqaiubNm3ttbLvdjq1bt+KTTz5BXl4eLl68CJ1Oh6ZNm6Jjx45ISkrCkCFDMHLkSI/0Hgh2m2fehvsWfgcA+OzJIYiL4rew5Dnzh82XZuTnD5svay7kv2x6PewGg6OQVyohaDRQtW0rd1pEASuqVbjUAK9l+6YyZ0ONxW634er5c1CFaRDdvgMio1vKnZLfaZGSKM3It0hJlDkbArxYzK9fvx4ffvghvv32WxjrsWRQFEUIgmfPjLhy5QpSU1OxZcsWAEC3bt0watQoFBcXY9euXdizZw/eeecdZGdnY/jw4R4dGwB++OEH/PnPf8bevXsREhKCAQMGoGfPnigtLcXRo0dx4MABHDhwAFlZWejZsyd+/PFHj+cQTHRGC1777CgGdIxiF3vyCq1Ki+X3LJc7DfJzZ6c7748P69pVpkyIgoNCCWlmXtHwA5jIX4jVxOQ2hSYEMVN7y50GVeLxYt5isWDChAnYtGkTgPotlfd0EQ8ABoMBI0aMQH5+PpRKJZYtW+a0J//kyZMYNWoUTpw4gZEjR2Lbtm0YOtRz3bl37tyJkSNHwmg04rHHHsPrr7+O6Oho6eeiKOK///0v0tLSvLoqIZhUHEdXEa+ZOkjGbAIQj6Uj8gjTiZPS+fIAeL48kZcplArExDWROw1qZAqlEi07xsudBpFHebwb2HPPPYeNGzc2aL+7N/bKz5w5E/n5+QCA119//XfN9RISEpCbm4uwsDCYzWaMHTsW169f98jYR44cwX333Qej0YiXX34ZH3zwgVMhDzi+wHj44Yfx2muveWRMAqx2EUfP63H0vB5WO7+C9bggP5aOnezJU1TxnaRCPqxXT54vT+RFJoMFdpsdxWdLYbeJSH4sSe6UqJHc/+yLLmNyH7vZ+x6Pzsxfu3YN//znP6WZdVEUceutt+Luu+9Gly5d0KJFC2g0mlpn3kVRxIMPPuixYvrw4cNYsWIFAKBVq1Z45plnXN4XHx+PadOm4f3338eVK1cwd+5c/P3vf2/w+I8//jhKS0uRlJSEl156qcZ7H374YezYsQMdO3Zs8LhBr/KXQmym6FnG64CpFBBtgKAEVBFyZ9Toqnay53J7qi9BGQJFeLgUE5H35C457DQzz072wSMsogkmvDxX7jT8WtVu9lxyLz+P/qnh66+/lpaIKxQK/Oc//8HEiRPr9S6VSuWxvDIzM2H/7SzJiRMn1vjuyZMn4/333wcALF68GK+88go0Gk29x964cSO++eYbAMDs2bOhUNS8GCIuLg5bt26t93hUSeUvjbywdSOorZ0EhIYD5hJHQR/dTe6MiPyWEBKCsB495E6DiCig6YovIfuvMwEAKfMWIDKmlcwZETWcR5fZFxYWAnAsGR8zZky9C3nAc0vtLRYLNm7cKF3X1tiuT58+aNasGQCgrKwMubm5DRq/YkWAIAi49957G/QuqiPOzHuP3QJYDY7OQWotEBomd0aNrnL3enayp4Zot2ihy5iIPK/ysnousQ8uFYV81ZjcV7mDPbvZ+waPzsxX7lr/hz/8oUHv2rBhg0caweXl5eHatWvSdb9+/Wq8XxAE9OvXD9u3bwcAbN26FWPHjq3X2DqdTvoyIC4uDjExMfV6D9Xd2WsGHDirAwD0bheJEKXH20MEN1EAQiMAS5njeuJqefMh8mNKrRYdslbJnQZRUFCHh2LM7L5yp0GNrLysFOZyR50SGhYGAVyxWR/sZu97PFrMVz4XXdvABj6DBnmm8/jhw4elWK1Wo60bZ/d26tTJ5fN1lZ+fL30h0bXSUUMFBQXYsmULTp06hdLSUkRHRyMpKQl33XXX7xrjUf3c+/63CA9VwGCx49A5HfbP+aPcKQUWZQgQe9ONa00z2VKRC/fMExER+Yecea8hRKWGpdwIS3k5Hl3wodwpEXmER4v5u+66C0qlEna7HUVFRZ58db0dPXqjUUObNm3ceqZywV/5+bo6ePCgFEdFReHChQuYNWsW1q1b5/J+tVqN6dOn480330T4b82QqP4EQUCEynGALM+Y97DRi52PpSMiIiLyUcVnCmE1myAoFAgNC+N+eQoYHl17HBsbi7S0NIiiiJycnAa96+mnn8ajjz7a4JyKi4uluGIvfG0q36fX62GxWOo19okTJ6T44sWLuPXWW7Fu3TpMmjQJe/fuhcFgwLVr17Bp0yb07t0bJpMJ8+fPx+233+60NYDq7vOnhrqMyUM2PgG07u34bHxC7mxk8fqQ13H86nEcv3ocrw95Xe50iIiIqDpiNTGRn/P4RuL58+ejf//+yMvLw3vvvVfv96xZswYrV65scD4lJSVSrFar3XomLMy5mVfld9RF5aP1vv32W5w5cwYvvPACsrOz0b9/f2g0GjRr1gz33Xcf9uzZgwEDBgBwLM9PTU11awyTyQS9Xu/0ISAuKhyHXrkHh165B3FRXOXgcXYLcPGQ42Ov35dd/m7Od3PQvXl3dG/eHXO+myN3OkRERFSNFu3iXMZE/s7jxXx4eDh27NiBKVOm4C9/+QumTJnSoKXqDVW5KZ+7x91Vvc9gMNRr7KqFdefOnfHKK6+4vDc8PBzLli2Trj/77DN89dVXtY4xd+5cREZGSp+4OP4GRY1AFFzHRFQnNr0ehQ8/gp8GDEThI4/Axi9kiYg8TqFQQhWmgSpMA4VCKXc6RB5Tpz3z6enpdXp5UlISsrOzkZ2djbi4OCQmJiIqKsqtGXKdTlensapT+Yx4d7vjV72vvvvXq77nkUceQWho9Xu3b775ZvTr1w/5+fkAgGXLltV6KkBGRgZmz54tXev1ehb0AHRGC6Zl7wMALE3pzz3znqYMcSyxD2Lzh82XmuDxaDqqr3MznoTp5EkAgOnESZyb8SQ72xMReZhCqUTLjvFyp0HkcXUq5leuXAlBqNssXMV58WfOnMHZs2fr9Fxdx3KladOmUmwymdx6pry8vNp31EWTJk2crm+99dZanxk8eLBUzH/99de13q9Wq93ePhAsdEYLbnv7KxgsNoSrlHh01V588thgudMKLBNXA2sn3YiDkFalZQd7IiIiH1deVgq7zYYrZ08jun0HjPnrS3Kn5LfsRit+zXasuG6RkgiFxqO91Kke6rXMXhRFtz+CIEgfd5/1pMpnu1few16TyqsCtFptjbPpNal6PJ87M+bx8Te+Nbx48aLTNgFyz7TsfTBYbAAAg9mGExfr1/OAqmG87lzIB+GxdE
Se0m7RQqgTEgAA6q4JaLdoocwZEREFlk3vvinNzCsUSoRFNKn9IXKpopCvGpN86vx1ikKhcDpP3lvOnj3rkcI+MTFRis+fP+/WM5WP1av8fF116NDB6brykv/qVJ3Nv3btmlvP0Q1Wu+NLIbsIKBUCurbib9oeVVHIV8RTNsuXC1EAEEJCENajB9otWghllS+BicizTAYLcpccBgAkP5YEdTi34QU6u92GK2dOAwCi23eo5W6qjt1ohfl8KQAgtHUEBAV7JvmCOhfzMTExOHXqlDdycRIbG4vLly83+D1JSUlSbDKZUFRU5HSOvCsFBQUun2/I2ADcmmWvuhWganFPbhBFRKiUMFjsCA9V4qMpA+XOiIjIpbPTp8N04qQUd/z3v2XOiCiwfb74EH49XybFY//ST+aMyOt4LJ1H/Jp9FKGtImC5VAbLxTLEPsc/X/sCj3ez9zUDBw5EVFSUdF2xH706oig63TNixIh6j111j7w7KwMqf4HRpEmT3y3Vp9qFKBXo2TYSAzpGoWdbLZvfeVrlPfJBul+eyFMqCvmqMRF5R0UhXzWmwFWxxL5lx3golOxk3xCCUoCqTROo2jThfnkf4bPFvKf2zoeGhmL06NHS9fbt22u8/8CBA9Le+oiICCQnJ9d77Pbt2+OWW26Rrvfv31/rMwcPHpTi22+/vd5jB7N3xt+Eo+f1OHpej3fG3yR3OoGnXHfjjPlyz5w6QRSMbHo9RLsd9pIS2A0GqOI7yZ0SUUA7//M1mI1WmI1WiHYRLdrU77Qi8i/3P/uiy5jqJmp8V5jPl8J8vhRR47vKnQ79pk7F/IEDB7Bt2zZv5eLk4sWLsNlsHnnX008/DYXC8Utdu3ZtjUfUZWVlSfHjjz/e4P3q06ZNk+JPPvmkxnv1ej2+/PJL6XrSpEk13E3VeXrtQZcxeciS2wBzqeOz5Da5syHyW2f+PBVi2W8zg6IIQclZDiJvMRks2PDuAenaYrLh3if4hX8wCItoggkvz8WEl+ey+V0DXPv4hDQrf+3jE3KnQ7+pUzF/0003NaghnFx69+6NtLQ0AMClS5eQmZnp8r6CggIsXboUABAdHY2MjAyX91ksFqSkpKBp06bo06cPDh06VO3Yqamp6NOnDwDHEv+PP/642nvfeOMNaV993759MXHixNp/cfQ7lbvXs5O9F1jKXMdEVCflP/5448JuhxDCYp7IWyqa3lXG5ndE5O98dpm9py1YsAB9+/YFAMyZMwcrVqxw+vnJkyeRnJyM8vJyqFQqrF+/3mmvfWXZ2dlYvXo1SktLcfDgQcyYMaPacRUKBdauXYuWLVsCANLT05GTk+N0j81mw9tvv4133nkHANCqVSt8+umn0moCqpvK3evZyd4LYvu6jonIbTa9Hqi8nUyp5LF0RF7Wom2EFD/wbB8ZMyHyPy1SEl3GJC9B9PTB7tUoLy/Hvn37cO7cOVy7dg2CICAqKgrt2rVDv379EBYW5vUciouLkZqaitzcXABA9+7dkZSUhOLiYuzatQtWqxWxsbHIysrCXXfdVe17li9fjkcffVS6vv322/H111/XOPaRI0cwadIkaU98t27dcNNNN8FsNmPPnj24dOkSAGDIkCH473//69aZ9K7o9XpERkZCp9MFbfM8ndGCadn7AABLU/qzAZ6n8Zx5ogY7PTkV9vJylB85AgDo/H9fQFXLSStEVH88ko6I/Im7NZ1Xi3lRFPHJJ5/ggw8+wHfffQer1eryvpCQEAwdOhTTp0/H2LFjIQjePbfws88+w8qVK7F//35cuHABWq0WnTt3xrhx45CWlobmzZvX+LzFYkFaWhpycnKQkJCAVatWoXfv3rWOa7VasXbtWqxbtw4//PADLl68CJVKhdatW2PIkCGYOHFig7rnAyzmiYj8wenJqU7XHbJWyZQJUXBgMU9E/kT2Yv7IkSNIS0uTjnmrbZiKAn7AgAFYsWIFevTo4Y20Al6wF/NHL+hw38LvAACfPTkEibGRMmdERPR75qIinBrzAACgU84GzsoTeVlOpvOJQmNmc5tYsCgvK8Wmd98E4OhmzyZ45A/crem8sil7165dGDx4MPLz86UivqbZ9oqfiaKIvLw83Hrrrfjuu++8kRoFuIpCvmpM5Gl6sx7pX6Qj/Yt06M16udMhP3Mh4wWE9eiBsB49cCHjBbnTISIKWDnzXsPlwgJcLixAzrzX5E7Hb9mNVhQvO4TiZYdgN7pebU2Nz+Otc8+cOYN7770XJSUlTkV6s2bN0K1bN7Ru3RoREREQRRFlZWW4cOECTpw4AZ3OcV61IAgoKSnByJEj8eOPP9Z77zgFH53RApv9xgoQpcK72zUouM3aMcspXn7PcvmSISKiGiU/luS0zJ6CQ3lZKc6fOA4ACA0Lw5Uzp2XOyH/9mn3UKY6ZWvsWY/I+jxfzTz31lFTIR0REYPr06Xj44Ydx0001n+V54MABrF69GsuWLUNZWRlKS0sxa9YsfPrpp55OkQLUtOx9iAgVUGZxFPQ9YrmMioh8U7tFC3FuxpNSTETeZTJaUXy2VIq5Zz445Mx7zbFKWBRhKS9Hm4TucqdE5FEeXWZ/+fJlbN68GYIgIDExEYcOHcLf//73Wgt5AOjTpw/+8Y9/4IcffkCPHj0giiI2bdqEy5cvezJFCnBKpRLasBBow0LQRM3/UJP3zB8232VM5A5bSQnKjx1D+bFjsJWUyJ0OUUAzGSxYPed7WEw2QBSx9o29cqdEjeTKmdNQhWkg/Hbc85jnXpI5I//Fo+l8k0eL+Z07d8JmsyEsLAwbN25Ex44d6/yO+Ph45OTkICwsDHa7HTt37vRkihTA3hl/k2P7htmGzi2bYGlKf7lTogCmVWmx/J7lWH7PcmhVwddskurPXFSEX4bfBXtJCWC3S43wiMg7vlx+RIotZruMmVBja962HSymcgBA684JbH7XAApNCGKm9kbM1N5QaDy+uJvqyaPF/Llz5wAAo0ePRnx8fL3fk5CQgNGjRwMAioqKPJIbBb6/fPwDeraNxICOUVCHKHi+PBH5pFOjx0ixvaxMvkSIgkjL9jeKuIl/GyBjJtSYFAolVGEaqMI0UCiUcqdD5HEeLeYrGt7dfPPNDX5X3748MoTqxmoXcfS8HkfP62G1e+XERSKiBrMbjUClE1465WyQMRuiwHf7w91w7ZIRoWolJr1+K7QtNHKnRI1EoVSiZcd4tOwYD4WSxTwFHo8W823atAEAhIY2fEY0JMSxfCM2NrbB76IgIYquYyIiHxLWsyegUABKJcJ69+YZ80Re9s1/fkJMXBPExDXBV6uOyZ0ONaL7n33RZUwUKDy64aFfv34AgOPHjzf4XRXv4Aw9uUNntODEJUeX2q6tmyKEx9KRF+nNeulouvnD5nPPPNVJ+399yE72RI3oytlSGEosCFUp0KIt90wHk7CIJpjw8ly50yDyGo/OzHfp0gW33HILNmzYAL1eX+/36PV6rF+/Hn369EHXrl1d3vPggw9i+PDh9R6DAsu07H3o2srxH+gTF0vY/I68quoZ80RE5Lsq1uo5mt9x5
R4RBQ6PFvMAsHDhQuj1ekyZMgVWq7XOz9tsNqSlpUGn02HBggXV3rd79252uicnIUoFEttokdhGy+Z35FVWuxXHrx7H8avHYbXX/fc5Cm4Vs/JVYyLyDkEQEKpWIlSthELp8T/6EhHJxuO/o/Xv3x+rV6/Gl19+iUGDBuHLL7+E6Ob+5W3btmHQoEHIzc3Fv/71LwwePNjT6VGAqjwTz1l5IvJV5qIiGPLzYcjPh91kkjsdoqDQPNbR8K5FmwgkP5YkczZERJ7j8UMCX3vtNQDAfffdhzVr1mDEiBGIjo5G//790aVLF2i1WqlBnsVigV6vxy+//IJ9+/ahuLhYerawsFB6lyulpaWeTp2IyC0hihB0b95d7jTID50aPcbRoNNuR/mPP6Lrnt1yp0QU8BRKBWLimiD5sSSow7lyL1iUl5Vi07tvAnA0v+MZ8xSIBNHdaXM3KRQK6Yi6yq8WhJobktXl3or7BUGAzWarZ6aBSa/XIzIyEjqdDlpt8DTlemjZHqfrNVMHyZQJBQM2wKP6Otazl9N1jyM/ypQJUfDYvOggrGY7AGDMbDZWDhb/+dszuPjLSQBA684JePiNf8icEZH73K3pvLpxSBAE6ePJe4mq4hnzROQPwnr2dBkTEZHnlJeV4sLJnyDa7RDtdqmoJwo0XivmRVH06ofICc+Yp0bEbvZUX+3/9SHC+/VDeL9+aP+vD+VOhygoXCjQ4/zPOtw2IUHuVKiRVCyvr8DagQKVV4r5Vq1awW63e/XTqlUrb6RO/qryig6u7iAiH6XUatEhaxU6ZK2CMoi2QhHJKVTl6GS/4R8H5E6FGlFomEaKYzu7Puqa3Gc3WlG87BCKlx2C3ciTfHwFz+cgv6czWvDTBT3KzDbH6bH89pW87PUhr0tH070+5HW50yEiIqJK7n/2RbTs0Anq8Ai07Z6IsS+8KndKfu/X7KMuY5KXx7vZNxYul6EK07L3QRAERKiUABznzRN505zv5kjd7Od8NwfL71kuc0bkD8xFRTg15gEAQKecDVC1bStzRkSBzWSwQB0eCovZBogiJs4ZKHdK1EjCIprgodfmyZ0Gkdd5vJhfsWIFNBpN7Tc20IIFC2A0Gr0+DvmHrq2a4MQlx3GFPGeeiHzRqftHw15WJsXd8vfJnBFRYPty+RGMmnEzYuO1sJrt0Lbw/p9PiQKR9Vo5TEUlgMUOVbumiE7rVftD1Cg8XsynpqZ6+pUuTZgwoVHGId/3zvibcO/73wIAPn9qKCI1PEOWvGv+sPlOR9MR1cam10uFPACnmIiIyJdden+/48QxlRKWywYoNH67uDvg8P8J8nsz/3sAZWabFG94fIjMGVGg06q0XFpPdXJ2+nTnvxARIU8iREHkj+k3jn9MfixJxkyIiLyj0Yp5o9GIPXv24PTp0/j1118hCAJatGiBDh06YNCgQQgLC2usVCjAHDqncxkTEfkK04mTjgL+txn5zps2ypwRUeBThztW6o2acbO8iRD5uVZP9cWl9/dLMfkOrxfzO3bswLx58/DVV1/BanV9jEFoaCjuuusu/OUvf8Edd9zh7ZQogOiMFoiiCLsIKBUCwkPZ/I68S2/WOy2x16p4vBjVTp2QANPJk0DTplAnJLD5HRER+Y2QqDC0fWWw3GmQC16rfEpLSzFhwgTcdddd+L//+z9YLI6iy9XHbDYjNzcXf/jDH/DQQw+hpKTEW2lRgJmWvQ+922mhVDjOlt8y63aZM6JAV1HIV42JatLmnXkuYyIiIqL68koxf/36dQwaNAiffvqpdIScIAjV3l/xM1EU8fHHH2PIkCHQ6bhcmmpntYsoKDYgQqVEn/bNEBcVLndKRES/c/6vf3UZExGR5+mKL2FR2kQsSpsIXfEludMh8hqvLLN/4IEHcOTIEacivVWrVkhMTERsbCyaNGkCURRRVlaGoqIiHD9+HJcu3fgX7ciRIxg7diy2b9/ujfQoQOiMFhw/r0ep2QalQoDVZpc7JQoC7GRPdWUuKoIx37HXUBER4dg/T0ReV3HO/OZFB/HH9J7SHnoKfNl/nekUz1ixVsZsiLzH48X8v//9b3z99dcQBAFhYWF44oknkJqaip49e9b43I8//ohVq1bhgw8+gMFgwM6dO/Gf//wHDz/8sKdTpAAxLXsfjFa7tMS+oJhHPZH3sZM91dWpMQ8ACgVgt8NeVgZNXzYPImoMFefMA0DuksMYM5v/7hFRYPH4Mvu33noLANC5c2ccPHgQ8+bNq7WQB4BevXrhnXfewf79+9G5c2eIoii9y9M2b96M8ePHIz4+HhqNBq1bt8bgwYPx3nvv4erVqx4fTxCEOn26d+/u8RwCVeWGd11bNZExEwoWerMe6V+kI/2LdOjNernTIX/w23YzAIBSibglH8iXCxFREEiZt8BlTBRoBFGs/KeMhiksLER8fDxUKhX27duHXr161es9hw8fRv/+/WG1WvHLL7+gY8eOHsnvypUrSE1NxZYtWwAA3bp1Q1JSEoqLi7Fr1y7YbDbExsYiOzsbw4cP98iYQM39Alzp1q0bjh8/Xq+x9Ho9IiMjodPpoNUGdpdtndGCR1fm4cSlUnRt3RQfpQ5ApIZL6Mi70r9Id7rmLD3VpmDcOJh+PAIAUPfqifhPPpE5I6LgwGX2ROSv3K3pPLrM/n//+x8AYMSIEfUu5AEgKSkJI0aMwObNm/G///3PI8W8wWDAiBEjkJ+fD6VSiWXLliE9/cYfyk+ePIlRo0bhxIkTGDlyJLZt24ahQ4c2eNwKGo0G7du3d+ve+Ph4j40byCI1ofhk+hC50yAiqpHl9BkomjaVYiIiIn9iN1rxa/ZRAECLlEQoNF4/3Zzc5NFl9hVN7AYOHNjgd91yyy1O72yomTNnIj8/HwDw+uuvOxXyAJCQkIDc3FyEhYXBbDZj7NixuH79ukfGBhx/T44fP+7Wp2LlAJHPuXYaeLu943PttNzZyKJy0zs2wKPa2PR6iKIIu8EAiCLUCQlyp0QUNL5cfkSKc5ccljETIv9WUchXjUl+Hi3mjUYjBEFAkyYN37tc0fHeaDQ2+F2HDx/GihUrAACtWrXCM8884/K++Ph4TJs2DYBjSf7cuXMbPDZRQFk61HUcRCoa4C2/Zzm0qsDeykINd27Gkwjr2hWK8HBAELhfnqgR2W2OnaRXzpVJMQWH8rJSrHs1A+tezUB5Wanc6RB5jUeL+ejoaIiiiNOnGz5jd/r0aQiCgOjo6Aa/KzMzE3a749iyiRMnQqVSVXvv5MmTpXjx4sUe+TKBvENntOChZXvw0LI90BktcqdDROSSEBKCsB49ENajB5QB3suEyLeI1cQU6Da9+6bLmOqnRUqiy5jk59FivlOnTgCAjRs3oiF99Ww2GzZs2OD0zvqyWCzYuHGjdF1bY7s+ffqgWbNmAICysjLk5uY2aHzynmnZ+1zG5EXTvnUdE5FL7RYtdBkTkXeZDBZcveCYkGkeGw6F0uMHOJGPKi8rxeXCAlwuLIDdbpM7Hb/H/fK+zaO/sw0ZMgTh4eE4deoUXn311Xq/56WXXkJhYSHC
w8Nx2223NSinvLw8XLt2Tbru169fjfcLguB0z9atWxs0PlFAieoAPH/G8YnqIHc2RD5PqdWiQ9YqdMhaxVl5okaUu+QwmsdqAABXLxiQ/FiSzBlRY1k/92WYjAaYDGUoPn0K9z/7otwp+TXul/dtHv1qRa1W48EHH0R2djZef/11FBcX44033kBUVJRbz//666/IyMjAv/71LwiCgPHjx9e4JN4dhw/faHiiVqvRtm3bWp+pvBqg8vMNZbfbsWvXLuzevRtnz56F1WpF8+bNkZCQgGHDhjV4FUKweWf8Tbj3fcfs8OdPBef+bWp8erMes3bMAuBogMd980REvqliNj66XQSPpQsC5WWlWPdKBorPnAIACAoFLCYTwiIa3suLyFd59Jx5wLHXvXv37jCbzQAcjexGjhyJ4cOHo0ePHmjTpg0iIiIgiiJKS0tx/vx5HDt2DNu2bUNubi4MBgNEUYRGo8Hx48cRFxfXoHxmzpyJhQsdSxs7deqEgoKCWp955ZVXpJUFWq0WOp2uQTkIgoAOHTpIv6bqJCcn4+9//zuSkur/7XEwnTP/0LI9Ttdrpg6SKZMgYrwOrJ3kiCeuBjTN5MxGFjxnnurCptfj3IwnATiW2XN2nqhxmAwWfLn8CEbNuFk6b54C27pXM3D2qPMknEoTjidXrpMpo8DAZfbykOWceQDo0KEDPvzwQ0yePBmCIKCkpATr1q3DunW1/4tU8b2CQqHARx991OBCHgCKi4uluGIvfG0q36fX62GxWBAa2rD/CJw+fRoRERF46aWXMH78eMTHx8Nms+HIkSP48MMPsWLFCuTm5mLnzp3IysrCuHHjGjReoNMZLThSpIfBYkO4SolurZvKnVLgu3gYWPLbtpc2fRxF/ZTN8ubUyPRmPY5fdXwh16VZF4Qo+B80qp65qAi/3H0PYLNBERGBs9Ono+O//y13WkQB78q5Eqx9Yy9Cw5QAAFO5lcV8ELBazL/7a5PfYa+ShrKXW2E+XyrFLOZ9i1e6gUyaNAmLFy9GSEgIBEEA4CjUa/pUUKlU+OCDD/DQQw95JJeSkhIpVqvVbj0TFhZW7Tvqq02bNjhw4ABeffVV9OrVC+Hh4WjatCluvfVWfPTRR1i1ahUAx/F+jzzyCL7//nu33msymaDX650+wcDR8M7xz43BbAM8u8CEXFl2x434/AH58pDRrB2z0KVZFwDAz9d/5jnzVKNTo8cANkfzJXtZGUwnTsqbEFGQWPfWXqfr9fP2y5QJNaZfz511up746t8RGdNKpmwCx6X397uMyTd4rbXn9OnT8e233+KWW25xKtYFQXD6VBBFEUOGDMF3332HP//5zx7Lo/LRcu7uv696n8FgaFAOhw8fxg8//ICEhIRq70lJScHDDz8MADCbzXjiiSfcevfcuXMRGRkpfTyxmsFfCIKACJUSESolQtiltnEIlf4+T1wtXx4yClGEoHvz7ujevDv3y1ON7FWONlXX8N8AIvIc0S53BiQHQRCgDo+QPu2695Q7JSKv82oFNHDgQOzevRu7du3Cc889h0GDBiE2NhZqtRpqtRpt2rTB4MGD8fzzz2PPnj349ttva+02X1cajUaKK/bx16bqfeHh4Q3KoVevXoiOjq71vpkzZ0rx/v378e23tR/9lZGRAZ1OJ33Onj1b6zOB4J3xN0EURZSZbejcsgmWpvSXO6XAN/VrQBAAhRJ4bFdQ7pevPBPPWXmqTVjPnoDC8Z9ZRUQE4pZ8IHNGRMEhJs654dnYv/aVKRNqTCnzFriMqWFaPdXXZUy+oVE2PQwePBiDBw9ujKF+p2nTG3upTSaTW8+Ul5dX+w5vGjBgACIiIlBWVgYA+PLLLzF0aM1d2iu+GAk2T689KM3MKxUCIjXcC+d1rZOAl67KnYWstCotG96RW2wVW54EAYomTdBpYw6b3xE1ktFP90HuksMIUTm+TNM219TyBAUCdXgEWnaMl2LyjJCoMLR9RZ46jmoX8GuTY2JipPj69etuPVO5e71Wq21w8zt3KRQKdO7cWbo+ceJEo4zrj366WIIysw1lZht+utjwngZERJ50bsaTMJ86BUV4OCAIuJDxgtwpEQUNdXgoxszui1EzbpY7FWpEm95902VMFMgCvh1hYmKiFJ8/f96tZ4qKilw+3xgqrwK4ejW4Z0Grc/aaASXlVgCAUhDY/I6IiIgoiJWXleJyoeP46ej2HaBQKGXOiKhx+OzMfGxsLEJCGv5dQ+Uz200mk1OhXp3KZ9E35Mz3+qi8xD8igkuEXBn5/o1eAjZR5LF0RORz2i1aKDW8U3dNQLtFPB6JqDHofzXiw6e/wYdPfwP9VWPtD1BA2PTum4iO6wAAuHLmNO5/9kWZMyJqHD5bzANw6oJfXwMHDkRUVJR0nZ+fX+uYle8ZMWJEvcfW6XR44403pGPn3FF59UCbNm3qPXagqjwrX+GjKQNlyoaIqHpCSAjCevRA3AcfcL88USNZ+8aNY+l4JF1wUSiVaNkxHi07xiMsokntDxAFAJ8u5j0hNDQUo0ePlq63b99e4/0HDhyQ9tZHREQgOTm53mNfu3YNc+bMwbx589y6/9y5c7hw4YJ0XVvzu2B07/vfQqh0fXOcls3viMjnnJ0+HeXHjqH82DGcnT5d7nSIiAJa5Zl4zspTMPHannmz2YxNmzZhx44dOHLkCK5evYqysjK3Z9uLi4s9lsvTTz+NrKws2O12rF27Fu+88061Z85nZWVJ8eOPP+50tF19HT9+HJcvX0bLli1rvK/y2M2aNWvQFwmBrIlaCYPFcYjsqvRbZc6GiOj3TCdOuoyJyLsm/m2ANDvPI+mCR1hEE0x4ea7caRA1Oq8U85999hmmT5/uNMsMuL9sXhAEiKIIQRBqv9kNvXv3RlpaGj766CNcunQJmZmZeP755393X0FBAZYuXQoAiI6ORkZGhsv3WSwWpKenIycnB126dMGqVavQu3fvase32+14+eWX8cEH1Z8xXFBQgLffflu6fv755xEZGenuLzFo/GtKf0xY8j0AYN1jt3JWnoh8kiq+E8p/PAIACOvVU+ZsiIKHtoUGf37vdrnTICJqFB5fZr9p0yY8+OCDOH/+vFS8i6JYp/3vntgrX9WCBQvQt6/jG9o5c+ZgxYoVTj8/efIkkpOTUV5eDpVKhfXr1zvtta8sOzsbq1evRmlpKQ4ePIgZM2bUOv6SJUswY8YMlx3qv/rqK9x5550oKXEcsTZu3Dj89a9/resvMSj8aeU+aMNCoA0LwZ9W7pM7neBhvA58NAJ4uz2w/B7HNRFVS1CGQBEeDkV4OARlwB8cQ+QzTAYLcjL3IydzP0wGi9zpEPk9u9GK4mWHULzsEOxGa+0PUKPy6J8wrFYrZsyYAavVKs2ui6KIHj16oHPnzoiMjHS7Q/3atWthMpk8llt4eDi2bt2K1NRU5ObmIj09HfPmzUNSUhKKi4uxa9cuWK1WxMbGIisrq0771atbQRATE4Np06bhP//5D0pKSrB48WJ89NFHGDhwINq2bYvy8nIcOnQIv/zyCwBArVbj+eefx0s
vveSxVQlEHrF2ElB81BFfPua4nrJZ3pyIfFhF8zsialy5Sw5L8ZfLj/CseaIG+jX7qFMcM7X61cjU+DxazO/atQvnzp2TCtFx48bhH//4B+Li4ur8rq1bt+Ly5cueTA8xMTHYsmULPvvsM6xcuRL79+/Hpk2boNVq0b9/f4wbNw5paWlo3rx5je9JSUnBV199hZycHCQkJGDhQtdHDkVERGDJkiXIzMzEtm3b8MUXX+DAgQM4fvw4vv/+eyiVSjRv3hx333037rzzTqSlpaF169Ye/TUHkqMXdCgpt0IEEKFWYussLqMjIt9j0+shWq0wnTwJddcExNWwxYqIiIiovgTRg2vaFy9ejCeffBKCIKBv377Yu3dv7Q9VIzY2FpcvX4bNZvNUekFBr9cjMjISOp0O2gA7CqnzC1tgszv+cVUqBPzy1kiZMwoixuvAfx5yzM637AH8v7WAppncWRH5pNOTU52uO2S5fzwpETWMyWCRZueTH0uCOpy9dYgawm60SrPzLVISodBw61hjcLem8+j/G3q9XoonTpzYoHdNmDDB6X1EFYV81ZgagaYZ8OhWubMgIiKqkTo8FGNms4s9kacoNCFcWu/DPFrMVz56raHLxd9///2GpkMBplfbpvixqESKiYh8Uezct3BqzAMAgE45G2TOhoiIiAKVR7vZ33nnnVJ88eJFT76aCP/+0yDcGt8ct8Y3x7//NEjudIiIXDpf6TSS8zyZhKhRsZs9EQUTjxbznTt3xgMPPABRFLFhQ8NmIx588EEMHz7cQ5mRv9MZLZiW7TiKbmlKf54vT0Q+y3TipMuYiLyvajd7IqJA5vFz5pcuXYr4+Hh8//33mDt3br3fs3v3buzcudNziZHf0hktGPL2dnxfcBV7C69hyoo8uVMiIqqWOiHBZUxERETkSR4v5qOjo/Hdd9/hjjvuwIsvvoj77rsPO3bsYFd6qrdHV+1Fqcnxz4/NLuLQOZ3MGRERVS9uyQcI69EDYT16IG4Jj6UjakzJjyVJ8R/Te8qYCRGR93n0aLqq/vKXv+Af//gHBEGASqVCfHw8oqKioFKpan32u+++g9Vq5ZcAdRSIR9P1fuUL6Mut0nVTtRKHXx0hY0ZByHgdWDvJEU9czWPpiIiIiIi8RJaj6SocO3YM6enpyMtzLIcWRREmkwnHjh2DIAhuvUMURbfvpcDWtVUTHL+gR6nZDqUgYMus2+VOKfhUFPIV8ZTN8uVCREREVEl5WSk2vfsmAOD+Z19EWEQTmTMiahweX2Z/7Ngx3HbbbcjLy5MK8ooP4CjS3fkQVfhoykD0atcMt8Y3x/6X/oi4qHC5UyIicsmm1+P05FScnpwKm14vdzpEREEhZ95ruFxYgMuFBciZ95rc6RA1Go/PzP/pT3/CtWvXnIr3pk2bIj4+Hk2bNoVSqXTrPbt374bVaq39Rgp4kZpQrJnKo+hkNXG18zJ7InLp7PTpUgf7s9Ono+O//y1zRkREge/KmdMuY6JA59Fi/vDhw9izZ49UyA8aNAjz5s3D4MGD67xkPjY2FpcvX/ZkeuSneCwdEfkL008nYDcapZiIiLwvOq4Drpw9LcXkGXajFb9mHwUAtEhJhELjlR3a1AAeXWa/Z88eKe7cuTO++uorDBkyhHvfqd50Rgtue/sr7C28hiPn9Xh01V65UwpOVffME5FLoigCNhtgs3HLGBFRIxnz3Eto2TEeLTvGY8xzL8mdTsCoKOSrxuQ7PPr1ypUrV6R48uTJUKvV9X5Xu3btEBYW5om0yI89umovSkyO7RYGsw0nLpbInBERkWs2vR7ib7PyUCoBfpFNROR1bH5HwcyjM/NRUVFS3LFjxwa9a+/evTh16lQDMyJ/91Ol4t1mF9G1FX+DlkXlffLcM0/k0tnp052uw7p2lSkTouBkMliQk7kfOZn7YTJY5E6HGgmb33lPi5RElzH5Do/OzN9++40jw3Q6nSdfTcFKFKEQALsIKAVHZ3uSgaYZj6MjqoXpxEkI4eHS7Hzckg9kzogouHy28AdcPl0ixeOe6y9zRtQY2PzOexSaEMRM7S13GlQDj87M9+zZEyNHjoQoiti+fXuD3pWZmYnXXuO3a8GuW+umaKIOgTYsBH3aN2PzOyLyWeqEBAiCAEV4ODQ33QSlVit3SkRBpaKQrxpTYKvc8I7N7yjYCKKHO/RcuXIFf/jDH3D06FFs2bIFd999d73eU9HN3mazeTK9gKfX6xEZGQmdTgdtAPxBkp3sichf2PR6nJvxJACg3aKFLOaJGtmyWV/DarYDAEJUCkydf4fMGVFj4J55CkTu1nQeL+YB4Ndff0VaWhq2bduGV155BY8//jiaNKnbv1gs5usn0Ip58hHG687nzGuayZkNERHR7+h/NWLtG45Tbyb+bQC0LTQyZ0REVD+yFfOVl8bn5uYiLy8ParUat956K3r06IGoqCioVKpa3/POO+/AYDCwmK+jQCvmOTPvI1aOcr7m/nmi3+HMPBEREXmCbMW8QqFwOle+4vV1PWteFEUIgsBivo4CqZjXGS0Y+vevAABdWzdFiELAmqmDZM4qSLGYJ6pV4SOPwHTiJABA3TUBHf/9b5kzIiIiqj+70SqdL98iJREKjUd7p1MN3K3pPNoAr7LKRXxdC3kiANKMPACeLy83Hk0HANCb9Uj/Ih3pX6RDb9bLnQ75mIpCvmpMRETkjyoK+aox+Q6vFfOAo6Cv74fIanf8s1BmtkGEY5k9yaTiaLopm4N6v/ysHbNcxkQ2vR6iKMJuMACiCHVCgtwpERERNYhoF2E+Xwrz+VKIdtZnvshrxfyaNWtgt9vr/WnVqpW3UiM/YbLaUGKywWYX0TE6nPvlichnnZvxJMK6doUiPBwQBJ4xT0RE/k+sJiaf4dWZeaKGOHq+BEqFAKVCwNHzXGZP8ps/bL7LmEi0WWE6eWO/PJvfERGR3xOqicln+GwXAy61p/BQBQwWuxQTyU2r0mL5PcvlToN8EWcviIgo0PC/bT7P48X8hg0bAAADBgxo0Hv27dvHTvZBbsus23Hv+98CAD5/aqjM2RARVU8ICUFYjx5yp0FEROQxglKAqk0TudOgGnj8aDqSVyAdTUdE5A9sej3O/OnPKD9yBAqNBp025kDVtq3caRERETUIj6aTj2znzJO8AqWYP3vN4DQrHxcVLnNGRESunZ6civJjx6TrsB490CFrlYwZERERkT+T/Zx5V0wmE06dOoV9+/YhPz8fhYWFMJlMjZkC+YmKQr5qTCQnnjNPRERERL7C68W8Xq/H3//+d9x2222IjIxEly5dcMstt2DgwIHo3LkzIiMjMXToULz77rvQ6xvnD8ebN2/G+PHjER8fD41Gg9atW2Pw4MF47733cPXq1UbJocJDDz0EQRAgCAI6duzYqGP7qrPXDNCXW6Evt6LMZGO/DbkZrwMrRzk+xutyZyMbvVmPez65B/sv7cexX49hxvYZcqdEMrPp9fhlzAMw5OXBXlICe0kJQtu1Q7tFC+VOjYgoKJSXleI/Lz6DzP93PxamTYCu+JLcKQUMu9GK4m
WHULzsEOxGq9zpUDW8Wsz/85//RPv27fHCCy9gz549MJvNEEXR6WM2m7F7924899xzaN++PT74wHtn8165cgX33nsv7rvvPnzyySdQqVQYNWoUunfvjry8PMyePRu9evXC9u3bvZZDZbm5uVi7dm2jjOVPkuffmIm3iSI6x3CJvazWTnIdB5lZO2bBaDUCAIxWI36+/rPMGZHczs14Eubjx53+munECR5LR0TUSDa9+yYuFjiOBbWUlyP7rzNlzihwVOyVrxqTb/FKMS+KIiZPnownn3wSer1eOmauYga66qeCXq/HjBkzkJqa6vGcDAYDRowYgS1btkCpVOKjjz7C8ePH8fHHH2Pnzp04duwYunbtigsXLmDkyJH49lvvLu02GAx4/PHHvTqGP9IZLSg1OX/7FxbKZhvkGzQhGinu0qyLjJmQLxBtnKkgIpKT3W6DaLdDtNvlToVIFl4p5p955hmsXr3a6a+JooiQkBDExsYiISEBXbp0QWxsLJRKpTRLLwgCRFHE6tWr8eyzz3o0p5kzZyI/Px8A8PrrryM9Pd3p5wkJCcjNzUVYWBjMZjPGjh2L69evezSHyl5++WUUFhZCrVZ7bQx/NC17H4RK10oBWJrSX7Z8CMDoxcDFQ47P6MVyZyOb+cPmIyEqAeGh4egd0xuLhi+SOyWSmwhAc+MLHigU6Lj+U9nSIQp2JoMFOZn7kZO5HyaDRe50qDGIQGjYjd+HU+YtkDGZwBI1vivM50thPl+KqPFd5U6HquHxbvZ79uzBkCFDpMJ8wIABmDx5Mu666y506dIFSqXS6X6r1Yqff/4Z27ZtQ1ZWFvbt2+dITBCwe/du3HLLLQ3O6fDhw7j55ptht9vRqlUrnDlzBiqVyuW9s2bNwvvvvw8A+Otf/4q///3vDR6/qh9++AH9+/eHUqnEM888g7feegsA0KFDBxQWFjbo3f7ezf6hZXtQbrHicFEJAGDnX+5kJ3u5rRzlfD1lszx5EPmY05OdV5Gxgz2RvHIy9ztdj5ndV6ZMqLGsezXD6XrCy3NlyiTwFC875HQdM7W3TJkEJ9m62c+ZMwcAoNFokJ2djf/973944okn0K1bt98V8gAQEhKC7t27Y8aMGcjLy0NWVhY0v810VLyroTIzM2H/bfnNxIkTqy3kAWDy5MlSvHjxYhiNRo/kUMFut2Pq1KmwWq144YUXkJCQ4NH3+7uX7kuUCvnPnhzCQp6IfFblRndsekdE1PjunDIV544fwbnjR3DnlKlypxNQRLsozcyLdraj9lUeLeavXbuGnTt3QhAEZGdn45FHHqnzOyZNmoSsrCyIoogdO3Y0eKm7xWLBxo0bpevhw4fXeH+fPn3QrFkzAEBZWRlyc3MbNH5VixcvRl5eHrp164bnn3/eo+8OBBOXfu8yJhlNXO06JgpySq0WHbJWoUPWKja9I5KZ/lcjLp8pwfmfdbBa7Eh+LEnulMjLystKsfr5pwAAoWFhWPcK/1ztUWI1MfkUjxbz3377Lex2OwYOHIgHHnig3u958MEHMXDgQNjtdnzzzTcNyikvLw/Xrl2Trvv161fj/YIgON2zdevWBo1f2blz5/Diiy8CAJYuXVrjCoFgZTDbXMZERL7Gptfj9ORUnJ6cClsjHa1KRK6tfWMvBEFAqFqJaxcNUIeHyp0SeVnOvNek5neW8nK50wk4glKAqk0TqNo0gaAUan+AZOHRYr6oqAgAcPfddzf4Xffcc4/TO+vr8OHDUqxWq9G2bdtan+nUqZPL5xvqySefRElJCaZMmYI77rjDY+8NJEltm7qMSUY8mo7IpXMznnQZExGR9105c1pqfifa7Wx+52EtUhJdxuRbPFrMX79+HYIgIDo6usHvio6OhiiKDV5mf/TojXMR27Rp49YzlQv+ys83xMaNG5GTk4MWLVrgnXfe8cg7A43OaEGIQoEIlRJ92jfDqvRb5U6JiMglm16P8mPHUH7sGI+oI/IBDzzTBxaTDRaTDQ8800fudKgRRMd1gEKhgDo8Am27JSIyppXcKQUUhSYEMVN7I2Zqbyg0PCbaV3m0mG/WrBlEUcSVK1ca/K4rV65AEARp/3p9FRcXS7G776p8n16vh8XSsONNSkpKMGPGDADAu+++65EvOwLRtOx9CFEqkNhGixCFgEgNl8j5BB5NR/Q752Y8CfVvDUxNJ06yAR6RjPS/GrHuLcdpSC07NMWudSdlzogaQ/KTz7iMqeHsRiuKlx1C8bJDsBv5hbUv82gxXzGjvW3btga/64svvnB6Z32VlJRIsbtnuoeFhVX7jvr429/+hnPnzuGOO+7AlClTGvSuqkwmE/R6vdOHyKM2PgG07u34bHxC7myIfIJos8J00lEwqLsmsAEekYzWvrFXii+fbtif2ch/fPHP+WjZMR4tO8bji3/OlzudgPJr9lGXMfkejxbzQ4cOhUKhwPfff49NmzbV+z2ffvop8vLyoFAocPvttzcop8pHy7nbcK7qfQaDod7j79u3D4sWLYJKpcKSJUvq/Z7qzJ07F5GRkdInLi7O42M0lqUp/V3GRL5Ab9Yj/Yt0pH+RDr2ZX5oFPXb5JfIpIaobf6RlJ3siChYeLeajoqJwxx13QBRFPPLII/j444/r/I7//Oc/SE1NhSAIuPPOOxu8zL7izHoAMJvNbj1T9b7w8PqddW6z2TB16lTY7XY899xz6N69e73eU5OMjAzodDrpc/bsWY+P0VgiNaFYM3UQ1kwdxCX2voRH0wEAZu2Y5TKm4CTabLAbDLAbDBBtPHmDSE4T/zZA6mQ/6fVb2ck+SNz/7IsuY2o4Nr/zHx7vZvDaa69h6NChMBgMeOihhzB//nxMnjwZw4cPR+fOnSEIzkcb2O12/Pzzz9i2bRuysrKwd+9eiKIIQRDw2muvNTifpk1vdEQ3mUxuPVNe5XiLyu+oi/nz5+PAgQNISEiQjqTzNLVa7fb2AaJ60TQDpmyWOwsin2L65ReXMRE1Pm0LDf78XsNWcpL/CYtoggkvz5U7jYBU0fyOfJ/Hi/khQ4ZgxowZWLRoEQRBwPfff4/vv/8egGP5esuWLREREQFRFFFaWori4mKnBnMVhfxTTz2FQYMGNTifmJgYKXa3M75Op5NirVaL0NC6f8N7+vRpvPzyywCADz74gAU3kZ+bP2y+NCM/f9h8WXMh+QmCAKGeq7aIyLNMBgtylziOEk5+LIkz80QUNLxyzsD777+Py5cvY926dRAEAaLo2FBoMplqXAZeMWv/yCOPIDMz0yO5JCbeWBpy/vx5t56pfLZ95efr4oknnkBZWRkmTZqE4cOH1+sdwUZntGBatqMb7dKU/lxq7wuM12+cLT9xtWOWPkhpVVosv2e53GmQj+iUswGnxjwgxUQkn4pCviIeM7uvjNkQETUej+6ZryAIAtasWYPMzEyn/eaCILj8VIiIiMCCBQuQlZXlsVySkm40QTGZTE6FenUKCgpcPl8Xn3/+OQBg9erV1f66BUFAWlqa9Mzp06d/9/NXXnmlXuP7o4pCvmpMMqoo5KvGREHMptfjQsYLCOvRA
122b4OqgaeuEBEREdWHV2bmK8yaNQupqan45z//ic2bNyM/Px9Wq/NZhSEhIejfvz9Gjx6NadOmNbjhXVUDBw5EVFQUrl27BgDIz8+v8bg7URSRn58vXY8YMaJe46amprp1388//4zvvvsOgOPLjHHjxjn9/Oabb67X+P7Iahdx4qLjSJmurevXp4CIyNvOzXjSKe6QtUrGbIgo+bEkp2X2FBzKy0qx6d03ATga4IVFNJE5I6LGJ4gVa+AbgdFoRFFREa5evQoAaNGiBdq2bfu7c909LS0tDStXrgQAzJw5E++//3619+7fvx/9+vUD4Ciui4uLnTrie9rKlSul2fkOHTqgsLCwQe/T6/WIjIyETqeD1o/OPdYZLbjt7e0wWOwIVynRrVUTfDJ9iNxpEZfZE/3O6cnOX9aymCcianzrXs1wumYzPAok7tZ0XllmXx2NRoMuXbpg4MCBGDhwIDp37uz1Qh4Ann76aSgUjl/q2rVrazyirvIS/8cff9yrhTzdMC17HwRBQIRKCQFAiLJR/9Gk6lR0sp+ymYU80W/aLVroMiYiosZRXlaKy4UFuFxYALudx4NS8AqKiql3797S7PelS5eqba5XUFCApUuXAgCio6ORkZHh8j6LxYKUlBQ0bdoUffr0waFDh7yTeJDp2urG8qilKf1lzISIqHpKrRYdslahQ9YqKP1oBRQRUaDY9O6biI7rAAC4cuY0z5mnoFWnYv4Pf/gDxo8f761cnDz44IMe7QK/YMEC9O3r6G46Z84crFixwunnJ0+eRHJyMsrLy6FSqbB+/XpERUW5fFd2djZWr16N0tJSHDx4EDNmzPBYnsFqaUp/hCgVSGyjxbfP/YGd7InIZ9n0epyenIrTk1Nh0+vlToeIKCgplEq07BiPlh3juV+egladGuDt3LkTrVu39lYuTnbv3o3Lly977H3h4eHYunUrUlNTkZubi/T0dMybNw9JSUkoLi7Grl27YLVaERsbi6ysLAwdOtTtd1fuyF+b48eP4+2335auf/75Zym+cuUKpkyZIl1HR0fj3Xffdfvd/qzougF7C69JcaQmUuaMCABw7TSw9Ld/F6Z9C0R1kDcfIh9w5s9TUf7jj1Lcae0amTMiCm48Zz743P/si07N78iz7EYrfs0+CgBokZIIhcarPdOpAerUAE+hUKB169Zun9feELGxsbh8+TJsNs/vg/nss8+wcuVK7N+/HxcuXIBWq0Xnzp0xbtw4pKWloXnz5jU+b7FYkJaWhpycHCQkJGDVqlXo3bu3W2Pv3LkTw4YNc+ve+jTE89cGeJ1f2OJ0/ctbI2XKhJy83d75+vkz8uRB5EOO9ezldN3jyI8yZUJEAJCTud/pmufMEzVM8TLnLcQxU92rc8hz3K3p6vw1i81mw9mzZ+HtJvjeKOIr3Hfffbjvvvvq/XxoaChWr15dr2fvvPNOr/+98zdnrxlgszv+nigFAXB/oQMRUaMTNBqIRqMUExEREcmhzjPzdVlS3hCiKEIQBK8W9YHIH2fme7/yBWw2G8osjn8Utzx1GxJjuczeJ3CZPdHvmIuKcGrMAwCATjkboGrbVuaMiIIbl9kTeRaX2cvPazPzADizTB6nVCqhVTpiFvI+JKoDl9YTVaFq2xbd9ubJnQYR/UYdHsql9UQUlLx6NJ0gCPWayW+s2X/yDWum3Yoysw1lZhvWTLtV7nSIiIiIiIJWxax81Zh8T51n5kNDQzFo0CC37v3666+hUqncvr+y3bt3w2q11vk58j+vfXYUAzpGSfGaqXX/54WIiIiIiCiY1LmYb968OXbs2OHWvQqFok73V1bRzZ6IyBfozXrM2jELADB/2HxoVf7Rk4KIiCjQlJeVOh1Nx3PmPatFSqLTnnnyXV5dZk/kjqUp/V3GRL6kopCvGlNwMf70E4717IVjPXvB+NNPcqdDFPRMBgtyMvcjJ3M/TAaL3OlQI8mZ9xouFxbgcmEBcua9Jnc6AUehCUHM1N6Imdqbze98nM8W82yyFzwiNaFYM3UQ1kwdhEgNO9ASke8qHPugy5iI5PH54kMoPluK4rOl+HzxodofIL9XXlaK8yeOw1xuhAgRV86cljulgGM3WlG87BCKlx2C3chtz76sTl+1rFixAppGOlN3wYIFMP52ji8RkdzmD5vvtMyeiIjk9+v5MpcxBa5N776JUHUYLKZyWMrL0Sahu9wpBZyqDfBipvaWMRuqSZ2K+dTUVG/l8TsTJkxotLFIXjqjBdOy9wFwLLPn7Dz5Iq1Ki+X3LJc7DZJZx/WfSjPyHdd/KnM2RBTVKhyXz5QAAFq2bypzNtRYYtp3xJWzjhn5Mc+9JHM2RPLx2WX2FDwqCvmqMRGRr9F064YeR35EjyM/QtOtm9zpEAU9hRIIVSsRqlZCoZQ7G2oM9zw+SyrkU+YtYPM7L6jc9I4N8Hwbi3mSlc5owdHzehw9r4fVzj4JRERE5D6FUoGYuCaIiWsChZJ/rA0GuYv+4TImz2EDPP/B3/VIVtOy96FrK8c3qiculrCbPRH5NJtej9OTU3F6cipser3c6RAFvT+k9pAa4P0htYfc6VAjKD5TCHO5EeZyI4rPFMqdDpGsWMyT7EKUCiS20SKxjZb75YnIp52b8aTLmIjk8dWqY9LM/FerjsmdDjUGsZqYKAixmCdZ8Yx5IiIiInJXTPuOUIVpoArTIKZ9R7nTIZJVnYr5b775Bt9//723cnHy/fff45tvvmmUsYiIaqM365H+RTrSv0iH3szl1cGq3aKFLmMikkfyY0kuYwpcY557CS07xqNlx3h2sqegJ4ii6PYCFYVCgdjYWBQVFbl9f+vWrXH+/Pk6JxYbG4vi4mJYrdY6PxvM9Ho9IiMjodPpoNVq5U6nVg8t2+N0vWbqIJkyIapZ+hfpTtc8po6IiKhx6YovIfuvMwE4OtlHxrSSOSMi73C3pqvzMvs61P4N1phjERER1YTN74iI5FVRyFeNyXPsRiuKlx1C8bJDsBs5qerruGeeZMU98+Qv5g+b7zKm4MHmd0REFOh+zT7qMibfVOeDA3U6HdLT02u/sZ73V36OAl+kJpRL633VtdPA0qGOeNq3QFQHefORmVal5dJ6IiIiGaXMW+C0zJ4o2NV5z7wgCG6/XBTFOt3v6lmbzVav54OVv+2ZJx/2dnvn6+fPyJMHkY8w/vQTCsc+CADouP5TaLp1kzkjIqLgUl5Wik3vvgkAuP/ZFxEW0UTmjAKP3WiVZuRbpCRCoanz3C95gNf2zNdFfQt5Cg5nrxmQ9PJWdH5hCx7453fQGS1yp0SViXagXOf4iHa5syGSlbmoCIWjxwA2GxRhYTgzKUXulIiIgk5FIV81Js9RaEIQM7U3Yqb2ZiHvB+pVzIui6PUPBb7k+d+ixGSDzS7i0FkdpmXvkzslqkysJiYKQqfGPCDF9rIyGTMhIiIicqjz1y1RUVH49NNPvZGLRBRFPPjgg7h+/bpXxyH56IwWlJpudMi08Qsc36NQAGGRcmdB5DsiIoDfCvlOORtkToaITAYLcpccBuA4Y14dHipzRuRt
9z/7otMye6JgV+c98/U9N76uYmNjcfnyZe6ZryN/2TOftiIPO38qliZ8lQKw/6W7Eanhf4h9BhvgEUnMRUXS7HynnA1QtW0rc0ZElJO53+l6zOy+MmVCRORZ7tZ03AhBsrDaRUSoFCg126FUCNj5lztZyPuaqA5sekf0G1Xbtui2N0/uNIiIiIgkddoz3759e8TFxXkrFyft2rVD+/bta7+R/JMoQqFQQBsWgj5xkYiLCpc7IyIiIvIjyY8luYyJqP7sRiuKlx1C8bJDsButtT9AsqrTzHxhYaGX0vi9vXv3NtpY1PhClAoktvHdbQBERJXZ9Hqcm/EkAKDdooVQ+vA2JiKiQMWj6byv4li6ijhmam8Zs6HaePVoOqLqzH+ojxQvTekvYyZEtdOb9Uj/Ih3pX6RDb9bLnQ7JoKKQrxoTkXwqmt9VjSlw8Wg67xPtIsznS2E+XwrRzgbVvi4oi/nNmzdj/PjxiI+Ph0ajQevWrTF48GC89957uHr1qsfHM5vN+OqrrzBnzhyMGDECHTp0QEREBFQqFaKjo3Hrrbdi9uzZOHjwoMfH9lWRmlCsmToIa6YO4l558nmzdsxyGRMRkXzsNhHFZ0tRfLYUdhuLDiKP4NHEfiWoivkrV67g3nvvxX333YdPPvkEKpUKo0aNQvfu3ZGXl4fZs2ejV69e2L59u8fGzMjIQKtWrTB8+HC88cYb2LFjB1q0aIERI0ZgzJgxaNu2Lf73v//hvffeQ58+fTBlyhSUl5d7bHwiImq4dosWuoyJSE6sOoJN5ePoeDSddwhKAao2TaBq0wSCUpA7HapF0HSzNxgMGDFiBPLz86FUKrFs2TKkp6dLPz958iRGjRqFEydOYOTIkdi2bRuGDh3a4HFzc3Nx/fp1AMBDDz2EefPm/a6J4MGDB/HII4/g6NGjWLVqFYqLi/H55583eGwi8oz5w+ZLM/Lzh82XNRciInJQKBWIieOe6WASFtEEE16eK3caAa1FSqK0b75FSqLM2VBt6nTOvD/705/+hI8++ggA8NZbbyEjI+N39xQUFKBnz54oLy9HdHQ0Tp48iWbNmjVo3Jtvvhk//PAD7rzzTmzbtg1KpdLlfWfOnEG3bt2kWfmcnByMHj26zuP5yznzRET+wqbX4+fhdwEA1F0TIChD0CFrlcxZEZHJYJH2yic/lgR1OLftEVFgcLemC4pl9ocPH8aKFSsAAK1atcIzzzzj8r74+HhMmzYNgGNJ/ty5nvvm79lnn622kAccx/7de++90vWmTZs8NjYREdVf5YZ3phMnZcyEiCpTh4dizOy+GDO7Lwt5IgpKQVHMZ2Zmwm63AwAmTpwIlUpV7b2TJ0+W4sWLF8NoNDZo7PHjx2PatGm44447ar03ISFBis+dO9egcYmIyHPUlX5/5p55IiIi8gUBX8xbLBZs3LhRuh4+fHiN9/fp00daWl9WVobc3NwGjf/iiy9iyZIlaNKk9j1dlRvfNXR5v6/TGS14aNkePLRsD3RGi9zpEBFVK3buWzCddMzId8rZwDPmiYiIyCcEfDGfl5eHa9euSdf9+vWr8X5BEJzu2bp1q9dyqyovL0+Ka/vSwd/NWnNAiqdl75MxE3LJeB1YOcrxMV6XOxsiWRXNfgZ2gwF2gwFFs11v0yIiIiJqbAFfzB8+fFiK1Wo12rZtW+sznTp1cvm8N+Xm5mL37t0AgK5duzot9ydqdGsnuY6DlN6sR/oX6Uj/Ih16s17udKiRlf/4o8uYiIiISE4BX8wfPXpUitu0aePWM5UL/srPe4PBYMCiRYswfvx4AEC3bt2Qm5uLsLAwr44rt/kP9ZHipSn9ZcyEqHYzts/A8avHcfzqcczYPkPudKiRCRqNy5iIiBpPeVkp1r2agXWvZqC8rFTudIh8QsCfM19cXCzF7u5Dr3yfXq+HxWJBaKhnuqTqdDo89dRTMBqNOHfuHA4ePAiDwYCkpCSkp6dj+vTpUKvVHhnLl0VqQrFm6iC506DqTFx9Y0Z+4mp5c/EBP1//2WVMgc+m10MdH4/yI0eg0GjQaWOO3CkREQWlTe++6RTzvHnvsButTufMKzQBXy76tYD/f6ekpESK3S2Sq86Kl5SUoHnz5h7Jx2g0YtUq5/OJmzVrhi5duqB58+YQRbFO7zOZTDCZTNK1Xs8lwOQBmmbAlM1yZ+EzujTrIhXxXZp1kTkbakznZjwJRVgYwn/rpaJyY6sWERGRv6oo5CvimKm9ZcyGahPwy+wrHy1X05F0lVW9z2AweCyf1q1bQxRFWK1WFBcX48svv8SoUaOQk5OD1NRU9OjRA998843b75s7dy4iIyOlT1xcnMdy9SZ2syd/smj4InRv3h3dm3fHouGL5E6HiIgo6Nz/7IsuY6JgFvDFvKbS/kaz2ezWM1XvCw8P92hOAKBUKhEdHY277roL2dnZ2LBhA5RKJQoLC/HHP/4RO3bscOs9GRkZ0Ol00ufs2bMez9Ub2M2e/IlWpcXye5Zj+T3LoVXxWLJgUvlMeZ4vT0REga5FSqLLmHxTwBfzTZs2leLKy9FrUvm896rv8JbRo0fj2WefBeD4MmHSpEm/y8MVtVoNrVbr9CEiIs9QarXokLUKHbJW8Xx5IiIZ5cx7DZcLC3C5sAA5816TO52ApdCEIGZqb8RM7c398n4g4Iv5mJgYKb5+/bpbz+h0OinWarUea35Xm5kzZ0rx+fPn8fHHHzfKuHKw2uw4el4Pq11kN3si8lnmoiL8NGAgfhowEOaiIrnTIaLfmAwW5GTuR07mfpgM3K4XDK6cOe0yJs+xG60oXnYIxcsOwW60yp0OuSHgi/nExBvLQ86fP+/WM0WV/sBW+Xlva9OmDTp27Chd79y5s9HGbmw/F984UiRS0zhflhAR1YVNr8cvd98Du8EAiCJOjXlA7pSI6De5Sw67jClwRcd1cBmT51Rtfke+L+CL+aSkJCk2mUxOhXp1CgoKXD7fGFq3bi3F7n754M9OXCyp/SYiIhmcm/GkFNsrNVMlIvnZbSKKz5ai+Gwp7La6nQRE/mnMcy+hZcd4tOwYjzHPvSR3OkQ+IeA3QgwcOBBRUVG4du0aACA/Px9tazhaSBRF5OfnS9cjRoyo99i7d+/G7t27cc8997j9pYDFcmOpmLvd9/1Z11ZN5E6BiMgl0WYFwsKAsjJAqUSnnA1yp0REErGamAJVWEQTni3vZS1SEp3OmCffF/Az86GhoRg9erR0vX379hrvP3DggLS3PiIiAsnJyfUe+//+7//wl7/8BVu3bnXrfrvdjl9++UW69pdj5uqje2stEtto8dGUgXKnQq4YrwMrRzk+xutyZyMrvVmP9C/Skf5FOvRmvdzpUGMSAYVCAUXTptDcdBPPmCfyIQqlAjFxTRAT1wQKZcD/cZaoUbD5nf8Jit/9nn76aSgUjl/q2rVrazyiLisrS4off/xxp6Pt6svdve9ffvmlU5O+e+65p8Fj+6oVaQOxZuog7pf3VWsnuY6D0Kwds1zGFPiEkBCE9eiBsB49IITwDzVEviT5sSSXMRFRMAmKYr53795
IS0sDAFy6dAmZmZku7ysoKMDSpUsBANHR0cjIyHB5n8ViQUpKCpo2bYo+ffrg0KFDNY6fm5uLr7/+usZ7SktLMXv2bKecR44cWeMzRETkPTxjnsh3qcNDMWZ2X4yZ3RfqcE4MEFFwCopiHgAWLFiAvn37AgDmzJmDFStWOP385MmTSE5ORnl5OVQqFdavX4+oqCiX78rOzsbq1atRWlqKgwcPYsaMGTWOLYoi7r//fnz00UcuVwXk5+dj6NChOHrUsUclOjoa//73v6FUKuvzSyVquNGLgYuHHJ/Ri+XORlbzh813GVPg4xnzRES+QVd8CYvSJmJR2kToii/JnQ6RzxBEUQyariHFxcVITU1Fbm4uAKB79+5ISkpCcXExdu3aBavVitjYWGRlZeGuu+6q9j3Lly/Ho48+Kl3ffvvtLmfe9+zZg4yMDKefNWvWDP3790fLli1hMplw9OhRHDt2zOld//rXv5CQkFCvX6Ner0dkZCR0Oh20PvyHz7QVeTBabFia0p9L7X3RylHO11M2y5MHERERBb1FaROdrmesWCtTJkSNw92aLqg2AcbExGDLli347LPPsHLlSuzfvx+bNm2CVqtF//79MW7cOKSlpaF58+Y1viclJQVfffUVcnJykJCQgIULXS+/HDRoEHbu3InCwkJ8/vnn+Pbbb3H06FHk5+ejpKQEISEhiIyMxJAhQzBgwABMnDgRt956qzd+6T5rWvY+rJk6SO40qCq7Bbj825dMLXvImwsREVEVJoNFOl8++bEkLrUnoqAUVDPzwcDfZuYBsJj3NcbrwPwkwFIGhEYALXsCj7p3IgMREVFjyMnc73Q9ZnZfmTKhxnD59Cmsfv4pAMCkt99Hyw6dZM6IyLvcremCZs88+aalKf3lToGqWjsJEARA1cTxv8qgWsBDJDEXFeGnAQPx04CBMBcVyZ0OEVHQ2rlyGdp174l23Xti58plcqdD5DNYzJMseDSdj4tJvBFPXC1fHkQysen1+OXue2A3GABRxKkxD8idEhFV8ofUHig+W4ris6X4Qyq3gxFRcGIxT0TOJq52zMa37g08dQjQNJM7I6JGd27Gk1JsNxplzISIXPlq1THExDVBTFwTfLXqWO0PkF+7/9kXXcZEwY7rZ0kW7GbvwzTN2L2eCEBYz54oP3IEANApZ4PM2RARBa+wiCaY8PJcudMg8jmcmSdZTcveJ3cKRES/027RQijCwhDerx+67tkNVdu2cqdERJUkP5bkMiYiCiacmSciIqpCqdWiQ9YqudMgIhf0vxqx9o29AICJfxvAY+mIPMRutOLX7KMAgBYpiVBoWCr6Os7Mk6zYzZ6IfJFNr8fpyak4PTkVNr1e7nSIqJKKQr5qTIGrvKwU617NwLpXM1BeVip3OgGropCvGpPvYjFPsmA3eyLyZZUb4FWOiYiocZWXleJfMx7F5cIC2O02bHr3TblTIvIZLOaJiIiIyG9M/NsAlzEFpsrF+5Uzp2XMJPC1SEl0GZPvEkRRFOVOgjxHr9cjMjISOp0OWq1W7nSIiPySuahIOlu+U84GNsAjIpLJulczYLfZcOWso5D/06KPEBbRROasiLzL3ZqOxXyAYTFPRNRwpyenOl2zGR6R7zAZLMhdchiAo5M9G+AFtvKyUml2/v5nX2QhT0HB3ZqOLQqJiIiIyG98vvgQfj1fJsVj/9JP5ozIm3jGPFH1uGeeiIioinaLFrqMiUh+FYV81ZiIKNhwZp6IiKgKnjNP5LtatAnHr+cNUkxEFKw4M0+NSme0AADSVuRJMREREZG77n3iJsTENUFMXBPc+8RNcqdDRCQbFvPUqGb8Zz8A4PhFPR5dtVfmbIiIiMjfqMNDMWZ2X4yZ3ZfN74g8yG60onjZIRQvOwS70Sp3OuQGFvPUqH6+XCLFJy6W1HAnEREREQW78rJSrHs1A+tezUB5Wanc6QS0X7OPuozJd7GYp0bVJebGcSJdW/FoESIiIiKqXsWxdFVjImIxT41s0SOO42O6t9bioykDZc6GiIiIiIgAoEVKosuYfJcgiqIodxLkOXq9HpGRkdDpdNBqtXKnQ/7IeB1YO8kRT1wNaJrJmQ0REREFsfKyUmlG/v5nX0RYBFd2UuBzt6ZjMR9gfL2Y1xktiNSEIm1FHuY/1AeRGjau8TkrRzlfT9ksTx5ERERVmAwW5C45DABIfiyJDfCIKCC5W9NxmT01qllrDkjxtOx9MmZCRERE/qaikK8aU+BiAzyi6rGYJyJnE1e7joOU3qxH+hfpSP8iHXqzXu50iIiIggob4BFVj8U8Nar5D/WR4qUp/WXMhKqlaeZYWj9lM/fLA5i1Y5bLmIiIGl/yY0kuYyKiYMRinhpVxR75FWkDuV+eiIiI6kQdHooxs/tizOy+3C8fJO5/9kWXMRGxAV7A8fUGeET+Rm/WSzPy84fNh1bFf6+IiOTCBnhEFAzYzT5IsZgnIiKiQJWTud/peszsvjJlQkTkPe7WdCGNmBMR+QOeM09ERD7KbhPx6/kyAECLNhEyZ0NEJC/umSciZxWFfNWYKEjY9HoUPvwIfhowEIWPPAKbnqcYEPkOsZqYiCj4BGUxv3nzZowfPx7x8fHQaDRo3bo1Bg8ejPfeew9Xr171+Hjl5eVYv349pk6dij59+qBFixYIDQ1FVFQUevbsiSlTpuDzzz+H3W73+NhERFQ352Y8CdPJkwAA04mTODfjSZkzIqIKCqUCMXFNEBPXBAplUP4xlohIElS/C165cgX33nsv7rvvPnzyySdQqVQYNWoUunfvjry8PMyePRu9evXC9u3bPTLehQsX8Oyzz6Jly5Z48MEH8eGHH+Ly5csYMmQIJkyYgL59++LUqVNYtWoVRo0ahX79+uHw4cMeGdsX6YwWpK3Ik2LyUTxnnoiIfBSPpgs+5WWlWPdqBta9moHyslK50yHyKUHTAM9gMOD2229Hfn4+lEolli1bhvT0dOnnJ0+exKhRo3DixAmoVCps27YNQ4cObdCYr7zyCl599VUAQFRUFJYuXYpx48ZBEATpnqtXr2LWrFnIzs4GAERGRuKrr75C3771a+jiyw3wHlq2B5pQJVakDUTaijysSBsod0pERL9jLipCwegxEI1GhPXqhfYfLoPSx34/JSIKFutezXC6nvDyXJkyIWo87tZ0QTMzP3PmTOTn5wMAXn/9dadCHgASEhKQm5uLsLAwmM1mjB07FtevX/fY+OvXr8f48eOdCnkAaN68ObKysnD//fcDAHQ6HR5++GFYLJy5JiKSw4WMF6BJTER4v35QqNUs5ImIiMgnBUUxf/jwYaxYsQIA0KpVKzzzzDMu74uPj8e0adMAOJbkz53rmW/+7rrrLtx555013lN5rJ9++gkbN270yNi+ZGlKfyme/1AfGTMhIqqeaLOi/NgxlB87BtFmlTsdIqKgdv+zL7qMiShIivnMzEypudzEiROhUqmqvXfy5MlSvHjxYhiNxgaPf88999R6T2JiItq2bStdf/nllw0e19dEakKlpfWRmlCZsyEiqgabZRMR+YywiCaY8PJcTH
h5LsIimsidDpFPCfhi3mKxOM1yDx8+vMb7+/Tpg2bNmgEAysrKkJubW++xJ02ahNzcXDzyyCNu3R8XFyfF586dq/e4RERUP+aiIhh/+AF2gwGq+HgIISFyp0RERETkUsAX83l5ebh27Zp03a9fvxrvFwTB6Z6tW7fWe+wuXbpgxIgRiI2Ndev+ykfThfAPkEREje7U6DGAKAI2G8p//BHtFi2UOyUioqDFTvZENQv4Yr7yUW9qtdppKXt1OnXq5PJ5bztz5owU9+kTeHvKeTQdEfk6u9EICAKgVAIAm98REclo07tvuoyJyCHgi/mjR49KcZs2bdx6pnLBX/l5bzp16hQuXrwoXU+cOLFRxm1M07L3SfGsNQdkzISIyLWwnj1dxkRERES+JuCL+eLiYimu2Atfm8r36fX6Rjkm7r///a8Ujx07Fj169PD6mERE5Kz9vz5EeL9+CO/XD+3/9aHc6RBRJSaDBTmZ+5GTuR8mA1f4BQN2sieqWcBvzC4pKZFitVrt1jNhYWG/e0fz5s09mldlpaWlWLjQsS8zIiIC//jHP9x+1mQywWQySdd6vd7j+XnK0pT+0ow8j6YjIl+k1GrRIWuV3GkQkQu5Sw47xWNm95UxG2oMFZ3sici1gJ+Zr3y0XE1H0lVW9T6DweDRnKqaM2eOtMR+8eLF6Nixo9vPzp07F5GRkdKnckd8X8Oj6YiIiIiIiDwj4It5jUYjxWaz2a1nqt4XHh7u0Zwq27JlC95//30AwBNPPIHU1NQ6PZ+RkQGdTid9zp496400iYiIiGSV/FiSy5iIKFgF/DL7pk2bSnHl5eg1KS8vr/YdnvTjjz/i//2//wdRFPHAAw9IRX1dqNVqt7cPEBEREfkrdXgol9YTEVUS8DPzMTExUnz9+nW3ntHpdFKs1WoRGur5JeEFBQW4++67odfrkZycjDVr1kD521FIRERERERERDUJ+GI+MTFRis+fP+/WM0VFRS6f95RTp05h2LBhuHDhAu69915s2LDB7f38RERERERERAFfzCcl3dhTZTKZnAr16hQUFLh83hNOnTqFO++8E2fOnMHIkSPx6aefcpk8ERERERHJym60onjZIRQvOwS70Sp3OuSGgC/mBw4ciKioKOk6Pz+/xvtFUXS6Z8SIER7LpbCwEMOGDZMK+fXr17OQJyIiIiIi2f2afdRlTL4r4Iv50NBQjB49Wrrevn17jfcfOHBA2lsfERGB5ORkj+RRWFiIO++8E6dPn0ZycnKNhfykSZNw1113eWRcIiIiIiIiCjwBX8wDwNNPPw2FwvFLXbt2bY1H1GVlZUnx448/7nS0XX2dPn0aw4YNw+nTpzFixAhs2LChxhn5Xbt21fqlAxERERERkae0SEl0GZPvCopivnfv3khLSwMAXLp0CZmZmS7vKygowNKlSwEA0dHRyMjIcHmfxWJBSkoKmjZtij59+uDQoUPVjn369GnceeedKCwsxIgRI5CTk8Ol9URERERE5FMUmhDETO2NmKm9odAE/AnmASFo/l9asGABDhw4gP3792POnDlo1aqVVOADwMmTJzFq1CiUl5dDpVJh/fr1TnvtK8vOzsbq1asBAAcPHsSMGTPwzTff/O6+M2fOYNiwYSgsLAQAWK1WPPjgg7Xmevny5Xr8ComIiIiIiChYBE0xHx4ejq1btyI1NRW5ublIT0/HvHnzkJSUhOLiYuzatQtWqxWxsbHIysrC0KFD3X63IAgu//qzzz6LU6dOSdfbtm1r8K+DiIiIiIjIk+xGq9T0rkVKImfm/URQLLOvEBMTgy1btmDTpk0YO3YsysvLsWnTJhw5cgT9+/fHu+++ix9//LHW5nMpKSl45JFHEBERgZtvvhkLFy50eV9Ne/OJiIiIiIh8ATvZ+ydBFEVR7iTIc/R6PSIjI6HT6aDVauVOh4iIiIiIfFzxMuceYDFTe8uUCQHu13RBNTNPRLUwXgdWjnJ8jNflzoaIiIiIGgE72fsnzswHGM7MU4OsHOV8PWWzPHkQycSm1+PcjCcBAO0WLYSSv48S+QyTwYLcJYcBAMmPJUEdHipzRkRE3sGZeSIiojqqKOSrxkQkv4pCvmpMRBSsWMwT0Q0TV7uOiYiIiIjIp7CYJ6IbNM0cS+unbHbEREGm3aKFLmMikl/yY0kuYyKiYMU98wGGe+aJiIiIiIj8F/fME1HdsZs9EREREZFfYDFPRDesneQ6JiIiIiIin8JinoiIiIiIiMjPsJgnohvYzZ6IiHyUyWBBTuZ+5GTuh8lgkTsdIiLZsZgnohvYzZ6IiHwUz5knInLGYp6IiIiIiIjIz7CYJyIiIiKfx3PmiYichcidABERERFRbdThoRgzu6/caRAR+QzOzBMRERERERH5GRbzRERERERERH6Gy+yJiIiIyOeZDBapi33yY0lQh4fKnBERkbw4M09EREREPo9H0xEROePMPBERERER+ZzyslJsevdNAMD9z76IsIgmMmdE5Fs4M09EREREPs1ksMBus6P4bCnsNpFH0wWJikK+akxEDpyZJyIiIiKflrvkMBRKBWLiHDOz3C9PRMSZeSIiIiIi8kH3P/uiy5iIHARRFEW5kyDP0ev1iIyMhE6ng1arlTsdIiIiogZjJ3siCibu1nRcZk9EREREPk0dHooxs/vKnQYRkU/hMnsiIiIiIiIiP8NinhqNzmhB2oo8KSYiIiIiIqL6YTFPjWZa9j4pnrXmgIyZEBERERER+TfumSciIiIiIgpidqMVv2YfBQC0SEmEQsMy0R8E5cz85s2bMX78eMTHx0Oj0aB169YYPHgw3nvvPVy9etXr4xcXF2PixIkQBAGCIGDnzp1eH9MXLE3pL8XzH+ojYyZERERERFShopCvGpNvC6pi/sqVK7j33ntx33334ZNPPoFKpcKoUaPQvXt35OXlYfbs2ejVqxe2b9/utRz++9//IjExEevWrfPaGL4qUhOKFWkDpZiIiIiIiIjqJ2iKeYPBgBEjRmDLli1QKpX46KOPcPz4cXz88cfYuXMnjh07hq5du+LChQsYOXIkvv32W4+Of+HCBYwePRoPP/wwrl+/7tF3ExERERER1VeLlESXMfm2oCnmZ86cifz8fADA66+/jvT0dKefJyQkIDc3F2FhYTCbzRg7dqzHiu6VK1ciMTERmzZtQt++fbF3716PvJeIiIiIiKihFJoQxEztjZipvblf3o8ERTF/+PBhrFixAgDQqlUrPPPMMy7vi4+Px7Rp0wA4luTPnTvXI+PPmjULRqMRb731Fv73v//h5ptv9sh7iYiIiIiIKDgFRTGfmZkJu90OAJg4cSJUKlW1906ePFmKFy9eDKPR2ODxb7vtNhw8eBAZGRkICeE3XURERERERNQwAV/MWywWbNy4UboePnx4jff36dMHzZo1AwCUlZUhNze3wTls3rwZ3bt3b/B7iIiIiIiIiIAgKObz8vJw7do16bpfv3413i8IgtM9W7du9VpuRERERERERPUR8MX84cOHpVitVqNt27a1PtOpUyeXzxMRERERERH5goAv5o8eP
SrFbdq0ceuZygV/5eeJiIiIiIiIfEHAF/PFxcVSXLEXvjaV79Pr9bBYLB7OioiIiIiIiKj+Ar61eklJiRSr1Wq3ngkLC/vdO5o3b+7RvDzFZDLBZDJJ13q9XsZsiIiIiIiIqDEE/Mx85aPlajqSrrKq9xkMBo/m5Elz585FZGSk9ImLi5M7JSIiIiIiIvKygC/mNRqNFJvNZreeqXpfeHi4R3PypIyMDOh0Oulz9uxZuVMiIiIiIiIiLwv4ZfZNmzaV4srL0WtSXl5e7Tt8jVqtdnv7ABEREREREQWGgJ+Zj4mJkeLr16+79YxOp5NirVaL0NBQT6dFREREREREVG8BX8wnJiZK8fnz5916pqioyOXzRERERERERL4g4Iv5pKQkKTaZTE6FenUKCgpcPk9ERERERETkCwK+mB84cCCioqKk6/z8/BrvF0XR6Z4RI0Z4LTciIiIiIiKi+gj4Yj40NBSjR4+Wrrdv317j/QcOHJD21kdERCA5Odmb6RERERERERHVWcAX8wDw9NNPQ6Fw/FLXrl1b4xF1WVlZUvz44487HW1HRERERERE5AuCopjv3bs30tLSAACXLl1CZmamy/sKCgqwdOlSAEB0dDQyMjJc3mexWJCSkoKmTZuiT58+OHTokHcSJyIiIiIiInIhKIp5AFiwYAH69u0LAJgzZw5WrFjh9POTJ08iOTkZ5eXlUKlUWL9+vdNe+8qys7OxevVqlJaW4uDBg5gxY4bX8yciIiIiIiKqECJ3Ao0lPDwcW7duRWpqKnJzc5Geno558+YhKSkJxcXF2LVrF6xWK2JjY5GVlYWhQ4e6/W5BEGr8+fHjx/H2229X+/O3334bK1eulK7HjBmDMWPGuD0+ERERERERBZegKeYBICYmBlu2bMFnn32GlStXYv/+/di0aRO0Wi369++PcePGIS0tDc2bN6/xPSkpKfjqq6+Qk5ODhIQELFy4sMb7L168iFWrVlX78y+++MLpumPHjizmiYiIiIiIqFqCKIqi3EmQ5+j1ekRGRkKn00Gr1cqdDhEREREREdWBuzVd0OyZJyIiIiIiIgoULOaJiIiIiIiI/AyLeSIiIiIiIiI/w2KeiIiIiIiIyM+wmCciIiIiIiLyMyzmiYiIiIiIiPwMi3kiIiIiIiIiP8NinoiIiIiIiMjPsJgnIiIiIiIi8jMs5omIiIiIiIj8DIt5IiIiIiIiIj/DYp6IiIiIiIjIz7CYJyIiIiIiIvIzLOaJiIiIiIiI/AyLeSIiIiIiIiI/w2KeiIiIiIiIyM+wmCciIiIiIiLyMyzmiYiIiIiIiPwMi3kiIiIiIiIiP8NinoiIiIiIiMjPsJgnIiIiIiIi8jMs5omIiIiIiIj8DIt5IiIiIiIiIj/DYp6IiIiIiIjIz7CYJyIiIiIiIvIzLOaJiIiIiIiI/AyLeSIiIiIiIiI/w2KeiIiIiIiIyM+wmCciIiIiIiLyM0FZzG/evBnjx49HfHw8NBoNWrdujcGDB+O9997D1atXA3ZsIiIiIiIiCgyCKIqi3Ek0litXriA1NRVbtmwBAHTr1g1JSUkoLi7Grl27YLPZEBsbi+zsbAwfPtwvx9br9YiMjIROp4NWq/VU+kRERERERNQI3K3pgqaYNxgMuP3225Gfnw+lUolly5YhPT1d+vnJkycxatQonDhxAiqVCtu2bcPQoUP9bmwW80RERERERP7L3ZouaJbZz5w5E/n5+QCA119/3amYBoCEhATk5uYiLCwMZrMZY8eOxfXr1/1+bCIiIiIiIgo8QVHMHz58GCtWrAAAtGrVCs8884zL++Lj4zFt2jQAjmXxc+fO9euxiYiIiIiIKDAFRTGfmZkJu90OAJg4cSJUKlW1906ePFmKFy9eDKPR6LdjExERERERUWAK+GLeYrFg48aN0nVtzeX69OmDZs2aAQDKysqQm5vrl2MTERERERFR4Ar4Yj4vLw/Xrl2Trvv161fj/YIgON2zdetWvxybiIiIiIiIAlfAF/OHDx+WYrVajbZt29b6TKdOnVw+709jExERERERUeAK+GL+6NGjUtymTRu3nqlcdFd+3p/GJiIiIiIiosAV8MV8cXGxFFfsR69N5fv0ej0sFovfjU1ERERERESBK0TuBLytpKREitVqtVvPhIWF/e4dzZs398mxTSYTTCaTdK3T6QA4vgggIiIiIiIi/1JRy4miWON9AV/MVz7eraZj4Sqrep/BYKhXMd8YY8+dOxevvvrqWVZETAAAAyVJREFU7/56XFycm1kSERERERGRrykpKUFkZGS1Pw/4Yl6j0Uix2Wx265mq94WHh/vs2BkZGZg9e7Z0bbfbcfXqVbRo0QKCINQhWyIiIiIiIpKbKIooKSmpte9awBfzTZs2leLKy9FrUl5eXu07fG1stVr9uyX87u7PJyIiIiIiIt9T04x8hYBvgBcTEyPF169fd+uZin3nAKDVahEaGup3YxMREREREVHgCvhiPjExUYrPnz/v1jNFRUUun/ensYmIiIiIiChwBXwxn5SUJMUmk8mpWK5OQUGBy+f9aWwiIiIiIiIKXAFfzA8cOBBRUVHSdX5+fo33i6LodM+IESP8cmwiIiIiIiIKXAFfzIeGhmL06NHS9fbt22u8/8CBA9L+9oiICCQnJ/vl2ERERERERBS4Ar6YB4Cnn34aCoXjl7p27doaj4nLysqS4scff9zpeDl/G5uIiIiIiIgCU1AU871790ZaWhoA4NKlS8jMzHR5X0FBAZYuXQoAiI6ORkZGhsv7LBYLUlJS0LRpU/Tp0weHDh1qtLGJiIiIiIiIBFEURbmTaAwGgwFDhw7F/v37ERISgmXLlklFNgCcPHkSo0aNwokTJ6BSqbBt2zYMHTrU5buWL1+ORx99VLoeOnQovvnmm0YZm4iIiIiIiCgoZuYBIDw8HFu3bkVycjKsVivS09PRo0cPTJgwAcOGDUNiYiJOnDiB2NhYfP7553UqpgVBkG1sIiIiIiIiCj5BMzNf2WeffYaVK1di//79uHDhArRaLTp37oxx48YhLS0NzZs3r/F5i8WCtLQ05OTkICEhAatWrULv3r0bZWwiIiIiIiKioCzmiYiIiIiIiPxZ0CyzJyIiIiIiIgoULOaJiIiIiIiI/AyLeSIiIiIiIiI/w2KeiIiIiIiIyM+wmCciIiIiIiLyMyzmiYiIiIiIiPwMi3kiIiIiIiIiP8NinoiIiIiIiMjPsJgnIiIiIiIi8jMs5omIiIiIiIj8DIt5IiIiIiIiIj/DYp6IiIiIiIjIz7CYJyIiIiIiIvIzLOaJiIiIiIiI/AyLeSIiIiIiIiI/8/8BRo5tJKqIneEAAAAASUVORK5CYIA==",
"text/plain": [
"