adding hypergraph/cell complex networks models/unit tests #182

Merged: 32 commits, Sep 8, 2023

Changes from 28 commits
40 changes: 40 additions & 0 deletions test/nn/cell/test_can.py
@@ -0,0 +1,40 @@
"""Test for the CAN class."""

import numpy as np
import torch

from topomodelx.nn.cell.can import CAN


class TestCAN:
"""Test CAN."""

def test_fowared(self):
Collaborator: Typo

Member Author: fixed across all files.

"""Test forward method."""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = CAN(
in_channels_0=2,
in_channels_1=2,
out_channels=2,
dropout=0.5,
heads=1,
num_classes=1,
n_layers=2,
att_lift=False,
        ).to(device)  # keep the model on the same device as the inputs

x_0 = torch.rand(2, 2)
x_1 = torch.rand(2, 2)

adjacency_1 = torch.from_numpy(np.random.rand(2, 2)).to_sparse()

        # x_0 and x_1 are already tensors; just cast and move them.
        x_0, x_1 = x_0.float().to(device), x_1.float().to(device)
        adjacency_1 = adjacency_1.float().to(device)
        # The same random sparse matrix stands in for every neighborhood
        # structure; only its shape matters for this smoke test.
        adjacency_2 = adjacency_1
        incidence_2 = adjacency_1

y = model(x_0, x_1, adjacency_1, adjacency_2, incidence_2)
assert y.shape == torch.Size([1])
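The random `adjacency_1` above is a stand-in for real neighborhood matrices. In an actual experiment these would come from a cell complex, typically as scipy sparse matrices (for example from TopoNetX). A minimal sketch of the conversion, not part of this PR; the helper name and the use of `scipy.sparse.random` as a data source are illustrative:

# Sketch (not part of this PR): converting a scipy sparse neighborhood
# matrix into the torch sparse tensor the model expects.
import numpy as np
import torch
from scipy.sparse import random as sparse_random


def to_torch_sparse(matrix, device):
    """Convert a scipy sparse matrix to a float torch sparse COO tensor."""
    coo = matrix.tocoo()
    indices = torch.from_numpy(np.vstack((coo.row, coo.col))).long()
    values = torch.from_numpy(coo.data).float()
    return torch.sparse_coo_tensor(indices, values, coo.shape).to(device)


adjacency_1 = to_torch_sparse(sparse_random(2, 2, density=0.5), torch.device("cpu"))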
37 changes: 37 additions & 0 deletions test/nn/cell/test_ccxn.py
@@ -0,0 +1,37 @@
"""Test for the CCXN class."""

import torch

from topomodelx.nn.cell.ccxn import CCXN


class TestCCXN:
"""Test CWN."""

def test_fowared(self):
Collaborator: typo

Member Author: done

"""Test forward method."""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = CCXN(
in_channels_0=2,
in_channels_1=2,
in_channels_2=2,
num_classes=1,
n_layers=2,
att=False,
        ).to(device)

x_0 = torch.rand(2, 2)
x_1 = torch.rand(2, 2)

adjacency_1 = torch.rand(2, 2)
incidence_2 = torch.rand(2, 2)

        # x_0 and x_1 are already tensors; just cast and move them.
        x_0, x_1 = x_0.float().to(device), x_1.float().to(device)
        adjacency_1 = adjacency_1.float().to(device)
        incidence_2 = incidence_2.float().to(device)

y = model(x_0, x_1, adjacency_1, incidence_2)
assert y.shape == torch.Size([1])
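Every test in this PR repeats the same CUDA-or-CPU selection line. A minimal sketch of how that boilerplate could be shared through a pytest fixture; the fixture is illustrative, not something the PR adds:

# Sketch (not part of this PR): a conftest.py fixture replacing the
# repeated device-selection line in each test.
import pytest
import torch


@pytest.fixture
def device():
    """Return the CUDA device when available, otherwise the CPU."""
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")

A test would then accept `device` as an argument instead of computing it inline.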
40 changes: 40 additions & 0 deletions test/nn/cell/test_cwn.py
@@ -0,0 +1,40 @@
"""Test for the CWN class."""

import torch

from topomodelx.nn.cell.cwn import CWN


class TestCWN:
"""Test CWN."""

def test_fowared(self):
Collaborator: typo

Member Author: done

"""Test forward method."""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = CWN(
in_channels_0=2,
in_channels_1=2,
in_channels_2=2,
hid_channels=16,
num_classes=1,
n_layers=2,
        ).to(device)

x_0 = torch.rand(2, 2)
x_1 = torch.rand(2, 2)
x_2 = torch.rand(2, 2)
adjacency_1 = torch.rand(2, 2)
incidence_2 = torch.rand(2, 2)
incidence_1_t = torch.rand(2, 2)

        # The features are already tensors; just cast and move them.
        x_0, x_1, x_2 = (
            x_0.float().to(device),
            x_1.float().to(device),
            x_2.float().to(device),
        )
adjacency_1 = adjacency_1.float().to(device)
incidence_2 = incidence_2.float().to(device)
incidence_1_t = incidence_1_t.float().to(device)

y = model(x_0, x_1, x_2, adjacency_1, incidence_2, incidence_1_t)
assert y.shape == torch.Size([1])
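Because every rank of the toy complex has exactly two cells, all the random 2x2 matrices happen to be shape-compatible. Spelling out the implied shape contract makes the test easier to generalize; the dimension names below are my reading of the CWN forward signature, not names from the library:

# Sketch (not part of this PR): the shape contract the random stand-ins satisfy.
import torch

n_nodes, n_edges, n_faces = 2, 2, 2
x_0 = torch.rand(n_nodes, 2)                  # node features
x_1 = torch.rand(n_edges, 2)                  # edge features
x_2 = torch.rand(n_faces, 2)                  # face features
adjacency_1 = torch.rand(n_edges, n_edges)    # edge-to-edge adjacency
incidence_2 = torch.rand(n_edges, n_faces)    # face-to-edge incidence (B_2)
incidence_1_t = torch.rand(n_edges, n_nodes)  # node-to-edge incidence, transposed (B_1^T)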
30 changes: 30 additions & 0 deletions test/nn/hypergraph/test_allset.py
@@ -0,0 +1,30 @@
"""Allset class."""

import numpy as np
import torch

from topomodelx.nn.hypergraph.allset import AllSet


class TestAllSet:
"""Test AllSet."""

def test_fowared(self):
"""Test forward method."""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = AllSet(
in_channels=4,
hidden_channels=4,
out_channels=4,
n_layers=2,
mlp_num_layers=1,
        ).to(device)

x_0 = torch.rand(4, 4)
incidence_1 = torch.from_numpy(np.random.rand(4, 4)).to_sparse()

        x_0 = x_0.float().to(device)  # x_0 is already a tensor; just cast and move it
incidence_1 = incidence_1.float().to(device)

y = model(x_0, incidence_1)
assert y.shape == torch.Size([])
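The `torch.Size([])` assertion says AllSet pools the whole hypergraph down to a single scalar, unlike the cell tests above, which check for shape `[1]`. A sketch of how the same smoke test could be parametrized over hypergraph sizes; that the wrapper accepts rectangular incidence matrices is an assumption, not something this test verifies:

# Sketch (not part of this PR): parametrizing the smoke test over sizes.
# Assumes AllSet accepts any consistent (n_nodes, n_edges) incidence shape.
import numpy as np
import pytest
import torch

from topomodelx.nn.hypergraph.allset import AllSet


@pytest.mark.parametrize("n_nodes, n_edges", [(4, 4), (8, 5)])
def test_forward_sizes(n_nodes, n_edges):
    """The readout should produce a scalar regardless of input size."""
    model = AllSet(
        in_channels=4, hidden_channels=4, out_channels=4, n_layers=2, mlp_num_layers=1
    )
    x_0 = torch.rand(n_nodes, 4)
    incidence_1 = torch.from_numpy(np.random.rand(n_nodes, n_edges)).to_sparse().float()
    assert model(x_0, incidence_1).shape == torch.Size([])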
159 changes: 20 additions & 139 deletions test/nn/hypergraph/test_allset_transformer.py
@@ -1,150 +1,31 @@
"""Test the AllSetTransformer layer."""
import pytest

import numpy as np
import torch

from topomodelx.nn.hypergraph.allset_transformer_layer import (
MLP,
AllSetTransformerLayer,
)
from topomodelx.nn.hypergraph.allset_transformer import AllSetTransformer


class TestAllSetTransformerLayer:
"""Test the AllSetTransformer layer."""
class TestAllSetTransfomer:
"""Test AllSet."""

@pytest.fixture
def allset_transformer_layer(self):
"""Return a allsettransformer layer."""
in_dim = 10
hid_dim = 64
heads = 4
layer = AllSetTransformerLayer(
in_channels=in_dim,
hidden_channels=hid_dim,
heads=heads,
number_queries=1,
dropout=0.0,
def test_fowared(self):
"""Test forward method."""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = AllSetTransformer(
in_channels=2,
hidden_channels=2,
heads=1,
out_channels=1,
n_layers=2,
mlp_num_layers=1,
mlp_activation=None,
mlp_dropout=0.0,
mlp_norm=None,
)
return layer

def test_forward(self, allset_transformer_layer):
"""Test the forward pass of the allsettransformer layer."""
x_0 = torch.randn(3, 10)
incidence_1 = torch.tensor(
[[1, 0, 0], [0, 1, 1], [1, 1, 1]], dtype=torch.float32
).to_sparse()
output = allset_transformer_layer.forward(x_0, incidence_1)
assert output.shape == (3, 64)

def test_forward_with_invalid_input(self, allset_transformer_layer):
"""Test the forward pass of the allsettransformer layer with invalid input."""
x_0 = torch.randn(4, 10)
incidence_1 = torch.tensor(
[[1, 0, 0], [0, 1, 1], [1, 1, 1]], dtype=torch.float32
).to_sparse()
with pytest.raises(ValueError):
allset_transformer_layer.forward(x_0, incidence_1)

def test_reset_parameters(self, allset_transformer_layer):
"""Test the reset_parameters method."""
in_dim = 10
hid_dim = 64
heads = 4

allset_transformer_layer.reset_parameters()
assert allset_transformer_layer.vertex2edge.mlp[0].weight.requires_grad
assert allset_transformer_layer.edge2vertex.mlp[0].weight.requires_grad

# Test with attention weights & xavier_uniform
allset_transformer_layer.vertex2edge.multihead_att.initialization = (
"xavier_uniform"
)
allset_transformer_layer.edge2vertex.multihead_att.initialization = (
"xavier_uniform"
)
allset_transformer_layer.reset_parameters()
assert allset_transformer_layer.vertex2edge.multihead_att.K_weight.shape == (
heads,
in_dim,
hid_dim // heads,
)
assert allset_transformer_layer.edge2vertex.multihead_att.K_weight.shape == (
heads,
hid_dim,
hid_dim // heads,
)

def test_initialisation_heads_zero(self):
"""Test the initialisation of the allsettransformer layer with invalid input."""
with pytest.raises(ValueError):
heads = 0
_ = AllSetTransformerLayer(
in_channels=10,
hidden_channels=64,
heads=heads,
)

def test_initialisation_heads_wrong(self):
"""Test the initialisation of the allsettransformer layer with invalid input."""
with pytest.raises(ValueError):
in_channels = 10
heads = 3

_ = AllSetTransformerLayer(
in_channels=in_channels,
hidden_channels=64,
heads=heads,
)

def test_initialisation_mlp_num_layers_zero(self):
"""Test the initialisation of the allsettransformer layer with invalid input."""
with pytest.raises(ValueError):
mlp_num_layers = 0
_ = AllSetTransformerLayer(
in_channels=10,
hidden_channels=64,
heads=4,
mlp_num_layers=mlp_num_layers,
)

def test_initialisation_mlp_num_layers_negative(self):
"""Test the initialisation of the allsettransformer layer with invalid input."""
with pytest.raises(ValueError):
mlp_num_layers = -1
_ = AllSetTransformerLayer(
in_channels=10,
hidden_channels=64,
heads=4,
mlp_num_layers=mlp_num_layers,
)

def test_MLP(self):
"""Test the MLP class.
x_0 = torch.rand(2, 2)
incidence_1 = torch.from_numpy(np.random.rand(2, 2)).to_sparse()

(used in AllSetTransformerLayer)
"""
in_channels_ = [10]
hidden_channels_ = [[64], [64, 64]]
norm_layers = [None, torch.nn.LayerNorm]
activation_layers = [torch.nn.ReLU, torch.nn.LeakyReLU]
dropouts = [0.0, 0.5]
bias_ = [True, False]
x_0 = torch.tensor(x_0).float().to(device)
incidence_1 = incidence_1.float().to(device)

for in_channels in in_channels_:
for hidden_channels in hidden_channels_:
for norm_layer in norm_layers:
for activation_layer in activation_layers:
for dropout in dropouts:
for bias in bias_:
mlp = MLP(
in_channels=in_channels,
hidden_channels=hidden_channels,
norm_layer=norm_layer,
activation_layer=activation_layer,
dropout=dropout,
bias=bias,
)
assert mlp is not None
y = model(x_0, incidence_1)
assert y.shape == torch.Size([])
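The net effect of this diff is that the layer-level tests (the fixture, the invalid-input and initialization checks, and the MLP grid) are replaced by the same model-level smoke test used for the other networks in this PR. Since that pattern now repeats across the test files, a shared helper could collapse the duplication. A sketch with illustrative names, not part of the PR:

# Sketch (not part of this PR): the smoke-test pattern shared by the new
# test files, factored into one helper.
import torch


def run_smoke_test(model, inputs, expected_shape):
    """Forward a model on the available device and check the output shape."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    inputs = [t.float().to(device) for t in inputs]
    output = model(*inputs)
    assert output.shape == torch.Size(expected_shape)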