Activate more ruff rules #270

Merged · 9 commits · Mar 4, 2024
Changes from all commits:
2 changes: 1 addition & 1 deletion docs/conf.py
@@ -105,7 +105,7 @@ def copy_thumbnails():
         if len(c) == 0:
             all_directories = b
             continue
-        elif len(b) != 0:
+        if len(b) != 0:
             raise NotImplementedError(
                 "Not yet implemented for the case with more than one nested directory."
             )
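
Note: this hunk is the kind of cleanup the new "RET" selection enforces (plausibly RET507, superfluous-else-continue): once a branch ends in continue, an elif is redundant and a plain if reads cleaner. A minimal sketch of the pattern, with made-up values:

    for size in [0, 1, 2]:
        if size == 0:
            continue
        if size > 1:  # was: elif size > 1; equivalent, since the first branch never falls through
            print("more than one:", size)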
7 changes: 7 additions & 0 deletions pyproject.toml
@@ -84,7 +84,14 @@ select = [
"W", # warnings
"I", # import order
"UP", # pyupgrade rules
"B", # bugbear rules
"PIE", # pie rules
"Q", # quote rules
"RET", # return rules
"SIM", # code simplifications
"NPY", # numpy rules
"PERF", # performance rules
"RUF", # miscellaneous rules
]
ignore = ["E501"] # line too long

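Note: a brief illustration of what one of the new selections buys, since "B" (flake8-bugbear) is the broadest addition here. Bugbear flags likely bugs such as mutable default arguments (B006); this sketch is illustrative, not from this repository:

    def append_bad(x, acc=[]):  # B006: the default list is created once and shared
        acc.append(x)
        return acc

    def append_good(x, acc=None):  # the usual fix
        if acc is None:
            acc = []
        acc.append(x)
        return acc

    assert append_bad(1) == [1]
    assert append_bad(2) == [1, 2]  # surprise: state leaked across calls
    assert append_good(2) == [2]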
4 changes: 3 additions & 1 deletion test/nn/cell/test_can.py
@@ -25,7 +25,9 @@ def test_forward(self):
         x_0 = torch.rand(2, 2)
         x_1 = torch.rand(2, 2)
 
-        adjacency_1 = torch.from_numpy(np.random.rand(2, 2)).to_sparse()
+        adjacency_1 = torch.from_numpy(
+            np.random.default_rng().random((2, 2))
+        ).to_sparse()
 
         x_0, x_1 = (
             torch.tensor(x_0).float().to(device),
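
Note: this is the NPY002 migration that recurs throughout the PR: legacy np.random.* calls draw from a hidden global RandomState, while the Generator API makes the randomness source explicit and local. A minimal sketch of the two styles:

    import numpy as np

    legacy = np.random.rand(2, 2)  # flagged by NPY002

    rng = np.random.default_rng()  # or default_rng(seed) for reproducibility
    modern = rng.random((2, 2))    # note the shape is passed as a tuple

    assert legacy.shape == modern.shape == (2, 2)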
4 changes: 2 additions & 2 deletions test/nn/cell/test_can_layer.py
@@ -71,10 +71,10 @@ def test_forward(self):
 
         # Test if there are no non-zero values in the neighborhood
         heads = 1
-        concat = [True, False]
+        concat_list = [True, False]
         skip_connection = True
 
-        for concat in concat:
+        for concat in concat_list:
             for version in ["v1", "v2"]:
                 can_layer = CANLayer(
                     in_channels=in_channels,
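
Note: the rename fixes a bugbear-style finding (B020, loop control variable overriding the iterable): for concat in concat rebinds the name, so after the loop it holds the last element instead of the list. A small sketch:

    flags = [True, False]

    # Before (flagged): for flags in flags: ...
    # afterwards, flags would be False, not the list.
    for flag in flags:  # distinct names keep both usable
        assert isinstance(flag, bool)
    assert flags == [True, False]  # the iterable survives the loop unchanged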
4 changes: 3 additions & 1 deletion test/nn/combinatorial/test_hmc.py
@@ -20,7 +20,9 @@ def test_forward(self):
         x_0 = torch.rand(2, 2)
         x_1 = torch.rand(2, 2)
         x_2 = torch.rand(2, 2)
-        adjacency_0 = torch.from_numpy(np.random.rand(2, 2)).to_sparse()
+        adjacency_0 = torch.from_numpy(
+            np.random.default_rng().random((2, 2))
+        ).to_sparse()
 
         x_0, x_1, x_2 = (
             torch.tensor(x_0).float().to(device),
2 changes: 1 addition & 1 deletion test/nn/combinatorial/test_hmc_layer.py
@@ -409,7 +409,7 @@ class TestHBS:
 
     def set_weights_to_one(self):
         """Set the weights to constant values."""
-        for w, a in zip(self.hbs.weight, self.hbs.att_weight):
+        for w, a in zip(self.hbs.weight, self.hbs.att_weight, strict=False):
            torch.nn.init.constant_(w, 1.0)
            torch.nn.init.constant_(a, 1.0)
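
Note: strict= addresses bugbear's B905: a bare zip silently truncates to the shortest input. strict=False keeps the old truncating behavior but makes it explicit; strict=True makes a length mismatch raise. A minimal sketch (requires Python 3.10+):

    a = [1, 2, 3]
    b = ["x", "y"]

    assert list(zip(a, b, strict=False)) == [(1, "x"), (2, "y")]  # truncated

    try:
        list(zip(a, b, strict=True))
    except ValueError as err:
        print(err)  # zip() argument 2 is shorter than argument 1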
4 changes: 3 additions & 1 deletion test/nn/hypergraph/test_allset.py
@@ -20,7 +20,9 @@ def test_forward(self):
         ).to(device)
 
         x_0 = torch.rand(4, 4)
-        incidence_1 = torch.from_numpy(np.random.rand(4, 4)).to_sparse()
+        incidence_1 = torch.from_numpy(
+            np.random.default_rng().random((4, 4))
+        ).to_sparse()
 
         x_0 = x_0.float().to(device)
         incidence_1 = incidence_1.float().to(device)
3 changes: 1 addition & 2 deletions test/nn/hypergraph/test_allset_layer.py
@@ -13,7 +13,7 @@ def allset_layer(self):
         """Return a AllSet layer."""
         in_dim = 10
         hid_dim = 64
-        layer = AllSetLayer(
+        return AllSetLayer(
             in_channels=in_dim,
             hidden_channels=hid_dim,
             dropout=0.0,
@@ -22,7 +22,6 @@ def allset_layer(self):
             mlp_dropout=0.0,
             mlp_norm=None,
         )
-        return layer
 
     def test_forward(self, allset_layer):
         """Test the forward pass of the AllSet layer."""
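
Note: this is ruff's RET504 (unnecessary assignment before return): binding a value to a name only to return it on the next line adds nothing. Sketch:

    # Before (flagged):
    def make_config_verbose():
        config = {"dropout": 0.0, "heads": 4}
        return config

    # After: return the expression directly.
    def make_config():
        return {"dropout": 0.0, "heads": 4}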
4 changes: 3 additions & 1 deletion test/nn/hypergraph/test_allset_transformer.py
@@ -21,7 +21,9 @@ def test_forward(self):
         ).to(device)
 
         x_0 = torch.rand(2, 2)
-        incidence_1 = torch.from_numpy(np.random.rand(2, 2)).to_sparse()
+        incidence_1 = torch.from_numpy(
+            np.random.default_rng().random((2, 2))
+        ).to_sparse()
 
         x_0 = x_0.float().to(device)
         incidence_1 = incidence_1.float().to(device)
3 changes: 1 addition & 2 deletions test/nn/hypergraph/test_allset_transformer_layer.py
@@ -17,7 +17,7 @@ def allset_transformer_layer(self):
         in_dim = 10
         hid_dim = 64
         heads = 4
-        layer = AllSetTransformerLayer(
+        return AllSetTransformerLayer(
             in_channels=in_dim,
             hidden_channels=hid_dim,
             heads=heads,
@@ -28,7 +28,6 @@ def allset_transformer_layer(self):
             mlp_dropout=0.0,
             mlp_norm=None,
         )
-        return layer
 
     def test_forward(self, allset_transformer_layer):
         """Test the forward pass of the allsettransformer layer."""
4 changes: 3 additions & 1 deletion test/nn/hypergraph/test_hmpnn.py
@@ -23,7 +23,9 @@ def test_forward(self):
         x_0 = torch.rand(n_nodes, in_channels).float().to(device)
         x_1 = torch.rand(n_edges, in_channels).float().to(device)
 
-        adjacency_1 = torch.from_numpy(np.random.rand(n_nodes, n_edges)).to_sparse()
+        adjacency_1 = torch.from_numpy(
+            np.random.default_rng().random((n_nodes, n_edges))
+        ).to_sparse()
         adjacency_1 = adjacency_1.float().to(device)
 
         x_0, x_1 = model(x_0, x_1, adjacency_1)
4 changes: 3 additions & 1 deletion test/nn/hypergraph/test_hnhn.py
@@ -13,7 +13,9 @@ def test_forward(self):
         """Test forward method."""
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-        adjacency_1 = torch.from_numpy(np.random.rand(2, 2)).to_sparse()
+        adjacency_1 = torch.from_numpy(
+            np.random.default_rng().random((2, 2))
+        ).to_sparse()
         adjacency_1 = adjacency_1.float()
         hidden_channels = 5
4 changes: 3 additions & 1 deletion test/nn/hypergraph/test_hypergat.py
@@ -14,7 +14,9 @@ def test_forward(self):
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
         n_nodes, n_edges = 2, 2
-        incidence = torch.from_numpy(np.random.rand(n_nodes, n_edges)).to_sparse()
+        incidence = torch.from_numpy(
+            np.random.default_rng().random((n_nodes, n_edges))
+        ).to_sparse()
         incidence = incidence.float().to(device)
 
         in_channels, hidden_channels = 2, 6
2 changes: 1 addition & 1 deletion test/nn/hypergraph/test_hypersage.py
@@ -13,7 +13,7 @@ def test_forward(self):
         """Test forward method."""
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-        incidence = torch.from_numpy(np.random.rand(2, 2)).to_sparse()
+        incidence = torch.from_numpy(np.random.default_rng().random((2, 2))).to_sparse()
         incidence = incidence.float().to(device)
         model = HyperSAGE(
             in_channels=2,
4 changes: 3 additions & 1 deletion test/nn/hypergraph/test_unigcn.py
@@ -13,7 +13,9 @@ def test_forward(self):
         """Test forward method."""
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         n_nodes, n_edges = 2, 5
-        incidence = torch.from_numpy(np.random.rand(n_nodes, n_edges)).to_sparse()
+        incidence = torch.from_numpy(
+            np.random.default_rng().random((n_nodes, n_edges))
+        ).to_sparse()
         incidence = incidence.float().to(device)
         in_channels, hidden_channels = 2, 10
         model = UniGCN(
4 changes: 3 additions & 1 deletion test/nn/hypergraph/test_unigcnii.py
@@ -14,7 +14,9 @@ def test_forward(self):
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
         n_nodes, n_edges = 2, 5
-        incidence = torch.from_numpy(np.random.rand(n_nodes, n_edges)).to_sparse()
+        incidence = torch.from_numpy(
+            np.random.default_rng().random((n_nodes, n_edges))
+        ).to_sparse()
         incidence = incidence.float().to(device)
         in_channels, hidden_channels = 2, 10
         model = UniGCNII(
4 changes: 3 additions & 1 deletion test/nn/hypergraph/test_unigin.py
@@ -14,7 +14,9 @@ def test_forward(self):
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
         n_nodes, n_edges = 2, 5
-        incidence = torch.from_numpy(np.random.rand(n_nodes, n_edges)).to_sparse()
+        incidence = torch.from_numpy(
+            np.random.default_rng().random((n_nodes, n_edges))
+        ).to_sparse()
         incidence = incidence.float().to(device)
         in_channels, hidden_channels = 2, 10
         model = UniGIN(
4 changes: 3 additions & 1 deletion test/nn/hypergraph/test_unigin_layer.py
@@ -18,7 +18,9 @@ def UniGIN_layer(self):
     def test_forward(self, UniGIN_layer):
         """Test the forward pass of the UniGIN layer."""
         n_nodes, n_edges = 2, 3
-        incidence = torch.from_numpy(np.random.rand(n_nodes, n_edges)).to_sparse()
+        incidence = torch.from_numpy(
+            np.random.default_rng().random((n_nodes, n_edges))
+        ).to_sparse()
         incidence = incidence.float()
         x_0 = torch.rand(n_nodes, self.in_channels).float()
         x_0, x_1 = UniGIN_layer.forward(x_0, incidence)
4 changes: 3 additions & 1 deletion test/nn/hypergraph/test_unisage.py
@@ -14,7 +14,9 @@ def test_forward(self):
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
         n_nodes, n_edges = 2, 5
-        incidence = torch.from_numpy(np.random.rand(n_nodes, n_edges)).to_sparse()
+        incidence = torch.from_numpy(
+            np.random.default_rng().random((n_nodes, n_edges))
+        ).to_sparse()
         incidence = incidence.float().to(device)
         in_channels, hidden_channels = 2, 10
         model = UniSAGE(
19 changes: 5 additions & 14 deletions test/nn/simplicial/test_sca_cmps_layer.py
@@ -24,7 +24,7 @@ def test_sca_cmps_forward(self):
             incidence_t_list.append(incidence_transpose)
 
         x_list = []
-        for chan, n in zip(channels_list, n_chains_list):
+        for chan, n in zip(channels_list, n_chains_list, strict=False):
             x = torch.randn(n, chan)
             x_list.append(x)
 
@@ -34,7 +34,7 @@ def test_sca_cmps_forward(self):
         )
         output = sca_cmps.forward(x_list, down_lap_list, incidence_t_list)
 
-        for x, n, chan in zip(output, n_chains_list, channels_list):
+        for x, n, chan in zip(output, n_chains_list, channels_list, strict=False):
             assert x.shape == (n, chan)
 
     def test_reset_parameters(self):
@@ -55,21 +55,12 @@ def test_reset_parameters(self):
             param.add_(1.0)
 
         sca.reset_parameters()
 
         reset_params = []
         for module in sca.modules():
             if isinstance(module, torch.nn.ModuleList):
                 for sub in module:
                     if isinstance(sub, Conv):
-                        reset_params.append(list(sub.parameters()))
-
-        count = 0
-        for module, reset_param, initial_param in zip(
-            sca.modules(), reset_params, initial_params
-        ):
-            if isinstance(module, torch.nn.ModuleList):
-                for sub, r_param, i_param in zip(module, reset_param, initial_param):
-                    if isinstance(sub, Conv):
-                        torch.testing.assert_close(i_param, r_param)
-                        count += 1
-        assert count > 0  # Ensuring if-statements were not just failed
+                        reset_params.append(list(sub.parameters()))  # noqa: PERF401
+
+        assert initial_params == reset_params
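
Note: the # noqa: PERF401 opts out of ruff's suggestion to rewrite the append loop as a comprehension; with the nested isinstance checks, the loop form is arguably clearer here. For reference, the transformation PERF401 normally asks for, as a generic sketch:

    values = [1, "a", 2.5, 3]

    ints = []
    for v in values:          # loop-with-append form (what PERF401 flags)
        if isinstance(v, int):
            ints.append(v)

    ints_fast = [v for v in values if isinstance(v, int)]  # suggested form

    assert ints == ints_fast == [1, 3]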
8 changes: 2 additions & 6 deletions test/nn/simplicial/test_scnn.py
@@ -66,12 +66,8 @@ def get_simplicial_features(dataset, rank):
                 "input dimension must be 0, 1 or 2, because features are supported on nodes, edges and faces"
             )
 
-        x = []
-        for _, v in dataset.get_simplex_attributes(which_feat).items():
-            x.append(v)
-
-        x = torch.tensor(np.stack(x))
-        return x
+        x = list(dataset.get_simplex_attributes(which_feat).values())
+        return torch.tensor(np.stack(x))
 
     model = SCNN(
         in_channels=in_channels,
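
Note: two rules combine in this hunk: the loop over .items() that discarded the key becomes a direct .values() call, and the temporary x before the final return falls to RET504. A generic sketch with made-up data:

    attrs = {"n0": [1.0], "n1": [2.0]}

    # Before: xs = []; for _, v in attrs.items(): xs.append(v); return xs
    # After: take the values directly and return the expression.
    xs = list(attrs.values())
    assert xs == [[1.0], [2.0]]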
32 changes: 16 additions & 16 deletions test/nn/simplicial/test_scone.py
@@ -18,11 +18,10 @@ class TestScone:
 
     def test_forward(self):
         """Test the forward method of Scone."""
-        np.random.seed(42)
         torch.manual_seed(42)
         random.seed(42)
         N = 150
-        sc, coords = generate_complex(N)
+        sc, coords = generate_complex(N, rng=np.random.default_rng(42))
         incidence_1 = torch.Tensor(sc.incidence_matrix(1).toarray())
         incidence_2 = torch.Tensor(sc.incidence_matrix(2).toarray())
 
@@ -37,27 +36,28 @@
         traj, mask, last_nodes = batch
         with torch.no_grad():
             forward_pass = model(traj, incidence_1, incidence_2)
         print(forward_pass[0][0])
         assert torch.any(
             torch.isclose(
                 forward_pass[0][0],
                 torch.tensor(
                     [
                         1.0000,
                         -0.9999,
                         0.8368,
                         -0.9315,
                         -0.1243,
                         -0.9998,
                         -0.9989,
                         -1.0000,
                         0.9306,
                         0.9723,
                         0.9349,
                         0.9901,
                         0.9999,
                         -0.4076,
                         0.9999,
                         0.7362,
                         0.9758,
                         -0.2776,
                         -0.8334,
                         0.6433,
                         -0.9924,
                         -0.9983,
                         -0.9999,
                         0.9906,
                         0.9882,
                         0.9999,
                         0.3494,
                         0.9565,
                         -0.9947,
                         -0.6742,
                     ]
                 ),
                 rtol=1e-02,
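
Note: the seeding strategy changes with NPY002 as well: instead of np.random.seed(42) mutating global state, a seeded Generator is built and passed explicitly (here through generate_complex's rng argument). A minimal sketch of why that stays reproducible; sample_edges is a hypothetical stand-in:

    import numpy as np

    def sample_edges(n, rng):
        return rng.random((n, n))  # all randomness comes from the passed-in Generator

    a = sample_edges(3, np.random.default_rng(42))
    b = sample_edges(3, np.random.default_rng(42))
    assert np.array_equal(a, b)  # same seed, same stream: identical draws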
2 changes: 1 addition & 1 deletion test/utils/test_scatter.py
@@ -35,7 +35,7 @@ def test_scatter(self):
         ]
         for scat in ["add", "sum", "mean"]:
             sc = scatter(scat)
-            for i in range(0, len(tests)):
+            for i in range(len(tests)):
                 computed = sc(
                     torch.tensor(tests[i]["src"]),
                     torch.tensor(tests[i]["index"]),
2 changes: 1 addition & 1 deletion test/utils/test_sparse.py
@@ -10,7 +10,7 @@
 def test_from_sparse():
     """Tests from_sparse matches sparse -> dense -> sparse."""
     # test values matche
-    test_matrix = sparse._csc.csc_matrix(np.random.rand(100, 100))
+    test_matrix = sparse._csc.csc_matrix(np.random.default_rng().random((100, 100)))
     a = torch.from_numpy(test_matrix.todense()).to_sparse()
     b = from_sparse(test_matrix)
1 change: 1 addition & 0 deletions topomodelx/base/aggregation.py
@@ -43,6 +43,7 @@
             return torch.nn.functional.relu(inputs)
         if self.update_func == "tanh":
             return torch.tanh(inputs)
+        return None
 
[Check annotation — hidden] Codecov / codecov/patch, topomodelx/base/aggregation.py#L46: Added line #L46 was not covered by tests.
 
     def forward(self, x):
         """Forward pass.
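
Note: return None satisfies ruff's RET503 (missing explicit return): once some paths return a value, the fall-through path should spell out its None. A tiny sketch mirroring the hunk above:

    import torch

    def apply_update(inputs, update_func):
        if update_func == "relu":
            return torch.nn.functional.relu(inputs)
        if update_func == "tanh":
            return torch.tanh(inputs)
        return None  # explicit, rather than an easy-to-miss implicit None

    assert apply_update(torch.zeros(2), "unknown") is None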
5 changes: 4 additions & 1 deletion topomodelx/nn/cell/can.py
@@ -51,13 +51,16 @@ def __init__(
         heads=2,
         concat=True,
         skip_connection=True,
-        att_activation=torch.nn.LeakyReLU(0.2),
+        att_activation=None,
         n_layers=2,
         att_lift=True,
         k_pool=0.5,
     ):
         super().__init__()
 
+        if att_activation is None:
+            att_activation = torch.nn.LeakyReLU(0.2)
+
         if att_lift:
             self.lift_layer = MultiHeadLiftLayer(
                 in_channels_0=in_channels_0,
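
Note: this follows bugbear's B008 (function call in default argument): a torch.nn.LeakyReLU(0.2) default is constructed once, when the def line is evaluated, and shared by every CAN instance; defaulting to None and instantiating inside __init__ gives each instance its own module. A hedged sketch of the pattern, with a made-up class:

    import torch

    class Head(torch.nn.Module):
        def __init__(self, activation=None):
            super().__init__()
            if activation is None:       # fresh module per instance
                activation = torch.nn.LeakyReLU(0.2)
            self.activation = activation

    h1, h2 = Head(), Head()
    assert h1.activation is not h2.activation  # no shared state between instances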