diff --git a/src/pygenstability/pygenstability.py b/src/pygenstability/pygenstability.py
index d526655..4c8779f 100644
--- a/src/pygenstability/pygenstability.py
+++ b/src/pygenstability/pygenstability.py
@@ -110,13 +110,19 @@ def _get_params(all_locals):
 @_timing
 def _get_constructor_data(constructor, scales, pool, tqdm_disable=False):
     return list(
-        tqdm(pool.imap(constructor.get_data, scales), total=len(scales), disable=tqdm_disable)
+        tqdm(
+            pool.imap(constructor.get_data, scales),
+            total=len(scales),
+            disable=tqdm_disable,
+        )
     )
 
 
 def _check_method(method):  # pragma: no cover
     if _NO_LEIDEN and _NO_LOUVAIN:
-        raise Exception("Without Louvain or Leiden solver, we cannot run PyGenStability")
+        raise Exception(
+            "Without Louvain or Leiden solver, we cannot run PyGenStability"
+        )
 
     if method == "louvain" and _NO_LOUVAIN:
         print("Louvain is not available, we fallback to leiden.")
@@ -195,9 +201,12 @@ def run(
             - 'run_params': dict with parameters used to run the code
             - 'scales': scales of the scan
             - 'number_of_communities': number of communities at each scale
+            - 'stability': value of stability cost function at each scale
             - 'community_id': community node labels at each scale
-            - 'NVI': NVI at each scale
-            - 'ttprime': ttprime matrix
+            - 'NVI': NVI(t) at each scale
+            - 'ttprime': NVI(t,tprime) matrix
+            - 'block_detection_curve': block detection curve (included if with_optimal_scales==True)
+            - 'selected_partitions': selected partitions (included if with_optimal_scales==True)
     """
     method = _check_method(method)
     run_params = _get_params(locals())
@@ -240,13 +249,17 @@ def run(
         communities = _process_runs(t, results, all_results)
 
         if with_NVI:
-            _compute_NVI(communities, all_results, pool, n_partitions=min(n_NVI, n_tries))
+            _compute_NVI(
+                communities, all_results, pool, n_partitions=min(n_NVI, n_tries)
+            )
 
         save_results(all_results, filename=result_file)
 
     if with_postprocessing:
         L.info("Apply postprocessing...")
-        _apply_postprocessing(all_results, pool, constructor_data, tqdm_disable, method=method)
+        _apply_postprocessing(
+            all_results, pool, constructor_data, tqdm_disable, method=method
+        )
 
     if with_ttprime or with_optimal_scales:
         L.info("Compute ttprimes...")
@@ -260,7 +273,9 @@ def run(
                 "window_size": max(2, int(0.1 * n_scale)),
                 "basin_radius": max(1, int(0.01 * n_scale)),
             }
-        all_results = identify_optimal_scales(all_results, **optimal_scales_kwargs)
+        all_results = identify_optimal_scales(
+            all_results, **optimal_scales_kwargs
+        )
 
     save_results(all_results, filename=result_file)
 
@@ -289,7 +304,9 @@ def _compute_NVI(communities, all_results, pool, n_partitions=10):
     worker = partial(evaluate_NVI, partitions=selected_partitions)
     index_pairs = [[i, j] for i in range(n_partitions) for j in range(n_partitions)]
     chunksize = _get_chunksize(len(index_pairs), pool)
-    all_results["NVI"].append(np.mean(list(pool.imap(worker, index_pairs, chunksize=chunksize))))
+    all_results["NVI"].append(
+        np.mean(list(pool.imap(worker, index_pairs, chunksize=chunksize)))
+    )
 
 
 def evaluate_NVI(index_pair, partitions):
@@ -334,7 +351,9 @@ def _to_indices(matrix, directed=False):
 
 
 @_timing
-def _optimise(_, quality_indices, quality_values, null_model, global_shift, method="louvain"):
+def _optimise(
+    _, quality_indices, quality_values, null_model, global_shift, method="louvain"
+):
     """Worker for generalized Markov Stability optimisation runs."""
     if method == "louvain":
         stability, community_id = generalized_louvain.run_louvain(
@@ -357,7 +376,10 @@ def _optimise(_, quality_indices, quality_values, null_model, global_shift, meth
         for null in null_model[::2]:
             partitions.append(
                 leidenalg.CPMVertexPartition(
-                    G, weights=quality_values, node_sizes=null.tolist(), correct_self_loops=True
+                    G,
+                    weights=quality_values,
+                    node_sizes=null.tolist(),
+                    correct_self_loops=True,
                 )
             )
         optimiser = leidenalg.Optimiser()
@@ -371,7 +393,9 @@ def _optimise(_, quality_indices, quality_values, null_model, global_shift, meth
     return stability + global_shift, community_id
 
 
-def _evaluate_quality(partition_id, qualities_index, null_model, global_shift, method="louvain"):
+def _evaluate_quality(
+    partition_id, qualities_index, null_model, global_shift, method="louvain"
+):
     """Worker for generalized Markov Stability optimisation runs."""
     if method == "louvain":
         quality = generalized_louvain.evaluate_quality(
@@ -428,14 +452,18 @@ def _compute_ttprime(all_results, pool):
     chunksize = _get_chunksize(len(index_pairs), pool)
     ttprime_list = pool.map(worker, index_pairs, chunksize=chunksize)
 
-    all_results["ttprime"] = np.zeros([len(all_results["scales"]), len(all_results["scales"])])
+    all_results["ttprime"] = np.zeros(
+        [len(all_results["scales"]), len(all_results["scales"])]
+    )
     for i, ttp in enumerate(ttprime_list):
         all_results["ttprime"][index_pairs[i][0], index_pairs[i][1]] = ttp
     all_results["ttprime"] += all_results["ttprime"].T
 
 
 @_timing
-def _apply_postprocessing(all_results, pool, constructors, tqdm_disable=False, method="louvain"):
+def _apply_postprocessing(
+    all_results, pool, constructors, tqdm_disable=False, method="louvain"
+):
     """Apply postprocessing."""
     all_results_raw = all_results.copy()
 
@@ -457,8 +485,10 @@ def _apply_postprocessing(all_results, pool, constructors, tqdm_disable=False, m
             )
         )
 
-        all_results["community_id"][i] = all_results_raw["community_id"][best_quality_id]
-        all_results["stability"][i] = all_results_raw["stability"][best_quality_id]
-        all_results["number_of_communities"][i] = all_results_raw["number_of_communities"][
-            best_quality_id
-        ]
+        all_results["community_id"][i] = all_results_raw["community_id"][
+            best_quality_id
+        ]
+        all_results["stability"][i] = all_results_raw["stability"][best_quality_id]
+        all_results["number_of_communities"][i] = all_results_raw[
+            "number_of_communities"
+        ][best_quality_id]
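
The docstring hunk above lists the entries of the results dict returned by run(). For orientation, here is a minimal usage sketch (not part of the patch), assuming run() accepts a SciPy sparse adjacency matrix; only keyword arguments that appear in the diff are used, and the toy ring graph and the chosen values are illustrative.

# Minimal sketch (not part of the diff): call run() and read the result keys
# documented in the updated docstring. Assumes run() accepts a SciPy sparse
# adjacency matrix; the tiny ring graph below is illustrative only.
import numpy as np
import scipy.sparse as sp

from pygenstability.pygenstability import run

# Undirected 6-node ring as a symmetric sparse adjacency matrix.
n_nodes = 6
adjacency = np.zeros((n_nodes, n_nodes))
for i in range(n_nodes):
    j = (i + 1) % n_nodes
    adjacency[i, j] = adjacency[j, i] = 1.0
graph = sp.csr_matrix(adjacency)

all_results = run(
    graph,
    n_scale=20,                # number of scales in the scan
    n_tries=25,                # optimisation runs per scale
    with_NVI=True,             # fills all_results["NVI"]
    with_ttprime=True,         # fills all_results["ttprime"]
    with_optimal_scales=True,  # adds "block_detection_curve" and "selected_partitions"
    method="louvain",          # falls back to "leiden" if Louvain is unavailable
    result_file="results.pkl",
)

print(all_results["scales"])                # scales of the scan
print(all_results["number_of_communities"]) # communities found at each scale
print(all_results["stability"])             # stability cost at each scale
print(all_results["NVI"])                   # NVI(t) at each scale
print(all_results["ttprime"].shape)         # NVI(t, tprime) matrix
print(all_results["selected_partitions"])   # only with with_optimal_scales=True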