Skip to content

Commit

Permalink
Merge branch 'master' into add_mrds_script_part3
Browse files Browse the repository at this point in the history
  • Loading branch information
arnaudbore committed Nov 6, 2024
2 parents acd20f7 + 96f56ce commit 9f3e579
Show file tree
Hide file tree
Showing 2 changed files with 91 additions and 101 deletions.
126 changes: 62 additions & 64 deletions scripts/scil_bundle_shape_measures.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,70 +215,68 @@ def main():
pool.join()

output_measures_dict = {}
for measure_dict in all_measures_dict:
# Empty bundle should not make the script crash
if measure_dict is not None:
for measure_name in measure_dict.keys():
# Create an empty list first
if measure_name not in output_measures_dict:
output_measures_dict[measure_name] = []
output_measures_dict[measure_name].append(
measure_dict[measure_name])
# add group stats if user wants
if args.group_statistics:
# length and span are weighted by streamline count
group_total_length = np.sum(
np.multiply(output_measures_dict['avg_length'],
output_measures_dict['streamlines_count']))
group_total_span = np.sum(
np.multiply(output_measures_dict['span'],
output_measures_dict['streamlines_count']))
group_streamlines_count = \
np.sum(output_measures_dict['streamlines_count'])
group_avg_length = group_total_length / group_streamlines_count
group_avg_span = group_total_span / group_streamlines_count
group_avg_vol = np.average(output_measures_dict['volume'])
group_avg_diam = \
2 * np.sqrt(group_avg_vol / (np.pi * group_avg_length))
output_measures_dict['group_stats'] = {}
output_measures_dict['group_stats']['total_streamlines_count'] = \
float(group_streamlines_count)
output_measures_dict['group_stats']['avg_streamline_length'] = \
group_avg_length
# max and min length of all streamlines in all input bundles
output_measures_dict['group_stats']['max_streamline_length'] = \
float(np.max(output_measures_dict['max_length']))
output_measures_dict['group_stats']['min_streamline_length'] = \
float(np.min(output_measures_dict['min_length']))
output_measures_dict['group_stats']['avg_streamline_span'] = \
group_avg_span
# computed with other set averages and not weighted by streamline count
output_measures_dict['group_stats']['avg_volume'] = group_avg_vol
output_measures_dict['group_stats']['avg_curl'] = \
group_avg_length / group_avg_span
output_measures_dict['group_stats']['avg_diameter'] = group_avg_diam
output_measures_dict['group_stats']['avg_elongation'] = \
group_avg_length / group_avg_diam
output_measures_dict['group_stats']['avg_surface_area'] = \
np.average(output_measures_dict['surface_area'])
output_measures_dict['group_stats']['avg_irreg'] = \
np.average(output_measures_dict['irregularity'])
output_measures_dict['group_stats']['avg_end_surface_area_head'] = \
np.average(output_measures_dict['end_surface_area_head'])
output_measures_dict['group_stats']['avg_end_surface_area_tail'] = \
np.average(output_measures_dict['end_surface_area_tail'])
output_measures_dict['group_stats']['avg_radius_head'] = \
np.average(output_measures_dict['radius_head'])
output_measures_dict['group_stats']['avg_radius_tail'] = \
np.average(output_measures_dict['radius_tail'])
output_measures_dict['group_stats']['avg_irregularity_head'] = \
np.average(
output_measures_dict['irregularity_of_end_surface_head'])
output_measures_dict['group_stats']['avg_irregularity_tail'] = \
np.average(
output_measures_dict['irregularity_of_end_surface_tail'])
output_measures_dict['group_stats']['avg_fractal_dimension'] = \
np.average(output_measures_dict['fractal_dimension'])
# NOTE(review): fragment of main() taken from a diff view; the enclosing
# `def main():`, argument parsing and the worker pool that fills
# all_measures_dict are outside this excerpt. Indentation below is
# reconstructed — confirm against the full file.
if len(args.in_bundles) == 1:
    # Single input bundle: report its measure dict directly, no aggregation.
    output_measures_dict = all_measures_dict[0]
else:
    # Multiple bundles: collect each per-bundle value into a list keyed by
    # measure name, so group statistics can be computed below.
    for measure_dict in all_measures_dict:
        # Empty bundle should not make the script crash
        if measure_dict is not None:
            for measure_name in measure_dict.keys():
                # Create an empty list first
                if measure_name not in output_measures_dict:
                    output_measures_dict[measure_name] = []
                output_measures_dict[measure_name].append(
                    measure_dict[measure_name])
    # add group stats if user wants
    if args.group_statistics:
        # length and span are weighted by streamline count
        group_total_length = np.sum(
            np.multiply(output_measures_dict['avg_length'],
                        output_measures_dict['streamlines_count']))
        group_total_span = np.sum(
            np.multiply(output_measures_dict['span'],
                        output_measures_dict['streamlines_count']))
        group_streamlines_count = \
            np.sum(output_measures_dict['streamlines_count'])
        group_avg_length = group_total_length / group_streamlines_count
        group_avg_span = group_total_span / group_streamlines_count
        group_avg_vol = np.average(output_measures_dict['volume'])
        # Diameter of a cylinder with the group's average volume and length.
        group_avg_diam = \
            2 * np.sqrt(group_avg_vol / (np.pi * group_avg_length))
        output_measures_dict['group_stats'] = {}
        output_measures_dict['group_stats']['total_streamlines_count'] = \
            float(group_streamlines_count)
        output_measures_dict['group_stats']['avg_streamline_length'] = \
            group_avg_length
        # max and min length of all streamlines in all input bundles
        output_measures_dict['group_stats']['max_streamline_length'] = \
            float(np.max(output_measures_dict['max_length']))
        output_measures_dict['group_stats']['min_streamline_length'] = \
            float(np.min(output_measures_dict['min_length']))
        output_measures_dict['group_stats']['avg_streamline_span'] = \
            group_avg_span
        # computed with other set averages and not weighted by
        # streamline count
        output_measures_dict['group_stats']['avg_volume'] = group_avg_vol
        output_measures_dict['group_stats']['avg_curl'] = \
            group_avg_length / group_avg_span
        output_measures_dict['group_stats']['avg_diameter'] = group_avg_diam
        output_measures_dict['group_stats']['avg_elongation'] = \
            group_avg_length / group_avg_diam
        output_measures_dict['group_stats']['avg_irregularity_head'] = \
            np.average(
                output_measures_dict['irregularity_of_end_surface_head'])
        output_measures_dict['group_stats']['avg_irregularity_tail'] = \
            np.average(
                output_measures_dict['irregularity_of_end_surface_tail'])

        # Remaining metrics are all plain (unweighted) averages, so they
        # are handled uniformly in one loop.
        list_metrics = ['surface_area', 'irregularity',
                        'end_surface_area_head',
                        'end_surface_area_tail', 'radius_head',
                        'radius_tail', 'fractal_dimension']
        for curr_metric in list_metrics:
            output_measures_dict['group_stats']['avg_' + curr_metric] = \
                np.average(output_measures_dict[curr_metric])

if args.out_json:
with open(args.out_json, 'w') as outfile:
Expand Down
66 changes: 29 additions & 37 deletions scripts/scil_json_convert_entries_to_xlsx.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,8 @@
from scilpy.io.utils import (add_overwrite_arg, add_verbose_arg,
assert_inputs_exist, assert_outputs_exist)

# Keys that hold lists of data_per_streamline / data_per_point names rather
# than scalar metric values; _parse_stats filters them out of its output.
dps_dpp = ['data_per_streamline_keys', 'data_per_point_keys']


def _get_all_bundle_names(stats):
bnames = set()
Expand Down Expand Up @@ -73,20 +75,16 @@ def _get_stats_parse_function(stats, stats_over_population):
first_bundle_stats.keys())[0]]

if len(first_bundle_stats.keys()) == 1 and\
_are_all_elements_scalars(first_bundle_stats):
_are_all_elements_scalars(first_bundle_stats): # when you have only one key per bundle
return _parse_scalar_stats
elif len(first_bundle_stats.keys()) == 4 and \
if len(first_bundle_stats.keys()) == 4 and \
set(first_bundle_stats.keys()) == \
set(['lesion_total_vol', 'lesion_avg_vol', 'lesion_std_vol',
'lesion_count']):
'lesion_count']): # when you have lesion stats
return _parse_lesion
elif len(first_bundle_stats.keys()) == 4 and \
set(first_bundle_stats.keys()) == \
set(['min_length', 'max_length', 'mean_length', 'std_length']):
return _parse_lengths
elif type(first_bundle_substat) is dict:
sub_keys = list(first_bundle_substat.keys())
if set(sub_keys) == set(['mean', 'std']):
if set(sub_keys) == set(['mean', 'std']): # when you have mean and std per stats
if stats_over_population:
return _parse_per_label_population_stats
else:
Expand All @@ -95,6 +93,8 @@ def _get_stats_parse_function(stats, stats_over_population):
return _parse_per_point_meanstd
elif _are_all_elements_scalars(first_bundle_substat):
return _parse_per_label_scalar
else: # when you have multiple metrics per bundle
return _parse_stats

raise IOError('Unable to recognize stats type!')

Expand Down Expand Up @@ -201,39 +201,31 @@ def _parse_scalar_lesions(stats, subs, bundles):
return dataframes, df_names


def _parse_lengths(stats, subs, bundles):
def _parse_stats(stats, subs, bundles):
nb_subs = len(subs)
nb_bundles = len(bundles)

min_lengths = np.full((nb_subs, nb_bundles), np.NaN)
max_lengths = np.full((nb_subs, nb_bundles), np.NaN)
mean_lengths = np.full((nb_subs, nb_bundles), np.NaN)
std_lengths = np.full((nb_subs, nb_bundles), np.NaN)

for sub_id, sub_name in enumerate(subs):
for bundle_id, bundle_name in enumerate(bundles):
b_stat = stats[sub_name].get(bundle_name)

if b_stat is not None:
min_lengths[sub_id, bundle_id] = b_stat['min_length']
max_lengths[sub_id, bundle_id] = b_stat['max_length']
mean_lengths[sub_id, bundle_id] = b_stat['mean_length']
std_lengths[sub_id, bundle_id] = b_stat['std_length']

dataframes = [pd.DataFrame(data=min_lengths,
index=subs,
columns=bundles),
pd.DataFrame(data=max_lengths,
index=subs,
columns=bundles),
pd.DataFrame(data=mean_lengths,
index=subs,
columns=bundles),
pd.DataFrame(data=std_lengths,
index=subs,
columns=bundles)]
dataframes = []

df_names = ["min_length", "max_length", "mean_length", "std_length"]
# Check all metrics keys
metrics_keys = stats[subs[0]][bundles[0]].keys()
df_names = list(metrics_keys)

for metric_name in metrics_keys:
if metric_name in dps_dpp: # remove dps and dpp keys
df_names.remove(metric_name)
else:
curr_metric = np.full((nb_subs, nb_bundles), np.NaN)
for bundle_id, bundle_name in enumerate(bundles):
for sub_id, sub_name in enumerate(subs):
b_stat = stats[sub_name][bundle_name].get(metric_name)
if b_stat is not None:
curr_metric[sub_id, bundle_id] = b_stat

dataframes.append(pd.DataFrame(data=curr_metric,
index=subs,
columns=bundles))
del curr_metric

return dataframes, df_names

Expand Down

0 comments on commit 9f3e579

Please sign in to comment.