From 90d99a2ff1de586931a07c436ecbcc036ad08260 Mon Sep 17 00:00:00 2001
From: Jacob Buchanan
Date: Fri, 21 Jul 2023 10:26:20 -0700
Subject: [PATCH] Try ifo/search group

---
 bin/pygrb/pycbc_grb_trig_cluster            | 2 +-
 bin/pygrb/pycbc_grb_trig_combiner           | 4 ++--
 pycbc/events/eventmgr.py                    | 5 +----
 pycbc/results/pygrb_postprocessing_utils.py | 4 ++--
 4 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/bin/pygrb/pycbc_grb_trig_cluster b/bin/pygrb/pycbc_grb_trig_cluster
index 8d483cd7c7c..952cad43c2a 100755
--- a/bin/pygrb/pycbc_grb_trig_cluster
+++ b/bin/pygrb/pycbc_grb_trig_cluster
@@ -63,7 +63,7 @@ def slice_hdf5(inputfile, outfile, include, verbose=False):
     nevents = include.size
 
     with h5py.File(inputfile, "r") as h5in:
-        ifos = [k for k in h5in.keys() if k not in ("network", "search")]
+        ifos = [k for k in h5in.keys() if k != "network"]
 
         # find which single-ifo events to keep
         ifo_index = {
diff --git a/bin/pygrb/pycbc_grb_trig_combiner b/bin/pygrb/pycbc_grb_trig_combiner
index 979a09771c5..9412d85e8c0 100755
--- a/bin/pygrb/pycbc_grb_trig_combiner
+++ b/bin/pygrb/pycbc_grb_trig_combiner
@@ -185,7 +185,7 @@ def merge_hdf5_files(inputfiles, outputfile, verbose=False, **compression_kw):
 
     # handle search datasets as a special case
     # (they will be the same in all files)
-    search_datasets = set(filter(lambda x: "search/" in x, dataset_names))
+    search_datasets = set(filter(lambda x: "/search/" in x, dataset_names))
 
     # record where we are in the global dataset
     position = defaultdict(int)
@@ -230,7 +230,7 @@ def bin_events(inputfile, bins, outdir, filetag, ifotag,
     _, seg = filename_metadata(inputfile)
 
     with h5py.File(inputfile, "r") as h5in:
-        ifos = [k for k in h5in.keys() if k not in ("network", "search")]
+        ifos = [k for k in h5in.keys() if k != "network"]
         times = h5in[column][()]
 
         for bin_, segl in bins.items():
diff --git a/pycbc/events/eventmgr.py b/pycbc/events/eventmgr.py
index 703b253f033..b1094e35d1e 100644
--- a/pycbc/events/eventmgr.py
+++ b/pycbc/events/eventmgr.py
@@ -688,10 +688,6 @@ def __setitem__(self, name, data):
         th = numpy.array(
             [p['tmplt'].template_hash for p in self.template_params])
         f = fw(outname)
-        # Write timeslides to search group
-        f.prefix = 'search'
-        for ifo in self.ifos:
-            f['time_slides_'+ifo] = self.time_slides[ifo]
         # Output network stuff
         f.prefix = 'network'
         network_events = numpy.array(
@@ -776,6 +772,7 @@ def __setitem__(self, name, data):
             f['chisq_dof'] = numpy.zeros(len(ifo_events))
             f['template_hash'] = th[tid]
 
+            f['search/time_slides'] = numpy.array(self.time_slides[ifo])
             if self.opt.trig_start_time:
                 f['search/start_time'] = numpy.array([
                     self.opt.trig_start_time[ifo]], dtype=numpy.int32)
diff --git a/pycbc/results/pygrb_postprocessing_utils.py b/pycbc/results/pygrb_postprocessing_utils.py
index a3f8360e72c..cc1a4416969 100644
--- a/pycbc/results/pygrb_postprocessing_utils.py
+++ b/pycbc/results/pygrb_postprocessing_utils.py
@@ -619,10 +619,10 @@ def load_time_slides(hdf_file_path):
     """Loads timeslides from PyGRB output file as a dictionary"""
     hdf_file = h5py.File(hdf_file_path, 'r')
     ifos = extract_ifos(hdf_file_path)
-    ids = numpy.arange(len(hdf_file[f'search/time_slides_{ifos[0]}']))
+    ids = numpy.arange(len(hdf_file[f'{ifos[0]}/search/time_slides']))
     time_slide_dict = {
         slide_id: {
-            ifo: hdf_file[f'search/time_slides_{ifo}'][slide_id]
+            ifo: hdf_file[f'{ifo}/search/time_slides'][slide_id]
             for ifo in ifos}
         for slide_id in ids}
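
Note on the layout change: this patch moves the time-slide data from a shared
top-level group with per-ifo dataset names (search/time_slides_H1) to a
search subgroup under each ifo group (H1/search/time_slides). The snippet
below is a minimal sketch of writing and reading the new layout; the ifo
names, slide offsets, and filename are made-up illustrations, not values the
pipeline produces.

    # Sketch only: toy file in the post-patch layout.
    import h5py
    import numpy

    ifos = ['H1', 'L1']                           # assumed detector names
    slides = {'H1': [0, 0, 0], 'L1': [0, 8, 16]}  # assumed slide offsets

    with h5py.File('toy_triggers.h5', 'w') as f:  # hypothetical filename
        for ifo in ifos:
            # new location, as written by eventmgr.py after this patch
            f[f'{ifo}/search/time_slides'] = numpy.array(slides[ifo])

    with h5py.File('toy_triggers.h5', 'r') as f:
        # same access pattern as the updated load_time_slides()
        ids = numpy.arange(len(f[f'{ifos[0]}/search/time_slides']))
        time_slide_dict = {
            slide_id: {ifo: f[f'{ifo}/search/time_slides'][slide_id]
                       for ifo in ifos}
            for slide_id in ids}
        print(time_slide_dict)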
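
Note on the filter changes: both key filters follow from the same move. With
no top-level search group left, excluding only 'network' recovers the ifo
list, and per-ifo search datasets are picked out by the '/search/' path
segment (old flat names like search/time_slides_H1 contain 'search/' but not
'/search/'). The sketch below reuses the toy file from the previous note; the
visititems traversal is only an assumed way to enumerate dataset names, not
how the combiner actually builds dataset_names.

    # Sketch only: behaviour of the updated filters on the new layout.
    import h5py

    with h5py.File('toy_triggers.h5', 'a') as f:
        f.require_group('network')  # real files also carry a network group

        # excluding only 'network' now yields the ifo list
        ifos = [k for k in f.keys() if k != 'network']
        print(ifos)  # ['H1', 'L1']

        # collect full dataset paths, then apply the combiner's filter
        dataset_names = []
        f.visititems(lambda name, obj: dataset_names.append(name)
                     if isinstance(obj, h5py.Dataset) else None)
        search_datasets = set(filter(lambda x: '/search/' in x, dataset_names))
        print(search_datasets)  # {'H1/search/time_slides', 'L1/search/time_slides'}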