11import hashlib
2+ import json
3+ import logging
4+ import shutil
5+
6+ import h5py
27import numpy as np
38from typing import Dict , Union
4- import json
9+
10+ logger = logging .getLogger (__name__ )
511
612
713def hash_numpy_dict (input_dictionary ):
@@ -292,3 +298,119 @@ def check_consistency(dqmap: np.ndarray, sqmap: np.ndarray, mask: np.ndarray) ->
292298 sq_to_dq [sq_value ] = dq_value
293299
294300 return True
301+
302+
def combine_qmap_files(qmap_file1, qmap_file2, output_file):
    """
    Combine two qmap files into a single qmap file.

    ``qmap_file1`` is copied to ``output_file`` as the base, then the mask,
    the ``{static,dynamic}_num_pts`` / value-list datasets, the roi maps and
    the index mappings are rewritten with data merged from both inputs.

    Parameters
    ----------
    qmap_file1 : str
        Path to the first qmap file.
    qmap_file2 : str
        Path to the second qmap file.
    output_file : str
        Path to the output qmap file.

    Raises
    ------
    AssertionError
        If the two files do not share identical ``/qmap/map_names``.
    """
    logger.info("Combining qmap files:")
    logger.info("  file1 : %s", qmap_file1)
    logger.info("  file2 : %s", qmap_file2)
    logger.info("  output: %s", output_file)

    with h5py.File(qmap_file1, "r") as f1, h5py.File(qmap_file2, "r") as f2:
        map_names1 = tuple(f1["/qmap/map_names"][()])
        map_names2 = tuple(f2["/qmap/map_names"][()])
        assert map_names1 == map_names2, (
            f"map_names must be the same: {map_names1!r} != {map_names2!r}"
        )
        logger.info("map_names validated: %s", map_names1)

        logger.info("Copying file1 -> output file as base ...")
        shutil.copy(qmap_file1, output_file)

        with h5py.File(output_file, "r+") as fo:
            # Combine masks (nonzero == valid pixel, per the "valid" counts
            # logged below). Cast back to mask1's dtype so the output dataset
            # keeps the same storage type as the input instead of silently
            # becoming bool.
            mask1 = f1["/qmap/mask"][()]
            mask2 = f2["/qmap/mask"][()]
            combined_mask = np.logical_or(mask1, mask2).astype(mask1.dtype)
            logger.info(
                "Mask: file1 valid=%d file2 valid=%d combined valid=%d",
                np.count_nonzero(mask1),
                np.count_nonzero(mask2),
                np.count_nonzero(combined_mask),
            )
            del fo["/qmap/mask"]
            fo["/qmap/mask"] = combined_mask

            for prefix in ["static", "dynamic"]:
                logger.info("--- Processing '%s' partition ---", prefix)

                f1_num_pts = f1[f"/qmap/{prefix}_num_pts"][()]
                f2_num_pts = f2[f"/qmap/{prefix}_num_pts"][()]
                logger.debug(
                    "  num_pts: file1=%s file2=%s",
                    f1_num_pts.tolist(), f2_num_pts.tolist(),
                )

                # dim0 bins from the two files are concatenated; dim1 keeps
                # the finer of the two grids (the longer value list).
                dim0_num_pts = f1_num_pts[0] + f2_num_pts[0]
                dim1_num_pts = max(f1_num_pts[1], f2_num_pts[1])
                logger.info(
                    "  Combined num_pts: dim0=%d (file1 %d + file2 %d) dim1=%d",
                    dim0_num_pts, f1_num_pts[0], f2_num_pts[0], dim1_num_pts,
                )
                del fo[f"/qmap/{prefix}_num_pts"]
                fo[f"/qmap/{prefix}_num_pts"] = np.array(
                    [dim0_num_pts, dim1_num_pts]
                )

                # Combine dim0 value list (concatenate both ranges).
                v_list_dim0 = np.concatenate([
                    f1[f"/qmap/{prefix}_v_list_dim0"][()],
                    f2[f"/qmap/{prefix}_v_list_dim0"][()],
                ])
                logger.debug(
                    "  v_list_dim0: range [%.6g, %.6g], %d entries",
                    v_list_dim0.min(), v_list_dim0.max(), len(v_list_dim0),
                )
                del fo[f"/qmap/{prefix}_v_list_dim0"]
                fo[f"/qmap/{prefix}_v_list_dim0"] = v_list_dim0

                # Keep the longer dim1 value list. The output already holds
                # file1's copy, so only replace when file2's is longer.
                if f1_num_pts[1] < f2_num_pts[1]:
                    logger.debug(
                        "  v_list_dim1: using file2's list (%d > %d entries)",
                        f2_num_pts[1], f1_num_pts[1],
                    )
                    del fo[f"/qmap/{prefix}_v_list_dim1"]
                    fo[f"/qmap/{prefix}_v_list_dim1"] = (
                        f2[f"/qmap/{prefix}_v_list_dim1"][()]
                    )

                # Merge roi maps: offset file2's non-zero indices so they
                # don't collide with file1's, then add the two maps together.
                roi_map1 = f1[f"/qmap/{prefix}_roi_map"][()]
                roi_map2 = f2[f"/qmap/{prefix}_roi_map"][()].copy()
                logger.debug(
                    "  roi_map: file1 max=%d file2 max=%d",
                    roi_map1.max(initial=0), roi_map2.max(initial=0),
                )

                # Summing pixels labeled in BOTH inputs would fabricate
                # indices that belong to neither partition -- warn loudly.
                overlap = np.count_nonzero((roi_map1 > 0) & (roi_map2 > 0))
                if overlap:
                    logger.warning(
                        "  roi_map: %d pixels are labeled in both files; "
                        "their summed labels will not map to a real partition",
                        overlap,
                    )

                # max(initial=0) instead of np.max(roi_map1[roi_map1 > 0]):
                # identical for non-negative label maps, but does not raise
                # ValueError when the map has no positive labels at all.
                roi_map2[roi_map2 > 0] += int(roi_map1.max(initial=0))
                roi_map = roi_map1 + roi_map2

                start_index = np.min(roi_map)

                # Re-index to natural order (0 = masked, 1-based = valid).
                # np.unique returns sorted labels, so inverse is already the
                # rank of each pixel's label.
                unique_idx, inverse = np.unique(roi_map, return_inverse=True)
                partition_natural_order = (
                    inverse.reshape(roi_map.shape).astype(np.uint32)
                )

                # If no masked (zero) pixels exist, shift everything up so
                # index 0 stays reserved for "masked".
                if start_index > 0:
                    partition_natural_order += 1

                num_partitions = int((unique_idx > 0).sum())
                logger.info(
                    "  Combined roi_map: %d valid partitions", num_partitions
                )

                del fo[f"/qmap/{prefix}_roi_map"]
                fo[f"/qmap/{prefix}_roi_map"] = partition_natural_order

                # 0-based original labels of the surviving partitions, in
                # natural (sorted) order.
                del fo[f"/qmap/{prefix}_index_mapping"]
                fo[f"/qmap/{prefix}_index_mapping"] = (
                    unique_idx[unique_idx > 0] - 1
                )

    logger.info("Done. Output written to: %s", output_file)
0 commit comments