@@ -168,8 +168,15 @@ def episode_calculation(
         subject_episode_data['id'] = subject_id
 
         # Append to main dataframes
-        episode_data_df = pd.concat([episode_data_df, subject_episode_data], ignore_index=True)
-        episode_summary_df = pd.concat([episode_summary_df, subject_summary], ignore_index=True)
+        if episode_data_df.empty:
+            episode_data_df = subject_episode_data
+        else:
+            episode_data_df = pd.concat([episode_data_df, subject_episode_data], ignore_index=True)
+
+        if episode_summary_df.empty:
+            episode_summary_df = subject_summary
+        else:
+            episode_summary_df = pd.concat([episode_summary_df, subject_summary], ignore_index=True)
 
 
 
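The empty-accumulator branches above are presumably there to avoid the FutureWarning that pandas 2.1+ emits when concatenating empty or all-NA DataFrames: seeding the accumulator directly sidesteps the deprecated case. A minimal sketch of the same pattern, using invented data and variable names:

    import pandas as pd

    combined = pd.DataFrame()  # empty accumulator, like episode_data_df above
    for record in ({"id": [1], "gl": [62.0]}, {"id": [2], "gl": [185.0]}):  # made-up subjects
        chunk = pd.DataFrame(record)
        if combined.empty:
            combined = chunk  # seed directly; concat with an empty frame can warn in newer pandas
        else:
            combined = pd.concat([combined, chunk], ignore_index=True)
    print(combined)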
@@ -238,7 +245,7 @@ def episode_single(
     day_one = day_one.tz_convert(local_tz)
     ndays = len(gd2d_tuple[1])
     # generate grid times by starting from day one and cumulatively summing
-    time_ip = pd.date_range(start=day_one + pd.Timedelta(minutes=dt0), periods=ndays * 24 * 60 / dt0, freq=f"{dt0}min")
+    time_ip = pd.date_range(start=day_one + pd.Timedelta(minutes=dt0), periods=int(ndays * 24 * 60 / dt0), freq=f"{dt0}min")
     data_ip = gd2d_tuple[0].flatten().tolist()
     new_data = pd.DataFrame({
         "time": time_ip,
@@ -297,29 +304,25 @@ def episode_single(
297304 x , "hypo" , lv1_hypo , int (120 / dt0 ) + 1 , end_idx
298305 ),
299306 }
300- )
307+ ),
308+ include_groups = False
301309 )
302310 .reset_index ()
303311 .drop (columns = ['level_1' ])
304312 )
305313
306314
307- # Add exclusive labels
308- def hypo_exclusion_logic (group_df ):
309- # group_df is a DataFrame with all columns for the current group
310- if (group_df ['lv2_hypo' ] > 0 ).any ():
311- return pd .Series ([0 ] * len (group_df ), index = group_df .index )
312- else :
313- return group_df ['lv1_hypo' ]
314- ep_per_seg ['lv1_hypo_excl' ] = ep_per_seg .groupby (['segment' , 'lv1_hypo' ]).apply (hypo_exclusion_logic ).reset_index (level = [0 ,1 ], drop = True ).values .flatten ()
315-
316- def hyper_exclusion_logic (group_df ):
317- # group_df is a DataFrame with all columns for the current group
318- if (group_df ['lv2_hyper' ] > 0 ).any ():
319- return pd .Series ([0 ] * len (group_df ), index = group_df .index )
320- else :
321- return group_df ['lv1_hyper' ]
322- ep_per_seg ['lv1_hyper_excl' ] = ep_per_seg .groupby (['segment' , 'lv1_hyper' ]).apply (hyper_exclusion_logic ).reset_index (level = [0 ,1 ], drop = True ).values .flatten ()
315+ # Add exclusive labels using the correct original logic without DeprecationWarning
316+ # For hypo exclusion: group by both segment and lv1_hypo, set to 0 if any lv2_hypo > 0 in that group
317+ def calculate_exclusion (df , lv1_col , lv2_col ):
318+ """Calculate exclusion labels for lv1 episodes based on lv2 episodes in same group"""
319+ df = df .copy ()
320+ df ['group_id' ] = df .groupby (['segment' , lv1_col ]).ngroup ()
321+ group_has_lv2 = df .groupby ('group_id' )[lv2_col ].transform (lambda x : (x > 0 ).any ())
322+ return df [lv1_col ].where (~ group_has_lv2 , 0 )
323+
324+ ep_per_seg ['lv1_hypo_excl' ] = calculate_exclusion (ep_per_seg , 'lv1_hypo' , 'lv2_hypo' )
325+ ep_per_seg ['lv1_hyper_excl' ] = calculate_exclusion (ep_per_seg , 'lv1_hyper' , 'lv2_hyper' )
323326
324327 full_segment_df = pd .concat ([segment_data , ep_per_seg .drop (["segment" ], axis = 1 )], axis = 1 )
325328
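To see what the new transform-based helper computes: within each (segment, level-1 episode label) group, the level-1 label is kept only when no level-2 value occurs in that group. A self-contained sketch on invented toy data (the helper is copied from the hunk above; the column names follow the diff, the values are made up):

    import pandas as pd

    def calculate_exclusion(df, lv1_col, lv2_col):
        df = df.copy()
        df['group_id'] = df.groupby(['segment', lv1_col]).ngroup()
        group_has_lv2 = df.groupby('group_id')[lv2_col].transform(lambda x: (x > 0).any())
        return df[lv1_col].where(~group_has_lv2, 0)

    toy = pd.DataFrame({
        "segment":  [1, 1, 1, 1, 1, 1],
        "lv1_hypo": [1, 1, 0, 2, 2, 2],  # two level-1 hypo episodes, labelled 1 and 2
        "lv2_hypo": [0, 0, 0, 0, 1, 1],  # level-2 readings occur only inside episode 2
    })
    toy["lv1_hypo_excl"] = calculate_exclusion(toy, "lv1_hypo", "lv2_hypo")
    print(toy)  # episode 1 keeps its label; episode 2 is zeroed out

Because the result comes back index-aligned through `.where`, it can be assigned straight into the column, whereas the removed version relied on a positional `.values.flatten()` assignment of group-ordered output.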
@@ -402,7 +405,8 @@ def event_class(
                         else None] + [None] * (len(x) - 1)
                     ),
                 }
-            )
+            ),
+            include_groups=False
         )
         .reset_index()
         .drop(columns=['level_1'])
@@ -471,7 +475,8 @@ def lv1_excl(data: pd.DataFrame) -> np.ndarray:
         lambda x: pd.DataFrame(
             {
                 "excl": [0 if (x[lv2_first].values > 0).any() else x[lv1_first].iloc[0]] * len(x)
-            })
+            }),
+        include_groups=False
     )
 
     excl = excl.reset_index()
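All three `include_groups=False` additions target the pandas 2.2+ DeprecationWarning about `GroupBy.apply` operating on the grouping columns: the flag excludes those columns from the frame handed to the applied function, while the group keys still come back through the index. A small standalone sketch of the same groupby, apply, reset_index, drop pattern on made-up data:

    import pandas as pd

    df = pd.DataFrame({"segment": [1, 1, 2], "gl": [68.0, 62.0, 185.0]})  # invented readings

    out = (
        df.groupby("segment")
        .apply(lambda x: pd.DataFrame({"n_readings": [len(x)]}), include_groups=False)
        .reset_index()
        .drop(columns=["level_1"])  # the unnamed per-group index level, as in the diff
    )
    print(out)  # one row per segment; 'segment' returns as a column via reset_index()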