From 68bd32f352f0cdf85b309e90e4fbf4ec5904ac2b Mon Sep 17 00:00:00 2001
From: GeorgWa
Date: Mon, 22 Jan 2024 14:18:04 +0100
Subject: [PATCH 1/7] FEAT fixed center quant

---
 alphadia/features.py                 | 56 +++++++++++++++++++++-------
 alphadia/plexscoring.py              | 28 ++++++++++++++
 alphadia/workflow/peptidecentric.py  | 16 +++++---
 gui/workflows/PeptideCentric.v1.json |  2 +-
 misc/config/default.yaml             |  4 +-
 nbs/debug/debug_lvl1.ipynb           |  2 +-
 6 files changed, 87 insertions(+), 21 deletions(-)

diff --git a/alphadia/features.py b/alphadia/features.py
index ce8abf4c..b1657370 100644
--- a/alphadia/features.py
+++ b/alphadia/features.py
@@ -667,10 +667,13 @@ def location_features(
 @nb.njit()
 def fragment_features(
     dense_fragments: np.ndarray,
+    fragments_frame_profile: np.ndarray,
+    frame_rt: np.ndarray,
     observation_importance: np.ndarray,
     template: np.ndarray,
     fragments: np.ndarray,
     feature_array: nb_float32_array,
+    quant_window: nb.uint32 = 3,
 ):
     fragment_feature_dict = nb.typed.Dict.empty(
         key_type=nb.types.unicode_type, value_type=float_array
@@ -706,26 +709,50 @@ def fragment_features(
         n_fragments, -1
     )
 
+    # most intense observation across all observations
+    best_observation = np.argmax(observation_importance)
+    # (n_fragments, n_frames)
+    best_profile = fragments_frame_profile[:, best_observation]
+
+    # center the profile around the expected frame center
+    center = best_profile.shape[1] // 2
+    # (n_fragments, quant_window * 2 + 1)
+    best_profile = best_profile[:, center - quant_window : center + quant_window + 1]
+
+    # (quant_window * 2 + 1)
+    frame_rt_quant = frame_rt[center - quant_window : center + quant_window + 1]
+    quant_duration = frame_rt_quant[-1] - frame_rt_quant[0]
+
+    # (quant_window * 2)
+    delta_rt = frame_rt_quant[1:] - frame_rt_quant[:-1]
+
+    # (n_fragments)
+    fragment_area = np.sum((best_profile[:, 1:]+best_profile[:, :-1])*delta_rt.reshape(1, -1)*0.5, axis=-1)
+    fragment_area_norm = fragment_area / quant_duration
+
+    observed_fragment_intensity = np.sum(best_profile, axis=-1)
+
     # create fragment masks for filtering
+    fragment_profiles = np.sum(dense_fragments[0], axis=-1)
     # (n_fragments, n_observations)
-    sum_fragment_intensity = np.sum(np.sum(dense_fragments[0], axis=-1), axis=-1)
+    sum_fragment_intensity = np.sum(fragment_profiles, axis=-1)
 
     # create fragment intensity mask
-    fragment_intensity_mask_2d = sum_fragment_intensity > 0
-    fragment_intensity_weights_2d = (
-        fragment_intensity_mask_2d * observation_importance_reshaped
-    )
+    #fragment_intensity_mask_2d = sum_fragment_intensity > 0
+    #fragment_intensity_weights_2d = (
+    #    fragment_intensity_mask_2d * observation_importance_reshaped
+    #)
 
     # (n_fragments, n_observations)
     # normalize rows to 1
-    fragment_intensity_weights_2d = fragment_intensity_weights_2d / (
-        np.sum(fragment_intensity_weights_2d, axis=-1).reshape(-1, 1) + 1e-20
-    )
+    #fragment_intensity_weights_2d = fragment_intensity_weights_2d / (
+    #    np.sum(fragment_intensity_weights_2d, axis=-1).reshape(-1, 1) + 1e-20
+    #)
 
     # (n_fragments)
-    observed_fragment_intensity = weighted_mean_a1(
-        sum_fragment_intensity, fragment_intensity_weights_2d
-    )
+    #observed_fragment_intensity = weighted_mean_a1(
+    #    sum_fragment_intensity, fragment_intensity_weights_2d
+    #)
 
     # (n_observations)
     sum_template_intensity = np.sum(np.sum(template, axis=-1), axis=-1)
@@ -770,7 +797,7 @@ def fragment_features(
 
     if np.sum(fragment_height_mask_1d) > 0.0:
         feature_array[18] = np.corrcoef(
-            observed_fragment_intensity, fragment_intensity_norm
+            fragment_area_norm, fragment_intensity_norm
         )[0, 1]
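Note on the features.py hunk above: fragment quantification now integrates each fragment's frame profile over a fixed retention-time window around the expected elution center, using the trapezoidal rule, and normalizes by the window duration. A minimal standalone sketch of the same arithmetic (toy profile and RT grid; the function name quant_window_area is chosen here for illustration and does not exist in alphadia):

import numpy as np

def quant_window_area(best_profile, frame_rt, quant_window=3):
    # slice a +-quant_window frame window around the profile center
    center = best_profile.shape[1] // 2
    window = best_profile[:, center - quant_window : center + quant_window + 1]
    rt = frame_rt[center - quant_window : center + quant_window + 1]
    delta_rt = rt[1:] - rt[:-1]
    # trapezoidal rule: mean of adjacent points times the RT step, summed
    area = np.sum((window[:, 1:] + window[:, :-1]) * delta_rt.reshape(1, -1) * 0.5, axis=-1)
    # normalize by the window duration so different cycle times stay comparable
    return area / (rt[-1] - rt[0])

profile = np.array([[0.0, 1.0, 4.0, 9.0, 4.0, 1.0, 0.0]])  # one fragment, 7 frames
rt = np.linspace(100.0, 103.0, 7)  # retention times in seconds
print(quant_window_area(profile, rt))  # ~[3.17]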
 
     if np.sum(observed_fragment_height) > 0.0:
@@ -823,7 +850,7 @@ def fragment_features(
         observed_fragment_mz_mean,
         mass_error,
         observed_fragment_height,
-        observed_fragment_intensity,
+        fragment_area_norm,
     )
 
 
@@ -897,6 +924,9 @@ def profile_features(
     feature_array,
 ):
     n_observations = len(observation_importance)
+    # most intense observation across all observations
+    best_observation = np.argmax(observation_importance)
+
     fragment_idx_sorted = np.argsort(fragment_intensity)[::-1]
 
     # ============= FRAGMENT RT CORRELATIONS =============
diff --git a/alphadia/plexscoring.py b/alphadia/plexscoring.py
index eac3647b..af458f8c 100644
--- a/alphadia/plexscoring.py
+++ b/alphadia/plexscoring.py
@@ -167,6 +167,7 @@ class CandidateConfigJIT:
     top_k_fragments: nb.uint32
     top_k_isotopes: nb.uint32
     reference_channel: nb.int16
+    quant_window: nb.uint32
 
     precursor_mz_tolerance: nb.float32
     fragment_mz_tolerance: nb.float32
@@ -179,6 +180,7 @@ def __init__(
         top_k_fragments: nb.uint32,
         top_k_isotopes: nb.uint32,
         reference_channel: nb.int16,
+        quant_window: nb.uint32,
         precursor_mz_tolerance: nb.float32,
         fragment_mz_tolerance: nb.float32,
     ) -> None:
@@ -194,6 +196,7 @@ def __init__(
         self.top_k_fragments = top_k_fragments
         self.top_k_isotopes = top_k_isotopes
         self.reference_channel = reference_channel
+        self.quant_window = quant_window
         self.precursor_mz_tolerance = precursor_mz_tolerance
         self.fragment_mz_tolerance = fragment_mz_tolerance
@@ -213,6 +216,7 @@ def __init__(self):
         self.top_k_fragments = 12
         self.top_k_isotopes = 4
         self.reference_channel = -1
+        self.quant_window = 3
         self.precursor_mz_tolerance = 15
         self.fragment_mz_tolerance = 15
@@ -283,6 +287,18 @@ def reference_channel(self) -> int:
     def reference_channel(self, value):
         self._reference_channel = value
 
+    @property
+    def quant_window(self) -> int:
+        """The quantification window size in cycles.
+        The area will be calculated from `scan_center - quant_window` to `scan_center + quant_window`.
+        Default: `quant_window = 3`"""
+        return self._quant_window
+
+    @quant_window.setter
+    def quant_window(self, value):
+        self._quant_window = value
+
+
     @property
     def precursor_mz_tolerance(self) -> float:
         """The precursor m/z tolerance in ppm.
@@ -626,6 +642,12 @@ def process(
             features.frame_profile_2d(dense_fragments[0])
         )
 
+        cycle_len = jit_data.cycle.shape[1]
+
+        frame_rt = jit_data.rt_values[
+            self.frame_start:self.frame_stop:cycle_len
+        ]
+
         # (n_observations, n_frames)
         template_frame_profile = features.or_envelope_1d(
             features.frame_profile_1d(template)
@@ -636,6 +658,7 @@ def process(
             features.scan_profile_2d(dense_fragments[0])
         )
 
+
         # (n_observations, n_scans)
         template_scan_profile = features.or_envelope_1d(
             features.scan_profile_1d(template)
@@ -676,14 +699,19 @@ def process(
             feature_array,
         )
 
+
+        # retrieve first fragment features
         # (n_valid_fragments)
         mz_observed, mass_error, height, intensity = features.fragment_features(
             dense_fragments,
+            fragments_frame_profile,
+            frame_rt,
             observation_importance,
             template,
             fragments,
             feature_array,
+            quant_window=config.quant_window,
         )
 
         # store fragment features if requested
diff --git a/alphadia/workflow/peptidecentric.py b/alphadia/workflow/peptidecentric.py
index f04a4b9f..e94bf8b5 100644
--- a/alphadia/workflow/peptidecentric.py
+++ b/alphadia/workflow/peptidecentric.py
@@ -551,16 +551,20 @@ def recalibration(self, precursor_df, fragments_df):
             )
         ]
 
-        min_fragments = 1000
+        min_fragments = 500
+        max_fragments = 5000
         min_correlation = 0.7
         fragments_df_filtered = fragments_df_filtered.sort_values(
             by=["correlation"], ascending=False
         )
-        stop_rank = max(
-            np.searchsorted(
-                fragments_df_filtered["correlation"].values, min_correlation
+        stop_rank = min(
+            max(
+                np.searchsorted(
+                    fragments_df_filtered["correlation"].values, min_correlation
+                ),
+                min_fragments,
             ),
-            min_fragments,
+            max_fragments,
         )
 
         fragments_df_filtered = fragments_df_filtered.iloc[:stop_rank]
@@ -669,6 +673,7 @@ def extract_batch(self, batch_df):
                 "precursor_mz_tolerance": self.com.ms1_error,
                 "fragment_mz_tolerance": self.com.ms2_error,
                 "exclude_shared_ions": self.config["search"]["exclude_shared_ions"],
+                "min_size_rt": self.config["search"]["quant_window"],
             }
         )
@@ -696,6 +701,7 @@ def extract_batch(self, batch_df):
                 "precursor_mz_tolerance": self.com.ms1_error,
                 "fragment_mz_tolerance": self.com.ms2_error,
                 "exclude_shared_ions": self.config["search"]["exclude_shared_ions"],
+                "quant_window": self.config["search"]["quant_window"],
             }
         )
diff --git a/gui/workflows/PeptideCentric.v1.json b/gui/workflows/PeptideCentric.v1.json
index 92db807d..e8afffb5 100644
--- a/gui/workflows/PeptideCentric.v1.json
+++ b/gui/workflows/PeptideCentric.v1.json
@@ -198,7 +198,7 @@
         {
             "id": "fragment_mz",
             "name": "Fragment mz",
-            "value": [100.0, 2000.0],
+            "value": [200.0, 2000.0],
             "description": "Size limit for generated fragments.",
             "type": "float_range"
         },
diff --git a/misc/config/default.yaml b/misc/config/default.yaml
index 8f96320b..2a01c3ca 100644
--- a/misc/config/default.yaml
+++ b/misc/config/default.yaml
@@ -33,7 +33,7 @@ library_prediction:
     - 400
     - 1200
   fragment_mz:
-    - 100
+    - 200
     - 2000
   nce: 25.0
   instrument: QE
@@ -50,6 +50,8 @@ search:
   target_mobility_tolerance: 0.04
   target_rt_tolerance: 60
 
+  quant_window: 3
+
 search_advanced:
   top_k_fragments: 12
diff --git a/nbs/debug/debug_lvl1.ipynb b/nbs/debug/debug_lvl1.ipynb
index a811242c..09d4e6a3 100644
--- a/nbs/debug/debug_lvl1.ipynb
+++ b/nbs/debug/debug_lvl1.ipynb
@@ -337,7 +337,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.13"
+   "version": "3.9.18"
   },
  "orig_nbformat": 4
 },
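One more note on patch 1: the recalibration change in alphadia/workflow/peptidecentric.py clamps the number of fragments used for calibration. Everything above the correlation cutoff is kept, but never fewer than min_fragments (500) and never more than max_fragments (5000). The clamp in isolation (a sketch; rank stands for whatever np.searchsorted returns at the cutoff):

def clamp_stop_rank(rank: int, min_fragments: int = 500, max_fragments: int = 5000) -> int:
    # apply the lower bound first, then the upper bound
    return min(max(rank, min_fragments), max_fragments)

print(clamp_stop_rank(120))    # 500: too few pass the cutoff, keep 500 anyway
print(clamp_stop_rank(2300))   # 2300: within bounds, keep as is
print(clamp_stop_rank(80000))  # 5000: cap the calibration set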
From 0e696634b6dd8554b51714c2a72054c88bddd18d Mon Sep 17 00:00:00 2001
From: GeorgWa
Date: Mon, 22 Jan 2024 14:42:55 +0100
Subject: [PATCH 2/7] FEAT skip dense mass representation

---
 alphadia/data/thermo.py     | 100 ++++++++++++++++++++++++++++++++++++
 alphadia/hybridselection.py |  58 ++++++++++++++-------
 2 files changed, 140 insertions(+), 18 deletions(-)

diff --git a/alphadia/data/thermo.py b/alphadia/data/thermo.py
index 67814ffa..689baec1 100644
--- a/alphadia/data/thermo.py
+++ b/alphadia/data/thermo.py
@@ -616,6 +616,106 @@ def get_dense(
                         dense_output[1, k, j, 0, i] = new_dim1
                         dense_output[1, k, j, 1, i] = new_dim1
 
                     idx += 1
 
         return dense_output, precursor_idx_list
 
+    def get_dense_intensity(
+        self,
+        frame_limits,
+        scan_limits,
+        mz_query_list,
+        mass_tolerance,
+        quadrupole_mz,
+        absolute_masses=False,
+        custom_cycle=None,
+    ):
+        """
+        Get a dense representation of the data for a given set of parameters.
+
+        Parameters
+        ----------
+
+        frame_limits : np.ndarray, shape = (1,2,)
+            array of frame indices
+
+        scan_limits : np.ndarray, shape = (1,2,)
+            array of scan indices
+
+        mz_query_list : np.ndarray, shape = (n_tof_slices,)
+            array of query m/z values
+
+        mass_tolerance : float
+            mass tolerance in ppm
+
+        quadrupole_mz : np.ndarray, shape = (1,2,)
+            array of quadrupole m/z values
+
+        absolute_masses : bool, default = False
+            if True, the first slice of the dense output will contain the absolute m/z values instead of the mass error
+
+        custom_cycle : np.ndarray, shape = (1, n_precursor, 1, 2), default = None
+            custom cycle quadrupole mask, for example after calibration
+
+        Returns
+        -------
+
+        np.ndarray, shape = (1, n_tof_slices, n_precursor_indices, 2, n_precursor_cycles)
+
+        """
+
+        # (n_tof_slices, 2) array of start, stop mz for each slice
+        mz_query_slices = utils.mass_range(mz_query_list, mass_tolerance)
+        n_tof_slices = len(mz_query_slices)
+
+        cycle_length = self.cycle.shape[1]
+
+        # (n_precursors) array of precursor indices, the precursor index refers to each scan within the cycle
+        precursor_idx_list = calculate_valid_scans(quadrupole_mz, self.cycle)
+        n_precursor_indices = len(precursor_idx_list)
+
+        precursor_cycle_start = frame_limits[0, 0] // cycle_length
+        precursor_cycle_stop = frame_limits[0, 1] // cycle_length
+        precursor_cycle_len = precursor_cycle_stop - precursor_cycle_start
+
+        dense_output = np.zeros(
+            (1, n_tof_slices, n_precursor_indices, 2, precursor_cycle_len),
+            dtype=np.float32,
+        )
+
+
+        for i, cycle_idx in enumerate(
+            range(precursor_cycle_start, precursor_cycle_stop)
+        ):
+            for j, precursor_idx in enumerate(precursor_idx_list):
+                scan_idx = precursor_idx + cycle_idx * cycle_length
+
+                peak_start_idx = self.peak_start_idx_list[scan_idx]
+                peak_stop_idx = self.peak_stop_idx_list[scan_idx]
+
+                idx = peak_start_idx
+
+                for k, (mz_query_start, mz_query_stop) in enumerate(mz_query_slices):
+                    rel_idx = np.searchsorted(
+                        self.mz_values[idx:peak_stop_idx], mz_query_start, "left"
+                    )
+
+                    idx += rel_idx
+
+                    while idx < peak_stop_idx and self.mz_values[idx] <= mz_query_stop:
+                        accumulated_intensity = dense_output[0, k, j, 0, i]
+                        #accumulated_dim1 = dense_output[1, k, j, 0, i]
+
+                        new_intensity = self.intensity_values[idx]
+
+                        dense_output[0, k, j, 0, i] = (
+                            accumulated_intensity + new_intensity
+                        )
+                        dense_output[0, k, j, 1, i] = (
+                            accumulated_intensity + new_intensity
+                        )
+
+                        idx += 1
+
+        return dense_output, precursor_idx_list
+
diff --git a/alphadia/hybridselection.py b/alphadia/hybridselection.py
index 5c2e5258..9707e54a 100644
--- a/alphadia/hybridselection.py
+++ b/alphadia/hybridselection.py
@@ -763,15 +763,26 @@ def process(self, jit_data, fragment_container, config, kernel, debug):
         if len(precursor_mz) == 0:
             self.set_status(101, "No precursor masses after grouping")
             return
 
-        # shape = (2, n_fragments, n_observations, n_scans, n_frames), dtype = np.float32
-        _dense_precursors, _ = jit_data.get_dense(
-            frame_limits,
-            scan_limits,
-            precursor_mz,
-            config.precursor_mz_tolerance,
-            np.array([[-1.0, -1.0]], dtype=np.float32),
-        )
+        if jit_data.has_mobility:
+
+            # shape = (2, n_fragments, n_observations, n_scans, n_frames), dtype = np.float32
+            _dense_precursors, _ = jit_data.get_dense(
+                frame_limits,
+                scan_limits,
+                precursor_mz,
+                config.precursor_mz_tolerance,
+                np.array([[-1.0, -1.0]], dtype=np.float32),
+            )
+        else:
+            # shape = (2, n_fragments, n_observations, n_scans, n_frames), dtype = np.float32
+            _dense_precursors, _ = jit_data.get_dense_intensity(
+                frame_limits,
+                scan_limits,
+                precursor_mz,
+                config.precursor_mz_tolerance,
+                np.array([[-1.0, -1.0]], dtype=np.float32),
+            )
 
         dense_precursors = _dense_precursors.sum(axis=2)
 
         # FLAG: needed for debugging
@@ -784,15 +795,26 @@ def process(self, jit_data, fragment_container, config, kernel, debug):
             self.set_status(102, "Unexpected quadrupole_mz.shape")
             return
 
-        # shape = (2, n_fragments, n_observations, n_scans, n_frames), dtype = np.float32
-        _dense_fragments, _ = jit_data.get_dense(
-            frame_limits,
-            scan_limits,
-            fragment_mz,
-            config.fragment_mz_tolerance,
-            quadrupole_mz,
-            custom_cycle=jit_data.cycle,
-        )
+        if jit_data.has_mobility:
+            # shape = (2, n_fragments, n_observations, n_scans, n_frames), dtype = np.float32
+            _dense_fragments, _ = jit_data.get_dense(
+                frame_limits,
+                scan_limits,
+                fragment_mz,
+                config.fragment_mz_tolerance,
+                quadrupole_mz,
+                custom_cycle=jit_data.cycle,
+            )
+        else:
+            # shape = (2, n_fragments, n_observations, n_scans, n_frames), dtype = np.float32
+            _dense_fragments, _ = jit_data.get_dense_intensity(
+                frame_limits,
+                scan_limits,
+                fragment_mz,
+                config.fragment_mz_tolerance,
+                quadrupole_mz,
+                custom_cycle=jit_data.cycle,
+            )
 
         dense_fragments = _dense_fragments.sum(axis=2)
 
         # FLAG: needed for debugging
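Note on patch 2: get_dense_intensity walks each scan's sorted peak list once per m/z query slice. np.searchsorted jumps to the first peak at or above the slice start, and the while loop accumulates intensities until the slice end, instead of tracking the closest peak and its mass error as get_dense does. A minimal sketch of that inner pattern on a toy peak list (ppm_range is an assumed stand-in for utils.mass_range applied to a single query):

import numpy as np

def ppm_range(mz, tolerance_ppm):
    # assumed equivalent of utils.mass_range for one query m/z
    delta = mz * tolerance_ppm * 1e-6
    return mz - delta, mz + delta

mz_values = np.array([499.90, 500.002, 500.004, 500.70])  # sorted peak m/z of one scan
intensity_values = np.array([10.0, 55.0, 45.0, 7.0])

start, stop = ppm_range(500.0, 10.0)  # 10 ppm around 500 -> +-0.005 Th
idx = np.searchsorted(mz_values, start, "left")
accumulated = 0.0
while idx < len(mz_values) and mz_values[idx] <= stop:
    accumulated += intensity_values[idx]  # sum all matching peaks
    idx += 1
print(accumulated)  # 100.0: both peaks inside the tolerance are summed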
From 3bf148b46bfcc18bc0f3024560152c7476b82de4 Mon Sep 17 00:00:00 2001
From: GeorgWa
Date: Tue, 23 Jan 2024 12:13:41 +0100
Subject: [PATCH 3/7] FEAT improved quant

---
 alphadia/outputtransform.py | 2 +-
 misc/config/default.yaml    | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/alphadia/outputtransform.py b/alphadia/outputtransform.py
index 4cb49bed..91eff7d3 100644
--- a/alphadia/outputtransform.py
+++ b/alphadia/outputtransform.py
@@ -283,7 +283,7 @@ def lfq(
     return protein_df
 
 
-def prepare_df(df, psm_df, column="height"):
+def prepare_df(df, psm_df, column="intensity"):
     df = df[df["precursor_idx"].isin(psm_df["precursor_idx"])].copy()
     df["ion"] = utils.ion_hash(
         df["precursor_idx"].values,
diff --git a/misc/config/default.yaml b/misc/config/default.yaml
index 2a01c3ca..17ce767d 100644
--- a/misc/config/default.yaml
+++ b/misc/config/default.yaml
@@ -119,10 +119,10 @@ fdr:
 
 search_output:
   peptide_level_lfq: false
   precursor_level_lfq: false
-  min_k_fragments: 3
-  min_correlation: 0.7
+  min_k_fragments: 12
+  min_correlation: 0.9
   num_samples_quadratic: 50
-  min_nonnan: 1
+  min_nonnan: 3
   normalize_lfq: True
 
 # configuration for the optimization manager
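Note on patch 3: fragment quantities passed to directLFQ now default to the windowed "intensity" (area) column instead of the apex "height", and the LFQ filters are tightened. A toy illustration of the first step in prepare_df, which restricts the fragment table to precursors present in the PSM table (toy frames; the subsequent ion hashing is omitted here):

import pandas as pd

fragment_df = pd.DataFrame(
    {"precursor_idx": [1, 1, 2, 3], "intensity": [100.0, 80.0, 50.0, 10.0]}
)
psm_df = pd.DataFrame({"precursor_idx": [1, 3]})

# keep only fragments whose precursor survived FDR filtering
filtered = fragment_df[fragment_df["precursor_idx"].isin(psm_df["precursor_idx"])].copy()
print(filtered["intensity"].tolist())  # [100.0, 80.0, 10.0]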
From b3d76319148e6246223426797a43f1f96c585d20 Mon Sep 17 00:00:00 2001
From: github-actions
Date: Tue, 23 Jan 2024 12:40:58 +0000
Subject: [PATCH 4/7] Apply Black formatting

---
 alphadia/cli.py             | 20 +++++++++++---------
 alphadia/data/thermo.py     |  6 ++----
 alphadia/features.py        | 31 +++++++++++++++++--------------
 alphadia/hybridselection.py |  3 +--
 alphadia/plexscoring.py     | 10 ++--------
 5 files changed, 33 insertions(+), 37 deletions(-)

diff --git a/alphadia/cli.py b/alphadia/cli.py
index 288c9a60..b84dc1fd 100644
--- a/alphadia/cli.py
+++ b/alphadia/cli.py
@@ -217,20 +217,22 @@ def parse_raw_path_list(args: argparse.Namespace, config: dict) -> list:
         raw_path_list += [os.path.join(directory, f) for f in os.listdir(directory)]
 
     # filter files based on regex
-    #pattern = re.compile()
-
-    print('args.regex', args.regex)
+    # pattern = re.compile()
+
+    print("args.regex", args.regex)
 
     for path in raw_path_list:
-        print('path', path)
-        print('os.path.basename(path)', os.path.basename(path))
-        #print('re.search(args.regex, os.path.basename(path))', re.search(args.regex, os.path.basename(path)))
-        #print('re.search(args.regex, os.path.basename(path)) is not None', re.search(args.regex, os.path.basename(path)) is not None)
-        #print('')
+        print("path", path)
+        print("os.path.basename(path)", os.path.basename(path))
+        # print('re.search(args.regex, os.path.basename(path))', re.search(args.regex, os.path.basename(path)))
+        # print('re.search(args.regex, os.path.basename(path)) is not None', re.search(args.regex, os.path.basename(path)) is not None)
+        # print('')
 
     len_before = len(raw_path_list)
     raw_path_list = [
-        f for f in raw_path_list if re.search(args.regex, os.path.basename(f)) is not None
+        f
+        for f in raw_path_list
+        if re.search(args.regex, os.path.basename(f)) is not None
     ]
     len_after = len(raw_path_list)
     print(f"Removed {len_before - len_after} of {len_before} files.")
diff --git a/alphadia/data/thermo.py b/alphadia/data/thermo.py
index 689baec1..1cda1970 100644
--- a/alphadia/data/thermo.py
+++ b/alphadia/data/thermo.py
@@ -619,7 +619,7 @@ def get_dense(
                     idx += 1
 
         return dense_output, precursor_idx_list
-
+
     def get_dense_intensity(
         self,
         frame_limits,
@@ -682,7 +682,6 @@ def get_dense_intensity(
             (1, n_tof_slices, n_precursor_indices, 2, precursor_cycle_len),
             dtype=np.float32,
         )
-
 
         for i, cycle_idx in enumerate(
             range(precursor_cycle_start, precursor_cycle_stop)
@@ -704,7 +703,7 @@ def get_dense_intensity(
 
                     while idx < peak_stop_idx and self.mz_values[idx] <= mz_query_stop:
                         accumulated_intensity = dense_output[0, k, j, 0, i]
-                        #accumulated_dim1 = dense_output[1, k, j, 0, i]
+                        # accumulated_dim1 = dense_output[1, k, j, 0, i]
 
                         new_intensity = self.intensity_values[idx]
@@ -714,7 +713,6 @@ def get_dense_intensity(
                         dense_output[0, k, j, 1, i] = (
                             accumulated_intensity + new_intensity
                         )
-
                         idx += 1
 
         return dense_output, precursor_idx_list
diff --git a/alphadia/features.py b/alphadia/features.py
index b1657370..10f33caf 100644
--- a/alphadia/features.py
+++ b/alphadia/features.py
@@ -725,9 +725,12 @@ def fragment_features(
 
     # (quant_window * 2)
     delta_rt = frame_rt_quant[1:] - frame_rt_quant[:-1]
-
+
     # (n_fragments)
-    fragment_area = np.sum((best_profile[:, 1:]+best_profile[:, :-1])*delta_rt.reshape(1, -1)*0.5, axis=-1)
+    fragment_area = np.sum(
+        (best_profile[:, 1:] + best_profile[:, :-1]) * delta_rt.reshape(1, -1) * 0.5,
+        axis=-1,
+    )
     fragment_area_norm = fragment_area / quant_duration
 
     observed_fragment_intensity = np.sum(best_profile, axis=-1)
@@ -738,21 +741,21 @@ def fragment_features(
     sum_fragment_intensity = np.sum(fragment_profiles, axis=-1)
 
     # create fragment intensity mask
-    #fragment_intensity_mask_2d = sum_fragment_intensity > 0
-    #fragment_intensity_weights_2d = (
-    #    fragment_intensity_mask_2d * observation_importance_reshaped
-    #)
+    # fragment_intensity_mask_2d = sum_fragment_intensity > 0
+    # fragment_intensity_weights_2d = (
+    #     fragment_intensity_mask_2d * observation_importance_reshaped
+    # )
 
     # (n_fragments, n_observations)
     # normalize rows to 1
-    #fragment_intensity_weights_2d = fragment_intensity_weights_2d / (
-    #    np.sum(fragment_intensity_weights_2d, axis=-1).reshape(-1, 1) + 1e-20
-    #)
+    # fragment_intensity_weights_2d = fragment_intensity_weights_2d / (
+    #     np.sum(fragment_intensity_weights_2d, axis=-1).reshape(-1, 1) + 1e-20
+    # )
 
     # (n_fragments)
-    #observed_fragment_intensity = weighted_mean_a1(
-    #    sum_fragment_intensity, fragment_intensity_weights_2d
-    #)
+    # observed_fragment_intensity = weighted_mean_a1(
+    #     sum_fragment_intensity, fragment_intensity_weights_2d
+    # )
 
     # (n_observations)
     sum_template_intensity = np.sum(np.sum(template, axis=-1), axis=-1)
@@ -796,9 +799,9 @@ def fragment_features(
     )
 
     if np.sum(fragment_height_mask_1d) > 0.0:
-        feature_array[18] = np.corrcoef(
-            fragment_area_norm, fragment_intensity_norm
-        )[0, 1]
+        feature_array[18] = np.corrcoef(fragment_area_norm, fragment_intensity_norm)[
+            0, 1
+        ]
 
     if np.sum(observed_fragment_height) > 0.0:
         feature_array[19] = np.corrcoef(
@@ -924,9 +927,9 @@ def profile_features(
     feature_array,
 ):
     n_observations = len(observation_importance)
-    # most intense observation across all observations
+    # most intense observation across all observations
     best_observation = np.argmax(observation_importance)
-
+
     fragment_idx_sorted = np.argsort(fragment_intensity)[::-1]
 
     # ============= FRAGMENT RT CORRELATIONS =============
diff --git a/alphadia/hybridselection.py b/alphadia/hybridselection.py
index 9707e54a..ce4554ad 100644
--- a/alphadia/hybridselection.py
+++ b/alphadia/hybridselection.py
@@ -763,9 +763,8 @@ def process(self, jit_data, fragment_container, config, kernel, debug):
         if len(precursor_mz) == 0:
             self.set_status(101, "No precursor masses after grouping")
             return
-
-        if jit_data.has_mobility:
 
+        if jit_data.has_mobility:
             # shape = (2, n_fragments, n_observations, n_scans, n_frames), dtype = np.float32
             _dense_precursors, _ = jit_data.get_dense(
                 frame_limits,
diff --git a/alphadia/plexscoring.py b/alphadia/plexscoring.py
index af458f8c..703c0c4e 100644
--- a/alphadia/plexscoring.py
+++ b/alphadia/plexscoring.py
@@ -293,12 +293,11 @@ def quant_window(self) -> int:
         The area will be calculated from `scan_center - quant_window` to `scan_center + quant_window`.
         Default: `quant_window = 3`"""
         return self._quant_window
-
+
     @quant_window.setter
     def quant_window(self, value):
         self._quant_window = value
 
-
     @property
     def precursor_mz_tolerance(self) -> float:
         """The precursor m/z tolerance in ppm.
@@ -644,9 +643,7 @@ def process(
 
         cycle_len = jit_data.cycle.shape[1]
 
-        frame_rt = jit_data.rt_values[
-            self.frame_start:self.frame_stop:cycle_len
-        ]
+        frame_rt = jit_data.rt_values[self.frame_start : self.frame_stop : cycle_len]
 
         # (n_observations, n_frames)
         template_frame_profile = features.or_envelope_1d(
@@ -658,7 +655,6 @@ def process(
             features.scan_profile_2d(dense_fragments[0])
         )
 
-
         # (n_observations, n_scans)
         template_scan_profile = features.or_envelope_1d(
             features.scan_profile_1d(template)
@@ -699,8 +695,6 @@ def process(
             feature_array,
         )
-
-
         # retrieve first fragment features
         # (n_valid_fragments)
         mz_observed, mass_error, height, intensity = features.fragment_features(
             dense_fragments,

From 0657043ea76bf20c0a54d0743edb0ae108371362 Mon Sep 17 00:00:00 2001
From: GeorgWa
Date: Wed, 24 Jan 2024 12:15:41 +0100
Subject: [PATCH 5/7] fixed directlfq version

---
 requirements/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements/requirements.txt b/requirements/requirements.txt
index f3b64ec0..1a278646 100644
--- a/requirements/requirements.txt
+++ b/requirements/requirements.txt
@@ -9,5 +9,5 @@ seaborn
 rocket_fft
 xxhash
 torchmetrics
-directlfq>=0.2.15
+directlfq==0.2.15
 pythonnet
\ No newline at end of file

From 25fda401289b75423791142e850aa8584051e290 Mon Sep 17 00:00:00 2001
From: GeorgWa
Date: Wed, 24 Jan 2024 12:42:48 +0100
Subject: [PATCH 6/7] FIX updated setup

---
 setup.py | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/setup.py b/setup.py
index d1ae9821..bd3bacad 100644
--- a/setup.py
+++ b/setup.py
@@ -25,23 +25,16 @@ def get_requirements():
             requirement_file_name,
         )
         with open(full_requirement_file_name) as requirements_file:
-            if extra != "":
-                extra_stable = f"{extra}-stable"
-            else:
-                extra_stable = "stable"
-            extra_requirements[extra_stable] = []
             extra_requirements[extra] = []
             for line in requirements_file:
-                extra_requirements[extra_stable].append(line)
-                requirement, *comparison = re.split("[><=~!]", line)
-                requirement == requirement.strip()
-                extra_requirements[extra].append(requirement)
+                extra_requirements[extra].append(line)
     requirements = extra_requirements.pop("")
     return requirements, extra_requirements
 
 
 def create_pip_wheel():
     requirements, extra_requirements = get_requirements()
+    print("requirements", requirements, flush=True)
     setuptools.setup(
         name=package2install.__project__,
         version=package2install.__version__,

From fc9963cf6b0c8fc41a18edf5a1a2d4c5160c71da Mon Sep 17 00:00:00 2001
From: GeorgWa
Date: Wed, 24 Jan 2024 12:48:08 +0100
Subject: [PATCH 7/7] don't fix dev versions

---
 requirements/requirements_development.txt | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/requirements/requirements_development.txt b/requirements/requirements_development.txt
index c5255d28..4a47f6b0 100644
--- a/requirements/requirements_development.txt
+++ b/requirements/requirements_development.txt
@@ -1,14 +1,14 @@
-jupyter==1.0.0
-jupyter_contrib_nbextensions==0.5.1
-pyinstaller==4.2
-autodocsumm==0.2.6
-sphinx-rtd-theme==0.5.2
-twine==3.4.1
-bumpversion==0.6.0
-pipdeptree==2.1.0
-ipykernel==6.4.0
-tqdm==4.61.1
-psutil==5.8.0
+jupyter
+jupyter_contrib_nbextensions
+pyinstaller
+autodocsumm
+sphinx-rtd-theme
+twine
+bumpversion
+pipdeptree
+ipykernel
+tqdm
+psutil
 coverage
 coverage-badge
 pytest
\ No newline at end of file
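Closing note on patches 5-7: directlfq is pinned to exactly 0.2.15, the generated "-stable" extras are dropped from setup.py, and the development tools are left unpinned. After patch 6, get_requirements keeps each requirement line verbatim, version specifier included, rather than splitting the specifier off. A condensed sketch of the remaining behavior (file layout assumed to follow the requirements/requirements*.txt scheme used above):

def read_requirements(path: str) -> list:
    # keep requirement lines as-is, version specifiers included
    with open(path) as requirements_file:
        return [line.strip() for line in requirements_file if line.strip()]

# e.g. read_requirements("requirements/requirements.txt")
# -> [..., "directlfq==0.2.15", "pythonnet"]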