diff --git a/CHANGELOG.md b/CHANGELOG.md
index d88916e..e13d170 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,13 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 
 N/A
 
+## [0.2.2] 27-05-2024
+
+### Changed
+
+- Removed 'phenotypes' as separate Feature attribute: write to the Annotations object instead.
+- Implemented sorting of annotation entries on export (scores descending, mass differences ascending)
+
 ## [0.2.1] 26-05-2024
 
 ### Fixed
diff --git a/fermo_core/data_analysis/phenotype_manager/class_phen_qual_assigner.py b/fermo_core/data_analysis/phenotype_manager/class_phen_qual_assigner.py
index d5ff48e..85d03fe 100644
--- a/fermo_core/data_analysis/phenotype_manager/class_phen_qual_assigner.py
+++ b/fermo_core/data_analysis/phenotype_manager/class_phen_qual_assigner.py
@@ -27,7 +27,11 @@
 from pydantic import BaseModel
 
-from fermo_core.data_processing.builder_feature.dataclass_feature import Phenotype
+from fermo_core.data_processing.builder_feature.dataclass_feature import (
+    Annotations,
+    Feature,
+    Phenotype,
+)
 from fermo_core.data_processing.class_repository import Repository
 from fermo_core.data_processing.class_stats import Stats
 from fermo_core.input_output.class_parameter_manager import ParameterManager
@@ -60,6 +64,22 @@ def return_values(self: Self) -> tuple[Stats, Repository]:
         """
         return self.stats, self.features
 
+    @staticmethod
+    def add_annotation_attribute(feature: Feature) -> Feature:
+        """Add annotation attribute to feature if not already present
+
+        Arguments:
+            feature: the Feature object to modify
+
+        Returns:
+            The modified feature object
+        """
+        if feature.Annotations is None:
+            feature.Annotations = Annotations()
+        if feature.Annotations.phenotypes is None:
+            feature.Annotations.phenotypes = []
+        return feature
+
     def collect_sets(self: Self):
         """Collect sets of active and inactive features and assign actives"""
         f_ids_all_actives = set()
@@ -80,11 +100,12 @@ def collect_sets(self: Self):
             self.stats.phenotypes[0].f_ids_positive.update(f_ids_only_actives)
             for f_id in f_ids_only_actives:
                 feature = self.features.get(f_id)
-                feature.phenotypes = [
+                feature = self.add_annotation_attribute(feature=feature)
+                feature.Annotations.phenotypes.append(
                     Phenotype(
                         score=0, format="qualitative", descr="only in positive samples"
                     )
-                ]
+                )
                 self.features.modify(f_id, feature)
 
         self.f_ids_intersect = f_ids_all_actives.intersection(f_ids_all_inactives)
@@ -145,23 +166,26 @@ def bin_intersection(self: Self):
                 case "minmax":
                     factor = min(vals_act) / max(vals_inact)
                     if factor >= self.params.PhenoQualAssgnParams.factor:
-                        feature.phenotypes = [
+                        feature = self.add_annotation_attribute(feature=feature)
+                        feature.Annotations.phenotypes.append(
                             Phenotype(score=factor, format="qualitative")
-                        ]
+                        )
                        self.stats.phenotypes[0].f_ids_positive.add(f_id)
                 case "mean":
                     factor = mean(vals_act) / mean(vals_inact)
                     if factor >= self.params.PhenoQualAssgnParams.factor:
-                        feature.phenotypes = [
+                        feature = self.add_annotation_attribute(feature=feature)
+                        feature.Annotations.phenotypes.append(
                             Phenotype(score=factor, format="qualitative")
-                        ]
+                        )
                        self.stats.phenotypes[0].f_ids_positive.add(f_id)
                 case "median":
                     factor = median(vals_act) / median(vals_inact)
                     if factor >= self.params.PhenoQualAssgnParams.factor:
-                        feature.phenotypes = [
+                        feature = self.add_annotation_attribute(feature=feature)
+                        feature.Annotations.phenotypes.append(
                             Phenotype(score=factor, format="qualitative")
-                        ]
+                        )
                        self.stats.phenotypes[0].f_ids_positive.add(f_id)
                 case _:
                     raise RuntimeError("'PhenQualAssigner': Unsupported algorithm.")
diff --git a/fermo_core/data_analysis/phenotype_manager/class_phen_quant_conc_assigner.py b/fermo_core/data_analysis/phenotype_manager/class_phen_quant_conc_assigner.py
index 6a1a7c4..2eb61ba 100644
--- a/fermo_core/data_analysis/phenotype_manager/class_phen_quant_conc_assigner.py
+++ b/fermo_core/data_analysis/phenotype_manager/class_phen_quant_conc_assigner.py
@@ -27,7 +27,11 @@
 from pydantic import BaseModel
 from scipy.stats import pearsonr, zscore
 
-from fermo_core.data_processing.builder_feature.dataclass_feature import Phenotype
+from fermo_core.data_processing.builder_feature.dataclass_feature import (
+    Annotations,
+    Feature,
+    Phenotype,
+)
 from fermo_core.data_processing.class_repository import Repository
 from fermo_core.data_processing.class_stats import Stats
 from fermo_core.input_output.class_parameter_manager import ParameterManager
@@ -60,6 +64,22 @@ def return_values(self: Self) -> tuple[Stats, Repository]:
         """
         return self.stats, self.features
 
+    @staticmethod
+    def add_annotation_attribute(feature: Feature) -> Feature:
+        """Add annotation attribute to feature if not already present
+
+        Arguments:
+            feature: the Feature object to modify
+
+        Returns:
+            The modified feature object
+        """
+        if feature.Annotations is None:
+            feature.Annotations = Annotations()
+        if feature.Annotations.phenotypes is None:
+            feature.Annotations.phenotypes = []
+        return feature
+
     def find_relevant_f_ids(self: Self):
         """Determines features detected in > 3 samples"""
         for f_id in self.stats.active_features:
@@ -120,9 +140,8 @@ def calculate_correlation(self: Self):
                     self.params.PhenoQuantConcAssgnParams.coeff_cutoff == 0
                     or self.params.PhenoQuantConcAssgnParams.p_val_cutoff == 0
                 ):
-                    if feature.phenotypes is None:
-                        feature.phenotypes = []
-                    feature.phenotypes.append(
+                    feature = self.add_annotation_attribute(feature=feature)
+                    feature.Annotations.phenotypes.append(
                         Phenotype(
                             format=assay.datatype,
                             category=assay.category,
@@ -136,9 +155,8 @@ def calculate_correlation(self: Self):
                     pearson_s > self.params.PhenoQuantConcAssgnParams.coeff_cutoff
                    and p_val_cor < self.params.PhenoQuantConcAssgnParams.p_val_cutoff
                 ):
-                    if feature.phenotypes is None:
-                        feature.phenotypes = []
-                    feature.phenotypes.append(
+                    feature = self.add_annotation_attribute(feature=feature)
+                    feature.Annotations.phenotypes.append(
                         Phenotype(
                             format=assay.datatype,
                             category=assay.category,
diff --git a/fermo_core/data_analysis/phenotype_manager/class_phen_quant_perc_assigner.py b/fermo_core/data_analysis/phenotype_manager/class_phen_quant_perc_assigner.py
index edd3eb4..39e4e36 100644
--- a/fermo_core/data_analysis/phenotype_manager/class_phen_quant_perc_assigner.py
+++ b/fermo_core/data_analysis/phenotype_manager/class_phen_quant_perc_assigner.py
@@ -27,7 +27,11 @@
 from pydantic import BaseModel
 from scipy.stats import pearsonr, zscore
 
-from fermo_core.data_processing.builder_feature.dataclass_feature import Phenotype
+from fermo_core.data_processing.builder_feature.dataclass_feature import (
+    Annotations,
+    Feature,
+    Phenotype,
+)
 from fermo_core.data_processing.class_repository import Repository
 from fermo_core.data_processing.class_stats import Stats
 from fermo_core.input_output.class_parameter_manager import ParameterManager
@@ -60,6 +64,22 @@ def return_values(self: Self) -> tuple[Stats, Repository]:
         """
         return self.stats, self.features
 
+    @staticmethod
+    def add_annotation_attribute(feature: Feature) -> Feature:
+        """Add annotation attribute to feature if not already present
+
+        Arguments:
+            feature: the Feature object to modify
+
+        Returns:
+            The modified feature object
+        """
+        if feature.Annotations is None:
+            feature.Annotations = Annotations()
+        if feature.Annotations.phenotypes is None:
+            feature.Annotations.phenotypes = []
+        return feature
+
     def find_relevant_f_ids(self: Self):
         """Determines features detected in > 3 samples"""
         for f_id in self.stats.active_features:
@@ -118,9 +138,8 @@ def calculate_correlation(self: Self):
                     self.params.PhenoQuantPercentAssgnParams.coeff_cutoff == 0
                     or self.params.PhenoQuantPercentAssgnParams.p_val_cutoff == 0
                 ):
-                    if feature.phenotypes is None:
-                        feature.phenotypes = []
-                    feature.phenotypes.append(
+                    feature = self.add_annotation_attribute(feature=feature)
+                    feature.Annotations.phenotypes.append(
                         Phenotype(
                             format=assay.datatype,
                             category=assay.category,
@@ -135,9 +154,8 @@ def calculate_correlation(self: Self):
                    and p_val_cor
                    < self.params.PhenoQuantPercentAssgnParams.p_val_cutoff
                 ):
-                    if feature.phenotypes is None:
-                        feature.phenotypes = []
-                    feature.phenotypes.append(
+                    feature = self.add_annotation_attribute(feature=feature)
+                    feature.Annotations.phenotypes.append(
                         Phenotype(
                             format=assay.datatype,
                             category=assay.category,
diff --git a/fermo_core/data_analysis/phenotype_manager/class_phenotype_manager.py b/fermo_core/data_analysis/phenotype_manager/class_phenotype_manager.py
index 375aa57..2d3e1e0 100644
--- a/fermo_core/data_analysis/phenotype_manager/class_phenotype_manager.py
+++ b/fermo_core/data_analysis/phenotype_manager/class_phenotype_manager.py
@@ -146,7 +146,7 @@ def run_assigner_quant_percentage(self: Self):
             return
 
         logger.info(
-            "'PhenotypeManager': started quantitative phenotype data analysis for "
+            "'PhenotypeManager': completed quantitative phenotype data analysis for "
             "percentage data."
         )
 
diff --git a/fermo_core/data_analysis/score_assigner/class_score_assigner.py b/fermo_core/data_analysis/score_assigner/class_score_assigner.py
index 70b3a29..6d9699c 100644
--- a/fermo_core/data_analysis/score_assigner/class_score_assigner.py
+++ b/fermo_core/data_analysis/score_assigner/class_score_assigner.py
@@ -71,8 +71,12 @@ def assign_feature_scores(self: Self):
             feature = self.features.get(f_id)
             feature.Scores = FeatureScores()
 
-            if feature.phenotypes is not None and len(feature.phenotypes) != 0:
-                phen_scores = [assay.score for assay in feature.phenotypes]
+            if (
+                feature.Annotations is not None
+                and feature.Annotations.phenotypes is not None
+                and len(feature.Annotations.phenotypes) != 0
+            ):
+                phen_scores = [assay.score for assay in feature.Annotations.phenotypes]
                 feature.Scores.phenotype = max(phen_scores)
 
             if (
diff --git a/fermo_core/data_processing/builder_feature/dataclass_feature.py b/fermo_core/data_processing/builder_feature/dataclass_feature.py
index 69fb190..059c935 100644
--- a/fermo_core/data_processing/builder_feature/dataclass_feature.py
+++ b/fermo_core/data_processing/builder_feature/dataclass_feature.py
@@ -154,6 +154,39 @@ def to_json(self: Self) -> dict:
         }
 
 
+class Phenotype(BaseModel):
+    """A Pydantic-based class to represent phenotype information
+
+    Attributes:
+        format: the format of the phenotype file
+        category: the assay category (column) if applicable
+        descr: additional data if applicable
+        score: the score calculated
+        p_value: the calculated p-value if applicable
+        p_value_corr: the corrected p-value if applicable
+
+    """
+
+    format: str
+    category: Optional[str] = None
+    descr: Optional[str] = None
+    score: float
+    p_value: Optional[float] = None
+    p_value_corr: Optional[float] = None
+
+    def to_json(self: Self) -> dict:
+        return {
+            "format": self.format,
+            "category": self.category if self.category is not None else "N/A",
+            "descr": self.descr if self.descr is not None else "N/A",
+            "score": round(self.score, 6),
+            "p_value": round(self.p_value, 10) if self.p_value is not None else 1.0,
+            "p_value_corr": (
+                round(self.p_value_corr, 10) if self.p_value_corr is not None else 1.0
+            ),
+        }
+
+
 class Annotations(BaseModel):
     """A Pydantic-based class to represent annotation information
 
@@ -162,14 +195,41 @@ class Annotations(BaseModel):
         adducts: list of Adduct objects repr. adducts of the same feature
         matches: list of Match objects repr. putative library matching hits
         losses: list of NeutralLoss objects annotating functional groups of feature
         fragments: list of CharFrag objects annotating characteristic ion fragments
+        phenotypes: list of Phenotype objects if feature phenotype-associated
     """
 
     adducts: Optional[list] = None
     matches: Optional[list] = None
     losses: Optional[list] = None
     fragments: Optional[list] = None
+    phenotypes: Optional[list] = None
+
+    def sort_entries(self: Self, attr: str, score: str, direction: bool):
+        """Sort the entries in 'attr' based on 'score' in the given direction
+
+        Arguments:
+            attr: the attribute to target
+            score: the score to sort for
+            direction: False to sort low to high, True for reverse
+
+        """
+        if getattr(self, attr) is not None:
+            setattr(
+                self,
+                attr,
+                sorted(
+                    getattr(self, attr),
+                    key=lambda x: getattr(x, score),
+                    reverse=direction,
+                ),
+            )
 
     def to_json(self: Self) -> dict:
+        self.sort_entries(attr="adducts", score="diff_ppm", direction=False)
+        self.sort_entries(attr="matches", score="score", direction=True)
+        self.sort_entries(attr="losses", score="diff", direction=False)
+        self.sort_entries(attr="fragments", score="diff", direction=False)
+        self.sort_entries(attr="phenotypes", score="score", direction=True)
         return {
             "adducts": (
                 [adduct.to_json() for adduct in self.adducts]
@@ -191,6 +251,11 @@ def to_json(self: Self) -> dict:
                 if self.fragments is not None
                 else []
             ),
+            "phenotypes": (
+                [phenotype.to_json() for phenotype in self.phenotypes]
+                if self.phenotypes is not None
+                else []
+            ),
         }
 
 
@@ -251,39 +316,6 @@ def to_json(self: Self) -> dict:
         }
 
 
-class Phenotype(BaseModel):
-    """A Pydantic-based class to represent phenotype information
-
-    Attributes:
-        format: the format of the phenotype file
-        category: the assay category (column) if applicable
-        descr: additional data if applicable
-        score: the score calculated
-        p_value: the calculated p-value if applicable
-        p_value_corr: the Bonferroni-corrected p-value if applicable
-
-    """
-
-    format: str
-    category: Optional[str] = None
-    descr: Optional[str] = None
-    score: float
-    p_value: Optional[float] = None
-    p_value_corr: Optional[float] = None
-
-    def to_json(self: Self) -> dict:
-        return {
-            "format": self.format,
-            "category": self.category if self.category is not None else "N/A",
-            "descr": self.descr if self.descr is not None else "N/A",
-            "score": round(self.score, 6),
-            "p_value": round(self.p_value, 10) if self.p_value is not None else 1.0,
-            "p_value_corr": (
-                round(self.p_value_corr, 10) if self.p_value_corr is not None else 1.0
-            ),
-        }
-
-
 class Scores(BaseModel):
     """A Pydantic-based class to represent feature score information
 
@@ -328,7 +360,6 @@ class Feature(BaseModel):
         blank: bool to indicate if feature is blank-associated (if provided).
         groups: association to categories and groups is such data was provided.
         group_factors: indicates the group factors(fold differences) if provided.
-        phenotypes: a list of Phenotype objects if phenotype was assigned
         Annotations: objects summarizing associated annotation data
         networks: dict of objects representing associated networking data
         Scores: Object representing feature-associated scores
@@ -354,7 +385,6 @@
     blank: Optional[bool] = None
     groups: Optional[dict] = None
     group_factors: Optional[dict] = None
-    phenotypes: Optional[list] = None
     Annotations: Optional[Any] = None
     networks: Optional[dict] = None
     Scores: Optional[Any] = None
@@ -410,24 +440,15 @@ def _add_per_sample(attr: str):
         else:
             json_dict["group_factors"] = {}
 
-        if self.phenotypes is not None:
-            json_dict["phenotypes"] = [obj.to_json() for obj in self.phenotypes]
-        else:
-            json_dict["phenotypes"] = []
-
         if self.Scores is not None:
             json_dict["scores"] = self.Scores.to_json()
         else:
             json_dict["scores"] = {}
 
-        if self.Spectrum is not None:
-            json_dict["spectrum"] = {
-                "mz": list(self.Spectrum.mz),
-                "int": [round(i, 3) for i in self.Spectrum.intensities],
-                "metadata": self.Spectrum.metadata,
-            }
+        if self.Annotations is not None:
+            json_dict["annotations"] = self.Annotations.to_json()
         else:
-            json_dict["spectrum"] = {}
+            json_dict["annotations"] = {}
 
         if self.networks is not None:
             json_dict["networks"] = {
@@ -436,9 +457,13 @@ def _add_per_sample(attr: str):
         else:
             json_dict["networks"] = {}
 
-        if self.Annotations is not None:
-            json_dict["annotations"] = self.Annotations.to_json()
+        if self.Spectrum is not None:
+            json_dict["spectrum"] = {
+                "mz": list(self.Spectrum.mz),
+                "int": [round(i, 3) for i in self.Spectrum.intensities],
+                "metadata": self.Spectrum.metadata,
+            }
         else:
-            json_dict["annotations"] = {}
+            json_dict["spectrum"] = {}
 
         return json_dict
diff --git a/fermo_core/input_output/class_export_manager.py b/fermo_core/input_output/class_export_manager.py
index ea16a53..a94810d 100644
--- a/fermo_core/input_output/class_export_manager.py
+++ b/fermo_core/input_output/class_export_manager.py
@@ -24,7 +24,6 @@
 import json
 import logging
 import platform
-import shutil
 from datetime import datetime
 from typing import Any, Self
 
@@ -32,7 +31,6 @@
 import pandas as pd
 from pydantic import BaseModel
 
-from fermo_core.config.class_default_settings import DefaultPaths
 from fermo_core.data_processing.class_repository import Repository
 from fermo_core.data_processing.class_stats import Stats
 from fermo_core.input_output.class_parameter_manager import ParameterManager
diff --git a/pyproject.toml b/pyproject.toml
index a9979d4..0eb32f2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "fermo_core"
-version = "0.2.1"
+version = "0.2.2"
 description = "Data processing/analysis functionality of metabolomics dashboard FERMO"
 readme = "README.md"
 requires-python = ">=3.11,<3.12"
diff --git a/tests/test_data_analysis/test_phenotype_manager/test_class_phen_quant_conc_assigner.py b/tests/test_data_analysis/test_phenotype_manager/test_class_phen_quant_conc_assigner.py
index 79ae076..368015e 100644
--- a/tests/test_data_analysis/test_phenotype_manager/test_class_phen_quant_conc_assigner.py
+++ b/tests/test_data_analysis/test_phenotype_manager/test_class_phen_quant_conc_assigner.py
@@ -3,13 +3,13 @@
 from fermo_core.data_analysis.phenotype_manager.class_phen_quant_conc_assigner import (
     PhenQuantConcAssigner,
 )
-from fermo_core.data_processing.class_stats import Stats, PhenoData, SamplePhenotype
-from fermo_core.data_processing.class_repository import Repository
 from fermo_core.data_processing.builder_feature.dataclass_feature import (
     Feature,
     SampleInfo,
 )
 from fermo_core.data_processing.builder_sample.dataclass_sample import Sample
+from fermo_core.data_processing.class_repository import Repository
+from fermo_core.data_processing.class_stats import PhenoData, SamplePhenotype, Stats
 from fermo_core.input_output.class_parameter_manager import ParameterManager
 
 
@@ -76,14 +76,14 @@ def test_find_relevant_f_ids(phen_quant_conc):
 def test_calculate_correlation_valid(phen_quant_conc):
     phen_quant_conc.find_relevant_f_ids()
     phen_quant_conc.calculate_correlation()
-    assert phen_quant_conc.features.entries[2].phenotypes[0] is not None
+    assert phen_quant_conc.features.entries[2].Annotations.phenotypes[0] is not None
 
 
 def test_calculate_correlation_0_cutoffs_valid(phen_quant_conc):
     phen_quant_conc.params.PhenoQuantConcAssgnParams.p_val_cutoff = 0
     phen_quant_conc.find_relevant_f_ids()
     phen_quant_conc.calculate_correlation()
-    assert phen_quant_conc.features.entries[1].phenotypes[0] is not None
+    assert phen_quant_conc.features.entries[1].Annotations.phenotypes[0] is not None
 
 
 def test_calculate_correlation_invalid(phen_quant_conc):
@@ -93,7 +93,7 @@ def test_calculate_correlation_invalid(phen_quant_conc):
 
 def test_run_analysis_valid(phen_quant_conc):
     phen_quant_conc.run_analysis()
-    assert phen_quant_conc.features.entries[2].phenotypes[0] is not None
+    assert phen_quant_conc.features.entries[2].Annotations.phenotypes[0] is not None
 
 
 def test_run_analysis_invalid(phen_quant_conc):
diff --git a/tests/test_data_analysis/test_phenotype_manager/test_class_phen_quant_perc_assigner.py b/tests/test_data_analysis/test_phenotype_manager/test_class_phen_quant_perc_assigner.py
index fb9b7f9..37d5227 100644
--- a/tests/test_data_analysis/test_phenotype_manager/test_class_phen_quant_perc_assigner.py
+++ b/tests/test_data_analysis/test_phenotype_manager/test_class_phen_quant_perc_assigner.py
@@ -3,13 +3,13 @@
 from fermo_core.data_analysis.phenotype_manager.class_phen_quant_perc_assigner import (
     PhenQuantPercAssigner,
 )
-from fermo_core.data_processing.class_stats import Stats, PhenoData, SamplePhenotype
-from fermo_core.data_processing.class_repository import Repository
 from fermo_core.data_processing.builder_feature.dataclass_feature import (
     Feature,
     SampleInfo,
 )
 from fermo_core.data_processing.builder_sample.dataclass_sample import Sample
+from fermo_core.data_processing.class_repository import Repository
+from fermo_core.data_processing.class_stats import PhenoData, SamplePhenotype, Stats
 from fermo_core.input_output.class_parameter_manager import ParameterManager
 
 
@@ -76,7 +76,7 @@ def test_find_relevant_f_ids(phen_quant_perc):
 def test_calculate_correlation_valid(phen_quant_perc):
     phen_quant_perc.find_relevant_f_ids()
     phen_quant_perc.calculate_correlation()
-    assert phen_quant_perc.features.entries[2].phenotypes[0] is not None
+    assert phen_quant_perc.features.entries[2].Annotations.phenotypes[0] is not None
 
 
 def test_calculate_correlation_invalid(phen_quant_perc):
@@ -86,7 +86,7 @@ def test_calculate_correlation_invalid(phen_quant_perc):
 
 def test_run_analysis_valid(phen_quant_perc):
     phen_quant_perc.run_analysis()
-    assert phen_quant_perc.features.entries[2].phenotypes[0] is not None
+    assert phen_quant_perc.features.entries[2].Annotations.phenotypes[0] is not None
 
 
 def test_run_analysis_invalid(phen_quant_perc):
diff --git a/tests/test_data_analysis/test_score_assigner/test_class_score_assigner.py b/tests/test_data_analysis/test_score_assigner/test_class_score_assigner.py
index 4ea54d1..fd33ffa 100644
--- a/tests/test_data_analysis/test_score_assigner/test_class_score_assigner.py
+++ b/tests/test_data_analysis/test_score_assigner/test_class_score_assigner.py
@@ -36,15 +36,6 @@ def score_assigner():
         samples={
             "s1",
         },
-        phenotypes=[
-            Phenotype(
-                format="quantitative-concentration",
-                category="assay:assay1",
-                score=0.9,
-                p_value=0.000005,
-                p_value_corr=0.0005,
-            )
-        ],
         Annotations=Annotations(
             matches=[
                 Match(
@@ -65,7 +56,16 @@ def score_assigner():
                     diff_mz=0.011,
                     module="library-annotation",
                 ),
-            ]
+            ],
+            phenotypes=[
+                Phenotype(
+                    format="quantitative-concentration",
+                    category="assay:assay1",
+                    score=0.9,
+                    p_value=0.000005,
+                    p_value_corr=0.0005,
+                )
+            ],
         ),
     )
     score_assigner.features.add(1, f1)
@@ -96,14 +96,14 @@ def test_assign_feature_scores_valid(score_assigner):
 
 
 def test_assign_feature_scores_phenotype_invalid(score_assigner):
-    score_assigner.features.entries[1].phenotypes = None
+    score_assigner.features.entries[1].Annotations.phenotypes = None
     score_assigner.assign_feature_scores()
     assert score_assigner.features.entries[1].Scores.phenotype is None
     assert round(score_assigner.features.entries[1].Scores.novelty, 1) == 0.1
 
 
 def test_assign_feature_scores_novelty_invalid(score_assigner):
-    score_assigner.features.entries[1].Annotations = None
+    score_assigner.features.entries[1].Annotations.matches = None
     score_assigner.assign_feature_scores()
     assert score_assigner.features.entries[1].Scores.phenotype == 0.9
     assert score_assigner.features.entries[1].Scores.novelty is None
@@ -111,7 +111,6 @@ def test_assign_feature_scores_novelty_invalid(score_assigner):
 
 def test_assign_feature_scores_invalid(score_assigner):
     score_assigner.features.entries[1].Annotations = None
-    score_assigner.features.entries[1].phenotypes = None
     score_assigner.assign_feature_scores()
     assert score_assigner.features.entries[1].Scores.phenotype is None
 
diff --git a/tests/test_data_processing/test_builder_feature/test_dataclass_feature.py b/tests/test_data_processing/test_builder_feature/test_dataclass_feature.py
index 45c77f4..5d33ced 100644
--- a/tests/test_data_processing/test_builder_feature/test_dataclass_feature.py
+++ b/tests/test_data_processing/test_builder_feature/test_dataclass_feature.py
@@ -4,15 +4,15 @@
 from fermo_core.data_processing.builder_feature.dataclass_feature import (
     Adduct,
     Annotations,
+    CharFrag,
     Feature,
+    GroupFactor,
     Match,
     NeutralLoss,
-    SimNetworks,
-    CharFrag,
-    SampleInfo,
-    GroupFactor,
     Phenotype,
+    SampleInfo,
     Scores,
+    SimNetworks,
 )
 
 
@@ -114,6 +114,16 @@ def test_to_json_fragments_valid():
     assert f_dict["annotations"]["fragments"][0]["id"] == "Ala-Ala"
 
 
+def test_to_json_phenotype_valid():
+    feature = Feature()
+    feature.Annotations = Annotations()
+    feature.Annotations.phenotypes = [
+        Phenotype(score=0.1, format="qualitative", descr="asdf")
+    ]
+    f_dict = feature.to_json()
+    assert f_dict["annotations"]["phenotypes"][0]["score"] == 0.1
+
+
 def test_to_json_area_per_sample_valid():
     feature = Feature()
     feature.area_per_sample = [
@@ -153,15 +163,122 @@ def test_to_json_groups_valid():
     assert len(f_dict["groups"]["cat1"]) == 3
 
 
-def test_to_json_phenotypes_valid():
-    feature = Feature()
-    feature.phenotypes = [Phenotype(score=0.1, format="qualitative", descr="asdf")]
-    f_dict = feature.to_json()
-    assert f_dict["phenotypes"][0]["score"] == 0.1
-
-
 def test_to_json_scores_valid():
     feature = Feature()
     feature.Scores = Scores(novelty=1.0, phenotype=0.88)
     f_dict = feature.to_json()
     assert f_dict["scores"]["novelty"] == 1.0
+
+
+def test_sort_entries_adducts_invalid():
+    annotation = Annotations()
+    annotation.sort_entries(attr="adducts", score="diff_ppm", direction=True)
+    assert annotation.adducts is None
+
+
+def test_sort_entries_adducts_valid():
+    annotation = Annotations(
+        adducts=[
+            Adduct(
+                partner_adduct="A",
+                partner_mz=12,
+                diff_ppm=11.0,
+                partner_id=12,
+                adduct_type="B",
+            ),
+            Adduct(
+                partner_adduct="A",
+                partner_mz=12,
+                diff_ppm=1.0,
+                partner_id=12,
+                adduct_type="B",
+            ),
+        ]
+    )
+    annotation.sort_entries(attr="adducts", score="diff_ppm", direction=False)
+    assert annotation.adducts[0].diff_ppm == 1.0
+
+
+def test_sort_entries_matches_invalid():
+    annotation = Annotations()
+    annotation.sort_entries(attr="matches", score="score", direction=True)
+    assert annotation.matches is None
+
+
+def test_sort_entries_matches_valid():
+    annotation = Annotations()
+    annotation.matches = [
+        Match(
+            id="fakeomycin",
+            library="default_library",
+            algorithm="modified cosine",
+            score=0.1,
+            mz=1234.5,
+            diff_mz=300.2,
+            module="user-library-matching",
+        ),
+        Match(
+            id="fakeomycin",
+            library="default_library",
+            algorithm="modified cosine",
+            score=0.99,
+            mz=1234.5,
+            diff_mz=300.2,
+            module="user-library-matching",
+        ),
+    ]
+    annotation.sort_entries(attr="matches", score="score", direction=True)
+    assert annotation.matches[0].score == 0.99
+
+
+def test_sort_entries_losses_invalid():
+    annotation = Annotations()
+    annotation.sort_entries(attr="losses", score="diff", direction=False)
+    assert annotation.losses is None
+
+
+def test_sort_entries_losses_valid():
+    annotation = Annotations(
+        losses=[
+            NeutralLoss(
+                id="alanine", loss_det=100.0, loss_ex=100.01, mz_frag=80.0, diff=12.0
+            ),
+            NeutralLoss(
+                id="non-ala", loss_det=100.0, loss_ex=100.01, mz_frag=80.0, diff=1.0
+            ),
+        ]
+    )
+    annotation.sort_entries(attr="losses", score="diff", direction=False)
+    assert annotation.losses[0].diff == 1.0
+
+
+def test_sort_entries_fragments_invalid():
+    annotation = Annotations()
+    annotation.sort_entries(attr="fragments", score="diff", direction=False)
+    assert annotation.fragments is None
+
+
+def test_sort_entries_fragments_valid():
+    annotation = Annotations()
+    annotation.fragments = [
+        CharFrag(id="Ala-Ala", frag_det=100.0, frag_ex=100.01, diff=12.0),
+        CharFrag(id="yalla-yalla", frag_det=100.0, frag_ex=100.01, diff=1.0),
+    ]
+    annotation.sort_entries(attr="fragments", score="diff", direction=False)
+    assert annotation.fragments[0].diff == 1.0
+
+
+def test_sort_entries_phenotypes_invalid():
+    annotation = Annotations()
+    annotation.sort_entries(attr="phenotypes", score="score", direction=True)
+    assert annotation.phenotypes is None
+
+
+def test_sort_entries_phenotypes_valid():
+    annotation = Annotations()
+    annotation.phenotypes = [
+        Phenotype(score=0.1, format="qualitative", descr="asdf"),
+        Phenotype(score=1.0, format="qualitative", descr="qwertz"),
+    ]
+    annotation.sort_entries(attr="phenotypes", score="score", direction=True)
+    assert annotation.phenotypes[0].score == 1.0
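
Reviewer note: the sketch below is not part of the patch. It is a minimal illustration of the new annotation flow, using only the Feature, Annotations, and Phenotype models from dataclass_feature.py as changed above; the score and description values are made up.

```python
from fermo_core.data_processing.builder_feature.dataclass_feature import (
    Annotations,
    Feature,
    Phenotype,
)

feature = Feature()

# Mirrors the add_annotation_attribute() helper added to the assigner classes:
# lazily create the Annotations container and its phenotypes list.
if feature.Annotations is None:
    feature.Annotations = Annotations()
if feature.Annotations.phenotypes is None:
    feature.Annotations.phenotypes = []

# Phenotype hits are now appended here instead of to the removed
# Feature.phenotypes attribute (illustrative values).
feature.Annotations.phenotypes.append(
    Phenotype(score=0.9, format="qualitative", descr="only in positive samples")
)

# On export, the hits appear under "annotations" -> "phenotypes".
print(feature.to_json()["annotations"]["phenotypes"][0]["score"])  # 0.9
```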
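
A second short sketch of the sort_entries() ordering that Annotations.to_json() now applies before export (again only classes from this diff; the two entries are illustrative):

```python
from fermo_core.data_processing.builder_feature.dataclass_feature import (
    Annotations,
    Phenotype,
)

annotation = Annotations(
    phenotypes=[
        Phenotype(score=0.1, format="qualitative"),
        Phenotype(score=1.0, format="qualitative"),
    ]
)

# direction=True sorts descending, so the best-scoring entry comes first;
# to_json() uses this for 'matches' and 'phenotypes', while 'adducts',
# 'losses', and 'fragments' are sorted ascending by their mass difference.
annotation.sort_entries(attr="phenotypes", score="score", direction=True)
assert annotation.phenotypes[0].score == 1.0
```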