     experiment as prb_experiment,
     organism as prb_organism,
     original_file as prb_original_file,
+    job as prb_job,
 )
 from pyrefinebio.api_interface import get_by_endpoint
 from pyrefinebio.base import Base
@@ -56,12 +57,17 @@ def __init__(
         compound=None,
         time=None,
         is_processed=None,
+        is_unable_to_be_processed=None,
         created_at=None,
         last_modified=None,
         contributed_metadata=None,
         contributed_keywords=None,
         original_files=[],
         computed_files=[],
+        last_processor_job=None,
+        last_downloader_job=None,
+        most_recent_smashable_file=None,
+        most_recent_quant_file=None,
         experiment_accession_codes=None,
         experiments=None,
     ):
@@ -103,6 +109,7 @@ def __init__(
         self.compound = compound
         self.time = time
         self.is_processed = is_processed
+        self.is_unable_to_be_processed = is_unable_to_be_processed
         self.created_at = parse_date(created_at)
         self.last_modified = parse_date(last_modified)
         self.contributed_metadata = contributed_metadata
@@ -118,6 +125,15 @@ def __init__(
             else []
         )
 
+        # this isn't populated yet but the api does include these keys in the response
+        # so for now let's just try to apply them
+        if last_processor_job:
+            self.last_processor_job = prb_job.ProcessorJob(**last_processor_job)
+        if last_downloader_job:
+            self.last_downloader_job = prb_job.DownloaderJob(**last_downloader_job)
+
+        self.most_recent_smashable_file = most_recent_smashable_file
+        self.most_recent_quant_file = most_recent_quant_file
         self.experiment_accession_codes = experiment_accession_codes
         self.experiments = experiments
 
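A quick usage sketch of the new fields, assuming the library's existing `Sample.get(accession_code)` classmethod and a hypothetical accession code. Because `last_processor_job` / `last_downloader_job` are only assigned when the response actually contains a job (see the `if` guards above), the attribute may be missing entirely, hence the `getattr` lookup; the `id` attribute on the job object is likewise an assumption about `prb_job.ProcessorJob`, not something shown in this diff.

```python
import pyrefinebio

# Hypothetical accession code; any sample accession known to the API would do.
sample = pyrefinebio.Sample.get("GSM000000")

# last_processor_job is only set when the API response includes one,
# so fall back to None instead of risking an AttributeError.
last_job = getattr(sample, "last_processor_job", None)
if last_job is not None:
    print(last_job.id)

print(sample.is_unable_to_be_processed)
print(sample.most_recent_quant_file)
```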
@@ -163,7 +179,9 @@ def search(cls, **kwargs):
 
             title (str): filter based on the Sample's title
 
-            organism (str): filter based on the Organism that the Sample was taken from
+            organism__name (str): filter based on the Organism that the Sample was taken from
+
+            organism__taxonomy_id (int): filter based on the Organism that the Sample was taken from
 
             source_database (str): filter based on the publically available repository
                                    that the Sample was taken from
@@ -204,8 +222,6 @@ def search(cls, **kwargs):
 
             is_processed (bool): filter based on if the Sample has been processed
 
-            is_public (bool): filter based on if the Sample is public
-
             limit (int): number of results to return per page.
 
             offset (int): the initial index from which to return the results.
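A sketch of the renamed organism filters in `Sample.search`. The organism name string (refine.bio organism names are typically upper-snake-case) and the printed attributes (`accession_code`, `title`) are assumptions not shown in this diff, and the actual results depend on what the API returns.

```python
import pyrefinebio

# Filter by the organism's name (replaces the old `organism` filter)...
samples = pyrefinebio.Sample.search(
    organism__name="HOMO_SAPIENS",
    is_processed=True,
    limit=10,
)

# ...or by its NCBI taxonomy id (9606 is Homo sapiens).
by_taxonomy = pyrefinebio.Sample.search(organism__taxonomy_id=9606, limit=10)

for sample in samples:
    print(sample.accession_code, sample.title)
```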