@@ -204,23 +204,27 @@ async def retrieve_upload(self, upload_id: int) -> Upload:
         upload = await self._http_client.fetch_one(Resource.Upload, upload_id)
         return self._deserializer(Resource.Upload, upload)

-    async def export_annotations_to_json(self, queue_id: int) -> AsyncIterator[Annotation]:
+    async def export_annotations_to_json(
+        self, queue_id: int, **filters: Any
+    ) -> AsyncIterator[Annotation]:
         """https://elis.rossum.ai/api/docs/#export-annotations.

         JSON export is paginated and returns the result in a way similar to other list_all methods.
         """
-        async for chunk in self._http_client.export(Resource.Queue, queue_id, "json"):
+        async for chunk in self._http_client.export(Resource.Queue, queue_id, "json", **filters):
             # JSON export can be translated directly to Annotation object
             yield self._deserializer(Resource.Annotation, typing.cast(typing.Dict, chunk))

     async def export_annotations_to_file(
-        self, queue_id: int, export_format: ExportFileFormats
+        self, queue_id: int, export_format: ExportFileFormats, **filters: Any
     ) -> AsyncIterator[bytes]:
         """https://elis.rossum.ai/api/docs/#export-annotations.

         XLSX/CSV/XML exports can be huge, therefore byte streaming is used to keep memory consumption low.
         """
-        async for chunk in self._http_client.export(Resource.Queue, queue_id, str(export_format)):
+        async for chunk in self._http_client.export(
+            Resource.Queue, queue_id, str(export_format), **filters
+        ):
             yield typing.cast(bytes, chunk)

     # ##### ORGANIZATIONS #####