Skip to content

Commit

Permalink
Add JSONL-based verbatim manifest format (#6028)
Browse files Browse the repository at this point in the history
  • Loading branch information
nadove-ucsc committed Mar 13, 2024
1 parent d51eea2 commit 127d952
Show file tree
Hide file tree
Showing 8 changed files with 164 additions and 6 deletions.
2 changes: 1 addition & 1 deletion lambdas/service/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,7 @@
# changes and reset the minor version to zero. Otherwise, increment only
# the minor version for backwards compatible changes. A backwards
# compatible change is one that does not require updates to clients.
'version': '4.0'
'version': '4.1'
},
'tags': [
{
Expand Down
8 changes: 5 additions & 3 deletions lambdas/service/openapi.json
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
"info": {
"title": "azul_service",
"description": "\n# Overview\n\nAzul is a REST web service for querying metadata associated with\nboth experimental and analysis data from a data repository. In order\nto deliver response times that make it suitable for interactive use\ncases, the set of metadata properties that it exposes for sorting,\nfiltering, and aggregation is limited. Azul provides a uniform view\nof the metadata over a range of diverse schemas, effectively\nshielding clients from changes in the schemas as they occur over\ntime. It does so, however, at the expense of detail in the set of\nmetadata properties it exposes and in the accuracy with which it\naggregates them.\n\nAzul denormalizes and aggregates metadata into several different\nindices for selected entity types. Metadata entities can be queried\nusing the [Index](#operations-tag-Index) endpoints.\n\nA set of indices forms a catalog. There is a default catalog called\n`dcp2` which will be used unless a\ndifferent catalog name is specified using the `catalog` query\nparameter. Metadata from different catalogs is completely\nindependent: a response obtained by querying one catalog does not\nnecessarily correlate to a response obtained by querying another\none. Two catalogs can contain metadata from the same sources or\ndifferent sources. It is only guaranteed that the body of a\nresponse by any given endpoint adheres to one schema,\nindependently of which catalog was specified in the request.\n\nAzul provides the ability to download data and metadata via the\n[Manifests](#operations-tag-Manifests) endpoints. The\n`curl` format manifests can be used to\ndownload data files. Other formats provide various views of the\nmetadata. Manifests can be generated for a selection of files using\nfilters. 
These filters are interchangeable with the filters used by\nthe [Index](#operations-tag-Index) endpoints.\n\nAzul also provides a [summary](#operations-Index-get_index_summary)\nview of indexed data.\n\n## Data model\n\nAny index, when queried, returns a JSON array of hits. Each hit\nrepresents a metadata entity. Nested in each hit is a summary of the\nproperties of entities associated with the hit. An entity is\nassociated either by a direct edge in the original metadata graph,\nor indirectly as a series of edges. The nested properties are\ngrouped by the type of the associated entity. The properties of all\ndata files associated with a particular sample, for example, are\nlisted under `hits[*].files` in a `/index/samples` response. It is\nimportant to note that while each _hit_ represents a discrete\nentity, the properties nested within that hit are the result of an\naggregation over potentially many associated entities.\n\nTo illustrate this, consider a data file that is part of two\nprojects (a project is a group of related experiments, typically by\none laboratory, institution or consortium). Querying the `files`\nindex for this file yields a hit looking something like:\n\n```\n{\n \"projects\": [\n {\n \"projectTitle\": \"Project One\"\n \"laboratory\": ...,\n ...\n },\n {\n \"projectTitle\": \"Project Two\"\n \"laboratory\": ...,\n ...\n }\n ],\n \"files\": [\n {\n \"format\": \"pdf\",\n \"name\": \"Team description.pdf\",\n ...\n }\n ]\n}\n```\n\nThis example hit contains two kinds of nested entities (a hit in an\nactual Azul response will contain more): There are the two projects\nentities, and the file itself. These nested entities contain\nselected metadata properties extracted in a consistent way. This\nmakes filtering and sorting simple.\n\nAlso notice that there is only one file. When querying a particular\nindex, the corresponding entity will always be a singleton like\nthis.\n",
"version": "4.0"
"version": "4.1"
},
"tags": [
{
Expand Down Expand Up @@ -9478,7 +9478,8 @@
"compact",
"terra.bdbag",
"terra.pfb",
"curl"
"curl",
"verbatim.jsonl"
]
},
"description": "\nThe desired format of the output.\n\n- `compact` (the default) for a compact,\n tab-separated manifest\n\n- `terra.bdbag` for a manifest in the\n [BDBag format][1]. This provides a ZIP file containing two\n manifests: one for Participants (aka Donors) and one for\n Samples (aka Specimens). For more on the format of the\n manifests see [documentation here][2].\n\n- `terra.pfb` for a manifest in the [PFB\n format][3]. This format is mainly used for exporting data to\n Terra.\n\n- `curl` for a [curl configuration\n file][4] manifest. This manifest can be used with the curl\n program to download all the files listed in the manifest.\n\n[1]: https://bd2k.ini.usc.edu/tools/bdbag/\n\n[2]: https://software.broadinstitute.org/firecloud/documentation/article?id=10954\n\n[3]: https://github.com/uc-cdis/pypfb\n\n[4]: https://curl.haxx.se/docs/manpage.html#-K\n"
Expand Down Expand Up @@ -10885,7 +10886,8 @@
"compact",
"terra.bdbag",
"terra.pfb",
"curl"
"curl",
"verbatim.jsonl"
]
},
"description": "\nThe desired format of the output.\n\n- `compact` (the default) for a compact,\n tab-separated manifest\n\n- `terra.bdbag` for a manifest in the\n [BDBag format][1]. This provides a ZIP file containing two\n manifests: one for Participants (aka Donors) and one for\n Samples (aka Specimens). For more on the format of the\n manifests see [documentation here][2].\n\n- `terra.pfb` for a manifest in the [PFB\n format][3]. This format is mainly used for exporting data to\n Terra.\n\n- `curl` for a [curl configuration\n file][4] manifest. This manifest can be used with the curl\n program to download all the files listed in the manifest.\n\n[1]: https://bd2k.ini.usc.edu/tools/bdbag/\n\n[2]: https://software.broadinstitute.org/firecloud/documentation/article?id=10954\n\n[3]: https://github.com/uc-cdis/pypfb\n\n[4]: https://curl.haxx.se/docs/manpage.html#-K\n"
Expand Down
6 changes: 6 additions & 0 deletions src/azul/plugins/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,7 @@ class ManifestFormat(Enum):
terra_bdbag = 'terra.bdbag'
terra_pfb = 'terra.pfb'
curl = 'curl'
verbatim_jsonl = 'verbatim.jsonl'


T = TypeVar('T', bound='Plugin')
Expand Down Expand Up @@ -403,6 +404,11 @@ def _field_mapping(self) -> _FieldMapping:
def source_id_field(self) -> str:
raise NotImplementedError

@property
@abstractmethod
def implicit_hub_type(self) -> str:
    """
    The entity type whose replicas do not track their hubs explicitly.
    Replicas of this type have too many hubs to list them all, so they are
    instead looked up by entity ID (e.g. 'projects' for HCA, 'datasets'
    for AnVIL).
    """
    raise NotImplementedError

@property
def facets(self) -> Sequence[str]:
return [
Expand Down
13 changes: 12 additions & 1 deletion src/azul/plugins/metadata/anvil/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@
Type,
)

from azul import (
config,
)
from azul.indexer.document import (
DocumentType,
EntityType,
Expand Down Expand Up @@ -63,7 +66,11 @@ def exposed_indices(self) -> dict[EntityType, Sorting]:

@property
def manifest_formats(self) -> Sequence[ManifestFormat]:
    """
    The manifest formats supported for this metadata plugin. The verbatim
    JSONL format is backed by the replica index and is therefore only
    offered when replicas are enabled for the current deployment.
    """
    formats = [
        ManifestFormat.compact,
        ManifestFormat.terra_pfb
    ]
    if config.enable_replicas:
        formats.append(ManifestFormat.verbatim_jsonl)
    return formats

def transformer_types(self) -> Iterable[Type[BaseTransformer]]:
return (
Expand Down Expand Up @@ -215,6 +222,10 @@ def _field_mapping(self) -> MetadataPlugin._FieldMapping:
def source_id_field(self) -> str:
return 'sourceId'

@property
def implicit_hub_type(self) -> str:
    # For AnVIL, datasets are the entity type with implicitly tracked hubs
    return 'datasets'

@property
def facets(self) -> Sequence[str]:
return [
Expand Down
8 changes: 8 additions & 0 deletions src/azul/plugins/metadata/hca/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@
Type,
)

from azul import (
config,
)
from azul.indexer.document import (
Aggregate,
DocumentType,
Expand Down Expand Up @@ -160,6 +163,7 @@ def manifest_formats(self) -> Sequence[ManifestFormat]:
ManifestFormat.terra_bdbag,
ManifestFormat.terra_pfb,
ManifestFormat.curl,
*([ManifestFormat.verbatim_jsonl] if config.enable_replicas else [])
]

@property
Expand Down Expand Up @@ -267,6 +271,10 @@ def _field_mapping(self) -> MetadataPlugin._FieldMapping:
def source_id_field(self) -> str:
return 'sourceId'

@property
def implicit_hub_type(self) -> str:
    # For HCA, projects are the entity type with implicitly tracked hubs
    return 'projects'

@property
def facets(self) -> Sequence[str]:
return [
Expand Down
95 changes: 95 additions & 0 deletions src/azul/service/manifest_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
from itertools import (
chain,
)
import json
import logging
from math import (
ceil,
Expand Down Expand Up @@ -69,6 +70,7 @@
bdbag_api,
)
from elasticsearch_dsl import (
Q,
Search,
)
from elasticsearch_dsl.response import (
Expand All @@ -78,6 +80,7 @@
furl,
)
from more_itertools import (
chunked,
one,
)
import msgpack
Expand Down Expand Up @@ -107,6 +110,7 @@
aws,
)
from azul.indexer.document import (
DocumentType,
FieldPath,
FieldTypes,
null_str,
Expand Down Expand Up @@ -2005,3 +2009,94 @@ def qualify(qualifier, column_name, index=None):
# Join concatenated values using the joiner
row = {k: self.padded_joiner.join(sorted(v)) if isinstance(v, set) else v for k, v in row.items()}
bundle_tsv_writer.writerow(row)


class VerbatimManifestGenerator(FileBasedManifestGenerator):
    """
    Generate a manifest in JSONL format in which every line is the verbatim
    content of one metadata replica document associated with the selected
    files.
    """

    @property
    def content_type(self) -> str:
        # One JSON document per line (JSON Lines)
        return 'application/jsonl'

    @classmethod
    def file_name_extension(cls) -> str:
        return 'jsonl'

    @classmethod
    def format(cls) -> ManifestFormat:
        return ManifestFormat.verbatim_jsonl

    @property
    def entity_type(self) -> str:
        return 'files'

    @property
    def included_fields(self) -> list[FieldPath]:
        # The aggregate documents are consulted solely to perform a "join"
        # against the replica index, so only the fields that serve as join
        # keys need to be requested.
        return [
            ('entity_id',),
            ('contents', self.implicit_hub_type, 'document_id')
        ]

    @property
    def implicit_hub_type(self) -> str:
        return self.service.metadata_plugin(self.catalog).implicit_hub_type

    def _replica_keys(self) -> Iterable[dict[str, str]]:
        """
        Yield, for every matching aggregate document, the pair of keys used
        to look up the replicas associated with it.
        """
        es_request = self._create_request()
        for doc in es_request.scan():
            hub_contents = one(doc['contents'][self.implicit_hub_type])
            yield {
                # Most replicas track their hubs explicitly, however ...
                'hub_ids': doc['entity_id'],
                # ... for projects and datasets, there are too many hubs to
                # track them all in the replica, so they are instead
                # retrieved by entity ID.
                'entity_id': one(hub_contents['document_id'])
            }

    def _all_replicas(self) -> Iterable[JSON]:
        """
        Yield the contents of every distinct replica joined via the keys
        from :meth:`_replica_keys`, emitting each replica at most once.
        """
        page_size = 100
        emitted_replica_ids = set()
        for keys_page in chunked(self._replica_keys(), page_size):
            for replica in self._join_replicas(keys_page):
                # A single replica may have many hubs. To prevent replicas
                # from being emitted more than once, keep track of replicas
                # already emitted.
                replica_id = replica.meta.id
                if replica_id in emitted_replica_ids:
                    continue
                yield replica.contents.to_dict()
                # Note that this will be zero for replicas that use implicit
                # hubs, in which case there are actually many hubs
                explicit_hub_count = len(replica.hub_ids)
                # Replicas with exactly one hub need not be tracked since no
                # other hub could cause their re-emission.
                if explicit_hub_count != 1:
                    emitted_replica_ids.add(replica_id)

    def _join_replicas(self,
                       keys_page: Iterable[dict[str, str]]
                       ) -> Iterable[Hit]:
        """
        Retrieve the replica documents matching any of the given keys.
        """
        terms_by_field = defaultdict(set)
        for keys in keys_page:
            for field, term in keys.items():
                terms_by_field[field].add(term)
        es_request = self.service.create_request(catalog=self.catalog,
                                                 entity_type='replica',
                                                 doc_type=DocumentType.replica)
        clauses = [
            {'terms': {f'{field}.keyword': list(terms)}}
            for field, terms in terms_by_field.items()
        ]
        return es_request.query(Q('bool', should=clauses)).scan()

    def create_file(self) -> tuple[str, Optional[str]]:
        """
        Write all replicas to a temporary file, one JSON document per line,
        and return the path of that file. The second element of the returned
        tuple (the file name) is always None.
        """
        fd, path = mkstemp()
        os.close(fd)
        with open(path, 'w') as manifest:
            for replica in self._all_replicas():
                manifest.write(json.dumps(replica))
                manifest.write('\n')
        return path, None
12 changes: 11 additions & 1 deletion test/integration_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -583,7 +583,8 @@ def _test_manifest(self, catalog: CatalogName):
ManifestFormat.compact: self._check_compact_manifest,
ManifestFormat.terra_bdbag: self._check_terra_bdbag_manifest,
ManifestFormat.terra_pfb: self._check_terra_pfb_manifest,
ManifestFormat.curl: self._check_curl_manifest
ManifestFormat.curl: self._check_curl_manifest,
ManifestFormat.verbatim_jsonl: self._check_jsonl_manifest
}
for format in [None, *supported_formats]:
# IT catalogs with just one public source are always indexed
Expand Down Expand Up @@ -1000,6 +1001,15 @@ def _check_curl_manifest(self, _catalog: CatalogName, response: bytes):
log.info(f'Manifest contains {num_files} files.')
self.assertGreater(num_files, 0)

def _check_jsonl_manifest(self, _catalog: CatalogName, response: bytes):
    """
    Verify that the response body is valid JSONL (each line parses as JSON)
    and contains at least one replica.
    """
    # Parsing each line raises if the manifest is not well-formed JSONL
    replicas = [
        json.loads(line)
        for line in TextIOWrapper(BytesIO(response))
    ]
    log.info('Manifest contains %d replicas', len(replicas))
    self.assertGreater(len(replicas), 0)

def _test_repository_files(self, catalog: str):
with self.subTest('repository_files', catalog=catalog):
file = self._get_one_inner_file(catalog)
Expand Down
26 changes: 26 additions & 0 deletions test/service/test_manifest.py
Original file line number Diff line number Diff line change
Expand Up @@ -1273,6 +1273,32 @@ def test_manifest_content_disposition_header(self):
actual_cd = query.params['response-content-disposition']
self.assertEqual(expected_cd, actual_cd)

@unittest.skipIf(not config.enable_replicas,
                 'The format is replica-based')
@manifest_test
def test_verbatim_jsonl_manifest(self):
    """
    The verbatim JSONL manifest should contain exactly the metadata
    documents of the canned bundle, one JSON document per line.
    """
    bundle = self._load_canned_bundle(one(self.bundles()))
    metadata_file_names = [
        'cell_suspension_0.json',
        'project_0.json',
        'sequence_file_0.json',
        'sequence_file_1.json',
        'specimen_from_organism_0.json'
    ]
    expected_contents = [
        bundle.metadata_files[name]
        for name in metadata_file_names
    ]
    response = self._get_manifest(ManifestFormat.verbatim_jsonl, {})
    self.assertEqual(200, response.status_code)
    response_contents = [
        json.loads(line)
        for line in response.content.decode().splitlines()
    ]

    def sort_key(hca_doc: JSON) -> str:
        # The manifest does not guarantee document order, so sort both
        # sides by document ID before comparing
        return hca_doc['provenance']['document_id']

    self.assertEqual(sorted(expected_contents, key=sort_key),
                     sorted(response_contents, key=sort_key))


class TestManifestCache(ManifestTestCase):

Expand Down

0 comments on commit 127d952

Please sign in to comment.