21 changes: 12 additions & 9 deletions src/app_neo4j_queries.py
@@ -673,7 +673,7 @@ def get_descendants(
         if record and record[record_field_name]:
             results = record[record_field_name]
 
-            schema.schema_manager.rearrange_datasets(results, entity_type)
+            schema.schema_manager.rearrange_datasets(results)
 
     return results

@@ -1872,14 +1872,17 @@ def get_individual_prov_info(neo4j_driver, dataset_uuid):
             content_sixteen, key=lambda d: d["last_modified_timestamp"], reverse=True
         )
 
-        published_processed_dataset_location = next(
-            (i for i, item in enumerate(content_sixteen) if item["status"] == "Published"), None
-        )
-        if published_processed_dataset_location and published_processed_dataset_location != 0:
-            published_processed_dataset = content_sixteen.pop(
-                published_processed_dataset_location
-            )
-            content_sixteen.insert(0, published_processed_dataset)
+        # We are modifying the response of /prov-info to return derived datasets sorted simply by last_modified_timestamp.
+        # We want to highlight QA datasets that were touched more recently than older published datasets.
+
+        # published_processed_dataset_location = next(
+        #     (i for i, item in enumerate(content_sixteen) if item["status"] == "Published"), None
+        # )
+        # if published_processed_dataset_location and published_processed_dataset_location != 0:
+        #     published_processed_dataset = content_sixteen.pop(
+        #         published_processed_dataset_location
+        #     )
+        #     content_sixteen.insert(0, published_processed_dataset)
 
         record_dict["processed_dataset"] = content_sixteen
     return record_dict
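The net effect on the /prov-info response: processed datasets are now ordered purely by last_modified_timestamp, newest first, with no special pinning of a Published dataset at the front of the list. A minimal sketch of the before/after ordering, using hypothetical records in which only status and last_modified_timestamp matter:

```python
# Hypothetical sample records; only "status" and "last_modified_timestamp" matter here.
datasets = [
    {"uuid": "qa-1", "status": "QA", "last_modified_timestamp": 1700000300},
    {"uuid": "pub-1", "status": "Published", "last_modified_timestamp": 1700000100},
    {"uuid": "qa-2", "status": "QA", "last_modified_timestamp": 1700000200},
]

# Both before and after this change, the list is sorted newest-first.
datasets.sort(key=lambda d: d["last_modified_timestamp"], reverse=True)

# Old behavior: additionally move the first Published dataset to the front.
old_order = list(datasets)
published_index = next(
    (i for i, item in enumerate(old_order) if item["status"] == "Published"), None
)
if published_index:  # truthiness also skips index 0, which is already at the front
    old_order.insert(0, old_order.pop(published_index))

print([d["uuid"] for d in old_order])  # ['pub-1', 'qa-1', 'qa-2'] (old response order)
print([d["uuid"] for d in datasets])   # ['qa-1', 'qa-2', 'pub-1'] (new response order)
```

So a recently touched QA dataset now appears ahead of an older Published one, which is exactly the highlighting the new comment describes.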
25 changes: 12 additions & 13 deletions src/schema/schema_manager.py
@@ -442,25 +442,24 @@ def get_schema_defaults(properties=[], is_include_action=True, target_entity_typ
     return defaults
 
 
-def rearrange_datasets(results, entity_type="Dataset"):
+def rearrange_datasets(results):
     """
     If asked for the descendants of a Dataset then sort by last_modified_timestamp and place the published dataset at the top
 
     :param results : List[dict]
-    :param entity_type : str
     :return:
     """
-    if isinstance(results[0], str) is False and equals(
-        entity_type, Ontology.ops().entities().DATASET
-    ):
-        results = sorted(results, key=lambda d: d["last_modified_timestamp"], reverse=True)
+    if not isinstance(results[0], str) and "last_modified_timestamp" in results[0]:
+        results.sort(key=lambda d: d["last_modified_timestamp"], reverse=True)
 
-        published_processed_dataset_location = next(
-            (i for i, item in enumerate(results) if item["status"] == "Published"), None
-        )
-        if published_processed_dataset_location and published_processed_dataset_location != 0:
-            published_processed_dataset = results.pop(published_processed_dataset_location)
-            results.insert(0, published_processed_dataset)
+    # We are modifying the response of /prov-info to return derived datasets sorted simply by last_modified_timestamp.
+    # We want to highlight QA datasets that were touched more recently than older published datasets.
+
+    # published_processed_dataset_location = next(
+    #     (i for i, item in enumerate(results) if item["status"] == "Published"), None
+    # )
+    # if published_processed_dataset_location and published_processed_dataset_location != 0:
+    #     published_processed_dataset = results.pop(published_processed_dataset_location)
+    #     results.insert(0, published_processed_dataset)
 
 
 """
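One detail worth calling out for callers such as get_descendants in app_neo4j_queries.py: the old code rebound a local name with results = sorted(...), so the caller's list was never actually reordered, while the new results.sort(...) mutates the list in place. A small sketch of the difference, with hypothetical two-element records:

```python
# Sketch of rebinding vs. in-place sorting (hypothetical records).
def rebind(results):
    # Rebinds the local name only; the caller's list is left untouched.
    results = sorted(results, key=lambda d: d["last_modified_timestamp"], reverse=True)

def sort_in_place(results):
    # Mutates the caller's list, as rearrange_datasets() does after this change.
    if not isinstance(results[0], str) and "last_modified_timestamp" in results[0]:
        results.sort(key=lambda d: d["last_modified_timestamp"], reverse=True)

data = [{"last_modified_timestamp": 1}, {"last_modified_timestamp": 2}]
rebind(data)
print(data[0]["last_modified_timestamp"])  # 1, the caller sees no change
sort_in_place(data)
print(data[0]["last_modified_timestamp"])  # 2, the caller sees the new order
```

The new guard also swaps the Ontology entity-type check for a duck-typed test on the first record, so the function now sorts any list of dicts carrying last_modified_timestamp, regardless of the entity type callers used to pass.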