102 changes: 102 additions & 0 deletions aimmdb/ingest/aimmdb_backup.py
@@ -0,0 +1,102 @@
import json
import os

import pandas as pd


def download_aimmdb_data(c):
    """
    Navigate through a tiled client and download all the entries that are found.

    Parameters
    ----------
    c : tiled.client
        Client connected to a remote tiled instance.

    Returns
    -------
    None.

    """

    if not os.path.exists("files/"):
Contributor
There is an optional argument to say "Create the directory only if it doesn't already exist." https://docs.python.org/3/library/os.html#os.makedirs
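A minimal sketch of the change the reviewer is suggesting, assuming the same "files/" directory used in this function (os is already imported at the top of the file):

os.makedirs("files/", exist_ok=True)  # create "files/" only if it doesn't already exist; no error if it does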

        os.mkdir("files/")

    for key in c['dataset'].keys():
Contributor
This is perfectly fine. It's useful to know that iterating through a dict-like object iterates through its keys, so you can just write:

Suggested change
-    for key in c['dataset'].keys():
+    for key in c['dataset']:

print(f"Downloading {key} dataset...")
if not os.path.exists(f"files/{key}/"):
os.mkdir(f"files/{key}/")

for node_value in c['dataset'][key]['uid'].values():
uid = node_value.item['id']
print(f"---- Saving node {uid}...")
metadata = dict(node_value.metadata)
del metadata['_tiled'] # it's not necessary to keep the uid generated by the server. The new server will generate a new uid
specs = list(node_value.specs)

data_dict = node_value.read().to_dict()

meta_content = {"metadata": metadata, "specs": specs, "data": data_dict}

with open(f"files/{key}/{uid}.json", 'w') as file:
json.dump(meta_content, file)

print("Download completed!")


def upload_aimmdb_data(c, folder_path):
    """
    Walk through a path on the local machine and write the data into a remote instance of tiled.

    Parameters
    ----------
    c : tiled.client
        Client connected to a remote tiled instance.
    folder_path : pathlib.Path
        Path to the folder containing backup data.

    Returns
    -------
    None.

    """

    filepaths = sorted(folder_path.iterdir())

    for filepath in filepaths:
        if filepath.name.startswith("."):
            # Skip hidden files.
            continue
        if not filepath.is_file():
            # Recurse into subfolders to look for more backup files.
            upload_aimmdb_data(c, filepath)
            continue
        if filepath.suffix == ".json":
            if filepath.stem not in c:
                with open(filepath) as f:
                    sample = json.load(f)

                metadata = sample["metadata"]
                specs = sample["specs"]
                data = pd.DataFrame(sample["data"])

                node = c.write_dataframe(data, key=filepath.stem, metadata=metadata, specs=specs)
                print(f"dataset: {folder_path.name} - node {node.item['id']} was created")
            else:
                print(f"dataset: {folder_path.name} - node {filepath.stem} already exists")


def delete_dataset(c, dataset):
    """
    Delete all the entries of a given dataset.

    Parameters
    ----------
    c : tiled.client
        Client connected to a remote tiled instance.
    dataset : str
        Name of a dataset in a remote tiled instance.

    Returns
    -------
    None.

    """

    if dataset in c:
        # Materialize the keys first so deletions don't disturb the iteration.
        for key in list(c[dataset]):
            c[dataset].delete(key)
            print(f"Node {key} has been deleted")