Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions .coveragerc
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
[run]
branch = True
source = brukerapi

[report]
# Ignore lines matching this regex in coverage report
exclude_lines =
# Ignore raise statements (intentional exceptions)
raise
# Ignore the script entry-point guard (not executed under test)
if __name__ == .__main__.:
57 changes: 29 additions & 28 deletions .github/workflows/assets/zenodo.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,15 @@
import requests
import json
import sys
from pathlib import Path

import pkg_resources
import docutils
import requests

PARENT_ID = 698342
BASE_URL = 'https://sandbox.zenodo.org/api/deposit/depositions/'
BASE_URL = "https://sandbox.zenodo.org/api/deposit/depositions/"

def publish(path_dist, access_token, verbose=False):

def publish(path_dist, access_token, *, verbose=False):
"""Publish a new version of software to Zenodo

Parameters:
Expand All @@ -18,75 +19,75 @@ def publish(path_dist, access_token, verbose=False):

"""

params = {'access_token': access_token}
params = {"access_token": access_token}
headers = {"Content-Type": "application/json"}

# Create a new version of the deposition
r = requests.post(BASE_URL + '{}/actions/newversion'.format(PARENT_ID),
params=params,
json={},
headers=headers)

r = requests.post(BASE_URL + f"{PARENT_ID}/actions/newversion", params=params, json={}, headers=headers)

if verbose:
print('Create a new version of the deposition: {}'.format(r.status_code))
print(f"Create a new version of the deposition: {r.status_code}")

# Get the new version, its id and bucket_url
r = requests.get(r.json()['links']['latest_draft'], params=params)
deposition_id = r.json()['id']
r = requests.get(r.json()["links"]["latest_draft"], params=params)
deposition_id = r.json()["id"]
bucket_url = r.json()["links"]["bucket"]

if verbose:
print('Get the new version: {}'.format(r.status_code))
print('id: {}'.format(deposition_id))
print('bucket_url: {}'.format(bucket_url))
print(f"Get the new version: {r.status_code}")
print(f"id: {deposition_id}")
print(f"bucket_url: {bucket_url}")

# Delete existing files
for file in r.json()['files']:
requests.delete(BASE_URL + '%s/files/%s' % (deposition_id, file['id']), params=params)
for file in r.json()["files"]:
requests.delete(BASE_URL + "{}/files/{}".format(deposition_id, file["id"]), params=params)

# Locate distribution file
files = [file for file in Path(path_dist).glob('**/*') if file.name.endswith('tar.gz')]
files = [file for file in Path(path_dist).glob("**/*") if file.name.endswith("tar.gz")]

# Put distribution file
with files[0].open(mode="rb") as fp:
r = requests.put(
'{}/{}'.format(bucket_url, files[0].name),
f"{bucket_url}/{files[0].name}",
data=fp,
params=params,
)

if verbose:
print('Put distribution file: {}'.format(r.status_code))
print(f"Put distribution file: {r.status_code}")

# Load metadata
metadata = load_metadata()

# Put metadata
r = requests.put(BASE_URL + '%s' % deposition_id, params=params, data=json.dumps(metadata), headers=headers)
r = requests.put(BASE_URL + f"{deposition_id}", params=params, data=json.dumps(metadata), headers=headers)

if verbose:
print('Put metadata: {}'.format(r.status_code))
print(f"Put metadata: {r.status_code}")

# Publish new version
r = requests.post(BASE_URL + '%s/actions/publish' % deposition_id, params=params )
r = requests.post(BASE_URL + f"{deposition_id}/actions/publish", params=params)

if verbose:
print('Publish new version: {}'.format(r.status_code))
print(f"Publish new version: {r.status_code}")


def get_version():
return pkg_resources.get_distribution("brukerapi").version


def load_metadata():
with open(Path(__file__).parent / 'fixed.json') as f:
with open(Path(__file__).parent / "fixed.json") as f:
data = json.load(f)

data['metadata']['version'] = get_version()
data["metadata"]["version"] = get_version()

return data


def append_changelog():
pass


if __name__ == "__main__":
publish(sys.argv[0], sys.argv[1])

22 changes: 20 additions & 2 deletions .github/workflows/python-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ jobs:
run: |
git clone https://github.com/isi-nmr/brukerapi-python.git
cd brukerapi-python
pip install pytest zenodo_get
pip install pytest zenodo_get pytest-cov
pip install -e .[dev] --use-pep517

- name: Cache Zenodo data
Expand All @@ -75,4 +75,22 @@ jobs:
key: zenodo-4522220

- name: Run all dataset tests
run: python -m pytest test -v
run: |
python -m pytest test -v --cov=brukerapi --cov-branch --cov-report=xml --cov-report=term-missing --cov-report=html

- name: Upload coverage HTML
uses: actions/upload-artifact@v4
with:
name: coverage-report
path: htmlcov/

- name: Print total coverage
run: |
echo "Total coverage:"
python - <<EOF
import xml.etree.ElementTree as ET
tree = ET.parse('coverage.xml')
root = tree.getroot()
line_rate = float(root.attrib['line-rate']) * 100
print(f"{line_rate:.2f}%")
EOF
18 changes: 18 additions & 0 deletions .github/workflows/ruff.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
name: Ruff Lint

on: [push, pull_request]

jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.13"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install ruff
- name: Run Ruff
run: ruff check .
3 changes: 3 additions & 0 deletions README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,9 @@ brukerapi-python
:target: https://bruker-api.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status

.. image:: https://codecov.io/gh/isi-nmr/brukerapi-python/branch/main/graph/badge.svg
:target: https://codecov.io/gh/isi-nmr/brukerapi-python
:alt: Test coverage

A Python package providing I/O interface for Bruker data sets.

tl;dr
Expand Down
49 changes: 20 additions & 29 deletions brukerapi/cli.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,17 @@
from argparse import ArgumentParser
from brukerapi.splitters import *
from brukerapi.folders import *
import sys
from argparse import ArgumentParser

from brukerapi.folders import Dataset, Filter, Folder, Path
from brukerapi.splitters import FrameGroupSplitter, SlicePackageSplitter

def main():
"""

"""
parser = ArgumentParser(prog='bruker')
def main():
""" """
parser = ArgumentParser(prog="bruker")
subparsers = parser.add_subparsers()

# report sub-command
parser_report = subparsers.add_parser('report', help='export properties of data sets to json, or yaml file')
parser_report = subparsers.add_parser("report", help="export properties of data sets to json, or yaml file")
parser_report.add_argument(
"-i",
"--input",
Expand All @@ -27,27 +26,22 @@ def main():
parser_report.add_argument(
"-f",
"--format",
choices=['json', 'yml'],
default='json',
choices=["json", "yml"],
default="json",
help="Format of report files",
)
parser_report.add_argument(
"-p",
"--props",
type=str,
nargs='+',
nargs="+",
help="List of properties to include",
)
parser_report.add_argument(
"-v",
"--verbose",
help="make verbose",
action="store_true"
)
parser_report.add_argument("-v", "--verbose", help="make verbose", action="store_true")
parser_report.set_defaults(func=report)

# report sub-command
parser_split = subparsers.add_parser('split', help='split dataset into several sub-datasets')
parser_split = subparsers.add_parser("split", help="split dataset into several sub-datasets")
parser_split.add_argument(
"-i",
"--input",
Expand All @@ -68,7 +62,7 @@ def main():
"-s",
"--slice_package",
dest="slice_package",
action='store_true',
action="store_true",
help="Split by slice package",
)
parser_split.add_argument(
Expand All @@ -81,7 +75,7 @@ def main():
parser_split.set_defaults(func=split)

# filter sub-command
parser_filter = subparsers.add_parser('filter', help='get files based on query')
parser_filter = subparsers.add_parser("filter", help="get files based on query")
parser_filter.add_argument(
"-i",
"--input",
Expand All @@ -108,8 +102,6 @@ def main():
func(args)




def split(args):
"""
split sub-command
Expand Down Expand Up @@ -145,13 +137,12 @@ def report(args):
elif output.is_dir():
# folder to folder
Folder(input).report(path_out=output, format_=args.format, props=args.props, verbose=args.verbose)
else:
# dataset in-place
if output is None:
Dataset(input, add_parameters=['subject']).report(props=args.props, verbose=args.verbose)
# dataset to folder, or dataset to file
elif output.is_dir():
Dataset(input, add_parameters=['subject']).report(path=output, props=args.props, verbose=args.verbose)
# dataset in-place
elif output is None:
Dataset(input, add_parameters=["subject"]).report(props=args.props, verbose=args.verbose)
# dataset to folder, or dataset to file
elif output.is_dir():
Dataset(input, add_parameters=["subject"]).report(path=output, props=args.props, verbose=args.verbose)


def filter(args):
Expand Down
5 changes: 2 additions & 3 deletions brukerapi/data.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
class DataRandomAccess():

class DataRandomAccess:
def __init__(self, dataset):
self._dataset = dataset
self._scheme = dataset._scheme

def __getitem__(self, slice):
return self._scheme.ra(slice)
return self._scheme.ra(slice)
Loading