From 029df28c0abbfed3ea7d9d49f761f87d3bc4f9de Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Fri, 12 Sep 2025 11:19:46 +0200 Subject: [PATCH 01/30] feat(python): add first version of linear reg workflow --- .../examples/example_linear_registration.py | 233 ++++++++++++++++++ 1 file changed, 233 insertions(+) create mode 100644 python/examples/example_linear_registration.py diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py new file mode 100644 index 000000000..e5fd8e61d --- /dev/null +++ b/python/examples/example_linear_registration.py @@ -0,0 +1,233 @@ +import argparse +import webbrowser +from time import time + +import neuroglancer +import neuroglancer.cli + + +def create_demo_data(size=(64, 64, 64), radius=20): + import numpy as np + + data = np.zeros(size, dtype=np.uint8) + zz, yy, xx = np.indices(data.shape) + center = np.array(data.shape) / 2 + sphere_mask = (xx - center[2]) ** 2 + (yy - center[1]) ** 2 + ( + zz - center[0] + ) ** 2 < radius**2 + data[sphere_mask] = 255 + return data + + +def create_dimensions(): + return neuroglancer.CoordinateSpace( + names=["x", "y", "z"], units="nm", scales=[1, 1, 1] + ) + + +class LinearRegistrationWorkflow: + def __init__(self, template_url, source_url): + self.template_url = template_url + self.source_url = source_url + + if template_url is None or source_url is None: + self.demo_data = create_demo_data() + + self.status_timers = {} + self.setup_viewer() + self.last_run_points = 0 + + def _clear_status_messages(self): + to_pop = [] + for k, v in self.status_timers.items(): + if time() - v > 5: + to_pop.append(k) + for k in to_pop: + with self.viewer.config_state.txn() as s: + s.status_messages.pop(k, None) + self.status_timers.pop(k) + + def _set_status_message(self, key, message): + with self.viewer.config_state.txn() as s: + s.status_messages[key] = message + self.status_timers[key] = time() + + def setup_viewer(self): + self.viewer = viewer = neuroglancer.Viewer() + source_layer = self.create_source_image() + template_layer = self.create_template_image() + + with viewer.txn() as s: + s.layers["template"] = template_layer + s.layers["source"] = source_layer + s.layers["registered"] = source_layer + s.layers["registered"].visible = False + s.layers["markers"] = neuroglancer.LocalAnnotationLayer( + dimensions=create_dimensions(), + annotation_color="#00FF00", + ) + s.layout = neuroglancer.row_layout( + [ + neuroglancer.LayerGroupViewer(layers=["template", "markers"]), + neuroglancer.LayerGroupViewer( + layers=["source", "registered", "markers"] + ), + ] + ) + s.layers["markers"].tool = "annotatePoint" + s.selected_layer.layer = "markers" + s.selected_layer.visible = True + + self._set_status_message( + "help", + "Place markers in pairs, starting with the template, and then the source. 
The registered layer will automatically update as you add markers.", + ) + + self.viewer.shared_state.add_changed_callback( + lambda: self.viewer.defer_callback(self.on_state_changed) + ) + + def on_state_changed(self): + self.viewer.defer_callback(self.update) + + def update(self): + print("Updating") + with self.viewer.txn() as s: + self.estimate_affine(s) + self._clear_status_messages() + + def create_template_image(self): + if self.template_url is None: + return neuroglancer.ImageLayer( + source=[ + neuroglancer.LayerDataSource( + neuroglancer.LocalVolume( + self.demo_data, dimensions=create_dimensions() + ) + ) + ] + ) + else: + return neuroglancer.ImageLayer(source=self.template_url) + + def create_source_image(self): + if self.source_url is None: + import scipy.ndimage + + transformed = scipy.ndimage.affine_transform( + self.demo_data, + matrix=[[1, 0, 0], [0, 1, 0], [0.2, 0, 1]], + offset=1.0, + order=1, + ) + return neuroglancer.ImageLayer( + source=[ + neuroglancer.LayerDataSource( + neuroglancer.LocalVolume( + transformed, dimensions=create_dimensions() + ) + ) + ] + ) + else: + return neuroglancer.ImageLayer(source=self.source_url) + + def estimate_affine(self, s): + annotations = s.layers["markers"].annotations + # TODO expand this to estimate different types of transforms + # Depending on the number of points + # For now, just ignore non pairs + annotations = annotations[: (len(annotations) // 2) * 2] + if len(annotations) < 2: + return False + + print(len(annotations) // 2, self.last_run_points) + if len(annotations) // 2 == self.last_run_points: + return False + + template_points = [] + source_points = [] + for i, a in enumerate(annotations): + if i % 2 == 0: + template_points.append(a.point) + else: + source_points.append(a.point) + + import numpy as np + + template_points = np.array(template_points) + source_points = np.array(source_points) + + # Estimate affine transform using least squares, for now using + # https://stackoverflow.com/questions/20546182/how-to-perform-coordinates-affine-transformation-using-python-part-2 + # but can replace later + + n = template_points.shape[0] + pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))]) + unpad = lambda x: x[:, :-1] + X = pad(template_points) + Y = pad(source_points) + + # Solve the least squares problem X * A = Y + # to find our transformation matrix A + A, res, rank, sd = np.linalg.lstsq(X, Y) + # Zero out really small values on A + A[np.abs(A) < 1e-8] = 0 + + transform = lambda x: unpad(np.dot(pad(x), A)) + + transformed = transform(source_points) + print(f"Transformed points: {transformed}") + + # Set the transformation on the layer that is being registered + print(A.T[:3]) + # s.layers["registered"].source.transform = neuroglancer.CoordinateSpaceTransform( + # matrix=A.T[:3], + # ) + s.layers["registered"].source.transform = ( + neuroglancer.CoordinateSpaceTransform( + output_dimensions=create_dimensions(), + matrix=[[1, 0, 0, 0], [1, 1, 0, 0], [0, 0, 1, 0]], + ), + ) + + self._set_status_message( + ("info"), f"Estimated affine transform with {n} point pairs" + ) + # TODO actually want to check if the number of points or the points + # themselves have changed + self.last_run_points = n + return True + + +def handle_args(): + ap = argparse.ArgumentParser() + neuroglancer.cli.add_server_arguments(ap) + ap.add_argument( + "--template", + type=str, + help="Source URL for the template image", + ) + ap.add_argument( + "--source", + type=str, + help="Source URL for the image to be registered", + ) + args = ap.parse_args() 
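+ # apply the server-related options (e.g. bind address / port) that
+ # neuroglancer.cli.add_server_arguments registered on the parser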
+ neuroglancer.cli.handle_server_arguments(args) + return args + + +def main(): + args = handle_args() + + demo = LinearRegistrationWorkflow( + template_url=args.template, + source_url=args.source, + ) + + webbrowser.open_new(demo.viewer.get_viewer_url()) + + +if __name__ == "__main__": + main() From eaf602a0ac72f18fced54d02a9fb694ee2b6e3e9 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Fri, 12 Sep 2025 12:54:57 +0200 Subject: [PATCH 02/30] feat(python): update the lin reg with more functions and TODOs --- .../examples/example_linear_registration.py | 73 +++++++++++++------ 1 file changed, 52 insertions(+), 21 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index e5fd8e61d..631e89941 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -2,6 +2,9 @@ import webbrowser from time import time +import scipy.ndimage +import numpy as np + import neuroglancer import neuroglancer.cli @@ -56,19 +59,24 @@ def setup_viewer(self): self.viewer = viewer = neuroglancer.Viewer() source_layer = self.create_source_image() template_layer = self.create_template_image() + registered_layer = self.create_registered_image() with viewer.txn() as s: s.layers["template"] = template_layer s.layers["source"] = source_layer - s.layers["registered"] = source_layer + s.layers["registered"] = registered_layer s.layers["registered"].visible = False s.layers["markers"] = neuroglancer.LocalAnnotationLayer( dimensions=create_dimensions(), annotation_color="#00FF00", ) + # TODO set these to be in 3D layout + # TODO unlink any controls that should be unlinked s.layout = neuroglancer.row_layout( [ - neuroglancer.LayerGroupViewer(layers=["template", "markers"]), + neuroglancer.LayerGroupViewer( + layers=["template", "registered", "markers"] + ), neuroglancer.LayerGroupViewer( layers=["source", "registered", "markers"] ), @@ -110,27 +118,47 @@ def create_template_image(self): else: return neuroglancer.ImageLayer(source=self.template_url) - def create_source_image(self): + # TODO probably need to be a little more careful about the size of the T matrix + # based on the number of input dims + def create_source_image(self, registration_matrix=None): + if registration_matrix is None: + registration_matrix = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]] if self.source_url is None: - import scipy.ndimage - + # TODO might be helpful to randomize this and check how close after registration + desired_output_matrix_homogenous = [ + [0.8, 0, 0, 0], + [0, 0.2, 0, 0], + [0, 0, 0.9, 0], + [0, 0, 0, 1], + ] + inverse_matrix = np.linalg.inv(desired_output_matrix_homogenous) transformed = scipy.ndimage.affine_transform( self.demo_data, - matrix=[[1, 0, 0], [0, 1, 0], [0.2, 0, 1]], - offset=1.0, - order=1, + matrix=inverse_matrix, ) return neuroglancer.ImageLayer( source=[ neuroglancer.LayerDataSource( neuroglancer.LocalVolume( transformed, dimensions=create_dimensions() - ) + ), + transform=neuroglancer.CoordinateSpaceTransform( + output_dimensions=create_dimensions(), + matrix=registration_matrix, + ), ) ] ) else: - return neuroglancer.ImageLayer(source=self.source_url) + return neuroglancer.ImageLayer( + source=self.source_url, + transform=neuroglancer.CoordinateSpaceTransform( + matrix=registration_matrix + ), + ) + + def create_registered_image(self, registration_matrix=None): + return self.create_source_image(registration_matrix=registration_matrix) def estimate_affine(self, s): annotations = 
s.layers["markers"].annotations @@ -141,6 +169,9 @@ def estimate_affine(self, s): if len(annotations) < 2: return False + # TODO allow a different way to group, such as by description + # TODO color points differently based on whether template or source + # in the shader print(len(annotations) // 2, self.last_run_points) if len(annotations) // 2 == self.last_run_points: return False @@ -172,7 +203,9 @@ def estimate_affine(self, s): # to find our transformation matrix A A, res, rank, sd = np.linalg.lstsq(X, Y) # Zero out really small values on A - A[np.abs(A) < 1e-8] = 0 + A[np.abs(A) < 1e-4] = 0 + # Round all other values to 4 decimal places + A = np.round(A, 4) transform = lambda x: unpad(np.dot(pad(x), A)) @@ -180,16 +213,14 @@ def estimate_affine(self, s): print(f"Transformed points: {transformed}") # Set the transformation on the layer that is being registered - print(A.T[:3]) - # s.layers["registered"].source.transform = neuroglancer.CoordinateSpaceTransform( - # matrix=A.T[:3], - # ) - s.layers["registered"].source.transform = ( - neuroglancer.CoordinateSpaceTransform( - output_dimensions=create_dimensions(), - matrix=[[1, 0, 0, 0], [1, 1, 0, 0], [0, 0, 1, 0]], - ), - ) + # Something seems to go wrong with the state updates once this happens + # s.layers["registered"].source[0].transform.matrix = A.T[:3] + # Because of this, trying to replace the whole layer to see if that helps + # TODO in theory should be able to just update the matrix, + # but if cannot, can replace whole layer and restore settings + old_visible = s.layers["registered"].visible + s.layers["registered"] = self.create_registered_image(A[:3]) + s.layers["registered"].visible = old_visible self._set_status_message( ("info"), f"Estimated affine transform with {n} point pairs" From dbbfef47d0bce8ea416e63b403de12a347e3fdcc Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Mon, 15 Sep 2025 16:20:42 +0200 Subject: [PATCH 03/30] feat: lin reg export and lin reg unlink --- .../examples/example_linear_registration.py | 152 ++++++++++++------ 1 file changed, 101 insertions(+), 51 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 631e89941..4263f809e 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -2,11 +2,14 @@ import webbrowser from time import time -import scipy.ndimage -import numpy as np - import neuroglancer import neuroglancer.cli +import numpy as np +import scipy.ndimage + +MESSAGE_DURATION = 5 # seconds +# TODO maybe can avoid this being a param or const +NUM_DIMS = 3 def create_demo_data(size=(64, 64, 64), radius=20): @@ -28,22 +31,32 @@ def create_dimensions(): ) +def create_identity_matrix(): + id_list = [[int(i == j) for j in range(NUM_DIMS + 1)] for i in range(NUM_DIMS)] + return np.array(id_list) + + class LinearRegistrationWorkflow: def __init__(self, template_url, source_url): self.template_url = template_url self.source_url = source_url + self.status_timers = {} + self.stored_points = [[], []] + self.affine = create_identity_matrix() if template_url is None or source_url is None: self.demo_data = create_demo_data() - self.status_timers = {} self.setup_viewer() - self.last_run_points = 0 + + def __str__(self): + with self.viewer.txn() as s: + return str(s) def _clear_status_messages(self): to_pop = [] for k, v in self.status_timers.items(): - if time() - v > 5: + if time() - v > MESSAGE_DURATION: to_pop.append(k) for k in to_pop: with self.viewer.config_state.txn() as 
s: @@ -55,6 +68,20 @@ def _set_status_message(self, key, message): s.status_messages[key] = message self.status_timers[key] = time() + def transform_template_points(self, template_points): + # Apply the current affine transform to the template points + n = template_points.shape[0] + pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))]) + unpad = lambda x: x[:, :-1] + X = pad(template_points) + transformed = X @ self.affine.T + return unpad(transformed) + + def toggle_registered_visibility(self, _): + with self.viewer.txn() as s: + s.layers["registered"].visible = not s.layers["registered"].visible + s.layers["template"].visible = not s.layers["registered"].visible + def setup_viewer(self): self.viewer = viewer = neuroglancer.Viewer() source_layer = self.create_source_image() @@ -70,25 +97,35 @@ def setup_viewer(self): dimensions=create_dimensions(), annotation_color="#00FF00", ) - # TODO set these to be in 3D layout - # TODO unlink any controls that should be unlinked s.layout = neuroglancer.row_layout( [ neuroglancer.LayerGroupViewer( - layers=["template", "registered", "markers"] + layers=["template", "registered", "markers"], layout="xy-3d" ), neuroglancer.LayerGroupViewer( - layers=["source", "registered", "markers"] + layers=["source", "markers"], layout="xy-3d" ), ] ) + s.layout.children[1].position.link = "unlinked" + s.layout.children[1].crossSectionOrientation.link = "unlinked" + s.layout.children[1].crossSectionScale.link = "unlinked" + s.layout.children[1].projectionOrientation.link = "unlinked" + s.layout.children[1].projectionScale.link = "unlinked" s.layers["markers"].tool = "annotatePoint" s.selected_layer.layer = "markers" s.selected_layer.visible = True + viewer.actions.add( + "toggle-registered-visibility", self.toggle_registered_visibility + ) + + with viewer.config_state.txn() as s: + s.input_event_bindings.viewer["keyt"] = "toggle-registered-visibility" + self._set_status_message( "help", - "Place markers in pairs, starting with the template, and then the source. The registered layer will automatically update as you add markers.", + "Place markers in pairs, starting with the template, and then the source. The registered layer will automatically update as you add markers. 
Press 't' to toggle between viewing the template and registered layers.", ) self.viewer.shared_state.add_changed_callback( @@ -99,7 +136,6 @@ def on_state_changed(self): self.viewer.defer_callback(self.update) def update(self): - print("Updating") with self.viewer.txn() as s: self.estimate_affine(s) self._clear_status_messages() @@ -118,11 +154,12 @@ def create_template_image(self): else: return neuroglancer.ImageLayer(source=self.template_url) - # TODO probably need to be a little more careful about the size of the T matrix - # based on the number of input dims def create_source_image(self, registration_matrix=None): - if registration_matrix is None: - registration_matrix = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]] + registration_matrix = ( + list(create_identity_matrix()) + if registration_matrix is None + else registration_matrix + ) if self.source_url is None: # TODO might be helpful to randomize this and check how close after registration desired_output_matrix_homogenous = [ @@ -157,25 +194,11 @@ def create_source_image(self, registration_matrix=None): ), ) - def create_registered_image(self, registration_matrix=None): - return self.create_source_image(registration_matrix=registration_matrix) - - def estimate_affine(self, s): - annotations = s.layers["markers"].annotations - # TODO expand this to estimate different types of transforms - # Depending on the number of points - # For now, just ignore non pairs - annotations = annotations[: (len(annotations) // 2) * 2] - if len(annotations) < 2: - return False + def create_registered_image(self): + return self.create_source_image(registration_matrix=self.affine) + def split_points_into_pairs(self, annotations): # TODO allow a different way to group, such as by description - # TODO color points differently based on whether template or source - # in the shader - print(len(annotations) // 2, self.last_run_points) - if len(annotations) // 2 == self.last_run_points: - return False - template_points = [] source_points = [] for i, a in enumerate(annotations): @@ -183,11 +206,30 @@ def estimate_affine(self, s): template_points.append(a.point) else: source_points.append(a.point) + return np.array(template_points), np.array(source_points) + + def estimate_affine(self, s): + # TODO do we need to throttle this update or make it manually triggered? 
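+ # Every shared-state change re-enters this method through the deferred
+ # update callback, so dragging a marker can trigger many re-fits in a row.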
+ annotations = s.layers["markers"].annotations + if len(annotations) < 2: + return False + # TODO expand this to estimate different types of transforms + # Depending on the number of points - import numpy as np + # Ignore annotations not part of a pair + annotations = annotations[: (len(annotations) // 2) * 2] + template_points, source_points = self.split_points_into_pairs(annotations) + if len(self.stored_points[0]) == len(template_points) and len( + self.stored_points[1] + ) == len(source_points): + if np.all(np.isclose(self.stored_points[0], template_points)) and np.all( + np.isclose(self.stored_points[1], source_points) + ): + return False - template_points = np.array(template_points) - source_points = np.array(source_points) + # TODO color points differently based on whether template or source + # in the shader + template_points, source_points = self.split_points_into_pairs(annotations) # Estimate affine transform using least squares, for now using # https://stackoverflow.com/questions/20546182/how-to-perform-coordinates-affine-transformation-using-python-part-2 @@ -195,7 +237,6 @@ def estimate_affine(self, s): n = template_points.shape[0] pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))]) - unpad = lambda x: x[:, :-1] X = pad(template_points) Y = pad(source_points) @@ -206,11 +247,7 @@ def estimate_affine(self, s): A[np.abs(A) < 1e-4] = 0 # Round all other values to 4 decimal places A = np.round(A, 4) - - transform = lambda x: unpad(np.dot(pad(x), A)) - - transformed = transform(source_points) - print(f"Transformed points: {transformed}") + self.affine = A[:NUM_DIMS] # Set the transformation on the layer that is being registered # Something seems to go wrong with the state updates once this happens @@ -219,17 +256,34 @@ def estimate_affine(self, s): # TODO in theory should be able to just update the matrix, # but if cannot, can replace whole layer and restore settings old_visible = s.layers["registered"].visible - s.layers["registered"] = self.create_registered_image(A[:3]) + s.layers["registered"] = self.create_registered_image() s.layers["registered"].visible = old_visible self._set_status_message( ("info"), f"Estimated affine transform with {n} point pairs" ) - # TODO actually want to check if the number of points or the points - # themselves have changed - self.last_run_points = n + self.stored_points = [template_points, source_points] return True + def get_registration_info(self): + info = {} + with self.viewer.txn() as s: + annotations = s.layers["markers"].annotations + template_points, source_points = self.split_points_into_pairs(annotations) + # transformed_points = self.transform_template_points(template_points) + info["template"] = template_points.tolist() + info["source"] = source_points.tolist() + # info["transformed_template"] = transformed_points.tolist() + info["transform"] = self.affine.tolist() + return info + + def dump_info(self, path: str): + import json + + info = self.get_registration_info() + with open(path, "w") as f: + json.dump(info, f, indent=4) + def handle_args(): ap = argparse.ArgumentParser() @@ -249,7 +303,7 @@ def handle_args(): return args -def main(): +if __name__ == "__main__": args = handle_args() demo = LinearRegistrationWorkflow( @@ -258,7 +312,3 @@ def main(): ) webbrowser.open_new(demo.viewer.get_viewer_url()) - - -if __name__ == "__main__": - main() From 97837bfa50201a63564feae17f3bdc8275651e3f Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Mon, 15 Sep 2025 16:46:17 +0200 Subject: [PATCH 04/30] feat: add shader and annotation props 
to lin reg --- .../examples/example_linear_registration.py | 49 +++++++++++++++++-- 1 file changed, 45 insertions(+), 4 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 4263f809e..099f7c801 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -11,6 +11,20 @@ # TODO maybe can avoid this being a param or const NUM_DIMS = 3 +MARKERS_SHADER = """ +#uicontrol vec3 templatePointColor color(default="#00FF00") +#uicontrol vec3 sourcePointColor color(default="#0000FF") +#uicontrol float pointSize slider(min=1, max=16, default=6) +void main() { + if (int(prop_index()) % 2 == 0) { + setColor(templatePointColor); + } else { + setColor(sourcePointColor); + } + setPointMarkerSize(pointSize); +} +""" + def create_demo_data(size=(64, 64, 64), radius=20): import numpy as np @@ -95,7 +109,26 @@ def setup_viewer(self): s.layers["registered"].visible = False s.layers["markers"] = neuroglancer.LocalAnnotationLayer( dimensions=create_dimensions(), - annotation_color="#00FF00", + annotation_properties=[ + neuroglancer.AnnotationPropertySpec( + id="label", + type="uint32", + default=0, + ), + neuroglancer.AnnotationPropertySpec( + id="group", + type="uint8", + default=0, + enum_labels=["template", "source"], + enum_values=[0, 1], + ), + neuroglancer.AnnotationPropertySpec( + id="index", + type="uint32", + default=0, + ), + ], + shader=MARKERS_SHADER, ) s.layout = neuroglancer.row_layout( [ @@ -136,6 +169,12 @@ def on_state_changed(self): self.viewer.defer_callback(self.update) def update(self): + # TODO would either remove or throttle this + # It could be useful to keep it with a throttle + # because sometimes the state updates can get killed + # and this would help for seeing that to the user + # since it might be hard for us to catch all cases + print("State updated, handling") with self.viewer.txn() as s: self.estimate_affine(s) self._clear_status_messages() @@ -210,6 +249,7 @@ def split_points_into_pairs(self, annotations): def estimate_affine(self, s): # TODO do we need to throttle this update or make it manually triggered? 
+ # While updating by moving a point, this can break right now annotations = s.layers["markers"].annotations if len(annotations) < 2: return False @@ -226,9 +266,10 @@ def estimate_affine(self, s): np.isclose(self.stored_points[1], source_points) ): return False + else: + for i, a in enumerate(s.layers["markers"].annotations): + a.props = [i // 2, i % 2, i] - # TODO color points differently based on whether template or source - # in the shader template_points, source_points = self.split_points_into_pairs(annotations) # Estimate affine transform using least squares, for now using @@ -260,7 +301,7 @@ def estimate_affine(self, s): s.layers["registered"].visible = old_visible self._set_status_message( - ("info"), f"Estimated affine transform with {n} point pairs" + "info", f"Estimated affine transform with {n} point pairs" ) self.stored_points = [template_points, source_points] return True From 8fbfeb4be164d184ee65f2305e7cf49b9935e5a4 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Mon, 15 Sep 2025 16:50:23 +0200 Subject: [PATCH 05/30] chore: add light typing to lin reg --- python/examples/example_linear_registration.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 099f7c801..f28c49022 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -26,7 +26,7 @@ """ -def create_demo_data(size=(64, 64, 64), radius=20): +def create_demo_data(size: tuple[int, int, int] = (64, 64, 64), radius: float = 20): import numpy as np data = np.zeros(size, dtype=np.uint8) @@ -51,7 +51,7 @@ def create_identity_matrix(): class LinearRegistrationWorkflow: - def __init__(self, template_url, source_url): + def __init__(self, template_url: str, source_url: str): self.template_url = template_url self.source_url = source_url self.status_timers = {} @@ -77,12 +77,12 @@ def _clear_status_messages(self): s.status_messages.pop(k, None) self.status_timers.pop(k) - def _set_status_message(self, key, message): + def _set_status_message(self, key: str, message: str): with self.viewer.config_state.txn() as s: s.status_messages[key] = message self.status_timers[key] = time() - def transform_template_points(self, template_points): + def transform_template_points(self, template_points : np.ndarray): # Apply the current affine transform to the template points n = template_points.shape[0] pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))]) @@ -193,7 +193,7 @@ def create_template_image(self): else: return neuroglancer.ImageLayer(source=self.template_url) - def create_source_image(self, registration_matrix=None): + def create_source_image(self, registration_matrix : list | np.ndarray | None = None): registration_matrix = ( list(create_identity_matrix()) if registration_matrix is None @@ -247,7 +247,7 @@ def split_points_into_pairs(self, annotations): source_points.append(a.point) return np.array(template_points), np.array(source_points) - def estimate_affine(self, s): + def estimate_affine(self, s : neuroglancer.ViewerState): # TODO do we need to throttle this update or make it manually triggered? 
# While updating by moving a point, this can break right now annotations = s.layers["markers"].annotations From 990ee0d45679d0fbbbe1855498d429645f830135 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Mon, 15 Sep 2025 17:11:28 +0200 Subject: [PATCH 06/30] feat: small update to print of updates --- .../examples/example_linear_registration.py | 28 +++++++++++-------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index f28c49022..a5a7338fe 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -62,6 +62,7 @@ def __init__(self, template_url: str, source_url: str): self.demo_data = create_demo_data() self.setup_viewer() + self.last_updated_print = -1 def __str__(self): with self.viewer.txn() as s: @@ -82,7 +83,7 @@ def _set_status_message(self, key: str, message: str): s.status_messages[key] = message self.status_timers[key] = time() - def transform_template_points(self, template_points : np.ndarray): + def transform_template_points(self, template_points: np.ndarray): # Apply the current affine transform to the template points n = template_points.shape[0] pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))]) @@ -169,12 +170,11 @@ def on_state_changed(self): self.viewer.defer_callback(self.update) def update(self): - # TODO would either remove or throttle this - # It could be useful to keep it with a throttle - # because sometimes the state updates can get killed - # and this would help for seeing that to the user - # since it might be hard for us to catch all cases - print("State updated, handling") + current_time = time() + if current_time - self.last_updated_print > 1: + # TODO format the time nicely in the print + print(f"Viewer states are successfully syncing at {current_time}") + self.last_updated_print = current_time with self.viewer.txn() as s: self.estimate_affine(s) self._clear_status_messages() @@ -193,14 +193,13 @@ def create_template_image(self): else: return neuroglancer.ImageLayer(source=self.template_url) - def create_source_image(self, registration_matrix : list | np.ndarray | None = None): + def create_source_image(self, registration_matrix: list | np.ndarray | None = None): registration_matrix = ( list(create_identity_matrix()) if registration_matrix is None else registration_matrix ) if self.source_url is None: - # TODO might be helpful to randomize this and check how close after registration desired_output_matrix_homogenous = [ [0.8, 0, 0, 0], [0, 0.2, 0, 0], @@ -237,7 +236,12 @@ def create_registered_image(self): return self.create_source_image(registration_matrix=self.affine) def split_points_into_pairs(self, annotations): - # TODO allow a different way to group, such as by description + # TODO allow a different way to group. 
Right now the order informs + # the properties + # But ideally we'd like to allow the other way around as well + # This needs to be reworked just a bit to allow that + # As a first step for that, this should inspect the properties + # and group based on that instead of this grouping template_points = [] source_points = [] for i, a in enumerate(annotations): @@ -247,7 +251,7 @@ def split_points_into_pairs(self, annotations): source_points.append(a.point) return np.array(template_points), np.array(source_points) - def estimate_affine(self, s : neuroglancer.ViewerState): + def estimate_affine(self, s: neuroglancer.ViewerState): # TODO do we need to throttle this update or make it manually triggered? # While updating by moving a point, this can break right now annotations = s.layers["markers"].annotations @@ -267,6 +271,8 @@ def estimate_affine(self, s : neuroglancer.ViewerState): ): return False else: + # TODO instead of directly doing the update, debounce it and only do + # it after a short delay if no other updates for i, a in enumerate(s.layers["markers"].annotations): a.props = [i // 2, i % 2, i] From f5eba1033f423b7cf68ece0586e83202e3fe81b8 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Thu, 18 Sep 2025 11:46:02 +0200 Subject: [PATCH 07/30] feat: remove dims const --- .../examples/example_linear_registration.py | 25 ++++++++----------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index a5a7338fe..3406c79f2 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -8,8 +8,6 @@ import scipy.ndimage MESSAGE_DURATION = 5 # seconds -# TODO maybe can avoid this being a param or const -NUM_DIMS = 3 MARKERS_SHADER = """ #uicontrol vec3 templatePointColor color(default="#00FF00") @@ -45,8 +43,8 @@ def create_dimensions(): ) -def create_identity_matrix(): - id_list = [[int(i == j) for j in range(NUM_DIMS + 1)] for i in range(NUM_DIMS)] +def create_identity_matrix(num_dims: int): + id_list = [[int(i == j) for j in range(num_dims + 1)] for i in range(num_dims)] return np.array(id_list) @@ -56,7 +54,7 @@ def __init__(self, template_url: str, source_url: str): self.source_url = source_url self.status_timers = {} self.stored_points = [[], []] - self.affine = create_identity_matrix() + self.affine = [] if template_url is None or source_url is None: self.demo_data = create_demo_data() @@ -194,11 +192,9 @@ def create_template_image(self): return neuroglancer.ImageLayer(source=self.template_url) def create_source_image(self, registration_matrix: list | np.ndarray | None = None): - registration_matrix = ( - list(create_identity_matrix()) - if registration_matrix is None - else registration_matrix - ) + transform_kwargs = {} + if registration_matrix is not None: + transform_kwargs["matrix"] = registration_matrix if self.source_url is None: desired_output_matrix_homogenous = [ [0.8, 0, 0, 0], @@ -219,7 +215,7 @@ def create_source_image(self, registration_matrix: list | np.ndarray | None = No ), transform=neuroglancer.CoordinateSpaceTransform( output_dimensions=create_dimensions(), - matrix=registration_matrix, + **transform_kwargs, ), ) ] @@ -227,9 +223,7 @@ def create_source_image(self, registration_matrix: list | np.ndarray | None = No else: return neuroglancer.ImageLayer( source=self.source_url, - transform=neuroglancer.CoordinateSpaceTransform( - matrix=registration_matrix - ), + 
transform=neuroglancer.CoordinateSpaceTransform(**transform_kwargs), ) def create_registered_image(self): @@ -294,7 +288,8 @@ def estimate_affine(self, s: neuroglancer.ViewerState): A[np.abs(A) < 1e-4] = 0 # Round all other values to 4 decimal places A = np.round(A, 4) - self.affine = A[:NUM_DIMS] + num_dims = X.shape[1] - 1 + self.affine = A[:num_dims] # Set the transformation on the layer that is being registered # Something seems to go wrong with the state updates once this happens From f18f20796ca4ba7215d63082c12a379e5167894e Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Thu, 18 Sep 2025 14:49:41 +0200 Subject: [PATCH 08/30] feat: scaffold n dim lsqs affine --- .../examples/example_linear_registration.py | 117 ++++++++++++------ 1 file changed, 81 insertions(+), 36 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 3406c79f2..e56725fdb 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -8,6 +8,7 @@ import scipy.ndimage MESSAGE_DURATION = 5 # seconds +NUM_DEMO_DIMS = 2 # Currently can be 2D or 3D MARKERS_SHADER = """ #uicontrol vec3 templatePointColor color(default="#00FF00") @@ -24,10 +25,57 @@ """ -def create_demo_data(size: tuple[int, int, int] = (64, 64, 64), radius: float = 20): +def affine_fit(template_points: np.ndarray, target_points: np.ndarray): + # Source points and target points are NxD arrays + assert template_points.shape == target_points.shape + N = template_points.shape[0] + D = template_points.shape[1] + T = template_points + + # Target values (B) is a D * N array + # Input values (A) is a D * N, (D * (D + 1)) array + # Output estimation is a (D * (D + 1)) array + A = np.zeros(((D * N), D * (D + 1))) + for i in range(N): + for j in range(D): + start_index = j * D + end_index = (j + 1) * D + A[D * i + j, start_index:end_index] = T[i] + A[D * i + j, D * D + j] = 1 + B = target_points.T.flatten() + + print(A.shape, B.shape) + print(A, B) + # The estimated affine transform params will be flattened + # and there will be D * (D + 1) of them + # Format is x1, x2, ..., b1, b2, ... 
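+ # e.g. for D = 3 the unknown vector tvec is
+ # [a11, a12, a13, a21, a22, a23, a31, a32, a33, b1, b2, b3];
+ # row D * i + j of A encodes output dimension j of point i, so
+ # np.linalg.lstsq solves A @ tvec ~= B in the least-squares sense.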
+ tvec, res, rank, sd = np.linalg.lstsq(A, B) + print(A, target_points, tvec) + # Put the flattened version back into the matrix + affine = np.zeros((D, D + 1)) + for i in range(D): + start_index = i * D + end_index = start_index + D + affine[i, :D] = tvec[start_index:end_index] + affine[i, -1] = tvec[D * D + i] + + # Round to close decimal + affine = np.round(affine, decimals=2) + print(affine) + return affine + + +def create_demo_data(size: int | tuple = 60, radius: float = 20): import numpy as np - data = np.zeros(size, dtype=np.uint8) + data_size = (size,) * NUM_DEMO_DIMS if isinstance(size, int) else size + data = np.zeros(data_size, dtype=np.uint8) + if NUM_DEMO_DIMS == 2: + yy, xx = np.indices(data.shape) + center = np.array(data.shape) / 2 + circle_mask = (xx - center[1]) ** 2 + (yy - center[0]) ** 2 < radius**2 + data[circle_mask] = 255 + return data zz, yy, xx = np.indices(data.shape) center = np.array(data.shape) / 2 sphere_mask = (xx - center[2]) ** 2 + (yy - center[1]) ** 2 + ( @@ -38,6 +86,8 @@ def create_demo_data(size: tuple[int, int, int] = (64, 64, 64), radius: float = def create_dimensions(): + if NUM_DEMO_DIMS == 2: + return neuroglancer.CoordinateSpace(names=["x", "y"], units="nm", scales=[1, 1]) return neuroglancer.CoordinateSpace( names=["x", "y", "z"], units="nm", scales=[1, 1, 1] ) @@ -54,7 +104,8 @@ def __init__(self, template_url: str, source_url: str): self.source_url = source_url self.status_timers = {} self.stored_points = [[], []] - self.affine = [] + # Will be an Nx(N+1) matrix for N input dimensions + self.affine = None if template_url is None or source_url is None: self.demo_data = create_demo_data() @@ -81,14 +132,13 @@ def _set_status_message(self, key: str, message: str): s.status_messages[key] = message self.status_timers[key] = time() - def transform_template_points(self, template_points: np.ndarray): - # Apply the current affine transform to the template points - n = template_points.shape[0] - pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))]) - unpad = lambda x: x[:, :-1] - X = pad(template_points) - transformed = X @ self.affine.T - return unpad(transformed) + def transform_points(self, points: np.ndarray): + # Apply the current affine transform to the points + transformed = np.zeros_like(points) + padded = np.pad(points, ((0, 0), (0, 1)), constant_values=1) + for i in range(len(points)): + transformed[i] = self.affine @ padded[i] + return transformed def toggle_registered_visibility(self, _): with self.viewer.txn() as s: @@ -196,17 +246,25 @@ def create_source_image(self, registration_matrix: list | np.ndarray | None = No if registration_matrix is not None: transform_kwargs["matrix"] = registration_matrix if self.source_url is None: - desired_output_matrix_homogenous = [ - [0.8, 0, 0, 0], - [0, 0.2, 0, 0], - [0, 0, 0.9, 0], - [0, 0, 0, 1], - ] + if NUM_DEMO_DIMS == 2: + desired_output_matrix_homogenous = [ + [0.8, 0, 0], + [0, 0.2, 0], + [0, 0, 1], + ] + else: + desired_output_matrix_homogenous = [ + [0.8, 0, 0, 0], + [0, 0.2, 0, 0], + [0, 0, 0.9, 0], + [0, 0, 0, 1], + ] inverse_matrix = np.linalg.inv(desired_output_matrix_homogenous) transformed = scipy.ndimage.affine_transform( self.demo_data, matrix=inverse_matrix, ) + print(inverse_matrix) return neuroglancer.ImageLayer( source=[ neuroglancer.LayerDataSource( @@ -272,24 +330,8 @@ def estimate_affine(self, s: neuroglancer.ViewerState): template_points, source_points = self.split_points_into_pairs(annotations) - # Estimate affine transform using least squares, for now using - # 
https://stackoverflow.com/questions/20546182/how-to-perform-coordinates-affine-transformation-using-python-part-2 - # but can replace later - - n = template_points.shape[0] - pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))]) - X = pad(template_points) - Y = pad(source_points) - - # Solve the least squares problem X * A = Y - # to find our transformation matrix A - A, res, rank, sd = np.linalg.lstsq(X, Y) - # Zero out really small values on A - A[np.abs(A) < 1e-4] = 0 - # Round all other values to 4 decimal places - A = np.round(A, 4) - num_dims = X.shape[1] - 1 - self.affine = A[:num_dims] + # Estimate transform + self.affine = affine_fit(template_points, source_points) # Set the transformation on the layer that is being registered # Something seems to go wrong with the state updates once this happens @@ -302,8 +344,11 @@ def estimate_affine(self, s: neuroglancer.ViewerState): s.layers["registered"].visible = old_visible self._set_status_message( - "info", f"Estimated affine transform with {n} point pairs" + "info", + f"Estimated affine transform with {len(annotations) // 2} point pairs", ) + print("Estimated points are", self.transform_points(source_points)) + print("Original points are", template_points) self.stored_points = [template_points, source_points] return True From b9ec70cd84f8f1d08d5c0c81c1f7a9fd72021084 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Thu, 18 Sep 2025 15:23:58 +0200 Subject: [PATCH 09/30] fix: correct calling affine --- python/examples/example_linear_registration.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index e56725fdb..a748c0d91 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -42,10 +42,8 @@ def affine_fit(template_points: np.ndarray, target_points: np.ndarray): end_index = (j + 1) * D A[D * i + j, start_index:end_index] = T[i] A[D * i + j, D * D + j] = 1 - B = target_points.T.flatten() + B = target_points.flatten() - print(A.shape, B.shape) - print(A, B) # The estimated affine transform params will be flattened # and there will be D * (D + 1) of them # Format is x1, x2, ..., b1, b2, ... 
@@ -62,9 +60,19 @@ def affine_fit(template_points: np.ndarray, target_points: np.ndarray): # Round to close decimal affine = np.round(affine, decimals=2) print(affine) + print(transform_points(affine, template_points)) return affine +def transform_points(affine: np.ndarray, points: np.ndarray): + # Apply the current affine transform to the points + transformed = np.zeros_like(points) + padded = np.pad(points, ((0, 0), (0, 1)), constant_values=1) + for i in range(len(points)): + transformed[i] = affine @ padded[i] + return transformed + + def create_demo_data(size: int | tuple = 60, radius: float = 20): import numpy as np @@ -331,7 +339,7 @@ def estimate_affine(self, s: neuroglancer.ViewerState): template_points, source_points = self.split_points_into_pairs(annotations) # Estimate transform - self.affine = affine_fit(template_points, source_points) + self.affine = affine_fit(source_points, template_points) # Set the transformation on the layer that is being registered # Something seems to go wrong with the state updates once this happens From e58ca3a9c341df33f0aeb3f80e4a9795994dcf62 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Thu, 18 Sep 2025 15:53:58 +0200 Subject: [PATCH 10/30] refactor: standardise terms --- .../examples/example_linear_registration.py | 155 ++++++++---------- 1 file changed, 71 insertions(+), 84 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index a748c0d91..8a0315b60 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -1,6 +1,6 @@ import argparse import webbrowser -from time import time +from time import time, ctime import neuroglancer import neuroglancer.cli @@ -11,26 +11,26 @@ NUM_DEMO_DIMS = 2 # Currently can be 2D or 3D MARKERS_SHADER = """ -#uicontrol vec3 templatePointColor color(default="#00FF00") -#uicontrol vec3 sourcePointColor color(default="#0000FF") +#uicontrol vec3 fixedPointColor color(default="#00FF00") +#uicontrol vec3 movingPointColor color(default="#0000FF") #uicontrol float pointSize slider(min=1, max=16, default=6) void main() { - if (int(prop_index()) % 2 == 0) { - setColor(templatePointColor); + if (int(prop_group()) == 0) { + setColor(fixedPointColor); } else { - setColor(sourcePointColor); + setColor(movingPointColor); } setPointMarkerSize(pointSize); } """ -def affine_fit(template_points: np.ndarray, target_points: np.ndarray): - # Source points and target points are NxD arrays - assert template_points.shape == target_points.shape - N = template_points.shape[0] - D = template_points.shape[1] - T = template_points +def affine_fit(fixed_points: np.ndarray, moving_points: np.ndarray): + # Points are NxD arrays + assert fixed_points.shape == moving_points.shape + N = fixed_points.shape[0] + D = fixed_points.shape[1] + T = fixed_points # Target values (B) is a D * N array # Input values (A) is a D * N, (D * (D + 1)) array @@ -42,13 +42,13 @@ def affine_fit(template_points: np.ndarray, target_points: np.ndarray): end_index = (j + 1) * D A[D * i + j, start_index:end_index] = T[i] A[D * i + j, D * D + j] = 1 - B = target_points.flatten() + B = moving_points.flatten() # The estimated affine transform params will be flattened # and there will be D * (D + 1) of them # Format is x1, x2, ..., b1, b2, ... 
tvec, res, rank, sd = np.linalg.lstsq(A, B) - print(A, target_points, tvec) + # Put the flattened version back into the matrix affine = np.zeros((D, D + 1)) for i in range(D): @@ -57,10 +57,8 @@ def affine_fit(template_points: np.ndarray, target_points: np.ndarray): affine[i, :D] = tvec[start_index:end_index] affine[i, -1] = tvec[D * D + i] - # Round to close decimal - affine = np.round(affine, decimals=2) - print(affine) - print(transform_points(affine, template_points)) + # Round to close decimals + affine = np.round(affine, decimals=3) return affine @@ -74,8 +72,6 @@ def transform_points(affine: np.ndarray, points: np.ndarray): def create_demo_data(size: int | tuple = 60, radius: float = 20): - import numpy as np - data_size = (size,) * NUM_DEMO_DIMS if isinstance(size, int) else size data = np.zeros(data_size, dtype=np.uint8) if NUM_DEMO_DIMS == 2: @@ -93,6 +89,7 @@ def create_demo_data(size: int | tuple = 60, radius: float = 20): return data +# TODO can we avoid calling this at all? Can the layers just be created and dims inferred? def create_dimensions(): if NUM_DEMO_DIMS == 2: return neuroglancer.CoordinateSpace(names=["x", "y"], units="nm", scales=[1, 1]) @@ -101,21 +98,15 @@ def create_dimensions(): ) -def create_identity_matrix(num_dims: int): - id_list = [[int(i == j) for j in range(num_dims + 1)] for i in range(num_dims)] - return np.array(id_list) - - class LinearRegistrationWorkflow: - def __init__(self, template_url: str, source_url: str): - self.template_url = template_url - self.source_url = source_url + def __init__(self, fixed_url: str, moving_url: str): + self.fixed_url = fixed_url + self.moving_url = moving_url self.status_timers = {} self.stored_points = [[], []] - # Will be an Nx(N+1) matrix for N input dimensions self.affine = None - if template_url is None or source_url is None: + if fixed_url is None or moving_url is None: self.demo_data = create_demo_data() self.setup_viewer() @@ -140,28 +131,26 @@ def _set_status_message(self, key: str, message: str): s.status_messages[key] = message self.status_timers[key] = time() - def transform_points(self, points: np.ndarray): - # Apply the current affine transform to the points - transformed = np.zeros_like(points) - padded = np.pad(points, ((0, 0), (0, 1)), constant_values=1) - for i in range(len(points)): - transformed[i] = self.affine @ padded[i] - return transformed + def transform_points_with_affine(self, points: np.ndarray): + if self.affine is not None: + return transform_points(self.affine, points) def toggle_registered_visibility(self, _): with self.viewer.txn() as s: s.layers["registered"].visible = not s.layers["registered"].visible - s.layers["template"].visible = not s.layers["registered"].visible + s.layers["fixed"].visible = not s.layers["registered"].visible + s.layers["markers"].visible = not s.layers["registered"].visible + s.layers["mappedMarkers"].visible = s.layers["registered"].visible def setup_viewer(self): self.viewer = viewer = neuroglancer.Viewer() - source_layer = self.create_source_image() - template_layer = self.create_template_image() + fixed_layer = self.create_fixed_image() + moving_layer = self.create_moving_image() registered_layer = self.create_registered_image() with viewer.txn() as s: - s.layers["template"] = template_layer - s.layers["source"] = source_layer + s.layers["fixed"] = fixed_layer + s.layers["moving"] = moving_layer s.layers["registered"] = registered_layer s.layers["registered"].visible = False s.layers["markers"] = neuroglancer.LocalAnnotationLayer( @@ -176,7 +165,7 @@ 
def setup_viewer(self): id="group", type="uint8", default=0, - enum_labels=["template", "source"], + enum_labels=["fixed", "moving"], enum_values=[0, 1], ), neuroglancer.AnnotationPropertySpec( @@ -190,10 +179,10 @@ def setup_viewer(self): s.layout = neuroglancer.row_layout( [ neuroglancer.LayerGroupViewer( - layers=["template", "registered", "markers"], layout="xy-3d" + layers=["fixed", "registered", "markers"], layout="xy-3d" ), neuroglancer.LayerGroupViewer( - layers=["source", "markers"], layout="xy-3d" + layers=["moving", "markers"], layout="xy-3d" ), ] ) @@ -215,7 +204,7 @@ def setup_viewer(self): self._set_status_message( "help", - "Place markers in pairs, starting with the template, and then the source. The registered layer will automatically update as you add markers. Press 't' to toggle between viewing the template and registered layers.", + "Place markers in pairs, starting with the fixed, and then the moving. The registered layer will automatically update as you add markers. Press 't' to toggle between viewing the fixed and registered layers.", ) self.viewer.shared_state.add_changed_callback( @@ -227,16 +216,15 @@ def on_state_changed(self): def update(self): current_time = time() - if current_time - self.last_updated_print > 1: - # TODO format the time nicely in the print - print(f"Viewer states are successfully syncing at {current_time}") + if current_time - self.last_updated_print > 5: + print(f"Viewer states are successfully syncing at {ctime()}") self.last_updated_print = current_time with self.viewer.txn() as s: self.estimate_affine(s) self._clear_status_messages() - def create_template_image(self): - if self.template_url is None: + def create_fixed_image(self): + if self.fixed_url is None: return neuroglancer.ImageLayer( source=[ neuroglancer.LayerDataSource( @@ -247,13 +235,13 @@ def create_template_image(self): ] ) else: - return neuroglancer.ImageLayer(source=self.template_url) + return neuroglancer.ImageLayer(source=self.fixed_url) - def create_source_image(self, registration_matrix: list | np.ndarray | None = None): + def create_moving_image(self, registration_matrix: list | np.ndarray | None = None): transform_kwargs = {} if registration_matrix is not None: transform_kwargs["matrix"] = registration_matrix - if self.source_url is None: + if self.moving_url is None: if NUM_DEMO_DIMS == 2: desired_output_matrix_homogenous = [ [0.8, 0, 0], @@ -288,12 +276,12 @@ def create_source_image(self, registration_matrix: list | np.ndarray | None = No ) else: return neuroglancer.ImageLayer( - source=self.source_url, + source=self.moving_url, transform=neuroglancer.CoordinateSpaceTransform(**transform_kwargs), ) def create_registered_image(self): - return self.create_source_image(registration_matrix=self.affine) + return self.create_moving_image(registration_matrix=self.affine) def split_points_into_pairs(self, annotations): # TODO allow a different way to group. 
Right now the order informs @@ -302,14 +290,14 @@ def split_points_into_pairs(self, annotations): # This needs to be reworked just a bit to allow that # As a first step for that, this should inspect the properties # and group based on that instead of this grouping - template_points = [] - source_points = [] + fixed_points = [] + moving_points = [] for i, a in enumerate(annotations): if i % 2 == 0: - template_points.append(a.point) + fixed_points.append(a.point) else: - source_points.append(a.point) - return np.array(template_points), np.array(source_points) + moving_points.append(a.point) + return np.array(fixed_points), np.array(moving_points) def estimate_affine(self, s: neuroglancer.ViewerState): # TODO do we need to throttle this update or make it manually triggered? @@ -322,12 +310,12 @@ def estimate_affine(self, s: neuroglancer.ViewerState): # Ignore annotations not part of a pair annotations = annotations[: (len(annotations) // 2) * 2] - template_points, source_points = self.split_points_into_pairs(annotations) - if len(self.stored_points[0]) == len(template_points) and len( + fixed_points, moving_points = self.split_points_into_pairs(annotations) + if len(self.stored_points[0]) == len(fixed_points) and len( self.stored_points[1] - ) == len(source_points): - if np.all(np.isclose(self.stored_points[0], template_points)) and np.all( - np.isclose(self.stored_points[1], source_points) + ) == len(moving_points): + if np.all(np.isclose(self.stored_points[0], fixed_points)) and np.all( + np.isclose(self.stored_points[1], moving_points) ): return False else: @@ -336,10 +324,8 @@ def estimate_affine(self, s: neuroglancer.ViewerState): for i, a in enumerate(s.layers["markers"].annotations): a.props = [i // 2, i % 2, i] - template_points, source_points = self.split_points_into_pairs(annotations) - - # Estimate transform - self.affine = affine_fit(source_points, template_points) + fixed_points, moving_points = self.split_points_into_pairs(annotations) + self.affine = affine_fit(moving_points, fixed_points) # Set the transformation on the layer that is being registered # Something seems to go wrong with the state updates once this happens @@ -355,21 +341,20 @@ def estimate_affine(self, s: neuroglancer.ViewerState): "info", f"Estimated affine transform with {len(annotations) // 2} point pairs", ) - print("Estimated points are", self.transform_points(source_points)) - print("Original points are", template_points) - self.stored_points = [template_points, source_points] + self.stored_points = [fixed_points, moving_points] return True def get_registration_info(self): info = {} with self.viewer.txn() as s: annotations = s.layers["markers"].annotations - template_points, source_points = self.split_points_into_pairs(annotations) - # transformed_points = self.transform_template_points(template_points) - info["template"] = template_points.tolist() - info["source"] = source_points.tolist() - # info["transformed_template"] = transformed_points.tolist() - info["transform"] = self.affine.tolist() + fixed_points, moving_points = self.split_points_into_pairs(annotations) + transformed_points = self.transform_points_with_affine(moving_points) + info["fixedPoints"] = fixed_points.tolist() + info["movingPoints"] = moving_points.tolist() + if self.affine is not None and transformed_points is not None: + info["transformedPoints"] = transformed_points.tolist() + info["affineTransform"] = self.affine.tolist() return info def dump_info(self, path: str): @@ -384,12 +369,14 @@ def handle_args(): ap = 
argparse.ArgumentParser() neuroglancer.cli.add_server_arguments(ap) ap.add_argument( - "--template", + "--fixed", + "-f", type=str, - help="Source URL for the template image", + help="Source URL for the fixed image", ) ap.add_argument( - "--source", + "--moving", + "-m", type=str, help="Source URL for the image to be registered", ) @@ -402,8 +389,8 @@ def handle_args(): args = handle_args() demo = LinearRegistrationWorkflow( - template_url=args.template, - source_url=args.source, + fixed_url=args.fixed, + moving_url=args.moving, ) webbrowser.open_new(demo.viewer.get_viewer_url()) From dfd654c6455e98e472e1141c71662bfcd8b646f6 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Thu, 18 Sep 2025 16:45:14 +0200 Subject: [PATCH 11/30] feat: improve perf with debounce of lin reg --- .../examples/example_linear_registration.py | 79 +++++++++++++------ 1 file changed, 57 insertions(+), 22 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 8a0315b60..088966f2f 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -1,6 +1,8 @@ import argparse +import threading import webbrowser -from time import time, ctime +from time import ctime, time +from typing import Optional import neuroglancer import neuroglancer.cli @@ -25,6 +27,24 @@ """ +def debounce(wait: float): + def decorator(fn): + timer = None + + def debounced(*args, **kwargs): + nonlocal timer + + if timer is not None: + timer.cancel() + + timer = threading.Timer(wait, lambda: fn(*args, **kwargs)) + timer.start() + + return debounced + + return decorator + + def affine_fit(fixed_points: np.ndarray, moving_points: np.ndarray): # Points are NxD arrays assert fixed_points.shape == moving_points.shape @@ -104,6 +124,7 @@ def __init__(self, fixed_url: str, moving_url: str): self.moving_url = moving_url self.status_timers = {} self.stored_points = [[], []] + self.stored_group_number = -1 self.affine = None if fixed_url is None or moving_url is None: @@ -168,11 +189,6 @@ def setup_viewer(self): enum_labels=["fixed", "moving"], enum_values=[0, 1], ), - neuroglancer.AnnotationPropertySpec( - id="index", - type="uint32", - default=0, - ), ], shader=MARKERS_SHADER, ) @@ -219,9 +235,17 @@ def update(self): if current_time - self.last_updated_print > 5: print(f"Viewer states are successfully syncing at {ctime()}") self.last_updated_print = current_time + # with self.viewer.txn() as s: + # self.automatically_group_markers(s) + # self.estimate_affine(s) + self._clear_status_messages() + # TODO for some reason I need to keep the layer change for states + self.update_affine() + + @debounce(1.0) + def update_affine(self): with self.viewer.txn() as s: self.estimate_affine(s) - self._clear_status_messages() def create_fixed_image(self): if self.fixed_url is None: @@ -260,7 +284,7 @@ def create_moving_image(self, registration_matrix: list | np.ndarray | None = No self.demo_data, matrix=inverse_matrix, ) - print(inverse_matrix) + print("target demo affine", inverse_matrix) return neuroglancer.ImageLayer( source=[ neuroglancer.LayerDataSource( @@ -284,29 +308,42 @@ def create_registered_image(self): return self.create_moving_image(registration_matrix=self.affine) def split_points_into_pairs(self, annotations): - # TODO allow a different way to group. 
Right now the order informs - # the properties - # But ideally we'd like to allow the other way around as well - # This needs to be reworked just a bit to allow that - # As a first step for that, this should inspect the properties - # and group based on that instead of this grouping fixed_points = [] moving_points = [] for i, a in enumerate(annotations): - if i % 2 == 0: + props = a.props + if props[1] == 0: fixed_points.append(a.point) else: moving_points.append(a.point) + # If the moving points is not evenly split, instead split differently + if len(moving_points) != len(fixed_points): + fixed_points = [] + moving_points = [] + for i, a in enumerate(annotations): + if i % 2 == 0: + fixed_points.append(a.point) + else: + moving_points.append(a.point) return np.array(fixed_points), np.array(moving_points) + def automatically_group_markers(self, s: neuroglancer.ViewerState): + annotations = s.layers["markers"].annotations + if len(annotations) < 2: + return False + if len(annotations) == self.stored_group_number: + return False + print("Updating marker groups") + for i, a in enumerate(s.layers["markers"].annotations): + a.props = [i // 2, i % 2] + print(a.props, i // 2, i % 2) + self.stored_group_number = len(annotations) + return True + def estimate_affine(self, s: neuroglancer.ViewerState): - # TODO do we need to throttle this update or make it manually triggered? - # While updating by moving a point, this can break right now annotations = s.layers["markers"].annotations if len(annotations) < 2: return False - # TODO expand this to estimate different types of transforms - # Depending on the number of points # Ignore annotations not part of a pair annotations = annotations[: (len(annotations) // 2) * 2] @@ -322,9 +359,7 @@ def estimate_affine(self, s: neuroglancer.ViewerState): # TODO instead of directly doing the update, debounce it and only do # it after a short delay if no other updates for i, a in enumerate(s.layers["markers"].annotations): - a.props = [i // 2, i % 2, i] - - fixed_points, moving_points = self.split_points_into_pairs(annotations) + a.props = [i // 2, i % 2] self.affine = affine_fit(moving_points, fixed_points) # Set the transformation on the layer that is being registered From b8da450730d5c4ede1657d7085379f63de41da13 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Fri, 19 Sep 2025 10:48:42 +0200 Subject: [PATCH 12/30] feat: two diff debounces in lin reg update --- .../examples/example_linear_registration.py | 69 ++++++++----------- 1 file changed, 29 insertions(+), 40 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 088966f2f..c2839924e 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -2,7 +2,6 @@ import threading import webbrowser from time import ctime, time -from typing import Optional import neuroglancer import neuroglancer.cli @@ -158,10 +157,9 @@ def transform_points_with_affine(self, points: np.ndarray): def toggle_registered_visibility(self, _): with self.viewer.txn() as s: - s.layers["registered"].visible = not s.layers["registered"].visible - s.layers["fixed"].visible = not s.layers["registered"].visible - s.layers["markers"].visible = not s.layers["registered"].visible - s.layers["mappedMarkers"].visible = s.layers["registered"].visible + is_registered_visible = s.layers["registered"].visible + s.layers["registered"].visible = not is_registered_visible + s.layers["fixed"].visible = is_registered_visible def 
setup_viewer(self): self.viewer = viewer = neuroglancer.Viewer() @@ -235,12 +233,14 @@ def update(self): if current_time - self.last_updated_print > 5: print(f"Viewer states are successfully syncing at {ctime()}") self.last_updated_print = current_time - # with self.viewer.txn() as s: - # self.automatically_group_markers(s) - # self.estimate_affine(s) - self._clear_status_messages() - # TODO for some reason I need to keep the layer change for states + self.automatically_group_markers_and_update() self.update_affine() + self._clear_status_messages() + + @debounce(0.2) + def automatically_group_markers_and_update(self): + with self.viewer.txn() as s: + self.automatically_group_markers(s) @debounce(1.0) def update_affine(self): @@ -308,23 +308,17 @@ def create_registered_image(self): return self.create_moving_image(registration_matrix=self.affine) def split_points_into_pairs(self, annotations): - fixed_points = [] - moving_points = [] + num_points = len(annotations) // 2 + num_dims = len(annotations[0].point) + fixed_points = np.zeros((num_points, num_dims)) + moving_points = np.zeros((num_points, num_dims)) for i, a in enumerate(annotations): props = a.props if props[1] == 0: - fixed_points.append(a.point) + fixed_points[props[0]] = a.point else: - moving_points.append(a.point) - # If the moving points is not evenly split, instead split differently - if len(moving_points) != len(fixed_points): - fixed_points = [] - moving_points = [] - for i, a in enumerate(annotations): - if i % 2 == 0: - fixed_points.append(a.point) - else: - moving_points.append(a.point) + moving_points[props[0]] = a.point + return np.array(fixed_points), np.array(moving_points) def automatically_group_markers(self, s: neuroglancer.ViewerState): @@ -333,13 +327,22 @@ def automatically_group_markers(self, s: neuroglancer.ViewerState): return False if len(annotations) == self.stored_group_number: return False - print("Updating marker groups") for i, a in enumerate(s.layers["markers"].annotations): a.props = [i // 2, i % 2] - print(a.props, i // 2, i % 2) self.stored_group_number = len(annotations) return True + def update_registered_layer(self, s: neuroglancer.ViewerState): + existing_transform = s.layers["registered"].source[0].transform + if existing_transform is None: + # TODO again the create dimensions call needs to be fixed + existing_transform = neuroglancer.CoordinateSpaceTransform( + output_dimensions=create_dimensions() + ) + s.layers["registered"].source[0].transform = existing_transform + if self.affine is not None: + s.layers["registered"].source[0].transform.matrix = self.affine.tolist() + def estimate_affine(self, s: neuroglancer.ViewerState): annotations = s.layers["markers"].annotations if len(annotations) < 2: @@ -355,22 +358,8 @@ def estimate_affine(self, s: neuroglancer.ViewerState): np.isclose(self.stored_points[1], moving_points) ): return False - else: - # TODO instead of directly doing the update, debounce it and only do - # it after a short delay if no other updates - for i, a in enumerate(s.layers["markers"].annotations): - a.props = [i // 2, i % 2] self.affine = affine_fit(moving_points, fixed_points) - - # Set the transformation on the layer that is being registered - # Something seems to go wrong with the state updates once this happens - # s.layers["registered"].source[0].transform.matrix = A.T[:3] - # Because of this, trying to replace the whole layer to see if that helps - # TODO in theory should be able to just update the matrix, - # but if cannot, can replace whole layer and restore 
settings - old_visible = s.layers["registered"].visible - s.layers["registered"] = self.create_registered_image() - s.layers["registered"].visible = old_visible + self.update_registered_layer(s) self._set_status_message( "info", From 61488cf97a856ac2356944ff873edcf5f798b677 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Fri, 19 Sep 2025 10:54:38 +0200 Subject: [PATCH 13/30] fix: correct caching of number --- python/examples/example_linear_registration.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index c2839924e..8ef7598b8 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -323,13 +323,13 @@ def split_points_into_pairs(self, annotations): def automatically_group_markers(self, s: neuroglancer.ViewerState): annotations = s.layers["markers"].annotations - if len(annotations) < 2: - return False if len(annotations) == self.stored_group_number: return False + self.stored_group_number = len(annotations) + if len(annotations) < 2: + return False for i, a in enumerate(s.layers["markers"].annotations): a.props = [i // 2, i % 2] - self.stored_group_number = len(annotations) return True def update_registered_layer(self, s: neuroglancer.ViewerState): From c91e036b039950226350c3e1a933e1e34ae9d5e9 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Fri, 19 Sep 2025 12:08:18 +0200 Subject: [PATCH 14/30] feat: remove fixed dims --- .../examples/example_linear_registration.py | 86 ++++++++++--------- 1 file changed, 44 insertions(+), 42 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 8ef7598b8..d3a437be2 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -108,12 +108,10 @@ def create_demo_data(size: int | tuple = 60, radius: float = 20): return data -# TODO can we avoid calling this at all? Can the layers just be created and dims inferred? 
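(Illustrative aside, not part of the patch: the hunk that follows drops the hard-coded x/y/z dimensions in favour of a CoordinateSpace derived from the live viewer state. A minimal sketch of that round-trip, with made-up names, units and scales:

    import neuroglancer

    viewer_dims = neuroglancer.CoordinateSpace(
        names=["x", "y", "z"], units="nm", scales=[1, 1, 1]
    )
    # Rebuild an equivalent space from whatever the viewer reports,
    # which is what the rewritten create_dimensions() below does.
    derived = neuroglancer.CoordinateSpace(
        names=viewer_dims.names, units=viewer_dims.units, scales=viewer_dims.scales
    )
)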
-def create_dimensions(): - if NUM_DEMO_DIMS == 2: - return neuroglancer.CoordinateSpace(names=["x", "y"], units="nm", scales=[1, 1]) +def create_dimensions(viewer_dims: neuroglancer.CoordinateSpace): + print(viewer_dims) return neuroglancer.CoordinateSpace( - names=["x", "y", "z"], units="nm", scales=[1, 1, 1] + names=viewer_dims.names, units=viewer_dims.units, scales=viewer_dims.scales ) @@ -125,6 +123,7 @@ def __init__(self, fixed_url: str, moving_url: str): self.stored_points = [[], []] self.stored_group_number = -1 self.affine = None + self.ready = False if fixed_url is None or moving_url is None: self.demo_data = create_demo_data() @@ -132,9 +131,12 @@ def __init__(self, fixed_url: str, moving_url: str): self.setup_viewer() self.last_updated_print = -1 - def __str__(self): + def get_state(self): with self.viewer.txn() as s: - return str(s) + return s + + def __str__(self): + return str(self.get_state()) def _clear_status_messages(self): to_pop = [] @@ -172,8 +174,29 @@ def setup_viewer(self): s.layers["moving"] = moving_layer s.layers["registered"] = registered_layer s.layers["registered"].visible = False + + viewer.actions.add( + "toggle-registered-visibility", self.toggle_registered_visibility + ) + + with viewer.config_state.txn() as s: + s.input_event_bindings.viewer["keyt"] = "toggle-registered-visibility" + self.viewer.shared_state.add_changed_callback( + lambda: self.viewer.defer_callback(self.on_state_changed) + ) + + self._set_status_message( + "help", + "Waiting for viewer to initialize...", + ) + + @debounce(0.5) + def post_setup_viewer(self): + with self.viewer.txn() as s: + if s.dimensions.names == []: + return s.layers["markers"] = neuroglancer.LocalAnnotationLayer( - dimensions=create_dimensions(), + dimensions=create_dimensions(s.dimensions), annotation_properties=[ neuroglancer.AnnotationPropertySpec( id="label", @@ -190,6 +213,10 @@ def setup_viewer(self): ], shader=MARKERS_SHADER, ) + s.layers["markers"].tool = "annotatePoint" + s.selected_layer.layer = "markers" + s.selected_layer.visible = True + s.layout = neuroglancer.row_layout( [ neuroglancer.LayerGroupViewer( @@ -205,30 +232,20 @@ def setup_viewer(self): s.layout.children[1].crossSectionScale.link = "unlinked" s.layout.children[1].projectionOrientation.link = "unlinked" s.layout.children[1].projectionScale.link = "unlinked" - s.layers["markers"].tool = "annotatePoint" - s.selected_layer.layer = "markers" - s.selected_layer.visible = True - - viewer.actions.add( - "toggle-registered-visibility", self.toggle_registered_visibility - ) - - with viewer.config_state.txn() as s: - s.input_event_bindings.viewer["keyt"] = "toggle-registered-visibility" self._set_status_message( "help", "Place markers in pairs, starting with the fixed, and then the moving. The registered layer will automatically update as you add markers. 
Press 't' to toggle between viewing the fixed and registered layers.", ) - - self.viewer.shared_state.add_changed_callback( - lambda: self.viewer.defer_callback(self.on_state_changed) - ) + self.ready = True def on_state_changed(self): self.viewer.defer_callback(self.update) def update(self): + if not self.ready: + self.post_setup_viewer() + return current_time = time() if current_time - self.last_updated_print > 5: print(f"Viewer states are successfully syncing at {ctime()}") @@ -252,19 +269,14 @@ def create_fixed_image(self): return neuroglancer.ImageLayer( source=[ neuroglancer.LayerDataSource( - neuroglancer.LocalVolume( - self.demo_data, dimensions=create_dimensions() - ) + neuroglancer.LocalVolume(self.demo_data) ) ] ) else: return neuroglancer.ImageLayer(source=self.fixed_url) - def create_moving_image(self, registration_matrix: list | np.ndarray | None = None): - transform_kwargs = {} - if registration_matrix is not None: - transform_kwargs["matrix"] = registration_matrix + def create_moving_image(self): if self.moving_url is None: if NUM_DEMO_DIMS == 2: desired_output_matrix_homogenous = [ @@ -287,25 +299,16 @@ def create_moving_image(self, registration_matrix: list | np.ndarray | None = No print("target demo affine", inverse_matrix) return neuroglancer.ImageLayer( source=[ - neuroglancer.LayerDataSource( - neuroglancer.LocalVolume( - transformed, dimensions=create_dimensions() - ), - transform=neuroglancer.CoordinateSpaceTransform( - output_dimensions=create_dimensions(), - **transform_kwargs, - ), - ) + neuroglancer.LayerDataSource(neuroglancer.LocalVolume(transformed)) ] ) else: return neuroglancer.ImageLayer( source=self.moving_url, - transform=neuroglancer.CoordinateSpaceTransform(**transform_kwargs), ) def create_registered_image(self): - return self.create_moving_image(registration_matrix=self.affine) + return self.create_moving_image() def split_points_into_pairs(self, annotations): num_points = len(annotations) // 2 @@ -335,9 +338,8 @@ def automatically_group_markers(self, s: neuroglancer.ViewerState): def update_registered_layer(self, s: neuroglancer.ViewerState): existing_transform = s.layers["registered"].source[0].transform if existing_transform is None: - # TODO again the create dimensions call needs to be fixed existing_transform = neuroglancer.CoordinateSpaceTransform( - output_dimensions=create_dimensions() + output_dimensions=create_dimensions(s.dimensions) ) s.layers["registered"].source[0].transform = existing_transform if self.affine is not None: From 085563e7493e5c9e896a94d4f8e2e6e424d59d8c Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Fri, 19 Sep 2025 12:26:49 +0200 Subject: [PATCH 15/30] fix: handle channel dims --- .../examples/example_linear_registration.py | 26 ++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index d3a437be2..cb4cf6544 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -343,7 +343,31 @@ def update_registered_layer(self, s: neuroglancer.ViewerState): ) s.layers["registered"].source[0].transform = existing_transform if self.affine is not None: - s.layers["registered"].source[0].transform.matrix = self.affine.tolist() + transform = self.affine.tolist() + if s.layers["registered"].source[0].transform is not None: + final_transform = [] + layer_transform = s.layers["registered"].source[0].transform + local_channel_indices = [ + i + for i, name in 
enumerate(layer_transform.outputDimensions.names) + if name.endswith(("'", "^", "#")) + ] + num_local_count = 0 + for i, name in enumerate(layer_transform.outputDimensions.names): + is_local = i in local_channel_indices + if is_local: + final_transform.append(layer_transform.matrix[i].tolist()) + num_local_count += 1 + else: + row = transform[i - num_local_count] + # At the indices corresponding to local channels, insert 0s + for j in local_channel_indices: + row.insert(j, 0) + final_transform.append(row) + else: + final_transform = transform + print("Updated affine transform:", final_transform) + s.layers["registered"].source[0].transform.matrix = final_transform def estimate_affine(self, s: neuroglancer.ViewerState): annotations = s.layers["markers"].annotations From 79793a7edb227e92cdc50583e0700213b1146d78 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Fri, 19 Sep 2025 16:34:03 +0200 Subject: [PATCH 16/30] feat: use input state in lin reg to be more robust --- .../examples/example_linear_registration.py | 170 ++++++++---------- 1 file changed, 79 insertions(+), 91 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index cb4cf6544..13f80d631 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -116,20 +116,28 @@ def create_dimensions(viewer_dims: neuroglancer.CoordinateSpace): class LinearRegistrationWorkflow: - def __init__(self, fixed_url: str, moving_url: str): - self.fixed_url = fixed_url - self.moving_url = moving_url + def __init__(self, starting_state=neuroglancer.ViewerState | None): self.status_timers = {} self.stored_points = [[], []] self.stored_group_number = -1 self.affine = None self.ready = False + self.last_updated_print = -1 + self.viewer = neuroglancer.Viewer() + self.viewer.shared_state.add_changed_callback( + lambda: self.viewer.defer_callback(self.on_state_changed) + ) - if fixed_url is None or moving_url is None: + if starting_state is None: self.demo_data = create_demo_data() + self.setup_demo_viewer() + else: + self.viewer.set_state(starting_state) - self.setup_viewer() - self.last_updated_print = -1 + self._set_status_message( + "help", + "Waiting for viewer to initialize with one layer called fixed and one layer called moving.", + ) def get_state(self): with self.viewer.txn() as s: @@ -163,38 +171,37 @@ def toggle_registered_visibility(self, _): s.layers["registered"].visible = not is_registered_visible s.layers["fixed"].visible = is_registered_visible - def setup_viewer(self): - self.viewer = viewer = neuroglancer.Viewer() - fixed_layer = self.create_fixed_image() - moving_layer = self.create_moving_image() - registered_layer = self.create_registered_image() + def setup_demo_viewer(self): + viewer = self.viewer + fixed_layer = self.create_demo_fixed_image() + moving_layer = self.create_demo_moving_image() with viewer.txn() as s: s.layers["fixed"] = fixed_layer s.layers["moving"] = moving_layer - s.layers["registered"] = registered_layer - s.layers["registered"].visible = False + def setup_viewer(self): + viewer = self.viewer viewer.actions.add( "toggle-registered-visibility", self.toggle_registered_visibility ) with viewer.config_state.txn() as s: s.input_event_bindings.viewer["keyt"] = "toggle-registered-visibility" - self.viewer.shared_state.add_changed_callback( - lambda: self.viewer.defer_callback(self.on_state_changed) - ) - - self._set_status_message( - "help", - "Waiting for viewer to initialize...", - ) @debounce(0.5) 
def post_setup_viewer(self): with self.viewer.txn() as s: - if s.dimensions.names == []: + if ( + s.dimensions.names == [] + or s.layers.index("fixed") == -1 + or s.layers.index("moving") == -1 + ): return + # registered_layer = self.create_registered_image() + s.layers["moving1"] = self.create_registered_image() + s.layers["moving1"].name = "registered" + s.layers["registered"].visible = False s.layers["markers"] = neuroglancer.LocalAnnotationLayer( dimensions=create_dimensions(s.dimensions), annotation_properties=[ @@ -213,18 +220,23 @@ def post_setup_viewer(self): ], shader=MARKERS_SHADER, ) + s.layers["moving"].visible = True + s.layers["fixed"].visible = True s.layers["markers"].tool = "annotatePoint" s.selected_layer.layer = "markers" s.selected_layer.visible = True + all_layer_names = [layer.name for layer in s.layers] + group_1_names = [name for name in all_layer_names if name != "moving"] + group_2_names = [ + name + for name in all_layer_names + if name != "fixed" and name != "registered" + ] s.layout = neuroglancer.row_layout( [ - neuroglancer.LayerGroupViewer( - layers=["fixed", "registered", "markers"], layout="xy-3d" - ), - neuroglancer.LayerGroupViewer( - layers=["moving", "markers"], layout="xy-3d" - ), + neuroglancer.LayerGroupViewer(layers=group_1_names, layout="xy-3d"), + neuroglancer.LayerGroupViewer(layers=group_2_names, layout="xy-3d"), ] ) s.layout.children[1].position.link = "unlinked" @@ -238,77 +250,67 @@ def post_setup_viewer(self): "Place markers in pairs, starting with the fixed, and then the moving. The registered layer will automatically update as you add markers. Press 't' to toggle between viewing the fixed and registered layers.", ) self.ready = True + self.setup_viewer() def on_state_changed(self): self.viewer.defer_callback(self.update) def update(self): - if not self.ready: - self.post_setup_viewer() - return current_time = time() if current_time - self.last_updated_print > 5: print(f"Viewer states are successfully syncing at {ctime()}") self.last_updated_print = current_time + if not self.ready: + self.post_setup_viewer() + return self.automatically_group_markers_and_update() self.update_affine() self._clear_status_messages() - @debounce(0.2) + @debounce(0.25) def automatically_group_markers_and_update(self): with self.viewer.txn() as s: self.automatically_group_markers(s) - @debounce(1.0) + @debounce(1.5) def update_affine(self): with self.viewer.txn() as s: self.estimate_affine(s) - def create_fixed_image(self): - if self.fixed_url is None: - return neuroglancer.ImageLayer( - source=[ - neuroglancer.LayerDataSource( - neuroglancer.LocalVolume(self.demo_data) - ) - ] - ) - else: - return neuroglancer.ImageLayer(source=self.fixed_url) - - def create_moving_image(self): - if self.moving_url is None: - if NUM_DEMO_DIMS == 2: - desired_output_matrix_homogenous = [ - [0.8, 0, 0], - [0, 0.2, 0], - [0, 0, 1], - ] - else: - desired_output_matrix_homogenous = [ - [0.8, 0, 0, 0], - [0, 0.2, 0, 0], - [0, 0, 0.9, 0], - [0, 0, 0, 1], - ] - inverse_matrix = np.linalg.inv(desired_output_matrix_homogenous) - transformed = scipy.ndimage.affine_transform( - self.demo_data, - matrix=inverse_matrix, - ) - print("target demo affine", inverse_matrix) - return neuroglancer.ImageLayer( - source=[ - neuroglancer.LayerDataSource(neuroglancer.LocalVolume(transformed)) - ] - ) + def create_demo_fixed_image(self): + return neuroglancer.ImageLayer( + source=[ + neuroglancer.LayerDataSource(neuroglancer.LocalVolume(self.demo_data)) + ] + ) + + def create_demo_moving_image(self): + 
if NUM_DEMO_DIMS == 2: + desired_output_matrix_homogenous = [ + [0.8, 0, 0], + [0, 0.2, 0], + [0, 0, 1], + ] else: - return neuroglancer.ImageLayer( - source=self.moving_url, - ) + desired_output_matrix_homogenous = [ + [0.8, 0, 0, 0], + [0, 0.2, 0, 0], + [0, 0, 0.9, 0], + [0, 0, 0, 1], + ] + inverse_matrix = np.linalg.inv(desired_output_matrix_homogenous) + transformed = scipy.ndimage.affine_transform( + self.demo_data, + matrix=inverse_matrix, + ) + print("target demo affine", inverse_matrix) + return neuroglancer.ImageLayer( + source=[neuroglancer.LayerDataSource(neuroglancer.LocalVolume(transformed))] + ) def create_registered_image(self): - return self.create_moving_image() + with self.viewer.txn() as s: + return s.layers["moving"] def split_points_into_pairs(self, annotations): num_points = len(annotations) // 2 @@ -417,19 +419,8 @@ def dump_info(self, path: str): def handle_args(): ap = argparse.ArgumentParser() + neuroglancer.cli.add_state_arguments(ap, required=False) neuroglancer.cli.add_server_arguments(ap) - ap.add_argument( - "--fixed", - "-f", - type=str, - help="Source URL for the fixed image", - ) - ap.add_argument( - "--moving", - "-m", - type=str, - help="Source URL for the image to be registered", - ) args = ap.parse_args() neuroglancer.cli.handle_server_arguments(args) return args @@ -438,9 +429,6 @@ def handle_args(): if __name__ == "__main__": args = handle_args() - demo = LinearRegistrationWorkflow( - fixed_url=args.fixed, - moving_url=args.moving, - ) + demo = LinearRegistrationWorkflow(args.state) webbrowser.open_new(demo.viewer.get_viewer_url()) From 38d56f7bc33f343fbfe4f71e544f598510e5f686 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Mon, 22 Sep 2025 12:30:48 +0200 Subject: [PATCH 17/30] feat: making lin reg pipeline more custom --- .../examples/example_linear_registration.py | 218 ++++++++++++------ 1 file changed, 151 insertions(+), 67 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 13f80d631..ac6004180 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -44,6 +44,8 @@ def debounced(*args, **kwargs): return decorator +# TODO add other types of fits +# Inspired by https://github.com/AllenInstitute/render-python/blob/master/renderapi/transform/leaf/affine_models.py def affine_fit(fixed_points: np.ndarray, moving_points: np.ndarray): # Points are NxD arrays assert fixed_points.shape == moving_points.shape @@ -109,14 +111,16 @@ def create_demo_data(size: int | tuple = 60, radius: float = 20): def create_dimensions(viewer_dims: neuroglancer.CoordinateSpace): - print(viewer_dims) return neuroglancer.CoordinateSpace( names=viewer_dims.names, units=viewer_dims.units, scales=viewer_dims.scales ) class LinearRegistrationWorkflow: - def __init__(self, starting_state=neuroglancer.ViewerState | None): + def __init__(self, args): + starting_state = args.state + self.moving_name = args.moving_name + self.annotations_name = args.annotations_name self.status_timers = {} self.stored_points = [[], []] self.stored_group_number = -1 @@ -130,7 +134,7 @@ def __init__(self, starting_state=neuroglancer.ViewerState | None): if starting_state is None: self.demo_data = create_demo_data() - self.setup_demo_viewer() + self.add_fake_data_to_viewer() else: self.viewer.set_state(starting_state) @@ -165,20 +169,21 @@ def transform_points_with_affine(self, points: np.ndarray): if self.affine is not None: return transform_points(self.affine, points) + 
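(Illustrative aside, not part of the patch: transform_points_with_affine above applies the estimated affine, stored in this example as a D x (D+1) matrix with the translation in the last column, to N x D point arrays. A minimal numpy sketch of that convention, with made-up values:

    import numpy as np

    # 2 x 3 affine: scale x by 0.8, y by 0.2, then translate by (5, -2).
    affine = np.array([[0.8, 0.0, 5.0],
                       [0.0, 0.2, -2.0]])
    points = np.array([[10.0, 10.0],
                       [0.0, 0.0]])                           # N x D

    # Pad to homogeneous coordinates and apply the affine.
    padded = np.hstack([points, np.ones((len(points), 1))])
    print(padded @ affine.T)                                  # [[13.  0.] [ 5. -2.]]
)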
# TODO change this to replace the transform matrix directly? + # so then only need one layer def toggle_registered_visibility(self, _): with self.viewer.txn() as s: is_registered_visible = s.layers["registered"].visible s.layers["registered"].visible = not is_registered_visible - s.layers["fixed"].visible = is_registered_visible - def setup_demo_viewer(self): + def add_fake_data_to_viewer(self): viewer = self.viewer fixed_layer = self.create_demo_fixed_image() moving_layer = self.create_demo_moving_image() with viewer.txn() as s: s.layers["fixed"] = fixed_layer - s.layers["moving"] = moving_layer + s.layers[self.moving_name] = moving_layer def setup_viewer(self): viewer = self.viewer @@ -191,48 +196,48 @@ def setup_viewer(self): @debounce(0.5) def post_setup_viewer(self): + # TODO why do we need the moving? In theory only at setup? with self.viewer.txn() as s: - if ( - s.dimensions.names == [] - or s.layers.index("fixed") == -1 - or s.layers.index("moving") == -1 - ): + if s.dimensions.names == [] or s.layers.index(self.moving_name) == -1: return # registered_layer = self.create_registered_image() - s.layers["moving1"] = self.create_registered_image() - s.layers["moving1"].name = "registered" + # TODO might be able to use deepcopy to avoid this akwardness + # of rename + s.layers[self.moving_name + "1"] = self.create_registered_image() + s.layers[self.moving_name + "1"].name = "registered" s.layers["registered"].visible = False - s.layers["markers"] = neuroglancer.LocalAnnotationLayer( - dimensions=create_dimensions(s.dimensions), - annotation_properties=[ - neuroglancer.AnnotationPropertySpec( - id="label", - type="uint32", - default=0, - ), - neuroglancer.AnnotationPropertySpec( - id="group", - type="uint8", - default=0, - enum_labels=["fixed", "moving"], - enum_values=[0, 1], - ), - ], - shader=MARKERS_SHADER, - ) - s.layers["moving"].visible = True - s.layers["fixed"].visible = True - s.layers["markers"].tool = "annotatePoint" - s.selected_layer.layer = "markers" + if s.layers.index("registered") == -1: + s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( + dimensions=create_dimensions(s.dimensions), + annotation_properties=[ + neuroglancer.AnnotationPropertySpec( + id="label", + type="uint32", + default=0, + ), + neuroglancer.AnnotationPropertySpec( + id="group", + type="uint8", + default=0, + enum_labels=["fixed", "moving"], + enum_values=[0, 1], + ), + ], + shader=MARKERS_SHADER, + ) + s.layers[self.moving_name].visible = True + s.layers[self.annotations_name].tool = "annotatePoint" + s.selected_layer.layer = self.annotations_name s.selected_layer.visible = True all_layer_names = [layer.name for layer in s.layers] - group_1_names = [name for name in all_layer_names if name != "moving"] - group_2_names = [ - name - for name in all_layer_names - if name != "fixed" and name != "registered" + group_1_names = [ + name for name in all_layer_names if name != self.moving_name ] + group_2_names = [name for name in all_layer_names if name != "registered"] + # TODO in two coord space set the main one for group 2 + # TODO could load a link with layout already setup + # in which case should just add registered to group 1 s.layout = neuroglancer.row_layout( [ neuroglancer.LayerGroupViewer(layers=group_1_names, layout="xy-3d"), @@ -310,34 +315,63 @@ def create_demo_moving_image(self): def create_registered_image(self): with self.viewer.txn() as s: - return s.layers["moving"] - - def split_points_into_pairs(self, annotations): - num_points = len(annotations) // 2 - num_dims = 
len(annotations[0].point) - fixed_points = np.zeros((num_points, num_dims)) - moving_points = np.zeros((num_points, num_dims)) - for i, a in enumerate(annotations): - props = a.props - if props[1] == 0: - fixed_points[props[0]] = a.point - else: - moving_points[props[0]] = a.point - - return np.array(fixed_points), np.array(moving_points) - + return s.layers[self.moving_name] + + # TODO this check could maybe be more robust + def check_for_two_coord_spaces(self, dim_names): + set_of_names = set() + for name in dim_names: + # rstrip any number off the end + stripped_name = name.rstrip("0123456789") + set_of_names.add(stripped_name) + return len(set_of_names) * 2 == len(dim_names) + + def split_points_into_pairs(self, annotations, dim_names): + two_coord_spaces = self.check_for_two_coord_spaces(dim_names) + # TODO need some way to indicate which coord space is which + if two_coord_spaces: + num_points = len(annotations) + num_dims = len(annotations[0].point) // 2 + fixed_points = np.zeros((num_points, num_dims)) + moving_points = np.zeros((num_points, num_dims)) + for i, a in enumerate(annotations): + for j in range(num_dims): + fixed_points[i, j] = a.point[j + num_dims] + moving_points[i, j] = a.point[j] + return np.array(fixed_points), np.array(moving_points) + else: + num_points = len(annotations) // 2 + num_dims = len(annotations[0].point) + fixed_points = np.zeros((num_points, num_dims)) + moving_points = np.zeros((num_points, num_dims)) + for i, a in enumerate(annotations): + props = a.props + if props[1] == 0: + fixed_points[props[0]] = a.point + else: + moving_points[props[0]] = a.point + + return np.array(fixed_points), np.array(moving_points) + + # TODO can disable this check completely if using two coord spaces def automatically_group_markers(self, s: neuroglancer.ViewerState): - annotations = s.layers["markers"].annotations + dimensions = s.dimensions.names + if self.check_for_two_coord_spaces(dimensions): + return False + annotations = s.layers[self.annotations_name].annotations if len(annotations) == self.stored_group_number: return False self.stored_group_number = len(annotations) if len(annotations) < 2: return False - for i, a in enumerate(s.layers["markers"].annotations): + for i, a in enumerate(s.layers[self.annotations_name].annotations): a.props = [i // 2, i % 2] return True def update_registered_layer(self, s: neuroglancer.ViewerState): + # TODO might need to add some mechanism to neuroglancer to do this + # this seems off that we can't directly set the transform matrix + # without first creating a new transform object existing_transform = s.layers["registered"].source[0].transform if existing_transform is None: existing_transform = neuroglancer.CoordinateSpaceTransform( @@ -346,6 +380,7 @@ def update_registered_layer(self, s: neuroglancer.ViewerState): s.layers["registered"].source[0].transform = existing_transform if self.affine is not None: transform = self.affine.tolist() + # TODO this is where that mapping needs to happen of affine dims if s.layers["registered"].source[0].transform is not None: final_transform = [] layer_transform = s.layers["registered"].source[0].transform @@ -369,16 +404,36 @@ def update_registered_layer(self, s: neuroglancer.ViewerState): else: final_transform = transform print("Updated affine transform:", final_transform) + # TODO for some reason with global dims not matching local dims + # nothing happens at this step + # TODO if global dims don't match local dims then the + # tranform matrix doesn't match properly + # the easiest to fix this 
would be to make the local dims of the + # markers be the same as the local dims of the image layers + # but we don't seem to always be able to access that info + # we could try to check similar to the above if a transform + # has been setup on viewer init and use that to get the + # names of the local dims which might be easier overall + # than trying to fiddle with these dims + # Worst comes to worst you could ask for a copy of the layer by the user + # with the dims if command line not specified + # but command line could spec the markers layer dims + # Try https://neuroglancer-demo.appspot.com/#!%7B%22dimensions%22:%7B%22x%22:%5B6.500000000000001e-7%2C%22m%22%5D%2C%22y%22:%5B6.500000000000001e-7%2C%22m%22%5D%2C%22z%22:%5B0.00003%2C%22m%22%5D%7D%2C%22position%22:%5B14231.224609375%2C30510.12109375%2C0%5D%2C%22crossSectionScale%22:121.51041751873487%2C%22projectionScale%22:131072%2C%22layers%22:%5B%7B%22type%22:%22image%22%2C%22source%22:%22s3://allen-genetic-tools/epifluorescence/1383646325/ome_zarr_conversion/1383646325.zarr/%7Czarr2:%22%2C%22localDimensions%22:%7B%22c%27%22:%5B1%2C%22%22%5D%7D%2C%22localPosition%22:%5B1%5D%2C%22tab%22:%22source%22%2C%22name%22:%221383646325.zarr%22%7D%5D%2C%22selectedLayer%22:%7B%22size%22:379%2C%22visible%22:true%2C%22layer%22:%221383646325.zarr%22%7D%2C%22layout%22:%224panel-alt%22%7D for this + print(s.layers["registered"].source[0].transform) + print(final_transform) s.layers["registered"].source[0].transform.matrix = final_transform + print(s.layers["registered"].source[0].transform) def estimate_affine(self, s: neuroglancer.ViewerState): - annotations = s.layers["markers"].annotations - if len(annotations) < 2: + annotations = s.layers[self.annotations_name].annotations + if len(annotations) < 1: return False - # Ignore annotations not part of a pair - annotations = annotations[: (len(annotations) // 2) * 2] - fixed_points, moving_points = self.split_points_into_pairs(annotations) + dim_names = s.dimensions.names + # TODO likely broken right now for non pairs non two coord spaces + fixed_points, moving_points = self.split_points_into_pairs( + annotations, dim_names + ) if len(self.stored_points[0]) == len(fixed_points) and len( self.stored_points[1] ) == len(moving_points): @@ -391,7 +446,7 @@ def estimate_affine(self, s: neuroglancer.ViewerState): self._set_status_message( "info", - f"Estimated affine transform with {len(annotations) // 2} point pairs", + f"Estimated affine transform with {len(moving_points)} point pairs", ) self.stored_points = [fixed_points, moving_points] return True @@ -399,8 +454,11 @@ def estimate_affine(self, s: neuroglancer.ViewerState): def get_registration_info(self): info = {} with self.viewer.txn() as s: - annotations = s.layers["markers"].annotations - fixed_points, moving_points = self.split_points_into_pairs(annotations) + annotations = s.layers[self.annotations_name].annotations + dim_names = s.dimensions.names + fixed_points, moving_points = self.split_points_into_pairs( + annotations, dim_names + ) transformed_points = self.transform_points_with_affine(moving_points) info["fixedPoints"] = fixed_points.tolist() info["movingPoints"] = moving_points.tolist() @@ -417,10 +475,36 @@ def dump_info(self, path: str): json.dump(info, f, indent=4) +def add_mapping_args(ap: argparse.ArgumentParser): + ap.add_argument( + "--annotations-name", + "-a", + type=str, + help="Name of the annotation layer (default is annotations)", + default="annotation", + required=False, + ) + ap.add_argument( + "--moving-name", + "-m", + 
type=str, + help="Name of the moving image layer (default is moving)", + default="moving", + required=False, + ) + + +# TODO need to add some way to handle mapping the output affine +# For e.g. the points can be in XYZ order, but the image dims +# are CZYX order +# TODO allow input arg of layer name to be the moving and the fixed +# TODO don't actually need fixed in theory? +# TODO same for markers / annotations name def handle_args(): ap = argparse.ArgumentParser() neuroglancer.cli.add_state_arguments(ap, required=False) neuroglancer.cli.add_server_arguments(ap) + add_mapping_args(ap) args = ap.parse_args() neuroglancer.cli.handle_server_arguments(args) return args @@ -429,6 +513,6 @@ def handle_args(): if __name__ == "__main__": args = handle_args() - demo = LinearRegistrationWorkflow(args.state) + demo = LinearRegistrationWorkflow(args) webbrowser.open_new(demo.viewer.get_viewer_url()) From 0d467f5d4e1b20a219967c6d0444420e4fc95a68 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Mon, 22 Sep 2025 14:12:30 +0200 Subject: [PATCH 18/30] fix: correct a number of TODOs for lin reg --- .../examples/example_linear_registration.py | 163 ++++++++++-------- 1 file changed, 92 insertions(+), 71 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index ac6004180..b9924cc63 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -1,6 +1,7 @@ import argparse import threading import webbrowser +from copy import deepcopy from time import ctime, time import neuroglancer @@ -110,10 +111,22 @@ def create_demo_data(size: int | tuple = 60, radius: float = 20): return data -def create_dimensions(viewer_dims: neuroglancer.CoordinateSpace): - return neuroglancer.CoordinateSpace( - names=viewer_dims.names, units=viewer_dims.units, scales=viewer_dims.scales - ) +# TODO this should be more intelligent for the copy +# in that case it should take the original layers dimensions +# and actually map the names +# the problem is that right now we can't query those names from python +# since it wraps the state and that info isn't in the state if it is still the +# default information +def create_dimensions(viewer_dims: neuroglancer.CoordinateSpace, indices=None): + names = viewer_dims.names + units = viewer_dims.units + scales = viewer_dims.scales + if indices is not None: + names = [viewer_dims.names[i] for i in indices] + units = [viewer_dims.units[i] for i in indices] + scales = [viewer_dims.scales[i] for i in indices] + + return neuroglancer.CoordinateSpace(names=names, units=units, scales=scales) class LinearRegistrationWorkflow: @@ -127,6 +140,7 @@ def __init__(self, args): self.affine = None self.ready = False self.last_updated_print = -1 + self.two_coord_spaces = False self.viewer = neuroglancer.Viewer() self.viewer.shared_state.add_changed_callback( lambda: self.viewer.defer_callback(self.on_state_changed) @@ -140,7 +154,7 @@ def __init__(self, args): self._set_status_message( "help", - "Waiting for viewer to initialize with one layer called fixed and one layer called moving.", + "Waiting for viewer to initialize with one layer called moving.", ) def get_state(self): @@ -169,8 +183,6 @@ def transform_points_with_affine(self, points: np.ndarray): if self.affine is not None: return transform_points(self.affine, points) - # TODO change this to replace the transform matrix directly? 
- # so then only need one layer def toggle_registered_visibility(self, _): with self.viewer.txn() as s: is_registered_visible = s.layers["registered"].visible @@ -185,7 +197,7 @@ def add_fake_data_to_viewer(self): s.layers["fixed"] = fixed_layer s.layers[self.moving_name] = moving_layer - def setup_viewer(self): + def setup_viewer_actions(self): viewer = self.viewer viewer.actions.add( "toggle-registered-visibility", self.toggle_registered_visibility @@ -194,19 +206,40 @@ def setup_viewer(self): with viewer.config_state.txn() as s: s.input_event_bindings.viewer["keyt"] = "toggle-registered-visibility" + def is_fixed_image_space_last(self, dim_names): + first_name = dim_names[0] + return first_name[-1] in "0123456789" + + def init_registered_transform(self, s: neuroglancer.ViewerState, force=True): + if not force and s.layers["registered"].source[0].transform is not None: + return + # TODO this can fail to solve the no matrix issue if ends up as the identity + # think I need to change something in neuroglancer python for this instead + indices = None + if self.check_for_two_coord_spaces(s.dimensions.names): + self.coord_spaces = True + num_dims = len(s.dimensions.names) // 2 + if self.is_fixed_image_space_last(s.dimensions.names): + indices = list( + range(len(s.dimensions.names) - num_dims, len(s.dimensions.names)) + ) + else: + indices = list(range(num_dims)) + existing_transform = neuroglancer.CoordinateSpaceTransform( + output_dimensions=create_dimensions(s.dimensions, indices) + ) + s.layers["registered"].source[0].transform = existing_transform + print(s.layers["registered"].source[0].transform.matrix) + @debounce(0.5) - def post_setup_viewer(self): - # TODO why do we need the moving? In theory only at setup? + def check_viewer_ready_and_setup(self): with self.viewer.txn() as s: if s.dimensions.names == [] or s.layers.index(self.moving_name) == -1: return - # registered_layer = self.create_registered_image() - # TODO might be able to use deepcopy to avoid this akwardness - # of rename - s.layers[self.moving_name + "1"] = self.create_registered_image() - s.layers[self.moving_name + "1"].name = "registered" + s.layers["registered"] = self.create_registered_image() s.layers["registered"].visible = False - if s.layers.index("registered") == -1: + self.init_registered_transform(s) + if s.layers.index(self.annotations_name) == -1: s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( dimensions=create_dimensions(s.dimensions), annotation_properties=[ @@ -235,27 +268,32 @@ def post_setup_viewer(self): name for name in all_layer_names if name != self.moving_name ] group_2_names = [name for name in all_layer_names if name != "registered"] - # TODO in two coord space set the main one for group 2 - # TODO could load a link with layout already setup - # in which case should just add registered to group 1 - s.layout = neuroglancer.row_layout( - [ - neuroglancer.LayerGroupViewer(layers=group_1_names, layout="xy-3d"), - neuroglancer.LayerGroupViewer(layers=group_2_names, layout="xy-3d"), - ] - ) - s.layout.children[1].position.link = "unlinked" - s.layout.children[1].crossSectionOrientation.link = "unlinked" - s.layout.children[1].crossSectionScale.link = "unlinked" - s.layout.children[1].projectionOrientation.link = "unlinked" - s.layout.children[1].projectionScale.link = "unlinked" + if isinstance(s.layout, neuroglancer.DataPanelLayout): + s.layout = neuroglancer.row_layout( + [ + neuroglancer.LayerGroupViewer( + layers=group_1_names, layout="xy-3d" + ), + 
neuroglancer.LayerGroupViewer( + layers=group_2_names, layout="xy-3d" + ), + ] + ) + s.layout.children[1].position.link = "unlinked" + s.layout.children[1].crossSectionOrientation.link = "unlinked" + s.layout.children[1].crossSectionScale.link = "unlinked" + s.layout.children[1].projectionOrientation.link = "unlinked" + s.layout.children[1].projectionScale.link = "unlinked" + # TODO expand to unlink coords and make two coord spaces + else: + s.layout.children[0].layers.append("registered") self._set_status_message( "help", - "Place markers in pairs, starting with the fixed, and then the moving. The registered layer will automatically update as you add markers. Press 't' to toggle between viewing the fixed and registered layers.", + "Place markers in pairs, starting with the fixed, and then the moving. The registered layer will automatically update as you add markers. Press 't' to toggle visiblity of the registered layer.", ) self.ready = True - self.setup_viewer() + self.setup_viewer_actions() def on_state_changed(self): self.viewer.defer_callback(self.update) @@ -266,9 +304,10 @@ def update(self): print(f"Viewer states are successfully syncing at {ctime()}") self.last_updated_print = current_time if not self.ready: - self.post_setup_viewer() + self.check_viewer_ready_and_setup() return - self.automatically_group_markers_and_update() + if not self.two_coord_spaces: + self.automatically_group_markers_and_update() self.update_affine() self._clear_status_messages() @@ -315,10 +354,14 @@ def create_demo_moving_image(self): def create_registered_image(self): with self.viewer.txn() as s: - return s.layers[self.moving_name] + layer = deepcopy(s.layers[self.moving_name]) + layer.name = "registered" + return layer - # TODO this check could maybe be more robust def check_for_two_coord_spaces(self, dim_names): + # Dims should be exactly double the number of unique names + if len(dim_names) == 0 or len(dim_names) % 2 != 0: + return False set_of_names = set() for name in dim_names: # rstrip any number off the end @@ -327,20 +370,25 @@ def check_for_two_coord_spaces(self, dim_names): return len(set_of_names) * 2 == len(dim_names) def split_points_into_pairs(self, annotations, dim_names): + if len(annotations) == 0: + return np.zeros((0, 0)), np.zeros((0, 0)) two_coord_spaces = self.check_for_two_coord_spaces(dim_names) - # TODO need some way to indicate which coord space is which if two_coord_spaces: + real_dims_last = self.is_fixed_image_space_last(dim_names) num_points = len(annotations) num_dims = len(annotations[0].point) // 2 fixed_points = np.zeros((num_points, num_dims)) moving_points = np.zeros((num_points, num_dims)) for i, a in enumerate(annotations): for j in range(num_dims): - fixed_points[i, j] = a.point[j + num_dims] - moving_points[i, j] = a.point[j] + fixed_index = j + num_dims if real_dims_last else j + moving_index = j if real_dims_last else j + num_dims + fixed_points[i, j] = a.point[fixed_index] + moving_points[i, j] = a.point[moving_index] return np.array(fixed_points), np.array(moving_points) else: num_points = len(annotations) // 2 + annotations = annotations[: num_points * 2] num_dims = len(annotations[0].point) fixed_points = np.zeros((num_points, num_dims)) moving_points = np.zeros((num_points, num_dims)) @@ -353,7 +401,6 @@ def split_points_into_pairs(self, annotations, dim_names): return np.array(fixed_points), np.array(moving_points) - # TODO can disable this check completely if using two coord spaces def automatically_group_markers(self, s: neuroglancer.ViewerState): 
dimensions = s.dimensions.names if self.check_for_two_coord_spaces(dimensions): @@ -369,18 +416,14 @@ def automatically_group_markers(self, s: neuroglancer.ViewerState): return True def update_registered_layer(self, s: neuroglancer.ViewerState): - # TODO might need to add some mechanism to neuroglancer to do this - # this seems off that we can't directly set the transform matrix - # without first creating a new transform object - existing_transform = s.layers["registered"].source[0].transform - if existing_transform is None: - existing_transform = neuroglancer.CoordinateSpaceTransform( - output_dimensions=create_dimensions(s.dimensions) - ) - s.layers["registered"].source[0].transform = existing_transform + self.init_registered_transform(s, force=False) if self.affine is not None: + print(s.layers["registered"].source[0].transform.matrix) transform = self.affine.tolist() # TODO this is where that mapping needs to happen of affine dims + # overall this is a bit awkward right now, we need a lot of + # mapping info which we just don't have + # right now you can't input it from the command line if s.layers["registered"].source[0].transform is not None: final_transform = [] layer_transform = s.layers["registered"].source[0].transform @@ -404,21 +447,6 @@ def update_registered_layer(self, s: neuroglancer.ViewerState): else: final_transform = transform print("Updated affine transform:", final_transform) - # TODO for some reason with global dims not matching local dims - # nothing happens at this step - # TODO if global dims don't match local dims then the - # tranform matrix doesn't match properly - # the easiest to fix this would be to make the local dims of the - # markers be the same as the local dims of the image layers - # but we don't seem to always be able to access that info - # we could try to check similar to the above if a transform - # has been setup on viewer init and use that to get the - # names of the local dims which might be easier overall - # than trying to fiddle with these dims - # Worst comes to worst you could ask for a copy of the layer by the user - # with the dims if command line not specified - # but command line could spec the markers layer dims - # Try https://neuroglancer-demo.appspot.com/#!%7B%22dimensions%22:%7B%22x%22:%5B6.500000000000001e-7%2C%22m%22%5D%2C%22y%22:%5B6.500000000000001e-7%2C%22m%22%5D%2C%22z%22:%5B0.00003%2C%22m%22%5D%7D%2C%22position%22:%5B14231.224609375%2C30510.12109375%2C0%5D%2C%22crossSectionScale%22:121.51041751873487%2C%22projectionScale%22:131072%2C%22layers%22:%5B%7B%22type%22:%22image%22%2C%22source%22:%22s3://allen-genetic-tools/epifluorescence/1383646325/ome_zarr_conversion/1383646325.zarr/%7Czarr2:%22%2C%22localDimensions%22:%7B%22c%27%22:%5B1%2C%22%22%5D%7D%2C%22localPosition%22:%5B1%5D%2C%22tab%22:%22source%22%2C%22name%22:%221383646325.zarr%22%7D%5D%2C%22selectedLayer%22:%7B%22size%22:379%2C%22visible%22:true%2C%22layer%22:%221383646325.zarr%22%7D%2C%22layout%22:%224panel-alt%22%7D for this print(s.layers["registered"].source[0].transform) print(final_transform) s.layers["registered"].source[0].transform.matrix = final_transform @@ -430,7 +458,6 @@ def estimate_affine(self, s: neuroglancer.ViewerState): return False dim_names = s.dimensions.names - # TODO likely broken right now for non pairs non two coord spaces fixed_points, moving_points = self.split_points_into_pairs( annotations, dim_names ) @@ -494,12 +521,6 @@ def add_mapping_args(ap: argparse.ArgumentParser): ) -# TODO need to add some way to handle mapping the output affine 
-# For e.g. the points can be in XYZ order, but the image dims -# are CZYX order -# TODO allow input arg of layer name to be the moving and the fixed -# TODO don't actually need fixed in theory? -# TODO same for markers / annotations name def handle_args(): ap = argparse.ArgumentParser() neuroglancer.cli.add_state_arguments(ap, required=False) From c812212655ce4d0ef66d2e254e06762467a2a242 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Thu, 25 Sep 2025 11:07:44 +0200 Subject: [PATCH 19/30] feat: add different fits to reg workflow --- .../examples/example_linear_registration.py | 83 +++++++++++++++++-- 1 file changed, 75 insertions(+), 8 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index b9924cc63..1aeb460ac 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -45,14 +45,81 @@ def debounced(*args, **kwargs): return decorator -# TODO add other types of fits # Inspired by https://github.com/AllenInstitute/render-python/blob/master/renderapi/transform/leaf/affine_models.py -def affine_fit(fixed_points: np.ndarray, moving_points: np.ndarray): - # Points are NxD arrays + + +def fit_model(fixed_points: np.ndarray, moving_points: np.ndarray): assert fixed_points.shape == moving_points.shape - N = fixed_points.shape[0] - D = fixed_points.shape[1] - T = fixed_points + N, D = fixed_points.shape + + if N == 1: + return translation_fit(fixed_points, moving_points) + if N == 2: + return rigid_or_similarity_fit(fixed_points, moving_points, rigid=True) + if N == 3 and D == 2: + return affine_fit(fixed_points, moving_points) + if N == 3 and D > 2: + return rigid_or_similarity_fit(fixed_points, moving_points, rigid=False) + return affine_fit(fixed_points, moving_points) + + +# See https://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem +# and https://math.nist.gov/~JBernal/kujustf.pdf +def rigid_or_similarity_fit( + fixed_points: np.ndarray, moving_points: np.ndarray, rigid: bool = True +): + N, D = fixed_points.shape + + # Remove translation aspect to first determine rotation/scale + X = fixed_points - fixed_points.mean(axis=0) + Y = moving_points - moving_points.mean(axis=0) + + # Cross-covariance + sigma = (Y.T @ X) / N + + # SVD - Unitary matrix, Diagonal, conjugate transpose of unitary matrix + U, S, Vt = np.linalg.svd(sigma) # Sigma ≈ U diag(S) V* + + d = np.ones(D) + if np.linalg.det(U @ Vt) < 0: + d[-1] = -1 + R = U @ np.diag(d) @ Vt + + # Scale + if rigid: + s = 1.0 + else: + var_src = (X**2).sum() / N # sum of variances across dims + s = (S * d).sum() / var_src + + # Translation + t = Y - s * (R @ X) + + # Homogeneous (D+1)x(D+1) + T = np.zeros((D, D + 1)) + T[:D, :D] = s * R + T[:, -1] = t + + affine = np.round(T, decimals=3) + return affine + + +# TODO bring these fits together a bit more nicely +def translation_fit(fixed_points: np.ndarray, moving_points: np.ndarray): + N, D = fixed_points.shape + + estimated_translation = np.mean(moving_points - fixed_points, axis=0) + + affine = np.zeros((D, D + 1)) + affine[:, :D] = np.eye(D) + affine[:, -1] = estimated_translation + + affine = np.round(affine, decimals=3) + return affine + + +def affine_fit(fixed_points: np.ndarray, moving_points: np.ndarray): + N, D = fixed_points.shape # Target values (B) is a D * N array # Input values (A) is a D * N, (D * (D + 1)) array @@ -62,7 +129,7 @@ def affine_fit(fixed_points: np.ndarray, moving_points: np.ndarray): for j in range(D): start_index = j * D end_index = (j + 
1) * D - A[D * i + j, start_index:end_index] = T[i] + A[D * i + j, start_index:end_index] = fixed_points[i] A[D * i + j, D * D + j] = 1 B = moving_points.flatten() @@ -468,7 +535,7 @@ def estimate_affine(self, s: neuroglancer.ViewerState): np.isclose(self.stored_points[1], moving_points) ): return False - self.affine = affine_fit(moving_points, fixed_points) + self.affine = fit_model(moving_points, fixed_points) self.update_registered_layer(s) self._set_status_message( From 4cddc66aabfe46e32b9790e320b7118018042ba0 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Thu, 25 Sep 2025 12:09:38 +0200 Subject: [PATCH 20/30] feat(python): add layout display dims unlinked to state --- python/neuroglancer/__init__.py | 1 + python/neuroglancer/viewer_state.py | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/python/neuroglancer/__init__.py b/python/neuroglancer/__init__.py index 4799b4724..51e179fa6 100644 --- a/python/neuroglancer/__init__.py +++ b/python/neuroglancer/__init__.py @@ -107,6 +107,7 @@ LocalAnnotationLayer, # noqa: F401 ManagedLayer, # noqa: F401 Layers, # noqa: F401 + LinkedDisplayDimensions, # noqa: F401 LinkedPosition, # noqa: F401 LinkedZoomFactor, # noqa: F401 LinkedDepthRange, # noqa: F401 diff --git a/python/neuroglancer/viewer_state.py b/python/neuroglancer/viewer_state.py index 3aff9d396..7f384b011 100644 --- a/python/neuroglancer/viewer_state.py +++ b/python/neuroglancer/viewer_state.py @@ -1482,6 +1482,14 @@ class Linked(LinkedType): return Linked +if typing.TYPE_CHECKING or _BUILDING_DOCS: + _LinkedDisplayDimensionsBase = LinkedType[list[str]] +else: + _LinkedDisplayDimensionsBase = make_linked_navigation_type(typed_list(str), lambda x: x) + +@export +class LinkedDisplayDimensions(_LinkedDisplayDimensionsBase): + __slots__ = () if typing.TYPE_CHECKING or _BUILDING_DOCS: _LinkedPositionBase = LinkedType[np.typing.NDArray[np.float32]] @@ -1710,6 +1718,9 @@ class LayerGroupViewer(JsonObjectWrapper): layers = wrapped_property("layers", typed_list(str)) layout = wrapped_property("layout", data_panel_layout_wrapper("xy")) position = wrapped_property("position", LinkedPosition) + display_dimensions = displayDimensions = wrapped_property( + "displayDimensions", LinkedDisplayDimensions + ) velocity = wrapped_property( "velocity", typed_map(key_type=str, value_type=DimensionPlaybackVelocity) ) From 83a1b308ef944ebad4690c25edfa368546b377cb Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Thu, 25 Sep 2025 12:09:57 +0200 Subject: [PATCH 21/30] refactor: in progress with large lin reg workflow improvement --- .../examples/example_linear_registration.py | 308 +++++++++--------- 1 file changed, 154 insertions(+), 154 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 1aeb460ac..d9755c20b 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -12,6 +12,7 @@ MESSAGE_DURATION = 5 # seconds NUM_DEMO_DIMS = 2 # Currently can be 2D or 3D +# TODO may not be needed, depends on how we handle the two coord spaces MARKERS_SHADER = """ #uicontrol vec3 fixedPointColor color(default="#00FF00") #uicontrol vec3 movingPointColor color(default="#0000FF") @@ -160,7 +161,8 @@ def transform_points(affine: np.ndarray, points: np.ndarray): return transformed -def create_demo_data(size: int | tuple = 60, radius: float = 20): +# Only used if no data provided +def _create_demo_data(size: int | tuple = 60, radius: float = 20): data_size = (size,) * NUM_DEMO_DIMS if 
isinstance(size, int) else size data = np.zeros(data_size, dtype=np.uint8) if NUM_DEMO_DIMS == 2: @@ -178,6 +180,40 @@ def create_demo_data(size: int | tuple = 60, radius: float = 20): return data +# Only used if no data provided +def _create_demo_fixed_image(): + return neuroglancer.ImageLayer( + source=[ + neuroglancer.LayerDataSource(neuroglancer.LocalVolume(_create_demo_data())) + ] + ) + + +def _create_demo_moving_image(): + if NUM_DEMO_DIMS == 2: + desired_output_matrix_homogenous = [ + [0.8, 0, 0], + [0, 0.2, 0], + [0, 0, 1], + ] + else: + desired_output_matrix_homogenous = [ + [0.8, 0, 0, 0], + [0, 0.2, 0, 0], + [0, 0, 0.9, 0], + [0, 0, 0, 1], + ] + inverse_matrix = np.linalg.inv(desired_output_matrix_homogenous) + transformed = scipy.ndimage.affine_transform( + _create_demo_data(), + matrix=inverse_matrix, + ) + print("target demo affine", inverse_matrix) + return neuroglancer.ImageLayer( + source=[neuroglancer.LayerDataSource(neuroglancer.LocalVolume(transformed))] + ) + + # TODO this should be more intelligent for the copy # in that case it should take the original layers dimensions # and actually map the names @@ -199,7 +235,6 @@ def create_dimensions(viewer_dims: neuroglancer.CoordinateSpace, indices=None): class LinearRegistrationWorkflow: def __init__(self, args): starting_state = args.state - self.moving_name = args.moving_name self.annotations_name = args.annotations_name self.status_timers = {} self.stored_points = [[], []] @@ -214,55 +249,101 @@ def __init__(self, args): ) if starting_state is None: - self.demo_data = create_demo_data() - self.add_fake_data_to_viewer() + self._add_demo_data_to_viewer() else: self.viewer.set_state(starting_state) self._set_status_message( "help", - "Waiting for viewer to initialize with one layer called moving.", + "Place fixed (reference) layers in the left hand panel, and moving layers (to be registered) in the right hand panel. Then press 't' once you have completed this setup.", ) - - def get_state(self): with self.viewer.txn() as s: - return s + self.create_two_panel_layout(s) + self.setup_viewer_actions() - def __str__(self): - return str(self.get_state()) + def update(self): + """Primary update loop, called whenever the viewer state changes.""" + current_time = time() + if current_time - self.last_updated_print > 5: + print(f"Viewer states are successfully syncing at {ctime()}") + self.last_updated_print = current_time + return # for now don't do regular actions + if not self.two_coord_spaces: + self.automatically_group_markers_and_update() + self.update_affine() + self._clear_status_messages() - def _clear_status_messages(self): - to_pop = [] - for k, v in self.status_timers.items(): - if time() - v > MESSAGE_DURATION: - to_pop.append(k) - for k in to_pop: - with self.viewer.config_state.txn() as s: - s.status_messages.pop(k, None) - self.status_timers.pop(k) + def setup_viewer(self): + with self.viewer.txn() as s: + self.init_coord_spaces(s) + self.create_registration_layers(s) + self._set_status_message( + "help", + "Place markers in pairs, starting with the fixed, and then the moving. The registered layer will automatically update as you add markers. 
Press 't' to toggle visiblity of the registered layer.", + ) + self.ready = True - def _set_status_message(self, key: str, message: str): - with self.viewer.config_state.txn() as s: - s.status_messages[key] = message - self.status_timers[key] = time() + def create_two_panel_layout(self, s: neuroglancer.ViewerState): + all_layer_names = [layer.name for layer in s.layers] + half_point = len(all_layer_names) // 2 + group1_names = all_layer_names[:half_point] + group2_names = all_layer_names[half_point:] + s.layout = neuroglancer.row_layout( + [ + neuroglancer.LayerGroupViewer(layers=group1_names, layout="xy-3d"), + neuroglancer.LayerGroupViewer(layers=group2_names, layout="xy-3d"), + ] + ) + s.layout.children[1].position.link = "unlinked" + s.layout.children[1].position.value = [512, 0, 40] + s.layout.children[1].crossSectionOrientation.link = "unlinked" + s.layout.children[1].crossSectionScale.link = "unlinked" + s.layout.children[1].projectionOrientation.link = "unlinked" + s.layout.children[1].projectionScale.link = "unlinked" + + def create_registration_layers(self, s: neuroglancer.ViewerState): + # TODO consider making a copy of registered layers in left panel + self.init_registered_transform(s) + # Make the annotation layer if needed + # TODO probably don't need the properties, to be confirmed if + # one co-ord space is fine or need two + if s.layers.index(self.annotations_name) == -1: + s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( + dimensions=create_dimensions(s.dimensions), + # annotation_properties=[ + # neuroglancer.AnnotationPropertySpec( + # id="label", + # type="uint32", + # default=0, + # ), + # neuroglancer.AnnotationPropertySpec( + # id="group", + # type="uint8", + # default=0, + # enum_labels=["fixed", "moving"], + # enum_values=[0, 1], + # ), + # ], + # shader=MARKERS_SHADER, + ) + s.layers[self.annotations_name].tool = "annotatePoint" + s.selected_layer.layer = self.annotations_name + s.selected_layer.visible = True + s.layout.children[0].layers.append(self.annotations_name) + s.layout.children[1].layers.append(self.annotations_name) - def transform_points_with_affine(self, points: np.ndarray): - if self.affine is not None: - return transform_points(self.affine, points) + def init_coord_spaces(self, s: neuroglancer.ViewerState): + dimensions = s.dimensions.names + s.layout.children[1].displayDimensions.link = "unlinked" + print(s.layout.children[1].displayDimensions.value) + s.layout.children[1].displayDimensions.value = [d + "2" for d in dimensions] + # Now I think we need some help from Python, which would be to get the + # current transform and replace all the names by the above for the output def toggle_registered_visibility(self, _): - with self.viewer.txn() as s: - is_registered_visible = s.layers["registered"].visible - s.layers["registered"].visible = not is_registered_visible - - def add_fake_data_to_viewer(self): - viewer = self.viewer - fixed_layer = self.create_demo_fixed_image() - moving_layer = self.create_demo_moving_image() - - with viewer.txn() as s: - s.layers["fixed"] = fixed_layer - s.layers[self.moving_name] = moving_layer + self.setup_viewer() + # is_registered_visible = s.layers["registered"].visible + # s.layers["registered"].visible = not is_registered_visible def setup_viewer_actions(self): viewer = self.viewer @@ -277,6 +358,7 @@ def is_fixed_image_space_last(self, dim_names): first_name = dim_names[0] return first_name[-1] in "0123456789" + # TODO this shouldn't be needed, need to check main python side def 
init_registered_transform(self, s: neuroglancer.ViewerState, force=True): if not force and s.layers["registered"].source[0].transform is not None: return @@ -298,86 +380,9 @@ def init_registered_transform(self, s: neuroglancer.ViewerState, force=True): s.layers["registered"].source[0].transform = existing_transform print(s.layers["registered"].source[0].transform.matrix) - @debounce(0.5) - def check_viewer_ready_and_setup(self): - with self.viewer.txn() as s: - if s.dimensions.names == [] or s.layers.index(self.moving_name) == -1: - return - s.layers["registered"] = self.create_registered_image() - s.layers["registered"].visible = False - self.init_registered_transform(s) - if s.layers.index(self.annotations_name) == -1: - s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( - dimensions=create_dimensions(s.dimensions), - annotation_properties=[ - neuroglancer.AnnotationPropertySpec( - id="label", - type="uint32", - default=0, - ), - neuroglancer.AnnotationPropertySpec( - id="group", - type="uint8", - default=0, - enum_labels=["fixed", "moving"], - enum_values=[0, 1], - ), - ], - shader=MARKERS_SHADER, - ) - s.layers[self.moving_name].visible = True - s.layers[self.annotations_name].tool = "annotatePoint" - s.selected_layer.layer = self.annotations_name - s.selected_layer.visible = True - - all_layer_names = [layer.name for layer in s.layers] - group_1_names = [ - name for name in all_layer_names if name != self.moving_name - ] - group_2_names = [name for name in all_layer_names if name != "registered"] - if isinstance(s.layout, neuroglancer.DataPanelLayout): - s.layout = neuroglancer.row_layout( - [ - neuroglancer.LayerGroupViewer( - layers=group_1_names, layout="xy-3d" - ), - neuroglancer.LayerGroupViewer( - layers=group_2_names, layout="xy-3d" - ), - ] - ) - s.layout.children[1].position.link = "unlinked" - s.layout.children[1].crossSectionOrientation.link = "unlinked" - s.layout.children[1].crossSectionScale.link = "unlinked" - s.layout.children[1].projectionOrientation.link = "unlinked" - s.layout.children[1].projectionScale.link = "unlinked" - # TODO expand to unlink coords and make two coord spaces - else: - s.layout.children[0].layers.append("registered") - - self._set_status_message( - "help", - "Place markers in pairs, starting with the fixed, and then the moving. The registered layer will automatically update as you add markers. 
Press 't' to toggle visiblity of the registered layer.", - ) - self.ready = True - self.setup_viewer_actions() - def on_state_changed(self): self.viewer.defer_callback(self.update) - def update(self): - current_time = time() - if current_time - self.last_updated_print > 5: - print(f"Viewer states are successfully syncing at {ctime()}") - self.last_updated_print = current_time - if not self.ready: - self.check_viewer_ready_and_setup() - return - if not self.two_coord_spaces: - self.automatically_group_markers_and_update() - self.update_affine() - self._clear_status_messages() - @debounce(0.25) def automatically_group_markers_and_update(self): with self.viewer.txn() as s: @@ -388,37 +393,6 @@ def update_affine(self): with self.viewer.txn() as s: self.estimate_affine(s) - def create_demo_fixed_image(self): - return neuroglancer.ImageLayer( - source=[ - neuroglancer.LayerDataSource(neuroglancer.LocalVolume(self.demo_data)) - ] - ) - - def create_demo_moving_image(self): - if NUM_DEMO_DIMS == 2: - desired_output_matrix_homogenous = [ - [0.8, 0, 0], - [0, 0.2, 0], - [0, 0, 1], - ] - else: - desired_output_matrix_homogenous = [ - [0.8, 0, 0, 0], - [0, 0.2, 0, 0], - [0, 0, 0.9, 0], - [0, 0, 0, 1], - ] - inverse_matrix = np.linalg.inv(desired_output_matrix_homogenous) - transformed = scipy.ndimage.affine_transform( - self.demo_data, - matrix=inverse_matrix, - ) - print("target demo affine", inverse_matrix) - return neuroglancer.ImageLayer( - source=[neuroglancer.LayerDataSource(neuroglancer.LocalVolume(transformed))] - ) - def create_registered_image(self): with self.viewer.txn() as s: layer = deepcopy(s.layers[self.moving_name]) @@ -568,6 +542,40 @@ def dump_info(self, path: str): with open(path, "w") as f: json.dump(info, f, indent=4) + def get_state(self): + with self.viewer.txn() as s: + return s + + def __str__(self): + return str(self.get_state()) + + def _clear_status_messages(self): + to_pop = [] + for k, v in self.status_timers.items(): + if time() - v > MESSAGE_DURATION: + to_pop.append(k) + for k in to_pop: + with self.viewer.config_state.txn() as s: + s.status_messages.pop(k, None) + self.status_timers.pop(k) + + def _set_status_message(self, key: str, message: str): + with self.viewer.config_state.txn() as s: + s.status_messages[key] = message + self.status_timers[key] = time() + + def transform_points_with_affine(self, points: np.ndarray): + if self.affine is not None: + return transform_points(self.affine, points) + + def _add_demo_data_to_viewer(self): + fixed_layer = _create_demo_fixed_image() + moving_layer = _create_demo_moving_image() + + with self.viewer.txn() as s: + s.layers["fixed"] = fixed_layer + s.layers["moving"] = moving_layer + def add_mapping_args(ap: argparse.ArgumentParser): ap.add_argument( @@ -578,14 +586,6 @@ def add_mapping_args(ap: argparse.ArgumentParser): default="annotation", required=False, ) - ap.add_argument( - "--moving-name", - "-m", - type=str, - help="Name of the moving image layer (default is moving)", - default="moving", - required=False, - ) def handle_args(): From bcafa3cf4e4c93286118067706b156361f2b1e2a Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Mon, 29 Sep 2025 12:17:05 +0200 Subject: [PATCH 22/30] feat: filling out full lin reg workflow --- .../examples/example_linear_registration.py | 267 ++++++++++-------- 1 file changed, 151 insertions(+), 116 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index d9755c20b..a5980424c 100644 --- 
a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -11,6 +11,7 @@ MESSAGE_DURATION = 5 # seconds NUM_DEMO_DIMS = 2 # Currently can be 2D or 3D +AFFINE_NUM_DECIMALS = 4 # TODO may not be needed, depends on how we handle the two coord spaces MARKERS_SHADER = """ @@ -46,10 +47,12 @@ def debounced(*args, **kwargs): return decorator -# Inspired by https://github.com/AllenInstitute/render-python/blob/master/renderapi/transform/leaf/affine_models.py - - def fit_model(fixed_points: np.ndarray, moving_points: np.ndarray): + """ + Choose the appropriate model based on number of points and dimensions. + + Inspired by https://github.com/AllenInstitute/render-python/blob/master/renderapi/transform/leaf/affine_models.py + """ assert fixed_points.shape == moving_points.shape N, D = fixed_points.shape @@ -99,13 +102,12 @@ def rigid_or_similarity_fit( # Homogeneous (D+1)x(D+1) T = np.zeros((D, D + 1)) T[:D, :D] = s * R - T[:, -1] = t + T[:, -1] = np.diagonal(t) - affine = np.round(T, decimals=3) + affine = np.round(T, decimals=AFFINE_NUM_DECIMALS) return affine -# TODO bring these fits together a bit more nicely def translation_fit(fixed_points: np.ndarray, moving_points: np.ndarray): N, D = fixed_points.shape @@ -115,7 +117,7 @@ def translation_fit(fixed_points: np.ndarray, moving_points: np.ndarray): affine[:, :D] = np.eye(D) affine[:, -1] = estimated_translation - affine = np.round(affine, decimals=3) + affine = np.round(affine, decimals=AFFINE_NUM_DECIMALS) return affine @@ -130,9 +132,9 @@ def affine_fit(fixed_points: np.ndarray, moving_points: np.ndarray): for j in range(D): start_index = j * D end_index = (j + 1) * D - A[D * i + j, start_index:end_index] = fixed_points[i] + A[D * i + j, start_index:end_index] = moving_points[i] A[D * i + j, D * D + j] = 1 - B = moving_points.flatten() + B = fixed_points.flatten() # The estimated affine transform params will be flattened # and there will be D * (D + 1) of them @@ -148,12 +150,12 @@ def affine_fit(fixed_points: np.ndarray, moving_points: np.ndarray): affine[i, -1] = tvec[D * D + i] # Round to close decimals - affine = np.round(affine, decimals=3) + affine = np.round(affine, decimals=AFFINE_NUM_DECIMALS) return affine def transform_points(affine: np.ndarray, points: np.ndarray): - # Apply the current affine transform to the points + # Apply the affine transform to the points transformed = np.zeros_like(points) padded = np.pad(points, ((0, 0), (0, 1)), constant_values=1) for i in range(len(points)): @@ -189,6 +191,7 @@ def _create_demo_fixed_image(): ) +# Only used if no data provided def _create_demo_moving_image(): if NUM_DEMO_DIMS == 2: desired_output_matrix_homogenous = [ @@ -214,20 +217,22 @@ def _create_demo_moving_image(): ) -# TODO this should be more intelligent for the copy -# in that case it should take the original layers dimensions -# and actually map the names -# the problem is that right now we can't query those names from python -# since it wraps the state and that info isn't in the state if it is still the -# default information +def change_coord_names(dims: neuroglancer.CoordinateSpace, name_mod): + return neuroglancer.CoordinateSpace( + names=[n + name_mod for n in dims.names], + units=dims.units, + scales=dims.scales, + ) + + def create_dimensions(viewer_dims: neuroglancer.CoordinateSpace, indices=None): names = viewer_dims.names units = viewer_dims.units scales = viewer_dims.scales if indices is not None: - names = [viewer_dims.names[i] for i in indices] - units = 
[viewer_dims.units[i] for i in indices] - scales = [viewer_dims.scales[i] for i in indices] + names = [names[i] for i in indices] + units = [units[i] for i in indices] + scales = [scales[i] for i in indices] return neuroglancer.CoordinateSpace(names=names, units=units, scales=scales) @@ -235,14 +240,17 @@ def create_dimensions(viewer_dims: neuroglancer.CoordinateSpace, indices=None): class LinearRegistrationWorkflow: def __init__(self, args): starting_state = args.state + self.two_coord_spaces = not args.single_coord_space self.annotations_name = args.annotations_name self.status_timers = {} self.stored_points = [[], []] + self.stored_moving_dims = {} + self.moving_layer_names = [] self.stored_group_number = -1 self.affine = None + self.co_ords_ready = False self.ready = False self.last_updated_print = -1 - self.two_coord_spaces = False self.viewer = neuroglancer.Viewer() self.viewer.shared_state.add_changed_callback( lambda: self.viewer.defer_callback(self.on_state_changed) @@ -258,7 +266,7 @@ def __init__(self, args): "Place fixed (reference) layers in the left hand panel, and moving layers (to be registered) in the right hand panel. Then press 't' once you have completed this setup.", ) with self.viewer.txn() as s: - self.create_two_panel_layout(s) + self.setup_two_panel_layout(s) self.setup_viewer_actions() def update(self): @@ -267,43 +275,63 @@ def update(self): if current_time - self.last_updated_print > 5: print(f"Viewer states are successfully syncing at {ctime()}") self.last_updated_print = current_time - return # for now don't do regular actions - if not self.two_coord_spaces: - self.automatically_group_markers_and_update() - self.update_affine() - self._clear_status_messages() + # TODO make ready a status instead of two vars + # TODO overall update the class attributes at the end to cleaner + if self.co_ords_ready and not self.ready: + with self.viewer.txn() as s: + self.setup_registration_layers(s) + if self.ready: + if not self.two_coord_spaces: + self.automatically_group_markers_and_update() + self.update_affine() + self._clear_status_messages() def setup_viewer(self): - with self.viewer.txn() as s: - self.init_coord_spaces(s) - self.create_registration_layers(s) + self.setup_second_coord_space() self._set_status_message( "help", "Place markers in pairs, starting with the fixed, and then the moving. The registered layer will automatically update as you add markers. 
Press 't' to toggle visiblity of the registered layer.", ) - self.ready = True + self.co_ords_ready = True - def create_two_panel_layout(self, s: neuroglancer.ViewerState): + def setup_two_panel_layout(self, s: neuroglancer.ViewerState): all_layer_names = [layer.name for layer in s.layers] - half_point = len(all_layer_names) // 2 - group1_names = all_layer_names[:half_point] - group2_names = all_layer_names[half_point:] + if len(all_layer_names) >= 2: + half_point = len(all_layer_names) // 2 + group1_names = all_layer_names[:half_point] + group2_names = all_layer_names[half_point:] + else: + group1_names = all_layer_names + group2_names = all_layer_names s.layout = neuroglancer.row_layout( [ neuroglancer.LayerGroupViewer(layers=group1_names, layout="xy-3d"), neuroglancer.LayerGroupViewer(layers=group2_names, layout="xy-3d"), ] ) - s.layout.children[1].position.link = "unlinked" - s.layout.children[1].position.value = [512, 0, 40] + # Unliked position solves rendering problem but makes navigation awkward + # s.layout.children[1].position.link = "unlinked" + # In theory we could make keep unlinked and then on state change check + # but that could be not worth compared to trying to improve rendering s.layout.children[1].crossSectionOrientation.link = "unlinked" s.layout.children[1].crossSectionScale.link = "unlinked" s.layout.children[1].projectionOrientation.link = "unlinked" s.layout.children[1].projectionScale.link = "unlinked" - def create_registration_layers(self, s: neuroglancer.ViewerState): - # TODO consider making a copy of registered layers in left panel - self.init_registered_transform(s) + def setup_second_coord_space(self): + if not self.moving_layer_names: + moving_layers = self.get_state().layout.children[1].layers + self.moving_layer_names = moving_layers + self._moving_idx = 0 + layer_name = self.moving_layer_names[self._moving_idx] + info_future = self.viewer.volume_info(layer_name) + info_future.add_done_callback(lambda f: self.save_coord_space_info(f)) + + def setup_registration_layers(self, s: neuroglancer.ViewerState): + dimensions = s.dimensions + # It is possible that the dimensions are not ready yet, return if so + if len(dimensions.names) != self.num_dims: + return # Make the annotation layer if needed # TODO probably don't need the properties, to be confirmed if # one co-ord space is fine or need two @@ -331,14 +359,39 @@ def create_registration_layers(self, s: neuroglancer.ViewerState): s.selected_layer.visible = True s.layout.children[0].layers.append(self.annotations_name) s.layout.children[1].layers.append(self.annotations_name) + self.setup_panel_coordinates(s) + self.ready = True - def init_coord_spaces(self, s: neuroglancer.ViewerState): + def setup_panel_coordinates(self, s: neuroglancer.ViewerState): dimensions = s.dimensions.names s.layout.children[1].displayDimensions.link = "unlinked" - print(s.layout.children[1].displayDimensions.value) - s.layout.children[1].displayDimensions.value = [d + "2" for d in dimensions] - # Now I think we need some help from Python, which would be to get the - # current transform and replace all the names by the above for the output + s.layout.children[1].displayDimensions.value = self.output_dim_names[:3] + s.layout.children[0].displayDimensions.link = "unlinked" + s.layout.children[0].displayDimensions.value = self.input_dim_names[:3] + + def save_coord_space_info(self, info_future): + result = info_future.result() + self.moving_name = self.moving_layer_names[self._moving_idx] + self.stored_moving_dims[self.moving_name] = 
result.dimensions + done = len(self.stored_moving_dims) == len(self.moving_layer_names) + if not done: + self._moving_idx += 1 + self.setup_second_coord_space() + return + # If we get here we have all the coord spaces ready and can update viewer + with self.viewer.txn() as s: + for layer_name in self.moving_layer_names: + input_dims = self.stored_moving_dims[layer_name] + output_dims = change_coord_names(input_dims, "2") + self.input_dim_names = input_dims.names + self.output_dim_names = output_dims.names + self.num_dims = len(input_dims.names) * 2 + new_coord_space = neuroglancer.CoordinateSpaceTransform( + input_dimensions=input_dims, + output_dimensions=output_dims, + ) + for source in s.layers[layer_name].source: + source.transform = new_coord_space def toggle_registered_visibility(self, _): self.setup_viewer() @@ -348,37 +401,16 @@ def toggle_registered_visibility(self, _): def setup_viewer_actions(self): viewer = self.viewer viewer.actions.add( - "toggle-registered-visibility", self.toggle_registered_visibility + "toggleRegisteredVisibility", self.toggle_registered_visibility ) with viewer.config_state.txn() as s: - s.input_event_bindings.viewer["keyt"] = "toggle-registered-visibility" + s.input_event_bindings.viewer["keyt"] = "toggleRegisteredVisibility" + s.input_event_bindings.viewer["keyp"] = "screenshotStatistics" def is_fixed_image_space_last(self, dim_names): first_name = dim_names[0] - return first_name[-1] in "0123456789" - - # TODO this shouldn't be needed, need to check main python side - def init_registered_transform(self, s: neuroglancer.ViewerState, force=True): - if not force and s.layers["registered"].source[0].transform is not None: - return - # TODO this can fail to solve the no matrix issue if ends up as the identity - # think I need to change something in neuroglancer python for this instead - indices = None - if self.check_for_two_coord_spaces(s.dimensions.names): - self.coord_spaces = True - num_dims = len(s.dimensions.names) // 2 - if self.is_fixed_image_space_last(s.dimensions.names): - indices = list( - range(len(s.dimensions.names) - num_dims, len(s.dimensions.names)) - ) - else: - indices = list(range(num_dims)) - existing_transform = neuroglancer.CoordinateSpaceTransform( - output_dimensions=create_dimensions(s.dimensions, indices) - ) - s.layers["registered"].source[0].transform = existing_transform - print(s.layers["registered"].source[0].transform.matrix) + return first_name not in self.input_dim_names def on_state_changed(self): self.viewer.defer_callback(self.update) @@ -399,22 +431,10 @@ def create_registered_image(self): layer.name = "registered" return layer - def check_for_two_coord_spaces(self, dim_names): - # Dims should be exactly double the number of unique names - if len(dim_names) == 0 or len(dim_names) % 2 != 0: - return False - set_of_names = set() - for name in dim_names: - # rstrip any number off the end - stripped_name = name.rstrip("0123456789") - set_of_names.add(stripped_name) - return len(set_of_names) * 2 == len(dim_names) - def split_points_into_pairs(self, annotations, dim_names): if len(annotations) == 0: return np.zeros((0, 0)), np.zeros((0, 0)) - two_coord_spaces = self.check_for_two_coord_spaces(dim_names) - if two_coord_spaces: + if self.two_coord_spaces: real_dims_last = self.is_fixed_image_space_last(dim_names) num_points = len(annotations) num_dims = len(annotations[0].point) // 2 @@ -444,7 +464,7 @@ def split_points_into_pairs(self, annotations, dim_names): def automatically_group_markers(self, s: 
neuroglancer.ViewerState): dimensions = s.dimensions.names - if self.check_for_two_coord_spaces(dimensions): + if self.two_coord_spaces: return False annotations = s.layers[self.annotations_name].annotations if len(annotations) == self.stored_group_number: @@ -456,46 +476,53 @@ def automatically_group_markers(self, s: neuroglancer.ViewerState): a.props = [i // 2, i % 2] return True - def update_registered_layer(self, s: neuroglancer.ViewerState): - self.init_registered_transform(s, force=False) + def update_registered_layers(self, s: neuroglancer.ViewerState): if self.affine is not None: - print(s.layers["registered"].source[0].transform.matrix) transform = self.affine.tolist() + # TODO handle layer being renamed + for k, v in self.stored_moving_dims.items(): + # TODO not sure if need to handle local channels here + # keeping code below just in case + for source in s.layers[k].source: + source.transform = neuroglancer.CoordinateSpaceTransform( + input_dimensions=v, + output_dimensions=change_coord_names(v, "2"), + matrix=transform, + ) + + # print(s.layers["registered"].source[0].transform.matrix) # TODO this is where that mapping needs to happen of affine dims # overall this is a bit awkward right now, we need a lot of # mapping info which we just don't have # right now you can't input it from the command line - if s.layers["registered"].source[0].transform is not None: - final_transform = [] - layer_transform = s.layers["registered"].source[0].transform - local_channel_indices = [ - i - for i, name in enumerate(layer_transform.outputDimensions.names) - if name.endswith(("'", "^", "#")) - ] - num_local_count = 0 - for i, name in enumerate(layer_transform.outputDimensions.names): - is_local = i in local_channel_indices - if is_local: - final_transform.append(layer_transform.matrix[i].tolist()) - num_local_count += 1 - else: - row = transform[i - num_local_count] - # At the indices corresponding to local channels, insert 0s - for j in local_channel_indices: - row.insert(j, 0) - final_transform.append(row) - else: - final_transform = transform - print("Updated affine transform:", final_transform) - print(s.layers["registered"].source[0].transform) - print(final_transform) - s.layers["registered"].source[0].transform.matrix = final_transform + # if s.layers["registered"].source[0].transform is not None: + # final_transform = [] + # layer_transform = s.layers["registered"].source[0].transform + # local_channel_indices = [ + # i + # for i, name in enumerate(layer_transform.outputDimensions.names) + # if name.endswith(("'", "^", "#")) + # ] + # num_local_count = 0 + # for i, name in enumerate(layer_transform.outputDimensions.names): + # is_local = i in local_channel_indices + # if is_local: + # final_transform.append(layer_transform.matrix[i].tolist()) + # num_local_count += 1 + # else: + # row = transform[i - num_local_count] + # # At the indices corresponding to local channels, insert 0s + # for j in local_channel_indices: + # row.insert(j, 0) + # final_transform.append(row) + # else: + # final_transform = transform + print("Updated affine transform:", transform) print(s.layers["registered"].source[0].transform) def estimate_affine(self, s: neuroglancer.ViewerState): annotations = s.layers[self.annotations_name].annotations - if len(annotations) < 1: + if len(annotations) == 0: return False dim_names = s.dimensions.names @@ -509,8 +536,8 @@ def estimate_affine(self, s: neuroglancer.ViewerState): np.isclose(self.stored_points[1], moving_points) ): return False - self.affine = 
fit_model(moving_points, fixed_points) - self.update_registered_layer(s) + self.affine = fit_model(fixed_points, moving_points) + self.update_registered_layers(s) self._set_status_message( "info", @@ -586,6 +613,14 @@ def add_mapping_args(ap: argparse.ArgumentParser): default="annotation", required=False, ) + ap.add_argument( + "--single-coord-space", + "-s", + action="store_true", + help="Use a single coordinate space for both fixed and moving layers (default is two coord spaces)", + default=False, + required=False, + ) def handle_args(): From db7f2347f8eb62f8de8e5040b7c1cba2dfb8d3b9 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Mon, 29 Sep 2025 12:53:55 +0200 Subject: [PATCH 23/30] feat: apply affine to annotations --- .../examples/example_linear_registration.py | 110 +++++++++++++----- 1 file changed, 83 insertions(+), 27 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index a5980424c..0406dc6f1 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -13,7 +13,6 @@ NUM_DEMO_DIMS = 2 # Currently can be 2D or 3D AFFINE_NUM_DECIMALS = 4 -# TODO may not be needed, depends on how we handle the two coord spaces MARKERS_SHADER = """ #uicontrol vec3 fixedPointColor color(default="#00FF00") #uicontrol vec3 movingPointColor color(default="#0000FF") @@ -102,7 +101,7 @@ def rigid_or_similarity_fit( # Homogeneous (D+1)x(D+1) T = np.zeros((D, D + 1)) T[:D, :D] = s * R - T[:, -1] = np.diagonal(t) + T[:, -1] = -1 * np.diagonal(t) affine = np.round(T, decimals=AFFINE_NUM_DECIMALS) return affine @@ -246,6 +245,8 @@ def __init__(self, args): self.stored_points = [[], []] self.stored_moving_dims = {} self.moving_layer_names = [] + self.input_dim_names = [] + self.output_dim_names = [] self.stored_group_number = -1 self.affine = None self.co_ords_ready = False @@ -314,9 +315,9 @@ def setup_two_panel_layout(self, s: neuroglancer.ViewerState): # In theory we could make keep unlinked and then on state change check # but that could be not worth compared to trying to improve rendering s.layout.children[1].crossSectionOrientation.link = "unlinked" - s.layout.children[1].crossSectionScale.link = "unlinked" + # s.layout.children[1].crossSectionScale.link = "unlinked" s.layout.children[1].projectionOrientation.link = "unlinked" - s.layout.children[1].projectionScale.link = "unlinked" + # s.layout.children[1].projectionScale.link = "unlinked" def setup_second_coord_space(self): if not self.moving_layer_names: @@ -327,33 +328,69 @@ def setup_second_coord_space(self): info_future = self.viewer.volume_info(layer_name) info_future.add_done_callback(lambda f: self.save_coord_space_info(f)) + def combine_affine_across_dims(self, s: neuroglancer.ViewerState, affine): + all_dims = s.dimensions.names + moving_dims = self.output_dim_names + # The affine matrix only applies to the moving dims + # so we need to create a larger matrix that applies to all dims + # by adding identity transforms for the real dims + full_matrix = np.zeros((len(all_dims), len(all_dims) + 1)) + for i, dim in enumerate(all_dims): + for j, dim2 in enumerate(all_dims): + if dim in moving_dims and dim2 in moving_dims: + moving_i = moving_dims.index(dim) + moving_j = moving_dims.index(dim2) + full_matrix[i, j] = affine[moving_i, moving_j] + elif dim == dim2: + full_matrix[i, j] = 1 + if dim in moving_dims: + moving_i = moving_dims.index(dim) + full_matrix[i, -1] = affine[moving_i, -1] + return full_matrix + def 
setup_registration_layers(self, s: neuroglancer.ViewerState): dimensions = s.dimensions # It is possible that the dimensions are not ready yet, return if so if len(dimensions.names) != self.num_dims: return + # Make the annotation layer if needed - # TODO probably don't need the properties, to be confirmed if - # one co-ord space is fine or need two if s.layers.index(self.annotations_name) == -1: - s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( - dimensions=create_dimensions(s.dimensions), - # annotation_properties=[ - # neuroglancer.AnnotationPropertySpec( - # id="label", - # type="uint32", - # default=0, - # ), - # neuroglancer.AnnotationPropertySpec( - # id="group", - # type="uint8", - # default=0, - # enum_labels=["fixed", "moving"], - # enum_values=[0, 1], - # ), - # ], - # shader=MARKERS_SHADER, - ) + if self.two_coord_spaces: + s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( + dimensions=create_dimensions(s.dimensions) + ) + else: + s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( + dimensions=create_dimensions(s.dimensions), + annotation_properties=[ + neuroglancer.AnnotationPropertySpec( + id="label", + type="uint32", + default=0, + ), + neuroglancer.AnnotationPropertySpec( + id="group", + type="uint8", + default=0, + enum_labels=["fixed", "moving"], + enum_values=[0, 1], + ), + ], + shader=MARKERS_SHADER, + ) + + # Make a copy of all the moving layers but in original coord space + # and as part of the left hand panel + for layer_name in self.moving_layer_names: + copy = deepcopy(s.layers[layer_name]) + copy.name = layer_name + "_registered" + copy.visible = False + for source in copy.source: + # TODO might need mapping + source.transform = None + s.layers[copy.name] = copy + s.layout.children[0].layers.append(copy.name) s.layers[self.annotations_name].tool = "annotatePoint" s.selected_layer.layer = self.annotations_name s.selected_layer.visible = True @@ -394,9 +431,14 @@ def save_coord_space_info(self, info_future): source.transform = new_coord_space def toggle_registered_visibility(self, _): - self.setup_viewer() - # is_registered_visible = s.layers["registered"].visible - # s.layers["registered"].visible = not is_registered_visible + if not self.ready: + self.setup_viewer() + return + with self.viewer.txn() as s: + for layer_name in self.moving_layer_names: + registered_name = layer_name + "_registered" + is_registered_visible = s.layers[registered_name].visible + s.layers[registered_name].visible = not is_registered_visible def setup_viewer_actions(self): viewer = self.viewer @@ -489,6 +531,20 @@ def update_registered_layers(self, s: neuroglancer.ViewerState): output_dimensions=change_coord_names(v, "2"), matrix=transform, ) + for source in s.layers[k + "_registered"].source: + source.transform = neuroglancer.CoordinateSpaceTransform( + input_dimensions=v, + output_dimensions=v, + matrix=transform, + ) + print(self.combine_affine_across_dims(s, self.affine).tolist()) + s.layers[self.annotations_name].source[ + 0 + ].transform = neuroglancer.CoordinateSpaceTransform( + input_dimensions=create_dimensions(s.dimensions), + output_dimensions=create_dimensions(s.dimensions), + matrix=self.combine_affine_across_dims(s, self.affine).tolist(), + ) # print(s.layers["registered"].source[0].transform.matrix) # TODO this is where that mapping needs to happen of affine dims From d9855e64bf4f29d1d085011e1148f3338ebb4858 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Fri, 3 Oct 2025 14:05:32 +0200 Subject: [PATCH 24/30] feat: 
small updates --- .../examples/example_linear_registration.py | 35 +++++++++---------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 0406dc6f1..278856454 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -276,8 +276,6 @@ def update(self): if current_time - self.last_updated_print > 5: print(f"Viewer states are successfully syncing at {ctime()}") self.last_updated_print = current_time - # TODO make ready a status instead of two vars - # TODO overall update the class attributes at the end to cleaner if self.co_ords_ready and not self.ready: with self.viewer.txn() as s: self.setup_registration_layers(s) @@ -310,13 +308,14 @@ def setup_two_panel_layout(self, s: neuroglancer.ViewerState): neuroglancer.LayerGroupViewer(layers=group2_names, layout="xy-3d"), ] ) - # Unliked position solves rendering problem but makes navigation awkward - # s.layout.children[1].position.link = "unlinked" - # In theory we could make keep unlinked and then on state change check - # but that could be not worth compared to trying to improve rendering + # Unlinked position solves rendering problem but makes navigation awkward + if not self.two_coord_spaces + s.layout.children[1].position.link = "unlinked" s.layout.children[1].crossSectionOrientation.link = "unlinked" - # s.layout.children[1].crossSectionScale.link = "unlinked" s.layout.children[1].projectionOrientation.link = "unlinked" + + # Can also unlink scales if desired + # s.layout.children[1].crossSectionScale.link = "unlinked" # s.layout.children[1].projectionScale.link = "unlinked" def setup_second_coord_space(self): @@ -329,12 +328,15 @@ def setup_second_coord_space(self): info_future.add_done_callback(lambda f: self.save_coord_space_info(f)) def combine_affine_across_dims(self, s: neuroglancer.ViewerState, affine): + """ + The affine matrix only applies to the moving dims + but the annotation layer in the two coord space case + applies to all dims so we need to create a larger matrix + """ all_dims = s.dimensions.names moving_dims = self.output_dim_names - # The affine matrix only applies to the moving dims - # so we need to create a larger matrix that applies to all dims - # by adding identity transforms for the real dims full_matrix = np.zeros((len(all_dims), len(all_dims) + 1)) + for i, dim in enumerate(all_dims): for j, dim2 in enumerate(all_dims): if dim in moving_dims and dim2 in moving_dims: @@ -350,8 +352,7 @@ def combine_affine_across_dims(self, s: neuroglancer.ViewerState, affine): def setup_registration_layers(self, s: neuroglancer.ViewerState): dimensions = s.dimensions - # It is possible that the dimensions are not ready yet, return if so - if len(dimensions.names) != self.num_dims: + if len(dimensions.names) != self.num_dims # loading: return # Make the annotation layer if needed @@ -448,11 +449,6 @@ def setup_viewer_actions(self): with viewer.config_state.txn() as s: s.input_event_bindings.viewer["keyt"] = "toggleRegisteredVisibility" - s.input_event_bindings.viewer["keyp"] = "screenshotStatistics" - - def is_fixed_image_space_last(self, dim_names): - first_name = dim_names[0] - return first_name not in self.input_dim_names def on_state_changed(self): self.viewer.defer_callback(self.update) @@ -477,7 +473,8 @@ def split_points_into_pairs(self, annotations, dim_names): if len(annotations) == 0: return np.zeros((0, 0)), np.zeros((0, 0)) if self.two_coord_spaces: - 
real_dims_last = self.is_fixed_image_space_last(dim_names) + first_name = dim_names[0] + real_dims_last = first_name not in self.input_dim_names num_points = len(annotations) num_dims = len(annotations[0].point) // 2 fixed_points = np.zeros((num_points, num_dims)) @@ -647,7 +644,7 @@ def _set_status_message(self, key: str, message: str): s.status_messages[key] = message self.status_timers[key] = time() - def transform_points_with_affine(self, points: np.ndarray): + def _transform_points_with_affine(self, points: np.ndarray): if self.affine is not None: return transform_points(self.affine, points) From d6ef631991425af06c0745af84c9c5e91f4ad63e Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Mon, 6 Oct 2025 13:08:29 +0200 Subject: [PATCH 25/30] refactor: clarify function names and class --- .../examples/example_linear_registration.py | 230 ++++++++---------- 1 file changed, 104 insertions(+), 126 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 278856454..b03a57c28 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -3,6 +3,7 @@ import webbrowser from copy import deepcopy from time import ctime, time +from enum import Enum import neuroglancer import neuroglancer.cli @@ -216,15 +217,15 @@ def _create_demo_moving_image(): ) -def change_coord_names(dims: neuroglancer.CoordinateSpace, name_mod): +def new_coord_space_names(dims: neuroglancer.CoordinateSpace, name_suffix): return neuroglancer.CoordinateSpace( - names=[n + name_mod for n in dims.names], + names=[n + name_suffix for n in dims.names], units=dims.units, scales=dims.scales, ) -def create_dimensions(viewer_dims: neuroglancer.CoordinateSpace, indices=None): +def create_coord_space_matching_global_dims(viewer_dims: neuroglancer.CoordinateSpace, indices=None): names = viewer_dims.names units = viewer_dims.units scales = viewer_dims.scales @@ -235,6 +236,10 @@ def create_dimensions(viewer_dims: neuroglancer.CoordinateSpace, indices=None): return neuroglancer.CoordinateSpace(names=names, units=units, scales=scales) +class ReadyState(Enum): + NOT_READY = 0 + COORDS_READY = 1 + READY = 2 class LinearRegistrationWorkflow: def __init__(self, args): @@ -243,15 +248,14 @@ def __init__(self, args): self.annotations_name = args.annotations_name self.status_timers = {} self.stored_points = [[], []] - self.stored_moving_dims = {} + self.stored_map_moving_name_to_coords = {} self.moving_layer_names = [] self.input_dim_names = [] self.output_dim_names = [] - self.stored_group_number = -1 self.affine = None self.co_ords_ready = False - self.ready = False - self.last_updated_print = -1 + self.ready_state = ReadyState.NOT_READY + self.last_updated_print_time = -1 self.viewer = neuroglancer.Viewer() self.viewer.shared_state.add_changed_callback( lambda: self.viewer.defer_callback(self.on_state_changed) @@ -266,57 +270,53 @@ def __init__(self, args): "help", "Place fixed (reference) layers in the left hand panel, and moving layers (to be registered) in the right hand panel. 
Then press 't' once you have completed this setup.", ) - with self.viewer.txn() as s: - self.setup_two_panel_layout(s) + self.setup_initial_two_panel_layout() self.setup_viewer_actions() def update(self): """Primary update loop, called whenever the viewer state changes.""" current_time = time() - if current_time - self.last_updated_print > 5: + if current_time - self.last_updated_print_time > 5: print(f"Viewer states are successfully syncing at {ctime()}") - self.last_updated_print = current_time - if self.co_ords_ready and not self.ready: - with self.viewer.txn() as s: - self.setup_registration_layers(s) - if self.ready: - if not self.two_coord_spaces: - self.automatically_group_markers_and_update() + self.last_updated_print_time = current_time + if self.ready_state == ReadyState.COORDS_READY: + self.setup_registration_layers() + elif self.ready_state == ReadyState.READY: self.update_affine() self._clear_status_messages() - def setup_viewer(self): + def setup_viewer_after_user_ready(self): self.setup_second_coord_space() self._set_status_message( "help", "Place markers in pairs, starting with the fixed, and then the moving. The registered layer will automatically update as you add markers. Press 't' to toggle visiblity of the registered layer.", ) - self.co_ords_ready = True - - def setup_two_panel_layout(self, s: neuroglancer.ViewerState): - all_layer_names = [layer.name for layer in s.layers] - if len(all_layer_names) >= 2: - half_point = len(all_layer_names) // 2 - group1_names = all_layer_names[:half_point] - group2_names = all_layer_names[half_point:] - else: - group1_names = all_layer_names - group2_names = all_layer_names - s.layout = neuroglancer.row_layout( - [ - neuroglancer.LayerGroupViewer(layers=group1_names, layout="xy-3d"), - neuroglancer.LayerGroupViewer(layers=group2_names, layout="xy-3d"), - ] - ) - # Unlinked position solves rendering problem but makes navigation awkward - if not self.two_coord_spaces - s.layout.children[1].position.link = "unlinked" - s.layout.children[1].crossSectionOrientation.link = "unlinked" - s.layout.children[1].projectionOrientation.link = "unlinked" + self.ready_state = ReadyState.COORDS_READY + + def setup_initial_two_panel_layout(self): + with self.viewer.txn() as s: + all_layer_names = [layer.name for layer in s.layers] + if len(all_layer_names) >= 2: + half_point = len(all_layer_names) // 2 + group1_names = all_layer_names[:half_point] + group2_names = all_layer_names[half_point:] + else: + group1_names = all_layer_names + group2_names = all_layer_names + s.layout = neuroglancer.row_layout( + [ + neuroglancer.LayerGroupViewer(layers=group1_names, layout="xy-3d"), + neuroglancer.LayerGroupViewer(layers=group2_names, layout="xy-3d"), + ] + ) + if not self.two_coord_spaces + s.layout.children[1].position.link = "unlinked" + s.layout.children[1].crossSectionOrientation.link = "unlinked" + s.layout.children[1].projectionOrientation.link = "unlinked" - # Can also unlink scales if desired - # s.layout.children[1].crossSectionScale.link = "unlinked" - # s.layout.children[1].projectionScale.link = "unlinked" + # Can also unlink scales if desired + # s.layout.children[1].crossSectionScale.link = "unlinked" + # s.layout.children[1].projectionScale.link = "unlinked" def setup_second_coord_space(self): if not self.moving_layer_names: @@ -350,55 +350,56 @@ def combine_affine_across_dims(self, s: neuroglancer.ViewerState, affine): full_matrix[i, -1] = affine[moving_i, -1] return full_matrix - def setup_registration_layers(self, s: 
neuroglancer.ViewerState): - dimensions = s.dimensions - if len(dimensions.names) != self.num_dims # loading: - return - - # Make the annotation layer if needed - if s.layers.index(self.annotations_name) == -1: - if self.two_coord_spaces: - s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( - dimensions=create_dimensions(s.dimensions) - ) - else: - s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( - dimensions=create_dimensions(s.dimensions), - annotation_properties=[ - neuroglancer.AnnotationPropertySpec( - id="label", - type="uint32", - default=0, - ), - neuroglancer.AnnotationPropertySpec( - id="group", - type="uint8", - default=0, - enum_labels=["fixed", "moving"], - enum_values=[0, 1], - ), - ], - shader=MARKERS_SHADER, - ) + def setup_registration_layers(self): + with self.viewer.txn() as s: + dimensions = s.dimensions + if len(dimensions.names) != self.num_dims # loading: + return + + # Make the annotation layer if needed + if s.layers.index(self.annotations_name) == -1: + if self.two_coord_spaces: + s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( + dimensions=create_coord_space_matching_global_dims(s.dimensions) + ) + else: + s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( + dimensions=create_coord_space_matching_global_dims(s.dimensions), + annotation_properties=[ + neuroglancer.AnnotationPropertySpec( + id="label", + type="uint32", + default=0, + ), + neuroglancer.AnnotationPropertySpec( + id="group", + type="uint8", + default=0, + enum_labels=["fixed", "moving"], + enum_values=[0, 1], + ), + ], + shader=MARKERS_SHADER, + ) - # Make a copy of all the moving layers but in original coord space - # and as part of the left hand panel - for layer_name in self.moving_layer_names: - copy = deepcopy(s.layers[layer_name]) - copy.name = layer_name + "_registered" - copy.visible = False - for source in copy.source: - # TODO might need mapping - source.transform = None - s.layers[copy.name] = copy - s.layout.children[0].layers.append(copy.name) - s.layers[self.annotations_name].tool = "annotatePoint" - s.selected_layer.layer = self.annotations_name - s.selected_layer.visible = True - s.layout.children[0].layers.append(self.annotations_name) - s.layout.children[1].layers.append(self.annotations_name) - self.setup_panel_coordinates(s) - self.ready = True + # Make a copy of all the moving layers but in original coord space + # and as part of the left hand panel + for layer_name in self.moving_layer_names: + copy = deepcopy(s.layers[layer_name]) + copy.name = layer_name + "_registered" + copy.visible = False + for source in copy.source: + # TODO might need mapping + source.transform = None + s.layers[copy.name] = copy + s.layout.children[0].layers.append(copy.name) + s.layers[self.annotations_name].tool = "annotatePoint" + s.selected_layer.layer = self.annotations_name + s.selected_layer.visible = True + s.layout.children[0].layers.append(self.annotations_name) + s.layout.children[1].layers.append(self.annotations_name) + self.setup_panel_coordinates(s) + self.ready_state = ReadyState.READY def setup_panel_coordinates(self, s: neuroglancer.ViewerState): dimensions = s.dimensions.names @@ -410,8 +411,8 @@ def setup_panel_coordinates(self, s: neuroglancer.ViewerState): def save_coord_space_info(self, info_future): result = info_future.result() self.moving_name = self.moving_layer_names[self._moving_idx] - self.stored_moving_dims[self.moving_name] = result.dimensions - done = len(self.stored_moving_dims) == 
len(self.moving_layer_names) + self.stored_map_moving_name_to_coords[self.moving_name] = result.dimensions + done = len(self.stored_map_moving_name_to_coords) == len(self.moving_layer_names) if not done: self._moving_idx += 1 self.setup_second_coord_space() @@ -419,8 +420,8 @@ def save_coord_space_info(self, info_future): # If we get here we have all the coord spaces ready and can update viewer with self.viewer.txn() as s: for layer_name in self.moving_layer_names: - input_dims = self.stored_moving_dims[layer_name] - output_dims = change_coord_names(input_dims, "2") + input_dims = self.stored_map_moving_name_to_coords[layer_name] + output_dims = new_coord_space_names(input_dims, "2") self.input_dim_names = input_dims.names self.output_dim_names = output_dims.names self.num_dims = len(input_dims.names) * 2 @@ -432,8 +433,10 @@ def save_coord_space_info(self, info_future): source.transform = new_coord_space def toggle_registered_visibility(self, _): - if not self.ready: - self.setup_viewer() + if self.ready_state == ReadyState.NOT_READY: + self.setup_viewer_after_user_ready() + return + elif self.ready_state == ReadyState.COORDS_READY: return with self.viewer.txn() as s: for layer_name in self.moving_layer_names: @@ -453,22 +456,11 @@ def setup_viewer_actions(self): def on_state_changed(self): self.viewer.defer_callback(self.update) - @debounce(0.25) - def automatically_group_markers_and_update(self): - with self.viewer.txn() as s: - self.automatically_group_markers(s) - @debounce(1.5) def update_affine(self): with self.viewer.txn() as s: self.estimate_affine(s) - def create_registered_image(self): - with self.viewer.txn() as s: - layer = deepcopy(s.layers[self.moving_name]) - layer.name = "registered" - return layer - def split_points_into_pairs(self, annotations, dim_names): if len(annotations) == 0: return np.zeros((0, 0)), np.zeros((0, 0)) @@ -501,31 +493,17 @@ def split_points_into_pairs(self, annotations, dim_names): return np.array(fixed_points), np.array(moving_points) - def automatically_group_markers(self, s: neuroglancer.ViewerState): - dimensions = s.dimensions.names - if self.two_coord_spaces: - return False - annotations = s.layers[self.annotations_name].annotations - if len(annotations) == self.stored_group_number: - return False - self.stored_group_number = len(annotations) - if len(annotations) < 2: - return False - for i, a in enumerate(s.layers[self.annotations_name].annotations): - a.props = [i // 2, i % 2] - return True - def update_registered_layers(self, s: neuroglancer.ViewerState): if self.affine is not None: transform = self.affine.tolist() # TODO handle layer being renamed - for k, v in self.stored_moving_dims.items(): + for k, v in self.stored_map_moving_name_to_coords.items(): # TODO not sure if need to handle local channels here # keeping code below just in case for source in s.layers[k].source: source.transform = neuroglancer.CoordinateSpaceTransform( input_dimensions=v, - output_dimensions=change_coord_names(v, "2"), + output_dimensions=new_coord_space_names(v, "2"), matrix=transform, ) for source in s.layers[k + "_registered"].source: @@ -538,8 +516,8 @@ def update_registered_layers(self, s: neuroglancer.ViewerState): s.layers[self.annotations_name].source[ 0 ].transform = neuroglancer.CoordinateSpaceTransform( - input_dimensions=create_dimensions(s.dimensions), - output_dimensions=create_dimensions(s.dimensions), + input_dimensions=create_coord_space_matching_global_dims(s.dimensions), + 
output_dimensions=create_coord_space_matching_global_dims(s.dimensions), matrix=self.combine_affine_across_dims(s, self.affine).tolist(), ) From 94e52700a12de00a36fce2a0e1f2ab4869fc40bc Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Mon, 6 Oct 2025 17:31:12 +0200 Subject: [PATCH 26/30] feat: save progress of various fixes --- .../examples/example_linear_registration.py | 113 +++++++++++++----- 1 file changed, 86 insertions(+), 27 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index b03a57c28..17c4aa7de 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -11,7 +11,7 @@ import scipy.ndimage MESSAGE_DURATION = 5 # seconds -NUM_DEMO_DIMS = 2 # Currently can be 2D or 3D +NUM_DEMO_DIMS = 3 # Currently can be 2D or 3D AFFINE_NUM_DECIMALS = 4 MARKERS_SHADER = """ @@ -218,8 +218,12 @@ def _create_demo_moving_image(): def new_coord_space_names(dims: neuroglancer.CoordinateSpace, name_suffix): + def change_name(n): + if n.endswith(("'", "^", "#")): + return n + return n + name_suffix return neuroglancer.CoordinateSpace( - names=[n + name_suffix for n in dims.names], + names=[change_name(n) for n in dims.names], units=dims.units, scales=dims.scales, ) @@ -240,6 +244,7 @@ class ReadyState(Enum): NOT_READY = 0 COORDS_READY = 1 READY = 2 + ERROR = 3 class LinearRegistrationWorkflow: def __init__(self, args): @@ -248,14 +253,17 @@ def __init__(self, args): self.annotations_name = args.annotations_name self.status_timers = {} self.stored_points = [[], []] - self.stored_map_moving_name_to_coords = {} + self.stored_map_moving_name_to_data_coords = {} + self.stored_map_moving_name_to_viewer_coords = {} self.moving_layer_names = [] self.input_dim_names = [] self.output_dim_names = [] self.affine = None self.co_ords_ready = False + self.stored_last_grouped_points_num = -1 self.ready_state = ReadyState.NOT_READY self.last_updated_print_time = -1 + self.num_dims = 0 self.viewer = neuroglancer.Viewer() self.viewer.shared_state.add_changed_callback( lambda: self.viewer.defer_callback(self.on_state_changed) @@ -281,17 +289,49 @@ def update(self): self.last_updated_print_time = current_time if self.ready_state == ReadyState.COORDS_READY: self.setup_registration_layers() + if self.ready_state == ReadyState.ERROR: + # TODO put in right place + self._set_status_message( + "help", + "Please manually enter second coordinate space information for the moving layers." 
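+                # Assumed recovery path: mirror what setup_second_coord_space()
+                # does by hand, i.e. rename each moving source's output
+                # dimensions (e.g. x -> x2, y -> y2, z -> z2) so that fixed and
+                # moving layers end up in separate coordinate spaces.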
+ ) elif self.ready_state == ReadyState.READY: + if not self.two_coord_spaces: + self.update_based_on_annotations_debounced() self.update_affine() self._clear_status_messages() + @debounce(0.25) + def update_based_on_annotations_debounced(self): + with self.viewer.txn() as s: + self.update_based_on_annotations(s) + + def update_based_on_annotations(self, s: neuroglancer.ViewerState): + annotations = s.layers[self.annotations_name].annotations + if len(annotations) > 2 and self.stored_last_grouped_points_num != len(annotations): + # TODO check the annotations for being modified + for i, a in enumerate(s.layers[self.annotations_name].annotations): + a.props = [i // 2, i % 2] + # Check if we are at one of the annotations in the left hand panel + # or right hand panel, if so we can move to that in the other panel + right_hand_position = s.layout.children[1].position.value + left_hand_position = s.position + + def setup_viewer_after_user_ready(self): - self.setup_second_coord_space() + if not self.moving_layer_names: + moving_layers = self.get_state().layout.children[1].layers + self.moving_layer_names = moving_layers + print(self.moving_layer_names) + self._moving_idx = 0 + if self.two_coord_spaces: + self.setup_second_coord_space() + else: + self.setup_registration_layers() self._set_status_message( "help", "Place markers in pairs, starting with the fixed, and then the moving. The registered layer will automatically update as you add markers. Press 't' to toggle visiblity of the registered layer.", ) - self.ready_state = ReadyState.COORDS_READY def setup_initial_two_panel_layout(self): with self.viewer.txn() as s: @@ -309,7 +349,7 @@ def setup_initial_two_panel_layout(self): neuroglancer.LayerGroupViewer(layers=group2_names, layout="xy-3d"), ] ) - if not self.two_coord_spaces + if not self.two_coord_spaces: s.layout.children[1].position.link = "unlinked" s.layout.children[1].crossSectionOrientation.link = "unlinked" s.layout.children[1].projectionOrientation.link = "unlinked" @@ -319,10 +359,6 @@ def setup_initial_two_panel_layout(self): # s.layout.children[1].projectionScale.link = "unlinked" def setup_second_coord_space(self): - if not self.moving_layer_names: - moving_layers = self.get_state().layout.children[1].layers - self.moving_layer_names = moving_layers - self._moving_idx = 0 layer_name = self.moving_layer_names[self._moving_idx] info_future = self.viewer.volume_info(layer_name) info_future.add_done_callback(lambda f: self.save_coord_space_info(f)) @@ -353,7 +389,7 @@ def combine_affine_across_dims(self, s: neuroglancer.ViewerState, affine): def setup_registration_layers(self): with self.viewer.txn() as s: dimensions = s.dimensions - if len(dimensions.names) != self.num_dims # loading: + if not self.ready_state == ReadyState.ERROR or (self.two_coord_spaces and (len(dimensions.names) != self.num_dims)): # loading return # Make the annotation layer if needed @@ -388,9 +424,10 @@ def setup_registration_layers(self): copy = deepcopy(s.layers[layer_name]) copy.name = layer_name + "_registered" copy.visible = False - for source in copy.source: - # TODO might need mapping - source.transform = None + if self.two_coord_spaces: + for source in copy.source: + # TODO might need mapping + source.transform = None s.layers[copy.name] = copy s.layout.children[0].layers.append(copy.name) s.layers[self.annotations_name].tool = "annotatePoint" @@ -409,28 +446,48 @@ def setup_panel_coordinates(self, s: neuroglancer.ViewerState): s.layout.children[0].displayDimensions.value = self.input_dim_names[:3] 
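        # Design note: with two coordinate spaces the viewer's global dimensions
        # carry both the fixed names (e.g. x, y, z) and the renamed moving names
        # (e.g. x2, y2, z2), so each panel is restricted to its own triple via
        # the per-panel displayDimensions property added to LayerGroupViewer.
        # A minimal standalone sketch of the same idea (dimension names assume
        # the "2" suffix convention used in this workflow):
        #
        #   with viewer.txn() as s:
        #       s.layout.children[0].displayDimensions.link = "unlinked"
        #       s.layout.children[0].displayDimensions.value = ["x", "y", "z"]
        #       s.layout.children[1].displayDimensions.link = "unlinked"
        #       s.layout.children[1].displayDimensions.value = ["x2", "y2", "z2"]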
def save_coord_space_info(self, info_future): - result = info_future.result() self.moving_name = self.moving_layer_names[self._moving_idx] - self.stored_map_moving_name_to_coords[self.moving_name] = result.dimensions - done = len(self.stored_map_moving_name_to_coords) == len(self.moving_layer_names) + try: + result = info_future.result() + except Exception as e: + print(f"ERROR: Could not parse volume info for {self.moving_name}: {e} {info_future}") + print("Please manually enter the coordinate space information as a second co-ordinate space.") + self.ready_state = ReadyState.ERROR + self._moving_idx += 1 + if self._moving_idx < len(self.moving_layer_names): + self.setup_second_coord_space() + return + # TODO not sure how to recover from this, maybe ask user to manually + # input the coord space info? + # TODO clean up + done = len(self.stored_map_moving_name_to_data_coords) == len(self.moving_layer_names) if not done: self._moving_idx += 1 self.setup_second_coord_space() return + self.stored_map_moving_name_to_data_coords[self.moving_name] = result.dimensions # If we get here we have all the coord spaces ready and can update viewer with self.viewer.txn() as s: for layer_name in self.moving_layer_names: - input_dims = self.stored_map_moving_name_to_coords[layer_name] - output_dims = new_coord_space_names(input_dims, "2") + input_dims = self.stored_map_moving_name_to_data_coords[layer_name] + print(layer_name, input_dims) self.input_dim_names = input_dims.names - self.output_dim_names = output_dims.names - self.num_dims = len(input_dims.names) * 2 - new_coord_space = neuroglancer.CoordinateSpaceTransform( - input_dimensions=input_dims, - output_dimensions=output_dims, - ) + self.stored_map_moving_name_to_viewer_coords[layer_name] = [] for source in s.layers[layer_name].source: + if source.transform is None: + output_dims = new_coord_space_names(input_dims, "2") + else: + output_dims = new_coord_space_names(source.transform.output_dimensions, "2") + self.output_dim_names = output_dims.names + new_coord_space = neuroglancer.CoordinateSpaceTransform( + input_dimensions=input_dims, + output_dimensions=output_dims, + ) + self.num_dims = max(self.num_dims, 2 * len([n for n in new_coord_space.output_dimensions.names if not n.endswith(("'", "^", "#"))])) + self.stored_map_moving_name_to_viewer_coords[layer_name].append(new_coord_space) source.transform = new_coord_space + if not self.ready_state == ReadyState.ERROR: + self.ready_state = ReadyState.COORDS_READY def toggle_registered_visibility(self, _): if self.ready_state == ReadyState.NOT_READY: @@ -438,6 +495,8 @@ def toggle_registered_visibility(self, _): return elif self.ready_state == ReadyState.COORDS_READY: return + elif self.ready_state == ReadyState.ERROR: + self.setup_registration_layers() with self.viewer.txn() as s: for layer_name in self.moving_layer_names: registered_name = layer_name + "_registered" @@ -497,7 +556,7 @@ def update_registered_layers(self, s: neuroglancer.ViewerState): if self.affine is not None: transform = self.affine.tolist() # TODO handle layer being renamed - for k, v in self.stored_map_moving_name_to_coords.items(): + for k, v in self.stored_map_moving_name_to_data_coords.items(): # TODO not sure if need to handle local channels here # keeping code below just in case for source in s.layers[k].source: @@ -553,7 +612,7 @@ def update_registered_layers(self, s: neuroglancer.ViewerState): def estimate_affine(self, s: neuroglancer.ViewerState): annotations = s.layers[self.annotations_name].annotations - if 
len(annotations) == 0: + if len(annotations) == 0 or (not self.two_coord_spaces and len(annotations) < 2): return False dim_names = s.dimensions.names From 409c744b6ed13ad0eaf5cbea1f151d237aecb547 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Tue, 7 Oct 2025 16:17:54 +0200 Subject: [PATCH 27/30] refactor: remove second co-ord space --- .../examples/example_linear_registration.py | 159 +++++++----------- 1 file changed, 59 insertions(+), 100 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 17c4aa7de..3ff9c14c8 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -2,8 +2,8 @@ import threading import webbrowser from copy import deepcopy -from time import ctime, time from enum import Enum +from time import ctime, time import neuroglancer import neuroglancer.cli @@ -222,6 +222,7 @@ def change_name(n): if n.endswith(("'", "^", "#")): return n return n + name_suffix + return neuroglancer.CoordinateSpace( names=[change_name(n) for n in dims.names], units=dims.units, @@ -229,7 +230,9 @@ def change_name(n): ) -def create_coord_space_matching_global_dims(viewer_dims: neuroglancer.CoordinateSpace, indices=None): +def create_coord_space_matching_global_dims( + viewer_dims: neuroglancer.CoordinateSpace, indices=None +): names = viewer_dims.names units = viewer_dims.units scales = viewer_dims.scales @@ -240,16 +243,17 @@ def create_coord_space_matching_global_dims(viewer_dims: neuroglancer.Coordinate return neuroglancer.CoordinateSpace(names=names, units=units, scales=scales) + class ReadyState(Enum): NOT_READY = 0 COORDS_READY = 1 READY = 2 ERROR = 3 + class LinearRegistrationWorkflow: def __init__(self, args): starting_state = args.state - self.two_coord_spaces = not args.single_coord_space self.annotations_name = args.annotations_name self.status_timers = {} self.stored_points = [[], []] @@ -293,41 +297,19 @@ def update(self): # TODO put in right place self._set_status_message( "help", - "Please manually enter second coordinate space information for the moving layers." 
+ "Please manually enter second coordinate space information for the moving layers.", ) elif self.ready_state == ReadyState.READY: - if not self.two_coord_spaces: - self.update_based_on_annotations_debounced() self.update_affine() self._clear_status_messages() - @debounce(0.25) - def update_based_on_annotations_debounced(self): - with self.viewer.txn() as s: - self.update_based_on_annotations(s) - - def update_based_on_annotations(self, s: neuroglancer.ViewerState): - annotations = s.layers[self.annotations_name].annotations - if len(annotations) > 2 and self.stored_last_grouped_points_num != len(annotations): - # TODO check the annotations for being modified - for i, a in enumerate(s.layers[self.annotations_name].annotations): - a.props = [i // 2, i % 2] - # Check if we are at one of the annotations in the left hand panel - # or right hand panel, if so we can move to that in the other panel - right_hand_position = s.layout.children[1].position.value - left_hand_position = s.position - - def setup_viewer_after_user_ready(self): if not self.moving_layer_names: moving_layers = self.get_state().layout.children[1].layers self.moving_layer_names = moving_layers print(self.moving_layer_names) self._moving_idx = 0 - if self.two_coord_spaces: - self.setup_second_coord_space() - else: - self.setup_registration_layers() + self.setup_second_coord_space() self._set_status_message( "help", "Place markers in pairs, starting with the fixed, and then the moving. The registered layer will automatically update as you add markers. Press 't' to toggle visiblity of the registered layer.", @@ -349,8 +331,6 @@ def setup_initial_two_panel_layout(self): neuroglancer.LayerGroupViewer(layers=group2_names, layout="xy-3d"), ] ) - if not self.two_coord_spaces: - s.layout.children[1].position.link = "unlinked" s.layout.children[1].crossSectionOrientation.link = "unlinked" s.layout.children[1].projectionOrientation.link = "unlinked" @@ -389,34 +369,16 @@ def combine_affine_across_dims(self, s: neuroglancer.ViewerState, affine): def setup_registration_layers(self): with self.viewer.txn() as s: dimensions = s.dimensions - if not self.ready_state == ReadyState.ERROR or (self.two_coord_spaces and (len(dimensions.names) != self.num_dims)): # loading + if not self.ready_state == ReadyState.ERROR or ( + len(dimensions.names) != self.num_dims + ): return # Make the annotation layer if needed if s.layers.index(self.annotations_name) == -1: - if self.two_coord_spaces: - s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( - dimensions=create_coord_space_matching_global_dims(s.dimensions) - ) - else: - s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( - dimensions=create_coord_space_matching_global_dims(s.dimensions), - annotation_properties=[ - neuroglancer.AnnotationPropertySpec( - id="label", - type="uint32", - default=0, - ), - neuroglancer.AnnotationPropertySpec( - id="group", - type="uint8", - default=0, - enum_labels=["fixed", "moving"], - enum_values=[0, 1], - ), - ], - shader=MARKERS_SHADER, - ) + s.layers[self.annotations_name] = neuroglancer.LocalAnnotationLayer( + dimensions=create_coord_space_matching_global_dims(s.dimensions) + ) # Make a copy of all the moving layers but in original coord space # and as part of the left hand panel @@ -424,10 +386,9 @@ def setup_registration_layers(self): copy = deepcopy(s.layers[layer_name]) copy.name = layer_name + "_registered" copy.visible = False - if self.two_coord_spaces: - for source in copy.source: - # TODO might need mapping - 
source.transform = None + for source in copy.source: + # TODO might need mapping, probably better is to copy before making the second coord space + source.transform = None s.layers[copy.name] = copy s.layout.children[0].layers.append(copy.name) s.layers[self.annotations_name].tool = "annotatePoint" @@ -450,8 +411,12 @@ def save_coord_space_info(self, info_future): try: result = info_future.result() except Exception as e: - print(f"ERROR: Could not parse volume info for {self.moving_name}: {e} {info_future}") - print("Please manually enter the coordinate space information as a second co-ordinate space.") + print( + f"ERROR: Could not parse volume info for {self.moving_name}: {e} {info_future}" + ) + print( + "Please manually enter the coordinate space information as a second co-ordinate space." + ) self.ready_state = ReadyState.ERROR self._moving_idx += 1 if self._moving_idx < len(self.moving_layer_names): @@ -460,7 +425,9 @@ def save_coord_space_info(self, info_future): # TODO not sure how to recover from this, maybe ask user to manually # input the coord space info? # TODO clean up - done = len(self.stored_map_moving_name_to_data_coords) == len(self.moving_layer_names) + done = len(self.stored_map_moving_name_to_data_coords) == len( + self.moving_layer_names + ) if not done: self._moving_idx += 1 self.setup_second_coord_space() @@ -477,14 +444,28 @@ def save_coord_space_info(self, info_future): if source.transform is None: output_dims = new_coord_space_names(input_dims, "2") else: - output_dims = new_coord_space_names(source.transform.output_dimensions, "2") + output_dims = new_coord_space_names( + source.transform.output_dimensions, "2" + ) self.output_dim_names = output_dims.names new_coord_space = neuroglancer.CoordinateSpaceTransform( input_dimensions=input_dims, output_dimensions=output_dims, ) - self.num_dims = max(self.num_dims, 2 * len([n for n in new_coord_space.output_dimensions.names if not n.endswith(("'", "^", "#"))])) - self.stored_map_moving_name_to_viewer_coords[layer_name].append(new_coord_space) + self.num_dims = max( + self.num_dims, + 2 + * len( + [ + n + for n in new_coord_space.output_dimensions.names + if not n.endswith(("'", "^", "#")) + ] + ), + ) + self.stored_map_moving_name_to_viewer_coords[layer_name].append( + new_coord_space + ) source.transform = new_coord_space if not self.ready_state == ReadyState.ERROR: self.ready_state = ReadyState.COORDS_READY @@ -523,34 +504,20 @@ def update_affine(self): def split_points_into_pairs(self, annotations, dim_names): if len(annotations) == 0: return np.zeros((0, 0)), np.zeros((0, 0)) - if self.two_coord_spaces: - first_name = dim_names[0] - real_dims_last = first_name not in self.input_dim_names - num_points = len(annotations) - num_dims = len(annotations[0].point) // 2 - fixed_points = np.zeros((num_points, num_dims)) - moving_points = np.zeros((num_points, num_dims)) - for i, a in enumerate(annotations): - for j in range(num_dims): - fixed_index = j + num_dims if real_dims_last else j - moving_index = j if real_dims_last else j + num_dims - fixed_points[i, j] = a.point[fixed_index] - moving_points[i, j] = a.point[moving_index] - return np.array(fixed_points), np.array(moving_points) - else: - num_points = len(annotations) // 2 - annotations = annotations[: num_points * 2] - num_dims = len(annotations[0].point) - fixed_points = np.zeros((num_points, num_dims)) - moving_points = np.zeros((num_points, num_dims)) - for i, a in enumerate(annotations): - props = a.props - if props[1] == 0: - fixed_points[props[0]] = a.point - 
else: - moving_points[props[0]] = a.point - - return np.array(fixed_points), np.array(moving_points) + first_name = dim_names[0] + # TODO can this be avoided? + real_dims_last = first_name not in self.input_dim_names + num_points = len(annotations) + num_dims = len(annotations[0].point) // 2 + fixed_points = np.zeros((num_points, num_dims)) + moving_points = np.zeros((num_points, num_dims)) + for i, a in enumerate(annotations): + for j in range(num_dims): + fixed_index = j + num_dims if real_dims_last else j + moving_index = j if real_dims_last else j + num_dims + fixed_points[i, j] = a.point[fixed_index] + moving_points[i, j] = a.point[moving_index] + return np.array(fixed_points), np.array(moving_points) def update_registered_layers(self, s: neuroglancer.ViewerState): if self.affine is not None: @@ -612,7 +579,7 @@ def update_registered_layers(self, s: neuroglancer.ViewerState): def estimate_affine(self, s: neuroglancer.ViewerState): annotations = s.layers[self.annotations_name].annotations - if len(annotations) == 0 or (not self.two_coord_spaces and len(annotations) < 2): + if len(annotations) == 0: return False dim_names = s.dimensions.names @@ -703,14 +670,6 @@ def add_mapping_args(ap: argparse.ArgumentParser): default="annotation", required=False, ) - ap.add_argument( - "--single-coord-space", - "-s", - action="store_true", - help="Use a single coordinate space for both fixed and moving layers (default is two coord spaces)", - default=False, - required=False, - ) def handle_args(): From b105a58d7baa4fe9408073f75c00101b2dc39ed4 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Tue, 7 Oct 2025 16:49:08 +0200 Subject: [PATCH 28/30] refactor: continue to clarify with only one path --- .../examples/example_linear_registration.py | 104 ++++++++++-------- 1 file changed, 60 insertions(+), 44 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 3ff9c14c8..87e5d71d9 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -264,7 +264,6 @@ def __init__(self, args): self.output_dim_names = [] self.affine = None self.co_ords_ready = False - self.stored_last_grouped_points_num = -1 self.ready_state = ReadyState.NOT_READY self.last_updated_print_time = -1 self.num_dims = 0 @@ -293,8 +292,7 @@ def update(self): self.last_updated_print_time = current_time if self.ready_state == ReadyState.COORDS_READY: self.setup_registration_layers() - if self.ready_state == ReadyState.ERROR: - # TODO put in right place + elif self.ready_state == ReadyState.ERROR: self._set_status_message( "help", "Please manually enter second coordinate space information for the moving layers.", @@ -303,12 +301,23 @@ def update(self): self.update_affine() self._clear_status_messages() + def copy_moving_layers_to_left_panel(self): + """Make copies of the moving layers to show the registered result.""" + with self.viewer.txn() as s: + for layer_name in self.moving_layer_names: + copy = deepcopy(s.layers[layer_name]) + copy.name = layer_name + "_registered" + copy.visible = False + s.layers[copy.name] = copy + s.layout.children[0].layers.append(copy.name) + def setup_viewer_after_user_ready(self): + """Called when the user indicates they have placed layers in the two panels.""" if not self.moving_layer_names: moving_layers = self.get_state().layout.children[1].layers self.moving_layer_names = moving_layers - print(self.moving_layer_names) self._moving_idx = 0 + self.copy_moving_layers_to_left_panel() 
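
With both coordinate spaces present, each marker annotation stores the fixed and moving coordinates concatenated in the global dimension order, and split_points_into_pairs slices them apart point by point. Below is a simplified, vectorised sketch of that slicing; split_points and fixed_dims_first are illustrative names, with fixed_dims_first standing in for the real_dims_last check on how the viewer happens to order the dimensions.

    import numpy as np

    def split_points(points, fixed_dims_first=True):
        """Split (N, 2*d) concatenated points into fixed and moving (N, d) arrays."""
        points = np.asarray(points, dtype=float)
        d = points.shape[1] // 2
        first, second = points[:, :d], points[:, d:]
        # Which half is "fixed" depends on the global dimension ordering.
        return (first, second) if fixed_dims_first else (second, first)
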
self.setup_second_coord_space() self._set_status_message( "help", @@ -316,6 +325,7 @@ def setup_viewer_after_user_ready(self): ) def setup_initial_two_panel_layout(self): + """Set up a two panel layout if not already present.""" with self.viewer.txn() as s: all_layer_names = [layer.name for layer in s.layers] if len(all_layer_names) >= 2: @@ -339,6 +349,7 @@ def setup_initial_two_panel_layout(self): # s.layout.children[1].projectionScale.link = "unlinked" def setup_second_coord_space(self): + """Set up the second coordinate space for the moving layers.""" layer_name = self.moving_layer_names[self._moving_idx] info_future = self.viewer.volume_info(layer_name) info_future.add_done_callback(lambda f: self.save_coord_space_info(f)) @@ -380,17 +391,6 @@ def setup_registration_layers(self): dimensions=create_coord_space_matching_global_dims(s.dimensions) ) - # Make a copy of all the moving layers but in original coord space - # and as part of the left hand panel - for layer_name in self.moving_layer_names: - copy = deepcopy(s.layers[layer_name]) - copy.name = layer_name + "_registered" - copy.visible = False - for source in copy.source: - # TODO might need mapping, probably better is to copy before making the second coord space - source.transform = None - s.layers[copy.name] = copy - s.layout.children[0].layers.append(copy.name) s.layers[self.annotations_name].tool = "annotatePoint" s.selected_layer.layer = self.annotations_name s.selected_layer.visible = True @@ -400,7 +400,6 @@ def setup_registration_layers(self): self.ready_state = ReadyState.READY def setup_panel_coordinates(self, s: neuroglancer.ViewerState): - dimensions = s.dimensions.names s.layout.children[1].displayDimensions.link = "unlinked" s.layout.children[1].displayDimensions.value = self.output_dim_names[:3] s.layout.children[0].displayDimensions.link = "unlinked" @@ -417,29 +416,30 @@ def save_coord_space_info(self, info_future): print( "Please manually enter the coordinate space information as a second co-ordinate space." ) - self.ready_state = ReadyState.ERROR - self._moving_idx += 1 - if self._moving_idx < len(self.moving_layer_names): - self.setup_second_coord_space() - return - # TODO not sure how to recover from this, maybe ask user to manually - # input the coord space info? 
- # TODO clean up - done = len(self.stored_map_moving_name_to_data_coords) == len( - self.moving_layer_names - ) - if not done: - self._moving_idx += 1 + else: + self.stored_map_moving_name_to_data_coords[self.moving_name] = ( + result.dimensions + ) + + self._moving_idx += 1 + if self._moving_idx < len(self.moving_layer_names): self.setup_second_coord_space() return - self.stored_map_moving_name_to_data_coords[self.moving_name] = result.dimensions + # If we get here we have all the coord spaces ready and can update viewer + self.ready_state = ReadyState.COORDS_READY with self.viewer.txn() as s: for layer_name in self.moving_layer_names: - input_dims = self.stored_map_moving_name_to_data_coords[layer_name] - print(layer_name, input_dims) + input_dims = self.stored_map_moving_name_to_data_coords.get( + layer_name, None + ) + if input_dims is None: + self.ready_state = ReadyState.ERROR + continue self.input_dim_names = input_dims.names self.stored_map_moving_name_to_viewer_coords[layer_name] = [] + # TODO this logic I think needs to be improved because volume info + # is actually the mapped dims not the input dims for source in s.layers[layer_name].source: if source.transform is None: output_dims = new_coord_space_names(input_dims, "2") @@ -467,10 +467,9 @@ def save_coord_space_info(self, info_future): new_coord_space ) source.transform = new_coord_space - if not self.ready_state == ReadyState.ERROR: - self.ready_state = ReadyState.COORDS_READY + return self.ready_state - def toggle_registered_visibility(self, _): + def continue_workflow(self, _): if self.ready_state == ReadyState.NOT_READY: self.setup_viewer_after_user_ready() return @@ -486,12 +485,11 @@ def toggle_registered_visibility(self, _): def setup_viewer_actions(self): viewer = self.viewer - viewer.actions.add( - "toggleRegisteredVisibility", self.toggle_registered_visibility - ) + name = "continueLinearRegistrationWorkflow" + viewer.actions.add(name, self.continue_workflow) with viewer.config_state.txn() as s: - s.input_event_bindings.viewer["keyt"] = "toggleRegisteredVisibility" + s.input_event_bindings.viewer["keyt"] = name def on_state_changed(self): self.viewer.defer_callback(self.update) @@ -501,12 +499,31 @@ def update_affine(self): with self.viewer.txn() as s: self.estimate_affine(s) + def get_moving_and_fixed_dims( + self, s: neuroglancer.ViewerState | None, dim_names=() + ): + if s is None: + dimensions = dim_names + else: + dimensions = s.dimensions.names + # The moving dims are the same as the input dims, but end with an extra number + # to indicate the second coord space + # e.g. x -> x2, y -> y2, z -> z2 + moving_dims = [] + fixed_dims = [] + for dim in dimensions: + if dim[:-1] in dimensions: + moving_dims.append(dim) + else: + fixed_dims.append(dim) + return fixed_dims, moving_dims + def split_points_into_pairs(self, annotations, dim_names): if len(annotations) == 0: return np.zeros((0, 0)), np.zeros((0, 0)) first_name = dim_names[0] - # TODO can this be avoided? 
- real_dims_last = first_name not in self.input_dim_names + fixed_dims, _ = self.get_moving_and_fixed_dims(dim_names) + real_dims_last = first_name not in fixed_dims num_points = len(annotations) num_dims = len(annotations[0].point) // 2 fixed_points = np.zeros((num_points, num_dims)) @@ -538,7 +555,6 @@ def update_registered_layers(self, s: neuroglancer.ViewerState): output_dimensions=v, matrix=transform, ) - print(self.combine_affine_across_dims(s, self.affine).tolist()) s.layers[self.annotations_name].source[ 0 ].transform = neuroglancer.CoordinateSpaceTransform( @@ -611,10 +627,10 @@ def get_registration_info(self): fixed_points, moving_points = self.split_points_into_pairs( annotations, dim_names ) - transformed_points = self.transform_points_with_affine(moving_points) info["fixedPoints"] = fixed_points.tolist() info["movingPoints"] = moving_points.tolist() - if self.affine is not None and transformed_points is not None: + if self.affine is not None: + transformed_points = transform_points(self.affine, moving_points) info["transformedPoints"] = transformed_points.tolist() info["affineTransform"] = self.affine.tolist() return info From 363509bc2ef860baea6e20619d0f81dd8e492b30 Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Wed, 8 Oct 2025 17:30:53 +0200 Subject: [PATCH 29/30] fix: address various assumptions to make pipeline more stable --- .../examples/example_linear_registration.py | 113 +++++++++--------- 1 file changed, 59 insertions(+), 54 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 87e5d71d9..0a6abe85b 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -4,6 +4,7 @@ from copy import deepcopy from enum import Enum from time import ctime, time +from typing import Union import neuroglancer import neuroglancer.cli @@ -47,6 +48,7 @@ def debounced(*args, **kwargs): return decorator +# TODO further test these fits, 2 point fit not right at the moment def fit_model(fixed_points: np.ndarray, moving_points: np.ndarray): """ Choose the appropriate model based on number of points and dimensions. 
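
fit_model and transform_points do the actual estimation, and the TODO above flags that the low point-count fits still need work. The full-affine case is the standard homogeneous least-squares fit, sketched below with illustrative helper names, under the assumption that the affine is stored as a (d, d+1) matrix mapping moving points onto fixed points; the direction ultimately depends on how the transform is attached to the registered layers.

    import numpy as np

    def fit_affine_least_squares(fixed_points, moving_points):
        """Fit A of shape (d, d+1) so that A @ [moving, 1] approximates fixed."""
        moving = np.asarray(moving_points, dtype=float)
        fixed = np.asarray(fixed_points, dtype=float)
        ones = np.ones((moving.shape[0], 1))
        X = np.hstack([moving, ones])                   # (N, d+1)
        # Solve X @ B = fixed in the least-squares sense; a unique full
        # affine needs at least d+1 non-degenerate point pairs.
        B, *_ = np.linalg.lstsq(X, fixed, rcond=None)   # (d+1, d)
        return B.T                                      # (d, d+1)

    def apply_affine(affine, points):
        """Apply a (d, d+1) affine to (N, d) points."""
        points = np.asarray(points, dtype=float)
        ones = np.ones((points.shape[0], 1))
        return np.hstack([points, ones]) @ affine.T
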
@@ -164,7 +166,7 @@ def transform_points(affine: np.ndarray, points: np.ndarray): # Only used if no data provided -def _create_demo_data(size: int | tuple = 60, radius: float = 20): +def _create_demo_data(size: Union[int, tuple] = 60, radius: float = 20): data_size = (size,) * NUM_DEMO_DIMS if isinstance(size, int) else size data = np.zeros(data_size, dtype=np.uint8) if NUM_DEMO_DIMS == 2: @@ -255,23 +257,22 @@ class LinearRegistrationWorkflow: def __init__(self, args): starting_state = args.state self.annotations_name = args.annotations_name - self.status_timers = {} - self.stored_points = [[], []] + + self.stored_points = ([], []) self.stored_map_moving_name_to_data_coords = {} self.stored_map_moving_name_to_viewer_coords = {} - self.moving_layer_names = [] - self.input_dim_names = [] - self.output_dim_names = [] self.affine = None - self.co_ords_ready = False self.ready_state = ReadyState.NOT_READY - self.last_updated_print_time = -1 - self.num_dims = 0 self.viewer = neuroglancer.Viewer() self.viewer.shared_state.add_changed_callback( lambda: self.viewer.defer_callback(self.on_state_changed) ) + self._last_updated_print_time = -1 + self._status_timers = {} + self._current_moving_layer_idx = 0 + self._cached_moving_layer_names = [] + if starting_state is None: self._add_demo_data_to_viewer() else: @@ -287,9 +288,9 @@ def __init__(self, args): def update(self): """Primary update loop, called whenever the viewer state changes.""" current_time = time() - if current_time - self.last_updated_print_time > 5: + if current_time - self._last_updated_print_time > 5: print(f"Viewer states are successfully syncing at {ctime()}") - self.last_updated_print_time = current_time + self._last_updated_print_time = current_time if self.ready_state == ReadyState.COORDS_READY: self.setup_registration_layers() elif self.ready_state == ReadyState.ERROR: @@ -301,10 +302,17 @@ def update(self): self.update_affine() self._clear_status_messages() + def get_moving_layer_names(self, s: neuroglancer.ViewerState): + right_panel_layers = [ + n for n in s.layout.children[1].layers if n != self.annotations_name + ] + return right_panel_layers + def copy_moving_layers_to_left_panel(self): """Make copies of the moving layers to show the registered result.""" with self.viewer.txn() as s: - for layer_name in self.moving_layer_names: + self._cached_moving_layer_names = self.get_moving_layer_names(s) + for layer_name in self._cached_moving_layer_names: copy = deepcopy(s.layers[layer_name]) copy.name = layer_name + "_registered" copy.visible = False @@ -313,10 +321,6 @@ def copy_moving_layers_to_left_panel(self): def setup_viewer_after_user_ready(self): """Called when the user indicates they have placed layers in the two panels.""" - if not self.moving_layer_names: - moving_layers = self.get_state().layout.children[1].layers - self.moving_layer_names = moving_layers - self._moving_idx = 0 self.copy_moving_layers_to_left_panel() self.setup_second_coord_space() self._set_status_message( @@ -350,7 +354,7 @@ def setup_initial_two_panel_layout(self): def setup_second_coord_space(self): """Set up the second coordinate space for the moving layers.""" - layer_name = self.moving_layer_names[self._moving_idx] + layer_name = self._cached_moving_layer_names[self._current_moving_layer_idx] info_future = self.viewer.volume_info(layer_name) info_future.add_done_callback(lambda f: self.save_coord_space_info(f)) @@ -361,7 +365,7 @@ def combine_affine_across_dims(self, s: neuroglancer.ViewerState, affine): applies to all dims so we need to create 
a larger matrix """ all_dims = s.dimensions.names - moving_dims = self.output_dim_names + moving_dims = self.get_fixed_and_moving_dims(None, all_dims) full_matrix = np.zeros((len(all_dims), len(all_dims) + 1)) for i, dim in enumerate(all_dims): @@ -377,12 +381,13 @@ def combine_affine_across_dims(self, s: neuroglancer.ViewerState, affine): full_matrix[i, -1] = affine[moving_i, -1] return full_matrix + def has_two_coord_spaces(self, s: neuroglancer.ViewerState): + fixed_dims, moving_dims = self.get_fixed_and_moving_dims(s) + return len(fixed_dims) == len(moving_dims) + def setup_registration_layers(self): with self.viewer.txn() as s: - dimensions = s.dimensions - if not self.ready_state == ReadyState.ERROR or ( - len(dimensions.names) != self.num_dims - ): + if self.ready_state == ReadyState.ERROR or not self.has_two_coord_spaces(s): return # Make the annotation layer if needed @@ -400,13 +405,16 @@ def setup_registration_layers(self): self.ready_state = ReadyState.READY def setup_panel_coordinates(self, s: neuroglancer.ViewerState): + fixed_dims, moving_dims = self.get_fixed_and_moving_dims(s) s.layout.children[1].displayDimensions.link = "unlinked" - s.layout.children[1].displayDimensions.value = self.output_dim_names[:3] + s.layout.children[1].displayDimensions.value = moving_dims[:3] s.layout.children[0].displayDimensions.link = "unlinked" - s.layout.children[0].displayDimensions.value = self.input_dim_names[:3] + s.layout.children[0].displayDimensions.value = fixed_dims[:3] def save_coord_space_info(self, info_future): - self.moving_name = self.moving_layer_names[self._moving_idx] + self.moving_name = self._cached_moving_layer_names[ + self._current_moving_layer_idx + ] try: result = info_future.result() except Exception as e: @@ -421,48 +429,32 @@ def save_coord_space_info(self, info_future): result.dimensions ) - self._moving_idx += 1 - if self._moving_idx < len(self.moving_layer_names): + self._current_moving_layer_idx += 1 + if self._current_moving_layer_idx < len(self._cached_moving_layer_names): self.setup_second_coord_space() return # If we get here we have all the coord spaces ready and can update viewer self.ready_state = ReadyState.COORDS_READY with self.viewer.txn() as s: - for layer_name in self.moving_layer_names: - input_dims = self.stored_map_moving_name_to_data_coords.get( + for layer_name in self._cached_moving_layer_names: + output_dims = self.stored_map_moving_name_to_data_coords.get( layer_name, None ) - if input_dims is None: + if output_dims is None: self.ready_state = ReadyState.ERROR continue - self.input_dim_names = input_dims.names self.stored_map_moving_name_to_viewer_coords[layer_name] = [] - # TODO this logic I think needs to be improved because volume info - # is actually the mapped dims not the input dims for source in s.layers[layer_name].source: if source.transform is None: - output_dims = new_coord_space_names(input_dims, "2") + output_dims = new_coord_space_names(output_dims, "2") else: output_dims = new_coord_space_names( source.transform.output_dimensions, "2" ) - self.output_dim_names = output_dims.names new_coord_space = neuroglancer.CoordinateSpaceTransform( - input_dimensions=input_dims, output_dimensions=output_dims, ) - self.num_dims = max( - self.num_dims, - 2 - * len( - [ - n - for n in new_coord_space.output_dimensions.names - if not n.endswith(("'", "^", "#")) - ] - ), - ) self.stored_map_moving_name_to_viewer_coords[layer_name].append( new_coord_space ) @@ -478,7 +470,7 @@ def continue_workflow(self, _): elif self.ready_state == 
ReadyState.ERROR: self.setup_registration_layers() with self.viewer.txn() as s: - for layer_name in self.moving_layer_names: + for layer_name in self.get_moving_layer_names(s): registered_name = layer_name + "_registered" is_registered_visible = s.layers[registered_name].visible s.layers[registered_name].visible = not is_registered_visible @@ -499,8 +491,8 @@ def update_affine(self): with self.viewer.txn() as s: self.estimate_affine(s) - def get_moving_and_fixed_dims( - self, s: neuroglancer.ViewerState | None, dim_names=() + def get_fixed_and_moving_dims( + self, s: Union[neuroglancer.ViewerState, None], dim_names: list | tuple = () ): if s is None: dimensions = dim_names @@ -522,7 +514,7 @@ def split_points_into_pairs(self, annotations, dim_names): if len(annotations) == 0: return np.zeros((0, 0)), np.zeros((0, 0)) first_name = dim_names[0] - fixed_dims, _ = self.get_moving_and_fixed_dims(dim_names) + fixed_dims, _ = self.get_fixed_and_moving_dims(None, dim_names) real_dims_last = first_name not in fixed_dims num_points = len(annotations) num_dims = len(annotations[0].point) // 2 @@ -596,6 +588,18 @@ def update_registered_layers(self, s: neuroglancer.ViewerState): def estimate_affine(self, s: neuroglancer.ViewerState): annotations = s.layers[self.annotations_name].annotations if len(annotations) == 0: + if len(self.stored_points[0]) > 0: + # Again not sure if need channels + _, moving_dims = self.get_fixed_and_moving_dims(s) + n_dims = len(moving_dims) + affine = np.zeros(shape=(n_dims, n_dims + 1)) + for i in range(n_dims): + affine[i][i] = 1 + print(affine) + self.affine = affine + self.update_registered_layers(s) + self.stored_points = ([], []) + return True return False dim_names = s.dimensions.names @@ -627,6 +631,7 @@ def get_registration_info(self): fixed_points, moving_points = self.split_points_into_pairs( annotations, dim_names ) + info["annotations"] = annotations.tolist() info["fixedPoints"] = fixed_points.tolist() info["movingPoints"] = moving_points.tolist() if self.affine is not None: @@ -651,18 +656,18 @@ def __str__(self): def _clear_status_messages(self): to_pop = [] - for k, v in self.status_timers.items(): + for k, v in self._status_timers.items(): if time() - v > MESSAGE_DURATION: to_pop.append(k) for k in to_pop: with self.viewer.config_state.txn() as s: s.status_messages.pop(k, None) - self.status_timers.pop(k) + self._status_timers.pop(k) def _set_status_message(self, key: str, message: str): with self.viewer.config_state.txn() as s: s.status_messages[key] = message - self.status_timers[key] = time() + self._status_timers[key] = time() def _transform_points_with_affine(self, points: np.ndarray): if self.affine is not None: From 1fa3d05e9f34df8610090e8a6023575e3a61b63c Mon Sep 17 00:00:00 2001 From: Sean Martin Date: Wed, 8 Oct 2025 17:51:17 +0200 Subject: [PATCH 30/30] feat: small updates --- .../examples/example_linear_registration.py | 29 ++++++++++++++----- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/python/examples/example_linear_registration.py b/python/examples/example_linear_registration.py index 0a6abe85b..592291af4 100644 --- a/python/examples/example_linear_registration.py +++ b/python/examples/example_linear_registration.py @@ -257,12 +257,14 @@ class LinearRegistrationWorkflow: def __init__(self, args): starting_state = args.state self.annotations_name = args.annotations_name + self.ready_state = ( + ReadyState.READY if args.continue_workflow else ReadyState.NOT_READY + ) self.stored_points = ([], []) 
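
Two pieces of matrix bookkeeping sit around this point. combine_affine_across_dims lifts the (d, d+1) affine estimated over the moving dimensions into the viewer's full coordinate space while leaving every other dimension untouched, and when all annotations are deleted estimate_affine resets to an identity affine (the zero-matrix-plus-diagonal loop is equivalent to np.eye(n_dims, n_dims + 1)). A sketch of the lifting step, with embed_affine as an illustrative name and assuming the affine's rows and columns follow the order of the moving dimension names:

    import numpy as np

    def embed_affine(all_dims, moving_dims, affine):
        """Place a (d, d+1) affine over `moving_dims` inside an N-dim space."""
        n = len(all_dims)
        full = np.zeros((n, n + 1))
        for i, dim in enumerate(all_dims):
            if dim in moving_dims:
                mi = moving_dims.index(dim)
                for j, other in enumerate(all_dims):
                    if other in moving_dims:
                        full[i, j] = affine[mi, moving_dims.index(other)]
                full[i, -1] = affine[mi, -1]   # translation component
            else:
                full[i, i] = 1.0               # dimension left untouched
        return full
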
self.stored_map_moving_name_to_data_coords = {} self.stored_map_moving_name_to_viewer_coords = {} self.affine = None - self.ready_state = ReadyState.NOT_READY self.viewer = neuroglancer.Viewer() self.viewer.shared_state.add_changed_callback( lambda: self.viewer.defer_callback(self.on_state_changed) @@ -278,12 +280,13 @@ def __init__(self, args): else: self.viewer.set_state(starting_state) - self._set_status_message( - "help", - "Place fixed (reference) layers in the left hand panel, and moving layers (to be registered) in the right hand panel. Then press 't' once you have completed this setup.", - ) - self.setup_initial_two_panel_layout() self.setup_viewer_actions() + if self.ready_state != ReadyState.READY: + self._set_status_message( + "help", + "Place fixed (reference) layers in the left hand panel, and moving layers (to be registered) in the right hand panel. Then press 't' once you have completed this setup.", + ) + self.setup_initial_two_panel_layout() def update(self): """Primary update loop, called whenever the viewer state changes.""" @@ -300,7 +303,12 @@ def update(self): ) elif self.ready_state == ReadyState.READY: self.update_affine() - self._clear_status_messages() + # TODO consider allowing to dump to disk + self._set_status_message( + "help", + "Place points to inform registration by first placing the crosshair in the left/right panel to the correct place for the fixed/moving data, and then placing an actual annotation with ctrl+right click in the other panel. You can move the annotations afterwards if needed. Press 't' to toggle visibility of the registered layer.", + ) + # self._clear_status_messages() def get_moving_layer_names(self, s: neuroglancer.ViewerState): right_panel_layers = [ @@ -625,6 +633,7 @@ def estimate_affine(self, s: neuroglancer.ViewerState): def get_registration_info(self): info = {} + # TODO consider also dumping the full viewer state with self.viewer.txn() as s: annotations = s.layers[self.annotations_name].annotations dim_names = s.dimensions.names @@ -691,6 +700,12 @@ def add_mapping_args(ap: argparse.ArgumentParser): default="annotation", required=False, ) + ap.add_argument( + "--continue-workflow", + "-c", + action="store_true", + help="Indicates that we are continuing the workflow from a previously saved state. This will skip the inital setup steps and resume from the affine estimation step directly.", + ) def handle_args():
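
get_registration_info already collects the point pairs and the current affine, and the TODOs above suggest persisting them. A minimal sketch of the "dump to disk" idea; save_registration_info and the filename are hypothetical, and only the plain-list keys are kept because the raw annotation objects may not be directly JSON-serialisable.

    import json

    def save_registration_info(workflow, path="registration_info.json"):
        """Write the current point pairs and affine estimate to a JSON file."""
        info = workflow.get_registration_info()
        keys = ("fixedPoints", "movingPoints", "transformedPoints", "affineTransform")
        serialisable = {k: info[k] for k in keys if k in info}
        with open(path, "w") as f:
            json.dump(serialisable, f, indent=2)

Paired with the new --continue-workflow flag, a dump like this would let a saved viewer state and its estimated affine be picked up again in a later session.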