From 03d6b2a53fb6b6ed5713135010d07de06d4915ea Mon Sep 17 00:00:00 2001 From: emiglietta Date: Fri, 16 Aug 2024 13:47:11 -0400 Subject: [PATCH 01/20] load measurementtemplate.py --- active_plugins/measureRWCperObject.py | 676 ++++++++++++++++++++++++++ 1 file changed, 676 insertions(+) create mode 100644 active_plugins/measureRWCperObject.py diff --git a/active_plugins/measureRWCperObject.py b/active_plugins/measureRWCperObject.py new file mode 100644 index 00000000..8488b678 --- /dev/null +++ b/active_plugins/measureRWCperObject.py @@ -0,0 +1,676 @@ +################################# +# +# Imports from useful Python libraries +# +################################# + +import centrosome.cpmorphology +import centrosome.zernike +import numpy +import scipy.ndimage + +################################# +# +# Imports from CellProfiler +# +################################## + +# Reference +# +# If you're using a technique or method that's used in this module +# and has a publication attached to it, please include the paper link below. +# Otherwise, remove the line below and remove the "References" section from __doc__. +# + +cite_paper_link = "https://doi.org/10.1016/1047-3203(90)90014-M" + +__doc__ = """\ +MeasurementTemplate +=================== + +**MeasurementTemplate** - an example measurement module. It's recommended to +put a brief description of this module here and go into more detail below. + +This is an example of a module that measures a property of an image both +for the image as a whole and for every object in the image. It +demonstrates how to load an image, how to load an object and how to +record a measurement. + +The text you see here will be displayed as the help for your module, formatted +as `reStructuredText `_. + +Note whether or not this module supports 3D image data and respects masks. +A module which respects masks applies an image's mask and operates only on +the image data not obscured by the mask. Update the table below to indicate +which image processing features this module supports. + +| + +============ ============ =============== +Supports 2D? Supports 3D? Respects masks? +============ ============ =============== +YES NO YES +============ ============ =============== + +See also +^^^^^^^^ + +Is there another **Module** that is related to this one? If so, refer +to that **Module** in this section. Otherwise, this section can be omitted. + +What do I need as input? +^^^^^^^^^^^^^^^^^^^^^^^^ + +Are there any assumptions about input data someone using this module +should be made aware of? For example, is there a strict requirement that +image data be single-channel, or that the foreground is brighter than +the background? Describe any assumptions here. + +This section can be omitted if there is no requirement on the input. + +What do I get as output? +^^^^^^^^^^^^^^^^^^^^^^^^ + +Describe the output of this module. This is necessary if the output is +more complex than a single image. For example, if there is data displayed +over the image then describe what the data represents. + +This section can be omitted if there is no specialized output. + +Measurements made by this module +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Describe the measurements made by this module. Typically, measurements +are described in the following format: + +**Measurement category:** + +- *MeasurementName*: A brief description of the measurement. +- *MeasurementName*: A brief description of the measurement. + +**Measurement category:** + +- *MeasurementName*: A brief description of the measurement. 
+- *MeasurementName*: A brief description of the measurement. + +This module makes the following measurements: + +**MT** (the MeasurementTemplate category): + +- *Intensity_[IMAGE_NAME]_N[Ni]_M[Mj]*: the Zernike feature of the + IMAGE_NAME image with radial degree Ni and Azimuthal degree Mj, + Mj >= 0. +- *Intensity_[IMAGE_NAME]_N[Ni]_MM[Mj]*: the Zernike feature of + the IMAGE_NAME image with radial degree Ni and Azimuthal degree + Mj, Mj < 0. + +Technical notes +^^^^^^^^^^^^^^^ + +Include implementation details or notes here. Additionally provide any +other background information about this module, including definitions +or adopted conventions. Information which may be too specific to fit into +the general description should be provided here. + +Omit this section if there is no technical information to mention. + +The Zernike features measured here are themselves interesting. You can +reconstruct the image of a cell, approximately, by constructing the Zernike +functions on a unit circle, multiplying the real parts by the corresponding +features for positive M, multiplying the imaginary parts by the corresponding +features for negative M and adding real and imaginary parts. + +References +^^^^^^^^^^ + +Provide citations here, if appropriate. Citations are formatted as a list and, +wherever possible, include a link to the original work. For example, + +- Meyer F, Beucher S (1990) “Morphological segmentation.” *J Visual + Communication and Image Representation* 1, 21-46. + {cite_paper_link} +""" + +# +# Constants +# +# It's good programming practice to replace things like strings with +# constants if they will appear more than once in your program. That way, +# if someone wants to change the text, that text will change everywhere. +# Also, you can't misspell it by accident. +# +from cellprofiler_core.constants.measurement import COLTYPE_FLOAT +from cellprofiler_core.module import Module +from cellprofiler_core.setting.subscriber import ImageSubscriber, LabelSubscriber +from cellprofiler_core.setting.text import Integer + +"""This is the measurement template category""" +C_MEASUREMENT_TEMPLATE = "MT" + + +# +# The module class +# +# Your module should "inherit" from cellprofiler_core.module.Module. +# This means that your module will use the methods from Module unless +# you re-implement them. You can let Module do most of the work and +# implement only what you need. +# +class MeasurementTemplate(Module): + # + # The module starts by declaring the name that's used for display, + # the category under which it is stored and the variable revision + # number which can be used to provide backwards compatibility if + # you add user-interface functionality later. + # + module_name = "MeasurementTemplate" + category = "Measurement" + variable_revision_number = 1 + # + # Citation + # + # If you're using a technique or method that's used in this module + # and has a publication attached to it, please include the paper link below. + # Edit accordingly and add the link for the paper as "https://doi.org/XXXX". + # If no citation is necessary, remove the "doi" dictionary below. + # + + doi = {"Please cite the following when using ImageTemplate:": 'https://doi.org/10.1016/1047-3203(90)90014-M', + "If you're also using specific technique X, cite:": 'https://doi.org/10.1016/1047-3203(90)90014-M'} + # + # "create_settings" is where you declare the user interface elements + # (the "settings") which the user will use to customize your module. 
+ # + # You can look at other modules and in cellprofiler_core.settings for + # settings you can use. + # + def create_settings(self): + # + # The ImageNameSubscriber "subscribes" to all ImageNameProviders in + # prior modules. Modules before yours will put images into CellProfiler. + # The ImageSubscriber gives your user a list of these images + # which can then be used as inputs in your module. + # + self.input_image_name = ImageSubscriber( + # The text to the left of the edit box + text="Input image name:", + # reST help that gets displayed when the user presses the + # help button to the right of the edit box. + doc="""\ +This is the image that the module operates on. You can choose any image +that is made available by a prior module. + +**MeasurementTemplate** will measure something about this image. +""", + ) + + # + # The ObjectNameSubscriber is similar to the ImageNameSubscriber. + # It will ask the user which object to pick from the list of + # objects provided by upstream modules. + # + self.input_object_name = LabelSubscriber( + text="Input object name", + doc="These are the objects that the module operates on.", + ) + + # + # The radial degree is the "N" parameter in the Zernike - how many + # inflection points there are, radiating out from the center. Higher + # N means more features and a more detailed description + # + # The setting is an integer setting, bounded between 1 and 50. + # N = 50 generates 1200 features! + # + self.radial_degree = Integer( + text="Radial degree", + value=10, + minval=1, + maxval=50, + doc="""\ +Calculate all Zernike features up to the given radial +degree. The Zernike function is parameterized by a radial +and azimuthal degree. The module will calculate all Zernike +features for all azimuthal degrees up to and including the +radial degree you enter here. +""", + ) + + # + # The "settings" method tells CellProfiler about the settings you + # have in your module. CellProfiler uses the list for saving + # and restoring values for your module when it saves or loads a + # pipeline file. + # + # This module does not have a "visible_settings" method. CellProfiler + # will use "settings" to make the list of user-interface elements + # that let the user configure the module. See imagetemplate.py for + # a template for visible_settings that you can cut and paste here. + # + def settings(self): + return [self.input_image_name, self.input_object_name, self.radial_degree] + + # + # CellProfiler calls "run" on each image set in your pipeline. + # + def run(self, workspace): + # + # Get the measurements object - we put the measurements we + # make in here + # + measurements = workspace.measurements + + # + # We record some statistics which we will display later. + # We format them so that Matplotlib can display them in a table. + # The first row is a header that tells what the fields are. + # + statistics = [["Feature", "Mean", "Median", "SD"]] + + # + # Put the statistics in the workspace display data so we + # can get at them when we display + # + workspace.display_data.statistics = statistics + + # + # Get the input image and object. You need to get the .value + # because otherwise you'll get the setting object instead of + # the string name. + # + input_image_name = self.input_image_name.value + input_object_name = self.input_object_name.value + + ################################################################ + # + # GETTING AN IMAGE FROM THE IMAGE SET + # + # Get the image set. The image set has all of the images in it. 
+ # + image_set = workspace.image_set + # + # Get the input image object. We want a grayscale image here. + # The image set will convert a color image to a grayscale one + # and warn the user. + # + input_image = image_set.get_image(input_image_name, must_be_grayscale=True) + # + # Get the pixels - these are a 2-d Numpy array. + # + pixels = input_image.pixel_data + # + ############################################################### + + ############################################################### + # + # GETTING THE LABELS MATRIX FROM THE OBJECT SET + # + # The object set has all of the objects in it. + # + object_set = workspace.object_set + # + # Get objects from the object set. The most useful array in + # the objects is "objects.segmented" which is a labels matrix + # in which each pixel has an integer value. + # + # The value, "0", is reserved for "background" - a pixel with + # a zero value is not in an object. Each object has an object + # number, starting at "1" and each pixel in the object is + # labeled with that number. + # + # The other useful array is "objects.small_removed_segmented" which + # is another labels matrix. There are objects that touch the edge of + # the image and get filtered out and there are large objects that + # get filtered out. Modules like "IdentifySecondaryObjects" may + # want to associate pixels near the objects in the labels matrix to + # those objects - the large and touching objects should compete with + # the real ones, so you should use "objects.small_removed_segmented" + # for those cases. + # + objects = object_set.get_objects(input_object_name) + labels = objects.segmented + ############################################################### + + # + # The minimum enclosing circle (MEC) is the smallest circle that + # will fit around the object. We get the centers and radii of + # all of the objects at once. You'll see how that lets us + # compute the X and Y position of each pixel in a label all at + # one go. + # + # First, get an array that lists the whole range of indexes in + # the labels matrix. + # + indexes = objects.indices + # + # Then ask for the minimum_enclosing_circle for each object named + # in those indexes. MEC returns the i and j coordinate of the center + # and the radius of the circle and that defines the circle entirely. + # + centers, radius = centrosome.cpmorphology.minimum_enclosing_circle( + labels, indexes + ) + # + # The module computes a measurement based on the image intensity + # inside an object times a Zernike polynomial inscribed in the + # minimum enclosing circle around the object. The details are + # in the "measure_zernike" function. We call into the function with + # an N and M which describe the polynomial. + # + for n, m in self.get_zernike_indexes(): + # Compute the zernikes for each object, returned in an array + zr, zi = self.measure_zernike( + pixels, labels, indexes, centers, radius, n, m + ) + + # Get the name of the measurement feature for this zernike + feature = self.get_measurement_name(n, m) + + # Add a measurement for this kind of object + if m != 0: + measurements.add_measurement(input_object_name, feature, zr) + + # Do the same with -m + feature = self.get_measurement_name(n, -m) + measurements.add_measurement(input_object_name, feature, zi) + else: + # For zero, the total is the sum of real and imaginary parts + measurements.add_measurement(input_object_name, feature, zr + zi) + + # Record the statistics. 
+ zmean = numpy.mean(zr) + zmedian = numpy.median(zr) + zsd = numpy.std(zr) + statistics.append([feature, zmean, zmedian, zsd]) + + # + # "display" lets you use matplotlib to display your results. + # + def display(self, workspace, figure): + statistics = workspace.display_data.statistics + + figure.set_subplots((1, 1)) + + figure.subplot_table(0, 0, statistics) + + def get_zernike_indexes(self, wants_negative=False): + """Get an N x 2 numpy array containing the M and N Zernike degrees + + Use the radial_degree setting to determine which Zernikes to do. + + wants_negative - if True, return both positive and negative M, if false + return only positive + """ + zi = centrosome.zernike.get_zernike_indexes(self.radial_degree.value + 1) + + if wants_negative: + # + # numpy.vstack means concatenate rows of two 2d arrays. + # The multiplication by [1, -1] negates every m, but preserves n. + # zi[zi[:, 1] != 0] picks out only elements with m not equal to zero. + # + zi = numpy.vstack([zi, zi[zi[:, 1] != 0] * numpy.array([1, -1])]) + + # + # Sort by azimuth degree and radial degree so they are ordered + # reasonably + # + order = numpy.lexsort((zi[:, 1], zi[:, 0])) + zi = zi[order, :] + + return zi + + # + # "measure_zernike" makes one Zernike measurement on each object + # + def measure_zernike(self, pixels, labels, indexes, centers, radius, n, m): + """Measure the intensity of the image with Zernike (N, M) + + pixels - the intensity image to be measured + labels - the labels matrix that labels each object with an integer + indexes - the label #s in the image + centers - the centers of the minimum enclosing circle for each object + radius - the radius of the minimum enclosing circle for each object + n, m - the Zernike coefficients. + + See http://en.wikipedia.org/wiki/Zernike_polynomials for an + explanation of the Zernike polynomials + """ + # + # The strategy here is to operate on the whole array instead + # of operating on one object at a time. The most important thing + # is to avoid having to run the Python interpreter once per pixel + # in the image and the second most important is to avoid running + # it per object in case there are hundreds of objects. + # + # We play lots of indexing tricks here to operate on the whole image. + # I'll try to explain some - hopefully, you can reuse. + center_x = centers[:, 1] + center_y = centers[:, 0] + + # + # Make up fake values for 0 (the background). This lets us do our + # indexing tricks. Really, we're going to ignore the background, + # but we want to do the indexing without ignoring the background + # because that's easier. + # + center_x = numpy.hstack([[0], center_x]) + center_y = numpy.hstack([[0], center_y]) + radius = numpy.hstack([[1], radius]) + + # + # Now get one array that's the y coordinate of each pixel and one + # that's the x coordinate. This might look stupid and wasteful, + # but these "arrays" are never actually realized and made into + # real memory. + # + # Using 0.0:X creates floating point indices. This makes the + # data type of x, y consistent with center_x, center_y and + # raidus. Numpy requires consistent data types for in-place + # operations like -= and /=. + # + y, x = numpy.mgrid[0.0 : labels.shape[0], 0.0 : labels.shape[1]] + + # + # Get the x and y coordinates relative to the object centers. + # This uses Numpy broadcasting. For each pixel, we use the + # value in the labels matrix as an index into the appropriate + # one-dimensional array. So we get the value for that object. 
+ # + y -= center_y[labels] + x -= center_x[labels] + + # + # Zernikes take x and y values from zero to one. We scale the + # integer coordinate values by dividing them by the radius of + # the circle. Again, we use the indexing trick to look up the + # values for each object. + # + y /= radius[labels] + x /= radius[labels] + + # + # Now we can get Zernike polynomials per-pixel where each pixel + # value is calculated according to its object's MEC. + # + # We use a mask of all of the non-zero labels so the calculation + # runs a little faster. + # + zernike_polynomial = centrosome.zernike.construct_zernike_polynomials( + x, y, numpy.array([[n, m]]), labels > 0 + ) + + # + # For historical reasons, centrosome didn't multiply by the per/zernike + # normalizing factor: 2*n + 2 / E / pi where E is 2 if m is zero and 1 + # if m is one. We do it here to aid with the reconstruction + # + zernike_polynomial *= (2 * n + 2) / (2 if m == 0 else 1) / numpy.pi + + # + # Multiply the Zernike polynomial by the image to dissect + # the image by the Zernike basis set. + # + output_pixels = pixels * zernike_polynomial[:, :, 0] + + # + # The sum function calculates the sum of the pixel values for + # each pixel in an object, using the labels matrix to name + # the pixels in an object + # + zr = scipy.ndimage.sum(output_pixels.real, labels, indexes) + zi = scipy.ndimage.sum(output_pixels.imag, labels, indexes) + + return zr, zi + + # + # Here, we go about naming the measurements. + # + # Measurement names have parts to them, separated by underbars. + # There's always a category and a feature name + # and sometimes there are modifiers such as the image that + # was measured or the scale at which it was measured. + # + # We have functions that build the names so that we can + # use the same functions in different places. + # + def get_feature_name(self, n, m): + """Return a measurement feature name for the given Zernike""" + # + # Something nice and simple for a name... Intensity_DNA_N4M2 for instance + # + if m >= 0: + return "Intensity_%s_N%dM%d" % (self.input_image_name.value, n, m) + + return "Intensity_%s_N%dMM%d" % (self.input_image_name.value, n, -m) + + def get_measurement_name(self, n, m): + """Return the whole measurement name""" + input_image_name = self.input_image_name.value + + return "_".join([C_MEASUREMENT_TEMPLATE, self.get_feature_name(n, m)]) + + # + # We have to tell CellProfiler about the measurements we produce. + # There are two parts: one that is for database-type modules and one + # that is for the UI. The first part gives a comprehensive list + # of measurement columns produced. The second is more informal and + # tells CellProfiler how to categorize its measurements. + # + # "get_measurement_columns" gets the measurements for use in the database + # or in a spreadsheet. Some modules need this because they + # might make measurements of measurements and need those names. + # + def get_measurement_columns(self, pipeline): + # + # We use a list comprehension here. + # See http://docs.python.org/tutorial/datastructures.html#list-comprehensions + # for how this works. + # + # The first thing in the list is the object being measured. If it's + # the whole image, use IMAGE as the name. + # + # The second thing is the measurement name. + # + # The third thing is the column type. 
See the COLTYPE constants + # in measurement.py for what you can use + # + input_object_name = self.input_object_name.value + + return [ + (input_object_name, self.get_measurement_name(n, m), COLTYPE_FLOAT,) + for n, m in self.get_zernike_indexes(True) + ] + + # + # "get_categories" returns a list of the measurement categories produced + # by this module. It takes an object name - only return categories + # if the name matches. + # + def get_categories(self, pipeline, object_name): + if object_name == self.input_object_name: + return [C_MEASUREMENT_TEMPLATE] + + return [] + + # + # Return the feature names if the object_name and category match + # + def get_measurements(self, pipeline, object_name, category): + if object_name == self.input_object_name and category == C_MEASUREMENT_TEMPLATE: + return ["Intensity"] + + return [] + + # + # This module makes per-image measurements. That means we need + # "get_measurement_images" to distinguish measurements made on two + # different images by this module + # + def get_measurement_images(self, pipeline, object_name, category, measurement): + # + # This might seem wasteful, but UI code can be slow. Just see + # if the measurement is in the list returned by get_measurements + # + if measurement in self.get_measurements(pipeline, object_name, category): + return [self.input_image_name.value] + + return [] + + def get_measurement_scales( + self, pipeline, object_name, category, measurement, image_name + ): + """Get the scales for a measurement + + For the Zernikes, the scales are of the form, N2M2 or N2MM2 for + negative azimuthal degree + """ + + def get_scale(n, m): + if m >= 0: + return "N%dM%d" % (n, m) + + return "N%dMM%d" % (n, -m) + + if image_name in self.get_measurement_images( + pipeline, object_name, category, measurement + ): + return [get_scale(n, m) for n, m in self.get_zernike_indexes(True)] + + return [] + + @staticmethod + def get_image_from_features(radius, feature_dictionary): + """Reconstruct the intensity image from the zernike features + + radius - the radius of the minimum enclosing circle + + feature_dictionary - keys are (n, m) tuples and values are the + magnitudes. + + returns a greyscale image based on the feature dictionary. + """ + i, j = ( + numpy.mgrid[-radius : (radius + 1), -radius : (radius + 1)].astype(float) + / radius + ) + mask = (i * i + j * j) <= 1 + + zernike_indexes = numpy.array(list(feature_dictionary.keys())) + zernike_features = numpy.array(list(feature_dictionary.values())) + + z = centrosome.zernike.construct_zernike_polynomials( + j, i, numpy.abs(zernike_indexes), mask=mask + ) + zn = ( + (2 * zernike_indexes[:, 0] + 2) + / ((zernike_indexes[:, 1] == 0) + 1) + / numpy.pi + ) + z *= zn[numpy.newaxis, numpy.newaxis, :] + z = ( + z.real * (zernike_indexes[:, 1] >= 0)[numpy.newaxis, numpy.newaxis, :] + + z.imag * (zernike_indexes[:, 1] <= 0)[numpy.newaxis, numpy.newaxis, :] + ) + + return numpy.sum(z * zernike_features[numpy.newaxis, numpy.newaxis, :], 2) \ No newline at end of file From 4e673ff23b75f14601b6d375d0200cea02ae7f47 Mon Sep 17 00:00:00 2001 From: emiglietta Date: Sun, 18 Aug 2024 15:36:05 -0400 Subject: [PATCH 02/20] load all necessary parts Now, I have to create the for loop function. 
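A rough sketch of the per-object computation that loop is meant to perform (the helper name rwc_for_single_object and the single-object ranking below are illustrative assumptions, not code from this patch; the RWC block added in this commit still ranks pixels across all objects at once):

import numpy
import scipy.stats

def rwc_for_single_object(first_pixels, second_pixels, labels, label, thr=15.0):
    """Rank Weighted Colocalization coefficients for a single object.

    first_pixels, second_pixels - flattened intensities of the masked pixels
    labels - the matching flattened label values
    label - the object number to measure
    thr - threshold as a percentage of each channel's maximum inside the object
    """
    # Keep only the pixels belonging to this object
    # (assumes the object is non-empty and at least one pixel passes both thresholds)
    inside = labels == label
    fp = first_pixels[inside]
    sp = second_pixels[inside]
    # Dense, zero-based ranks computed within this object only
    rank1 = scipy.stats.rankdata(fp, method="dense") - 1
    rank2 = scipy.stats.rankdata(sp, method="dense") - 1
    r = max(rank1.max(), rank2.max()) + 1
    # Pixels whose ranks agree across the two channels get the largest weights
    weight = (r - numpy.abs(rank1 - rank2)) / r
    # Threshold each channel at thr% of its maximum intensity in this object
    tff = (thr / 100.0) * fp.max()
    tss = (thr / 100.0) * sp.max()
    combined = (fp >= tff) & (sp >= tss)
    rwc1 = (fp[combined] * weight[combined]).sum() / fp[fp >= tff].sum()
    rwc2 = (sp[combined] * weight[combined]).sum() / sp[sp >= tss].sum()
    return rwc1, rwc2
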
I deleted of commented all parts having to do with other measurements (including per image measurements) and also removed if statements, assuming here wants_obects is always True and perform_RWC is always True --- active_plugins/measureRWCperObject.py | 891 +++++++++++++------------- 1 file changed, 448 insertions(+), 443 deletions(-) diff --git a/active_plugins/measureRWCperObject.py b/active_plugins/measureRWCperObject.py index 8488b678..f2be6911 100644 --- a/active_plugins/measureRWCperObject.py +++ b/active_plugins/measureRWCperObject.py @@ -15,120 +15,12 @@ # ################################## -# Reference -# -# If you're using a technique or method that's used in this module -# and has a publication attached to it, please include the paper link below. -# Otherwise, remove the line below and remove the "References" section from __doc__. -# - -cite_paper_link = "https://doi.org/10.1016/1047-3203(90)90014-M" __doc__ = """\ -MeasurementTemplate +MeasureRWCperObject =================== -**MeasurementTemplate** - an example measurement module. It's recommended to -put a brief description of this module here and go into more detail below. - -This is an example of a module that measures a property of an image both -for the image as a whole and for every object in the image. It -demonstrates how to load an image, how to load an object and how to -record a measurement. - -The text you see here will be displayed as the help for your module, formatted -as `reStructuredText `_. - -Note whether or not this module supports 3D image data and respects masks. -A module which respects masks applies an image's mask and operates only on -the image data not obscured by the mask. Update the table below to indicate -which image processing features this module supports. - -| - -============ ============ =============== -Supports 2D? Supports 3D? Respects masks? -============ ============ =============== -YES NO YES -============ ============ =============== - -See also -^^^^^^^^ - -Is there another **Module** that is related to this one? If so, refer -to that **Module** in this section. Otherwise, this section can be omitted. - -What do I need as input? -^^^^^^^^^^^^^^^^^^^^^^^^ - -Are there any assumptions about input data someone using this module -should be made aware of? For example, is there a strict requirement that -image data be single-channel, or that the foreground is brighter than -the background? Describe any assumptions here. - -This section can be omitted if there is no requirement on the input. - -What do I get as output? -^^^^^^^^^^^^^^^^^^^^^^^^ - -Describe the output of this module. This is necessary if the output is -more complex than a single image. For example, if there is data displayed -over the image then describe what the data represents. - -This section can be omitted if there is no specialized output. - -Measurements made by this module -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Describe the measurements made by this module. Typically, measurements -are described in the following format: - -**Measurement category:** - -- *MeasurementName*: A brief description of the measurement. -- *MeasurementName*: A brief description of the measurement. - -**Measurement category:** - -- *MeasurementName*: A brief description of the measurement. -- *MeasurementName*: A brief description of the measurement. 
-This module makes the following measurements: - -**MT** (the MeasurementTemplate category): - -- *Intensity_[IMAGE_NAME]_N[Ni]_M[Mj]*: the Zernike feature of the - IMAGE_NAME image with radial degree Ni and Azimuthal degree Mj, - Mj >= 0. -- *Intensity_[IMAGE_NAME]_N[Ni]_MM[Mj]*: the Zernike feature of - the IMAGE_NAME image with radial degree Ni and Azimuthal degree - Mj, Mj < 0. - -Technical notes -^^^^^^^^^^^^^^^ - -Include implementation details or notes here. Additionally provide any -other background information about this module, including definitions -or adopted conventions. Information which may be too specific to fit into -the general description should be provided here. - -Omit this section if there is no technical information to mention. - -The Zernike features measured here are themselves interesting. You can -reconstruct the image of a cell, approximately, by constructing the Zernike -functions on a unit circle, multiplying the real parts by the corresponding -features for positive M, multiplying the imaginary parts by the corresponding -features for negative M and adding real and imaginary parts. - -References -^^^^^^^^^^ - -Provide citations here, if appropriate. Citations are formatted as a list and, -wherever possible, include a link to the original work. For example, - -- Meyer F, Beucher S (1990) “Morphological segmentation.” *J Visual - Communication and Image Representation* 1, 21-46. - {cite_paper_link} """ # @@ -146,382 +38,495 @@ """This is the measurement template category""" C_MEASUREMENT_TEMPLATE = "MT" +M_PER_OBJECT = "Within each object individually" + +"""Feature name format for the RWC Coefficient measurement""" +F_RWC_FORMAT = "Correlation_RWC_%s_%s" -# -# The module class -# -# Your module should "inherit" from cellprofiler_core.module.Module. -# This means that your module will use the methods from Module unless -# you re-implement them. You can let Module do most of the work and -# implement only what you need. -# class MeasurementTemplate(Module): - # - # The module starts by declaring the name that's used for display, - # the category under which it is stored and the variable revision - # number which can be used to provide backwards compatibility if - # you add user-interface functionality later. - # - module_name = "MeasurementTemplate" + module_name = "MeasureRWCperObject" category = "Measurement" variable_revision_number = 1 - # - # Citation - # - # If you're using a technique or method that's used in this module - # and has a publication attached to it, please include the paper link below. - # Edit accordingly and add the link for the paper as "https://doi.org/XXXX". - # If no citation is necessary, remove the "doi" dictionary below. - # - doi = {"Please cite the following when using ImageTemplate:": 'https://doi.org/10.1016/1047-3203(90)90014-M', - "If you're also using specific technique X, cite:": 'https://doi.org/10.1016/1047-3203(90)90014-M'} - # - # "create_settings" is where you declare the user interface elements - # (the "settings") which the user will use to customize your module. - # - # You can look at other modules and in cellprofiler_core.settings for - # settings you can use. - # def create_settings(self): - # - # The ImageNameSubscriber "subscribes" to all ImageNameProviders in - # prior modules. Modules before yours will put images into CellProfiler. - # The ImageSubscriber gives your user a list of these images - # which can then be used as inputs in your module. 
- # - self.input_image_name = ImageSubscriber( - # The text to the left of the edit box - text="Input image name:", - # reST help that gets displayed when the user presses the - # help button to the right of the edit box. - doc="""\ -This is the image that the module operates on. You can choose any image -that is made available by a prior module. + """Create the initial settings for the module""" -**MeasurementTemplate** will measure something about this image. -""", + self.images_list = ImageListSubscriber( + "Select images to measure", + [], + doc="""Select images to measure the correlation/colocalization in.""", ) - # - # The ObjectNameSubscriber is similar to the ImageNameSubscriber. - # It will ask the user which object to pick from the list of - # objects provided by upstream modules. - # self.input_object_name = LabelSubscriber( text="Input object name", doc="These are the objects that the module operates on.", ) - # - # The radial degree is the "N" parameter in the Zernike - how many - # inflection points there are, radiating out from the center. Higher - # N means more features and a more detailed description - # - # The setting is an integer setting, bounded between 1 and 50. - # N = 50 generates 1200 features! - # - self.radial_degree = Integer( - text="Radial degree", - value=10, - minval=1, - maxval=50, + self.objects_list = LabelListSubscriber( + "Select objects to measure", + [], doc="""\ -Calculate all Zernike features up to the given radial -degree. The Zernike function is parameterized by a radial -and azimuthal degree. The module will calculate all Zernike -features for all azimuthal degrees up to and including the -radial degree you enter here. -""", +Select the object to be measured.""", ) - # - # The "settings" method tells CellProfiler about the settings you - # have in your module. CellProfiler uses the list for saving - # and restoring values for your module when it saves or loads a - # pipeline file. - # - # This module does not have a "visible_settings" method. CellProfiler - # will use "settings" to make the list of user-interface elements - # that let the user configure the module. See imagetemplate.py for - # a template for visible_settings that you can cut and paste here. - # - def settings(self): - return [self.input_image_name, self.input_object_name, self.radial_degree] - - # - # CellProfiler calls "run" on each image set in your pipeline. - # - def run(self, workspace): - # - # Get the measurements object - we put the measurements we - # make in here - # - measurements = workspace.measurements - - # - # We record some statistics which we will display later. - # We format them so that Matplotlib can display them in a table. - # The first row is a header that tells what the fields are. - # - statistics = [["Feature", "Mean", "Median", "SD"]] - - # - # Put the statistics in the workspace display data so we - # can get at them when we display - # - workspace.display_data.statistics = statistics - - # - # Get the input image and object. You need to get the .value - # because otherwise you'll get the setting object instead of - # the string name. - # - input_image_name = self.input_image_name.value - input_object_name = self.input_object_name.value - - ################################################################ - # - # GETTING AN IMAGE FROM THE IMAGE SET - # - # Get the image set. The image set has all of the images in it. - # - image_set = workspace.image_set - # - # Get the input image object. We want a grayscale image here. 
- # The image set will convert a color image to a grayscale one - # and warn the user. - # - input_image = image_set.get_image(input_image_name, must_be_grayscale=True) - # - # Get the pixels - these are a 2-d Numpy array. - # - pixels = input_image.pixel_data - # - ############################################################### - - ############################################################### - # - # GETTING THE LABELS MATRIX FROM THE OBJECT SET - # - # The object set has all of the objects in it. - # - object_set = workspace.object_set - # - # Get objects from the object set. The most useful array in - # the objects is "objects.segmented" which is a labels matrix - # in which each pixel has an integer value. - # - # The value, "0", is reserved for "background" - a pixel with - # a zero value is not in an object. Each object has an object - # number, starting at "1" and each pixel in the object is - # labeled with that number. - # - # The other useful array is "objects.small_removed_segmented" which - # is another labels matrix. There are objects that touch the edge of - # the image and get filtered out and there are large objects that - # get filtered out. Modules like "IdentifySecondaryObjects" may - # want to associate pixels near the objects in the labels matrix to - # those objects - the large and touching objects should compete with - # the real ones, so you should use "objects.small_removed_segmented" - # for those cases. - # - objects = object_set.get_objects(input_object_name) - labels = objects.segmented - ############################################################### + self.thr = Float( + "Set threshold as percentage of maximum intensity for the images", + 15, + minval=0, + maxval=99, + doc="""\ +You may choose to measure colocalization metrics only for those pixels above +a certain threshold. Select the threshold as a percentage of the maximum intensity +of the above image [0-99]. - # - # The minimum enclosing circle (MEC) is the smallest circle that - # will fit around the object. We get the centers and radii of - # all of the objects at once. You'll see how that lets us - # compute the X and Y position of each pixel in a label all at - # one go. - # - # First, get an array that lists the whole range of indexes in - # the labels matrix. - # - indexes = objects.indices - # - # Then ask for the minimum_enclosing_circle for each object named - # in those indexes. MEC returns the i and j coordinate of the center - # and the radius of the circle and that defines the circle entirely. - # - centers, radius = centrosome.cpmorphology.minimum_enclosing_circle( - labels, indexes +This value is used by the Overlap, Manders, and Rank Weighted Colocalization +measurements. +""", ) - # - # The module computes a measurement based on the image intensity - # inside an object times a Zernike polynomial inscribed in the - # minimum enclosing circle around the object. The details are - # in the "measure_zernike" function. We call into the function with - # an N and M which describe the polynomial. 
- # - for n, m in self.get_zernike_indexes(): - # Compute the zernikes for each object, returned in an array - zr, zi = self.measure_zernike( - pixels, labels, indexes, centers, radius, n, m - ) - # Get the name of the measurement feature for this zernike - feature = self.get_measurement_name(n, m) + self.do_rwc = True - # Add a measurement for this kind of object - if m != 0: - measurements.add_measurement(input_object_name, feature, zr) + self.spacer = Divider(line=True) - # Do the same with -m - feature = self.get_measurement_name(n, -m) - measurements.add_measurement(input_object_name, feature, zi) - else: - # For zero, the total is the sum of real and imaginary parts - measurements.add_measurement(input_object_name, feature, zr + zi) + def settings(self): + """Return the settings to be saved in the pipeline""" + result = [ + self.images_list, + self.thr, + # self.images_or_objects, + self.objects_list, + # self.do_all, + # self.do_corr_and_slope, + # self.do_manders, + self.do_rwc, + # self.do_overlap, + # self.do_costes, + # self.fast_costes, + ] + return result + + def visible_settings(self): + result = [ + self.images_list, + self.spacer, + self.thr, + self.do_rwc, + # self.images_or_objects, + ] + return result + + def help_settings(self): + """Return the settings to be displayed in the help menu""" + help_settings = [ + # self.images_or_objects, + self.thr, + self.images_list, + self.objects_list, + # self.do_all, + # self.fast_costes, + ] + return help_settings + + def get_image_pairs(self): + """Yield all permutations of pairs of images to correlate - # Record the statistics. - zmean = numpy.mean(zr) - zmedian = numpy.median(zr) - zsd = numpy.std(zr) - statistics.append([feature, zmean, zmedian, zsd]) + Yields the pairs of images in a canonical order. + """ + for i in range(len(self.images_list.value) - 1): + for j in range(i + 1, len(self.images_list.value)): + yield ( + self.images_list.value[i], + self.images_list.value[j], + ) + + + def run(self, workspace): + """Calculate measurements on an image set""" + col_labels = ["First image", "Second image", "Objects", "Measurement", "Value"] + statistics = [] + if len(self.images_list.value) < 2: + raise ValueError("At least 2 images must be selected for analysis.") + for first_image_name, second_image_name in self.get_image_pairs(): + for object_name in self.objects_list.value: + statistics += self.run_image_pair_objects( + workspace, first_image_name, second_image_name, object_name + ) + if self.show_window: + workspace.display_data.statistics = statistics + workspace.display_data.col_labels = col_labels - # - # "display" lets you use matplotlib to display your results. 
- # def display(self, workspace, figure): statistics = workspace.display_data.statistics - + helptext = "default" figure.set_subplots((1, 1)) + figure.subplot_table( + 0, 0, statistics, workspace.display_data.col_labels, title=helptext + ) + + def run_image_pair_objects( + self, workspace, first_image_name, second_image_name, object_name + ): + """Calculate per-object correlations between intensities in two images""" + first_image = workspace.image_set.get_image( + first_image_name, must_be_grayscale=True + ) + second_image = workspace.image_set.get_image( + second_image_name, must_be_grayscale=True + ) + objects = workspace.object_set.get_objects(object_name) + # + # Crop both images to the size of the labels matrix + # + labels = objects.segmented + try: + first_pixels = objects.crop_image_similarly(first_image.pixel_data) + first_mask = objects.crop_image_similarly(first_image.mask) + except ValueError: + first_pixels, m1 = size_similarly(labels, first_image.pixel_data) + first_mask, m1 = size_similarly(labels, first_image.mask) + first_mask[~m1] = False + try: + second_pixels = objects.crop_image_similarly(second_image.pixel_data) + second_mask = objects.crop_image_similarly(second_image.mask) + except ValueError: + second_pixels, m1 = size_similarly(labels, second_image.pixel_data) + second_mask, m1 = size_similarly(labels, second_image.mask) + second_mask[~m1] = False + mask = (labels > 0) & first_mask & second_mask + first_pixels = first_pixels[mask] + second_pixels = second_pixels[mask] + labels = labels[mask] + result = [] + first_pixel_data = first_image.pixel_data + first_mask = first_image.mask + first_pixel_count = numpy.product(first_pixel_data.shape) + second_pixel_data = second_image.pixel_data + second_mask = second_image.mask + second_pixel_count = numpy.product(second_pixel_data.shape) + # + # Crop the larger image similarly to the smaller one + # + if first_pixel_count < second_pixel_count: + second_pixel_data = first_image.crop_image_similarly(second_pixel_data) + second_mask = first_image.crop_image_similarly(second_mask) + elif second_pixel_count < first_pixel_count: + first_pixel_data = second_image.crop_image_similarly(first_pixel_data) + first_mask = second_image.crop_image_similarly(first_mask) + mask = ( + first_mask + & second_mask + & (~numpy.isnan(first_pixel_data)) + & (~numpy.isnan(second_pixel_data)) + ) + if numpy.any(mask): + fi = first_pixel_data[mask] + si = second_pixel_data[mask] + + n_objects = objects.count + # Handle case when both images for the correlation are completely masked out + + if n_objects == 0: + # corr = numpy.zeros((0,)) + # overlap = numpy.zeros((0,)) + # K1 = numpy.zeros((0,)) + # K2 = numpy.zeros((0,)) + # M1 = numpy.zeros((0,)) + # M2 = numpy.zeros((0,)) + RWC1 = numpy.zeros((0,)) + RWC2 = numpy.zeros((0,)) + # C1 = numpy.zeros((0,)) + # C2 = numpy.zeros((0,)) + elif numpy.where(mask)[0].__len__() == 0: + corr = numpy.zeros((n_objects,)) + corr[:] = numpy.NaN + # overlap = K1 = K2 = M1 = M2 = RWC1 = RWC2 = C1 = C2 = corr + RWC1 = RWC2 = corr + + else: + lrange = numpy.arange(n_objects, dtype=numpy.int32) + 1 + + # RWC Coefficient + RWC1 = numpy.zeros(len(lrange)) + RWC2 = numpy.zeros(len(lrange)) + for label in labels: + # set first_pixels to only what's inside that label and rename + # same with second_pixels to only what's inside that label and rename + # same with labels to only what's inside that label and rename + # same with lrange to only what's inside that label and rename + # same with fi_thresh, si_thresh, 
combined_thresh, tot_fi_thr, tot_si_thr + # - move the 770 block inside this function after subsettingfirst_pixels and second_pixels + + [Rank1] = numpy.lexsort(([labels], [first_pixels])) + [Rank2] = numpy.lexsort(([labels], [second_pixels])) + Rank1_U = numpy.hstack( + [[False], first_pixels[Rank1[:-1]] != first_pixels[Rank1[1:]]] + ) + Rank2_U = numpy.hstack( + [[False], second_pixels[Rank2[:-1]] != second_pixels[Rank2[1:]]] + ) + Rank1_S = numpy.cumsum(Rank1_U) + Rank2_S = numpy.cumsum(Rank2_U) + Rank_im1 = numpy.zeros(first_pixels.shape, dtype=int) + Rank_im2 = numpy.zeros(second_pixels.shape, dtype=int) + Rank_im1[Rank1] = Rank1_S + Rank_im2[Rank2] = Rank2_S + + R = max(Rank_im1.max(), Rank_im2.max()) + 1 + Di = abs(Rank_im1 - Rank_im2) + weight = (R - Di) * 1.0 / R + weight_thresh = weight[combined_thresh] + + if numpy.any(combined_thresh): + RWC1 = numpy.array( + scipy.ndimage.sum( + fi_thresh * weight_thresh, labels[combined_thresh], lrange + ) + ) / numpy.array(tot_fi_thr) + RWC2 = numpy.array( + scipy.ndimage.sum( + si_thresh * weight_thresh, labels[combined_thresh], lrange + ) + ) / numpy.array(tot_si_thr) + + # Threshold as percentage of maximum intensity of objects in each channel + tff = (self.thr.value / 100) * fix( + scipy.ndimage.maximum(first_pixels, labels, lrange) + ) + tss = (self.thr.value / 100) * fix( + scipy.ndimage.maximum(second_pixels, labels, lrange) + ) + + combined_thresh = (first_pixels >= tff[labels - 1]) & ( + second_pixels >= tss[labels - 1] + ) + fi_thresh = first_pixels[combined_thresh] + si_thresh = second_pixels[combined_thresh] + tot_fi_thr = scipy.ndimage.sum( + first_pixels[first_pixels >= tff[labels - 1]], + labels[first_pixels >= tff[labels - 1]], + lrange, + ) + tot_si_thr = scipy.ndimage.sum( + second_pixels[second_pixels >= tss[labels - 1]], + labels[second_pixels >= tss[labels - 1]], + lrange, + ) + + result += [ + [ + first_image_name, + second_image_name, + object_name, + "Mean RWC coeff", + "%.3f" % numpy.mean(RWC1), + ], + [ + first_image_name, + second_image_name, + object_name, + "Median RWC coeff", + "%.3f" % numpy.median(RWC1), + ], + [ + first_image_name, + second_image_name, + object_name, + "Min RWC coeff", + "%.3f" % numpy.min(RWC1), + ], + [ + first_image_name, + second_image_name, + object_name, + "Max RWC coeff", + "%.3f" % numpy.max(RWC1), + ], + ] + result += [ + [ + second_image_name, + first_image_name, + object_name, + "Mean RWC coeff", + "%.3f" % numpy.mean(RWC2), + ], + [ + second_image_name, + first_image_name, + object_name, + "Median RWC coeff", + "%.3f" % numpy.median(RWC2), + ], + [ + second_image_name, + first_image_name, + object_name, + "Min RWC coeff", + "%.3f" % numpy.min(RWC2), + ], + [ + second_image_name, + first_image_name, + object_name, + "Max RWC coeff", + "%.3f" % numpy.max(RWC2), + ], + ] + + rwc_measurement_1 = F_RWC_FORMAT % (first_image_name, second_image_name) + rwc_measurement_2 = F_RWC_FORMAT % (second_image_name, first_image_name) + workspace.measurements.add_measurement(object_name, rwc_measurement_1, RWC1) + workspace.measurements.add_measurement(object_name, rwc_measurement_2, RWC2) + + if n_objects == 0: + return [ + [ + first_image_name, + second_image_name, + object_name, + "Mean correlation", + "-", + ], + [ + first_image_name, + second_image_name, + object_name, + "Median correlation", + "-", + ], + [ + first_image_name, + second_image_name, + object_name, + "Min correlation", + "-", + ], + [ + first_image_name, + second_image_name, + object_name, + "Max correlation", + "-", + ], + ] + 
else: + return result + + def get_measurement_columns(self, pipeline): + """Return column definitions for all measurements made by this module""" + columns = [] + for first_image, second_image in self.get_image_pairs(): + for i in range(len(self.objects_list.value)): + object_name = self.objects_list.value[i] + if self.do_rwc: + columns += [ + ( + object_name, + F_RWC_FORMAT % (first_image, second_image), + COLTYPE_FLOAT, + ), + ( + object_name, + F_RWC_FORMAT % (second_image, first_image), + COLTYPE_FLOAT, + ), + ] + return columns - figure.subplot_table(0, 0, statistics) - - def get_zernike_indexes(self, wants_negative=False): - """Get an N x 2 numpy array containing the M and N Zernike degrees - - Use the radial_degree setting to determine which Zernikes to do. + def get_categories(self, pipeline, object_name): + """Return the categories supported by this module for the given object - wants_negative - if True, return both positive and negative M, if false - return only positive + object_name - name of the measured object or IMAGE """ - zi = centrosome.zernike.get_zernike_indexes(self.radial_degree.value + 1) + return ["Correlation"] - if wants_negative: - # - # numpy.vstack means concatenate rows of two 2d arrays. - # The multiplication by [1, -1] negates every m, but preserves n. - # zi[zi[:, 1] != 0] picks out only elements with m not equal to zero. - # - zi = numpy.vstack([zi, zi[zi[:, 1] != 0] * numpy.array([1, -1])]) - - # - # Sort by azimuth degree and radial degree so they are ordered - # reasonably - # - order = numpy.lexsort((zi[:, 1], zi[:, 0])) - zi = zi[order, :] - - return zi + def get_measurements(self, pipeline, object_name, category): + if self.get_categories(pipeline, object_name) == [category]: + results = [] + results += ["RWC"] + return results + return [] - # - # "measure_zernike" makes one Zernike measurement on each object - # - def measure_zernike(self, pixels, labels, indexes, centers, radius, n, m): - """Measure the intensity of the image with Zernike (N, M) - - pixels - the intensity image to be measured - labels - the labels matrix that labels each object with an integer - indexes - the label #s in the image - centers - the centers of the minimum enclosing circle for each object - radius - the radius of the minimum enclosing circle for each object - n, m - the Zernike coefficients. - - See http://en.wikipedia.org/wiki/Zernike_polynomials for an - explanation of the Zernike polynomials - """ - # - # The strategy here is to operate on the whole array instead - # of operating on one object at a time. The most important thing - # is to avoid having to run the Python interpreter once per pixel - # in the image and the second most important is to avoid running - # it per object in case there are hundreds of objects. - # - # We play lots of indexing tricks here to operate on the whole image. - # I'll try to explain some - hopefully, you can reuse. 
- center_x = centers[:, 1] - center_y = centers[:, 0] + def get_measurement_images(self, pipeline, object_name, category, measurement): + """Return the joined pairs of images measured""" + result = [] + if measurement in self.get_measurements(pipeline, object_name, category): + for i1, i2 in self.get_image_pairs(): + result.append("%s_%s" % (i1, i2)) + # For asymmetric, return both orderings + if measurement in ("K", "Manders", "RWC", "Costes"): + result.append("%s_%s" % (i2, i1)) + return result + + def validate_module(self, pipeline): + """Make sure chosen objects are selected only once""" + if len(self.images_list.value) < 2: + raise ValidationError("This module needs at least 2 images to be selected", self.images_list) + + if len(self.objects_list.value) == 0: + raise ValidationError("No object sets selected", self.objects_list) + + def upgrade_settings(self, setting_values, variable_revision_number, module_name): + """Adjust the setting values for pipelines saved under old revisions""" + if variable_revision_number < 2: + raise NotImplementedError( + "Automatic upgrade for this module is not supported in CellProfiler 3." + ) - # - # Make up fake values for 0 (the background). This lets us do our - # indexing tricks. Really, we're going to ignore the background, - # but we want to do the indexing without ignoring the background - # because that's easier. - # - center_x = numpy.hstack([[0], center_x]) - center_y = numpy.hstack([[0], center_y]) - radius = numpy.hstack([[1], radius]) + if variable_revision_number == 2: + image_count = int(setting_values[0]) + idx_thr = image_count + 2 + setting_values = ( + setting_values[:idx_thr] + ["15.0"] + setting_values[idx_thr:] + ) + variable_revision_number = 3 + + if variable_revision_number == 3: + num_images = int(setting_values[0]) + num_objects = int(setting_values[1]) + div_img = 2 + num_images + div_obj = div_img + 2 + num_objects + images_set = set(setting_values[2:div_img]) + thr_mode = setting_values[div_img : div_img + 2] + objects_set = set(setting_values[div_img + 2 : div_obj]) + other_settings = setting_values[div_obj:] + if "None" in images_set: + images_set.remove("None") + if "None" in objects_set: + objects_set.remove("None") + images_string = ", ".join(map(str, images_set)) + objects_string = ", ".join(map(str, objects_set)) + setting_values = ( + [images_string] + thr_mode + [objects_string] + other_settings + ) + variable_revision_number = 4 + if variable_revision_number == 4: + # Add costes mode switch + setting_values += [M_FASTER] + variable_revision_number = 5 + return setting_values, variable_revision_number - # - # Now get one array that's the y coordinate of each pixel and one - # that's the x coordinate. This might look stupid and wasteful, - # but these "arrays" are never actually realized and made into - # real memory. - # - # Using 0.0:X creates floating point indices. This makes the - # data type of x, y consistent with center_x, center_y and - # raidus. Numpy requires consistent data types for in-place - # operations like -= and /=. - # - y, x = numpy.mgrid[0.0 : labels.shape[0], 0.0 : labels.shape[1]] + def volumetric(self): + return True - # - # Get the x and y coordinates relative to the object centers. - # This uses Numpy broadcasting. For each pixel, we use the - # value in the labels matrix as an index into the appropriate - # one-dimensional array. So we get the value for that object. - # - y -= center_y[labels] - x -= center_x[labels] - # - # Zernikes take x and y values from zero to one. 
We scale the - # integer coordinate values by dividing them by the radius of - # the circle. Again, we use the indexing trick to look up the - # values for each object. - # - y /= radius[labels] - x /= radius[labels] +def get_scale(scale_1, scale_2): + if scale_1 is not None and scale_2 is not None: + return max(scale_1, scale_2) + elif scale_1 is not None: + return scale_1 + elif scale_2 is not None: + return scale_2 + else: + return 255 + - # - # Now we can get Zernike polynomials per-pixel where each pixel - # value is calculated according to its object's MEC. - # - # We use a mask of all of the non-zero labels so the calculation - # runs a little faster. - # - zernike_polynomial = centrosome.zernike.construct_zernike_polynomials( - x, y, numpy.array([[n, m]]), labels > 0 - ) - # - # For historical reasons, centrosome didn't multiply by the per/zernike - # normalizing factor: 2*n + 2 / E / pi where E is 2 if m is zero and 1 - # if m is one. We do it here to aid with the reconstruction - # - zernike_polynomial *= (2 * n + 2) / (2 if m == 0 else 1) / numpy.pi - # - # Multiply the Zernike polynomial by the image to dissect - # the image by the Zernike basis set. - # - output_pixels = pixels * zernike_polynomial[:, :, 0] - # - # The sum function calculates the sum of the pixel values for - # each pixel in an object, using the labels matrix to name - # the pixels in an object - # - zr = scipy.ndimage.sum(output_pixels.real, labels, indexes) - zi = scipy.ndimage.sum(output_pixels.imag, labels, indexes) - return zr, zi + # # Here, we go about naming the measurements. From 280b6cdc207cddf771a3128fd8140f7b2dbb20ac Mon Sep 17 00:00:00 2001 From: emiglietta Date: Mon, 19 Aug 2024 11:33:44 -0400 Subject: [PATCH 03/20] add debug configuration --- .vscode/launch.json | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .vscode/launch.json diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 00000000..e7cf8f0d --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,28 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Python Debugger: Current File", + "type": "debugpy", + "request": "launch", + "program": "${file}", + "console": "integratedTerminal" + }, + { + "name": "Cellprofiler", + "request": "launch", + "type": "debugpy", + "python": "/Users/emigliet/anaconda3/envs/cellprofiler_426/bin/pythonw", + "justMyCode": false, + "subProcess": true, + "module": "cellprofiler", + "args": [ + "-L", + "10", + ], + }, + ] +} \ No newline at end of file From 9469c6f0fb7474918ecdbb2503465a10fac297bf Mon Sep 17 00:00:00 2001 From: emiglietta Date: Mon, 19 Aug 2024 12:37:42 -0400 Subject: [PATCH 04/20] remove the remainder of the sample plugin --- active_plugins/measureRWCperObject.py | 157 -------------------------- 1 file changed, 157 deletions(-) diff --git a/active_plugins/measureRWCperObject.py b/active_plugins/measureRWCperObject.py index f2be6911..853dd8ee 100644 --- a/active_plugins/measureRWCperObject.py +++ b/active_plugins/measureRWCperObject.py @@ -522,160 +522,3 @@ def get_scale(scale_1, scale_2): return 255 - - - - - - - # - # Here, we go about naming the measurements. - # - # Measurement names have parts to them, separated by underbars. 
- # There's always a category and a feature name - # and sometimes there are modifiers such as the image that - # was measured or the scale at which it was measured. - # - # We have functions that build the names so that we can - # use the same functions in different places. - # - def get_feature_name(self, n, m): - """Return a measurement feature name for the given Zernike""" - # - # Something nice and simple for a name... Intensity_DNA_N4M2 for instance - # - if m >= 0: - return "Intensity_%s_N%dM%d" % (self.input_image_name.value, n, m) - - return "Intensity_%s_N%dMM%d" % (self.input_image_name.value, n, -m) - - def get_measurement_name(self, n, m): - """Return the whole measurement name""" - input_image_name = self.input_image_name.value - - return "_".join([C_MEASUREMENT_TEMPLATE, self.get_feature_name(n, m)]) - - # - # We have to tell CellProfiler about the measurements we produce. - # There are two parts: one that is for database-type modules and one - # that is for the UI. The first part gives a comprehensive list - # of measurement columns produced. The second is more informal and - # tells CellProfiler how to categorize its measurements. - # - # "get_measurement_columns" gets the measurements for use in the database - # or in a spreadsheet. Some modules need this because they - # might make measurements of measurements and need those names. - # - def get_measurement_columns(self, pipeline): - # - # We use a list comprehension here. - # See http://docs.python.org/tutorial/datastructures.html#list-comprehensions - # for how this works. - # - # The first thing in the list is the object being measured. If it's - # the whole image, use IMAGE as the name. - # - # The second thing is the measurement name. - # - # The third thing is the column type. See the COLTYPE constants - # in measurement.py for what you can use - # - input_object_name = self.input_object_name.value - - return [ - (input_object_name, self.get_measurement_name(n, m), COLTYPE_FLOAT,) - for n, m in self.get_zernike_indexes(True) - ] - - # - # "get_categories" returns a list of the measurement categories produced - # by this module. It takes an object name - only return categories - # if the name matches. - # - def get_categories(self, pipeline, object_name): - if object_name == self.input_object_name: - return [C_MEASUREMENT_TEMPLATE] - - return [] - - # - # Return the feature names if the object_name and category match - # - def get_measurements(self, pipeline, object_name, category): - if object_name == self.input_object_name and category == C_MEASUREMENT_TEMPLATE: - return ["Intensity"] - - return [] - - # - # This module makes per-image measurements. That means we need - # "get_measurement_images" to distinguish measurements made on two - # different images by this module - # - def get_measurement_images(self, pipeline, object_name, category, measurement): - # - # This might seem wasteful, but UI code can be slow. 
Just see - # if the measurement is in the list returned by get_measurements - # - if measurement in self.get_measurements(pipeline, object_name, category): - return [self.input_image_name.value] - - return [] - - def get_measurement_scales( - self, pipeline, object_name, category, measurement, image_name - ): - """Get the scales for a measurement - - For the Zernikes, the scales are of the form, N2M2 or N2MM2 for - negative azimuthal degree - """ - - def get_scale(n, m): - if m >= 0: - return "N%dM%d" % (n, m) - - return "N%dMM%d" % (n, -m) - - if image_name in self.get_measurement_images( - pipeline, object_name, category, measurement - ): - return [get_scale(n, m) for n, m in self.get_zernike_indexes(True)] - - return [] - - @staticmethod - def get_image_from_features(radius, feature_dictionary): - """Reconstruct the intensity image from the zernike features - - radius - the radius of the minimum enclosing circle - - feature_dictionary - keys are (n, m) tuples and values are the - magnitudes. - - returns a greyscale image based on the feature dictionary. - """ - i, j = ( - numpy.mgrid[-radius : (radius + 1), -radius : (radius + 1)].astype(float) - / radius - ) - mask = (i * i + j * j) <= 1 - - zernike_indexes = numpy.array(list(feature_dictionary.keys())) - zernike_features = numpy.array(list(feature_dictionary.values())) - - z = centrosome.zernike.construct_zernike_polynomials( - j, i, numpy.abs(zernike_indexes), mask=mask - ) - zn = ( - (2 * zernike_indexes[:, 0] + 2) - / ((zernike_indexes[:, 1] == 0) + 1) - / numpy.pi - ) - z *= zn[numpy.newaxis, numpy.newaxis, :] - z = ( - z.real * (zernike_indexes[:, 1] >= 0)[numpy.newaxis, numpy.newaxis, :] - + z.imag * (zernike_indexes[:, 1] <= 0)[numpy.newaxis, numpy.newaxis, :] - ) - - return numpy.sum(z * zernike_features[numpy.newaxis, numpy.newaxis, :], 2) \ No newline at end of file From 8143d9eddb10039407e261bb9446cb901dba22fa Mon Sep 17 00:00:00 2001 From: emiglietta Date: Wed, 21 Aug 2024 15:19:14 -0400 Subject: [PATCH 05/20] renamed module in order to escape import errors in CP (name of file must be all lower case) and debugger cache (Nodar had to change the actual name bc the previous capitalized one remained cached) --- ...ureRWCperObject.py => measurerwcperobj.py} | 32 ++++++++++++------- 1 file changed, 21 insertions(+), 11 deletions(-) rename active_plugins/{measureRWCperObject.py => measurerwcperobj.py} (96%) diff --git a/active_plugins/measureRWCperObject.py b/active_plugins/measurerwcperobj.py similarity index 96% rename from active_plugins/measureRWCperObject.py rename to active_plugins/measurerwcperobj.py index 853dd8ee..d8ecc1db 100644 --- a/active_plugins/measureRWCperObject.py +++ b/active_plugins/measurerwcperobj.py @@ -4,20 +4,34 @@ # ################################# -import centrosome.cpmorphology -import centrosome.zernike import numpy import scipy.ndimage +import scipy.stats +from scipy.linalg import lstsq +import logging ################################# # # Imports from CellProfiler # ################################## - +from cellprofiler_core.constants.measurement import COLTYPE_FLOAT +from cellprofiler_core.module import Module +from cellprofiler_core.setting import Divider, Binary, ValidationError +from cellprofiler_core.setting.choice import Choice +from cellprofiler_core.setting.subscriber import ( + LabelSubscriber, + LabelListSubscriber, + ImageListSubscriber, +) +from cellprofiler_core.setting.text import Float +from cellprofiler_core.utilities.core.object import size_similarly +from centrosome.cpmorphology 
import fixup_scipy_ndimage_result as fix + +LOGGER = logging.getLogger(__name__) __doc__ = """\ -MeasureRWCperObject +MeasureRWCperObj =================== @@ -31,10 +45,6 @@ # if someone wants to change the text, that text will change everywhere. # Also, you can't misspell it by accident. # -from cellprofiler_core.constants.measurement import COLTYPE_FLOAT -from cellprofiler_core.module import Module -from cellprofiler_core.setting.subscriber import ImageSubscriber, LabelSubscriber -from cellprofiler_core.setting.text import Integer """This is the measurement template category""" C_MEASUREMENT_TEMPLATE = "MT" @@ -44,8 +54,8 @@ F_RWC_FORMAT = "Correlation_RWC_%s_%s" -class MeasurementTemplate(Module): - module_name = "MeasureRWCperObject" +class MeasureRWCperObj(Module): + module_name = "MeasureRWCperObj" category = "Measurement" variable_revision_number = 1 @@ -503,7 +513,7 @@ def upgrade_settings(self, setting_values, variable_revision_number, module_name variable_revision_number = 4 if variable_revision_number == 4: # Add costes mode switch - setting_values += [M_FASTER] + # setting_values += [M_FASTER] variable_revision_number = 5 return setting_values, variable_revision_number From 7a99c61437ab0c2f2b232433de5ec0c0bf6f11a0 Mon Sep 17 00:00:00 2001 From: emiglietta Date: Tue, 19 Nov 2024 17:03:13 -0500 Subject: [PATCH 06/20] Update measurerwcperobj.py --- active_plugins/measurerwcperobj.py | 103 +++++++++++++---------------- 1 file changed, 46 insertions(+), 57 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index d8ecc1db..2f145940 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -27,6 +27,7 @@ from cellprofiler_core.setting.text import Float from cellprofiler_core.utilities.core.object import size_similarly from centrosome.cpmorphology import fixup_scipy_ndimage_result as fix +from cellprofiler_core.utilities.core.object import crop_labels_and_image LOGGER = logging.getLogger(__name__) @@ -95,7 +96,15 @@ def create_settings(self): """, ) - self.do_rwc = True + self.do_rwc = Binary( + "Calculate the Rank Weighted Colocalization coefficients?", + True, + doc="""\ +Select *{YES}* to run the Rank Weighted Colocalization coefficients. +""".format( + **{"YES": "Yes"} + ), + ) self.spacer = Divider(line=True) @@ -119,9 +128,10 @@ def settings(self): def visible_settings(self): result = [ self.images_list, + self.objects_list, self.spacer, self.thr, - self.do_rwc, + # self.do_rwc, # self.images_or_objects, ] return result @@ -235,7 +245,16 @@ def run_image_pair_objects( n_objects = objects.count # Handle case when both images for the correlation are completely masked out - + + # Threshold as percentage of maximum intensity in each channel + thr_fi = self.thr.value * numpy.max(fi) / 100 + thr_si = self.thr.value * numpy.max(si) / 100 + combined_thresh = (fi > thr_fi) & (si > thr_si) # CHECK THIS DEFINITION, IT HAS BEEN CHANGED FROM THE ORIGINAL BUT IDK IF IT'S OK! 
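# Clarification of the definition flagged above: combined_thresh keeps only the
# pixels that clear the threshold in BOTH channels, where thr_fi and thr_si are
# image-wide cut-offs (a percentage of each channel's global maximum). The
# per-object code further down derives one cut-off per label instead, via
# scipy.ndimage.maximum(first_pixels, labels, lrange), so the two definitions
# select different pixel sets whenever an object is dimmer than the image maximum.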
+ fi_thresh = fi[combined_thresh] + si_thresh = si[combined_thresh] + tot_fi_thr = fi[(fi > thr_fi)].sum() + tot_si_thr = si[(si > thr_si)].sum() + if n_objects == 0: # corr = numpy.zeros((0,)) # overlap = numpy.zeros((0,)) @@ -259,16 +278,25 @@ def run_image_pair_objects( # RWC Coefficient RWC1 = numpy.zeros(len(lrange)) RWC2 = numpy.zeros(len(lrange)) - for label in labels: - # set first_pixels to only what's inside that label and rename - # same with second_pixels to only what's inside that label and rename - # same with labels to only what's inside that label and rename - # same with lrange to only what's inside that label and rename + + for label in numpy.unique(labels): + # set first_pixels to only what's inside that label and rename --> obj_pixels_img1 + # same with second_pixels to only what's inside that label and rename --> obj_pixels_img2 + # same with labels to only what's inside that label and rename --> obj_label + # same with lrange to only what's inside that label and rename --> obj_lrange # same with fi_thresh, si_thresh, combined_thresh, tot_fi_thr, tot_si_thr # - move the 770 block inside this function after subsettingfirst_pixels and second_pixels - - [Rank1] = numpy.lexsort(([labels], [first_pixels])) - [Rank2] = numpy.lexsort(([labels], [second_pixels])) + + # objects.segmented is an array (10,10) were each pixel is assigned its corresponding label (1,2 or 3) + + #delete these and replace with Beth's suggestion + obj_pixels_img1 = numpy.where(labels==label,first_pixel_data,0) + obj_pixels_img2 = numpy.where(labels==label,second_pixel_data,0) + + obj_lrange = lrange[lrange==label] + + [Rank1] = numpy.lexsort(([obj_label], [obj_pixels_img1])) + [Rank2] = numpy.lexsort(([obj_label], [obj_pixels_img2])) Rank1_U = numpy.hstack( [[False], first_pixels[Rank1[:-1]] != first_pixels[Rank1[1:]]] ) @@ -277,13 +305,13 @@ def run_image_pair_objects( ) Rank1_S = numpy.cumsum(Rank1_U) Rank2_S = numpy.cumsum(Rank2_U) - Rank_im1 = numpy.zeros(first_pixels.shape, dtype=int) - Rank_im2 = numpy.zeros(second_pixels.shape, dtype=int) - Rank_im1[Rank1] = Rank1_S - Rank_im2[Rank2] = Rank2_S + Rank_obj_im1 = numpy.zeros(obj_pixels_img1.shape, dtype=int) + Rank_obj_im2 = numpy.zeros(obj_pixels_img2.shape, dtype=int) + Rank_obj_im1[Rank1] = Rank1_S + Rank_obj_im2[Rank2] = Rank2_S - R = max(Rank_im1.max(), Rank_im2.max()) + 1 - Di = abs(Rank_im1 - Rank_im2) + R = max(Rank_obj_im1.max(), Rank_obj_im2.max()) + 1 + Di = abs(Rank_obj_im1 - Rank_obj_im2) weight = (R - Di) * 1.0 / R weight_thresh = weight[combined_thresh] @@ -322,6 +350,7 @@ def run_image_pair_objects( labels[second_pixels >= tss[labels - 1]], lrange, ) + ### update RWC1 and RWC2 with ther right position and the right values result += [ [ @@ -477,46 +506,6 @@ def validate_module(self, pipeline): if len(self.objects_list.value) == 0: raise ValidationError("No object sets selected", self.objects_list) - def upgrade_settings(self, setting_values, variable_revision_number, module_name): - """Adjust the setting values for pipelines saved under old revisions""" - if variable_revision_number < 2: - raise NotImplementedError( - "Automatic upgrade for this module is not supported in CellProfiler 3." 
- ) - - if variable_revision_number == 2: - image_count = int(setting_values[0]) - idx_thr = image_count + 2 - setting_values = ( - setting_values[:idx_thr] + ["15.0"] + setting_values[idx_thr:] - ) - variable_revision_number = 3 - - if variable_revision_number == 3: - num_images = int(setting_values[0]) - num_objects = int(setting_values[1]) - div_img = 2 + num_images - div_obj = div_img + 2 + num_objects - images_set = set(setting_values[2:div_img]) - thr_mode = setting_values[div_img : div_img + 2] - objects_set = set(setting_values[div_img + 2 : div_obj]) - other_settings = setting_values[div_obj:] - if "None" in images_set: - images_set.remove("None") - if "None" in objects_set: - objects_set.remove("None") - images_string = ", ".join(map(str, images_set)) - objects_string = ", ".join(map(str, objects_set)) - setting_values = ( - [images_string] + thr_mode + [objects_string] + other_settings - ) - variable_revision_number = 4 - if variable_revision_number == 4: - # Add costes mode switch - # setting_values += [M_FASTER] - variable_revision_number = 5 - return setting_values, variable_revision_number - def volumetric(self): return True From 8bd6089185601a470c2699291d55cf45c98b386b Mon Sep 17 00:00:00 2001 From: emiglietta Date: Tue, 26 Nov 2024 09:14:29 -0800 Subject: [PATCH 07/20] looks on its way but i messed too much with the code before, might have looks on its way but i messed too much with the code before, might have to restart fresh --- active_plugins/measurerwcperobj.py | 43 +++++++++++++++++++----------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index 2f145940..c2b0ad5d 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -249,11 +249,11 @@ def run_image_pair_objects( # Threshold as percentage of maximum intensity in each channel thr_fi = self.thr.value * numpy.max(fi) / 100 thr_si = self.thr.value * numpy.max(si) / 100 - combined_thresh = (fi > thr_fi) & (si > thr_si) # CHECK THIS DEFINITION, IT HAS BEEN CHANGED FROM THE ORIGINAL BUT IDK IF IT'S OK! - fi_thresh = fi[combined_thresh] - si_thresh = si[combined_thresh] - tot_fi_thr = fi[(fi > thr_fi)].sum() - tot_si_thr = si[(si > thr_si)].sum() + + #fi_thresh = fi[combined_thresh] #array including ONLY the pixels from fi that are above threshold in both images + #si_thresh = si[combined_thresh] #array including ONLY the pixels from si that are above threshold in both images + #tot_fi_thr = fi[(fi > thr_fi)].sum() #single value of the integrated intensity of above-threshold pixels for fi (?) + #tot_si_thr = si[(si > thr_si)].sum() #single value of the integrated intensity of above-threshold pixels for si (?) 
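# For the questions marked "(?)" above: fi_thresh / si_thresh are 1-D arrays holding
# only the fi / si values at pixels that pass the threshold in both channels
# (combined_thresh), whereas tot_fi_thr / tot_si_thr are single numbers: the summed
# intensity of every pixel that passes its own channel's threshold, independent of
# the other channel. Their per-object analogues (tot_fi_thr_perObj, tot_si_thr_perObj)
# are what the RWC values are divided by further down.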
if n_objects == 0: # corr = numpy.zeros((0,)) @@ -286,24 +286,35 @@ def run_image_pair_objects( # same with lrange to only what's inside that label and rename --> obj_lrange # same with fi_thresh, si_thresh, combined_thresh, tot_fi_thr, tot_si_thr # - move the 770 block inside this function after subsettingfirst_pixels and second_pixels + + #combined_thersh is an boolean array representing all the pixels in a single object, that is True in any pixel where BOTH fi and si are above their respective threshold + combined_thresh = (first_pixels[labels==label] > thr_fi) & (second_pixels[labels==label] > thr_si) + + #obj_pixels_img1 = numpy.where(labels==label,first_pixels,0) + obj_pixels_img1 = first_pixels[labels==label] + #ASK BETH - in this case where no object has disjointed pixels, the order of the values of first_pixels matches the order of the objects. What would happen with disjointed objects?! # objects.segmented is an array (10,10) were each pixel is assigned its corresponding label (1,2 or 3) + #obj_pixels_img2 = numpy.where(objects.segmented==label,second_pixel_data,0) + obj_pixels_img2 = second_pixels[labels==label] - #delete these and replace with Beth's suggestion - obj_pixels_img1 = numpy.where(labels==label,first_pixel_data,0) - obj_pixels_img2 = numpy.where(labels==label,second_pixel_data,0) + #obj_lrange = lrange[lrange==label] + + fi_thresh_obj = obj_pixels_img1[combined_thresh] #array of pixel values above threshold for the object + si_thresh_obj = obj_pixels_img2[combined_thresh] - obj_lrange = lrange[lrange==label] + tot_fi_thr = + tot_si_thr = - [Rank1] = numpy.lexsort(([obj_label], [obj_pixels_img1])) - [Rank2] = numpy.lexsort(([obj_label], [obj_pixels_img2])) + Rank1 = numpy.lexsort([obj_pixels_img1]) #array with a value assigned to each position according to ascending rank (0 is the rank of the lowest value) + Rank2 = numpy.lexsort([obj_pixels_img2]) Rank1_U = numpy.hstack( [[False], first_pixels[Rank1[:-1]] != first_pixels[Rank1[1:]]] - ) + ) #ASK BETH! this is a boolean array that has False every time pixel i from first_pixels (the list of pixel values from all objects in order) is equal to pixel i+1 Rank2_U = numpy.hstack( [[False], second_pixels[Rank2[:-1]] != second_pixels[Rank2[1:]]] ) - Rank1_S = numpy.cumsum(Rank1_U) + Rank1_S = numpy.cumsum(Rank1_U) #ask BETH, array with cumulative number of 'True' Rank2_S = numpy.cumsum(Rank2_U) Rank_obj_im1 = numpy.zeros(obj_pixels_img1.shape, dtype=int) Rank_obj_im2 = numpy.zeros(obj_pixels_img2.shape, dtype=int) @@ -313,17 +324,17 @@ def run_image_pair_objects( R = max(Rank_obj_im1.max(), Rank_obj_im2.max()) + 1 Di = abs(Rank_obj_im1 - Rank_obj_im2) weight = (R - Di) * 1.0 / R - weight_thresh = weight[combined_thresh] + weight_thresh = weight[combined_thresh[label]] if numpy.any(combined_thresh): RWC1 = numpy.array( scipy.ndimage.sum( - fi_thresh * weight_thresh, labels[combined_thresh], lrange + fi_thresh_obj * weight_thresh, labels[combined_thresh], lrange ) ) / numpy.array(tot_fi_thr) RWC2 = numpy.array( scipy.ndimage.sum( - si_thresh * weight_thresh, labels[combined_thresh], lrange + si_thresh_obj * weight_thresh, labels[combined_thresh], lrange ) ) / numpy.array(tot_si_thr) From b8a3f3a7b502fdbb4fd7784ebdfc03abd41ee556 Mon Sep 17 00:00:00 2001 From: emiglietta Date: Tue, 26 Nov 2024 12:56:13 -0800 Subject: [PATCH 08/20] not fancy, but it works!! 
--- active_plugins/measurerwcperobj.py | 64 ++++++++++++++++-------------- 1 file changed, 35 insertions(+), 29 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index c2b0ad5d..1f7fe29b 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -279,6 +279,25 @@ def run_image_pair_objects( RWC1 = numpy.zeros(len(lrange)) RWC2 = numpy.zeros(len(lrange)) + # Threshold as percentage of maximum intensity of objects in each channel + tff = (self.thr.value / 100) * fix( + scipy.ndimage.maximum(first_pixels, labels, lrange) + ) + tss = (self.thr.value / 100) * fix( + scipy.ndimage.maximum(second_pixels, labels, lrange) + ) + + tot_fi_thr = scipy.ndimage.sum( + first_pixels[first_pixels >= tff[labels - 1]], + labels[first_pixels >= tff[labels - 1]], + lrange, + ) + tot_si_thr = scipy.ndimage.sum( + second_pixels[second_pixels >= tss[labels - 1]], + labels[second_pixels >= tss[labels - 1]], + lrange, + ) + for label in numpy.unique(labels): # set first_pixels to only what's inside that label and rename --> obj_pixels_img1 # same with second_pixels to only what's inside that label and rename --> obj_pixels_img2 @@ -303,16 +322,25 @@ def run_image_pair_objects( fi_thresh_obj = obj_pixels_img1[combined_thresh] #array of pixel values above threshold for the object si_thresh_obj = obj_pixels_img2[combined_thresh] +<<<<<<< Updated upstream tot_fi_thr = tot_si_thr = +======= + tot_fi_thr_perObj = scipy.ndimage.sum( + obj_pixels_img1[obj_pixels_img1 >= tff[label - 1]] + ) + tot_si_thr_perObj = scipy.ndimage.sum( + obj_pixels_img2[obj_pixels_img2 >= tff[label - 1]] + ) +>>>>>>> Stashed changes Rank1 = numpy.lexsort([obj_pixels_img1]) #array with a value assigned to each position according to ascending rank (0 is the rank of the lowest value) Rank2 = numpy.lexsort([obj_pixels_img2]) Rank1_U = numpy.hstack( - [[False], first_pixels[Rank1[:-1]] != first_pixels[Rank1[1:]]] + [[False], obj_pixels_img1[Rank1[:-1]] != obj_pixels_img1[Rank1[1:]]] ) #ASK BETH! 
this is a boolean array that has False every time pixel i from first_pixels (the list of pixel values from all objects in order) is equal to pixel i+1 Rank2_U = numpy.hstack( - [[False], second_pixels[Rank2[:-1]] != second_pixels[Rank2[1:]]] + [[False], obj_pixels_img2[Rank2[:-1]] != obj_pixels_img2[Rank2[1:]]] ) Rank1_S = numpy.cumsum(Rank1_U) #ask BETH, array with cumulative number of 'True' Rank2_S = numpy.cumsum(Rank2_U) @@ -324,43 +352,21 @@ def run_image_pair_objects( R = max(Rank_obj_im1.max(), Rank_obj_im2.max()) + 1 Di = abs(Rank_obj_im1 - Rank_obj_im2) weight = (R - Di) * 1.0 / R - weight_thresh = weight[combined_thresh[label]] + weight_thresh = weight[combined_thresh] if numpy.any(combined_thresh): RWC1 = numpy.array( scipy.ndimage.sum( - fi_thresh_obj * weight_thresh, labels[combined_thresh], lrange + fi_thresh_obj * weight_thresh ) - ) / numpy.array(tot_fi_thr) + ) / numpy.array(tot_fi_thr_perObj) RWC2 = numpy.array( scipy.ndimage.sum( - si_thresh_obj * weight_thresh, labels[combined_thresh], lrange + si_thresh_obj * weight_thresh ) - ) / numpy.array(tot_si_thr) + ) / numpy.array(tot_si_thr_perObj) - # Threshold as percentage of maximum intensity of objects in each channel - tff = (self.thr.value / 100) * fix( - scipy.ndimage.maximum(first_pixels, labels, lrange) - ) - tss = (self.thr.value / 100) * fix( - scipy.ndimage.maximum(second_pixels, labels, lrange) - ) - combined_thresh = (first_pixels >= tff[labels - 1]) & ( - second_pixels >= tss[labels - 1] - ) - fi_thresh = first_pixels[combined_thresh] - si_thresh = second_pixels[combined_thresh] - tot_fi_thr = scipy.ndimage.sum( - first_pixels[first_pixels >= tff[labels - 1]], - labels[first_pixels >= tff[labels - 1]], - lrange, - ) - tot_si_thr = scipy.ndimage.sum( - second_pixels[second_pixels >= tss[labels - 1]], - labels[second_pixels >= tss[labels - 1]], - lrange, - ) ### update RWC1 and RWC2 with ther right position and the right values result += [ From b5357872dd48a65b2af6fbb33dbb6e7a322cab0a Mon Sep 17 00:00:00 2001 From: emiglietta Date: Tue, 26 Nov 2024 14:36:31 -0800 Subject: [PATCH 09/20] resolve conflict --- active_plugins/measurerwcperobj.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index 1f7fe29b..b0a6218d 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -321,18 +321,12 @@ def run_image_pair_objects( fi_thresh_obj = obj_pixels_img1[combined_thresh] #array of pixel values above threshold for the object si_thresh_obj = obj_pixels_img2[combined_thresh] - -<<<<<<< Updated upstream - tot_fi_thr = - tot_si_thr = -======= + tot_fi_thr_perObj = scipy.ndimage.sum( obj_pixels_img1[obj_pixels_img1 >= tff[label - 1]] ) tot_si_thr_perObj = scipy.ndimage.sum( obj_pixels_img2[obj_pixels_img2 >= tff[label - 1]] - ) ->>>>>>> Stashed changes Rank1 = numpy.lexsort([obj_pixels_img1]) #array with a value assigned to each position according to ascending rank (0 is the rank of the lowest value) Rank2 = numpy.lexsort([obj_pixels_img2]) From 894e792de2bf0d10459cbb31144626da95506b8f Mon Sep 17 00:00:00 2001 From: emiglietta Date: Tue, 26 Nov 2024 14:42:45 -0800 Subject: [PATCH 10/20] correct typo --- active_plugins/measurerwcperobj.py | 1 + 1 file changed, 1 insertion(+) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index b0a6218d..288f822b 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -327,6 +327,7 @@ def 
run_image_pair_objects( ) tot_si_thr_perObj = scipy.ndimage.sum( obj_pixels_img2[obj_pixels_img2 >= tff[label - 1]] + ) Rank1 = numpy.lexsort([obj_pixels_img1]) #array with a value assigned to each position according to ascending rank (0 is the rank of the lowest value) Rank2 = numpy.lexsort([obj_pixels_img2]) From cb46ab917cfb3aae033a10ac2547df5d96243a76 Mon Sep 17 00:00:00 2001 From: emiglietta Date: Tue, 26 Nov 2024 16:34:16 -0800 Subject: [PATCH 11/20] rename variables, working version! still have to check that the values make sense --- active_plugins/measurerwcperobj.py | 104 ++++++++++++++++++----------- 1 file changed, 64 insertions(+), 40 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index 288f822b..1a27f5fc 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -27,7 +27,8 @@ from cellprofiler_core.setting.text import Float from cellprofiler_core.utilities.core.object import size_similarly from centrosome.cpmorphology import fixup_scipy_ndimage_result as fix -from cellprofiler_core.utilities.core.object import crop_labels_and_image +from cellprofiler_core.utilities.core.object import crop_labels_and_image #NOT USED! +import cellprofiler_core.measurement LOGGER = logging.getLogger(__name__) @@ -247,6 +248,7 @@ def run_image_pair_objects( # Handle case when both images for the correlation are completely masked out # Threshold as percentage of maximum intensity in each channel + # Global threshold for all objects thr_fi = self.thr.value * numpy.max(fi) / 100 thr_si = self.thr.value * numpy.max(si) / 100 @@ -280,6 +282,7 @@ def run_image_pair_objects( RWC2 = numpy.zeros(len(lrange)) # Threshold as percentage of maximum intensity of objects in each channel + # Single threshold per object (it is calculated based on the highest pixel intensity in each object) tff = (self.thr.value / 100) * fix( scipy.ndimage.maximum(first_pixels, labels, lrange) ) @@ -287,6 +290,7 @@ def run_image_pair_objects( scipy.ndimage.maximum(second_pixels, labels, lrange) ) + #NOT USED! tot_fi_thr = scipy.ndimage.sum( first_pixels[first_pixels >= tff[labels - 1]], labels[first_pixels >= tff[labels - 1]], @@ -307,60 +311,80 @@ def run_image_pair_objects( # - move the 770 block inside this function after subsettingfirst_pixels and second_pixels #combined_thersh is an boolean array representing all the pixels in a single object, that is True in any pixel where BOTH fi and si are above their respective threshold - combined_thresh = (first_pixels[labels==label] > thr_fi) & (second_pixels[labels==label] > thr_si) + #is thr_fi == tff[labels==label] ?? + # combined_thresh_perObj = (first_pixels[labels==label] > tff[labels==label]) & (second_pixels[labels==label] > tss[labels==label]) + combined_thresh_perObj = (first_pixels[labels==label] > thr_fi) & (second_pixels[labels==label] > thr_si) - #obj_pixels_img1 = numpy.where(labels==label,first_pixels,0) - obj_pixels_img1 = first_pixels[labels==label] #ASK BETH - in this case where no object has disjointed pixels, the order of the values of first_pixels matches the order of the objects. What would happen with disjointed objects?! 
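# On the "disjointed objects" question above: first_pixels[labels==label] is plain
# boolean indexing, so it returns the label's pixels in row-major scan order even if
# the object consists of several disconnected pieces. Because both channels are
# indexed with the same labels==label mask, element i of first_pixels_perObj and
# element i of second_pixels_perObj always refer to the same pixel, so disconnected
# objects do not break the pairing.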
+ first_pixels_perObj = first_pixels[labels==label] + second_pixels_perObj = second_pixels[labels==label] - # objects.segmented is an array (10,10) were each pixel is assigned its corresponding label (1,2 or 3) - #obj_pixels_img2 = numpy.where(objects.segmented==label,second_pixel_data,0) - obj_pixels_img2 = second_pixels[labels==label] - - #obj_lrange = lrange[lrange==label] - - fi_thresh_obj = obj_pixels_img1[combined_thresh] #array of pixel values above threshold for the object - si_thresh_obj = obj_pixels_img2[combined_thresh] - + # sum of the above-threshold (for both channels) pixel intensities per object tot_fi_thr_perObj = scipy.ndimage.sum( - obj_pixels_img1[obj_pixels_img1 >= tff[label - 1]] + first_pixels_perObj[first_pixels_perObj >= tff[label - 1]] ) tot_si_thr_perObj = scipy.ndimage.sum( - obj_pixels_img2[obj_pixels_img2 >= tff[label - 1]] + second_pixels_perObj[second_pixels_perObj >= tff[label - 1]] ) + + #array of pixel values above threshold for the object + fi_thresh_obj = first_pixels_perObj[combined_thresh_perObj] + si_thresh_obj = second_pixels_perObj[combined_thresh_perObj] - Rank1 = numpy.lexsort([obj_pixels_img1]) #array with a value assigned to each position according to ascending rank (0 is the rank of the lowest value) - Rank2 = numpy.lexsort([obj_pixels_img2]) - Rank1_U = numpy.hstack( - [[False], obj_pixels_img1[Rank1[:-1]] != obj_pixels_img1[Rank1[1:]]] - ) #ASK BETH! this is a boolean array that has False every time pixel i from first_pixels (the list of pixel values from all objects in order) is equal to pixel i+1 - Rank2_U = numpy.hstack( - [[False], obj_pixels_img2[Rank2[:-1]] != obj_pixels_img2[Rank2[1:]]] + #array with a value assigned to each position according to ascending rank (0 is the rank of the lowest value) + Rank1_perObj = numpy.lexsort([first_pixels_perObj]) + Rank2_perObj = numpy.lexsort([second_pixels_perObj]) + + #ASK BETH! 
this is a boolean array that has False every time pixel i from first_pixels (the list of pixel values from all objects in order) is equal to pixel i+1 + Rank1_U_perObj = numpy.hstack( + [[False], first_pixels_perObj[Rank1_perObj[:-1]] != first_pixels_perObj[Rank1_perObj[1:]]] + ) + Rank2_U_perObj = numpy.hstack( + [[False], second_pixels_perObj[Rank2_perObj[:-1]] != second_pixels_perObj[Rank2_perObj[1:]]] ) - Rank1_S = numpy.cumsum(Rank1_U) #ask BETH, array with cumulative number of 'True' - Rank2_S = numpy.cumsum(Rank2_U) - Rank_obj_im1 = numpy.zeros(obj_pixels_img1.shape, dtype=int) - Rank_obj_im2 = numpy.zeros(obj_pixels_img2.shape, dtype=int) - Rank_obj_im1[Rank1] = Rank1_S - Rank_obj_im2[Rank2] = Rank2_S - - R = max(Rank_obj_im1.max(), Rank_obj_im2.max()) + 1 - Di = abs(Rank_obj_im1 - Rank_obj_im2) - weight = (R - Di) * 1.0 / R - weight_thresh = weight[combined_thresh] - - if numpy.any(combined_thresh): - RWC1 = numpy.array( + + #ask BETH, array with cumulative number of 'True' + Rank1_S_perObj = numpy.cumsum(Rank1_U_perObj) + Rank2_S_perObj = numpy.cumsum(Rank2_U_perObj) + + Rank_im1_perObj = numpy.zeros(first_pixels_perObj.shape, dtype=int) + Rank_im2_perObj = numpy.zeros(second_pixels_perObj.shape, dtype=int) + + Rank_im1_perObj[Rank1_perObj] = Rank1_S_perObj + Rank_im2_perObj[Rank2_perObj] = Rank2_S_perObj + + R_perObj = max(Rank_im1_perObj.max(), Rank_im2_perObj.max()) + 1 + Di_perObj = abs(Rank_im1_perObj - Rank_im2_perObj) + + weight_perObj = (R_perObj - Di_perObj) * 1.0 / R_perObj + weight_thresh_perObj = weight_perObj[combined_thresh_perObj] + + # Calculate RWC only if any of the object pixels are above threshold + # ...which will always be the case since the thr is calculated as a % of the max intensity pixel in each object + # ...unless the above-threshold pixels on one channel don't match the ones on the other, so I guess it makes sense... 
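# Summary of the ranking block above: Rank1_perObj is the argsort of the object's
# pixel values; Rank1_U_perObj is True wherever the sorted sequence steps up to a
# new value (False for ties); its cumulative sum Rank1_S_perObj is therefore a dense
# rank in which equal intensities share a rank; Rank_im1_perObj scatters that rank
# back to each pixel's position. R_perObj is the largest rank plus one, and
# weight_perObj = (R - |rank difference|) / R is close to 1 for pixels whose
# intensity ranks agree between the two channels.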
+ if numpy.any(combined_thresh_perObj): + # RWC1 = numpy.array( + # scipy.ndimage.sum( + # fi_thresh_obj * weight_thresh_perObj + # ) + # ) / numpy.array(tot_fi_thr_perObj) + # RWC2 = numpy.array( + # scipy.ndimage.sum( + # si_thresh_obj * weight_thresh_perObj + # ) + # ) / numpy.array(tot_si_thr_perObj) + + # RWC1 and 2 are arrays + RWC1[label-1] = numpy.array( scipy.ndimage.sum( - fi_thresh_obj * weight_thresh + fi_thresh_obj * weight_thresh_perObj ) ) / numpy.array(tot_fi_thr_perObj) - RWC2 = numpy.array( + RWC2[label-1] = numpy.array( scipy.ndimage.sum( - si_thresh_obj * weight_thresh + si_thresh_obj * weight_thresh_perObj ) ) / numpy.array(tot_si_thr_perObj) - ### update RWC1 and RWC2 with ther right position and the right values From f3029e468ee1250314c97939853871adc8b42415 Mon Sep 17 00:00:00 2001 From: emiglietta Date: Mon, 2 Dec 2024 16:21:33 -0800 Subject: [PATCH 12/20] cleanup and change F_RWC_FORMAT --- active_plugins/measurerwcperobj.py | 33 +++++++++--------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index 1a27f5fc..94c1ef58 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -53,7 +53,7 @@ M_PER_OBJECT = "Within each object individually" """Feature name format for the RWC Coefficient measurement""" -F_RWC_FORMAT = "Correlation_RWC_%s_%s" +F_RWC_FORMAT = "Correlation_RWC_perObj_%s_%s" class MeasureRWCperObj(Module): @@ -363,18 +363,7 @@ def run_image_pair_objects( # ...which will always be the case since the thr is calculated as a % of the max intensity pixel in each object # ...unless the above-threshold pixels on one channel don't match the ones on the other, so I guess it makes sense... if numpy.any(combined_thresh_perObj): - # RWC1 = numpy.array( - # scipy.ndimage.sum( - # fi_thresh_obj * weight_thresh_perObj - # ) - # ) / numpy.array(tot_fi_thr_perObj) - # RWC2 = numpy.array( - # scipy.ndimage.sum( - # si_thresh_obj * weight_thresh_perObj - # ) - # ) / numpy.array(tot_si_thr_perObj) - - # RWC1 and 2 are arrays + # RWC1 and 2 are arrays with the RWC value for each object in the set RWC1[label-1] = numpy.array( scipy.ndimage.sum( fi_thresh_obj * weight_thresh_perObj @@ -386,35 +375,33 @@ def run_image_pair_objects( ) ) / numpy.array(tot_si_thr_perObj) - ### update RWC1 and RWC2 with ther right position and the right values - result += [ [ first_image_name, second_image_name, object_name, - "Mean RWC coeff", + "Mean RWC_perObj coeff", "%.3f" % numpy.mean(RWC1), ], [ first_image_name, second_image_name, object_name, - "Median RWC coeff", + "Median RWC_perObj coeff", "%.3f" % numpy.median(RWC1), ], [ first_image_name, second_image_name, object_name, - "Min RWC coeff", + "Min RWC_perObj coeff", "%.3f" % numpy.min(RWC1), ], [ first_image_name, second_image_name, object_name, - "Max RWC coeff", + "Max RWC_perObj coeff", "%.3f" % numpy.max(RWC1), ], ] @@ -423,28 +410,28 @@ def run_image_pair_objects( second_image_name, first_image_name, object_name, - "Mean RWC coeff", + "Mean RWC_perObj coeff", "%.3f" % numpy.mean(RWC2), ], [ second_image_name, first_image_name, object_name, - "Median RWC coeff", + "Median RWC_perObj coeff", "%.3f" % numpy.median(RWC2), ], [ second_image_name, first_image_name, object_name, - "Min RWC coeff", + "Min RWC_perObj coeff", "%.3f" % numpy.min(RWC2), ], [ second_image_name, first_image_name, object_name, - "Max RWC coeff", + "Max RWC_perObj coeff", "%.3f" % numpy.max(RWC2), ], ] From 
c47c2bcf03367086a093d2b075335471c9db2514 Mon Sep 17 00:00:00 2001 From: emiglietta Date: Wed, 4 Dec 2024 11:05:51 -0800 Subject: [PATCH 13/20] fix naming of results --- active_plugins/measurerwcperobj.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index 94c1ef58..50afc4ed 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -53,7 +53,7 @@ M_PER_OBJECT = "Within each object individually" """Feature name format for the RWC Coefficient measurement""" -F_RWC_FORMAT = "Correlation_RWC_perObj_%s_%s" +F_RWCperObj_FORMAT = "Correlation_RWCperObj_%s_%s" class MeasureRWCperObj(Module): @@ -380,28 +380,28 @@ def run_image_pair_objects( first_image_name, second_image_name, object_name, - "Mean RWC_perObj coeff", + "Mean RWCperObj coeff", "%.3f" % numpy.mean(RWC1), ], [ first_image_name, second_image_name, object_name, - "Median RWC_perObj coeff", + "Median RWCperObj coeff", "%.3f" % numpy.median(RWC1), ], [ first_image_name, second_image_name, object_name, - "Min RWC_perObj coeff", + "Min RWCperObj coeff", "%.3f" % numpy.min(RWC1), ], [ first_image_name, second_image_name, object_name, - "Max RWC_perObj coeff", + "Max RWCperObj coeff", "%.3f" % numpy.max(RWC1), ], ] @@ -410,34 +410,34 @@ def run_image_pair_objects( second_image_name, first_image_name, object_name, - "Mean RWC_perObj coeff", + "Mean RWCperObj coeff", "%.3f" % numpy.mean(RWC2), ], [ second_image_name, first_image_name, object_name, - "Median RWC_perObj coeff", + "Median RWCperObj coeff", "%.3f" % numpy.median(RWC2), ], [ second_image_name, first_image_name, object_name, - "Min RWC_perObj coeff", + "Min RWCperObj coeff", "%.3f" % numpy.min(RWC2), ], [ second_image_name, first_image_name, object_name, - "Max RWC_perObj coeff", + "Max RWCperObj coeff", "%.3f" % numpy.max(RWC2), ], ] - rwc_measurement_1 = F_RWC_FORMAT % (first_image_name, second_image_name) - rwc_measurement_2 = F_RWC_FORMAT % (second_image_name, first_image_name) + rwc_measurement_1 = F_RWCperObj_FORMAT % (first_image_name, second_image_name) + rwc_measurement_2 = F_RWCperObj_FORMAT % (second_image_name, first_image_name) workspace.measurements.add_measurement(object_name, rwc_measurement_1, RWC1) workspace.measurements.add_measurement(object_name, rwc_measurement_2, RWC2) @@ -485,12 +485,12 @@ def get_measurement_columns(self, pipeline): columns += [ ( object_name, - F_RWC_FORMAT % (first_image, second_image), + F_RWCperObj_FORMAT % (first_image, second_image), COLTYPE_FLOAT, ), ( object_name, - F_RWC_FORMAT % (second_image, first_image), + F_RWCperObj_FORMAT % (second_image, first_image), COLTYPE_FLOAT, ), ] @@ -506,7 +506,7 @@ def get_categories(self, pipeline, object_name): def get_measurements(self, pipeline, object_name, category): if self.get_categories(pipeline, object_name) == [category]: results = [] - results += ["RWC"] + results += ["RWCperObj"] return results return [] From d3801f1e704355082ddda40c1bde7cb4f3f055e2 Mon Sep 17 00:00:00 2001 From: emiglietta Date: Sat, 11 Jan 2025 11:49:57 -0800 Subject: [PATCH 14/20] Update measurerwcperobj.py add comments clarifying R_perObj and Di_perObj --- active_plugins/measurerwcperobj.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index 50afc4ed..d804d62e 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -353,8 +353,8 @@ 
def run_image_pair_objects( Rank_im1_perObj[Rank1_perObj] = Rank1_S_perObj Rank_im2_perObj[Rank2_perObj] = Rank2_S_perObj - R_perObj = max(Rank_im1_perObj.max(), Rank_im2_perObj.max()) + 1 - Di_perObj = abs(Rank_im1_perObj - Rank_im2_perObj) + R_perObj = max(Rank_im1_perObj.max(), Rank_im2_perObj.max()) + 1 #max rank among all ranks in both ch + Di_perObj = abs(Rank_im1_perObj - Rank_im2_perObj) #absolute difference of rank between ch in each pixel weight_perObj = (R_perObj - Di_perObj) * 1.0 / R_perObj weight_thresh_perObj = weight_perObj[combined_thresh_perObj] From f8da85d6760156b80451febebdc7cf3633b85ce8 Mon Sep 17 00:00:00 2001 From: emiglietta Date: Wed, 5 Feb 2025 13:56:37 -0800 Subject: [PATCH 15/20] calculate threshold based on the max pixel intensity of each object individually --- active_plugins/measurerwcperobj.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index d804d62e..e71aa01a 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -310,15 +310,20 @@ def run_image_pair_objects( # same with fi_thresh, si_thresh, combined_thresh, tot_fi_thr, tot_si_thr # - move the 770 block inside this function after subsettingfirst_pixels and second_pixels - #combined_thersh is an boolean array representing all the pixels in a single object, that is True in any pixel where BOTH fi and si are above their respective threshold - #is thr_fi == tff[labels==label] ?? - # combined_thresh_perObj = (first_pixels[labels==label] > tff[labels==label]) & (second_pixels[labels==label] > tss[labels==label]) - combined_thresh_perObj = (first_pixels[labels==label] > thr_fi) & (second_pixels[labels==label] > thr_si) #ASK BETH - in this case where no object has disjointed pixels, the order of the values of first_pixels matches the order of the objects. What would happen with disjointed objects?! first_pixels_perObj = first_pixels[labels==label] second_pixels_perObj = second_pixels[labels==label] + # Local threshold for each object individually + thr_fi_perObj = self.thr.value * numpy.max(first_pixels_perObj) / 100 + thr_si_perObj = self.thr.value * numpy.max(second_pixels_perObj) / 100 + + #combined_thersh is an boolean array representing all the pixels in a single object, that is True in any pixel where BOTH fi and si are above their respective threshold + #is thr_fi == tff[labels==label] ?? + # combined_thresh_perObj = (first_pixels[labels==label] > tff[labels==label]) & (second_pixels[labels==label] > tss[labels==label]) + combined_thresh_perObj = (first_pixels_perObj > thr_fi_perObj) & (second_pixels_perObj > thr_si_perObj) + # sum of the above-threshold (for both channels) pixel intensities per object tot_fi_thr_perObj = scipy.ndimage.sum( first_pixels_perObj[first_pixels_perObj >= tff[label - 1]] From e5f155d0b240b782667a86f7311d2bf9fc2dbc45 Mon Sep 17 00:00:00 2001 From: emiglietta Date: Wed, 5 Feb 2025 14:47:29 -0800 Subject: [PATCH 16/20] correct thresholding per object using tff and tss. Need to check why Correlation_RWCperObj_V5_ER gives inf values and values way above 1! 
--- active_plugins/measurerwcperobj.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index e71aa01a..01237f1a 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -281,7 +281,7 @@ def run_image_pair_objects( RWC1 = numpy.zeros(len(lrange)) RWC2 = numpy.zeros(len(lrange)) - # Threshold as percentage of maximum intensity of objects in each channel + # List of thresholds for each object (as percentage of maximum intensity of objects in each channel # Single threshold per object (it is calculated based on the highest pixel intensity in each object) tff = (self.thr.value / 100) * fix( scipy.ndimage.maximum(first_pixels, labels, lrange) @@ -315,14 +315,9 @@ def run_image_pair_objects( first_pixels_perObj = first_pixels[labels==label] second_pixels_perObj = second_pixels[labels==label] - # Local threshold for each object individually - thr_fi_perObj = self.thr.value * numpy.max(first_pixels_perObj) / 100 - thr_si_perObj = self.thr.value * numpy.max(second_pixels_perObj) / 100 - - #combined_thersh is an boolean array representing all the pixels in a single object, that is True in any pixel where BOTH fi and si are above their respective threshold - #is thr_fi == tff[labels==label] ?? - # combined_thresh_perObj = (first_pixels[labels==label] > tff[labels==label]) & (second_pixels[labels==label] > tss[labels==label]) - combined_thresh_perObj = (first_pixels_perObj > thr_fi_perObj) & (second_pixels_perObj > thr_si_perObj) + #combined_thresh_perObj is an boolean array representing all the pixels in a single object + # It is True in any pixel where BOTH first_pixels_perObj and second_pixels_perObj are above their respective threshold + combined_thresh_perObj = (first_pixels_perObj > tff[label-1]) & (second_pixels_perObj > tss[label-1]) # sum of the above-threshold (for both channels) pixel intensities per object tot_fi_thr_perObj = scipy.ndimage.sum( From 6b4ed58a38f0aad2d1577c823181cc2984269a6b Mon Sep 17 00:00:00 2001 From: emiglietta Date: Wed, 19 Feb 2025 16:57:18 -0800 Subject: [PATCH 17/20] fix thresholding per object --- active_plugins/measurerwcperobj.py | 29 +++++++++-------------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index 01237f1a..d716a075 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -275,7 +275,7 @@ def run_image_pair_objects( RWC1 = RWC2 = corr else: - lrange = numpy.arange(n_objects, dtype=numpy.int32) + 1 + lrange = numpy.arange(n_objects, dtype=numpy.int32) + 1 # +1 bc Objects start at index 0 # RWC Coefficient RWC1 = numpy.zeros(len(lrange)) @@ -283,26 +283,15 @@ def run_image_pair_objects( # List of thresholds for each object (as percentage of maximum intensity of objects in each channel # Single threshold per object (it is calculated based on the highest pixel intensity in each object) - tff = (self.thr.value / 100) * fix( + tff_perObj = (self.thr.value / 100) * fix( scipy.ndimage.maximum(first_pixels, labels, lrange) ) - tss = (self.thr.value / 100) * fix( + tss_perObj = (self.thr.value / 100) * fix( scipy.ndimage.maximum(second_pixels, labels, lrange) ) - - #NOT USED! 
- tot_fi_thr = scipy.ndimage.sum( - first_pixels[first_pixels >= tff[labels - 1]], - labels[first_pixels >= tff[labels - 1]], - lrange, - ) - tot_si_thr = scipy.ndimage.sum( - second_pixels[second_pixels >= tss[labels - 1]], - labels[second_pixels >= tss[labels - 1]], - lrange, - ) - for label in numpy.unique(labels): + + for label in lrange: # set first_pixels to only what's inside that label and rename --> obj_pixels_img1 # same with second_pixels to only what's inside that label and rename --> obj_pixels_img2 # same with labels to only what's inside that label and rename --> obj_label @@ -317,14 +306,14 @@ def run_image_pair_objects( #combined_thresh_perObj is an boolean array representing all the pixels in a single object # It is True in any pixel where BOTH first_pixels_perObj and second_pixels_perObj are above their respective threshold - combined_thresh_perObj = (first_pixels_perObj > tff[label-1]) & (second_pixels_perObj > tss[label-1]) + combined_thresh_perObj = (first_pixels_perObj > tff_perObj[label-1]) & (second_pixels_perObj > tss_perObj[label-1]) # sum of the above-threshold (for both channels) pixel intensities per object tot_fi_thr_perObj = scipy.ndimage.sum( - first_pixels_perObj[first_pixels_perObj >= tff[label - 1]] + first_pixels_perObj[first_pixels_perObj >= tff_perObj[label - 1]] ) tot_si_thr_perObj = scipy.ndimage.sum( - second_pixels_perObj[second_pixels_perObj >= tff[label - 1]] + second_pixels_perObj[second_pixels_perObj >= tss_perObj[label - 1]] ) #array of pixel values above threshold for the object @@ -353,7 +342,7 @@ def run_image_pair_objects( Rank_im1_perObj[Rank1_perObj] = Rank1_S_perObj Rank_im2_perObj[Rank2_perObj] = Rank2_S_perObj - R_perObj = max(Rank_im1_perObj.max(), Rank_im2_perObj.max()) + 1 #max rank among all ranks in both ch + R_perObj = max(Rank_im1_perObj.max(), Rank_im2_perObj.max()) + 1 #max rank among all ranks in both ch (+1 to avoid division by 0) Di_perObj = abs(Rank_im1_perObj - Rank_im2_perObj) #absolute difference of rank between ch in each pixel weight_perObj = (R_perObj - Di_perObj) * 1.0 / R_perObj From f5b4ba19735a930f5c0cd4c99ea2ddf6cde661bc Mon Sep 17 00:00:00 2001 From: emiglietta Date: Fri, 21 Feb 2025 18:04:07 -0800 Subject: [PATCH 18/20] implement threshold check measure absolute and relative (vs the pixels of the object) above-threshold (both thresholds) pixels --- active_plugins/measurerwcperobj.py | 61 ++++++++++++++++++++++++++++-- 1 file changed, 57 insertions(+), 4 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index d716a075..f4f008a6 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -280,6 +280,8 @@ def run_image_pair_objects( # RWC Coefficient RWC1 = numpy.zeros(len(lrange)) RWC2 = numpy.zeros(len(lrange)) + above_thresh_pixels_perObj = numpy.zeros(len(lrange)) + relative_above_thresh_pixels_perObj = numpy.zeros(len(lrange)) # List of thresholds for each object (as percentage of maximum intensity of objects in each channel # Single threshold per object (it is calculated based on the highest pixel intensity in each object) @@ -307,6 +309,11 @@ def run_image_pair_objects( #combined_thresh_perObj is an boolean array representing all the pixels in a single object # It is True in any pixel where BOTH first_pixels_perObj and second_pixels_perObj are above their respective threshold combined_thresh_perObj = (first_pixels_perObj > tff_perObj[label-1]) & (second_pixels_perObj > tss_perObj[label-1]) + + # Count the number of 
above-threshold pixels remaining in the object, and get relative value vs the size of the object + #store values in arrays (remember label starts at 1 and array index at 0) + above_thresh_pixels_perObj[label-1] = numpy.count_nonzero(combined_thresh_perObj) + relative_above_thresh_pixels_perObj[label-1] = above_thresh_pixels_perObj[label-1] / numpy.count_nonzero(first_pixels_perObj) # sum of the above-threshold (for both channels) pixel intensities per object tot_fi_thr_perObj = scipy.ndimage.sum( @@ -424,11 +431,47 @@ def run_image_pair_objects( "%.3f" % numpy.max(RWC2), ], ] + result += [ + [ + first_image_name, + second_image_name, + object_name, + "Mean relative above-thresh px", + "%.3f" % numpy.mean(relative_above_thresh_pixels_perObj), + ], + [ + first_image_name, + second_image_name, + object_name, + "Median relative above-thresh px", + "%.3f" % numpy.median(relative_above_thresh_pixels_perObj), + ], + [ + first_image_name, + second_image_name, + object_name, + "Min relative above-thresh px", + "%.3f" % numpy.min(relative_above_thresh_pixels_perObj), + ], + [ + first_image_name, + second_image_name, + object_name, + "Max relative above-thresh px", + "%.3f" % numpy.max(relative_above_thresh_pixels_perObj), + ], + ] rwc_measurement_1 = F_RWCperObj_FORMAT % (first_image_name, second_image_name) rwc_measurement_2 = F_RWCperObj_FORMAT % (second_image_name, first_image_name) + rwc_measurement_3 = F_RWCperObj_FORMAT % (first_image_name, second_image_name) + "_aboveThreshPixels_abs" + rwc_measurement_4 = F_RWCperObj_FORMAT % (first_image_name, second_image_name) + "_aboveThreshPixels_rel" + + workspace.measurements.add_measurement(object_name, rwc_measurement_1, RWC1) workspace.measurements.add_measurement(object_name, rwc_measurement_2, RWC2) + workspace.measurements.add_measurement(object_name, rwc_measurement_3, above_thresh_pixels_perObj) + workspace.measurements.add_measurement(object_name, rwc_measurement_4, relative_above_thresh_pixels_perObj) if n_objects == 0: return [ @@ -436,28 +479,28 @@ def run_image_pair_objects( first_image_name, second_image_name, object_name, - "Mean correlation", + "Mean RWCperObj coeff", "-", ], [ first_image_name, second_image_name, object_name, - "Median correlation", + "Median RWCperObj coeff", "-", ], [ first_image_name, second_image_name, object_name, - "Min correlation", + "Min RWCperObj coeff", "-", ], [ first_image_name, second_image_name, object_name, - "Max correlation", + "Max RWCperObj coeff", "-", ], ] @@ -482,6 +525,16 @@ def get_measurement_columns(self, pipeline): F_RWCperObj_FORMAT % (second_image, first_image), COLTYPE_FLOAT, ), + ( + object_name, + F_RWCperObj_FORMAT % (first_image, second_image) + "_aboveThreshPixels_abs", + COLTYPE_FLOAT, + ), + ( + object_name, + F_RWCperObj_FORMAT % (first_image, second_image) + "_aboveThreshPixels_rel", + COLTYPE_FLOAT, + ), ] return columns From c9eb3151fcb7f70af33c812d47fcc3805ce1b030 Mon Sep 17 00:00:00 2001 From: emiglietta Date: Fri, 21 Feb 2025 20:19:33 -0800 Subject: [PATCH 19/20] correct thresholding I changed thresholding to be >= instead of >, bc otherwise, pixels of value 0 get automatically excluded no matter the threshold --- active_plugins/measurerwcperobj.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index f4f008a6..348f1ece 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -308,12 +308,13 @@ def run_image_pair_objects( #combined_thresh_perObj 
is an boolean array representing all the pixels in a single object # It is True in any pixel where BOTH first_pixels_perObj and second_pixels_perObj are above their respective threshold - combined_thresh_perObj = (first_pixels_perObj > tff_perObj[label-1]) & (second_pixels_perObj > tss_perObj[label-1]) + # I needed to add 'above or equal' bc otherwise, 0 value pixels automatically get excluded no matter the threshold + combined_thresh_perObj = (first_pixels_perObj >= tff_perObj[label-1]) & (second_pixels_perObj >= tss_perObj[label-1]) # Count the number of above-threshold pixels remaining in the object, and get relative value vs the size of the object #store values in arrays (remember label starts at 1 and array index at 0) above_thresh_pixels_perObj[label-1] = numpy.count_nonzero(combined_thresh_perObj) - relative_above_thresh_pixels_perObj[label-1] = above_thresh_pixels_perObj[label-1] / numpy.count_nonzero(first_pixels_perObj) + relative_above_thresh_pixels_perObj[label-1] = above_thresh_pixels_perObj[label-1] / len(first_pixels_perObj) # sum of the above-threshold (for both channels) pixel intensities per object tot_fi_thr_perObj = scipy.ndimage.sum( From 4b39f9dac8bf9af92cdbe9404fb521ea161c1133 Mon Sep 17 00:00:00 2001 From: emiglietta Date: Mon, 24 Mar 2025 15:50:09 -0700 Subject: [PATCH 20/20] delete unused code, fix naming of RWC1/2 and go back to setting threshold as greater only --- active_plugins/measurerwcperobj.py | 52 +++++++++++++----------------- 1 file changed, 22 insertions(+), 30 deletions(-) diff --git a/active_plugins/measurerwcperobj.py b/active_plugins/measurerwcperobj.py index 348f1ece..5d7fc86e 100644 --- a/active_plugins/measurerwcperobj.py +++ b/active_plugins/measurerwcperobj.py @@ -264,22 +264,22 @@ def run_image_pair_objects( # K2 = numpy.zeros((0,)) # M1 = numpy.zeros((0,)) # M2 = numpy.zeros((0,)) - RWC1 = numpy.zeros((0,)) - RWC2 = numpy.zeros((0,)) + RWC1_perObj = numpy.zeros((0,)) + RWC2_perObj = numpy.zeros((0,)) # C1 = numpy.zeros((0,)) # C2 = numpy.zeros((0,)) elif numpy.where(mask)[0].__len__() == 0: corr = numpy.zeros((n_objects,)) corr[:] = numpy.NaN - # overlap = K1 = K2 = M1 = M2 = RWC1 = RWC2 = C1 = C2 = corr - RWC1 = RWC2 = corr + # overlap = K1 = K2 = M1 = M2 = RWC1_perObj = RWC2_perObj = C1 = C2 = corr + RWC1_perObj = RWC2_perObj = corr else: lrange = numpy.arange(n_objects, dtype=numpy.int32) + 1 # +1 bc Objects start at index 0 # RWC Coefficient - RWC1 = numpy.zeros(len(lrange)) - RWC2 = numpy.zeros(len(lrange)) + RWC1_perObj = numpy.zeros(len(lrange)) + RWC2_perObj = numpy.zeros(len(lrange)) above_thresh_pixels_perObj = numpy.zeros(len(lrange)) relative_above_thresh_pixels_perObj = numpy.zeros(len(lrange)) @@ -294,22 +294,14 @@ def run_image_pair_objects( for label in lrange: - # set first_pixels to only what's inside that label and rename --> obj_pixels_img1 - # same with second_pixels to only what's inside that label and rename --> obj_pixels_img2 - # same with labels to only what's inside that label and rename --> obj_label - # same with lrange to only what's inside that label and rename --> obj_lrange - # same with fi_thresh, si_thresh, combined_thresh, tot_fi_thr, tot_si_thr - # - move the 770 block inside this function after subsettingfirst_pixels and second_pixels - - #ASK BETH - in this case where no object has disjointed pixels, the order of the values of first_pixels matches the order of the objects. What would happen with disjointed objects?! 
first_pixels_perObj = first_pixels[labels==label] second_pixels_perObj = second_pixels[labels==label] #combined_thresh_perObj is an boolean array representing all the pixels in a single object # It is True in any pixel where BOTH first_pixels_perObj and second_pixels_perObj are above their respective threshold # I needed to add 'above or equal' bc otherwise, 0 value pixels automatically get excluded no matter the threshold - combined_thresh_perObj = (first_pixels_perObj >= tff_perObj[label-1]) & (second_pixels_perObj >= tss_perObj[label-1]) + combined_thresh_perObj = (first_pixels_perObj > tff_perObj[label-1]) & (second_pixels_perObj > tss_perObj[label-1]) # Count the number of above-threshold pixels remaining in the object, and get relative value vs the size of the object #store values in arrays (remember label starts at 1 and array index at 0) @@ -318,10 +310,10 @@ def run_image_pair_objects( # sum of the above-threshold (for both channels) pixel intensities per object tot_fi_thr_perObj = scipy.ndimage.sum( - first_pixels_perObj[first_pixels_perObj >= tff_perObj[label - 1]] + first_pixels_perObj[first_pixels_perObj > tff_perObj[label - 1]] ) tot_si_thr_perObj = scipy.ndimage.sum( - second_pixels_perObj[second_pixels_perObj >= tss_perObj[label - 1]] + second_pixels_perObj[second_pixels_perObj > tss_perObj[label - 1]] ) #array of pixel values above threshold for the object @@ -360,13 +352,13 @@ def run_image_pair_objects( # ...which will always be the case since the thr is calculated as a % of the max intensity pixel in each object # ...unless the above-threshold pixels on one channel don't match the ones on the other, so I guess it makes sense... if numpy.any(combined_thresh_perObj): - # RWC1 and 2 are arrays with the RWC value for each object in the set - RWC1[label-1] = numpy.array( + # RWC1_perObj and 2 are arrays with the RWC value for each object in the set + RWC1_perObj[label-1] = numpy.array( scipy.ndimage.sum( fi_thresh_obj * weight_thresh_perObj ) ) / numpy.array(tot_fi_thr_perObj) - RWC2[label-1] = numpy.array( + RWC2_perObj[label-1] = numpy.array( scipy.ndimage.sum( si_thresh_obj * weight_thresh_perObj ) @@ -378,28 +370,28 @@ def run_image_pair_objects( second_image_name, object_name, "Mean RWCperObj coeff", - "%.3f" % numpy.mean(RWC1), + "%.3f" % numpy.mean(RWC1_perObj), ], [ first_image_name, second_image_name, object_name, "Median RWCperObj coeff", - "%.3f" % numpy.median(RWC1), + "%.3f" % numpy.median(RWC1_perObj), ], [ first_image_name, second_image_name, object_name, "Min RWCperObj coeff", - "%.3f" % numpy.min(RWC1), + "%.3f" % numpy.min(RWC1_perObj), ], [ first_image_name, second_image_name, object_name, "Max RWCperObj coeff", - "%.3f" % numpy.max(RWC1), + "%.3f" % numpy.max(RWC1_perObj), ], ] result += [ @@ -408,28 +400,28 @@ def run_image_pair_objects( first_image_name, object_name, "Mean RWCperObj coeff", - "%.3f" % numpy.mean(RWC2), + "%.3f" % numpy.mean(RWC2_perObj), ], [ second_image_name, first_image_name, object_name, "Median RWCperObj coeff", - "%.3f" % numpy.median(RWC2), + "%.3f" % numpy.median(RWC2_perObj), ], [ second_image_name, first_image_name, object_name, "Min RWCperObj coeff", - "%.3f" % numpy.min(RWC2), + "%.3f" % numpy.min(RWC2_perObj), ], [ second_image_name, first_image_name, object_name, "Max RWCperObj coeff", - "%.3f" % numpy.max(RWC2), + "%.3f" % numpy.max(RWC2_perObj), ], ] result += [ @@ -469,8 +461,8 @@ def run_image_pair_objects( rwc_measurement_4 = F_RWCperObj_FORMAT % (first_image_name, second_image_name) + "_aboveThreshPixels_rel" - 
workspace.measurements.add_measurement(object_name, rwc_measurement_1, RWC1) - workspace.measurements.add_measurement(object_name, rwc_measurement_2, RWC2) + workspace.measurements.add_measurement(object_name, rwc_measurement_1, RWC1_perObj) + workspace.measurements.add_measurement(object_name, rwc_measurement_2, RWC2_perObj) workspace.measurements.add_measurement(object_name, rwc_measurement_3, above_thresh_pixels_perObj) workspace.measurements.add_measurement(object_name, rwc_measurement_4, relative_above_thresh_pixels_perObj)
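
For reference, the numerical recipe that this patch series converges on can be exercised outside CellProfiler. The sketch below is not the module itself (there is no Module class, workspace, or settings object); the function name rwc_per_object, its argument names, the 15% default threshold, the zero-denominator guard and the toy example at the bottom are illustrative additions. The steps, however, mirror the final diff: a per-object, per-channel threshold taken as a percentage of that object's maximum, dense intensity ranks built with lexsort and cumsum, a weight of (R - |rank difference|) / R, and rank-weighted sums normalised by each channel's own above-threshold intensity.

import numpy
import scipy.ndimage


def rwc_per_object(first_pixels, second_pixels, labels, thr_percent=15.0):
    """Return (RWC1, RWC2): one rank-weighted colocalization value per label."""
    lrange = numpy.arange(labels.max(), dtype=numpy.int32) + 1  # labels are 1..N
    rwc1 = numpy.zeros(len(lrange))
    rwc2 = numpy.zeros(len(lrange))

    # One threshold per object and per channel: a percentage of that object's
    # brightest pixel in that channel (the role of tff_perObj / tss_perObj above).
    tff = (thr_percent / 100.0) * numpy.asarray(
        scipy.ndimage.maximum(first_pixels, labels, lrange)
    )
    tss = (thr_percent / 100.0) * numpy.asarray(
        scipy.ndimage.maximum(second_pixels, labels, lrange)
    )

    def dense_rank(values):
        # Dense intensity ranks: tied values share a rank (lexsort + cumsum trick).
        order = numpy.lexsort([values])
        new_value = numpy.hstack(
            [[False], values[order][:-1] != values[order][1:]]
        )
        rank = numpy.zeros(values.shape, dtype=int)
        rank[order] = numpy.cumsum(new_value)
        return rank

    for label in lrange:
        in_label = labels == label
        if not numpy.any(in_label):
            continue  # label has no pixels
        fi = first_pixels[in_label]   # this object's pixels, channel 1
        si = second_pixels[in_label]  # this object's pixels, channel 2

        # Pixels above threshold in BOTH channels, plus each channel's own
        # above-threshold intensity sum (the denominators).
        combined = (fi > tff[label - 1]) & (si > tss[label - 1])
        tot_fi = fi[fi > tff[label - 1]].sum()
        tot_si = si[si > tss[label - 1]].sum()

        rank1 = dense_rank(fi)
        rank2 = dense_rank(si)
        r = max(rank1.max(), rank2.max()) + 1
        weight = (r - numpy.abs(rank1 - rank2)) / r  # 1.0 where the ranks agree

        # Guard against the zero denominators behind the "inf values" noted in the
        # commit messages; this guard is an addition of the sketch, not of the patches.
        if numpy.any(combined) and tot_fi > 0 and tot_si > 0:
            rwc1[label - 1] = (fi[combined] * weight[combined]).sum() / tot_fi
            rwc2[label - 1] = (si[combined] * weight[combined]).sum() / tot_si

    return rwc1, rwc2


if __name__ == "__main__":
    # Toy example: two 4x4 "images" and two square objects labelled 1 and 2.
    labels = numpy.array([[1, 1, 2, 2],
                          [1, 1, 2, 2],
                          [1, 1, 2, 2],
                          [1, 1, 2, 2]])
    rng = numpy.random.default_rng(0)
    channel_1 = rng.random(labels.shape)
    channel_2 = channel_1 + 0.05 * rng.random(labels.shape)  # nearly colocalized
    print(rwc_per_object(channel_1, channel_2, labels))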