
multi-material CT scan: avoid saving data to disk between gVXR and CIL #10

@paskino

One could skip saving the data to disk between gVXR and CIL by wrapping the simulated projections directly in a CIL AcquisitionData object:

import numpy as np
from gvxrPython3 import gvxr

# Set the source position
source_position = [0.0, sod, 0.0]
gvxr.setSourcePosition(*source_position, "mm")

# Set the detector properties
detector_position = (0.0, -(sdd - sod), 0.0)
gvxr.setDetectorPosition(*detector_position, "mm")

detectorUp = (0, 0, 1)
gvxr.setDetectorUpVector(*detectorUp)

# Downsample the detector by scaling_factor relative to the reference image
scaling_factor = 8
detectorNumberOfPixels = (raw_reference.shape[1] // scaling_factor, raw_reference.shape[0] // scaling_factor)
gvxr.setDetectorNumberOfPixels(*detectorNumberOfPixels)

detectorSpacing = (imager_pixel_spacing_in_um[0] * scaling_factor, imager_pixel_spacing_in_um[1] * scaling_factor)
gvxr.setDetectorPixelSize(*detectorSpacing, "um")
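
A quick sanity check can help before running the full scan. The sketch below assumes the sample meshes and the beam spectrum have already been loaded earlier in the notebook; it simulates a single projection with the geometry above and displays it with matplotlib:

# Optional sanity check: simulate one projection and display it.
# Assumes the sample meshes and beam spectrum are already set up.
import matplotlib.pyplot as plt

single_projection = np.asarray(gvxr.computeXRayImage(), dtype=np.float32)
plt.imshow(single_projection, cmap="gray")
plt.colorbar()
plt.title("Single simulated projection")
plt.show()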

gvxr.computeCTAcquisition("", "",
                          number_of_projections, # The total number of projections to simulate.
                          0, # The rotation angle corresponding to the first projection.
                          True, # Include the last angle; used to calculate the angular step between successive projections.
                          360, # The rotation angle corresponding to the last projection.
                          0, # The number of white images used for flat-field correction. If zero, no correction is performed.
                          *translation_vector_in_mm, # The location of the rotation centre.
                          "mm", # The corresponding unit of length.
                          *gvxr.getDetectorUpVector(), # The rotation axis.
                          True # If True the energy fluence is returned, otherwise the number of photons (default: True).
)

# Retrieve the simulated projections directly from memory, without writing to disk.
ctscan = np.asarray(gvxr.getLastProjectionSet(), dtype=np.float32)
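
Before building the CIL geometry, it is worth checking that the projection stack has the layout CIL expects. The check below assumes gVXR returns the stack as (angle, row, column), which matches the default ('angle', 'vertical', 'horizontal') ordering of a 3D cone-beam AcquisitionData:

# Assumed layout: (number_of_projections, detector rows, detector columns),
# i.e. ('angle', 'vertical', 'horizontal') in CIL terms.
print(ctscan.shape)
assert ctscan.shape[0] == number_of_projections
assert ctscan.shape[1:] == (detectorNumberOfPixels[1], detectorNumberOfPixels[0])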


from cil.framework import AcquisitionData, AcquisitionGeometry, ImageGeometry
from cil.utilities.display import show_geometry

# Build the matching CIL cone-beam geometry (all distances in mm).
ag = AcquisitionGeometry.create_Cone3D(source_position=source_position,
                                       detector_position=detector_position,
                                       # detector_direction=detectorUp,
                                       rotation_axis_direction=[0, 0, 1],
                                       )
# Pixel sizes are converted from um to mm.
ag.set_panel(detectorNumberOfPixels, [el / 1000 for el in detectorSpacing], 'bottom-right')
ag.set_angles(np.linspace(0, 2 * np.pi, number_of_projections, endpoint=False),
              angle_unit='radian')

# Wrap the in-memory gVXR projections without copying them.
data = AcquisitionData(ctscan, geometry=ag, deep_copy=False)
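
The geometry can be checked visually with show_geometry(ag). Note also that gVXR returns raw intensities (energy fluence here), so the data will typically need flat-field normalisation and a negative log before reconstruction. A minimal sketch using CIL's TransmissionAbsorptionConverter, assuming the maximum intensity is an acceptable stand-in for the white level:

# Visual check of the acquisition geometry.
show_geometry(ag)

# Convert raw intensities to absorption data before reconstruction.
# Using the maximum intensity as the white level is an assumption; a
# dedicated flat-field simulation (no sample) would be more rigorous.
from cil.processors import TransmissionAbsorptionConverter

converter = TransmissionAbsorptionConverter(white_level=data.max())
converter.set_input(data)
absorption = converter.get_output()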

These changes correctly reconstruct the simulated data:

[Image: reconstruction of the simulated data]
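
For completeness, a minimal reconstruction sketch from the in-memory data, assuming the TIGRE backend is installed and using the absorption-corrected data from above:

# FDK reconstruction of the cone-beam data (assumes the TIGRE backend).
from cil.recon import FDK
from cil.utilities.display import show2D

absorption.reorder(order='tigre')
recon = FDK(absorption).run()
show2D(recon)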
