From d65855d67ea0f16416bde2efa0b9d50b179cc726 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Tue, 7 May 2024 09:30:02 +0200 Subject: [PATCH 01/28] add a some code for the slice monitor (incomplete) --- xfields/beam_elements/sliced_element.py | 25 ++++-- xfields/beam_elements/sliced_monitor.py | 102 ++++++++++++++++++++++++ xfields/beam_elements/wakefield.py | 14 ++-- 3 files changed, 124 insertions(+), 17 deletions(-) create mode 100644 xfields/beam_elements/sliced_monitor.py diff --git a/xfields/beam_elements/sliced_element.py b/xfields/beam_elements/sliced_element.py index f9544258..edcd4d59 100644 --- a/xfields/beam_elements/sliced_element.py +++ b/xfields/beam_elements/sliced_element.py @@ -55,14 +55,10 @@ def __init__(self, self.with_compressed_profile = with_compressed_profile - if filling_scheme is None and bunch_numbers is None: - if num_slots is None: - num_slots = 1 - filling_scheme = np.ones(num_slots, dtype=np.int64) - bunch_numbers = np.arange(num_slots, dtype=np.int64) - else: - assert (num_slots is None and filling_scheme is not None and - bunch_numbers is not None) + filling_scheme, bunch_numbers = self._check_filling_scheme_info( + filling_scheme=filling_scheme, + bunch_numbers=bunch_numbers, + num_slots=num_slots) self.source_moments = slicer_moments.copy() @@ -86,6 +82,19 @@ def __init__(self, num_turns=num_turns, circumference=circumference) + @staticmethod + def _check_filling_scheme_info(filling_scheme, bunch_numbers, num_slots): + if filling_scheme is None and bunch_numbers is None: + if num_slots is None: + num_slots = 1 + filling_scheme = np.ones(num_slots, dtype=np.int64) + bunch_numbers = np.arange(num_slots, dtype=np.int64) + else: + assert (num_slots is None and filling_scheme is not None and + bunch_numbers is not None) + + return filling_scheme, bunch_numbers + def init_slicer(self, zeta_range, num_slices, filling_scheme, bunch_numbers, bunch_spacing_zeta, slicer_moments): if zeta_range is not None: diff --git a/xfields/beam_elements/sliced_monitor.py b/xfields/beam_elements/sliced_monitor.py new file mode 100644 index 00000000..a5fc6995 --- /dev/null +++ b/xfields/beam_elements/sliced_monitor.py @@ -0,0 +1,102 @@ +import numpy as np + +from .sliced_element import SlicedElement + + +class SlicedMonitor(SlicedElement): + """ + Base class for elements with a slicer. + + Parameters + ---------- + slicer_moments: List + Moments for the slicer + zeta_range : Tuple + Zeta range for each bunch used in the underlying slicer. + num_slices : int + Number of slices per bunch used in the underlying slicer. + bunch_spacing_zeta : float + Bunch spacing in meters. + num_slots : int + Number of filled slots. + filling_scheme: np.ndarray + List of zeros and ones representing the filling scheme. The length + of the array is equal to the number of slots in the machine and each + element of the array holds a one if the slot is filled or a zero + otherwise. 
+ bunch_numbers: np.ndarray + List of the bunches indicating which slots from the filling scheme are + used (not all the bunches are used when using multi-processing) + _flatten: bool + Use flattened wakes + """ + + def __init__(self, + file_backend, + slicer_moments=None, + zeta_range=None, # These are [a, b] in the paper + num_slices=None, # Per bunch, this is N_1 in the paper + bunch_spacing_zeta=None, # This is P in the paper + num_slots=None, + filling_scheme=None, + bunch_numbers=None, + _flatten=False): + + self.file_backend = file_backend + + super().__init__( + slicer_moments=slicer_moments, + zeta_range=zeta_range, # These are [a, b] in the paper + num_slices=num_slices, # Per bunch, this is N_1 in the paper + bunch_spacing_zeta=bunch_spacing_zeta, # This is P in the paper + num_slots=num_slots, + filling_scheme=filling_scheme, + bunch_numbers=bunch_numbers, + with_compressed_profile=False + ) + + self.i_turn = 0 + + def track(self, particles, _slice_result=None, _other_bunch_slicers=None): + super().track(particles=particles, + _slice_result=_slice_result, + _other_bunch_slicers=_other_bunch_slicers + ) + + # dump beam data + self.file_backend.dump_data(self.i_turn, self.slicer, particles) + + self.i_turn += 1 + + +class BaseFileBackend: + def __init__(self, base_filename, monitor_beam, beam_monitor_stride, + monitor_bunches, bunch_monitor_stride, + monitor_particles, particle_monitor_stride): + + self.base_filename = base_filename + self.monitor_beam = monitor_beam + self.beam_monitor_stride = beam_monitor_stride + self.monitor_bunches = monitor_bunches + self.bunch_monitor_stride = bunch_monitor_stride + self.monitor_particles = monitor_particles + self.particle_monitor_stride = particle_monitor_stride + + def init_files(self): + raise RuntimeError('File backend must implement init_files') + + def dump_data(self, i_turn, slicer, particles): + if self.monitor_beam and i_turn % self.beam_monitor_stride == 0: + self.dump_beam_data(slicer) + + if self.monitor_beam and i_turn % self.beam_monitor_stride == 0: + self.dump_beam_data(slicer) + + def dump_beam_data(self, slicer): + raise RuntimeError('File backend must implement dump_beam_data') + + def dump_bunch_data(self, slicer): + raise RuntimeError('File backend must implement dump_bunch_data') + + def dump_particle_data(self, slicer, particles): + raise RuntimeError('File backend must implement dump_bunch_data') \ No newline at end of file diff --git a/xfields/beam_elements/wakefield.py b/xfields/beam_elements/wakefield.py index d64462a3..e7616a01 100644 --- a/xfields/beam_elements/wakefield.py +++ b/xfields/beam_elements/wakefield.py @@ -56,15 +56,11 @@ def __init__(self, wakefields, _flatten=False): self.wakefields = wakefields - - if filling_scheme is None and bunch_numbers is None: - if num_slots is None: - num_slots = 1 - filling_scheme = np.ones(num_slots, dtype=np.int64) - bunch_numbers = np.arange(num_slots, dtype=np.int64) - else: - assert (num_slots is None and filling_scheme is not None and - bunch_numbers is not None) + + filling_scheme, bunch_numbers = self._check_filling_scheme_info( + filling_scheme=filling_scheme, + bunch_numbers=bunch_numbers, + num_slots=num_slots) all_slicer_moments = [] for wf in self.wakefields: From 905dd79aee41e5b786e21774b20ead509e3039f5 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Fri, 17 May 2024 11:51:57 +0200 Subject: [PATCH 02/28] start moving handling of buffer in sliced element --- xfields/beam_elements/sliced_monitor.py | 237 ++++++++++++++++++++++-- 1 file changed, 221 
insertions(+), 16 deletions(-) diff --git a/xfields/beam_elements/sliced_monitor.py b/xfields/beam_elements/sliced_monitor.py index a5fc6995..817be798 100644 --- a/xfields/beam_elements/sliced_monitor.py +++ b/xfields/beam_elements/sliced_monitor.py @@ -1,6 +1,8 @@ import numpy as np from .sliced_element import SlicedElement +from mpi4py import MPI +import h5py as hp class SlicedMonitor(SlicedElement): @@ -31,8 +33,19 @@ class SlicedMonitor(SlicedElement): Use flattened wakes """ + _stats_to_store = [ + 'mean_x', 'mean_px', 'mean_y', 'mean_py', 'mean_zeta', + 'mean_delta', 'sigma_x', 'sigma_y', 'sigma_zeta', 'sigma_delta', + 'epsn_x', 'epsn_y', 'epsn_z', 'num_particles'] + def __init__(self, file_backend, + monitor_bunches, + monitor_slices, + monitor_particles, + n_steps, + buffer_size, + beta_gamma, slicer_moments=None, zeta_range=None, # These are [a, b] in the paper num_slices=None, # Per bunch, this is N_1 in the paper @@ -40,9 +53,29 @@ def __init__(self, num_slots=None, filling_scheme=None, bunch_numbers=None, + stats_to_store=None, _flatten=False): + if stats_to_store is not None: + for stat in stats_to_store: + assert stat in self._stats_to_store + self.stats_to_store = stats_to_store + else: + self.stats_to_store = [ + 'mean_x', 'mean_px', 'mean_y', 'mean_py', 'mean_zeta', + 'mean_delta', 'sigma_x', 'sigma_y', 'sigma_zeta', 'sigma_delta', + 'epsn_x', 'epsn_y', 'epsn_z', 'num_particles'] + self.file_backend = file_backend + self.bunch_buffer = None + self.slice_buffer = None + self.particle_buffer = None + self.n_steps = n_steps + self.buffer_size = buffer_size + self.beta_gamma = beta_gamma + self.monitor_bunches = monitor_bunches, + self.monitor_slices = monitor_slices, + self.monitor_particles = monitor_particles, super().__init__( slicer_moments=slicer_moments, @@ -57,46 +90,218 @@ def __init__(self, self.i_turn = 0 + def _init_bunch_buffer(self): + buf = {} + for bid in self.slicer.bunch_numbers: + buf[bid] = {} + for stats in self.stats_to_store: + buf[bid][stats] = np.zeros(self.n_steps) + + return buf + + def _init_slice_buffer(self): + buf = {} + for bid in self.slicer.bunch_numbers: + buf[bid] = {} + for stats in self.stats_to_store: + buf[bid][stats] = np.zeros(self.slicer.num_slices, self.n_steps) + + return buf + def track(self, particles, _slice_result=None, _other_bunch_slicers=None): super().track(particles=particles, _slice_result=_slice_result, _other_bunch_slicers=_other_bunch_slicers ) - # dump beam data + if self.monitor_bunches: + if self.bunch_buffer is None: + self.bunch_buffer = self._init_bunch_buffer() + + if self.monitor_slices: + if self.slice_buffer is None: + self.slice_buffer = self._init_slice_buffer() + + self._update_bunch_buffer(particles) + self.file_backend.dump_data(self.i_turn, self.slicer, particles) self.i_turn += 1 + def _update_bunch_buffer(self, particles): + i_bunch_particles = self._slice_result['i_bunch_particles'] + write_pos = self.i_turn % self.buffer_size + for i_bunch, bid in enumerate(self.slicer.bunch_numbers): + for stat in self.stats_to_store: + mom = getattr(particles, stat.split('_')[-1]) + if stat.startswith('mean'): + val = self.slicer.mean(mom)[i_bunch, :] + self.bunch_buffer[bid][stat][:, write_pos] = val + elif stat.startswith('sigma'): + val = self.slicer.std(mom)[i_bunch, :] + self.bunch_buffer[bid][stat][write_pos] = val + elif stat.startswith('epsn'): + mom_p = getattr(particles, 'p'+stat.split('_')[-1]) + val = np.linalg.det(np.cov(mom, mom_p)) * self.beta_gamma + self.bunch_buffer[bid][stat][write_pos] = val + + 
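The emittance entries written to these buffers rely on the identity det(cov(x, px)) = var(x) * var(px) - cov(x, px)**2, which links the determinant form used for the bunch buffer with the expanded per-slice form introduced later in this series. A minimal numpy check of that identity, and of the sqrt(det) * beta_gamma expression adopted for the normalized emittance from the next commit onwards (all values illustrative, not part of the patch):

import numpy as np

x = np.random.normal(0.0, 1e-3, 100_000)
px = 0.1 * x + np.random.normal(0.0, 1e-4, 100_000)

sigma = np.cov(x, px)                                  # 2x2 covariance matrix of (x, px)
det_form = np.linalg.det(sigma)                        # determinant form
expanded = sigma[0, 0] * sigma[1, 1] - sigma[0, 1]**2  # var*var - cov**2
assert np.isclose(det_form, expanded)

beta_gamma = 7460.5                                    # illustrative value
epsn = np.sqrt(det_form) * beta_gamma                  # normalized emittance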
def _update_slice_buffer(self, particles): + i_bunch_particles = self._slice_result['i_bunch_particles'] + + write_pos = self.i_turn % self.buffer_size + for bid in self.slicer.bunch_numbers: + for stat in self.stats_to_store: + mom = getattr(particles, stat.split('_')[-1]) + if stat.startswith('mean'): + val = np.mean(mom[i_bunch_particles == bid]) + self.bunch_buffer[bid][stat][write_pos] = val + elif stat.startswith('sigma'): + val = np.std(mom[i_bunch_particles == bid]) + self.bunch_buffer[bid][stat][write_pos] = val + elif stat.startswith('epsn'): + mom_p = getattr(particles, 'p'+stat.split('_')[-1]) + val = np.linalg.det(np.cov(mom, mom_p)) * self.beta_gamma + self.bunch_buffer[bid][stat][write_pos] = val + +''' class BaseFileBackend: - def __init__(self, base_filename, monitor_beam, beam_monitor_stride, + def __init__(self, base_filename, monitor_bunches, bunch_monitor_stride, - monitor_particles, particle_monitor_stride): + monitor_slices, slice_monitor_stride, + monitor_particles, particle_monitor_stride, + parameters_dict=None, bunch_ids=None): self.base_filename = base_filename - self.monitor_beam = monitor_beam - self.beam_monitor_stride = beam_monitor_stride self.monitor_bunches = monitor_bunches self.bunch_monitor_stride = bunch_monitor_stride + self.monitor_slices = monitor_slices + self.slice_monitor_stride = slice_monitor_stride self.monitor_particles = monitor_particles self.particle_monitor_stride = particle_monitor_stride + self.parameters_dict = parameters_dict + + if bunch_ids is not None: + self.bunch_ids = bunch_ids + else: + self.bunch_ids = np.array([0]) + + self.bunch_buffer = None + self.slice_buffer = None + self.particle_buffer = None + + def init_files(self, stats_to_store, n_steps): + if self.monitor_bunches: + self.init_bunch_monitor_file(stats_to_store, n_steps) + + if self.monitor_slices: + self.init_slice_monitor_file() + + if self.monitor_particles: + self.init_particle_monitor_file() + + def init_bunch_monitor_file(self, stats_to_store, n_steps): + raise RuntimeError('File backend must implement ' + 'init_bunch_monitor_file') - def init_files(self): - raise RuntimeError('File backend must implement init_files') + def init_slice_monitor_file(self): + raise RuntimeError('File backend must implement ' + 'init_slice_monitor_file') + + def init_particle_monitor_file(self): + raise RuntimeError('File backend must implement ' + 'init_particle_monitor_file') def dump_data(self, i_turn, slicer, particles): - if self.monitor_beam and i_turn % self.beam_monitor_stride == 0: - self.dump_beam_data(slicer) + if self.monitor_bunches and i_turn % self.bunch_monitor_stride == 0: + self.dump_bunch_data(i_turn, particles) - if self.monitor_beam and i_turn % self.beam_monitor_stride == 0: - self.dump_beam_data(slicer) + if self.monitor_slices and i_turn % self.slice_monitor_stride == 0: + self.dump_slice_data(i_turn, slicer) - def dump_beam_data(self, slicer): - raise RuntimeError('File backend must implement dump_beam_data') + if self.monitor_particles and i_turn % self.particle_monitor_stride == 0: + self.dump_particle_data(i_turn, slicer, particles) - def dump_bunch_data(self, slicer): + def dump_bunch_data(self, i_turn, slicer): raise RuntimeError('File backend must implement dump_bunch_data') - def dump_particle_data(self, slicer, particles): - raise RuntimeError('File backend must implement dump_bunch_data') \ No newline at end of file + def dump_slice_data(self, i_turn, slicer): + raise RuntimeError('File backend must implement dump_slice_data') + + def 
dump_particle_data(self, i_turn, slicer, particles): + raise RuntimeError('File backend must implement dump_bunch_data') + + +class HDF5BackEnd(BaseFileBackend): + def __init__(self, base_filename, + monitor_bunches, bunch_monitor_stride, + monitor_slices, slice_monitor_stride, + monitor_particles, particle_monitor_stride, + parameters_dict=None, bunch_ids=None, use_mpi=False): + + self.use_mpi = use_mpi + + super().__init__(base_filename=base_filename, + monitor_bunches=monitor_bunches, + bunch_monitor_stride=bunch_monitor_stride, + monitor_slices=monitor_slices, + slice_monitor_stride=slice_monitor_stride, + monitor_particles=monitor_particles, + particle_monitor_stride=particle_monitor_stride, + parameters_dict=parameters_dict, + bunch_ids=bunch_ids) + + def init_bunch_monitor_file(self, stats_to_store, n_steps): + """ + Initialize HDF5 file and create its basic structure (groups and + datasets). One group is created for bunch-specific data. One dataset for + each of the quantities defined in self.stats_to_store is generated. + If specified by the user, write the contents of the parameters_dict as + metadata (attributes) to the file. Maximum file compression is activated + only if not using MPI. + """ + if self.base_filename is not None: + filename = f'{self.base_filename}_bunchmonitor.h5' + else: + filename = f'bunchmonitor.h5' + + if self.use_mpi: + h5file = hp.File(filename, 'w', driver='mpio', + comm=MPI.COMM_WORLD) + kwargs_gr = {} + else: + h5file = hp.File(filename, 'w') + kwargs_gr = {'compression': 'gzip', 'compression_opts': 9} + + if self.parameters_dict: + for key in self.parameters_dict: + h5file.attrs[key] = self.parameters_dict[key] + + h5group = h5file.create_group('Bunches') + for bid in self.bunch_ids: + gr = h5group.create_group(repr(bid)) + for stats in sorted(stats_to_store): + gr.create_dataset(stats, shape=(n_steps,), + **kwargs_gr) + + h5file.close() + + def dump_bunch_data(self, i_turn, particles): + pass + + def _write_data_to_buffer(self, i_turn, particles): + """ Store the data in the self.buffer dictionary before writing + them to file. The buffer is implemented as a shift register. To + find the slice_set-specific data, a slice_set, defined by the + slicing configuration self.slicer must be requested from the + bunch (instance of the Particles class), including all the + statistics that are to be saved. """ + + # Handle the different statistics quantities, which can + # either be methods (like mean(), ...) or simply attributes + # (macroparticlenumber or n_macroparticles_per_slice) of the bunch + # or slice_set resp. + + # bunch-specific data. 
+ pass +''' \ No newline at end of file From c2282e226e149df458b36560068ff1d7782305b4 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Sat, 18 May 2024 18:53:08 +0200 Subject: [PATCH 03/28] serveral fixes --- examples/006_monitor/000_buffers.py | 92 +++++++++++++++ ...liced_monitor.py => collective_monitor.py} | 111 ++++++++++++------ 2 files changed, 165 insertions(+), 38 deletions(-) create mode 100644 examples/006_monitor/000_buffers.py rename xfields/beam_elements/{sliced_monitor.py => collective_monitor.py} (75%) diff --git a/examples/006_monitor/000_buffers.py b/examples/006_monitor/000_buffers.py new file mode 100644 index 00000000..2ff23d24 --- /dev/null +++ b/examples/006_monitor/000_buffers.py @@ -0,0 +1,92 @@ +import xfields as xf +import xtrack as xt +import xpart as xp +import xobjects as xo + +import numpy as np +from scipy.constants import e, c, physical_constants + +context = xo.ContextCpu(omp_num_threads=0) + +n_macroparticles = int(1e5) +num_slices = n_macroparticles +zeta_range = (-1, 1) +dz = (zeta_range[1] - zeta_range[0])/num_slices + +E0 = physical_constants['proton mass energy equivalent in MeV'][0]*1e6 +E = 7000e9 +p0 = E * e / c +gamma = E/E0 +beta = np.sqrt(1-1/gamma**2) + +intensity = 1e11 +epsn_x = 2e-6 +epsn_y = 2e-6 +taub = 0.9e-9 +sigma_z = taub*beta*c/4 + +zeta = np.linspace(zeta_range[0] + dz/2, zeta_range[1]-dz/2, + n_macroparticles) + +monitor = xf.CollectiveMonitor( + file_backend=None, + monitor_bunches=True, + monitor_slices=True, + monitor_particles=False, + n_steps=10, + buffer_size=10, + beta_gamma=beta*gamma, + slicer_moments='all', + zeta_range=zeta_range, # These are [a, b] in the paper + num_slices=num_slices, # Per bunch, this is N_1 in the paper + bunch_spacing_zeta=10, # This is P in the paper + num_slots=1 +) + +accQ_x = 60.275 +accQ_y = 60.295 +circumference = 26658.883 +averageRadius = circumference / (2 * np.pi) +beta_x = averageRadius/accQ_x +beta_y = averageRadius/accQ_y +chroma = 10 + +alpha = 53.86**-2 +h_RF = 35640 +momentumCompaction = alpha +eta = momentumCompaction - 1.0 / gamma ** 2 +V = 5e6 +Q_s = np.sqrt((e*V*h_RF*eta)/(2*np.pi*beta*c*p0)) +sigma_delta = Q_s * sigma_z / (averageRadius * eta) +beta_s = sigma_z / sigma_delta + +betatron_map = xt.LineSegmentMap( + length=circumference, betx=beta_x, bety=beta_y, + qx=accQ_x, qy=accQ_y, + longitudinal_mode='linear_fixed_qs', + dqx=chroma, dqy=chroma, + qs=Q_s, bets=beta_s +) + +line = xt.Line(elements=[monitor, betatron_map], + element_names=['monitor', 'betatron_map']) + +line.particle_ref = xt.Particles(p0c=E) +line.build_tracker(_context=context) + +particles = xp.generate_matched_gaussian_bunch( + num_particles=n_macroparticles, total_intensity_particles=intensity, + nemitt_x=epsn_x, nemitt_y=epsn_y, sigma_z=sigma_z, + line=line, _context=context +) + +particles.x += 1e-3 +particles.y += 2e-3 +particles.xp += 3e-3 +particles.yp += 4e-3 +particles.zeta += 6e-3 +particles.delta += 7e-3 + +line.track(particles) + +print('a') diff --git a/xfields/beam_elements/sliced_monitor.py b/xfields/beam_elements/collective_monitor.py similarity index 75% rename from xfields/beam_elements/sliced_monitor.py rename to xfields/beam_elements/collective_monitor.py index 817be798..9e1bcda3 100644 --- a/xfields/beam_elements/sliced_monitor.py +++ b/xfields/beam_elements/collective_monitor.py @@ -4,8 +4,16 @@ from mpi4py import MPI import h5py as hp +COORDS = ['x', 'px', 'y', 'py', 'zeta', 'delta'] +SECOND_MOMENTS = {} +for c1 in COORDS: + for c2 in COORDS: + if c1 + '_' + c2 in SECOND_MOMENTS or c2 + 
'_' + c1 in SECOND_MOMENTS: + continue + SECOND_MOMENTS[c1 + '_' + c2] = (c1, c2) -class SlicedMonitor(SlicedElement): + +class CollectiveMonitor(SlicedElement): """ Base class for elements with a slicer. @@ -35,8 +43,9 @@ class SlicedMonitor(SlicedElement): _stats_to_store = [ 'mean_x', 'mean_px', 'mean_y', 'mean_py', 'mean_zeta', - 'mean_delta', 'sigma_x', 'sigma_y', 'sigma_zeta', 'sigma_delta', - 'epsn_x', 'epsn_y', 'epsn_z', 'num_particles'] + 'mean_delta', 'sigma_x', 'sigma_y', 'sigma_zeta', + 'sigma_px', 'sigma_py', 'sigma_delta', + 'epsn_x', 'epsn_y', 'epsn_zeta', 'num_particles'] def __init__(self, file_backend, @@ -46,7 +55,7 @@ def __init__(self, n_steps, buffer_size, beta_gamma, - slicer_moments=None, + slicer_moments='all', zeta_range=None, # These are [a, b] in the paper num_slices=None, # Per bunch, this is N_1 in the paper bunch_spacing_zeta=None, # This is P in the paper @@ -61,10 +70,7 @@ def __init__(self, assert stat in self._stats_to_store self.stats_to_store = stats_to_store else: - self.stats_to_store = [ - 'mean_x', 'mean_px', 'mean_y', 'mean_py', 'mean_zeta', - 'mean_delta', 'sigma_x', 'sigma_y', 'sigma_zeta', 'sigma_delta', - 'epsn_x', 'epsn_y', 'epsn_z', 'num_particles'] + self.stats_to_store = self._stats_to_store self.file_backend = file_backend self.bunch_buffer = None @@ -77,6 +83,11 @@ def __init__(self, self.monitor_slices = monitor_slices, self.monitor_particles = monitor_particles, + if slicer_moments == 'all': + slicer_moments = COORDS + list(SECOND_MOMENTS.keys()) + + self.pipeline_manager = None + super().__init__( slicer_moments=slicer_moments, zeta_range=zeta_range, # These are [a, b] in the paper @@ -104,7 +115,8 @@ def _init_slice_buffer(self): for bid in self.slicer.bunch_numbers: buf[bid] = {} for stats in self.stats_to_store: - buf[bid][stats] = np.zeros(self.slicer.num_slices, self.n_steps) + buf[bid][stats] = np.zeros((self.slicer.num_slices, + self.n_steps)) return buf @@ -118,13 +130,15 @@ def track(self, particles, _slice_result=None, _other_bunch_slicers=None): if self.bunch_buffer is None: self.bunch_buffer = self._init_bunch_buffer() - if self.monitor_slices: - if self.slice_buffer is None: - self.slice_buffer = self._init_slice_buffer() - self._update_bunch_buffer(particles) - self.file_backend.dump_data(self.i_turn, self.slicer, particles) + if self.monitor_slices: + if self.slice_buffer is None: + self.slice_buffer = self._init_slice_buffer() + + self._update_slice_buffer() + + # self.file_backend.dump_data(self.i_turn, self.slicer, particles) self.i_turn += 1 @@ -133,36 +147,57 @@ def _update_bunch_buffer(self, particles): write_pos = self.i_turn % self.buffer_size for i_bunch, bid in enumerate(self.slicer.bunch_numbers): + bunch_mask = (i_bunch_particles == bid) for stat in self.stats_to_store: - mom = getattr(particles, stat.split('_')[-1]) - if stat.startswith('mean'): - val = self.slicer.mean(mom)[i_bunch, :] - self.bunch_buffer[bid][stat][:, write_pos] = val - elif stat.startswith('sigma'): - val = self.slicer.std(mom)[i_bunch, :] - self.bunch_buffer[bid][stat][write_pos] = val - elif stat.startswith('epsn'): - mom_p = getattr(particles, 'p'+stat.split('_')[-1]) - val = np.linalg.det(np.cov(mom, mom_p)) * self.beta_gamma - self.bunch_buffer[bid][stat][write_pos] = val - - def _update_slice_buffer(self, particles): - i_bunch_particles = self._slice_result['i_bunch_particles'] - + if stat == 'num_particles': + val = np.sum(self.slicer.num_particles[i_bunch, :]) + else: + mom = getattr(particles, stat.split('_')[-1]) + if 
stat.startswith('mean'): + val = np.mean(mom[bunch_mask]) + elif stat.startswith('sigma'): + val = np.std(mom[bunch_mask]) + elif stat.startswith('epsn'): + if stat.split('_')[-1] != 'zeta': + mom_p_str = 'p' + stat.split('_')[-1] + else: + mom_p_str = 'delta' + mom_p = getattr(particles, mom_p_str) + val = (np.sqrt(np.linalg.det(np.cov(mom[bunch_mask], + mom_p[bunch_mask]))) * + self.beta_gamma) + elif stat == 'num_particles': + val = np.sum(self.slicer.num_particles[i_bunch, :]) + else: + raise ValueError('Unknown statistics f{stat}') + + self.bunch_buffer[bid][stat][write_pos] = val + + def _update_slice_buffer(self): write_pos = self.i_turn % self.buffer_size - for bid in self.slicer.bunch_numbers: + for i_bunch, bid in enumerate(self.slicer.bunch_numbers): for stat in self.stats_to_store: - mom = getattr(particles, stat.split('_')[-1]) + mom_str = stat.split('_')[-1] if stat.startswith('mean'): - val = np.mean(mom[i_bunch_particles == bid]) - self.bunch_buffer[bid][stat][write_pos] = val + val = self.slicer.mean(mom_str)[i_bunch, :] elif stat.startswith('sigma'): - val = np.std(mom[i_bunch_particles == bid]) - self.bunch_buffer[bid][stat][write_pos] = val + val = self.slicer.std(mom_str)[i_bunch, :] elif stat.startswith('epsn'): - mom_p = getattr(particles, 'p'+stat.split('_')[-1]) - val = np.linalg.det(np.cov(mom, mom_p)) * self.beta_gamma - self.bunch_buffer[bid][stat][write_pos] = val + if mom_str != 'zeta': + mom_p_str = 'p' + stat.split('_')[-1] + else: + mom_p_str = 'delta' + + val = (np.sqrt(self.slicer.var(mom_str)[i_bunch, :] * + self.slicer.var(mom_p_str)[i_bunch, :] - + self.slicer.cov(mom_str, mom_p_str)[i_bunch, :]) * + self.beta_gamma) + elif stat == 'num_particles': + val = self.slicer.num_particles[i_bunch, :] + else: + raise ValueError('Unknown statistics f{stat}') + + self.slice_buffer[bid][stat][:, write_pos] = val ''' class BaseFileBackend: From cb6a4ac3fd5af58ff9bdee9fd8f4212e560097a7 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Tue, 21 May 2024 17:25:14 +0200 Subject: [PATCH 04/28] remove unwanted commas --- xfields/__init__.py | 1 + xfields/beam_elements/collective_monitor.py | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/xfields/__init__.py b/xfields/__init__.py index 927d942d..3f64877d 100644 --- a/xfields/__init__.py +++ b/xfields/__init__.py @@ -25,6 +25,7 @@ from .beam_elements.wakefield import Wakefield, ResonatorWake from .beam_elements.wakefield import MultiWakefield from .beam_elements.transverse_damper import TransverseDamper +from .beam_elements.collective_monitor import CollectiveMonitor from .general import _pkg_root from .config_tools import replace_spacecharge_with_quasi_frozen diff --git a/xfields/beam_elements/collective_monitor.py b/xfields/beam_elements/collective_monitor.py index 9e1bcda3..7c594e5d 100644 --- a/xfields/beam_elements/collective_monitor.py +++ b/xfields/beam_elements/collective_monitor.py @@ -79,9 +79,9 @@ def __init__(self, self.n_steps = n_steps self.buffer_size = buffer_size self.beta_gamma = beta_gamma - self.monitor_bunches = monitor_bunches, - self.monitor_slices = monitor_slices, - self.monitor_particles = monitor_particles, + self.monitor_bunches = monitor_bunches + self.monitor_slices = monitor_slices + self.monitor_particles = monitor_particles if slicer_moments == 'all': slicer_moments = COORDS + list(SECOND_MOMENTS.keys()) @@ -174,6 +174,7 @@ def _update_bunch_buffer(self, particles): self.bunch_buffer[bid][stat][write_pos] = val def _update_slice_buffer(self): + print('bla') 
write_pos = self.i_turn % self.buffer_size for i_bunch, bid in enumerate(self.slicer.bunch_numbers): for stat in self.stats_to_store: @@ -189,8 +190,8 @@ def _update_slice_buffer(self): mom_p_str = 'delta' val = (np.sqrt(self.slicer.var(mom_str)[i_bunch, :] * - self.slicer.var(mom_p_str)[i_bunch, :] - - self.slicer.cov(mom_str, mom_p_str)[i_bunch, :]) * + self.slicer.var(mom_p_str)[i_bunch, :] - + self.slicer.cov(mom_str, mom_p_str)[i_bunch, :]) * self.beta_gamma) elif stat == 'num_particles': val = self.slicer.num_particles[i_bunch, :] From dc5b8b0ca08afc810c697e4da11719048f173ea5 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Tue, 21 May 2024 17:26:06 +0200 Subject: [PATCH 05/28] add tests for monitor buffers --- examples/006_monitor/test_monitor_buffers.py | 213 +++++++++++++++++++ 1 file changed, 213 insertions(+) create mode 100644 examples/006_monitor/test_monitor_buffers.py diff --git a/examples/006_monitor/test_monitor_buffers.py b/examples/006_monitor/test_monitor_buffers.py new file mode 100644 index 00000000..2209098e --- /dev/null +++ b/examples/006_monitor/test_monitor_buffers.py @@ -0,0 +1,213 @@ +import xfields as xf +import xtrack as xt +import xobjects as xo +from xobjects.test_helpers import for_all_test_contexts + +import numpy as np +from scipy.constants import e, c, physical_constants + + +#@for_all_test_contexts +#def test_bunch_buffer(test_context): +test_context = xo.ContextCpu(omp_num_threads=0) + +n_macroparticles = int(1e6) +num_slices = 10 +zeta_range = (-1, 1) + +E0 = physical_constants['proton mass energy equivalent in MeV'][0]*1e6 +E = 7000e9 +gamma = E/E0 +beta = np.sqrt(1-1/gamma**2) + +offs_x = 1e-3 +offs_px = 2e-3 +offs_y = 3e-3 +offs_py = 4e-3 +offs_zeta = 5e-3 +offs_delta = 6e-3 + +sigma_x = 1e-3 +sigma_px = 2e-3 +sigma_y = 3e-3 +sigma_py = 4e-3 +sigma_zeta = 5e-3 +sigma_delta = 6e-3 + +monitor = xf.CollectiveMonitor( + file_backend=None, + monitor_bunches=True, + monitor_slices=False, + monitor_particles=False, + n_steps=10, + buffer_size=10, + beta_gamma=beta*gamma, + slicer_moments='all', + zeta_range=zeta_range, # These are [a, b] in the paper + num_slices=num_slices, # Per bunch, this is N_1 in the paper + bunch_spacing_zeta=10, # This is P in the paper + num_slots=1 +) + +x_coord = sigma_x*np.random.random(n_macroparticles) + offs_x +px_coord = sigma_px*np.random.random(n_macroparticles) + offs_px +y_coord = sigma_y*np.random.random(n_macroparticles) + offs_y +py_coord = sigma_py*np.random.random(n_macroparticles) + offs_py +zeta_coord = sigma_zeta*np.random.random(n_macroparticles) + offs_zeta +delta_coord = sigma_delta*np.random.random(n_macroparticles) + offs_delta + +particles = xt.Particles( + _context=test_context, p0c=E, + x=x_coord, + px=px_coord, + y=y_coord, + py=py_coord, + zeta=zeta_coord, + delta=delta_coord +) + +monitor.track(particles) + +assert monitor.bunch_buffer[0]['mean_x'][0] == np.mean(x_coord) +assert monitor.bunch_buffer[0]['mean_px'][0] == np.mean(px_coord) +assert monitor.bunch_buffer[0]['mean_y'][0] == np.mean(y_coord) +assert monitor.bunch_buffer[0]['mean_py'][0] == np.mean(py_coord) +assert monitor.bunch_buffer[0]['mean_zeta'][0] == np.mean(zeta_coord) +assert monitor.bunch_buffer[0]['mean_delta'][0] == np.mean(delta_coord) + +assert monitor.bunch_buffer[0]['sigma_x'][0] == np.std(x_coord) +assert monitor.bunch_buffer[0]['sigma_px'][0] == np.std(px_coord) +assert monitor.bunch_buffer[0]['sigma_y'][0] == np.std(y_coord) +assert monitor.bunch_buffer[0]['sigma_py'][0] == np.std(py_coord) +assert 
monitor.bunch_buffer[0]['sigma_zeta'][0] == np.std(zeta_coord) +assert monitor.bunch_buffer[0]['sigma_delta'][0] == np.std(delta_coord) + +epsn_x = np.sqrt(np.linalg.det(np.cov(x_coord, px_coord))) * beta*gamma +epsn_y = np.sqrt(np.linalg.det(np.cov(y_coord, py_coord))) * beta*gamma +epsn_zeta = np.sqrt(np.linalg.det(np.cov(zeta_coord, + delta_coord))) * beta*gamma + +assert np.isclose(monitor.bunch_buffer[0]['epsn_x'][0], epsn_x) +assert np.isclose(monitor.bunch_buffer[0]['epsn_y'][0], epsn_y) +assert np.isclose(monitor.bunch_buffer[0]['epsn_zeta'][0], epsn_zeta) + +assert monitor.bunch_buffer[0]['num_particles'][0] == n_macroparticles +''' + + +test_context = xo.ContextCpu(omp_num_threads=0) + +n_macroparticles = int(1e7) +num_slices = 10 +zeta_range = (0, 1) + +E0 = physical_constants['proton mass energy equivalent in MeV'][0] * 1e6 +E = 7000e9 +gamma = E / E0 +beta = np.sqrt(1 - 1 / gamma ** 2) + +offs_x = 1 +offs_px = 2 +offs_y = 3 +offs_py = 4 +offs_delta = 5 + +sigma_x = 6 +sigma_px = 7 +sigma_y = 8 +sigma_py = 9 +sigma_delta = 10 + +monitor = xf.CollectiveMonitor( + file_backend=None, + monitor_bunches=False, + monitor_slices=True, + monitor_particles=False, + n_steps=1, + buffer_size=1, + beta_gamma=beta * gamma, + slicer_moments='all', + zeta_range=zeta_range, # These are [a, b] in the paper + num_slices=num_slices, # Per bunch, this is N_1 in the paper + bunch_spacing_zeta=10, # This is P in the paper + num_slots=1 +) + +zeta_coord_tot = np.random.random(n_macroparticles) + +particles = xt.Particles( + _context=test_context, + p0c=E, + zeta=zeta_coord_tot +) + +n_slice = 5 + +dzeta = monitor.slicer.dzeta + +bin_min = monitor.slicer.zeta_centers[n_slice] - dzeta/2 +bin_max = monitor.slicer.zeta_centers[n_slice] + dzeta/2 + +slice_mask = np.logical_and(zeta_coord_tot < bin_max, zeta_coord_tot >= bin_min) + +n_macroparticles_slice = np.sum(slice_mask) + +x_coord = sigma_x * np.random.random(n_macroparticles_slice) + offs_x +px_coord = sigma_px * np.random.random(n_macroparticles_slice) + offs_px +y_coord = sigma_y * np.random.random(n_macroparticles_slice) + offs_y +py_coord = sigma_py * np.random.random(n_macroparticles_slice) + offs_py +delta_coord = (sigma_delta * np.random.random(n_macroparticles_slice) + + offs_delta) + +particles.x[slice_mask] = x_coord +particles.px[slice_mask] = px_coord +particles.y[slice_mask] = y_coord +particles.py[slice_mask] = py_coord +particles.delta[slice_mask] = delta_coord + +monitor.track(particles) + + +assert np.isclose(monitor.slice_buffer[0]['mean_x'][n_slice, 0], + np.mean(x_coord)) +assert np.isclose(monitor.slice_buffer[0]['mean_px'][n_slice, 0], + np.mean(px_coord)) +assert np.isclose(monitor.slice_buffer[0]['mean_y'][n_slice, 0], + np.mean(y_coord)) +assert np.isclose(monitor.slice_buffer[0]['mean_py'][n_slice, 0], + np.mean(py_coord)) +zeta_coord = particles.zeta[slice_mask] +assert np.isclose(monitor.slice_buffer[0]['mean_zeta'][n_slice, 0], + np.mean(zeta_coord)) +assert np.isclose(monitor.slice_buffer[0]['mean_delta'][n_slice, 0], + np.mean(delta_coord)) + +assert np.isclose(monitor.slice_buffer[0]['sigma_x'][n_slice, 0], + np.std(x_coord)) +assert np.isclose(monitor.slice_buffer[0]['sigma_px'][n_slice, 0], + np.std(px_coord)) +assert np.isclose(monitor.slice_buffer[0]['sigma_y'][n_slice, 0], + np.std(y_coord)) +assert np.isclose(monitor.slice_buffer[0]['sigma_py'][n_slice, 0], + np.std(py_coord)) +assert np.isclose(monitor.slice_buffer[0]['sigma_zeta'][n_slice, 0], + np.std(zeta_coord)) +assert 
np.isclose(monitor.slice_buffer[0]['sigma_delta'][n_slice, 0], + np.std(delta_coord)) + + +epsn_x = (np.sqrt(np.var(x_coord) * np.var(px_coord) - + np.cov(x_coord, px_coord)[0, 1]) * beta * gamma) +epsn_y = (np.sqrt(np.var(y_coord) * np.var(py_coord) - + np.cov(y_coord, py_coord)[0, 1])*beta*gamma) +epsn_zeta = (np.sqrt(np.var(zeta_coord) * np.var(delta_coord) - + np.cov(zeta_coord, delta_coord)[0, 1]) * beta*gamma) + + +assert np.isclose(monitor.slice_buffer[0]['epsn_x'][n_slice, 0], epsn_x) +assert np.isclose(monitor.slice_buffer[0]['epsn_y'][n_slice, 0], epsn_y) +assert np.isclose(monitor.slice_buffer[0]['epsn_zeta'][n_slice, 0], epsn_zeta) + +assert (monitor.slice_buffer[0]['num_particles'][n_slice, 0] == + n_macroparticles_slice) +''' \ No newline at end of file From e76bcb2d610ffe608b3b2a2db423311d7a4ed46b Mon Sep 17 00:00:00 2001 From: lgiacome Date: Thu, 30 May 2024 14:24:16 +0200 Subject: [PATCH 06/28] more work --- examples/006_monitor/000_buffers.py | 9 +- xfields/beam_elements/collective_monitor.py | 107 +++++++++++++++----- 2 files changed, 83 insertions(+), 33 deletions(-) diff --git a/examples/006_monitor/000_buffers.py b/examples/006_monitor/000_buffers.py index 2ff23d24..941a82ff 100644 --- a/examples/006_monitor/000_buffers.py +++ b/examples/006_monitor/000_buffers.py @@ -80,13 +80,8 @@ line=line, _context=context ) -particles.x += 1e-3 -particles.y += 2e-3 -particles.xp += 3e-3 -particles.yp += 4e-3 -particles.zeta += 6e-3 -particles.delta += 7e-3 - line.track(particles) print('a') + + diff --git a/xfields/beam_elements/collective_monitor.py b/xfields/beam_elements/collective_monitor.py index 7c594e5d..f4280242 100644 --- a/xfields/beam_elements/collective_monitor.py +++ b/xfields/beam_elements/collective_monitor.py @@ -1,8 +1,7 @@ import numpy as np from .sliced_element import SlicedElement -from mpi4py import MPI -import h5py as hp +import openpmd_api as io COORDS = ['x', 'px', 'y', 'py', 'zeta', 'delta'] SECOND_MOMENTS = {} @@ -48,12 +47,11 @@ class CollectiveMonitor(SlicedElement): 'epsn_x', 'epsn_y', 'epsn_zeta', 'num_particles'] def __init__(self, - file_backend, + base_file_name, monitor_bunches, monitor_slices, monitor_particles, n_steps, - buffer_size, beta_gamma, slicer_moments='all', zeta_range=None, # These are [a, b] in the paper @@ -63,6 +61,7 @@ def __init__(self, filling_scheme=None, bunch_numbers=None, stats_to_store=None, + output_extension=None, _flatten=False): if stats_to_store is not None: @@ -72,17 +71,18 @@ def __init__(self, else: self.stats_to_store = self._stats_to_store - self.file_backend = file_backend + self.base_file_name = base_file_name self.bunch_buffer = None self.slice_buffer = None self.particle_buffer = None self.n_steps = n_steps - self.buffer_size = buffer_size self.beta_gamma = beta_gamma self.monitor_bunches = monitor_bunches self.monitor_slices = monitor_slices self.monitor_particles = monitor_particles + self.output_extension = output_extension + if slicer_moments == 'all': slicer_moments = COORDS + list(SECOND_MOMENTS.keys()) @@ -99,8 +99,24 @@ def __init__(self, with_compressed_profile=False ) + if self.monitor_bunches: + self.bunch_series = io.Series( + self.base_file_name + '_bunches.h5', + io.Access.create) + + if self.monitor_slices: + self.slice_series = io.Series( + self.base_file_name + '_slices.h5', + io.Access.create) + + if self.monitor_particles: + self.particle_series = io.Series( + self.base_file_name + '_particles.h5', + io.Access.create) + self.i_turn = 0 + ''' def _init_bunch_buffer(self): buf = {} for bid in 
self.slicer.bunch_numbers: @@ -119,6 +135,7 @@ def _init_slice_buffer(self): self.n_steps)) return buf + ''' def track(self, particles, _slice_result=None, _other_bunch_slicers=None): super().track(particles=particles, @@ -126,22 +143,58 @@ def track(self, particles, _slice_result=None, _other_bunch_slicers=None): _other_bunch_slicers=_other_bunch_slicers ) - if self.monitor_bunches: - if self.bunch_buffer is None: - self.bunch_buffer = self._init_bunch_buffer() - - self._update_bunch_buffer(particles) + #if self.monitor_bunches: + # if self.bunch_buffer is None: + # self.bunch_buffer = self._init_bunch_buffer() + # + # self._update_bunch_buffer(particles) - if self.monitor_slices: - if self.slice_buffer is None: - self.slice_buffer = self._init_slice_buffer() + #if self.monitor_slices: + # if self.slice_buffer is None: + # self.slice_buffer = self._init_slice_buffer() + # + # self._update_slice_buffer() - self._update_slice_buffer() + if self.monitor_bunches: + self._update_bunch_series(particles) # self.file_backend.dump_data(self.i_turn, self.slicer, particles) self.i_turn += 1 + def _update_bunch_series(self, particles): + i_bunch_particles = self._slice_result['i_bunch_particles'] + + bunch_it = self.bunch_series.iterations[self.i_turn] + bunches = bunch_it.particles['bunches'] + + for i_bunch, bid in enumerate(self.slicer.bunch_numbers): + bunch_mask = (i_bunch_particles == bid) + for stat in self.stats_to_store: + if stat == 'num_particles': + val = np.sum(self.slicer.num_particles[i_bunch, :]) + else: + mom = getattr(particles, stat.split('_')[-1]) + if stat.startswith('mean'): + val = np.mean(mom[bunch_mask]) + elif stat.startswith('sigma'): + val = np.std(mom[bunch_mask]) + elif stat.startswith('epsn'): + if stat.split('_')[-1] != 'zeta': + mom_p_str = 'p' + stat.split('_')[-1] + else: + mom_p_str = 'delta' + mom_p = getattr(particles, mom_p_str) + val = (np.sqrt(np.linalg.det(np.cov(mom[bunch_mask], + mom_p[bunch_mask]))) * + self.beta_gamma) + elif stat == 'num_particles': + val = np.sum(self.slicer.num_particles[i_bunch, :]) + else: + raise ValueError('Unknown statistics f{stat}') + + self.bunch_buffer[bid][stat][write_pos] = val + def _update_bunch_buffer(self, particles): i_bunch_particles = self._slice_result['i_bunch_particles'] @@ -200,21 +253,21 @@ def _update_slice_buffer(self): self.slice_buffer[bid][stat][:, write_pos] = val -''' + class BaseFileBackend: def __init__(self, base_filename, - monitor_bunches, bunch_monitor_stride, - monitor_slices, slice_monitor_stride, - monitor_particles, particle_monitor_stride, + #monitor_bunches, bunch_monitor_stride, + #monitor_slices, slice_monitor_stride, + #monitor_particles, particle_monitor_stride, parameters_dict=None, bunch_ids=None): self.base_filename = base_filename - self.monitor_bunches = monitor_bunches - self.bunch_monitor_stride = bunch_monitor_stride - self.monitor_slices = monitor_slices - self.slice_monitor_stride = slice_monitor_stride - self.monitor_particles = monitor_particles - self.particle_monitor_stride = particle_monitor_stride + #self.monitor_bunches = monitor_bunches + #self.bunch_monitor_stride = bunch_monitor_stride + #self.monitor_slices = monitor_slices + #self.slice_monitor_stride = slice_monitor_stride + #self.monitor_particles = monitor_particles + #self.particle_monitor_stride = particle_monitor_stride self.parameters_dict = parameters_dict if bunch_ids is not None: @@ -226,6 +279,7 @@ def __init__(self, base_filename, self.slice_buffer = None self.particle_buffer = None + ''' def 
init_files(self, stats_to_store, n_steps): if self.monitor_bunches: self.init_bunch_monitor_file(stats_to_store, n_steps) @@ -235,6 +289,7 @@ def init_files(self, stats_to_store, n_steps): if self.monitor_particles: self.init_particle_monitor_file() + ''' def init_bunch_monitor_file(self, stats_to_store, n_steps): raise RuntimeError('File backend must implement ' @@ -267,7 +322,7 @@ def dump_slice_data(self, i_turn, slicer): def dump_particle_data(self, i_turn, slicer, particles): raise RuntimeError('File backend must implement dump_bunch_data') - +''' class HDF5BackEnd(BaseFileBackend): def __init__(self, base_filename, monitor_bunches, bunch_monitor_stride, From 7dbe1ed1d4dce7df9323b863b7d35e7b6b7648d0 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Thu, 6 Jun 2024 14:35:35 +0200 Subject: [PATCH 07/28] small cahnges --- xfields/beam_elements/collective_monitor.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/xfields/beam_elements/collective_monitor.py b/xfields/beam_elements/collective_monitor.py index f4280242..e3a4c9e5 100644 --- a/xfields/beam_elements/collective_monitor.py +++ b/xfields/beam_elements/collective_monitor.py @@ -193,7 +193,9 @@ def _update_bunch_series(self, particles): else: raise ValueError('Unknown statistics f{stat}') - self.bunch_buffer[bid][stat][write_pos] = val + bunches[stat] = val + + self.bunch_series.flush() def _update_bunch_buffer(self, particles): i_bunch_particles = self._slice_result['i_bunch_particles'] From bca6d8813b12dd356cd57aae3f0296b3edebcf23 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Sat, 15 Jun 2024 19:18:53 +0200 Subject: [PATCH 08/28] finished up and tested collective monitor --- tests/test_collective_monitor.py | 663 ++++++++++++++++++++ xfields/beam_elements/collective_monitor.py | 514 +++++++-------- 2 files changed, 928 insertions(+), 249 deletions(-) create mode 100644 tests/test_collective_monitor.py diff --git a/tests/test_collective_monitor.py b/tests/test_collective_monitor.py new file mode 100644 index 00000000..a90c2fa2 --- /dev/null +++ b/tests/test_collective_monitor.py @@ -0,0 +1,663 @@ +import xfields as xf +import xtrack as xt +from xobjects.test_helpers import for_all_test_contexts + +import json +import h5py + +import numpy as np + + +@for_all_test_contexts +def test_bunch_monitor_hdf5(test_context): + n_macroparticles = int(1e6) + num_slices = 10 + zeta_range = (-1, 1) + + energy = 7e12 + + offs_x = 1e-3 + offs_px = 2e-3 + offs_y = 3e-3 + offs_py = 4e-3 + offs_zeta = 5e-3 + offs_delta = 6e-3 + + sigma_x = 1e-3 + sigma_px = 2e-3 + sigma_y = 3e-3 + sigma_py = 4e-3 + sigma_zeta = 5e-3 + sigma_delta = 6e-3 + + # base coordinates + x_coord = sigma_x * np.random.random(n_macroparticles) + offs_x + px_coord = sigma_px * np.random.random(n_macroparticles) + offs_px + y_coord = sigma_y * np.random.random(n_macroparticles) + offs_y + py_coord = sigma_py * np.random.random(n_macroparticles) + offs_py + zeta_coord = sigma_zeta * np.random.random(n_macroparticles) + offs_zeta + delta_coord = sigma_delta * np.random.random(n_macroparticles) + offs_delta + + bunch_spacing_zeta = 10 + + n_bunches = 3 + + # n_bunches bunches with n_macroparticles each with different coordinates + particles = xt.Particles( + _context=test_context, p0c=energy, + x=np.concatenate([x_coord * (bid + 1) for bid in range(n_bunches)]), + px=np.concatenate([px_coord * (bid + 1) for bid in range(n_bunches)]), + y=np.concatenate([y_coord * (bid + 1) for bid in range(n_bunches)]), + py=np.concatenate([py_coord * (bid + 1) for bid in 
range(n_bunches)]), + zeta=np.concatenate([zeta_coord * (bid + 1) - bunch_spacing_zeta * bid + for bid in range(n_bunches)]), + delta=np.concatenate( + [delta_coord * (bid + 1) for bid in range(n_bunches)]), + ) + + beta = particles.beta0[0] + gamma = np.sqrt(1 / (1 - beta ** 2)) + + # dummy filling scheme + filling_scheme = np.ones(n_bunches, dtype=int) + bunch_numbers = np.arange(n_bunches, dtype=int) + + flush_data_every = 10 + + monitor = xf.CollectiveMonitor( + base_file_name='test_monitor', + backend='hdf5', + monitor_bunches=True, + monitor_slices=False, + monitor_particles=False, + flush_data_every=flush_data_every, + zeta_range=zeta_range, + num_slices=num_slices, + filling_scheme=filling_scheme, + bunch_numbers=bunch_numbers, + bunch_spacing_zeta=bunch_spacing_zeta, + ) + + # track for twice flush_data_every turns so that we test the reshaping + for _ in range(2 * flush_data_every): + monitor.track(particles) + + with h5py.File(monitor._bunches_filename, 'r') as h5file: + for bid in bunch_numbers: + bunch = h5file[str(bid)] + print(bunch['mean_x'][:], np.mean(x_coord) * (bid + 1)) + assert np.allclose(bunch['mean_x'][:], + np.mean(x_coord) * (bid + 1)) + assert np.allclose(bunch['mean_px'][:], + np.mean(px_coord) * (bid + 1)) + assert np.allclose(bunch['mean_y'][:], + np.mean(y_coord) * (bid + 1)) + assert np.allclose(bunch['mean_py'][:], + np.mean(py_coord) * (bid + 1)) + assert np.allclose(bunch['mean_zeta'][:], + (np.mean(zeta_coord * (bid + 1) - + bunch_spacing_zeta * bid))) + assert np.allclose(bunch['mean_delta'][:], + np.mean(delta_coord) * (bid + 1)) + + assert np.allclose(bunch['sigma_x'][:], + np.std(x_coord) * (bid + 1)) + assert np.allclose(bunch['sigma_px'][:], + np.std(px_coord) * (bid + 1)) + assert np.allclose(bunch['sigma_y'][:], + np.std(y_coord) * (bid + 1)) + assert np.allclose(bunch['sigma_py'][:], + np.std(py_coord) * (bid + 1)) + assert np.allclose(bunch['sigma_zeta'][:], + np.std(zeta_coord) * (bid + 1)) + assert np.allclose(bunch['sigma_delta'][:], + np.std(delta_coord) * (bid + 1)) + + epsn_x = np.sqrt( + np.linalg.det(np.cov(x_coord, px_coord))) * beta * gamma + epsn_y = np.sqrt( + np.linalg.det(np.cov(y_coord, py_coord))) * beta * gamma + epsn_zeta = np.sqrt( + np.linalg.det(np.cov(zeta_coord, delta_coord))) * beta * gamma + + assert np.allclose(bunch['epsn_x'][:], + epsn_x * (bid + 1) ** 2) + assert np.allclose(bunch['epsn_y'][:], + epsn_y * (bid + 1) ** 2) + assert np.allclose(bunch['epsn_zeta'][:], + epsn_zeta * (bid + 1) ** 2) + + assert np.allclose(bunch['num_particles'][:], n_macroparticles) + + +@for_all_test_contexts +def test_bunch_monitor_json(test_context): + n_macroparticles = int(1e6) + num_slices = 10 + zeta_range = (-1, 1) + + energy = 7e12 + + offs_x = 1e-3 + offs_px = 2e-3 + offs_y = 3e-3 + offs_py = 4e-3 + offs_zeta = 5e-3 + offs_delta = 6e-3 + + sigma_x = 1e-3 + sigma_px = 2e-3 + sigma_y = 3e-3 + sigma_py = 4e-3 + sigma_zeta = 5e-3 + sigma_delta = 6e-3 + + # base coordinates + x_coord = sigma_x * np.random.random(n_macroparticles) + offs_x + px_coord = sigma_px * np.random.random(n_macroparticles) + offs_px + y_coord = sigma_y * np.random.random(n_macroparticles) + offs_y + py_coord = sigma_py * np.random.random(n_macroparticles) + offs_py + zeta_coord = sigma_zeta * np.random.random(n_macroparticles) + offs_zeta + delta_coord = sigma_delta * np.random.random(n_macroparticles) + offs_delta + + bunch_spacing_zeta = 10 + + n_bunches = 3 + + # n_bunches bunches with n_macroparticles each with different coordinates + particles = 
xt.Particles( + _context=test_context, p0c=energy, + x=np.concatenate([x_coord * (bid + 1) for bid in range(n_bunches)]), + px=np.concatenate([px_coord * (bid + 1) for bid in range(n_bunches)]), + y=np.concatenate([y_coord * (bid + 1) for bid in range(n_bunches)]), + py=np.concatenate([py_coord * (bid + 1) for bid in range(n_bunches)]), + zeta=np.concatenate([zeta_coord * (bid + 1) - bunch_spacing_zeta * bid + for bid in range(n_bunches)]), + delta=np.concatenate( + [delta_coord * (bid + 1) for bid in range(n_bunches)]), + ) + + beta = particles.beta0[0] + gamma = np.sqrt(1 / (1 - beta ** 2)) + + # dummy filling scheme + filling_scheme = np.ones(n_bunches, dtype=int) + bunch_numbers = np.arange(n_bunches, dtype=int) + + n_turns = 10 + + monitor = xf.CollectiveMonitor( + base_file_name='test_monitor', + backend='json', + monitor_bunches=True, + monitor_slices=False, + monitor_particles=False, + flush_data_every=n_turns, + zeta_range=zeta_range, + num_slices=num_slices, + filling_scheme=filling_scheme, + bunch_numbers=bunch_numbers, + bunch_spacing_zeta=bunch_spacing_zeta, + ) + + # track for twice flush_data_every turns so that we test the reshaping + for _ in range(2 * n_turns): + monitor.track(particles) + + with open(monitor._bunches_filename, 'r') as f: + bunches_dict = json.load(f) + + for bid in bunch_numbers: + bunch = bunches_dict[str(bid)] + assert np.allclose(bunch['mean_x'][:], (np.mean(x_coord) * + (bid + 1))) + assert np.allclose(bunch['mean_px'][:], (np.mean(px_coord) * + (bid + 1))) + assert np.allclose(bunch['mean_y'][:], (np.mean(y_coord) * + (bid + 1))) + assert np.allclose(bunch['mean_py'][:], (np.mean(py_coord) * + (bid + 1))) + assert np.allclose(bunch['mean_zeta'][:], + (np.mean(zeta_coord * (bid + 1) - + bunch_spacing_zeta * bid))) + assert np.allclose(bunch['mean_delta'][:], + np.mean(delta_coord) * (bid + 1)) + + assert np.allclose(bunch['sigma_x'][:], (np.std(x_coord) * + (bid + 1))) + assert np.allclose(bunch['sigma_px'][:], (np.std(px_coord) * + (bid + 1))) + assert np.allclose(bunch['sigma_y'][:], (np.std(y_coord) * + (bid + 1))) + assert np.allclose(bunch['sigma_py'][:], (np.std(py_coord) * + (bid + 1))) + assert np.allclose(bunch['sigma_zeta'][:], + (np.std(zeta_coord) * + (bid + 1))) + assert np.allclose(bunch['sigma_delta'][:], + (np.std(delta_coord) * (bid + 1))) + + epsn_x = np.sqrt( + np.linalg.det(np.cov(x_coord, px_coord))) * beta * gamma + epsn_y = np.sqrt( + np.linalg.det(np.cov(y_coord, py_coord))) * beta * gamma + epsn_zeta = np.sqrt(np.linalg.det(np.cov(zeta_coord, + delta_coord))) * beta * gamma + + assert np.allclose(bunch['epsn_x'][:], epsn_x * (bid + 1) ** 2) + assert np.allclose(bunch['epsn_y'][:], epsn_y * (bid + 1) ** 2) + assert np.allclose(bunch['epsn_zeta'][:], epsn_zeta * (bid + 1) ** 2) + + assert np.allclose(bunch['num_particles'][:], n_macroparticles) + + +@for_all_test_contexts +def test_slice_monitor_hdf5(test_context): + n_macroparticles = int(1e6) + num_slices = 10 + zeta_range = (0, 1) + + energy = 7e12 + + offs_x = 1 + offs_px = 2 + offs_y = 3 + offs_py = 4 + offs_delta = 5 + + sigma_x = 6 + sigma_px = 7 + sigma_y = 8 + sigma_py = 9 + sigma_delta = 10 + + # dummy filling scheme + n_bunches = 3 + filling_scheme = np.ones(n_bunches, dtype=int) + bunch_numbers = np.arange(n_bunches, dtype=int) + bunch_spacing_zeta = 10 + + flush_data_every = 10 + + monitor = xf.CollectiveMonitor( + base_file_name='test_monitor', + backend='hdf5', + monitor_bunches=False, + monitor_slices=True, + monitor_particles=False, + 
flush_data_every=flush_data_every, + zeta_range=zeta_range, + num_slices=num_slices, + bunch_spacing_zeta=bunch_spacing_zeta, + filling_scheme=filling_scheme, + bunch_numbers=bunch_numbers + ) + + particles = xt.Particles( + _context=test_context, + p0c=energy, + x=np.zeros(n_macroparticles), + ) + + beta = particles.beta0[0] + gamma = np.sqrt(1 / (1 - beta ** 2)) + + n_slice = 5 + + dzeta = monitor.slicer.dzeta + zeta_coord_base = np.random.random(n_macroparticles) + + x_coord_bunch = [] + px_coord_bunch = [] + y_coord_bunch = [] + py_coord_bunch = [] + zeta_coord_bunch = [] + delta_coord_bunch = [] + n_macroparticles_slice_bunch = [] + + for bid in range(n_bunches): + zeta_coord = zeta_coord_base * (bid + 1) - bid * bunch_spacing_zeta + + bin_min = monitor.slicer.zeta_centers[bid, n_slice] - dzeta / 2 + bin_max = monitor.slicer.zeta_centers[bid, n_slice] + dzeta / 2 + + slice_mask = np.logical_and(zeta_coord < bin_max, zeta_coord >= bin_min) + + n_macroparticles_slice = np.sum(slice_mask) + + x_coord = (sigma_x * np.random.random( + n_macroparticles_slice) + offs_x) * (bid + 1) + px_coord = (sigma_px * np.random.random( + n_macroparticles_slice) + offs_px) * (bid + 1) + y_coord = (sigma_y * np.random.random( + n_macroparticles_slice) + offs_y) * (bid + 1) + py_coord = (sigma_py * np.random.random( + n_macroparticles_slice) + offs_py) * (bid + 1) + delta_coord = (sigma_delta * np.random.random(n_macroparticles_slice) + + offs_delta) * (bid + 1) + + x_coord_bunch.append(x_coord) + px_coord_bunch.append(px_coord) + y_coord_bunch.append(y_coord) + py_coord_bunch.append(py_coord) + delta_coord_bunch.append(delta_coord) + zeta_coord_bunch.append(zeta_coord[slice_mask]) + n_macroparticles_slice_bunch.append(n_macroparticles_slice) + + particles.x[slice_mask] = x_coord + particles.px[slice_mask] = px_coord + particles.y[slice_mask] = y_coord + particles.py[slice_mask] = py_coord + particles.zeta[slice_mask] = zeta_coord[slice_mask] + particles.delta[slice_mask] = delta_coord + + # track for twice flush_data_every turns so that we test the reshaping + for _ in range(flush_data_every * 2): + monitor.track(particles) + + with h5py.File(monitor._slices_filename, 'r') as h5file: + for bid in bunch_numbers: + slice_data = h5file[str(bid)] + print(x_coord_bunch[bid]) + assert np.allclose(slice_data['mean_x'][:, n_slice], + np.mean(x_coord_bunch[bid])) + assert np.allclose(slice_data['mean_px'][:, n_slice], + np.mean(px_coord_bunch[bid])) + assert np.allclose(slice_data['mean_y'][:, n_slice], + np.mean(y_coord_bunch[bid])) + assert np.allclose(slice_data['mean_py'][:, n_slice], + np.mean(py_coord_bunch[bid])) + assert np.allclose(slice_data['mean_zeta'][:, n_slice], + np.mean(zeta_coord_bunch[bid])) + assert np.allclose(slice_data['mean_delta'][:, n_slice], + np.mean(delta_coord_bunch[bid])) + + assert np.allclose(slice_data['sigma_x'][:, n_slice], + np.std(x_coord_bunch[bid])) + assert np.allclose(slice_data['sigma_px'][:, n_slice], + np.std(px_coord_bunch[bid])) + assert np.allclose(slice_data['sigma_y'][:, n_slice], + np.std(y_coord_bunch[bid])) + assert np.allclose(slice_data['sigma_py'][:, n_slice], + np.std(py_coord_bunch[bid])) + assert np.allclose(slice_data['sigma_zeta'][:, n_slice], + np.std(zeta_coord_bunch[bid])) + assert np.allclose(slice_data['sigma_delta'][:, n_slice], + np.std(delta_coord_bunch[bid])) + + epsn_x = (np.sqrt(np.var(x_coord_bunch[bid]) * + np.var(px_coord_bunch[bid]) - + np.cov(x_coord_bunch[bid], px_coord_bunch[bid])[ + 0, 1]) + * beta * gamma) + epsn_y = 
(np.sqrt(np.var(y_coord_bunch[bid]) * + np.var(py_coord_bunch[bid]) - + np.cov(y_coord_bunch[bid], py_coord_bunch[bid])[ + 0, 1]) + * beta * gamma) + epsn_zeta = (np.sqrt(np.var(zeta_coord_bunch[bid]) * + np.var(delta_coord_bunch[bid]) - + np.cov(zeta_coord_bunch[bid], + delta_coord_bunch[bid])[ + 0, 1]) * beta * gamma) + + assert np.allclose(slice_data['epsn_x'][:, n_slice], epsn_x) + assert np.allclose(slice_data['epsn_y'][:, n_slice], epsn_y) + assert np.allclose(slice_data['epsn_zeta'][:, n_slice], epsn_zeta) + + assert np.allclose(slice_data['num_particles'][:, n_slice], + n_macroparticles_slice_bunch[bid]) + + +@for_all_test_contexts +def test_slice_monitor_json(test_context): + n_macroparticles = int(1e6) + num_slices = 10 + zeta_range = (0, 1) + + energy = 7e12 + + offs_x = 1 + offs_px = 2 + offs_y = 3 + offs_py = 4 + offs_delta = 5 + + sigma_x = 6 + sigma_px = 7 + sigma_y = 8 + sigma_py = 9 + sigma_delta = 10 + + # dummy filling scheme + n_bunches = 3 + filling_scheme = np.ones(n_bunches, dtype=int) + bunch_numbers = np.arange(n_bunches, dtype=int) + bunch_spacing_zeta = 10 + + flush_data_every = 10 + + monitor = xf.CollectiveMonitor( + base_file_name='test_monitor', + backend='json', + monitor_bunches=False, + monitor_slices=True, + monitor_particles=False, + flush_data_every=flush_data_every, + zeta_range=zeta_range, + num_slices=num_slices, + bunch_spacing_zeta=bunch_spacing_zeta, + filling_scheme=filling_scheme, + bunch_numbers=bunch_numbers + ) + + particles = xt.Particles( + _context=test_context, + p0c=energy, + x=np.zeros(n_macroparticles), + ) + + beta = particles.beta0[0] + gamma = np.sqrt(1 / (1 - beta ** 2)) + + n_slice = 5 + + dzeta = monitor.slicer.dzeta + zeta_coord_base = np.random.random(n_macroparticles) + + x_coord_bunch = [] + px_coord_bunch = [] + y_coord_bunch = [] + py_coord_bunch = [] + zeta_coord_bunch = [] + delta_coord_bunch = [] + n_macroparticles_slice_bunch = [] + + for bid in range(n_bunches): + zeta_coord = zeta_coord_base * (bid + 1) - bid * bunch_spacing_zeta + + bin_min = monitor.slicer.zeta_centers[bid, n_slice] - dzeta / 2 + bin_max = monitor.slicer.zeta_centers[bid, n_slice] + dzeta / 2 + + slice_mask = np.logical_and(zeta_coord < bin_max, zeta_coord >= bin_min) + + n_macroparticles_slice = np.sum(slice_mask) + + x_coord = (sigma_x * np.random.random( + n_macroparticles_slice) + offs_x) * (bid + 1) + px_coord = (sigma_px * np.random.random( + n_macroparticles_slice) + offs_px) * (bid + 1) + y_coord = (sigma_y * np.random.random( + n_macroparticles_slice) + offs_y) * (bid + 1) + py_coord = (sigma_py * np.random.random( + n_macroparticles_slice) + offs_py) * (bid + 1) + delta_coord = (sigma_delta * np.random.random(n_macroparticles_slice) + + offs_delta) * (bid + 1) + + x_coord_bunch.append(x_coord) + px_coord_bunch.append(px_coord) + y_coord_bunch.append(y_coord) + py_coord_bunch.append(py_coord) + delta_coord_bunch.append(delta_coord) + zeta_coord_bunch.append(zeta_coord[slice_mask]) + n_macroparticles_slice_bunch.append(n_macroparticles_slice) + + particles.x[slice_mask] = x_coord + particles.px[slice_mask] = px_coord + particles.y[slice_mask] = y_coord + particles.py[slice_mask] = py_coord + particles.zeta[slice_mask] = zeta_coord[slice_mask] + particles.delta[slice_mask] = delta_coord + + # track for twice flush_data_every turns so that we test the reshaping + for _ in range(2 * flush_data_every): + monitor.track(particles) + + with open(monitor._slices_filename, 'r') as f: + bunches_data = json.load(f) + + for bid in bunch_numbers: + 
slice_data = bunches_data[str(bid)] + + for turn in range(2 * flush_data_every): + assert np.isclose(slice_data['mean_x'][turn][n_slice], + np.mean(x_coord_bunch[bid])) + assert np.isclose(slice_data['mean_px'][turn][n_slice], + np.mean(px_coord_bunch[bid])) + assert np.isclose(slice_data['mean_y'][turn][n_slice], + np.mean(y_coord_bunch[bid])) + assert np.isclose(slice_data['mean_py'][turn][n_slice], + np.mean(py_coord_bunch[bid])) + assert np.isclose(slice_data['mean_zeta'][turn][n_slice], + np.mean(zeta_coord_bunch[bid])) + assert np.isclose(slice_data['mean_delta'][turn][n_slice], + np.mean(delta_coord_bunch[bid])) + + assert np.isclose(slice_data['sigma_x'][turn][n_slice], + np.std(x_coord_bunch[bid])) + assert np.isclose(slice_data['sigma_px'][turn][n_slice], + np.std(px_coord_bunch[bid])) + assert np.isclose(slice_data['sigma_y'][turn][n_slice], + np.std(y_coord_bunch[bid])) + assert np.isclose(slice_data['sigma_py'][turn][n_slice], + np.std(py_coord_bunch[bid])) + assert np.isclose(slice_data['sigma_zeta'][turn][n_slice], + np.std(zeta_coord_bunch[bid])) + assert np.isclose(slice_data['sigma_delta'][turn][n_slice], + np.std(delta_coord_bunch[bid])) + + epsn_x = (np.sqrt(np.var(x_coord_bunch[bid]) * + np.var(px_coord_bunch[bid]) - + np.cov(x_coord_bunch[bid], px_coord_bunch[bid])[ + 0, 1]) + * beta * gamma) + epsn_y = (np.sqrt(np.var(y_coord_bunch[bid]) * + np.var(py_coord_bunch[bid]) - + np.cov(y_coord_bunch[bid], py_coord_bunch[bid])[ + 0, 1]) + * beta * gamma) + epsn_zeta = (np.sqrt(np.var(zeta_coord_bunch[bid]) * + np.var(delta_coord_bunch[bid]) - + np.cov(zeta_coord_bunch[bid], + delta_coord_bunch[bid])[ + 0, 1]) * beta * gamma) + + assert np.isclose(slice_data['epsn_x'][turn][n_slice], epsn_x) + assert np.isclose(slice_data['epsn_y'][turn][n_slice], epsn_y) + assert np.isclose(slice_data['epsn_zeta'][turn][n_slice], epsn_zeta) + + assert np.isclose(slice_data['num_particles'][turn][n_slice], + n_macroparticles_slice_bunch[bid]) + + +@for_all_test_contexts +def test_particle_monitor_hdf5(test_context): + n_macroparticles = int(1e2) + num_slices = 10 + zeta_range = (-1, 1) + + particles = xt.Particles( + _context=test_context, p0c=7e12, + x=np.random.random(n_macroparticles), + px=np.random.random(n_macroparticles), + y=np.random.random(n_macroparticles), + py=np.random.random(n_macroparticles), + zeta=np.random.random(n_macroparticles), + delta=np.random.random(n_macroparticles), + ) + + # dummy particle mask + particle_monitor_mask = particles.x > 0 + + flush_data_every = 10 + + monitor = xf.CollectiveMonitor( + base_file_name='test_monitor', + backend='hdf5', + monitor_bunches=False, + monitor_slices=False, + monitor_particles=True, + flush_data_every=flush_data_every, + zeta_range=zeta_range, + num_slices=num_slices, + particle_monitor_mask=particle_monitor_mask, + ) + + # track for twice flush_data_every turns so that we test the reshaping + for _ in range(2*flush_data_every): + monitor.track(particles) + + with h5py.File(monitor._particles_filename, 'r') as h5file: + for stat in monitor._stats_to_store_particles: + saved_data = h5file[stat] + for turn in range(2 * flush_data_every): + assert np.allclose(getattr(particles, + stat)[particle_monitor_mask], + saved_data[turn, :]) + + +@for_all_test_contexts +def test_particle_monitor_json(test_context): + n_macroparticles = int(1e2) + num_slices = 10 + zeta_range = (-1, 1) + + particles = xt.Particles( + _context=test_context, p0c=7e12, + x=np.random.random(n_macroparticles), + px=np.random.random(n_macroparticles), + 
y=np.random.random(n_macroparticles), + py=np.random.random(n_macroparticles), + zeta=np.random.random(n_macroparticles), + delta=np.random.random(n_macroparticles), + ) + + # dummy particle mask + particle_monitor_mask = particles.x > 0 + + flush_data_every = 10 + + monitor = xf.CollectiveMonitor( + base_file_name='test_monitor', + backend='json', + monitor_bunches=False, + monitor_slices=False, + monitor_particles=True, + flush_data_every=flush_data_every, + zeta_range=zeta_range, + num_slices=num_slices, + particle_monitor_mask=particle_monitor_mask, + ) + + # track for twice flush_data_every turns so that we test the reshaping + for _ in range(2*flush_data_every): + monitor.track(particles) + + with open(monitor._particles_filename, 'r') as f: + buffer = json.load(f) + + for stat in monitor._stats_to_store_particles: + saved_data = buffer[stat] + for turn in range(2*flush_data_every): + assert np.allclose(getattr(particles, stat)[particle_monitor_mask], + saved_data[turn][:]) diff --git a/xfields/beam_elements/collective_monitor.py b/xfields/beam_elements/collective_monitor.py index e3a4c9e5..41a6aa53 100644 --- a/xfields/beam_elements/collective_monitor.py +++ b/xfields/beam_elements/collective_monitor.py @@ -1,7 +1,9 @@ import numpy as np -from .sliced_element import SlicedElement -import openpmd_api as io +from .element_with_slicer import ElementWithSlicer +import h5py +import json +import os COORDS = ['x', 'px', 'y', 'py', 'zeta', 'delta'] SECOND_MOMENTS = {} @@ -12,22 +14,31 @@ SECOND_MOMENTS[c1 + '_' + c2] = (c1, c2) -class CollectiveMonitor(SlicedElement): +class CollectiveMonitor(ElementWithSlicer): """ - Base class for elements with a slicer. + A class to monitor the collective motion of the beam. The monitor can save + bunch-by-bunch, slice-by-slice and particle-by-particle data. For the + particle-by-particle data, a mask can be used to select the particles to + be monitored. Parameters ---------- - slicer_moments: List - Moments for the slicer + base_file_name : str + Base file name for the output files. + monitor_bunches : Bool + If True, the bunch-by-bunch data is monitored + monitor_slices : Bool + If True, the slice-by-slice data is monitored + monitor_particles : Bool + If True, the particle-by-particle data is monitored + particle_monitor_mask : np.ndarray + Mask identifying the particles to be monitored zeta_range : Tuple Zeta range for each bunch used in the underlying slicer. num_slices : int Number of slices per bunch used in the underlying slicer. bunch_spacing_zeta : float Bunch spacing in meters. - num_slots : int - Number of filled slots. filling_scheme: np.ndarray List of zeros and ones representing the filling scheme. The length of the array is equal to the number of slots in the machine and each @@ -36,34 +47,64 @@ class CollectiveMonitor(SlicedElement): bunch_numbers: np.ndarray List of the bunches indicating which slots from the filling scheme are used (not all the bunches are used when using multi-processing) + stats_to_store : list + List of the statistics to store + backend: str + Backend used to save the data. 
For the moment only 'hdf5' and 'json' + are supported, with 'hdf5' being the default _flatten: bool Use flattened wakes """ _stats_to_store = [ - 'mean_x', 'mean_px', 'mean_y', 'mean_py', 'mean_zeta', - 'mean_delta', 'sigma_x', 'sigma_y', 'sigma_zeta', - 'sigma_px', 'sigma_py', 'sigma_delta', - 'epsn_x', 'epsn_y', 'epsn_zeta', 'num_particles'] + 'mean_x', 'mean_px', 'mean_y', 'mean_py', 'mean_zeta', + 'mean_delta', 'sigma_x', 'sigma_y', 'sigma_zeta', + 'sigma_px', 'sigma_py', 'sigma_delta', + 'epsn_x', 'epsn_y', 'epsn_zeta', 'num_particles'] + + _stats_to_store_particles = [ + 'x', 'px', 'y', 'py', 'zeta', 'delta', 'weight', 'state'] + + # Mapping from the stats to store in the monitor to the moments to store in + # the slicer + stat_to_slicer_moments = { + 'mean_x': ['x'], + 'mean_px': ['px'], + 'mean_y': ['y'], + 'mean_py': ['py'], + 'mean_zeta': ['zeta'], + 'mean_delta': ['delta'], + 'sigma_x': ['x', 'x_x'], + 'sigma_y': ['y', 'y_y'], + 'sigma_zeta': ['zeta', 'zeta_zeta'], + 'sigma_px': ['px', 'px_px'], + 'sigma_py': ['py', 'py_py'], + 'sigma_delta': ['delta', 'delta_delta'], + 'epsn_x': ['x', 'px', 'x_x', 'px_px', 'x_px'], + 'epsn_y': ['y', 'py', 'y_y', 'py_py', 'y_py'], + 'epsn_zeta': ['zeta', 'delta', 'delta_delta', 'zeta_zeta', + 'zeta_delta'], + 'num_particles': ['num_particles'] + } def __init__(self, base_file_name, monitor_bunches, monitor_slices, monitor_particles, - n_steps, - beta_gamma, - slicer_moments='all', + particle_monitor_mask=None, + flush_data_every=1, zeta_range=None, # These are [a, b] in the paper num_slices=None, # Per bunch, this is N_1 in the paper bunch_spacing_zeta=None, # This is P in the paper - num_slots=None, filling_scheme=None, bunch_numbers=None, stats_to_store=None, - output_extension=None, + stats_to_store_particles=None, + backend='hdf5', _flatten=False): + slicer_moments = [] if stats_to_store is not None: for stat in stats_to_store: assert stat in self._stats_to_store @@ -71,105 +112,150 @@ def __init__(self, else: self.stats_to_store = self._stats_to_store + if stats_to_store_particles is not None: + for stat in stats_to_store_particles: + assert stat in self._stats_to_store_particles + self.stats_to_store_particles = stats_to_store_particles + else: + self.stats_to_store_particles = self._stats_to_store_particles + + for stat in self.stats_to_store: + # For each stat we store, we add the corresponding moment + if stat == 'num_particles': + slicer_moments.append(stat) + else: + slicer_moments.extend(self.stat_to_slicer_moments[stat]) + + if backend == 'hdf5': + self.extension = 'h5' + self.flush_buffer_to_file_func = flush_buffer_to_file_hdf5 + elif backend == 'json': + self.extension = 'json' + self.flush_buffer_to_file_func = flush_buffer_to_file_json + else: + raise ValueError('Only hdf5 and json backends are supported.') + self.base_file_name = base_file_name self.bunch_buffer = None self.slice_buffer = None self.particle_buffer = None - self.n_steps = n_steps - self.beta_gamma = beta_gamma self.monitor_bunches = monitor_bunches self.monitor_slices = monitor_slices self.monitor_particles = monitor_particles + self.flush_data_every = flush_data_every + self.particle_monitor_mask = particle_monitor_mask + + self._bunches_filename = (self.base_file_name + '_bunches.' + + self.extension) + + if os.path.exists(self._bunches_filename) and monitor_bunches: + os.remove(self._bunches_filename) + + self._slices_filename = (self.base_file_name + '_slices.' 
+ + self.extension) - self.output_extension = output_extension + if os.path.exists(self._slices_filename) and monitor_slices: + os.remove(self._slices_filename) - if slicer_moments == 'all': - slicer_moments = COORDS + list(SECOND_MOMENTS.keys()) + self._particles_filename = (self.base_file_name + '_particles.' + + self.extension) + + if os.path.exists(self._particles_filename) and monitor_particles: + os.remove(self._particles_filename) self.pipeline_manager = None + self.i_turn = 0 + super().__init__( slicer_moments=slicer_moments, zeta_range=zeta_range, # These are [a, b] in the paper num_slices=num_slices, # Per bunch, this is N_1 in the paper bunch_spacing_zeta=bunch_spacing_zeta, # This is P in the paper - num_slots=num_slots, filling_scheme=filling_scheme, bunch_numbers=bunch_numbers, with_compressed_profile=False ) + def track(self, particles, _slice_result=None, _other_bunch_slicers=None): + super().track(particles=particles, + _slice_result=_slice_result, + _other_bunch_slicers=_other_bunch_slicers + ) + + self.i_turn += 1 + if self.monitor_bunches: - self.bunch_series = io.Series( - self.base_file_name + '_bunches.h5', - io.Access.create) + if self.bunch_buffer is None: + self.bunch_buffer = self._init_bunch_buffer() + + self._update_bunch_buffer(particles) + + if self.i_turn % self.flush_data_every == 0: + self.flush_buffer_to_file_func( + self.bunch_buffer, self._bunches_filename) + self.bunch_buffer = self._init_bunch_buffer() if self.monitor_slices: - self.slice_series = io.Series( - self.base_file_name + '_slices.h5', - io.Access.create) + if self.slice_buffer is None: + self.slice_buffer = self._init_slice_buffer() + + self._update_slice_buffer(particles) + + if self.i_turn % self.flush_data_every == 0: + self.flush_buffer_to_file_func( + self.slice_buffer, self._slices_filename) + self.slice_buffer = self._init_slice_buffer() if self.monitor_particles: - self.particle_series = io.Series( - self.base_file_name + '_particles.h5', - io.Access.create) + if self.particle_buffer is None: + self.particle_buffer = self._init_particle_buffer() - self.i_turn = 0 + self._update_particle_buffer(particles) + + if self.i_turn % self.flush_data_every == 0: + self.flush_buffer_to_file_func( + self.particle_buffer, self._particles_filename) + self.particle_buffer = self._init_particle_buffer() - ''' def _init_bunch_buffer(self): buf = {} for bid in self.slicer.bunch_numbers: - buf[bid] = {} + # Convert to int to avoid json serialization issues + buf[int(bid)] = {} for stats in self.stats_to_store: - buf[bid][stats] = np.zeros(self.n_steps) + buf[int(bid)][stats] = np.zeros(self.flush_data_every) return buf def _init_slice_buffer(self): buf = {} for bid in self.slicer.bunch_numbers: - buf[bid] = {} + # Convert to int to avoid json serialization issues + buf[int(bid)] = {} for stats in self.stats_to_store: - buf[bid][stats] = np.zeros((self.slicer.num_slices, - self.n_steps)) + buf[int(bid)][stats] = np.zeros((self.flush_data_every, + self.slicer.num_slices)) return buf - ''' - def track(self, particles, _slice_result=None, _other_bunch_slicers=None): - super().track(particles=particles, - _slice_result=_slice_result, - _other_bunch_slicers=_other_bunch_slicers - ) - - #if self.monitor_bunches: - # if self.bunch_buffer is None: - # self.bunch_buffer = self._init_bunch_buffer() - # - # self._update_bunch_buffer(particles) - - #if self.monitor_slices: - # if self.slice_buffer is None: - # self.slice_buffer = self._init_slice_buffer() - # - # self._update_slice_buffer() - - if 
self.monitor_bunches: - self._update_bunch_series(particles) + def _init_particle_buffer(self): + num_particles = np.sum(self.particle_monitor_mask) + buf = {} - # self.file_backend.dump_data(self.i_turn, self.slicer, particles) + for stat in self.stats_to_store_particles: + buf[stat] = np.zeros((self.flush_data_every, num_particles)) - self.i_turn += 1 + return buf - def _update_bunch_series(self, particles): + def _update_bunch_buffer(self, particles): i_bunch_particles = self._slice_result['i_bunch_particles'] - bunch_it = self.bunch_series.iterations[self.i_turn] - bunches = bunch_it.particles['bunches'] - for i_bunch, bid in enumerate(self.slicer.bunch_numbers): bunch_mask = (i_bunch_particles == bid) + beta = np.mean(particles.beta0[bunch_mask]) + gamma = 1 / np.sqrt(1 - beta**2) + beta_gamma = beta * gamma for stat in self.stats_to_store: if stat == 'num_particles': val = np.sum(self.slicer.num_particles[i_bunch, :]) @@ -186,52 +272,26 @@ def _update_bunch_series(self, particles): mom_p_str = 'delta' mom_p = getattr(particles, mom_p_str) val = (np.sqrt(np.linalg.det(np.cov(mom[bunch_mask], - mom_p[bunch_mask]))) * - self.beta_gamma) + mom_p[bunch_mask]))) + * beta_gamma) elif stat == 'num_particles': val = np.sum(self.slicer.num_particles[i_bunch, :]) else: raise ValueError('Unknown statistics f{stat}') - bunches[stat] = val + self.bunch_buffer[bid][stat][self.i_turn % + self.flush_data_every] = val - self.bunch_series.flush() - - def _update_bunch_buffer(self, particles): + def _update_slice_buffer(self, particles): i_bunch_particles = self._slice_result['i_bunch_particles'] - write_pos = self.i_turn % self.buffer_size for i_bunch, bid in enumerate(self.slicer.bunch_numbers): bunch_mask = (i_bunch_particles == bid) - for stat in self.stats_to_store: - if stat == 'num_particles': - val = np.sum(self.slicer.num_particles[i_bunch, :]) - else: - mom = getattr(particles, stat.split('_')[-1]) - if stat.startswith('mean'): - val = np.mean(mom[bunch_mask]) - elif stat.startswith('sigma'): - val = np.std(mom[bunch_mask]) - elif stat.startswith('epsn'): - if stat.split('_')[-1] != 'zeta': - mom_p_str = 'p' + stat.split('_')[-1] - else: - mom_p_str = 'delta' - mom_p = getattr(particles, mom_p_str) - val = (np.sqrt(np.linalg.det(np.cov(mom[bunch_mask], - mom_p[bunch_mask]))) * - self.beta_gamma) - elif stat == 'num_particles': - val = np.sum(self.slicer.num_particles[i_bunch, :]) - else: - raise ValueError('Unknown statistics f{stat}') - - self.bunch_buffer[bid][stat][write_pos] = val - - def _update_slice_buffer(self): - print('bla') - write_pos = self.i_turn % self.buffer_size - for i_bunch, bid in enumerate(self.slicer.bunch_numbers): + # we use the bunch beta_gamma to calculate the emittance, while + # we should use the slice beta_gamma + beta = np.mean(particles.beta0[bunch_mask]) + gamma = 1 / np.sqrt(1 - beta**2) + beta_gamma = beta * gamma for stat in self.stats_to_store: mom_str = stat.split('_')[-1] if stat.startswith('mean'): @@ -245,156 +305,112 @@ def _update_slice_buffer(self): mom_p_str = 'delta' val = (np.sqrt(self.slicer.var(mom_str)[i_bunch, :] * - self.slicer.var(mom_p_str)[i_bunch, :] - - self.slicer.cov(mom_str, mom_p_str)[i_bunch, :]) * - self.beta_gamma) + self.slicer.var(mom_p_str)[i_bunch, :] - + self.slicer.cov(mom_str, mom_p_str)[i_bunch, + :]) * + beta_gamma) elif stat == 'num_particles': val = self.slicer.num_particles[i_bunch, :] else: raise ValueError('Unknown statistics f{stat}') - self.slice_buffer[bid][stat][:, write_pos] = val - - -class BaseFileBackend: - 
def __init__(self, base_filename, - #monitor_bunches, bunch_monitor_stride, - #monitor_slices, slice_monitor_stride, - #monitor_particles, particle_monitor_stride, - parameters_dict=None, bunch_ids=None): - - self.base_filename = base_filename - #self.monitor_bunches = monitor_bunches - #self.bunch_monitor_stride = bunch_monitor_stride - #self.monitor_slices = monitor_slices - #self.slice_monitor_stride = slice_monitor_stride - #self.monitor_particles = monitor_particles - #self.particle_monitor_stride = particle_monitor_stride - self.parameters_dict = parameters_dict - - if bunch_ids is not None: - self.bunch_ids = bunch_ids - else: - self.bunch_ids = np.array([0]) - - self.bunch_buffer = None - self.slice_buffer = None - self.particle_buffer = None - - ''' - def init_files(self, stats_to_store, n_steps): - if self.monitor_bunches: - self.init_bunch_monitor_file(stats_to_store, n_steps) - - if self.monitor_slices: - self.init_slice_monitor_file() - - if self.monitor_particles: - self.init_particle_monitor_file() - ''' - - def init_bunch_monitor_file(self, stats_to_store, n_steps): - raise RuntimeError('File backend must implement ' - 'init_bunch_monitor_file') - - def init_slice_monitor_file(self): - raise RuntimeError('File backend must implement ' - 'init_slice_monitor_file') - - def init_particle_monitor_file(self): - raise RuntimeError('File backend must implement ' - 'init_particle_monitor_file') - - def dump_data(self, i_turn, slicer, particles): - if self.monitor_bunches and i_turn % self.bunch_monitor_stride == 0: - self.dump_bunch_data(i_turn, particles) - - if self.monitor_slices and i_turn % self.slice_monitor_stride == 0: - self.dump_slice_data(i_turn, slicer) - - if self.monitor_particles and i_turn % self.particle_monitor_stride == 0: - self.dump_particle_data(i_turn, slicer, particles) - - def dump_bunch_data(self, i_turn, slicer): - raise RuntimeError('File backend must implement dump_bunch_data') - - def dump_slice_data(self, i_turn, slicer): - raise RuntimeError('File backend must implement dump_slice_data') - - def dump_particle_data(self, i_turn, slicer, particles): - raise RuntimeError('File backend must implement dump_bunch_data') - -''' -class HDF5BackEnd(BaseFileBackend): - def __init__(self, base_filename, - monitor_bunches, bunch_monitor_stride, - monitor_slices, slice_monitor_stride, - monitor_particles, particle_monitor_stride, - parameters_dict=None, bunch_ids=None, use_mpi=False): - - self.use_mpi = use_mpi - - super().__init__(base_filename=base_filename, - monitor_bunches=monitor_bunches, - bunch_monitor_stride=bunch_monitor_stride, - monitor_slices=monitor_slices, - slice_monitor_stride=slice_monitor_stride, - monitor_particles=monitor_particles, - particle_monitor_stride=particle_monitor_stride, - parameters_dict=parameters_dict, - bunch_ids=bunch_ids) - - def init_bunch_monitor_file(self, stats_to_store, n_steps): - """ - Initialize HDF5 file and create its basic structure (groups and - datasets). One group is created for bunch-specific data. One dataset for - each of the quantities defined in self.stats_to_store is generated. - If specified by the user, write the contents of the parameters_dict as - metadata (attributes) to the file. Maximum file compression is activated - only if not using MPI. 
- """ - if self.base_filename is not None: - filename = f'{self.base_filename}_bunchmonitor.h5' - else: - filename = f'bunchmonitor.h5' - - if self.use_mpi: - h5file = hp.File(filename, 'w', driver='mpio', - comm=MPI.COMM_WORLD) - kwargs_gr = {} + self.slice_buffer[bid][stat][self.i_turn % + self.flush_data_every, :] = val + + def _update_particle_buffer(self, particles): + for stat in self.stats_to_store_particles: + val = getattr(particles, stat) + self.particle_buffer[stat][self.i_turn % + self.flush_data_every, :] = val + + +def flush_buffer_to_file_hdf5(buffer, filename): + # Iif the file does not exist, create it and write the buffer + if not os.path.exists(filename): + with h5py.File(filename, 'w') as f: + for key in buffer: + if not type(buffer[key]) == dict: + if len(buffer[key].shape) == 1: + f.create_dataset(str(key), + data=buffer[key], + chunks=True, maxshape=(None,)) + elif len(buffer[key].shape) == 2: + f.create_dataset(str(key), + data=buffer[key], + chunks=True, + maxshape=(None, buffer[key].shape[1])) + else: + raise ValueError('The shape of the buffer is not ' + 'supported') + else: + # this is for the bunch or slice buffer + for subkey in buffer[key]: + if len(buffer[key][subkey].shape) == 1: + f.create_dataset(str(key) + '/' + str(subkey), + data=buffer[key][subkey], + chunks=True, maxshape=(None,)) + elif len(buffer[key][subkey].shape) == 2: + f.create_dataset(str(key) + '/' + str(subkey), + data=buffer[key][subkey], + chunks=True, + maxshape=(None, + buffer[key][subkey].shape[1])) + else: + raise ValueError('The shape of the buffer is not ' + 'supported') + + # If the file exists, append the buffer + else: + with h5py.File(filename, 'a') as f: + for key in buffer: + if not type(buffer[key]) == dict: + # this is for the particle buffer + f[str(key)].resize((f[str(key)].shape[0] + + buffer[key].shape[0]), axis=0) + f[str(key)][-buffer[key].shape[0]:] = buffer[key] + else: + # this is for the bunch or slice buffer + for subkey in buffer[key]: + f[str(key) + '/' + str(subkey)].resize( + (f[str(key) + '/' + str(subkey)].shape[0] + + buffer[key][subkey].shape[0]), axis=0) + val = buffer[key][subkey] + f[str(key) + '/' + + str(subkey)][-buffer[key][subkey].shape[0]:] = val + + +def flush_buffer_to_file_json(buffer, filename): + # To prepare the buffer for json, we need to convert the numpy arrays to + # lists + buffer_lists = {} + for key in buffer: + if not type(buffer[key]) == dict: + # this is for the particle buffer + buffer_lists[str(key)] = buffer[key].tolist() else: - h5file = hp.File(filename, 'w') - kwargs_gr = {'compression': 'gzip', 'compression_opts': 9} - - if self.parameters_dict: - for key in self.parameters_dict: - h5file.attrs[key] = self.parameters_dict[key] - - h5group = h5file.create_group('Bunches') - for bid in self.bunch_ids: - gr = h5group.create_group(repr(bid)) - for stats in sorted(stats_to_store): - gr.create_dataset(stats, shape=(n_steps,), - **kwargs_gr) - - h5file.close() - - def dump_bunch_data(self, i_turn, particles): - pass - - def _write_data_to_buffer(self, i_turn, particles): - """ Store the data in the self.buffer dictionary before writing - them to file. The buffer is implemented as a shift register. To - find the slice_set-specific data, a slice_set, defined by the - slicing configuration self.slicer must be requested from the - bunch (instance of the Particles class), including all the - statistics that are to be saved. """ - - # Handle the different statistics quantities, which can - # either be methods (like mean(), ...) 
or simply attributes - # (macroparticlenumber or n_macroparticles_per_slice) of the bunch - # or slice_set resp. - - # bunch-specific data. - pass -''' \ No newline at end of file + # this is for the bunch or slice buffer + buffer_lists[str(key)] = {} + for subkey in buffer[key]: + buffer_lists[str(key)][subkey] = buffer[key][subkey].tolist() + + # Now we can write the buffer to the file + if not os.path.exists(filename): + with open(filename, 'w') as f: + json.dump(buffer_lists, f, indent=2) + else: + with open(filename, 'r') as f: + old_buffer = json.load(f) + for key in buffer_lists: + if not type(buffer_lists[key]) == dict: + # this is for the particle buffer + old_buffer[key] = np.concatenate( + (old_buffer[key], buffer_lists[key]), axis=0).tolist() + else: + # this is for the bunch or slice buffer + for subkey in buffer_lists[key]: + old_buffer[key][subkey] = np.concatenate( + (old_buffer[key][subkey], buffer_lists[key][subkey]), + axis=0).tolist() + + with open(filename, 'w') as f: + json.dump(old_buffer, f, indent=2) From eb33a528c10d168eb71c7538a05c658b2048a4f9 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Mon, 17 Jun 2024 09:27:47 +0200 Subject: [PATCH 09/28] small improvements in documentation --- xfields/beam_elements/collective_monitor.py | 25 +++++++++++++-------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/xfields/beam_elements/collective_monitor.py b/xfields/beam_elements/collective_monitor.py index 41a6aa53..637ebc76 100644 --- a/xfields/beam_elements/collective_monitor.py +++ b/xfields/beam_elements/collective_monitor.py @@ -19,7 +19,9 @@ class CollectiveMonitor(ElementWithSlicer): A class to monitor the collective motion of the beam. The monitor can save bunch-by-bunch, slice-by-slice and particle-by-particle data. For the particle-by-particle data, a mask can be used to select the particles to - be monitored. + be monitored. The monitor collects a buffer of `flush_data_every` turns + before saving the data to disk, in order to reduce the number of I/O and + avoid storing too much data in memory. Parameters ---------- @@ -33,6 +35,16 @@ class CollectiveMonitor(ElementWithSlicer): If True, the particle-by-particle data is monitored particle_monitor_mask : np.ndarray Mask identifying the particles to be monitored + flush_data_every: int + Number of turns after which the data is saved to disk + stats_to_store : list + List of the statistics to store for the bunch-by-bunch and + slice-by-slice data + stats_to_store_particles : list + List of the statistics to store for the particle-by-particle data + backend: str + Backend used to save the data. For the moment only 'hdf5' and 'json' + are supported, with 'hdf5' being the default zeta_range : Tuple Zeta range for each bunch used in the underlying slicer. num_slices : int @@ -47,11 +59,6 @@ class CollectiveMonitor(ElementWithSlicer): bunch_numbers: np.ndarray List of the bunches indicating which slots from the filling scheme are used (not all the bunches are used when using multi-processing) - stats_to_store : list - List of the statistics to store - backend: str - Backend used to save the data. 
For the moment only 'hdf5' and 'json' - are supported, with 'hdf5' being the default _flatten: bool Use flattened wakes """ @@ -94,9 +101,9 @@ def __init__(self, monitor_particles, particle_monitor_mask=None, flush_data_every=1, - zeta_range=None, # These are [a, b] in the paper - num_slices=None, # Per bunch, this is N_1 in the paper - bunch_spacing_zeta=None, # This is P in the paper + zeta_range=None, + num_slices=None, + bunch_spacing_zeta=None, filling_scheme=None, bunch_numbers=None, stats_to_store=None, From be5e38cdd3e22e44589f5527b98c7f871e95b2aa Mon Sep 17 00:00:00 2001 From: Lorenzo Giacomel Date: Tue, 18 Jun 2024 17:34:12 +0200 Subject: [PATCH 10/28] delete outdated examples --- examples/006_monitor/000_buffers.py | 87 ----------------------------- 1 file changed, 87 deletions(-) delete mode 100644 examples/006_monitor/000_buffers.py diff --git a/examples/006_monitor/000_buffers.py b/examples/006_monitor/000_buffers.py deleted file mode 100644 index 941a82ff..00000000 --- a/examples/006_monitor/000_buffers.py +++ /dev/null @@ -1,87 +0,0 @@ -import xfields as xf -import xtrack as xt -import xpart as xp -import xobjects as xo - -import numpy as np -from scipy.constants import e, c, physical_constants - -context = xo.ContextCpu(omp_num_threads=0) - -n_macroparticles = int(1e5) -num_slices = n_macroparticles -zeta_range = (-1, 1) -dz = (zeta_range[1] - zeta_range[0])/num_slices - -E0 = physical_constants['proton mass energy equivalent in MeV'][0]*1e6 -E = 7000e9 -p0 = E * e / c -gamma = E/E0 -beta = np.sqrt(1-1/gamma**2) - -intensity = 1e11 -epsn_x = 2e-6 -epsn_y = 2e-6 -taub = 0.9e-9 -sigma_z = taub*beta*c/4 - -zeta = np.linspace(zeta_range[0] + dz/2, zeta_range[1]-dz/2, - n_macroparticles) - -monitor = xf.CollectiveMonitor( - file_backend=None, - monitor_bunches=True, - monitor_slices=True, - monitor_particles=False, - n_steps=10, - buffer_size=10, - beta_gamma=beta*gamma, - slicer_moments='all', - zeta_range=zeta_range, # These are [a, b] in the paper - num_slices=num_slices, # Per bunch, this is N_1 in the paper - bunch_spacing_zeta=10, # This is P in the paper - num_slots=1 -) - -accQ_x = 60.275 -accQ_y = 60.295 -circumference = 26658.883 -averageRadius = circumference / (2 * np.pi) -beta_x = averageRadius/accQ_x -beta_y = averageRadius/accQ_y -chroma = 10 - -alpha = 53.86**-2 -h_RF = 35640 -momentumCompaction = alpha -eta = momentumCompaction - 1.0 / gamma ** 2 -V = 5e6 -Q_s = np.sqrt((e*V*h_RF*eta)/(2*np.pi*beta*c*p0)) -sigma_delta = Q_s * sigma_z / (averageRadius * eta) -beta_s = sigma_z / sigma_delta - -betatron_map = xt.LineSegmentMap( - length=circumference, betx=beta_x, bety=beta_y, - qx=accQ_x, qy=accQ_y, - longitudinal_mode='linear_fixed_qs', - dqx=chroma, dqy=chroma, - qs=Q_s, bets=beta_s -) - -line = xt.Line(elements=[monitor, betatron_map], - element_names=['monitor', 'betatron_map']) - -line.particle_ref = xt.Particles(p0c=E) -line.build_tracker(_context=context) - -particles = xp.generate_matched_gaussian_bunch( - num_particles=n_macroparticles, total_intensity_particles=intensity, - nemitt_x=epsn_x, nemitt_y=epsn_y, sigma_z=sigma_z, - line=line, _context=context -) - -line.track(particles) - -print('a') - - From 0122bcec71c7675f0f135d48e576338e9396d7df Mon Sep 17 00:00:00 2001 From: Lorenzo Giacomel Date: Tue, 18 Jun 2024 17:35:32 +0200 Subject: [PATCH 11/28] delete outdated examples --- examples/006_monitor/test_monitor_buffers.py | 213 ------------------- 1 file changed, 213 deletions(-) delete mode 100644 examples/006_monitor/test_monitor_buffers.py diff 
--git a/examples/006_monitor/test_monitor_buffers.py b/examples/006_monitor/test_monitor_buffers.py deleted file mode 100644 index 2209098e..00000000 --- a/examples/006_monitor/test_monitor_buffers.py +++ /dev/null @@ -1,213 +0,0 @@ -import xfields as xf -import xtrack as xt -import xobjects as xo -from xobjects.test_helpers import for_all_test_contexts - -import numpy as np -from scipy.constants import e, c, physical_constants - - -#@for_all_test_contexts -#def test_bunch_buffer(test_context): -test_context = xo.ContextCpu(omp_num_threads=0) - -n_macroparticles = int(1e6) -num_slices = 10 -zeta_range = (-1, 1) - -E0 = physical_constants['proton mass energy equivalent in MeV'][0]*1e6 -E = 7000e9 -gamma = E/E0 -beta = np.sqrt(1-1/gamma**2) - -offs_x = 1e-3 -offs_px = 2e-3 -offs_y = 3e-3 -offs_py = 4e-3 -offs_zeta = 5e-3 -offs_delta = 6e-3 - -sigma_x = 1e-3 -sigma_px = 2e-3 -sigma_y = 3e-3 -sigma_py = 4e-3 -sigma_zeta = 5e-3 -sigma_delta = 6e-3 - -monitor = xf.CollectiveMonitor( - file_backend=None, - monitor_bunches=True, - monitor_slices=False, - monitor_particles=False, - n_steps=10, - buffer_size=10, - beta_gamma=beta*gamma, - slicer_moments='all', - zeta_range=zeta_range, # These are [a, b] in the paper - num_slices=num_slices, # Per bunch, this is N_1 in the paper - bunch_spacing_zeta=10, # This is P in the paper - num_slots=1 -) - -x_coord = sigma_x*np.random.random(n_macroparticles) + offs_x -px_coord = sigma_px*np.random.random(n_macroparticles) + offs_px -y_coord = sigma_y*np.random.random(n_macroparticles) + offs_y -py_coord = sigma_py*np.random.random(n_macroparticles) + offs_py -zeta_coord = sigma_zeta*np.random.random(n_macroparticles) + offs_zeta -delta_coord = sigma_delta*np.random.random(n_macroparticles) + offs_delta - -particles = xt.Particles( - _context=test_context, p0c=E, - x=x_coord, - px=px_coord, - y=y_coord, - py=py_coord, - zeta=zeta_coord, - delta=delta_coord -) - -monitor.track(particles) - -assert monitor.bunch_buffer[0]['mean_x'][0] == np.mean(x_coord) -assert monitor.bunch_buffer[0]['mean_px'][0] == np.mean(px_coord) -assert monitor.bunch_buffer[0]['mean_y'][0] == np.mean(y_coord) -assert monitor.bunch_buffer[0]['mean_py'][0] == np.mean(py_coord) -assert monitor.bunch_buffer[0]['mean_zeta'][0] == np.mean(zeta_coord) -assert monitor.bunch_buffer[0]['mean_delta'][0] == np.mean(delta_coord) - -assert monitor.bunch_buffer[0]['sigma_x'][0] == np.std(x_coord) -assert monitor.bunch_buffer[0]['sigma_px'][0] == np.std(px_coord) -assert monitor.bunch_buffer[0]['sigma_y'][0] == np.std(y_coord) -assert monitor.bunch_buffer[0]['sigma_py'][0] == np.std(py_coord) -assert monitor.bunch_buffer[0]['sigma_zeta'][0] == np.std(zeta_coord) -assert monitor.bunch_buffer[0]['sigma_delta'][0] == np.std(delta_coord) - -epsn_x = np.sqrt(np.linalg.det(np.cov(x_coord, px_coord))) * beta*gamma -epsn_y = np.sqrt(np.linalg.det(np.cov(y_coord, py_coord))) * beta*gamma -epsn_zeta = np.sqrt(np.linalg.det(np.cov(zeta_coord, - delta_coord))) * beta*gamma - -assert np.isclose(monitor.bunch_buffer[0]['epsn_x'][0], epsn_x) -assert np.isclose(monitor.bunch_buffer[0]['epsn_y'][0], epsn_y) -assert np.isclose(monitor.bunch_buffer[0]['epsn_zeta'][0], epsn_zeta) - -assert monitor.bunch_buffer[0]['num_particles'][0] == n_macroparticles -''' - - -test_context = xo.ContextCpu(omp_num_threads=0) - -n_macroparticles = int(1e7) -num_slices = 10 -zeta_range = (0, 1) - -E0 = physical_constants['proton mass energy equivalent in MeV'][0] * 1e6 -E = 7000e9 -gamma = E / E0 -beta = np.sqrt(1 - 1 / gamma ** 2) - 
-offs_x = 1 -offs_px = 2 -offs_y = 3 -offs_py = 4 -offs_delta = 5 - -sigma_x = 6 -sigma_px = 7 -sigma_y = 8 -sigma_py = 9 -sigma_delta = 10 - -monitor = xf.CollectiveMonitor( - file_backend=None, - monitor_bunches=False, - monitor_slices=True, - monitor_particles=False, - n_steps=1, - buffer_size=1, - beta_gamma=beta * gamma, - slicer_moments='all', - zeta_range=zeta_range, # These are [a, b] in the paper - num_slices=num_slices, # Per bunch, this is N_1 in the paper - bunch_spacing_zeta=10, # This is P in the paper - num_slots=1 -) - -zeta_coord_tot = np.random.random(n_macroparticles) - -particles = xt.Particles( - _context=test_context, - p0c=E, - zeta=zeta_coord_tot -) - -n_slice = 5 - -dzeta = monitor.slicer.dzeta - -bin_min = monitor.slicer.zeta_centers[n_slice] - dzeta/2 -bin_max = monitor.slicer.zeta_centers[n_slice] + dzeta/2 - -slice_mask = np.logical_and(zeta_coord_tot < bin_max, zeta_coord_tot >= bin_min) - -n_macroparticles_slice = np.sum(slice_mask) - -x_coord = sigma_x * np.random.random(n_macroparticles_slice) + offs_x -px_coord = sigma_px * np.random.random(n_macroparticles_slice) + offs_px -y_coord = sigma_y * np.random.random(n_macroparticles_slice) + offs_y -py_coord = sigma_py * np.random.random(n_macroparticles_slice) + offs_py -delta_coord = (sigma_delta * np.random.random(n_macroparticles_slice) + - offs_delta) - -particles.x[slice_mask] = x_coord -particles.px[slice_mask] = px_coord -particles.y[slice_mask] = y_coord -particles.py[slice_mask] = py_coord -particles.delta[slice_mask] = delta_coord - -monitor.track(particles) - - -assert np.isclose(monitor.slice_buffer[0]['mean_x'][n_slice, 0], - np.mean(x_coord)) -assert np.isclose(monitor.slice_buffer[0]['mean_px'][n_slice, 0], - np.mean(px_coord)) -assert np.isclose(monitor.slice_buffer[0]['mean_y'][n_slice, 0], - np.mean(y_coord)) -assert np.isclose(monitor.slice_buffer[0]['mean_py'][n_slice, 0], - np.mean(py_coord)) -zeta_coord = particles.zeta[slice_mask] -assert np.isclose(monitor.slice_buffer[0]['mean_zeta'][n_slice, 0], - np.mean(zeta_coord)) -assert np.isclose(monitor.slice_buffer[0]['mean_delta'][n_slice, 0], - np.mean(delta_coord)) - -assert np.isclose(monitor.slice_buffer[0]['sigma_x'][n_slice, 0], - np.std(x_coord)) -assert np.isclose(monitor.slice_buffer[0]['sigma_px'][n_slice, 0], - np.std(px_coord)) -assert np.isclose(monitor.slice_buffer[0]['sigma_y'][n_slice, 0], - np.std(y_coord)) -assert np.isclose(monitor.slice_buffer[0]['sigma_py'][n_slice, 0], - np.std(py_coord)) -assert np.isclose(monitor.slice_buffer[0]['sigma_zeta'][n_slice, 0], - np.std(zeta_coord)) -assert np.isclose(monitor.slice_buffer[0]['sigma_delta'][n_slice, 0], - np.std(delta_coord)) - - -epsn_x = (np.sqrt(np.var(x_coord) * np.var(px_coord) - - np.cov(x_coord, px_coord)[0, 1]) * beta * gamma) -epsn_y = (np.sqrt(np.var(y_coord) * np.var(py_coord) - - np.cov(y_coord, py_coord)[0, 1])*beta*gamma) -epsn_zeta = (np.sqrt(np.var(zeta_coord) * np.var(delta_coord) - - np.cov(zeta_coord, delta_coord)[0, 1]) * beta*gamma) - - -assert np.isclose(monitor.slice_buffer[0]['epsn_x'][n_slice, 0], epsn_x) -assert np.isclose(monitor.slice_buffer[0]['epsn_y'][n_slice, 0], epsn_y) -assert np.isclose(monitor.slice_buffer[0]['epsn_zeta'][n_slice, 0], epsn_zeta) - -assert (monitor.slice_buffer[0]['num_particles'][n_slice, 0] == - n_macroparticles_slice) -''' \ No newline at end of file From db3d53b026b8ce92406f3436a37c6d12174ca26b Mon Sep 17 00:00:00 2001 From: lgiacome Date: Tue, 10 Sep 2024 17:55:24 +0200 Subject: [PATCH 12/28] fix transverse damper and 
improve tests --- tests/test_transverse_damper.py | 37 ++----- tests_mpi/test_transverse_damper_mpi.py | 121 +++++++++++++++++++++ xfields/beam_elements/transverse_damper.py | 41 +++++-- 3 files changed, 167 insertions(+), 32 deletions(-) create mode 100644 tests_mpi/test_transverse_damper_mpi.py diff --git a/tests/test_transverse_damper.py b/tests/test_transverse_damper.py index 13003174..ca1ef865 100644 --- a/tests/test_transverse_damper.py +++ b/tests/test_transverse_damper.py @@ -31,46 +31,35 @@ def test_transverse_damper(test_context): h_rf = 35640 bunch_spacing_buckets = 10 - n_bunches = 2 + num_bunches = 2 n_slices = 500 - q_x = 60.275 - q_y = 60.295 - chroma = 0 - epsn_x = 2e-6 epsn_y = 2e-6 taub = 0.9e-9 sigma_z = taub*beta*c/4 circumference = 26658.883 - average_radius = circumference / (2 * np.pi) momentum_compaction = alpha - eta = momentum_compaction - 1.0 / gamma ** 2 v_rf = 4e6 - q_s = np.sqrt((en*v_rf*h_rf*eta)/(2*np.pi*beta*c*p0)) - sigma_delta = q_s * sigma_z / (average_radius * eta) f_rev = beta*c/circumference f_rf = f_rev*h_rf - beta_x = average_radius/q_x - beta_y = average_radius/q_y - bucket_length = circumference / h_rf zeta_range = (-0.5*bucket_length, 0.5*bucket_length) filling_scheme = np.zeros(int(h_rf/bunch_spacing_buckets)) - filling_scheme[0:n_bunches] = 1 + filling_scheme[0: num_bunches] = 1 filled_slots = np.nonzero(filling_scheme)[0] - betatron_map = xt.LineSegmentMap( - length=circumference, betx=beta_x, bety=beta_y, - qx=q_x, qy=q_y, + one_turn_map = xt.LineSegmentMap( + length=circumference, + betx=70, bety=80, + qx=62.31, qy=60.32, longitudinal_mode=longitudinal_mode, voltage_rf=v_rf, frequency_rf=f_rf, lag_rf=180, momentum_compaction_factor=momentum_compaction, - dqx=chroma, dqy=chroma ) gain_x = 0.01 @@ -78,17 +67,15 @@ def test_transverse_damper(test_context): transverse_damper = xf.TransverseDamper( gain_x=gain_x, gain_y=gain_y, - num_bunches=n_bunches, filling_scheme=filling_scheme, - filled_slots=filled_slots, zeta_range=zeta_range, num_slices=n_slices, bunch_spacing_zeta=bunch_spacing_buckets*bucket_length, circumference=circumference ) - line = xt.Line(elements=[[betatron_map, transverse_damper][0]], - element_names=[['betatron_map', 'transverse_damper'][0]]) + line = xt.Line(elements=[[one_turn_map, transverse_damper][0]], + element_names=[['one_turn_map', 'transverse_damper'][0]]) line.particle_ref = xt.Particles(p0c=450e9) line.build_tracker(_context=test_context) @@ -111,13 +98,13 @@ def test_transverse_damper(test_context): particles.px += amplitude particles.py += amplitude - mean_x = np.zeros((n_turns, n_bunches)) - mean_y = np.zeros((n_turns, n_bunches)) + mean_x = np.zeros((n_turns, num_bunches)) + mean_y = np.zeros((n_turns, num_bunches)) for i_turn in range(n_turns): line.track(particles, num_turns=1) transverse_damper.track(particles, i_turn) - for ib in range(n_bunches): + for ib in range(num_bunches): mean_x[i_turn, ib] = np.mean(particles.x[n_macroparticles*ib: n_macroparticles*(ib+1)]) mean_y[i_turn, ib] = np.mean(particles.y[n_macroparticles*ib: @@ -128,7 +115,7 @@ def test_transverse_damper(test_context): i_fit_start = 200 i_fit_end = 600 - for i_bunch in range(n_bunches): + for i_bunch in range(num_bunches): ampls_x = np.abs(hilbert(mean_x[:, i_bunch])) fit_x = linregress(turns[i_fit_start: i_fit_end], np.log(ampls_x[i_fit_start: i_fit_end])) diff --git a/tests_mpi/test_transverse_damper_mpi.py b/tests_mpi/test_transverse_damper_mpi.py new file mode 100644 index 00000000..777d3636 --- /dev/null +++ 
b/tests_mpi/test_transverse_damper_mpi.py @@ -0,0 +1,121 @@ +import numpy as np +from scipy.constants import c +from scipy.constants import physical_constants +from scipy.signal import hilbert +from scipy.stats import linregress + +import xfields as xf +import xtrack as xt +import xpart as xp +from xobjects.test_helpers import for_all_test_contexts + + +exclude_contexts = ['ContextPyopencl', 'ContextCupy'] + +@for_all_test_contexts(excluding=exclude_contexts) +def test_transverse_damper_mpi(test_context): + longitudinal_mode = 'nonlinear' + # Machine settings + n_turns = 1000 + + n_macroparticles = 10000 + intensity = 8e9 + + alpha = 53.86**-2 + + e0 = physical_constants['proton mass energy equivalent in MeV'][0]*1e6 + en = 450e9 + gamma = en/e0 + beta = np.sqrt(1-1/gamma**2) + + h_rf = 35640 + bunch_spacing_buckets = 10 + num_bunches = 4 + n_slices = 500 + + epsn_x = 2e-6 + epsn_y = 2e-6 + taub = 0.9e-9 + sigma_z = taub*beta*c/4 + + circumference = 26658.883 + + momentum_compaction = alpha + v_rf = 4e6 + f_rev = beta*c/circumference + f_rf = f_rev*h_rf + + bucket_length = circumference / h_rf + zeta_range = (-0.5*bucket_length, 0.5*bucket_length) + + filling_scheme = np.zeros(int(h_rf/bunch_spacing_buckets)) + filling_scheme[0: num_bunches] = 1 + + one_turn_map = xt.LineSegmentMap( + length=circumference, + betx=70, bety=80, + qx=62.31, qy=60.32, + longitudinal_mode=longitudinal_mode, + voltage_rf=v_rf, frequency_rf=f_rf, lag_rf=180, + momentum_compaction_factor=momentum_compaction, + ) + + gain_x = 0.01 + gain_y = 0.01 + + transverse_damper = xf.TransverseDamper( + gain_x=gain_x, gain_y=gain_y, + filling_scheme=filling_scheme, + zeta_range=zeta_range, + num_slices=n_slices, + bunch_spacing_zeta=bunch_spacing_buckets*bucket_length, + circumference=circumference + ) + + line = xt.Line(elements=[one_turn_map, transverse_damper], + element_names=['one_turn_map', 'transverse_damper']) + + line.particle_ref = xt.Particles(p0c=450e9) + line.build_tracker(_context=test_context) + + particles = xp.generate_matched_gaussian_multibunch_beam( + _context=test_context, + filling_scheme=filling_scheme, + bunch_num_particles=n_macroparticles, + bunch_intensity_particles=intensity, + nemitt_x=epsn_x, nemitt_y=epsn_y, sigma_z=sigma_z, + line=line, bunch_spacing_buckets=bunch_spacing_buckets, + rf_harmonic=[h_rf], rf_voltage=[v_rf], + particle_ref=line.particle_ref, + prepare_line_and_particles_for_mpi_wake_sim=True + ) + + # apply a distortion to the bunches + amplitude = 1e-3 + particles.px += amplitude + particles.py += amplitude + + mean_x = np.zeros((n_turns)) + mean_y = np.zeros((n_turns)) + + for i_turn in range(n_turns): + line.track(particles, num_turns=1) + mean_x[i_turn] = np.mean(particles.x) + mean_y[i_turn] = np.mean(particles.y) + + turns = np.linspace(0, n_turns-1, n_turns) + + i_fit_start = 200 + i_fit_end = 600 + + ampls_x = np.abs(hilbert(mean_x)) + fit_x = linregress(turns[i_fit_start: i_fit_end], + np.log(ampls_x[i_fit_start: i_fit_end])) + + assert np.isclose(-fit_x.slope*2, gain_x, atol=1e-4, rtol=0) + + ampls_y = np.abs(hilbert(mean_y)) + fit_y = linregress(turns[i_fit_start: i_fit_end], + np.log(ampls_y[i_fit_start: i_fit_end])) + + assert np.isclose(-fit_y.slope*2, gain_y, atol=1e-4, rtol=0) diff --git a/xfields/beam_elements/transverse_damper.py b/xfields/beam_elements/transverse_damper.py index b87b9d8b..e8f2f202 100644 --- a/xfields/beam_elements/transverse_damper.py +++ b/xfields/beam_elements/transverse_damper.py @@ -1,10 +1,12 @@ import numpy as np +import xpart as xp import 
xfields as xf +import xtrack as xt from xfields.slicers.compressed_profile import CompressedProfile -class TransverseDamper: +class TransverseDamper(xt.BeamElement): """ A simple bunch-by-bunch transverse Damper implementation. @@ -26,9 +28,11 @@ class TransverseDamper: Returns: (TransverseDamper): A transverse damper beam element. """ - def __init__(self, gain_x, gain_y, num_bunches, filling_scheme, - filled_slots, zeta_range, num_slices, bunch_spacing_zeta, - circumference): + + iscollective = True + + def __init__(self, gain_x, gain_y, filling_scheme, zeta_range, num_slices, bunch_spacing_zeta, + circumference, bunch_selection=None, **kwargs): self.gains = { 'px': gain_x, 'py': gain_y, @@ -36,14 +40,18 @@ def __init__(self, gain_x, gain_y, num_bunches, filling_scheme, self.slicer = xf.UniformBinSlicer( filling_scheme=filling_scheme, - bunch_selection=filled_slots, + bunch_selection=bunch_selection, zeta_range=zeta_range, num_slices=num_slices, bunch_spacing_zeta=bunch_spacing_zeta, moments=['px', 'py'] ) - self.num_bunches = num_bunches + if filling_scheme is not None: + i_last_bunch = np.where(filling_scheme)[0][-1] + num_periods = i_last_bunch + 1 + else: + num_periods = 1 self.moments_data = {} for moment in self.gains.keys(): @@ -52,11 +60,30 @@ def __init__(self, gain_x, gain_y, num_bunches, filling_scheme, zeta_range=zeta_range, num_slices=num_slices, bunch_spacing_zeta=bunch_spacing_zeta, - num_periods=num_bunches, + num_periods=num_periods, num_turns=1, circumference=circumference ) + def _reconfigure_for_parallel(self, n_procs, my_rank): + filled_slots = self.slicer.filled_slots + scheme = np.zeros(np.max(filled_slots) + 1, + dtype=np.int64) + scheme[filled_slots] = 1 + + split_scheme = xp.matched_gaussian.split_scheme + bunch_selection_rank = split_scheme(filling_scheme=scheme, + n_chunk=int(n_procs)) + + self.slicer = xf.UniformBinSlicer( + filling_scheme=scheme, + bunch_selection=bunch_selection_rank[my_rank], + zeta_range=self.slicer.zeta_range, + num_slices=self.slicer.num_slices, + bunch_spacing_zeta=self.slicer.bunch_spacing_zeta, + moments=['px', 'py'] + ) + def track(self, particles, i_turn=0): i_slice_particles = particles.particle_id * 0 + -999 i_slot_particles = particles.particle_id * 0 + -9999 From 59ab01b92e1be0955de975ab22096a2be32c0122 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Mon, 16 Sep 2024 15:45:10 +0200 Subject: [PATCH 13/28] finalize damper --- tests/test_transverse_damper.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/test_transverse_damper.py b/tests/test_transverse_damper.py index ca1ef865..1c67fdcb 100644 --- a/tests/test_transverse_damper.py +++ b/tests/test_transverse_damper.py @@ -24,9 +24,8 @@ def test_transverse_damper(test_context): alpha = 53.86**-2 e0 = physical_constants['proton mass energy equivalent in MeV'][0]*1e6 - en = 450e9 - p0 = en * en / c - gamma = en/e0 + p0c = 450e9 + gamma = p0c/e0 beta = np.sqrt(1-1/gamma**2) h_rf = 35640 @@ -71,13 +70,14 @@ def test_transverse_damper(test_context): zeta_range=zeta_range, num_slices=n_slices, bunch_spacing_zeta=bunch_spacing_buckets*bucket_length, - circumference=circumference + circumference=circumference, + mode='bunch-by-bunch' ) line = xt.Line(elements=[[one_turn_map, transverse_damper][0]], element_names=[['one_turn_map', 'transverse_damper'][0]]) - line.particle_ref = xt.Particles(p0c=450e9) + line.particle_ref = xt.Particles(p0c=p0c) line.build_tracker(_context=test_context) particles = xp.generate_matched_gaussian_multibunch_beam( From 
695a1804e5af86fcb25beb1f2e02289d0c38a93d Mon Sep 17 00:00:00 2001 From: lgiacome Date: Mon, 16 Sep 2024 15:45:26 +0200 Subject: [PATCH 14/28] finalize damper --- xfields/beam_elements/transverse_damper.py | 48 ++++++++-------------- 1 file changed, 18 insertions(+), 30 deletions(-) diff --git a/xfields/beam_elements/transverse_damper.py b/xfields/beam_elements/transverse_damper.py index e8f2f202..120f1907 100644 --- a/xfields/beam_elements/transverse_damper.py +++ b/xfields/beam_elements/transverse_damper.py @@ -24,6 +24,11 @@ class TransverseDamper(xt.BeamElement): num_slices (int): the number of slices used by the underlying slicer, bunch_spacing_zeta (float): the bunch spacing in meters circumference (float): the machine circumference + mode (str): the mode of operation of the damper. Either 'bunch-by-bunch' + (default) or 'slice-by-slice'. In 'bunch-by-bunch' mode, the damper + acts on the average of the particles in each bunch. In + 'slice-by-slice' mode, the damper acts on the average of the + particles in each slice. Returns: (TransverseDamper): A transverse damper beam element. @@ -92,37 +97,20 @@ def track(self, particles, i_turn=0): i_slot_particles=i_slot_particles) for moment in ['px', 'py']: - + particles_moment = getattr(particles, moment)[:] slice_means = self.slicer.mean(moment) for i_bunch, bunch_number in enumerate( - self.slicer.bunch_selection): - - nnz_slices = self.slicer.num_particles[i_bunch, :] > 0 - moments_bunch = { - moment: (np.ones_like(slice_means[i_bunch, :]) * - np.mean(slice_means[i_bunch, nnz_slices])) - } - - self.moments_data[moment].set_moments( - moments=moments_bunch, - i_turn=0, - i_source=self.slicer.filled_slots[bunch_number]) - - interpolated_result = particles.zeta * 0 - - md = self.moments_data[moment] - - self.moments_data[moment]._interp_result( - particles=particles, - data_shape_0=md.data.shape[0], - data_shape_1=md.data.shape[1], - data_shape_2=md.data.shape[2], - data=md.data, - i_slot_particles=i_slot_particles, - i_slice_particles=i_slice_particles, - out=interpolated_result - ) + self.slicer.bunch_selection): + slot_mask = i_slot_particles == bunch_number + slot_slices = np.unique(i_slice_particles[slot_mask]) + + if len(self.slicer.bunch_selection) == 1: + slot_mean = np.mean(slice_means[slot_slices]) + else: + slot_mean = np.mean(slice_means[i_bunch, slot_slices]) + + slot_mean = np.mean(particles_moment[slot_mask]) - getattr(particles, moment)[:] -= (self.gains[moment] * - interpolated_result) + particles_moment[slot_mask] -= (self.gains[moment] * + slot_mean) From 8e4f3158b28f773b82366aac235abcc51305d025 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Tue, 17 Sep 2024 14:48:06 +0200 Subject: [PATCH 15/28] update collective monitor and add mpi test --- xfields/beam_elements/collective_monitor.py | 75 ++++++++++++++++----- 1 file changed, 57 insertions(+), 18 deletions(-) diff --git a/xfields/beam_elements/collective_monitor.py b/xfields/beam_elements/collective_monitor.py index 637ebc76..8af468e1 100644 --- a/xfields/beam_elements/collective_monitor.py +++ b/xfields/beam_elements/collective_monitor.py @@ -4,6 +4,8 @@ import h5py import json import os +import xfields as xf +import xpart as xp COORDS = ['x', 'px', 'y', 'py', 'zeta', 'delta'] SECOND_MOMENTS = {} @@ -56,7 +58,7 @@ class CollectiveMonitor(ElementWithSlicer): of the array is equal to the number of slots in the machine and each element of the array holds a one if the slot is filled or a zero otherwise. 
- bunch_numbers: np.ndarray + bunch_selection: np.ndarray List of the bunches indicating which slots from the filling scheme are used (not all the bunches are used when using multi-processing) _flatten: bool @@ -105,7 +107,7 @@ def __init__(self, num_slices=None, bunch_spacing_zeta=None, filling_scheme=None, - bunch_numbers=None, + bunch_selection=None, stats_to_store=None, stats_to_store_particles=None, backend='hdf5', @@ -180,10 +182,29 @@ def __init__(self, num_slices=num_slices, # Per bunch, this is N_1 in the paper bunch_spacing_zeta=bunch_spacing_zeta, # This is P in the paper filling_scheme=filling_scheme, - bunch_numbers=bunch_numbers, + bunch_selection=bunch_selection, with_compressed_profile=False ) + def _reconfigure_for_parallel(self, n_procs, my_rank): + filled_slots = self.slicer.filled_slots + scheme = np.zeros(np.max(filled_slots) + 1, + dtype=np.int64) + scheme[filled_slots] = 1 + + split_scheme = xp.matched_gaussian.split_scheme + bunch_selection_rank = split_scheme(filling_scheme=scheme, + n_chunk=int(n_procs)) + + self.slicer = xf.UniformBinSlicer( + filling_scheme=scheme, + bunch_selection=bunch_selection_rank[my_rank], + zeta_range=self.slicer.zeta_range, + num_slices=self.slicer.num_slices, + bunch_spacing_zeta=self.slicer.bunch_spacing_zeta, + moments=self.slicer.moments + ) + def track(self, particles, _slice_result=None, _other_bunch_slicers=None): super().track(particles=particles, _slice_result=_slice_result, @@ -227,7 +248,7 @@ def track(self, particles, _slice_result=None, _other_bunch_slicers=None): def _init_bunch_buffer(self): buf = {} - for bid in self.slicer.bunch_numbers: + for bid in self.slicer.bunch_selection: # Convert to int to avoid json serialization issues buf[int(bid)] = {} for stats in self.stats_to_store: @@ -237,7 +258,7 @@ def _init_bunch_buffer(self): def _init_slice_buffer(self): buf = {} - for bid in self.slicer.bunch_numbers: + for bid in self.slicer.bunch_selection: # Convert to int to avoid json serialization issues buf[int(bid)] = {} for stats in self.stats_to_store: @@ -256,16 +277,19 @@ def _init_particle_buffer(self): return buf def _update_bunch_buffer(self, particles): - i_bunch_particles = self._slice_result['i_bunch_particles'] + i_bunch_particles = self._slice_result['i_slot_particles'] - for i_bunch, bid in enumerate(self.slicer.bunch_numbers): + for i_bunch, bid in enumerate(self.slicer.bunch_selection): bunch_mask = (i_bunch_particles == bid) beta = np.mean(particles.beta0[bunch_mask]) gamma = 1 / np.sqrt(1 - beta**2) beta_gamma = beta * gamma for stat in self.stats_to_store: if stat == 'num_particles': - val = np.sum(self.slicer.num_particles[i_bunch, :]) + if len(self.slicer.bunch_selection) > 1: + val = np.sum(self.slicer.num_particles[i_bunch, :]) + else: + val = np.sum(self.slicer.num_particles) else: mom = getattr(particles, stat.split('_')[-1]) if stat.startswith('mean'): @@ -290,9 +314,9 @@ def _update_bunch_buffer(self, particles): self.flush_data_every] = val def _update_slice_buffer(self, particles): - i_bunch_particles = self._slice_result['i_bunch_particles'] + i_bunch_particles = self._slice_result['i_slot_particles'] - for i_bunch, bid in enumerate(self.slicer.bunch_numbers): + for i_bunch, bid in enumerate(self.slicer.bunch_selection): bunch_mask = (i_bunch_particles == bid) # we use the bunch beta_gamma to calculate the emittance, while # we should use the slice beta_gamma @@ -302,22 +326,37 @@ def _update_slice_buffer(self, particles): for stat in self.stats_to_store: mom_str = stat.split('_')[-1] if 
stat.startswith('mean'): - val = self.slicer.mean(mom_str)[i_bunch, :] + if len(self.slicer.bunch_selection) > 1: + val = self.slicer.mean(mom_str)[i_bunch, :] + else: + val = self.slicer.mean(mom_str) elif stat.startswith('sigma'): - val = self.slicer.std(mom_str)[i_bunch, :] + if len(self.slicer.bunch_selection) > 1: + val = self.slicer.std(mom_str)[i_bunch, :] + else: + val = self.slicer.std(mom_str) elif stat.startswith('epsn'): if mom_str != 'zeta': mom_p_str = 'p' + stat.split('_')[-1] else: mom_p_str = 'delta' - val = (np.sqrt(self.slicer.var(mom_str)[i_bunch, :] * - self.slicer.var(mom_p_str)[i_bunch, :] - - self.slicer.cov(mom_str, mom_p_str)[i_bunch, - :]) * - beta_gamma) + if len(self.slicer.bunch_selection) > 1: + val = (np.sqrt(self.slicer.var(mom_str)[i_bunch, :] * + self.slicer.var(mom_p_str)[i_bunch, :] - + self.slicer.cov(mom_str, mom_p_str)[i_bunch, + :]) * + beta_gamma) + else: + val = (np.sqrt(self.slicer.var(mom_str) * + self.slicer.var(mom_p_str) - + self.slicer.cov(mom_str, mom_p_str)) * + beta_gamma) elif stat == 'num_particles': - val = self.slicer.num_particles[i_bunch, :] + if len(self.slicer.bunch_selection) > 1: + val = self.slicer.num_particles[i_bunch, :] + else: + val = self.slicer.num_particles else: raise ValueError('Unknown statistics f{stat}') From 7d2a5edc3fa4564b8ab73aff41baa669409b6c5e Mon Sep 17 00:00:00 2001 From: lgiacome Date: Wed, 18 Sep 2024 09:30:19 +0200 Subject: [PATCH 16/28] fix a typo --- xfields/beam_elements/element_with_slicer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xfields/beam_elements/element_with_slicer.py b/xfields/beam_elements/element_with_slicer.py index 425edbd8..761b9821 100644 --- a/xfields/beam_elements/element_with_slicer.py +++ b/xfields/beam_elements/element_with_slicer.py @@ -252,7 +252,7 @@ def track(self, particles, _slice_result=None, _other_bunch_slicers=None): return xt.PipelineStatus(on_hold=True) if self.pipeline_manager is not None: - for i_partner, partner_name in enumerate(self.partners_names): + for i_partner, partner_name in enumerate(self.partner_names): self.pipeline_manager.receive_message(self._recv_buffer_length_buffer, self.name, partner_name, particles.name, internal_tag=0) self._ensure_recv_buffer_size() From 9fe5c11bef890c2ceb92ea8bd395fc18d44a7410 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Fri, 20 Sep 2024 14:48:41 +0200 Subject: [PATCH 17/28] improve transverse damper docstring --- xfields/beam_elements/transverse_damper.py | 50 +++++++++++----------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/xfields/beam_elements/transverse_damper.py b/xfields/beam_elements/transverse_damper.py index 120f1907..83bc8623 100644 --- a/xfields/beam_elements/transverse_damper.py +++ b/xfields/beam_elements/transverse_damper.py @@ -10,34 +10,36 @@ class TransverseDamper(xt.BeamElement): """ A simple bunch-by-bunch transverse Damper implementation. 
- Args: - gain_x (float): the horizontal damper gain in 1/turns (corresponding to - a damping rate of gain_x/2) - gain_y (float): the vertical damper gain in 1/turns (corresponding to - a damping rate of gain_x/2) - num_bunches (float): the number of bunches in the beam - filling_scheme (np.ndarray): an array of zeros and ones representing - the filling scheme - filled_slots (np.ndarray): an array indicating which slot each bunch - occupies - zeta_range (tuple): the range of zetas covered by the underlying slicer - num_slices (int): the number of slices used by the underlying slicer, - bunch_spacing_zeta (float): the bunch spacing in meters - circumference (float): the machine circumference - mode (str): the mode of operation of the damper. Either 'bunch-by-bunch' - (default) or 'slice-by-slice'. In 'bunch-by-bunch' mode, the damper - acts on the average of the particles in each bunch. In - 'slice-by-slice' mode, the damper acts on the average of the - particles in each slice. - - Returns: - (TransverseDamper): A transverse damper beam element. + Parameters + ---------- + gain_x : float + the horizontal damper gain in 1/turns (corresponding to a damping + rate of gain_x/2). + gain_y : float + the vertical damper gain in 1/turns (corresponding to a damping rate + of gain_x/2). + zeta_range : tuple + the range of zetas covered by the underlying slicer. + num_slices : int + the number of slices per bunch used by the underlying slicer. + filling_scheme : np.ndarray, optional + an array of zeros and ones representing the filling scheme. Only + needed for multi-bunch tracking. + bunch_selection : np.ndarray, optional + an array indicating which slot each bunch occupies in the filling + scheme. Only needed for multi-bunch tracking + bunch_spacing_zeta : float, optional + the bunch spacing in meters. Only needed for multi-bunch tracking + circumference : float, optional + the machine circumference. Only needed for multi-bunch tracking. + """ iscollective = True - def __init__(self, gain_x, gain_y, filling_scheme, zeta_range, num_slices, bunch_spacing_zeta, - circumference, bunch_selection=None, **kwargs): + def __init__(self, gain_x, gain_y, zeta_range, num_slices, + circumference=None, bunch_spacing_zeta=None, filling_scheme=None, + bunch_selection=None, **kwargs): self.gains = { 'px': gain_x, 'py': gain_y, From 156402a5fdb9c7cf6e86023399466baab243ac6f Mon Sep 17 00:00:00 2001 From: lgiacome Date: Fri, 20 Sep 2024 14:54:26 +0200 Subject: [PATCH 18/28] small fix to the collective monitor --- xfields/beam_elements/collective_monitor.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/xfields/beam_elements/collective_monitor.py b/xfields/beam_elements/collective_monitor.py index 8af468e1..d17f15af 100644 --- a/xfields/beam_elements/collective_monitor.py +++ b/xfields/beam_elements/collective_monitor.py @@ -36,7 +36,9 @@ class CollectiveMonitor(ElementWithSlicer): monitor_particles : Bool If True, the particle-by-particle data is monitored particle_monitor_mask : np.ndarray - Mask identifying the particles to be monitored + Mask identifying the particles to be monitored. If later on we try to + monitor a number of particles different than the length of the mask, an + error will be raised. 
flush_data_every: int Number of turns after which the data is saved to disk stats_to_store : list @@ -365,7 +367,7 @@ def _update_slice_buffer(self, particles): def _update_particle_buffer(self, particles): for stat in self.stats_to_store_particles: - val = getattr(particles, stat) + val = getattr(particles, stat)[self.particle_monitor_mask] self.particle_buffer[stat][self.i_turn % self.flush_data_every, :] = val From f908d9d13cfc1b73006df57b5f05742fd05680be Mon Sep 17 00:00:00 2001 From: lgiacome Date: Fri, 20 Sep 2024 14:55:59 +0200 Subject: [PATCH 19/28] handle an error --- xfields/beam_elements/collective_monitor.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/xfields/beam_elements/collective_monitor.py b/xfields/beam_elements/collective_monitor.py index d17f15af..14100b93 100644 --- a/xfields/beam_elements/collective_monitor.py +++ b/xfields/beam_elements/collective_monitor.py @@ -367,6 +367,10 @@ def _update_slice_buffer(self, particles): def _update_particle_buffer(self, particles): for stat in self.stats_to_store_particles: + if (self.particle_monitor_mask is not None + and len(self.particle_monitor_mask) != len(particles.x)): + raise ValueError('The length of the particle monitor mask is ' + 'different from the number of particles being tracked') val = getattr(particles, stat)[self.particle_monitor_mask] self.particle_buffer[stat][self.i_turn % self.flush_data_every, :] = val From 87e782346b10161117f60f32480669844f710667 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Fri, 20 Sep 2024 14:59:41 +0200 Subject: [PATCH 20/28] update collective monitor test --- tests/test_collective_monitor.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/test_collective_monitor.py b/tests/test_collective_monitor.py index a90c2fa2..e5650c8b 100644 --- a/tests/test_collective_monitor.py +++ b/tests/test_collective_monitor.py @@ -74,7 +74,6 @@ def test_bunch_monitor_hdf5(test_context): zeta_range=zeta_range, num_slices=num_slices, filling_scheme=filling_scheme, - bunch_numbers=bunch_numbers, bunch_spacing_zeta=bunch_spacing_zeta, ) @@ -196,7 +195,6 @@ def test_bunch_monitor_json(test_context): zeta_range=zeta_range, num_slices=num_slices, filling_scheme=filling_scheme, - bunch_numbers=bunch_numbers, bunch_spacing_zeta=bunch_spacing_zeta, ) @@ -290,7 +288,6 @@ def test_slice_monitor_hdf5(test_context): num_slices=num_slices, bunch_spacing_zeta=bunch_spacing_zeta, filling_scheme=filling_scheme, - bunch_numbers=bunch_numbers ) particles = xt.Particles( @@ -448,7 +445,6 @@ def test_slice_monitor_json(test_context): num_slices=num_slices, bunch_spacing_zeta=bunch_spacing_zeta, filling_scheme=filling_scheme, - bunch_numbers=bunch_numbers ) particles = xt.Particles( From 5f38dfcab39d955fdbfa829f70be073c45dc8ff7 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Fri, 20 Sep 2024 15:13:16 +0200 Subject: [PATCH 21/28] cosmetics --- xfields/beam_elements/collective_monitor.py | 8 ++++---- xfields/beam_elements/waketracker/waketracker.py | 9 +-------- 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/xfields/beam_elements/collective_monitor.py b/xfields/beam_elements/collective_monitor.py index 14100b93..c8c0e8d9 100644 --- a/xfields/beam_elements/collective_monitor.py +++ b/xfields/beam_elements/collective_monitor.py @@ -381,7 +381,7 @@ def flush_buffer_to_file_hdf5(buffer, filename): if not os.path.exists(filename): with h5py.File(filename, 'w') as f: for key in buffer: - if not type(buffer[key]) == dict: + if type(buffer[key]) is not dict: if len(buffer[key].shape) == 1: 
f.create_dataset(str(key), data=buffer[key], @@ -415,7 +415,7 @@ def flush_buffer_to_file_hdf5(buffer, filename): else: with h5py.File(filename, 'a') as f: for key in buffer: - if not type(buffer[key]) == dict: + if type(buffer[key]) is not dict: # this is for the particle buffer f[str(key)].resize((f[str(key)].shape[0] + buffer[key].shape[0]), axis=0) @@ -436,7 +436,7 @@ def flush_buffer_to_file_json(buffer, filename): # lists buffer_lists = {} for key in buffer: - if not type(buffer[key]) == dict: + if type(buffer[key]) is not dict: # this is for the particle buffer buffer_lists[str(key)] = buffer[key].tolist() else: @@ -453,7 +453,7 @@ def flush_buffer_to_file_json(buffer, filename): with open(filename, 'r') as f: old_buffer = json.load(f) for key in buffer_lists: - if not type(buffer_lists[key]) == dict: + if not type(buffer_lists[key]) is not dict: # this is for the particle buffer old_buffer[key] = np.concatenate( (old_buffer[key], buffer_lists[key]), axis=0).tolist() diff --git a/xfields/beam_elements/waketracker/waketracker.py b/xfields/beam_elements/waketracker/waketracker.py index da413a35..841c6b71 100644 --- a/xfields/beam_elements/waketracker/waketracker.py +++ b/xfields/beam_elements/waketracker/waketracker.py @@ -1,13 +1,6 @@ -from typing import Tuple - import numpy as np -from scipy.constants import c as clight -from scipy.constants import e as qe -from scipy.interpolate import interp1d - import xobjects as xo -import xtrack as xt from ..element_with_slicer import ElementWithSlicer from .convolution import _ConvData @@ -122,7 +115,7 @@ def track(self, particles): # Use common slicer from parent class to measure all moments status = super().track(particles) - if status and status.on_hold == True: + if status and status.on_hold: return status for wf in self.components: From bed07cc0bbb28cfadee5f1dde51ab699abe4aa04 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Fri, 20 Sep 2024 15:18:29 +0200 Subject: [PATCH 22/28] cosmetics --- xfields/slicers/uniform.py | 1 - 1 file changed, 1 deletion(-) diff --git a/xfields/slicers/uniform.py b/xfields/slicers/uniform.py index 336737f8..187ced93 100644 --- a/xfields/slicers/uniform.py +++ b/xfields/slicers/uniform.py @@ -1,4 +1,3 @@ -from pathlib import Path import numpy as np from ..general import _pkg_root From fe3208045a348ddbd8d88a6cdc12ebde2f6bee11 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Mon, 14 Oct 2024 09:39:06 +0200 Subject: [PATCH 23/28] flush data to file only if file name is specified --- xfields/beam_elements/collective_monitor.py | 41 ++++++++++++--------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/xfields/beam_elements/collective_monitor.py b/xfields/beam_elements/collective_monitor.py index c8c0e8d9..6f6e7e0c 100644 --- a/xfields/beam_elements/collective_monitor.py +++ b/xfields/beam_elements/collective_monitor.py @@ -27,14 +27,15 @@ class CollectiveMonitor(ElementWithSlicer): Parameters ---------- - base_file_name : str - Base file name for the output files. monitor_bunches : Bool If True, the bunch-by-bunch data is monitored monitor_slices : Bool If True, the slice-by-slice data is monitored monitor_particles : Bool If True, the particle-by-particle data is monitored + base_file_name : str + Base file name for the output files. If it is not specified, the data + will not be saved to disk. particle_monitor_mask : np.ndarray Mask identifying the particles to be monitored. 
If later on we try to monitor a number of particles different than the length of the mask, an @@ -99,10 +100,10 @@ class CollectiveMonitor(ElementWithSlicer): } def __init__(self, - base_file_name, monitor_bunches, monitor_slices, monitor_particles, + base_file_name=None, particle_monitor_mask=None, flush_data_every=1, zeta_range=None, @@ -156,23 +157,24 @@ def __init__(self, self.flush_data_every = flush_data_every self.particle_monitor_mask = particle_monitor_mask - self._bunches_filename = (self.base_file_name + '_bunches.' + - self.extension) + if base_file_name is not None: + self._bunches_filename = (self.base_file_name + '_bunches.' + + self.extension) - if os.path.exists(self._bunches_filename) and monitor_bunches: - os.remove(self._bunches_filename) + if os.path.exists(self._bunches_filename) and monitor_bunches: + os.remove(self._bunches_filename) - self._slices_filename = (self.base_file_name + '_slices.' + - self.extension) + self._slices_filename = (self.base_file_name + '_slices.' + + self.extension) - if os.path.exists(self._slices_filename) and monitor_slices: - os.remove(self._slices_filename) + if os.path.exists(self._slices_filename) and monitor_slices: + os.remove(self._slices_filename) - self._particles_filename = (self.base_file_name + '_particles.' + - self.extension) + self._particles_filename = (self.base_file_name + '_particles.' + + self.extension) - if os.path.exists(self._particles_filename) and monitor_particles: - os.remove(self._particles_filename) + if os.path.exists(self._particles_filename) and monitor_particles: + os.remove(self._particles_filename) self.pipeline_manager = None @@ -221,7 +223,8 @@ def track(self, particles, _slice_result=None, _other_bunch_slicers=None): self._update_bunch_buffer(particles) - if self.i_turn % self.flush_data_every == 0: + if (self.i_turn % self.flush_data_every == 0 and + self.base_file_name is not None): self.flush_buffer_to_file_func( self.bunch_buffer, self._bunches_filename) self.bunch_buffer = self._init_bunch_buffer() @@ -232,7 +235,8 @@ def track(self, particles, _slice_result=None, _other_bunch_slicers=None): self._update_slice_buffer(particles) - if self.i_turn % self.flush_data_every == 0: + if (self.i_turn % self.flush_data_every == 0 and + self.base_file_name is not None): self.flush_buffer_to_file_func( self.slice_buffer, self._slices_filename) self.slice_buffer = self._init_slice_buffer() @@ -243,7 +247,8 @@ def track(self, particles, _slice_result=None, _other_bunch_slicers=None): self._update_particle_buffer(particles) - if self.i_turn % self.flush_data_every == 0: + if (self.i_turn % self.flush_data_every == 0 and + self.base_file_name is not None): self.flush_buffer_to_file_func( self.particle_buffer, self._particles_filename) self.particle_buffer = self._init_particle_buffer() From 67c0403b59feec3b25b5a0701e6249053f11fb72 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Mon, 14 Oct 2024 10:35:10 +0200 Subject: [PATCH 24/28] default to 1 slice per bunch --- tests/test_collective_monitor.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tests/test_collective_monitor.py b/tests/test_collective_monitor.py index e5650c8b..81081a3e 100644 --- a/tests/test_collective_monitor.py +++ b/tests/test_collective_monitor.py @@ -11,7 +11,6 @@ @for_all_test_contexts def test_bunch_monitor_hdf5(test_context): n_macroparticles = int(1e6) - num_slices = 10 zeta_range = (-1, 1) energy = 7e12 @@ -72,7 +71,6 @@ def test_bunch_monitor_hdf5(test_context): monitor_particles=False, flush_data_every=flush_data_every, 
zeta_range=zeta_range, - num_slices=num_slices, filling_scheme=filling_scheme, bunch_spacing_zeta=bunch_spacing_zeta, ) @@ -132,7 +130,6 @@ def test_bunch_monitor_hdf5(test_context): @for_all_test_contexts def test_bunch_monitor_json(test_context): n_macroparticles = int(1e6) - num_slices = 10 zeta_range = (-1, 1) energy = 7e12 @@ -193,7 +190,6 @@ def test_bunch_monitor_json(test_context): monitor_particles=False, flush_data_every=n_turns, zeta_range=zeta_range, - num_slices=num_slices, filling_scheme=filling_scheme, bunch_spacing_zeta=bunch_spacing_zeta, ) @@ -569,7 +565,6 @@ def test_slice_monitor_json(test_context): @for_all_test_contexts def test_particle_monitor_hdf5(test_context): n_macroparticles = int(1e2) - num_slices = 10 zeta_range = (-1, 1) particles = xt.Particles( @@ -595,7 +590,6 @@ def test_particle_monitor_hdf5(test_context): monitor_particles=True, flush_data_every=flush_data_every, zeta_range=zeta_range, - num_slices=num_slices, particle_monitor_mask=particle_monitor_mask, ) @@ -615,7 +609,6 @@ def test_particle_monitor_hdf5(test_context): @for_all_test_contexts def test_particle_monitor_json(test_context): n_macroparticles = int(1e2) - num_slices = 10 zeta_range = (-1, 1) particles = xt.Particles( @@ -641,7 +634,6 @@ def test_particle_monitor_json(test_context): monitor_particles=True, flush_data_every=flush_data_every, zeta_range=zeta_range, - num_slices=num_slices, particle_monitor_mask=particle_monitor_mask, ) From 1d91ea17fe1f08434725effbb14599262d67ea70 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Mon, 14 Oct 2024 10:35:33 +0200 Subject: [PATCH 25/28] update tests --- xfields/beam_elements/collective_monitor.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/xfields/beam_elements/collective_monitor.py b/xfields/beam_elements/collective_monitor.py index 6f6e7e0c..efbd3193 100644 --- a/xfields/beam_elements/collective_monitor.py +++ b/xfields/beam_elements/collective_monitor.py @@ -53,7 +53,8 @@ class CollectiveMonitor(ElementWithSlicer): zeta_range : Tuple Zeta range for each bunch used in the underlying slicer. num_slices : int - Number of slices per bunch used in the underlying slicer. + Number of slices per bunch used in the underlying slicer. It should be + specified if the slice-by-slice data is monitored. bunch_spacing_zeta : float Bunch spacing in meters. 
filling_scheme: np.ndarray @@ -107,7 +108,7 @@ def __init__(self, particle_monitor_mask=None, flush_data_every=1, zeta_range=None, - num_slices=None, + num_slices=1, bunch_spacing_zeta=None, filling_scheme=None, bunch_selection=None, @@ -458,10 +459,9 @@ def flush_buffer_to_file_json(buffer, filename): with open(filename, 'r') as f: old_buffer = json.load(f) for key in buffer_lists: - if not type(buffer_lists[key]) is not dict: + if type(buffer_lists[key]) is not dict: # this is for the particle buffer - old_buffer[key] = np.concatenate( - (old_buffer[key], buffer_lists[key]), axis=0).tolist() + old_buffer[key] = np.concatenate((old_buffer[key], buffer_lists[key]), axis=0).tolist() else: # this is for the bunch or slice buffer for subkey in buffer_lists[key]: From f74dd2731b497cb88ea530bf6ec3e2427c646772 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Mon, 14 Oct 2024 10:36:55 +0200 Subject: [PATCH 26/28] import h5py only if needed --- xfields/beam_elements/collective_monitor.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/xfields/beam_elements/collective_monitor.py b/xfields/beam_elements/collective_monitor.py index efbd3193..49537578 100644 --- a/xfields/beam_elements/collective_monitor.py +++ b/xfields/beam_elements/collective_monitor.py @@ -1,7 +1,6 @@ import numpy as np from .element_with_slicer import ElementWithSlicer -import h5py import json import os import xfields as xf @@ -383,6 +382,10 @@ def _update_particle_buffer(self, particles): def flush_buffer_to_file_hdf5(buffer, filename): + try: + import h5py + except ImportError: + raise ImportError('h5py is required to save the data in hdf5 format') # Iif the file does not exist, create it and write the buffer if not os.path.exists(filename): with h5py.File(filename, 'w') as f: From 6bd4d01bb55faa7a4dcadf7e633723c246d46fe3 Mon Sep 17 00:00:00 2001 From: lgiacome Date: Mon, 14 Oct 2024 11:42:05 +0200 Subject: [PATCH 27/28] fix test --- tests/test_element_with_slicer_filling_scheme.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_element_with_slicer_filling_scheme.py b/tests/test_element_with_slicer_filling_scheme.py index 747bda3a..92414d1a 100644 --- a/tests/test_element_with_slicer_filling_scheme.py +++ b/tests/test_element_with_slicer_filling_scheme.py @@ -92,7 +92,7 @@ def test_element_with_slicer_filling_scheme(buffer_round_trip, num_turns): rtol=0, atol=1e-12) xo.assert_allclose(slicer2.zeta_centers, - np.array([[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9]]), + np.array([-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9]), rtol=0, atol=1e-12) xo.assert_allclose(slicer.zeta_range[0], -1, rtol=0, atol=1e-12) @@ -117,8 +117,8 @@ def test_element_with_slicer_filling_scheme(buffer_round_trip, num_turns): xo.assert_allclose(slicer2.num_particles, np.array([ - [ 5000., 5000., 5000., 5000., 5000., 5000., 5000., 5000., 5000., 5000.], - ]), + 5000., 5000., 5000., 5000., 5000., 5000., 5000., 5000., 5000., 5000.], + ), rtol=0, atol=1e-12) z_prof, prof = ele.moments_data.get_moment_profile('num_particles', i_turn=0) From d4e9d302261cc3db50fd3e588d54a2e863a7bbac Mon Sep 17 00:00:00 2001 From: lgiacome Date: Mon, 14 Oct 2024 11:45:19 +0200 Subject: [PATCH 28/28] add bash script to run mpi tests --- tests_mpi/run_all.sh | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 tests_mpi/run_all.sh diff --git a/tests_mpi/run_all.sh b/tests_mpi/run_all.sh new file mode 100644 index 00000000..b2fac628 --- /dev/null +++ b/tests_mpi/run_all.sh @@ -0,0 +1,2 @@ +mpiexec -n 3 
pytest -v test_collective_monitor_mpi.py +mpiexec -n 3 pytest -v test_transverse_damper_mpi.py
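
For reference, a minimal usage sketch of the CollectiveMonitor introduced in this series, configured along the lines of the tests above. This is an illustration only: it assumes CollectiveMonitor is importable from the top-level xfields namespace, and the output file name, turn count, slice number and particle distribution are made up for the example.

    import numpy as np
    import xtrack as xt
    import xfields as xf

    # Single-bunch case: no filling scheme is passed, so a single slot is assumed.
    monitor = xf.CollectiveMonitor(
        base_file_name='monitor_out',   # base name for the output files (omit to keep data in memory only)
        backend='hdf5',
        monitor_bunches=True,
        monitor_slices=True,
        monitor_particles=False,
        flush_data_every=10,            # buffers are written to disk every 10 turns
        zeta_range=(-1, 1),
        num_slices=20,                  # only needed when slice-by-slice data is monitored
    )

    particles = xt.Particles(p0c=7e12,
                             zeta=np.random.uniform(-1, 1, 100_000))

    for _ in range(100):
        monitor.track(particles)        # updates the buffers and flushes them when due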