diff --git a/exaspim_control/exa_spim_acquisition.py b/exaspim_control/exa_spim_acquisition.py index ac30b53..0663a4f 100644 --- a/exaspim_control/exa_spim_acquisition.py +++ b/exaspim_control/exa_spim_acquisition.py @@ -9,7 +9,7 @@ from voxel.writers.data_structures.shared_double_buffer import SharedDoubleBuffer from voxel.acquisition.acquisition import Acquisition import inflection - +from nidaqmx.constants import AcquisitionType as AcqType class ExASPIMAcquisition(Acquisition): @@ -31,6 +31,20 @@ def __init__(self, instrument: Instrument, config_filename: str): self.transfer_threads = dict() self.stop_engine = Event() # Event to flag a stop in engine + def _verify_acquisition(self): + """Check that chunk sizes are the same for all writers""" + super()._verify_acquisition() + + chunksize = None + for device in self.writers.values(): + for writer in device.values(): + if chunksize is None: + chunksize = writer.chunk_count_px + else: + if writer.chunk_count_px != chunksize: + raise ValueError(f'Chunksizes of writers must all be {chunksize}') + self.chunksize = chunksize # define chunksize to be used later in acquisition + def run(self): filenames = dict() @@ -92,19 +106,18 @@ def run(self): setattr(device, setting, value) self.log.info(f'setting {setting} for {device_type} {device_name} to {value}') - # fixme: is this right? 
- # for daq_name, daq in self.instrument.daqs.items(): - # if daq.tasks.get('ao_task', None) is not None: - # daq.add_task('ao') - # daq.generate_waveforms('ao', tile_channel) - # daq.write_ao_waveforms() - # if daq.tasks.get('do_task', None) is not None: - # daq.add_task('do') - # daq.generate_waveforms('do', tile_channel) - # daq.write_do_waveforms() - # if daq.tasks.get('co_task', None) is not None: - # pulse_count = daq.tasks['co_task']['timing'].get('pulse_count', None) - # daq.add_task('co', pulse_count) + for daq_name, daq in self.instrument.daqs.items(): + if daq.tasks.get('ao_task', None) is not None: + daq.add_task('ao') + daq.generate_waveforms('ao', tile_channel) + daq.write_ao_waveforms() + if daq.tasks.get('do_task', None) is not None: + daq.add_task('do') + daq.generate_waveforms('do', tile_channel) + daq.write_do_waveforms() + if daq.tasks.get('co_task', None) is not None: + pulse_count = self.chunksize + daq.add_task('co', pulse_count) # run any pre-routines for all devices for device_name, routine_dictionary in getattr(self, 'routines', {}).items(): @@ -135,12 +148,6 @@ def run(self): self.log.info(f'starting camera and writer for {camera_id}') self.acquisition_threads[camera_id].start() - #################### IMPORTANT #################### - # for the exaspim, the NIDAQ is the master, so we start this last - for daq_id, daq in self.instrument.daqs.items(): - self.log.info(f'starting daq {daq_id}') - daq.start() - # wait for the cameras/writers to finish for camera_id in self.acquisition_threads: self.log.info(f'waiting for camera {camera_id} to finish') @@ -196,7 +203,7 @@ def engine(self, tile, filename, camera, writers, processes): writer.filename = filename writer.channel = tile['channel'] - chunk_sizes[writer_name] = writer.chunk_count_px + # chunk_sizes[writer_name] = writer.chunk_count_px chunk_locks[writer_name] = Lock() img_buffers[writer_name] = SharedDoubleBuffer( (writer.chunk_count_px, camera.roi['height_px'], camera.roi['width_px']), 
@@ -230,22 +237,30 @@ def engine(self, tile, filename, camera, writers, processes): frame_index = 0 last_frame_index = tile['frame_count_px'] - 1 - # TODO: these variables aren't being used for anything? - # chunk_count = math.ceil(tile['frame_count_px'] / chunk_size) - # remainder = tile['frame_count_px'] % chunk_size - # last_chunk_size = chunk_size if not remainder else remainder + chunk_count = math.ceil(tile['frame_count_px'] / self.chunksize) + remainder = tile['frame_count_px'] % self.chunksize + last_chunk_size = self.chunksize if not remainder else remainder # Images arrive serialized in repeating channel order. for stack_index in range(tile['frame_count_px']): if self.stop_engine.is_set(): break - chunk_indexes = {writer_name: stack_index % chunk_size for writer_name, chunk_size in chunk_sizes.items()} - # Start a batch of pulses to generate more frames and movements. # TODO: Is this a TODO? - - # TODO: these variables aren't being used for anything? - # if chunk_index == 0: - # chunks_filled = math.floor(stack_index / chunk_size) - # remaining_chunks = chunk_count - chunks_filled + chunk_index = stack_index % self.chunksize + # Start a batch of pulses to generate more frames and movements. 
+ if chunk_index == 0: + chunks_filled = math.floor(stack_index / self.chunksize) + remaining_chunks = chunk_count - chunks_filled + num_pulses = last_chunk_size if remaining_chunks == 1 else self.chunksize + for daq_name, daq in self.instrument.daqs.items(): + daq.co_task.timing.cfg_implicit_timing(sample_mode=AcqType.FINITE, + samps_per_chan=num_pulses) + #################### IMPORTANT #################### + # for the exaspim, the NIDAQ is the master, so we start this last + for daq_id, daq in self.instrument.daqs.items(): + self.log.info(f'starting daq {daq_id}') + for task in [daq.ao_task, daq.do_task, daq.co_task]: + if task is not None: + task.start() # Grab camera frame current_frame = camera.grab_frame() @@ -257,10 +272,10 @@ def engine(self, tile, filename, camera, writers, processes): # Dispatch either a full chunk of frames or the last chunk, # which may not be a multiple of the chunk size. - for writer_name, writer in writers.items(): - if chunk_indexes[writer_name] + 1 == chunk_sizes[writer_name] or stack_index == last_frame_index: - while not writer.done_reading.is_set() and not self.stop_engine.is_set(): - time.sleep(0.001) + if chunk_index + 1 == self.chunksize or stack_index == last_frame_index: + while not writer.done_reading.is_set() and not self.stop_engine.is_set(): + time.sleep(0.001) + for writer_name, writer in writers.items(): # Dispatch chunk to each StackWriter compression process. # Toggle double buffer to continue writing images. 
# To read the new data, the StackWriter needs the name of diff --git a/exaspim_control/experimental/exa_spim_view.py b/exaspim_control/experimental/exa_spim_view.py index 0645c77..12503e9 100644 --- a/exaspim_control/experimental/exa_spim_view.py +++ b/exaspim_control/experimental/exa_spim_view.py @@ -8,7 +8,8 @@ import os import yaml from napari.qt.threading import thread_worker -import pyclesperanto as cle +#import pyclesperanto as cle +from voxel.processes.gpu.gputools.downsample_2d import DownSample2D import inflection RESOURCES_DIR = (Path(os.path.dirname(os.path.realpath(__file__)))) @@ -39,14 +40,9 @@ def grab_frames(self, camera_name, frames=float("inf")): # TODO: Do we want to import from exaspim what to use? multiscale = [frame] - input_frame = cle.push(frame) for binning in range(2,6): # TODO: variable or get from somewhere? - downsampled_frame = cle.scale(input_frame, - factor_x=1 / binning, - factor_y=1 / binning, - device=cle.select_device(), - resize=True) - multiscale.append(cle.pull(downsampled_frame)) + downsampled_frame = DownSample2D(binning=binning).run(frame) + multiscale.append(downsampled_frame) yield multiscale, camera_name # wait until unlocking camera to be able to quit napari thread i += 1 diff --git a/exaspim_control/experimental/gui_config.yaml b/exaspim_control/experimental/gui_config.yaml index 63f9329..3ed2ee9 100644 --- a/exaspim_control/experimental/gui_config.yaml +++ b/exaspim_control/experimental/gui_config.yaml @@ -60,6 +60,12 @@ device_widgets: CH488: 1.0 CH561: 1.0 CH639: 1.0 + tasks.ao_task.ports.488 nm.parameters.max_volts.channels: + CH488: 5.0 + tasks.ao_task.ports.561 nm.parameters.max_volts.channels: + CH561: 5.0 + tasks.ao_task.ports.639 nm.parameters.max_volts.channels: + CH639: 5.0 operation_widgets: volume_widget: init: @@ -71,12 +77,16 @@ operation_widgets: lasers: [488 nm] cameras: [vp-151mx] CH561: - lasers: [561nm] + lasers: [561 nm] cameras: [vp-151mx] CH639: - lasers: [639nm] + lasers: [639 nm] cameras: 
[vp-151mx] settings: cameras: [binning] lasers: [power_setpoint_mw] - filter_wheels: [filter] \ No newline at end of file + +# I think livestream tasks will be different if no pulse_count is specifies in acquisition tasks? +#livestream_tasks: +# pcie-6738: +# tasks: diff --git a/exaspim_control/experimental/instrument.yaml b/exaspim_control/experimental/instrument.yaml index 0283f99..04cb3e0 100644 --- a/exaspim_control/experimental/instrument.yaml +++ b/exaspim_control/experimental/instrument.yaml @@ -17,7 +17,7 @@ instrument: driver: voxel.devices.camera.vieworks_egrabber module: Camera init: - id: MP151BBX006 + id: MP151BBX050 settings: exposure_time_ms: 10.0 pixel_type: mono16 @@ -30,9 +30,8 @@ instrument: mode: 'on' polarity: rising source: external - pixel_type: mono16 bit_packing_mode: msb - binning: '1' + binning: 1 488 nm: type: laser driver: voxel.devices.lasers.simulated @@ -221,8 +220,8 @@ instrument: max_volts: channels: CH488: 5.0 - CH561: 5.0 - CH639: 5.0 + CH561: 0.0 + CH639: 0.0 min_volts: channels: CH488: 0.0 @@ -246,9 +245,9 @@ instrument: CH639: 603.0 max_volts: channels: - CH488: 5.0 + CH488: 0.0 CH561: 5.0 - CH639: 5.0 + CH639: 0.0 min_volts: channels: CH488: 0.0 @@ -272,8 +271,8 @@ instrument: CH639: 603.0 max_volts: channels: - CH488: 5.0 - CH561: 5.0 + CH488: 0.0 + CH561: 0.0 CH639: 5.0 min_volts: channels: