diff --git a/install/linux/usr/share/odemis/sim/meteor-fibsem-sim.odm.yaml b/install/linux/usr/share/odemis/sim/meteor-fibsem-sim.odm.yaml index 8628350c36..74448ef868 100644 --- a/install/linux/usr/share/odemis/sim/meteor-fibsem-sim.odm.yaml +++ b/install/linux/usr/share/odemis/sim/meteor-fibsem-sim.odm.yaml @@ -84,10 +84,14 @@ METEOR-FIBSEM-Sim: { # Grid centers in SEM range # Adjusted so that at init (0,0,0), it's at the Grid 1. SAMPLE_CENTERS: {"GRID 1": {'x': 0, 'y': 0, 'z': 32.e-3}, "GRID 2": {'x': 5.0e-3, 'y': 0, 'z': 32.e-3}}, + # Mirroring values between SEM - METEOR + POS_COR: [0.0253126, 0.0024916], CALIB: {"version": "tfs_3", - "dx": 0.0506252, "dy": 0.0049832, - "Sample pre-tilt": 0.6108652381980153, # 35° - "SEM-Eucentric-Focus": 7.0e-3}, # aqulios=7.0e-3, hydra=4.0e-3 + "pre-tilt": 0.6108652381980153, # 35° + "SEM-Eucentric-Focus": 7.0e-3, # aqulios=7.0e-3, hydra=4.0e-3 + "use_linked_sem_focus_compensation": false, + "use_3d_transforms": false, + "use_scan_rotation": false,}, FAV_FM_POS_ACTIVE: {"rx": 0.12213888553625313 , "rz": 3.141592653589793}, # 7° - 270° FAV_SEM_POS_ACTIVE: {"rx": 0.6108652381980153, "rz": 0}, # pre-tilt 35° FAV_MILL_POS_ACTIVE: {"rx": 0.314159, "rz": 0}, # Note that milling angle (rx) can be changed per session diff --git a/install/linux/usr/share/odemis/sim/meteor-tfs3-sim.odm.yaml b/install/linux/usr/share/odemis/sim/meteor-tfs3-sim.odm.yaml index 6c681064a5..0d480a8a5d 100644 --- a/install/linux/usr/share/odemis/sim/meteor-tfs3-sim.odm.yaml +++ b/install/linux/usr/share/odemis/sim/meteor-tfs3-sim.odm.yaml @@ -67,7 +67,10 @@ CALIB: { "version": "tfs_3", "dx": 0.0506252, "dy": 0.0049832, - "Sample pre-tilt": 0.6108652381980153, + "pre-tilt": 0.6108652381980153, + "use_linked_sem_focus_compensation": false, + "use_3d_transforms": false, + "use_scan_rotation": false, }, # Active tilting (rx) & rotation(rz) angles positions when switching between SEM & FM. 
FAV_FM_POS_ACTIVE: {"rx": 0.29670597283903605 , "rz": 3.141592653589793}, # 17° - 180° diff --git a/install/linux/usr/share/odemis/sim/sparc2-ephemeron-sim.odm.yaml b/install/linux/usr/share/odemis/sim/sparc2-ephemeron-sim.odm.yaml new file mode 100644 index 0000000000..077bdb13af --- /dev/null +++ b/install/linux/usr/share/odemis/sim/sparc2-ephemeron-sim.odm.yaml @@ -0,0 +1,381 @@ +# Configuration for a simulated SPARC v2 system with a EBIC detector which must +# be independently configured (instead of it being a simple detector on by the +# SEM scan board) +"SPARC2 Independent EBIC Ephemeron": { + class: Microscope, + role: sparc2, +} + +# Light (lamp with known spectrum) +"Calibration Light": { + class: light.Light, + role: "brightlight", + power_supplier: "Power Control Unit", + affects: ["VisNIR Camera", "VisNIR Spectrometer"], +} + +"Power Control Unit": { + class: powerctrl.PowerControlUnit, + role: "power-control", + init: { + port: "/dev/fake", # for simulator + pin_map: {"Calibration Light": 0, + "CL PMT control unit": 3, + "VisNIR Camera": 4, + "Spectrograph": 6, + "Optical Actuators": 2}, + delay: { # Time it takes before a component is accessible + "Calibration Light": 3, # s, the light takes some time to actually turn on + "CL PMT control unit": 0, + "VisNIR Camera": 1, + "Spectrograph": 1, + "Optical Actuators": 1 + }, + init: { # Which component to power on from the very beginning (to save time) + "VisNIR Camera": True, + "Spectrograph": True, + "Optical Actuators": True + }, + } +} + +# Can simulate a SEM connected to a DAQ board by using the simulated NI card. 
+# Need to install it with: +# nidaqmxconfig --import ni-pci6363-sim.nce --replace +"SEM Scan Interface": { + class: semnidaq.AnalogSEM, + role: null, + init: { + device: "Dev1", + multi_detector_min_period: 2.e-6, # s, to limit sampling frequency when acquiring with multiple detectors (and reduce cross-talk) + }, + # more detectors can be added, if necessary + children: { + scanner: "SEM E-beam", + detector0: "SEM Detector", + detector1: "CL PMT", + }, +} + +# Connect: +# X -> AO 0 +# Y -> AO 1 +# Ground -> AO GND +# P0.16 -> MightyEBIC +"SEM E-beam": { + role: e-beam, + init: { + channels: [0, 1], + # On Delmic scanning box v2, the voltage is x2, so need to specify twice smaller values than needed. + max_res: [4096, 3072], # px, to force 4:3 ratio + limits: [[-4.85, 4.85], [-3.64, 3.64]], # V + park: [-5, -5], # V + scan_active_delay: 1.e-3, # s, time to wait between setting "external" and starting to scan + + settle_time: 120.e-6, # s + hfw_nomag: 0.2756, # m + # output ports -> [high_auto, high_enabled, prop_name] + # * high_auto: True = high when scanning, False = high when parked + # * high_enabled: True = high when property set to True, False = high when set to False + # * prop_name: name of the property to control it (null to not show any property) + # Digital output port mapping on the Delmic scanning box v2: + # 0 = Relay + # 1 = Open drain output (Y0.0) + # 2 = Digital Out 1 + # 3 = Digital Out 0 + # 4 = Status led + scanning_ttl: { + 3: [True, True, "external"], # High when scanning, High when VA set to True + 4: [True, True, null], # status LED + }, + # Pixel signal to synchronize with the cameras + image_ttl: { + pixel: { + ports: [16], # P0.16 is (confusingly) noted as PFI0/P1.0 on the BNC-2110 + inverted: [True], + affects: ["EBIC Detector",], + }, + }, + }, + affects: ["SEM Detector", "VisNIR Camera", "VisNIR Spectrometer", "EBIC Detector", "CL Detector"] # affects the CCD in case of cathodoluminescence +} + +# Must be connected on AI1/AI9 
(differential) +"SEM Detector": { # aka ETD + # role: null, # Needs to be null if using "SEM Detector Full" + role: se-detector, + init: { + channel: 1, + limits: [-3, 3], # V + }, +} + +# If not used by the rest of the system, it's fine. +# Must be connected on AI0/AI8 (differential) +"CL PMT": { # the raw detector of the PMT + role: null, + init: { + channel: 0, + limits: [-2.5, 2.5], # V + }, +} + +"CL PMT control unit": { + class: pmtctrl.PMTControl, + role: null, + power_supplier: "Power Control Unit", + init: { + port: "/dev/fake", # for simulator + prot_time: 0.0002, # s + prot_curr: 50.e-6, # A + } +} + +"CL Detector": { + class: pmtctrl.PMT, + role: cl-detector, + init: { + settle_time: 0.1, # s, time to wait before detector is ready + }, + dependencies: { + "detector": "CL PMT", + "pmt-control": "CL PMT control unit", + }, +} + +# A detector which is independent from the DAQ board. Though in reality, it's +# synchronized on the "newPixel" TTL signal of the DAQ board scanner. +"EBIC Detector": { + class: ephemeron.MightyEBIC, + role: ebic-detector, + init: { + channel: 0, # should correspond to the EBIC channel on the MightyEBIC + url: "fake", # simulated + }, +} + +# In reality, this is a Zyla, but you need libandor3-dev to simulate an AndorCam3 +# Depending exactly on the configuration, it might also be used for spectrometer +"VisNIR Camera": { + class: andorcam2.AndorCam2, + role: ccd, + power_supplier: "Power Control Unit", + init: { + device: "fake", + image: "sparc-ar-mirror-align.h5", # only for simulator + }, +} + +"VisNIR Spectrometer": { + class: spectrometer.CompositedSpectrometer, + role: spectrometer, + dependencies: {detector: "VisNIR Camera", spectrograph: "Spectrograph"}, + init: { + transp: [1, -2], # only applied to the spectrometer data (not raw CCD) + }, +} + +"Spectrograph": { + class: andorshrk.Shamrock, + role: spectrograph, + power_supplier: "Power Control Unit", + init: { + device: "fake", + slits: {1: "slit-in", # INPUT_SIDE + }, 
+ bands: { # The filters in the filter-wheel + 1: [320.e-9, 400.e-9], # pos -> m,m + 2: [400.e-9, 450.e-9], + 3: [500.e-9, 550.e-9], + 4: [550.e-9, 600.e-9], + 6: "pass-through" + }, + fstepsize: 10.9e-6, # m, size of one focus step (very roughly) + rng: {"focus": [10.9e-6, 4.9e-3]}, # the hardware has an issue, and cannot go further than 4.9mm (instead of the standard 6.5 mm) + }, + affects: ["VisNIR Camera", "VisNIR Spectrometer"], +} + +"Spectrograph focus": { + class: actuator.MultiplexActuator, + role: "focus", + dependencies: {"z": "Spectrograph"}, + init: { + axes_map: {"z": "focus"}, + }, + affects: ["VisNIR Camera", "VisNIR Spectrometer"], +} + +# Provide the mirror (aka port selector) of the spectrograph as a separate component +"Spec CCD Flipper": { + class: actuator.FixedPositionsActuator, + role: "spec-det-selector", + dependencies: {"rx": "Spectrograph"}, + init: { + axis_name: "flip-out", + positions: { + 0: [], # No detector on this position + 1.5707963267948966: ["VisNIR Camera", "VisNIR Spectrometer"], + }, + }, + affects: ["VisNIR Camera", "VisNIR Spectrometer"], +} + +# Provide the filter wheel of the spectrograph as a separate component +"Spec Filter Wheel": { + class: actuator.MultiplexActuator, + role: filter, + dependencies: {"band": "Spectrograph"}, + init: { + axes_map: {"band": "band"}, + }, + affects: ["VisNIR Camera", "VisNIR Spectrometer"], +} + +"Optical Path Properties": { + class: static.OpticalLens, + role: lens, + # Standard mirror config + init: { + mag: 0.34, # ratio + na: 0.2, # ratio, numerical aperture + ri: 1.0, # ratio, refractive index + pole_pos: [458, 519], # (px, px), position of the pole (aka the hole in the mirror) + x_max: 13.25e-3, # m, the distance between the parabola origin and the cutoff position + hole_diam: 0.6e-3, # m, diameter the hole in the mirror + focus_dist: 0.5e-3, # m, the vertical mirror cutoff, iow the min distance between the mirror and the sample + parabola_f: 2.5e-3, # m, parabola_parameter=1/4f 
+ rotation: -1.570796326795, # rad, 90° rotation between optical axis and SEM Y axis + }, + affects: ["VisNIR Camera", "VisNIR Spectrometer"] +} + +# Controller for the motors moving the various parts of the optical box +"Optical Actuators": { + class: tmcm.TMCLController, + role: null, + power_supplier: "Power Control Unit", + init: { + port: "/dev/fake6", + address: null, + axes: ["l1", "l2", "cl-sel", "fw", "slit"], + # These values are adapted to make the simulator roughly the same speed + ustepsize: [25.1e-9, 25.1e-9, 26.1e-9, 3.392e-5, 5.e-9], # m/µstep, excepted for the fw: rad/µstep + refproc: "Standard", + refswitch: {"l1": 0, "l2": 0, "cl-sel": 4, "fw": 4}, + inverted: ["l2"], + }, +} + +# The first lens of Plate 1, able to move along the whole range +"Lens1 Mover": { + class: actuator.MultiplexActuator, + role: "lens-mover", + dependencies: {"x": "Optical Actuators"}, + init: { + axes_map: {"x": "l1"}, + ref_on_init: ["x"], + }, + metadata: { + # Default position of the lens (can be improved by user) + FAV_POS_ACTIVE: {"x": 0.0045} # m + }, + affects: ["VisNIR Camera", "VisNIR Spectrometer"], +} + +# The second lens of Plate 1, either to working or parking position +"Lens2 Switch": { + class: actuator.FixedPositionsActuator, + role: "lens-switch", + dependencies: {"x": "Optical Actuators"}, + init: { + axis_name: "l2", + positions: { + 4.e-3: "on", + 0.002: "off", # completely out of the path + }, + }, + affects: ["VisNIR Camera", "VisNIR Spectrometer"], +} + +# Control the slit position to either fully-open or small (dependent on the spectrometer slit-in) +"Slit": { + class: actuator.FixedPositionsActuator, + role: "slit-in-big", + dependencies: {"x": "Optical Actuators"}, + init: { + axis_name: "slit", + positions: { + 0: "on", # fully opened + -0.0012: "off", # opening based on the small slit + }, + }, + affects: ["VisNIR Camera", "VisNIR Spectrometer"], +} + +# Mirror in Plate 2, selects between spectrometer and cl-detector +"CL Selector": { + class: 
actuator.FixedPositionsActuator, + role: "cl-det-selector", + dependencies: {"x": "Optical Actuators"}, + init: { + axis_name: "cl-sel", + positions: { + 0.003: ["CL Detector"], + 0.01: ["Spec CCD Flipper"] + }, + }, + affects: ["VisNIR Camera", "VisNIR Spectrometer", "CL Detector"], +} + +"CL Filter Wheel": { + class: actuator.FixedPositionsActuator, + role: "cl-filter", + dependencies: {"band": "Optical Actuators"}, + init: { + axis_name: "fw", + # It supports up to 8 filters + positions: { + # pos (rad) -> m,m + 0: [420.e-9, 460.e-9], # 1 + 0.785398: [500.e-9, 550.e-9], # 2 + 1.570796: "polariser", # 3 + 3.9269908: "pass-through", # 6 + }, + cycle: 6.283185, # position of ref switch (0) after a full turn + }, + affects: ["CL Detector"], +} + +# Controller for moving the 'Redux' stage +# Note: the S axis can _only_ be moved when the l axis is near the active/engaged position +"Mirror Actuators": { + class: tmcm.TMCLController, + role: "mirror", + init: { + port: "/dev/fake6", + address: null, + axes: ["s", "l"], + ustepsize: [1.e-6, 1.e-6], # m/µstep (big, for the simulator to move fast) + refproc: "Standard", + refswitch: {"s": 0, "l": 0}, + }, + metadata: { + # Default position of the mirror engaged (will be improved during alignment) + FAV_POS_ACTIVE: {"l": 70.e-3, "s": 5.e-3}, # m, m + }, +} + +# Internal component to convert between the referential of the mirror actuators +# and the X/Y referential of the SEM. Used by the mirror alignment functions. 
+"Mirror Actuators in XY referential": { + class: actuator.ConvertStage, + role: "mirror-xy", + dependencies: {"orig": "Mirror Actuators"}, + init: { + axes: ["l", "s"], # mapped respectively to X, Y + rotation: -1.0471975511965976, # rad (= -60°) + # Note: if you want to use absolute moves, add a "translation" too + }, +} + diff --git a/src/odemis/acq/align/tdct.py b/src/odemis/acq/align/tdct.py new file mode 100644 index 0000000000..e44c67ef84 --- /dev/null +++ b/src/odemis/acq/align/tdct.py @@ -0,0 +1,193 @@ +""" +Created on 8 Jan 2025 + +Copyright © 2025 Delmic + +This file is part of Odemis. + +Odemis is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License version 2 as published by the Free +Software Foundation. + +Odemis is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +Odemis. If not, see http://www.gnu.org/licenses/. +""" + +import os +import logging +import sys +from typing import Dict, List, Tuple, Any + +import numpy +import yaml + +from odemis import model + +# install from: https://github.com/patrickcleeve2/3DCT/blob/refactor +sys.path.append(f"{os.path.expanduser('~')}/development/3DCT") + +from tdct.correlation_v2 import run_correlation +from tdct.util import multi_channel_get_z_guass + +def _convert_das_to_numpy_stack(das: List[model.DataArray]) -> numpy.ndarray: + """Convert a list of DataArrays to a numpy stack. + Channels are stored as list dimensions, rather than data array dimensions. + Therefore, multi-channel images are stored as list[CTZYX, CTZYX, ...] where C=1 + and length of list is number of channels. 
+ :param das: list of meteor data arrays (supports 5D CTZYX, 3D ZYX, 2D YX arrays) + :return the data arrays reshapes to a 4D numpy array (CZYX)""" + arr = [] + for da in das: + if isinstance(da, model.DataArrayShadow): + da = da.getData() + + # convert to 3D ZYX + if da.ndim == 5: + if da.shape[0] != 1 or da.shape[1] != 1: + logging.warning(f"Only the first channel and time dimension will be used for 5D data array: {da.shape}") + # remove the channel, time dimensions + da = da[0, 0, :, :, :] + elif da.ndim == 2: + # expand to 3D ZYX + da = numpy.expand_dims(da, axis=0) + + assert da.ndim == 3, f"DataArray must be 3D ZYX, but is {da.shape}" + arr.append(da) + + return numpy.stack(arr, axis=0) + +def get_optimized_z_gauss(das: List[model.DataArray], x: int, y: int, z: int, show: bool = False) -> float: + """Get the best fitting z-coordinate for the given x, y coordinates. Supports multi-channel images. + :param das: the data arrays (CTZYX, ZYX, or YX), all arrays must have the same shape + :param x: the x-coordinate + :param y: the y-coordinate + :param z: the z-coordinate (initial guess) + :param show: show the plot for debugging + :return: the z-coordinate (optimized)""" + prev_z = z + prev_x, prev_y = x, y + + # fm_image must be 4D np.ndarray with shape (channels, z, y, x) + fm_image = _convert_das_to_numpy_stack(das) + + try: + # getzGauss can fail, so we need to catch the exception + zval, z, _ = multi_channel_get_z_guass(image=fm_image, x=x, y=y, show=show) + logging.info(f"Using Z-Gauss optimisation: {z}, previous z: {prev_z}") + + except RuntimeError as e: + logging.warning(f"Error in z-gauss optimisation: {e}, using previous z: {prev_z}") + z = prev_z + x, y = prev_x, prev_y + + return z + +def run_tdct_correlation(fib_coords: numpy.ndarray, + fm_coords: numpy.ndarray, + poi_coords: numpy.ndarray, + fib_image: model.DataArray, + fm_image: model.DataArray, + path: str) -> Dict[str, Any]: + """Run 3DCT Multi-point correlation between FIB and FM images. 
+ :param fib_coords: the FIB coordinates (n, (x, y)) (in pixels, origin at top left) + :param fm_coords: the FM coordinates (n, (x, y, z)) (in pixels, origin at top left) + :param poi_coords: the point of interest coordinates (1, (x, y, z)). Expects only one point of interest. + :param fib_image: the FIB image (YX) + :param fm_image: the FM image (CTZTX, CZYX or ZYX) + :param path: the path to save the results + :return: the correlation results + output: + error: + delta_2d: reprojection error between 3D and 2D coordinates + reprojected_3d: 3D coordinates reprojected to 2D + mean_absolute_error: mean absolute error of the transformation (x, y) + rms_error: root mean square error of the transformation + poi: list of transformed point of interest coordinates + image_px: coordinates in image pixels (0, 0 top left) + px: coordinates in microscope image pixels (0, 0 image center) + px_um: coordinates in microscope image meters (0, 0 image center) + transformation: + rotation_eulers: transformation rotation (eulers) + rotation_quaternion: transformation rotation (quaternion) + scale: transformation scale + translation_around_rotation_center: transformation translation + """ + + # fib coordinates need to be x, y, z for 3DCT + if fib_coords.shape[-1] == 2: + fib_coords = numpy.column_stack((fib_coords, numpy.zeros(fib_coords.shape[0]))) + + # coordinates need to be float32 for 3DCT + fib_coords = fib_coords.astype(numpy.float32) + fm_coords = fm_coords.astype(numpy.float32) + + # get first channel only, assume all channels are the same shape + if fm_image.ndim == 4: + fm_image = fm_image[0, :, :, :] + if fm_image.ndim == 5: + fm_image = fm_image[0, 0, :, :, :] + + # get rotation center + halfmax_dim = int(max(fm_image.shape) * 0.5) + rotation_center = (halfmax_dim, halfmax_dim, halfmax_dim) + + # get fib pixel size (meters) + fib_pixel_size = fib_image.metadata[model.MD_PIXEL_SIZE][0] + + # fib image shape minus metadata, fib_pixelsize (microns), fm_image_shape + 
image_props = [fib_image.shape, fib_pixel_size * 1e6, fm_image.shape] + + assert fm_coords.dtype == numpy.float32, "FM coordinates must be float32" + assert fib_coords.dtype == numpy.float32, "FIB coordinates must be float32" + assert fm_coords.shape[-1] == 3, "FM coordinates must be 3D (x, y, z)" + assert fib_coords.shape[-1] == 3, "FIB coordinates must be 3D (x, y, z)" + assert fib_coords.shape == fm_coords.shape, "FIB and FM coordinates must have the same shape" + assert fib_image.ndim == 2, "FIB Image must be 2D" + assert fm_image.ndim == 3, "FM Image must be 3D" + assert fib_pixel_size is not None, "FIB Pixel Size must be set" + + logging.debug( + f"Running 3DCT correlation with FIB image shape: {fib_image.shape}, FM image shape: {fm_image.shape}" + ) + + # run correlation + correlation_results = run_correlation( + fib_coords=fib_coords, + fm_coords=fm_coords, + poi_coords=poi_coords, + image_props=image_props, + rotation_center=rotation_center, + path=path, + ) + + return correlation_results + +def get_reprojected_poi_coordinate(correlation_results: dict) -> Tuple[float, float]: + """Get the the point of interest coordinate from correlation data + and convert from micrometers to meters in the microscope image coordinate system. + The coordinate is centred at the image centre (x+ -> right, y+ -> up). + :param correlation_results: the correlation results + :return: the point of interest coordinate in meters + """ + # get the point of interest coordinate (in microscope coordinates, in metres) + poi_coord = correlation_results["output"]["poi"][0]["px_um"] + poi_coord = (poi_coord[0] * 1e-6, poi_coord[1] * 1e-6) + return poi_coord + +def parse_3dct_yaml_file(path: str) -> Tuple[float, float]: + """Parse the 3DCT yaml file and extract the point of interest (POI) + in microscope image coordinates (um). Convert the coordinates to metres. + Note: only the first POI is extracted. + :param path: path to the 3DCT yaml file. 
+ :return: The point of interest in microscope image coordinates (metres, centred at the image centre). + """ + with open(path, "r") as f: + data = yaml.safe_load(f) + + pt = get_reprojected_poi_coordinate(data["correlation"]) + + return pt diff --git a/src/odemis/acq/align/test/tdct_test.py b/src/odemis/acq/align/test/tdct_test.py new file mode 100644 index 0000000000..97ddabb716 --- /dev/null +++ b/src/odemis/acq/align/test/tdct_test.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +""" +Created on 12 Feb 2025 + +Copyright © Delmic + +This file is part of Odemis. + +Odemis is free software: you can redistribute it and/or modify it under the terms +of the GNU General Public License version 2 as published by the Free Software +Foundation. + +Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +Odemis. If not, see http://www.gnu.org/licenses/. 
+""" +import os +import unittest + +import numpy +from odemis import model +from odemis.acq.align import tdct + +RESULTS_PATH = os.path.join(os.getcwd(), "correlation_data.yaml") + +class TestTDCT(unittest.TestCase): + + def tearDown(self): + if os.path.exists(RESULTS_PATH): + os.remove(RESULTS_PATH) + + def test_convert_das_to_numpy_stack(self): + """Test the conversion of DataArrays to numpy stack""" + nc, nz, ny, nx = 3, 10, 1000, 1000 + data_2d = numpy.random.random((ny, nx)) + data_3d = numpy.random.random((nz, ny, nx)) + data_5d = numpy.random.random((1, 1, nz, ny, nx)) + + # Test 2D input + da_2d = model.DataArray(data_2d) + result_2d = tdct._convert_das_to_numpy_stack([da_2d]) + self.assertEqual(result_2d.shape, (1, 1, ny, nx)) + self.assertEqual(result_2d.ndim, 4) + + # Test 3D input + da_3d = model.DataArray(data_3d) + result_3d = tdct._convert_das_to_numpy_stack([da_3d]) + self.assertEqual(result_3d.shape, (1, nz, ny, nx)) + self.assertEqual(result_3d.ndim, 4) + + # Test 5D input + da_5d = model.DataArray(data_5d) + result_5d = tdct._convert_das_to_numpy_stack([da_5d]) + self.assertEqual(result_5d.shape, (1, nz, ny, nx)) + self.assertEqual(result_5d.ndim, 4) + + # Test multiple channels + result_multi_3d = tdct._convert_das_to_numpy_stack([da_3d, da_3d, da_3d]) + self.assertEqual(result_multi_3d.shape, (nc, nz, ny, nx)) + self.assertEqual(result_multi_3d.ndim, 4) + + result_multi_5d = tdct._convert_das_to_numpy_stack([da_5d, da_5d, da_5d]) + self.assertEqual(result_multi_5d.shape, (nc, nz, ny, nx)) + self.assertEqual(result_multi_5d.ndim, 4) + + # Test invalid dimensions + data_1d = numpy.random.random(nx) + da_1d = model.DataArray(data_1d) + with self.assertRaises(AssertionError): + tdct._convert_das_to_numpy_stack([da_1d]) + + def test_run_tdct_correlation(self): + """Run the TDCT correlation and validate the output""" + fib_coords = numpy.array( + [[100, 100], + [900, 100], + [900, 900], + [100, 900]], dtype=numpy.float32) + fm_coords = numpy.array( + 
[[100, 100, 3], + [1000, 100, 4], + [900, 1000, 8], + [100, 1000, 9]], dtype=numpy.float32) + poi_coords = numpy.array([[500, 500, 5]], dtype=numpy.float32) + fib_image = model.DataArray(numpy.zeros(shape=(1024, 1536)), + metadata={ + model.MD_PIXEL_SIZE: (100e-9, 100e-9)}) + fm_image = numpy.zeros(shape=(10, 1024, 1024)) + path = os.getcwd() + + # run the correlation + ret = tdct.run_tdct_correlation(fib_coords=fib_coords, + fm_coords=fm_coords, + poi_coords=poi_coords, + fib_image=fib_image, + fm_image=fm_image, + path=path) + + self.assertTrue(isinstance(ret, dict)) + self.assertTrue(RESULTS_PATH) + + # extract the poi coordinate from the correlation results + poi = tdct.get_poi_coordinate(ret) + + # check the poi coordinate match the expected value + poi_um = ret["output"]["poi"][0]["px_um"] + + self.assertEqual(len(poi), 2) + self.assertAlmostEqual(poi[0], poi_um[0] * 1e-6) + self.assertAlmostEqual(poi[1], poi_um[1] * 1e-6) diff --git a/src/odemis/acq/drift/__init__.py b/src/odemis/acq/drift/__init__.py index f6db843e3a..52a5236026 100644 --- a/src/odemis/acq/drift/__init__.py +++ b/src/odemis/acq/drift/__init__.py @@ -364,7 +364,10 @@ def align_reference_image( if (ref_image.ndim != 2 or new_image.ndim != 2 or ref_image.shape != new_image.shape): raise ValueError(f"Only equally sized 2D images are supported for alignment. 
{ref_image.shape}, {new_image.shape}") - shift_px = MeasureShift(ref_image, new_image, 10) + if ref_image.metadata[model.MD_PIXEL_SIZE] != new_image.metadata[model.MD_PIXEL_SIZE]: + raise ValueError("The images must have the same pixel size.") + + shift_px = MeasureShift(ref_image, new_image, 2) pixelsize = ref_image.metadata[model.MD_PIXEL_SIZE] shift_m = (shift_px[0] * pixelsize[0], shift_px[1] * pixelsize[1]) diff --git a/src/odemis/acq/milling/millmng.py b/src/odemis/acq/milling/millmng.py index 28bb530717..92dfb9966e 100644 --- a/src/odemis/acq/milling/millmng.py +++ b/src/odemis/acq/milling/millmng.py @@ -46,30 +46,20 @@ from odemis.util import executeAsyncTask -class MillingTaskManager: +class TFSMillingTaskManager: """This class manages running milling tasks.""" - def __init__(self, future: Future, tasks: List[MillingTaskSettings]): + def __init__(self, future: Future, tasks: List[MillingTaskSettings], fib_stream: FIBStream): """ :param future: the future that will be executing the task :param tasks: The milling tasks to run (in order) """ - self.microscope = model.getComponent(role="fibsem") + self.fibsem = model.getComponent(role="fibsem") self.tasks = tasks - # for reference image alignment, - self.ion_beam = model.getComponent(role="ion-beam") - self.ion_det = model.getComponent(role="se-detector-ion") - self.ion_focus = model.getComponent(role="ion-focus") - - self.fib_stream = FIBStream( - name="FIB", - detector=self.ion_det, - dataflow=self.ion_det.data, - emitter=self.ion_beam, - focuser=self.ion_focus, - ) + # for reference image alignment + self.fib_stream = fib_stream self._future = future if future is not None: @@ -89,7 +79,7 @@ def cancel(self, future: Future) -> bool: return False future._task_state = CANCELLED future.running_subf.cancel() - self.microscope.stop_milling() + self.fibsem.stop_milling() logging.debug("Milling procedure cancelled.") return True @@ -98,11 +88,10 @@ def estimate_milling_time(self) -> float: Estimates the milling 
time for the given patterns. :return: (float > 0): the estimated time is in seconds """ - return self.microscope.estimate_milling_time() + return self.fibsem.estimate_milling_time() def run_milling(self, settings: MillingTaskSettings): """Run the milling task with the given settings. ThermoFisher implementation""" - microscope = self.microscope # get the milling settings milling_current = settings.milling.current.value @@ -113,12 +102,10 @@ def run_milling(self, settings: MillingTaskSettings): align_at_milling_current = settings.milling.align.value # get initial imaging settings - imaging_current = microscope.get_beam_current(milling_channel) - imaging_voltage = microscope.get_high_voltage(milling_channel) - imaging_fov = microscope.get_field_of_view(milling_channel) + imaging_current = self.fibsem.get_beam_current(milling_channel) + imaging_voltage = self.fibsem.get_high_voltage(milling_channel) + imaging_fov = self.fibsem.get_field_of_view(milling_channel) - # error management - e, ce = False, False try: # acquire a reference image at the imaging settings @@ -128,12 +115,12 @@ def run_milling(self, settings: MillingTaskSettings): ref_image = data[0] # set the milling state - microscope.clear_patterns() - microscope.set_default_patterning_beam_type(milling_channel) - microscope.set_high_voltage(milling_voltage, milling_channel) - microscope.set_beam_current(milling_current, milling_channel) - # microscope.set_field_of_view(milling_fov, milling_channel) # tmp: disable until matched in gui - microscope.set_patterning_mode(milling_mode) + self.fibsem.clear_patterns() + self.fibsem.set_default_patterning_beam_type(milling_channel) + self.fibsem.set_high_voltage(milling_voltage, milling_channel) + self.fibsem.set_beam_current(milling_current, milling_channel) + # self.fibsem.set_field_of_view(milling_fov, milling_channel) # tmp: disable until matched in gui + self.fibsem.set_patterning_mode(milling_mode) # acquire a new image at the milling settings and align if 
align_at_milling_current: @@ -145,49 +132,42 @@ def run_milling(self, settings: MillingTaskSettings): # draw milling patterns to microscope for pattern in settings.generate(): if isinstance(pattern, RectanglePatternParameters): - microscope.create_rectangle(pattern.to_json()) + self.fibsem.create_rectangle(pattern.to_dict()) else: raise NotImplementedError(f"Pattern {pattern} not supported") # TODO: support other patterns # estimate the milling time - estimated_time = microscope.estimate_milling_time() + estimated_time = self.fibsem.estimate_milling_time() self._future.set_end_time(time.time() + estimated_time) # start patterning (async) - microscope.start_milling() + self.fibsem.start_milling() # wait for milling to finish elapsed_time = 0 wait_time = 5 - while microscope.get_patterning_state() == "Running": + while self.fibsem.get_patterning_state() == "Running": with self._future._task_lock: if self._future.cancelled() == CANCELLED: raise CancelledError() - logging.info(f"Milling in progress... {elapsed_time} / {estimated_time}") + logging.debug(f"Milling in progress... 
elapsed time: {elapsed_time} s, estimated time: {estimated_time} s") time.sleep(wait_time) elapsed_time += wait_time except CancelledError as ce: - logging.info(f"Cancelled milling: {ce}") + logging.debug(f"Cancelled milling: {ce}") + raise except Exception as e: - logging.error(f"Error while milling: {e}") + logging.exception(f"Error while milling: {e}") + raise finally: # restore imaging state - microscope.set_beam_current(imaging_current, milling_channel) - microscope.set_high_voltage(imaging_voltage, milling_channel) - microscope.set_field_of_view(imaging_fov, milling_channel) - #microscope.set_channel(milling_channel) # TODO: expose on server - microscope.set_active_view(2) - microscope.clear_patterns() - - # we defer raising exceptions until we have restored the imaging state - # to avoid returning the microscope to user control at the milling current - if e: - raise e - if ce: - raise ce # future expects error to be raised + self.fibsem.set_beam_current(imaging_current, milling_channel) + self.fibsem.set_high_voltage(imaging_voltage, milling_channel) + self.fibsem.set_field_of_view(imaging_fov, milling_channel) + self.fibsem.clear_patterns() return def run(self): @@ -197,13 +177,13 @@ def run(self): self._future._task_state = RUNNING try: - for i, task in enumerate(self.tasks, 1): + for task in self.tasks: with self._future._task_lock: if self._future._task_state == CANCELLED: raise CancelledError() - logging.info(f"Running milling task {i}/{len(self.tasks)}: {task.name}") + logging.debug(f"Running milling task: {task.name}") self.run_milling(task) logging.debug("The milling completed") @@ -212,14 +192,14 @@ def run(self): logging.debug("Stopping because milling was cancelled") raise except Exception: - logging.exception("The milling failed") + logging.warning("The milling failed") raise finally: self._future._task_state = FINISHED # TODO: replace with run_milling_tasks_openfibsem -def run_milling_tasks(tasks: List[MillingTaskSettings]) -> Future: +def 
run_milling_tasks(tasks: List[MillingTaskSettings], fib_stream: FIBStream) -> Future: """ Run multiple milling tasks in order. :param tasks: List of milling tasks to be executed in order. @@ -228,7 +208,7 @@ def run_milling_tasks(tasks: List[MillingTaskSettings]) -> Future: # Create a progressive future with running sub future future = model.ProgressiveFuture() # create acquisition task - milling_task_manager = MillingTaskManager(future, tasks) + milling_task_manager = TFSMillingTaskManager(future, tasks, fib_stream) # add the ability of cancelling the future during execution future.task_canceller = milling_task_manager.cancel diff --git a/src/odemis/acq/milling/patterns.py b/src/odemis/acq/milling/patterns.py index 5079e3e32c..26c8526261 100644 --- a/src/odemis/acq/milling/patterns.py +++ b/src/odemis/acq/milling/patterns.py @@ -25,10 +25,10 @@ import math from abc import ABC, abstractmethod +from typing import List from odemis import model - class MillingPatternParameters(ABC): """Represents milling pattern parameters""" @@ -36,19 +36,19 @@ def __init__(self, name: str): self.name = model.StringVA(name) @abstractmethod - def to_json(self) -> dict: + def to_dict(self) -> dict: pass @staticmethod @abstractmethod - def from_json(data: dict): + def from_dict(data: dict): pass def __repr__(self): - return f"{self.to_json()}" + return f"{self.to_dict()}" @abstractmethod - def generate(self): + def generate(self) -> List['MillingPatternParameters']: """generate the milling pattern for the microscope""" pass @@ -65,7 +65,7 @@ def __init__(self, width: float, height: float, depth: float, rotation: float = self.center = model.TupleContinuous(center, unit="m", range=((-1e3, -1e3), (1e3, 1e3)), cls=(int, float)) self.scan_direction = model.StringEnumerated(scan_direction, choices=set(["TopToBottom", "BottomToTop", "LeftToRight", "RightToLeft"])) - def to_json(self) -> dict: + def to_dict(self) -> dict: """Convert the parameters to a json object""" return {"name": 
self.name.value, "width": self.width.value, @@ -79,7 +79,7 @@ def to_json(self) -> dict: } @staticmethod - def from_json(data: dict): + def from_dict(data: dict) -> 'RectanglePatternParameters': """Create a RectanglePatternParameters object from a json object""" return RectanglePatternParameters(width=data["width"], height=data["height"], @@ -89,12 +89,12 @@ def from_json(data: dict): scan_direction=data.get("scan_direction", "TopToBottom"), name=data.get("name", "Rectangle")) - def __repr__(self): - return f"{self.to_json()}" + def __repr__(self) -> str: + return f"{self.to_dict()}" - def generate(self): - """Generate a list of milling patterns for the microscope. - Note: the rectangle is a pattern that is always generated as a single pattern""" + def generate(self) -> List[MillingPatternParameters]: + """Generate a list of milling shapes for the microscope. + Note: the rectangle is a pattern that is always generated as a single shape""" return [self] class TrenchPatternParameters(MillingPatternParameters): @@ -108,7 +108,7 @@ def __init__(self, width: float, height: float, depth: float, spacing: float, ce self.spacing = model.FloatContinuous(spacing, unit="m", range=(1e-9, 900e-6)) self.center = model.TupleContinuous(center, unit="m", range=((-1e3, -1e3), (1e3, 1e3)), cls=(int, float)) - def to_json(self) -> dict: + def to_dict(self) -> dict: """Convert the parameters to a json object""" return {"name": self.name.value, "width": self.width.value, @@ -121,7 +121,7 @@ def to_json(self) -> dict: } @staticmethod - def from_json(data: dict): + def from_dict(data: dict) -> 'TrenchPatternParameters': """Create a TrenchPatternParameters object from a json object""" return TrenchPatternParameters(width=data["width"], height=data["height"], @@ -130,11 +130,11 @@ def from_json(data: dict): center=(data.get("center_x", 0), data.get("center_y", 0)), name=data.get("name", "Trench")) - def __repr__(self): - return f"{self.to_json()}" + def __repr__(self) -> str: + return 
f"{self.to_dict()}" - def generate(self): - """Generate a list of milling patterns for the microscope""" + def generate(self) -> List[MillingPatternParameters]: + """Generate a list of milling shapes for the microscope""" name = self.name.value width = self.width.value height = self.height.value @@ -182,7 +182,7 @@ def __init__(self, width: float, height: float, depth: float, spacing: float, ce self.spacing = model.FloatContinuous(spacing, unit="m", range=(1e-9, 900e-6)) self.center = model.TupleContinuous(center, unit="m", range=((-1e3, -1e3), (1e3, 1e3)), cls=(int, float)) - def to_json(self) -> dict: + def to_dict(self) -> dict: """Convert the parameters to a json object""" return {"name": self.name.value, "width": self.width.value, @@ -195,7 +195,7 @@ def to_json(self) -> dict: } @staticmethod - def from_json(data: dict): + def from_dict(data: dict) -> 'MicroexpansionPatternParameters': """Create a MicroexpansionPatternParameters object from a json object""" return MicroexpansionPatternParameters( width=data["width"], @@ -205,11 +205,11 @@ def from_json(data: dict): center=(data.get("center_x", 0), data.get("center_y", 0)), name=data.get("name", "Microexpansion")) - def __repr__(self): - return f"{self.to_json()}" + def __repr__(self) -> str: + return f"{self.to_dict()}" - def generate(self): - """Generate a list of milling patterns for the microscope""" + def generate(self) -> List[MillingPatternParameters]: + """Generate a list of milling shapes for the microscope""" name = self.name.value width = self.width.value height = self.height.value diff --git a/src/odemis/acq/milling/tasks.py b/src/odemis/acq/milling/tasks.py index 67e20f2ac9..9aad77734f 100644 --- a/src/odemis/acq/milling/tasks.py +++ b/src/odemis/acq/milling/tasks.py @@ -23,7 +23,6 @@ """ -import os from typing import Dict, List import yaml @@ -45,7 +44,7 @@ def __init__(self, current: float, voltage: float, field_of_view: float, mode: s self.channel = model.StringEnumerated(channel, 
choices=set(["ion"])) self.align = model.BooleanVA(align) # align at the milling current - def to_json(self) -> dict: + def to_dict(self) -> dict: return {"current": self.current.value, "voltage": self.voltage.value, "field_of_view": self.field_of_view.value, @@ -54,7 +53,7 @@ def to_json(self) -> dict: "align": self.align.value} @staticmethod - def from_json(data: dict) -> "MillingSettings": + def from_dict(data: dict) -> "MillingSettings": return MillingSettings(current=data["current"], voltage=data["voltage"], field_of_view=data["field_of_view"], @@ -64,57 +63,70 @@ def from_json(data: dict) -> "MillingSettings": ) def __repr__(self): - return f"{self.to_json()}" + return f"{self.to_dict()}" class MillingTaskSettings: + """Represents a milling tasks, which consists of a set of patterns and settings""" milling: MillingSettings patterns: List[MillingPatternParameters] - def __init__(self, milling: dict, patterns: List[MillingPatternParameters], name: str = "Milling Task"): + def __init__(self, milling: dict, patterns: List[MillingPatternParameters], name: str): self.name = name self.milling = milling self.patterns = patterns - def to_json(self) -> dict: - return {"name": self.name, "milling": self.milling.to_json(), "patterns": [pattern.to_json() for pattern in self.patterns]} + def to_dict(self) -> dict: + """Convert the parameters to a dictionary + :return: dictionary containing the milling task settings + """ + return {"name": self.name, + "milling": self.milling.to_dict(), + "patterns": [pattern.to_dict() for pattern in self.patterns]} @staticmethod - def from_json(data: dict): + def from_dict(data: dict) -> "MillingTaskSettings": + """Create a MillingTaskSettings object from a dictionary + :param data: dictionary containing the milling task settings + :return: MillingTaskSettings""" return MillingTaskSettings( name=data.get("name", "Milling Task"), - milling=MillingSettings.from_json(data["milling"]), - patterns=[pattern_generator[p["pattern"]].from_json(p) 
for p in data["patterns"]]) + milling=MillingSettings.from_dict(data["milling"]), + patterns=[pattern_generator[p["pattern"]].from_dict(p) for p in data["patterns"]]) def __repr__(self): - return f"{self.to_json()}" + return f"{self.to_dict()}" - def generate(self): - """Generate a list of milling patterns for the microscope""" + def generate(self) -> List[MillingPatternParameters]: + """Generate the list of invidual shapes that can be drawn on the microscope from the high-level patterns. + :return: list of individual shapes to be drawn on the microscope + """ patterns = [] for pattern in self.patterns: patterns.extend(pattern.generate()) return patterns -def save_milling_tasks(path: str, milling_tasks: Dict[str, MillingTaskSettings]): - with open(os.path.join(path, "milling_tasks.yaml"), "w") as f: - yaml.dump(milling_tasks.to_json(), f) - -def load_yaml(path: str): +def save_milling_tasks(path: str, milling_tasks: Dict[str, MillingTaskSettings]) -> None: + """Save milling tasks to a yaml file. + :param path: path to the yaml file + :param milling_tasks: dictionary of milling tasks + :return: None + """ + mdict = {k: v.to_dict() for k, v in milling_tasks.items()} + with open(path, "w") as f: + yaml.dump(mdict, f) + +def load_milling_tasks(path: str) -> Dict[str, MillingTaskSettings]: + """Load milling tasks from a yaml file. 
+ :param path: path to the yaml file + :return: dictionary of milling tasks + """ + milling_tasks = {} with open(path, "r") as f: yaml_file = yaml.safe_load(f) - return yaml_file - -def load_milling_tasks(path: str, task_list: List[str] = None) -> Dict[str, MillingTaskSettings]: - milling_tasks = {} - task_file = load_yaml(path) - - if task_list is None: - task_list = task_file.keys() + # convert the dictionary to Dict[str, MillingTaskSettings] + milling_tasks = {k: MillingTaskSettings.from_dict(v) for k, v in yaml_file.items()} - for task_name in task_list: - task = MillingTaskSettings.from_json(task_file[task_name]) - milling_tasks[task_name] = task return milling_tasks diff --git a/src/odemis/acq/milling/test/patterns_test.py b/src/odemis/acq/milling/test/patterns_test.py index fe4850978c..67f6990da6 100644 --- a/src/odemis/acq/milling/test/patterns_test.py +++ b/src/odemis/acq/milling/test/patterns_test.py @@ -19,7 +19,6 @@ """ import logging import unittest -import json import numpy from odemis.acq.milling.patterns import RectanglePatternParameters, TrenchPatternParameters, MicroexpansionPatternParameters @@ -48,7 +47,6 @@ def setUp(self): ) def test_assignment(self): - # test assignment self.assertEqual(self.pattern.name.value, self.name) self.assertEqual(self.pattern.width.value, self.width) @@ -58,28 +56,28 @@ def test_assignment(self): self.assertEqual(self.pattern.center.value, self.center) self.assertEqual(self.pattern.scan_direction.value, self.scan_direction) - def test_json(self): - # test to_json - rectangle_pattern_json = self.pattern.to_json() - self.assertEqual(rectangle_pattern_json["name"], self.name) - self.assertEqual(rectangle_pattern_json["width"], self.width) - self.assertEqual(rectangle_pattern_json["height"], self.height) - self.assertEqual(rectangle_pattern_json["depth"], self.depth) - self.assertEqual(rectangle_pattern_json["rotation"], self.rotation) - self.assertEqual(rectangle_pattern_json["center_x"], 0) - 
self.assertEqual(rectangle_pattern_json["center_y"], 0) - self.assertEqual(rectangle_pattern_json["scan_direction"], self.scan_direction) - self.assertEqual(rectangle_pattern_json["pattern"], "rectangle") - - # test from_json - rectangle_pattern_from_json = RectanglePatternParameters.from_json(rectangle_pattern_json) - self.assertEqual(rectangle_pattern_from_json.name.value, self.name) - self.assertEqual(rectangle_pattern_from_json.width.value, self.width) - self.assertEqual(rectangle_pattern_from_json.height.value, self.height) - self.assertEqual(rectangle_pattern_from_json.depth.value, self.depth) - self.assertEqual(rectangle_pattern_from_json.rotation.value, self.rotation) - self.assertEqual(rectangle_pattern_from_json.center.value, self.center) - self.assertEqual(rectangle_pattern_from_json.scan_direction.value, self.scan_direction) + def test_dict(self): + # test to_dict + rectangle_pattern_dict = self.pattern.to_dict() + self.assertEqual(rectangle_pattern_dict["name"], self.name) + self.assertEqual(rectangle_pattern_dict["width"], self.width) + self.assertEqual(rectangle_pattern_dict["height"], self.height) + self.assertEqual(rectangle_pattern_dict["depth"], self.depth) + self.assertEqual(rectangle_pattern_dict["rotation"], self.rotation) + self.assertEqual(rectangle_pattern_dict["center_x"], 0) + self.assertEqual(rectangle_pattern_dict["center_y"], 0) + self.assertEqual(rectangle_pattern_dict["scan_direction"], self.scan_direction) + self.assertEqual(rectangle_pattern_dict["pattern"], "rectangle") + + # test from_dict + rectangle_pattern_from_dict = RectanglePatternParameters.from_dict(rectangle_pattern_dict) + self.assertEqual(rectangle_pattern_from_dict.name.value, self.name) + self.assertEqual(rectangle_pattern_from_dict.width.value, self.width) + self.assertEqual(rectangle_pattern_from_dict.height.value, self.height) + self.assertEqual(rectangle_pattern_from_dict.depth.value, self.depth) + self.assertEqual(rectangle_pattern_from_dict.rotation.value, 
self.rotation) + self.assertEqual(rectangle_pattern_from_dict.center.value, self.center) + self.assertEqual(rectangle_pattern_from_dict.scan_direction.value, self.scan_direction) def test_generate(self): # test generate @@ -107,7 +105,6 @@ def setUp(self): ) def test_assignment(self): - # test assignment self.assertEqual(self.pattern.name.value, self.name) self.assertEqual(self.pattern.width.value, self.width) @@ -116,26 +113,26 @@ def test_assignment(self): self.assertEqual(self.pattern.spacing.value, self.spacing) self.assertEqual(self.pattern.center.value, self.center) - def test_json(self): - # test to_json - trench_pattern_json = self.pattern.to_json() - self.assertEqual(trench_pattern_json["name"], self.name) - self.assertEqual(trench_pattern_json["width"], self.width) - self.assertEqual(trench_pattern_json["height"], self.height) - self.assertEqual(trench_pattern_json["depth"], self.depth) - self.assertEqual(trench_pattern_json["spacing"], self.spacing) - self.assertEqual(trench_pattern_json["center_x"], 0) - self.assertEqual(trench_pattern_json["center_y"], 0) - self.assertEqual(trench_pattern_json["pattern"], "trench") - - # test from_json - trench_pattern_from_json = TrenchPatternParameters.from_json(trench_pattern_json) - self.assertEqual(trench_pattern_from_json.name.value, self.name) - self.assertEqual(trench_pattern_from_json.width.value, self.width) - self.assertEqual(trench_pattern_from_json.height.value, self.height) - self.assertEqual(trench_pattern_from_json.depth.value, self.depth) - self.assertEqual(trench_pattern_from_json.spacing.value, self.spacing) - self.assertEqual(trench_pattern_from_json.center.value, self.center) + def test_dict(self): + # test to_dict + trench_pattern_dict = self.pattern.to_dict() + self.assertEqual(trench_pattern_dict["name"], self.name) + self.assertEqual(trench_pattern_dict["width"], self.width) + self.assertEqual(trench_pattern_dict["height"], self.height) + self.assertEqual(trench_pattern_dict["depth"], 
self.depth) + self.assertEqual(trench_pattern_dict["spacing"], self.spacing) + self.assertEqual(trench_pattern_dict["center_x"], 0) + self.assertEqual(trench_pattern_dict["center_y"], 0) + self.assertEqual(trench_pattern_dict["pattern"], "trench") + + # test from_dict + trench_pattern_from_dict = TrenchPatternParameters.from_dict(trench_pattern_dict) + self.assertEqual(trench_pattern_from_dict.name.value, self.name) + self.assertEqual(trench_pattern_from_dict.width.value, self.width) + self.assertEqual(trench_pattern_from_dict.height.value, self.height) + self.assertEqual(trench_pattern_from_dict.depth.value, self.depth) + self.assertEqual(trench_pattern_from_dict.spacing.value, self.spacing) + self.assertEqual(trench_pattern_from_dict.center.value, self.center) def test_generate(self): # test generate @@ -179,7 +176,6 @@ def setUp(self): ) def test_assignment(self): - # test assignment self.assertEqual(self.pattern.name.value, self.name) self.assertEqual(self.pattern.width.value, self.width) @@ -188,26 +184,26 @@ def test_assignment(self): self.assertEqual(self.pattern.spacing.value, self.spacing) self.assertEqual(self.pattern.center.value, self.center) - def test_json(self): - # test to_json - microexpansion_pattern_json = self.pattern.to_json() - self.assertEqual(microexpansion_pattern_json["name"], self.name) - self.assertEqual(microexpansion_pattern_json["width"], self.width) - self.assertEqual(microexpansion_pattern_json["height"], self.height) - self.assertEqual(microexpansion_pattern_json["depth"], self.depth) - self.assertEqual(microexpansion_pattern_json["spacing"], self.spacing) - self.assertEqual(microexpansion_pattern_json["center_x"], 0) - self.assertEqual(microexpansion_pattern_json["center_y"], 0) - self.assertEqual(microexpansion_pattern_json["pattern"], "microexpansion") - - # test from_json - microexpansion_pattern_from_json = MicroexpansionPatternParameters.from_json(microexpansion_pattern_json) - 
self.assertEqual(microexpansion_pattern_from_json.name.value, self.name) - self.assertEqual(microexpansion_pattern_from_json.width.value, self.width) - self.assertEqual(microexpansion_pattern_from_json.height.value, self.height) - self.assertEqual(microexpansion_pattern_from_json.depth.value, self.depth) - self.assertEqual(microexpansion_pattern_from_json.spacing.value, self.spacing) - self.assertEqual(microexpansion_pattern_from_json.center.value, self.center) + def test_dict(self): + # test to_dict + microexpansion_pattern_dict = self.pattern.to_dict() + self.assertEqual(microexpansion_pattern_dict["name"], self.name) + self.assertEqual(microexpansion_pattern_dict["width"], self.width) + self.assertEqual(microexpansion_pattern_dict["height"], self.height) + self.assertEqual(microexpansion_pattern_dict["depth"], self.depth) + self.assertEqual(microexpansion_pattern_dict["spacing"], self.spacing) + self.assertEqual(microexpansion_pattern_dict["center_x"], 0) + self.assertEqual(microexpansion_pattern_dict["center_y"], 0) + self.assertEqual(microexpansion_pattern_dict["pattern"], "microexpansion") + + # test from_dict + microexpansion_pattern_from_dict = MicroexpansionPatternParameters.from_dict(microexpansion_pattern_dict) + self.assertEqual(microexpansion_pattern_from_dict.name.value, self.name) + self.assertEqual(microexpansion_pattern_from_dict.width.value, self.width) + self.assertEqual(microexpansion_pattern_from_dict.height.value, self.height) + self.assertEqual(microexpansion_pattern_from_dict.depth.value, self.depth) + self.assertEqual(microexpansion_pattern_from_dict.spacing.value, self.spacing) + self.assertEqual(microexpansion_pattern_from_dict.center.value, self.center) def test_generate(self): # test generate diff --git a/src/odemis/acq/milling/test/tasks_test.py b/src/odemis/acq/milling/test/tasks_test.py index 5a7ef937cb..7cf81667b3 100644 --- a/src/odemis/acq/milling/test/tasks_test.py +++ b/src/odemis/acq/milling/test/tasks_test.py @@ -17,14 +17,17 
@@ You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/. """ +import os import logging import unittest -from odemis.acq.milling.patterns import TrenchPatternParameters -from odemis.acq.milling.tasks import MillingTaskSettings, MillingSettings +from odemis.acq.milling.patterns import TrenchPatternParameters, MicroexpansionPatternParameters +from odemis.acq.milling.tasks import MillingTaskSettings, MillingSettings, load_milling_tasks, save_milling_tasks logging.basicConfig(format="%(asctime)s %(levelname)-7s %(module)-15s: %(message)s") logging.getLogger().setLevel(logging.DEBUG) +TASKS_PATH = os.path.join(os.getcwd(), "milling_tasks.yaml") + class MillingTaskTestCase(unittest.TestCase): @classmethod @@ -33,13 +36,13 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - pass + if os.path.exists(TASKS_PATH): + os.remove(TASKS_PATH) def setUp(self): pass def test_milling_settings(self): - current = 100e-9 voltage = 30e3 field_of_view = 400e-6 @@ -53,26 +56,25 @@ def test_milling_settings(self): self.assertEqual(milling_settings.mode.value, mode) self.assertEqual(milling_settings.channel.value, channel) - json_data = milling_settings.to_json() - self.assertEqual(json_data["current"], current) - self.assertEqual(json_data["voltage"], voltage) - self.assertEqual(json_data["field_of_view"], field_of_view) - self.assertEqual(json_data["mode"], mode) - self.assertEqual(json_data["channel"], channel) + dict_data = milling_settings.to_dict() + self.assertEqual(dict_data["current"], current) + self.assertEqual(dict_data["voltage"], voltage) + self.assertEqual(dict_data["field_of_view"], field_of_view) + self.assertEqual(dict_data["mode"], mode) + self.assertEqual(dict_data["channel"], channel) - milling_settings_from_json = MillingSettings.from_json(json_data) - self.assertEqual(milling_settings_from_json.current.value, current) - self.assertEqual(milling_settings_from_json.voltage.value, voltage) 
- self.assertEqual(milling_settings_from_json.field_of_view.value, field_of_view) - self.assertEqual(milling_settings_from_json.mode.value, mode) - self.assertEqual(milling_settings_from_json.channel.value, channel) + milling_settings_from_dict = MillingSettings.from_dict(dict_data) + self.assertEqual(milling_settings_from_dict.current.value, current) + self.assertEqual(milling_settings_from_dict.voltage.value, voltage) + self.assertEqual(milling_settings_from_dict.field_of_view.value, field_of_view) + self.assertEqual(milling_settings_from_dict.mode.value, mode) + self.assertEqual(milling_settings_from_dict.channel.value, channel) def test_milling_task_settings(self): - milling_settings = MillingSettings(100e-9, 30e3, 400e-6, "Serial", "ion") trench_pattern = TrenchPatternParameters(1e-6, 1e-6, 100e-9, 1e-6, (0, 0)) - milling_task_settings = MillingTaskSettings(milling_settings, [trench_pattern]) + milling_task_settings = MillingTaskSettings(milling_settings, [trench_pattern], "Milling Task") self.assertEqual(milling_task_settings.milling.current.value, milling_settings.current.value) self.assertEqual(milling_task_settings.milling.voltage.value, milling_settings.voltage.value) @@ -85,22 +87,63 @@ def test_milling_task_settings(self): self.assertEqual(milling_task_settings.patterns[0].spacing.value, trench_pattern.spacing.value) self.assertEqual(milling_task_settings.patterns[0].center.value, trench_pattern.center.value) - json_data = milling_task_settings.to_json() - self.assertEqual(json_data["name"], "Milling Task") - self.assertEqual(json_data["milling"], milling_settings.to_json()) - self.assertEqual(json_data["patterns"][0], trench_pattern.to_json()) - - milling_task_settings_from_json = MillingTaskSettings.from_json(json_data) - self.assertEqual(milling_task_settings_from_json.milling.current.value, milling_settings.current.value) - self.assertEqual(milling_task_settings_from_json.milling.voltage.value, milling_settings.voltage.value) - 
self.assertEqual(milling_task_settings_from_json.milling.field_of_view.value, milling_settings.field_of_view.value) - self.assertEqual(milling_task_settings_from_json.milling.mode.value, milling_settings.mode.value) - self.assertEqual(milling_task_settings_from_json.milling.channel.value, milling_settings.channel.value) - self.assertEqual(milling_task_settings_from_json.patterns[0].width.value, trench_pattern.width.value) - self.assertEqual(milling_task_settings_from_json.patterns[0].height.value, trench_pattern.height.value) - self.assertEqual(milling_task_settings_from_json.patterns[0].depth.value, trench_pattern.depth.value) - self.assertEqual(milling_task_settings_from_json.patterns[0].spacing.value, trench_pattern.spacing.value) - self.assertEqual(milling_task_settings_from_json.patterns[0].center.value, trench_pattern.center.value) - - def test_save_load_milling_tasks(self): - pass + dict_data = milling_task_settings.to_dict() + self.assertEqual(dict_data["name"], "Milling Task") + self.assertEqual(dict_data["milling"], milling_settings.to_dict()) + self.assertEqual(dict_data["patterns"][0], trench_pattern.to_dict()) + + milling_task_settings_from_dict = MillingTaskSettings.from_dict(dict_data) + self.assertEqual(milling_task_settings_from_dict.milling.current.value, milling_settings.current.value) + self.assertEqual(milling_task_settings_from_dict.milling.voltage.value, milling_settings.voltage.value) + self.assertEqual(milling_task_settings_from_dict.milling.field_of_view.value, milling_settings.field_of_view.value) + self.assertEqual(milling_task_settings_from_dict.milling.mode.value, milling_settings.mode.value) + self.assertEqual(milling_task_settings_from_dict.milling.channel.value, milling_settings.channel.value) + self.assertEqual(milling_task_settings_from_dict.patterns[0].width.value, trench_pattern.width.value) + self.assertEqual(milling_task_settings_from_dict.patterns[0].height.value, trench_pattern.height.value) + 
self.assertEqual(milling_task_settings_from_dict.patterns[0].depth.value, trench_pattern.depth.value) + self.assertEqual(milling_task_settings_from_dict.patterns[0].spacing.value, trench_pattern.spacing.value) + self.assertEqual(milling_task_settings_from_dict.patterns[0].center.value, trench_pattern.center.value) + + def test_save_load_task_settings(self): + milling_settings = MillingSettings(100e-9, 30e3, 400e-6, "Serial", "ion") + trench_pattern = TrenchPatternParameters(10e-6, 3e-6, 100e-9, 2e-6, (0, 0)) + trench_task_settings = MillingTaskSettings(milling_settings, [trench_pattern], "Trench") + + milling_settings = MillingSettings(100e-9, 30e3, 400e-6, "Serial", "ion") + microexpansion_pattern = MicroexpansionPatternParameters(1e-6, 10e-6, 100e-9, 10e-6, (0, 0)) + microexpansion_task_settings = MillingTaskSettings(milling_settings, [microexpansion_pattern], "Microexpansion") + + tasks = {"Trench": trench_task_settings, "Microexpansion": microexpansion_task_settings} + + # save and load the tasks + save_milling_tasks(path=TASKS_PATH, milling_tasks=tasks) + loaded_tasks = load_milling_tasks(TASKS_PATH) + + self.assertTrue("Trench" in loaded_tasks) + self.assertTrue("Microexpansion" in loaded_tasks) + + self.assertEqual(loaded_tasks["Trench"].milling.current.value, trench_task_settings.milling.current.value) + self.assertEqual(loaded_tasks["Trench"].milling.voltage.value, trench_task_settings.milling.voltage.value) + self.assertEqual(loaded_tasks["Trench"].milling.field_of_view.value, trench_task_settings.milling.field_of_view.value) + self.assertEqual(loaded_tasks["Trench"].milling.mode.value, trench_task_settings.milling.mode.value) + self.assertEqual(loaded_tasks["Trench"].milling.channel.value, trench_task_settings.milling.channel.value) + self.assertEqual(loaded_tasks["Trench"].patterns[0].width.value, trench_task_settings.patterns[0].width.value) + self.assertEqual(loaded_tasks["Trench"].patterns[0].height.value, 
trench_task_settings.patterns[0].height.value) + self.assertEqual(loaded_tasks["Trench"].patterns[0].depth.value, trench_task_settings.patterns[0].depth.value) + self.assertEqual(loaded_tasks["Trench"].patterns[0].spacing.value, trench_task_settings.patterns[0].spacing.value) + self.assertEqual(loaded_tasks["Trench"].patterns[0].center.value, trench_task_settings.patterns[0].center.value) + + self.assertEqual(loaded_tasks["Microexpansion"].milling.current.value, microexpansion_task_settings.milling.current.value) + self.assertEqual(loaded_tasks["Microexpansion"].milling.voltage.value, microexpansion_task_settings.milling.voltage.value) + self.assertEqual(loaded_tasks["Microexpansion"].milling.field_of_view.value, microexpansion_task_settings.milling.field_of_view.value) + self.assertEqual(loaded_tasks["Microexpansion"].milling.mode.value, microexpansion_task_settings.milling.mode.value) + self.assertEqual(loaded_tasks["Microexpansion"].milling.channel.value, microexpansion_task_settings.milling.channel.value) + self.assertEqual(loaded_tasks["Microexpansion"].patterns[0].width.value, microexpansion_task_settings.patterns[0].width.value) + self.assertEqual(loaded_tasks["Microexpansion"].patterns[0].height.value, microexpansion_task_settings.patterns[0].height.value) + self.assertEqual(loaded_tasks["Microexpansion"].patterns[0].depth.value, microexpansion_task_settings.patterns[0].depth.value) + self.assertEqual(loaded_tasks["Microexpansion"].patterns[0].spacing.value, microexpansion_task_settings.patterns[0].spacing.value) + self.assertEqual(loaded_tasks["Microexpansion"].patterns[0].center.value, microexpansion_task_settings.patterns[0].center.value) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/odemis/acq/move.py b/src/odemis/acq/move.py index 502d06c0a2..e346c264c5 100644 --- a/src/odemis/acq/move.py +++ b/src/odemis/acq/move.py @@ -39,11 +39,6 @@ from odemis.util.driver import ATOL_ROTATION_POS, isInRange, isNearPosition from 
odemis.util.transform import RigidTransform, _get_transforms -# feature flags -USE_3D_TRANSFORMS = False -USE_SCAN_ROTATION = False -USE_LINKED_SEM_FOCUS_COMPENSATION = False - MAX_SUBMOVE_DURATION = 90 # s UNKNOWN, LOADING, IMAGING, ALIGNMENT, COATING, LOADING_PATH, MILLING, SEM_IMAGING, \ @@ -75,6 +70,8 @@ ATOL_ROTATION_TRANSFORM = 0.04 # rad ~2.5 deg ATOL_LINEAR_TRANSFORM = 5e-6 # 5 um +# roles that are affected by sample stage transformation +COMPS_AFFECTED_ROLES = ["ccd", "e-beam", "ion-beam"] class MicroscopePostureManager: def __new__(cls, microscope): @@ -314,9 +311,14 @@ def __init__(self, microscope): # pre-tilt is required for milling posture, but not all systems have it stage_md = self.stage.getMetadata() - if model.MD_CALIB in stage_md: - if model.MD_SAMPLE_PRE_TILT in stage_md[model.MD_CALIB]: - self.pre_tilt = stage_md[model.MD_CALIB][model.MD_SAMPLE_PRE_TILT] + md_calib = stage_md.get(model.MD_CALIB, {}) + self.pre_tilt = md_calib.get(model.MD_SAMPLE_PRE_TILT, None) + + # feature flags, for features still in testing + self.use_linked_sem_focus_compensation: bool = md_calib.get("use_linked_sem_focus_compensation", False) + self.use_3d_transforms: bool = md_calib.get("use_3d_transforms", False) + self.use_scan_rotation: bool = md_calib.get("use_scan_rotation", False) + # current posture va self.current_posture = model.VigilantAttribute(UNKNOWN) self.stage.position.subscribe(self._update_posture, init=True) @@ -353,7 +355,10 @@ def getCurrentPostureLabel(self, pos: Dict[str, float] = None) -> int: def at_milling_posture(self, pos: Dict[str, float], stage_md: Dict[str, float]) -> bool: """Milling posture is not required for all meteor systems, so we need to - first check it's available""" + first check it's available + :param pos the stage position + :param stage_md the stage metadata + :param return True if the stage is at the milling posture, False if not (or not available)""" if model.MD_FAV_MILL_POS_ACTIVE in stage_md: stage_milling = 
self.get_posture_orientation(MILLING) if isNearPosition(pos, @@ -364,7 +369,9 @@ def at_milling_posture(self, pos: Dict[str, float], stage_md: Dict[str, float]) return False def get_posture_orientation(self, posture: int) -> Dict[str, float]: - """Get the orientation of the stage for the given posture""" + """Get the orientation of the stage for the given posture + :param posture: the posture to get the orientation for + :return: a dict with the orientation of the stage for the given posture""" stage_md = self.stage.getMetadata() if posture == SEM_IMAGING: return stage_md[model.MD_FAV_SEM_POS_ACTIVE] @@ -1388,6 +1395,7 @@ def _doCryoSwitchSamplePosition(self, future, target): raise CancelledError() future._task_state = FINISHED + class MeteorTFS3PostureManager(MeteorTFS1PostureManager): def __init__(self, microscope): MeteorPostureManager.__init__(self, microscope) @@ -2752,10 +2760,12 @@ def __init__(self, name: str, role: str, stage_bare: model.Actuator , posture_ma axes=copy.deepcopy(self._stage_bare.axes), **kwargs) # update related MDs - affects = ["ccd", "e-beam", "ion-beam"] # roles - # TODO: filter so only updates availble components - comps = [c.role for c in model.getComponents()] - self.affects = [a for a in affects if a in comps] + self.affects = [] + for role in COMPS_AFFECTED_ROLES: + try: + self.affects.append(model.getComponent(role=role)) + except Exception: + pass # posture manager to convert the positions self.pm = posture_manager @@ -2789,9 +2799,8 @@ def _updatePosition(self, pos_dep): self.position._set_value(pos, force_write=True) # update related mds - for a in self.affects: + for comp in self.affects: try: - comp = model.getComponent(role=a) if comp: md_pos = pos.get("x", 0), pos.get("y", 0) comp.updateMetadata({ @@ -2849,23 +2858,32 @@ def moveAbs(self, pos: Dict[str, float], **kwargs) -> Future: return self._stage_bare.moveAbs(pos_stage, **kwargs) @isasync - def move_vertical(self, pos: Dict[str, float]) -> Future: + def 
moveRelChamberReferential(self, shift: Dict[str, float]) -> Future: """Move the stage vertically in the chamber. This is non-blocking. - From OpenFIBSEM""" + From OpenFIBSEM. + The desired input shift (x, z) is transformed to x, y, z axis components such that the + the stage moves in the vertical direction in the chamber. For TFS, the z-axis is + attached to the tilt, so the tilt angle must be taken into account. + For Tescan systems, the z-axis is always vertical, so this function is not required. + :param shift: The relative shift to be made (x, z). + :return: A cancellable future + """ # TODO: account for scan rotation theta = self._stage_bare.position.value["rx"] # tilt, in radians - dx = pos.get("x", 0) - pdy = pos.get("y", 0) + dx = shift.get("x", 0) + pdz = shift.get("z", 0) - dy = pdy * math.sin(theta) - dz = pdy / math.cos(theta) - stage_position = {"x": dx, "y": dy, "z": dz} - logging.debug(f"Moving stage vertically by: {stage_position}, theta: {theta}, pos: {pos}") - return self._stage_bare.moveRel(stage_position) + # calculate axis components + dy = pdz * math.sin(theta) + dz = pdz / math.cos(theta) + vshift = {"x": dx, "y": dy, "z": dz} + logging.debug(f"Moving stage vertically by: {vshift}, theta: {theta}, initial shift: {shift}") + return self._stage_bare.moveRel(vshift) def stop(self, axes=None): self._stage_bare.stop() + def calculate_stage_tilt_from_milling_angle(milling_angle: float, pre_tilt: float, column_tilt: int = math.radians(52)) -> float: """Calculate the stage tilt from the milling angle and the pre-tilt. 
:param milling_angle: the milling angle in radians diff --git a/src/odemis/acq/stream/__init__.py b/src/odemis/acq/stream/__init__.py index b9adacbf0c..e03ceb23d1 100644 --- a/src/odemis/acq/stream/__init__.py +++ b/src/odemis/acq/stream/__init__.py @@ -58,7 +58,6 @@ class EMStream(ABC): EMStream.register(SpotSEMStream) EMStream.register(StaticSEMStream) EMStream.register(EBICSettingsStream) -EMStream.register(IndependentEBICStream) class CLStream(ABC): diff --git a/src/odemis/acq/stream/_helper.py b/src/odemis/acq/stream/_helper.py index 8c8d5c7a24..862628fb9e 100644 --- a/src/odemis/acq/stream/_helper.py +++ b/src/odemis/acq/stream/_helper.py @@ -1497,8 +1497,11 @@ def __init__(self, name, detector, dataflow, emitter, **kwargs): SCANNER_POS_MD = {model.MD_POS, model.MD_POS_COR, model.MD_PIXEL_SIZE, model.MD_PIXEL_SIZE_COR, model.MD_SHEAR, model.MD_SHEAR_COR, model.MD_ROTATION, model.MD_ROTATION_COR} +# Extra duration on the dwell time of the scanner, to ensure that the independent detector is +# ready for the next pixel (even if its internal clock goes a little slower). +SCANNER_DWELL_TIME_EXTRA = 0.5e-6 # s -class IndependentEBICStream(FastScanningDetector): +class IndependentEBICStream(EBICSettingsStream): """ A special EBIC stream, typically with a EBIC (current) as a detector and its own scanner. It's physically very similar to the SEM stream, but as we want to select just a region @@ -1513,16 +1516,8 @@ def __init__(self, name, detector, dataflow, emitter, emt_dataflow, **kwargs): """ See "Stream" for the basic parameters. :param emt_dataflow: a DataFlow that can be used to start/stop the emitter - :param emtvas: (inside kwargs) As for Stream, but don't put "resolution" or "scale". + :param emtvas: (inside kwargs) As for Stream, but *must* have "dwellTime", not "resolution" or "scale". 
""" - if "acq_type" not in kwargs: - kwargs["acq_type"] = model.MD_AT_EBIC - super().__init__(name, detector, dataflow, emitter, **kwargs) - - self._emitter_dataflow = emt_dataflow - self._latest_emitter_md = {} # the metadata of the emitter, to be copied onto the detector data - self._acq_start_lock = threading.Lock() # To be taken when starting/stopping acquisition - # The "independent" detector is independent if it has dwellTime and resolution assert model.hasVA(detector, "dwellTime") # When playing the stream, the .repetition VA is converted to .resolution on the emitter, so @@ -1530,6 +1525,48 @@ def __init__(self, name, detector, dataflow, emitter, emt_dataflow, **kwargs): # we use the "lower level" VA, which is the .resolution. assert model.hasVA(detector, "resolution") + super().__init__(name, detector, dataflow, emitter, **kwargs) + + self._emitter_dataflow = emt_dataflow + self._latest_emitter_md = {} # the metadata of the emitter, to be copied onto the detector data + self._acq_start_lock = threading.Lock() # To be taken when starting/stopping acquisition + + # Change the standard dwell time setter to set both the detector and the emitter. + # And adjust the VA to ensure it is reflecting both limits. + if not hasattr(self, "emtDwellTime"): + raise ValueError("emtvas must contain 'dwellTime'") + self.emtDwellTime.setter = self._set_dwell_time + self.emtDwellTime.value = self.emtDwellTime.value # Force an update + dt_rng = (max(self.emtDwellTime.range[0], self.detector.dwellTime.range[0]), + min(self.emtDwellTime.range[1], self.detector.dwellTime.range[1])) + self.emtDwellTime.range = dt_rng + + def _updateAcquisitionTime(self) -> None: + """ + Called when a setting that can affect the acquisition time is changed. + Automatically restarts the acquisition if the acquisition time has changed "a lot". + Mostly to handle cases where the dwell time is set to a very large value causing very long + acquisition (eg > 1 min), and then set back to a short duration. 
+ """ + # Override standard method (on LiveStream), which checks whether the acquisition time has + # changed "a lot", and in such case stop and restart the acquisition. + + prev_dur = self._prev_dur + self._prev_dur = self.estimateAcquisitionTime() + + if not self.is_active.value: + # not acquiring => nothing to do + return + # TODO: check if it will finish within 1s + if prev_dur is None or prev_dur < 1: + # very short anyway, not worthy + return + + logging.debug("Restarting acquisition because it lasts %f s", prev_dur) + # Use the dedicated restart method, in a thread as it shouldn't be blocking. + t = threading.Thread(target=self._restart_acquisition) + t.start() + def _onNewEmitterData(self, dataflow: model.DataFlow, data: model.DataArray) -> None: """ Called when the emitter sends new data. Typically almost at the same time as the dectector, @@ -1590,6 +1627,7 @@ def _restart_acquisition(self): self._dataflow.unsubscribe(self._onNewData) self._dataflow.subscribe(self._onNewData) + time.sleep(0.1) # wait a bit, to ensure the detector is ready self._emitter_dataflow.subscribe(self._onNewEmitterData) except Exception: logging.exception("Failed to restart the acquisition") @@ -1600,8 +1638,8 @@ def _onActive(self, active: bool) -> None: """ if active: # Set the emitter to the right settings - self._onDwellTime(self._emitter.dwellTime.value) - self._onResolution(self._emitter.resolution.value) + self._onDwellTime(self._scanner.dwellTime.value) + self._onResolution(self._scanner.resolution.value) with self._acq_start_lock: super()._onActive(active) @@ -1617,22 +1655,56 @@ def _startAcquisition(self, future=None): super()._startAcquisition(future) # ...then start the emitter + time.sleep(0.1) # wait a bit, to ensure the detector is ready self._emitter_dataflow.subscribe(self._onNewEmitterData) - def _onDwellTime(self, value: float): - """ - Overrides the standard _onDwellTime to set the dwell time on the emitter as well. 
- """ - if self.is_active.value: - self._detector.dwellTime.value = value + def _set_dwell_time(self, dt: float) -> float: + + dt = self._detector.dwellTime.clip(dt) + if not self.is_active.value: + # Nothing else to check (and no need to change the hardware) + return dt + + return self._set_hw_dwell_time(dt) + + def _set_hw_dwell_time(self, dt: float) -> float: + """ + Adjust the dwell times of both the detector and the emitter (aka scanner) to ensure they are compatible. + :param dt: target dwell time to set the detector. It will be clipped to the hardware limits, + and preferably rounded down. The scanner dwell time will be set to a little bit larger + (>= SCANNER_DWELL_TIME_EXTRA) to what was accepted by the detector. + :return: the accepted emitter/scanner dwell time (so typically, a little larger than the target dwell time) + """ + # Update the detector, and read what it accepts (it should always return a value *smaller* or equal to the input) + self._detector.dwellTime.value = dt + # Read the accepted value + det_dt = self._detector.dwellTime.value + logging.debug("Updated detector dwell time to %s s", det_dt) + + # Set the scanner dwell time, just a little bit larger, to ensure there will be enough time + # between each pixel for the independent detector to be ready. + # The scanner dwell time is rounded *up* by the hardware, so we start from the detector + # value, and increase it a tiny bit until it is just long enough. + min_emt_dt = det_dt + SCANNER_DWELL_TIME_EXTRA + self._scanner.dwellTime.value = det_dt + emt_dt = self._scanner.dwellTime.value # Read the accepted value + for i in range(-1, 10): + if emt_dt >= min_emt_dt: + break + # Increase by a larger and larger value: x 0.5, 1, 2, 4, 8, 16... 
+ self._scanner.dwellTime.value = emt_dt + SCANNER_DWELL_TIME_EXTRA * (2 ** i) + emt_dt = self._scanner.dwellTime.value + else: + logging.warning("Failed to set the dwell time of %s to %s s: %s s accepted vs %s s by emitter", + self._detector.name, min_emt_dt, det_dt, emt_dt) - # It's fine to a have dwell time slightly shorter (it will wait a tiny bit after acquiring - # each pixel), but not longer. - if self._detector.dwellTime.value > value: - logging.warning("Failed to set the dwell time of %s to %s s: %s s accepted", - self._detector.name, value, self._detector.dwellTime.value) + # We report the emitter dwell time, which is the actual exposure of the pixels + return emt_dt - super()._onDwellTime(value) + def _linkHwVAs(self): + super()._linkHwVAs() + emt_dt = self._set_hw_dwell_time(self.emtDwellTime.value) + self.emtDwellTime.value = emt_dt def _onResolution(self, value: Tuple[int, int]): """ diff --git a/src/odemis/acq/stream/_sync.py b/src/odemis/acq/stream/_sync.py index 9d4f0a2955..eb932c0359 100644 --- a/src/odemis/acq/stream/_sync.py +++ b/src/odemis/acq/stream/_sync.py @@ -2417,25 +2417,12 @@ def _adjustHardwareSettings(self): if cscale != scale: logging.warning("Emitter scale requested (%s) != accepted (%s)", cscale, scale) + self._emitter.scale.value = cscale # TODO: check that no fuzzing is requested (as it's not supported and # not useful). - # If a detector is "independent", then connect its dwell time - # Note: resolution is updated later, just before the acquisition block - # TODO: should it be done automatically by the "SettingsStream" in .prepare() or .linkHwVA()? dt = self._dwellTime.value - for s in self._streams: - det = s._detector - if model.hasVA(det, "resolution"): - det.dwellTime.value = dt - # It's fine to a have dwell time slightly shorter (it will wait a tiny bit after acquiring - # each pixel), but not longer. 
- if det.dwellTime.value > dt: - logging.warning("Failed to set the dwell time of %s to %s s: %s s accepted", - det.name, dt, det.dwellTime.value) - - self._emitter.scale.value = cscale # Order matters (a bit) if model.hasVA(self._emitter, "blanker") and self._emitter.blanker.value is None: @@ -2554,9 +2541,11 @@ def _runAcquisition(self, future) -> Tuple[List[model.DataArray], Optional[Excep ((n_x, n_y), em_res)) # Update the resolution of the "independent" detectors + has_inde_detectors = False for s in self._streams: det = s._detector if model.hasVA(det, "resolution"): + has_inde_detectors = True det.resolution.value = (n_x, n_y) # It's unlikely but the detector could have specific constraints on the resolution # and refuse the requested one => better fail early. @@ -2564,7 +2553,8 @@ def _runAcquisition(self, future) -> Tuple[List[model.DataArray], Optional[Excep raise ValueError(f"Failed to set the resolution of {det.name} to {n_x} x {n_y} px: " f"{det.resolution.value} px accepted") else: - logging.debug("Set resolution of independent detector %s to %s", det.name, (n_x, n_y)) + logging.debug("Set resolution of independent detector %s to %s", + det.name, (n_x, n_y)) # Move the beam to the center of the sub-frame trans = tuple(pos_flat[spots_sum:(spots_sum + npixels2scan)].mean(axis=0)) @@ -2588,6 +2578,12 @@ def _runAcquisition(self, future) -> Tuple[List[model.DataArray], Optional[Excep self._df0.synchronizedOn(self._trigger) for s, sub in zip(self._streams, self._subscribers): s._dataflow.subscribe(sub) + + if has_inde_detectors: + # The independent detectors might need a bit of time to be ready. + # If not waiting, the first pixels might be missed. + time.sleep(0.05) + start = time.time() self._acq_min_date = start self._trigger.notify() @@ -2605,7 +2601,7 @@ def _runAcquisition(self, future) -> Tuple[List[model.DataArray], Optional[Excep # receive the data (almost) at the same time. 
max_end_t = start + frame_time * 10 + 5 for i, s in enumerate(self._streams): - timeout = max(0.1, max_end_t - time.time()) + timeout = max(5.0, max_end_t - time.time()) if not self._acq_complete[i].wait(timeout): raise TimeoutError("Acquisition of repetition stream for frame %s timed out after %g s" % (self._emitter.translation.value, time.time() - max_end_t)) @@ -3387,8 +3383,8 @@ def _onData(self, n, df, data): if self._acq_min_date > data.metadata.get(model.MD_ACQ_DATE, 0): # This is a sign that the e-beam might have been at the wrong (old) # position while Rep data is acquiring - logging.warning("Dropping data because it seems started %g s too early", - self._acq_min_date - data.metadata.get(model.MD_ACQ_DATE, 0)) + logging.warning("Dropping data (of stream %d) because it seems it started %g s too early", + n, self._acq_min_date - data.metadata.get(model.MD_ACQ_DATE, 0)) if n == 0: # As the first detector is synchronised, we need to restart it # TODO: probably not necessary, as the typical reason it arrived diff --git a/src/odemis/acq/test/move_test.py b/src/odemis/acq/test/move_test.py index 190408a939..a90a4d35c8 100644 --- a/src/odemis/acq/test/move_test.py +++ b/src/odemis/acq/test/move_test.py @@ -732,6 +732,7 @@ def test_switching_movements(self): def _test_3d_transformations(self): """Test that the 3D transforms work the same as the 2D transforms for 0 scan rotation""" # 3d transforms should produce the same result as the 2d transforms + self.pm.use_3d_transforms = False # make sure we're using 2D transforms stage_pos = self.stage_bare.position.value ssp = self.pm.to_sample_stage_from_stage_position(stage_pos) # new 2D method ssp2 = self.pm.to_sample_stage_from_stage_position2(stage_pos) # new 3D method @@ -776,6 +777,9 @@ def test_to_posture(self): def test_sample_stage_movement(self): """Test sample stage movements in different postures match the expected movements""" + f = self.stage_bare.moveAbs(self.stage_grid_centers[POSITION_NAMES[GRID_1]]) + 
f.result() + dx, dy = 50e-6, 50e-6 self.pm.use_3d_transforms = True for posture in [FM_IMAGING, SEM_IMAGING]: @@ -806,7 +810,7 @@ def test_sample_stage_movement(self): # manually calculate the expected stage bare position p = [dx, dy, 0] - tf = self.pm._inv_transforms2[posture] # to-stage + tf = self.pm._inv_transforms2[posture] # to-stage bare q = numpy.dot(tf, p) exp_sb_pos = { diff --git a/src/odemis/driver/autoscript_client.py b/src/odemis/driver/autoscript_client.py index b160636101..a801d024d9 100644 --- a/src/odemis/driver/autoscript_client.py +++ b/src/odemis/driver/autoscript_client.py @@ -1032,9 +1032,10 @@ def set_patterning_mode(self, mode: str) -> None: self.server.set_patterning_mode(mode) def clear_patterns(self) -> None: - """Clear all patterns.""" + """Clear all patterns in fib. NOTE: active_view 2 is the fib view""" with self._proxy_access: self.server._pyroClaimOwnership() + self.server.set_active_view(2) # channel = ion self.server.clear_patterns() def set_default_application_file(self, application_file: str = "Si") -> None: diff --git a/src/odemis/driver/ephemeron.py b/src/odemis/driver/ephemeron.py new file mode 100644 index 0000000000..a9796db477 --- /dev/null +++ b/src/odemis/driver/ephemeron.py @@ -0,0 +1,1201 @@ +# -*- coding: utf-8 -*- +""" +Created on 24 April 2024 + +@author: Stefan Sneep + +Copyright © 2024-2025 Stefan Sneep & Éric Piel, Delmic + +This file is part of Odemis. + +Odemis is free software: you can redistribute it and/or modify it under the terms of the +GNU General Public License version 2 as published by the Free Software Foundation. + +Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with Odemis. +If not, see http://www.gnu.org/licenses/. 
+ +""" +# This driver is developed for communication with the Ephemeron MightyEBIC controller API. +# The MightyEBIC scan controller will acquire a signal synchronized on the pixel signal +# from the e-beam scanner. Eventually, the complete signal of the frame is sent digitally to +# Odemis. + +import asyncio +import logging +import math +import re +import threading +import time +import queue +from asyncio import AbstractEventLoop +from functools import wraps +from typing import Optional, Tuple, Dict, Any, List, Coroutine + +from odemis import model +from odemis.model import Detector, oneway, MD_ACQ_DATE, HwError + +import numpy +from asyncua import Client, Server, Node, ua +from asyncua.common.methods import uamethod +from asyncua.common.statemachine import State, StateMachine +from asyncua.ua import NodeId, ObjectIds, LocalizedText, Argument + +# Don't use too verbose logging for asyncua, otherwise it's really an explosion +logging.getLogger("asyncua").setLevel(logging.WARNING) + +# OPCUA StateMachine constants +STATE_NAME_IDLE = "Idle" +STATE_NAME_BUSY = "Busy" +STATE_NAME_TRIGGER = "Trigger" +STATE_NAME_ERROR = "Error" + +# MightyEBIC driver constants +MAX_SAMPLES_PER_PIXEL = 16 +MAX_NUMBER_OF_CHANNELS = 8 +OVERSAMPLING_VALUES = (0, 2, 4, 8, 16, 32, 64) # Ordered from smallest to largest +MIN_RESOLUTION = (100, 16) # Not clear how small is "too small"... short lines (in time) are not supported +MAX_RESOLUTION = (4096, 4096) # TODO: check the MightyEBIC + +NAMESPACE_INDEX = 0 +NAMESPACE_ADDRESS = "http://opcfoundation.org/UA/" +SIMULATED_URL = "opc.tcp://localhost:4840/mightyebic/server/" +EBIC_CONTROLLER_NODE = "MightyEBICController" +EBIC_STATE_NODE = "MightyEBICState" +EBIC_INFO_NODE = "MightyEBICInfo" + +# Extra time to give to the scan, in addition to the expect scan time. This is mainly to account for +# the time between the device is ready and the e-beam scanner starts (typically a few seconds). 
+# Also accounts for the time it takes to report the scan is complete (should take less than a few seconds). +SCAN_EXTRA_TIMEOUT = 60 # s + + +class MightyEBIC(Detector): + def __init__(self, name: str, role: str, channel: int, url: str, **kwargs): + """ + Initialise the EBIC controller + :param name: The name of the device configured through the configuration file. + :param role: The role of the device configured through the configuration file. + :param channel: The channel from which the device read the data (starts from 0) + :param url: The url address to use with the OPC UA protocol. + Example of such an url might be opc.tcp://192.168.50.2:4840/mightyebic/server/ + Note: Pass "fake" to use a simulator. + """ + super().__init__(name, role, **kwargs) + + self._channel = channel + if channel < 0 or channel >= MAX_NUMBER_OF_CHANNELS: + raise ValueError(f"Invalid channel number {channel}, should be between 0 and {MAX_NUMBER_OF_CHANNELS - 1}.") + if channel > 0: + # The server only supports sending the first N channels, not just a specific one. + # So, if the channel is not 0, extra data will be acquired, sent, and discarded. + logging.warning("Using channel > 0 is inefficient, consider switching the signal to the first channel") + + # server_sim is only used for simulation of the opcServer if it is non-existent + self._opc_server_sim: Optional[MightyEBICSimulator] = None + + if url == "fake": + # if the device should be simulated, start a simulated server first + self._url = SIMULATED_URL + self._opc_server_sim = MightyEBICSimulator(self._url, self) + else: + url_check = r"^opc\.tcp://(localhost|(\d{1,3}\.){3}\d{1,3}):[0-9]*/" + if not re.search(url_check, url): + raise ValueError(f"The url {url} to connect to is not in the right format." 
+ "Should be like opc.tcp://192.168.50.2:4840/mightyebic/server/ .") + self._url = url + + try: + self._opc_client = MightyEBICUaClient(self._url, timeout=10, component=self) + except OSError as ex: + raise HwError(f"Failed to connect to MightyEBIC computer, check connection: {ex}") + + # TODO: is there a need to support multiple channels? If so, we would need to either + # provide multiple Detectors, each with a DataFlow, or provide multiple DataFlows on this + # single Detector. Needs to be decided... For now, we only support one channel. + # Use a float for the depth, to indicate it returns data in floating point format. + self._shape = MAX_RESOLUTION + (2.0 ** 64,) + self.resolution = model.ResolutionVA(MIN_RESOLUTION, (MIN_RESOLUTION, MAX_RESOLUTION)) + + dt_min = self._opc_client.calculate_dwell_time(oversampling=0, channels=self._channel + 1, spp=1, delay=0) + # The maximum dwell time depends on the delay, which could be arbitrary large, but for now + # we don't allow the user to change the delay, so it's easy. 
+ dt_max = self._opc_client.calculate_dwell_time(oversampling=max(OVERSAMPLING_VALUES), + channels=self._channel + 1, + spp=MAX_SAMPLES_PER_PIXEL, + delay=0) + self.dwellTime = model.FloatContinuous(dt_min, (dt_min, dt_max), unit="s", + setter=self.on_dwell_time_change) + + self.data = EBICDataFlow(self) + self._acquisition_thread: Optional[threading.Thread] = None + self._acquisition_lock = threading.Lock() + self._acquisition_must_stop = threading.Event() + + # Special event to request software unblocking on the scan + self.softwareTrigger = model.Event() + self._metadata[model.MD_DET_TYPE] = model.MD_DT_NORMAL + self._swVersion = self._opc_client.get_version() + self._metadata[model.MD_SW_VERSION] = self._swVersion + + def terminate(self): + self.stop_acquire() # Just in case acquisition was running + self._wait_acquisition_stopped() + + if self._opc_server_sim: + self._opc_server_sim.terminate() + self._opc_client.terminate() + super().terminate() + + def start_acquire(self): + with self._acquisition_lock: + self._wait_acquisition_stopped() + self._acquisition_thread = threading.Thread(target=self._acquire_thread, + name="EBIC acquisition thread") + self._acquisition_thread.start() + + def stop_acquire(self): + """ Stops the current running acquisition, should have no effect when there is none. """ + with self._acquisition_lock: + self._acquisition_must_stop.set() + + def _wait_acquisition_stopped(self): + """ Waits until the acquisition thread is fully finished _if_ it was requested to stop. """ + if self._acquisition_must_stop.is_set(): + logging.debug("Waiting for thread to stop.") + if self._acquisition_thread is None: + return + self._acquisition_thread.join(10) # 10s timeout for safety + if self._acquisition_thread.is_alive(): + logging.exception("Failed to stop the acquisition thread") + # Now let's hope everything is back to normal... 
+ # ensure it's not set, even if the thread died prematurely + self._acquisition_must_stop.clear() + + def _acquire_thread(self) -> None: + """ + Initiate a scan on the EBIC scan controller, before the scan starts the required data is gathered + and a co-routine is initiated within the ua_client. The scan time, with an overhead of 60 seconds + determine the time-out of the scan. After the method is initiated the scan controller waits for the + pixel signal of the scanner, to trigger the beginning of the scan. + """ + try: + self.data._waitSync() + res = self.resolution.value + delay = 0 + md = self.getMetadata() + + act_dt, spp, os = self._opc_client.guess_samples_per_pixel_and_oversampling(self.dwellTime.value, self._channel + 1, 0) + md[model.MD_DWELL_TIME] = act_dt + md[model.MD_INTEGRATION_COUNT] = os * spp + scan_time = self._opc_client.calculate_scan_time(act_dt, res[0], res[1]) + end_time = time.time() + SCAN_EXTRA_TIMEOUT + scan_time + + if self._opc_client.controller_state != STATE_NAME_IDLE: + logging.warning("Scan controller is not idle (%s), will stop the current scan", + self._opc_client.controller_state) + self._opc_client.stop_scan() + time.sleep(0.1) + while self._opc_client.controller_state != STATE_NAME_IDLE: + logging.warning("Waiting longer for controller to become idle, currently %s", + self._opc_client.controller_state) + if self._acquisition_must_stop.wait(0.1): + self._opc_client.stop_scan() + return + + # The timeout here defines how long the scan controller will wait for the trigger signal, + # which we have no idea. However, typically, in the use cases, it should be almost immediate (< 1s) + # To be certain to handle every case, we still a large timeout. + self._opc_client.start_trigger_scan(os, self._channel + 1, spp, delay, res[0], res[1], timeout=60) + earliest_start = time.time() + # State is supposed to change immediately to "Trigger", and back to "Idle" when the scan + # is finished. 
+ + state = self._opc_client.controller_state + logging.debug("Acquisition started, state = %s, expected scan time = %s s, will wait for up to %s seconds", + state, scan_time, SCAN_EXTRA_TIMEOUT + scan_time) + # TODO: is it helpful to not expect the state to be "Busy" immediately? + if self._acquisition_must_stop.wait(0.1): + self._opc_client.stop_scan() + return + + # while the scan controller is "trigger", check for a time-out or stop signal + while self._opc_client.controller_state in (STATE_NAME_BUSY, STATE_NAME_TRIGGER): + if time.time() > end_time: + # Timeout => give up the acquisition + self._opc_client.stop_scan() + raise TimeoutError("Acquisition not ended after %s seconds" % (SCAN_EXTRA_TIMEOUT + scan_time,)) + if self._acquisition_must_stop.wait(0.1): + self._opc_client.stop_scan() + return + + # It shouldn't happen, but good to detect some odd trigger issue + while time.time() < earliest_start + scan_time: + logging.warning("Waiting longer for the scan to really finish, state is %s", + self._opc_client.controller_state) + # Wait for the scan to be finished, but not too long + if self._acquisition_must_stop.wait(0.1): + self._opc_client.stop_scan() + return + + logging.debug("Scan completed (state = %s), will receive data", self._opc_client.controller_state) + + # MD_ACQ_DATE should contain the time when the acquisition started, and some part of Odemis + # will get upset if it's before receiving the (hardware) trigger. However, we don't really + # know when the trigger has been received. So, compute the acquisition date in two ways: + # when we asked to start, and retroactively, when the scan stopped minus the scan time. + # In theory the second time should be always latest, but in case the scan was shorter + # than expected, we'll take the latest of the two. 
+ latest_start = time.time() - scan_time + if earliest_start > latest_start: + logging.warning("Acquisition ended in %s s, which is less than scan time %g s.", + time.time() - earliest_start, scan_time) + md[MD_ACQ_DATE] = earliest_start + else: + md[MD_ACQ_DATE] = latest_start + + # get the data from the last acquisition and notify subscribers + das = self.read_data(res, self._channel + 1, md) + da = das[self._channel] # Only keep the channel we are interested in + self.data.notify(da) + except Exception: + logging.exception("Unexpected failure during acquisition") + finally: + logging.debug("Acquisition thread closed") + + def read_data(self, resolution: Tuple[int, int], channels: int, md: Dict[str, Any]) -> List[model.DataArray]: + """ + After a successful scan data generated by the scan controller should be available for retrieval. + :param resolution: The used resolution (X, Y in pixels) for the scan. + :param channels: (>= 1) The number of channels which the data should have + :param md: The metadata to be added to the raw data. + :return: a series of DataArray, containing the data acquired for each channel. + """ + assert resolution[0] >= 1 and resolution[1] >= 1 + assert channels >= 1 + + result_shape = self._opc_client.get_scan_result_shape() # X, Y, C + if result_shape != (resolution[0], resolution[1], channels): + logging.warning("Expected a result of shape %s, but reported to be %s", + (resolution[0], resolution[1], channels), result_shape) + + logging.debug("Will read data, with expected shape %s", result_shape) + raw_data = self._opc_client.get_scan_result() + + # reshape the data: it's in the order X, Y, C, but we want the (numpy) conventional C, Y, X + raw_arr = numpy.array(raw_data) + try: + raw_arr.shape = result_shape # XYC + raw_arr = numpy.moveaxis(raw_arr, [0, 1, 2], [2, 1, 0]) + except ValueError as ex: + # TODO: if the (expected) resolution is different from result_shape, try with the expected res? 
+ logging.error("Data shape %s does not match the expected %s: %s", + raw_arr.shape, result_shape, ex) + # return the data anyway, which might be more useful than nothing, but 1D to make it clearer + # the shape is unknown. + return [model.DataArray(raw_arr, md)] + + # separate along the first dimension (channels) + das = [model.DataArray(channel_data, md) for channel_data in raw_arr] + return das + + def on_dwell_time_change(self, value: float) -> float: + """ + Called when the dwell time is changed, the value is checked and a value compatible with the + hardware and *smaller* or equal to the requested value is returned. + :param value: request dwell time (s) + :return: accepted dwell time (s) + """ + # Find the closest SPP & oversampling that matches the requested dwell time. It always returns a smaller or equal value, + # unless a value smaller than the minimum is requested (in which case it returns the minimum) + dt, spp, os = self._opc_client.guess_samples_per_pixel_and_oversampling(value, self._channel + 1, 0) + if not value * 0.9 <= dt <= value: # 10% tolerance as a rule-of-thumb (it does happen sometimes) + logging.warning(f"Requested dwell time {value} differs from calculated dwell time {dt}, " + f"with {self._channel + 1} channels, using SPP {spp} and oversampling {os}.") + return dt + + +class EBICDataFlow(model.DataFlow): + def __init__(self, detector): + """ + detector (ephemeron.MightyEBIC): the detector that the dataflow corresponds to + """ + model.DataFlow.__init__(self) + self._detector = detector + + self._sync_event = None # event to be synchronised on, or None + self._evtq = None # a Queue to store received events (= float, time of the event) + + # start/stop_generate are _never_ called simultaneously (thread-safe) + def start_generate(self): + try: + self._detector.start_acquire() + except ReferenceError: + # sem/component has been deleted, it's all fine, we'll be GC'd soon + pass + + def stop_generate(self): + try: + 
self._detector.stop_acquire() + # Note that after that acquisition might still go on for a short time + except ReferenceError: + # sem/component has been deleted, it's all fine, we'll be GC'd soon + pass + + def synchronizedOn(self, event): + """ + Synchronize the acquisition on the given event. Every time the event is triggered, the scanner will + start a new acquisition/scan. The DataFlow can be synchronized only with one Event at a time. + However, each DataFlow can be synchronized, separately. The scan will only start once each active + DataFlow has received an event. + event (model.Event or None): event to synchronize with. Use None to disable synchronization. + """ + super().synchronizedOn(event) + + if self._sync_event == event: + return + if self._sync_event: + self._sync_event.unsubscribe(self) + if not event: + self._evtq.put(None) # in case it was waiting for this event + self._sync_event = event + if self._sync_event: + # if the df is synchronized, the subscribers probably don't want to + # skip some data + self._evtq = queue.Queue() # to be sure it's empty + self._sync_event.subscribe(self) + + @oneway + def onEvent(self): + """ + Called by the Event when it is triggered + """ + if not self._evtq.empty(): + logging.warning("Received synchronization event but already %d queued", + self._evtq.qsize()) + self._evtq.put(time.time()) + + def _waitSync(self): + """ + Block until the Event on which the dataflow is synchronised has been received. + If the DataFlow is not synchronised on any event, this method immediately returns. + """ + if self._sync_event: + self._evtq.get() + + +class MightyEBICUaClient: + """ + Represents the MightyEBIC, connected over OPC-UA. + Note: it seems the default of the OPC-UA server is to limit messages to 100Mb, which is just enough + to pass one (float) array at 4096x3072. If multiple channels are passed, then the message length + is increased proportionally. 
So one needs to make sure that the server uses a higher limit size
+ if a higher resolution is used, or the EBIC channel is > 0.
+ """
+ def __init__(self, url: str, timeout: float, component: MightyEBIC):
+ """
+ :param url: url of the server. Example: "opc.tcp://192.168.50.2:4840/mightyebic/server/"
+ :param timeout: Maximum time to wait for a request sent to the server before failing (in s)
+ :param component: The component that uses this client.
+ """
+ self.client = Client(url=url, timeout=timeout)
+ self._ebic_info_node: Optional[Node] = None
+ self._ebic_state_node: Optional[Node] = None
+ self._ebic_controller_node: Optional[Node] = None
+ self._loop: Optional[AbstractEventLoop] = None
+ self._loop_thread: Optional[threading.Thread] = None
+ self._prev_state: Optional[str] = None
+ self._component = component
+
+ # Create an event loop to run the asyncio calls (aka "coroutines")
+ self._loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(self._loop)
+ self._loop_thread = threading.Thread(target=self.run_event_loop, daemon=True)
+ self._loop_thread.start()
+
+ # Connect to the server, via the event loop
+ f = asyncio.run_coroutine_threadsafe(self._initialize_client(), self._loop)
+ f.result()
+ logging.debug("OPCUA client initialized")
+
+ def terminate(self):
+ if self.client is None: # already terminated
+ return
+
+ asyncio.run_coroutine_threadsafe(self.client.disconnect(), self._loop).result()
+ self._loop.call_soon_threadsafe(self._loop.stop)
+ self._loop_thread.join()
+ self.client = None
+
+ async def _initialize_client(self):
+ """ Lookup all the necessary OPCUA nodes. 
""" + await self.client.connect() + logging.info("OPCUA connection established") + + objects_node = await self.client.nodes.root.get_child(f"{NAMESPACE_INDEX}:Objects") + state_node = await objects_node.get_child(f"{NAMESPACE_INDEX}:{EBIC_STATE_NODE}") + self._ebic_info_node = await objects_node.get_child(f"{NAMESPACE_INDEX}:{EBIC_INFO_NODE}") # needed + self._ebic_controller_node = await objects_node.get_child(f"{NAMESPACE_INDEX}:{EBIC_CONTROLLER_NODE}") + self._ebic_state_node = await state_node.get_child(f"{NAMESPACE_INDEX}:CurrentState") + + # Note: it could be tempting to use a "state change" subscription, but in practice it seems + # to be implemented as a polling mechanism, which is equivalent to what we already do... + # but less good because the polling would happen constantly, at a low frequency, instead of + # only doing it while waiting for the acquisition to complete. + # This is done this way: + # handler = StateChangeHandler() + # subscription = await self.client.create_subscription(500, handler) + # await subscription.subscribe_data_change([self._ebic_state_node]) + # self._subscription = subscription # keep a reference to avoid it being garbage collected + # + # class StateChangeHandler: + # """ + # Used to handle subscriptions to data changes (see asyncua.DataChangeNotificationHandlerAsync) + # """ + # def datachange_notification(self, node: Node, val: LocalizedText, data: "DataChangeNotification"): + # """ + # Callback when the EBIC server state changes (ie, start/stops acquiring) + # """ + # logging.debug("Controller state notification %r %s", node, val) + + def run_event_loop(self): + """ the command run_forever() has to be set in a separate thread due to its blocking nature. 
""" + try: + self._loop.run_forever() + except Exception: + logging.exception("Event loop stopped unexpectedly") + finally: + logging.debug("Event loop ended") + + def retry_on_connection_error(coro): + """" + Decorator for coroutines, which detects the OPC-UA connection failed, + and automatically attempts to reconnect, and calls again the coroutine. + """ + # Note, asyncua.Client has a ".connection_lost_callback", which is called when the connection + # is lost. However, it's not clear how it could be used to automatically reconnect... and + # retry the call. + + @wraps(coro) + async def wrapper(self, *args, **kwargs): + try: + return await coro(self, *args, **kwargs) + except ConnectionError: + logging.error("Connection to the MightyEBIC server lost, trying to reconnect.") + self._component.state._set_value(HwError("MightyEBIC disconnected"), force_write=True) + try: + await self.client.disconnect() # Safe to call even if not connected + await self._initialize_client() + self._component.state._set_value(model.ST_RUNNING, force_write=True) + except Exception: + logging.exception("Failed to reconnect to the MightyEBIC server.") + raise + # Try again + return await coro(self, *args, **kwargs) + return wrapper + + # Not a function directly provided by the OPC-UA server, but as it does many calls to the server, + # it's more efficient to have them all in one function, instead of having to request the event + # loop to schedule every call to _calculate_dwell_time(). + def guess_samples_per_pixel_and_oversampling(self, req_dt: float, channels: int, delay: float + ) -> Tuple[float, int, int]: + """ + + Compute the best samples per pixel (SPP) and oversampling rate. + "samples per pixel" is the number of consecutive measurements corresponding to the same pixel, + which will be averaged in the result. 
+ "oversampling rate" is essentially the same, but it is done at a lower level, and so is more + efficient, but has a limited set of values possible: {0, 2, 4, 8, 16, 32, 64}. + It will return a dwell time shorter or equal to the requested dwell time, unless the requested + dwell time is below the minimum dwell time, in which case the minimum dwell time is returned. + :param req_dt: The requested dwell time in seconds. + :param channels: The number of channels to acquire (1 to 8) + :param delay: The delay between the trigger and the start of the acquisition (s) + :return: + dt: actual dwell time accepted (s) + spp: number of samples per pixel for reaching the dwell time + osr: oversampling rate needed + """ + f = asyncio.run_coroutine_threadsafe( + self._guess_samples_per_pixel_and_oversampling(req_dt, channels, delay), + self._loop) + return f.result() + + async def _guess_samples_per_pixel_and_oversampling(self, req_dt: float, channels: int, delay: float + ) -> Tuple[float, int, int]: + """ + See guess_samples_per_pixel_and_oversampling() for info. + This is the actual implementation, which is asynchronous, as it calls coroutines. + """ + # Try every oversampling rate (OSR) , and for each of them find the best Sample per pixel (SPP). + # Pick the best combination of SPP x OSR, offering the largest dwell time below the requested dwell time. 
+ + dt_to_params = {} + for osr in OVERSAMPLING_VALUES: + dt, spp = await self._guess_samples_per_pixel(req_dt, osr, channels, delay) + dt_to_params[dt] = (spp, osr) + if spp == 1 and dt >= req_dt: + # Increasing the oversampling will not allow to have a shorter dwell time, so stop early + break + + # Pick the best one: the largest, below the requested dwell time (and the largest OSR) + try: + best_dt = max(dt for dt in dt_to_params if dt <= req_dt) + spp, osr = dt_to_params[best_dt] + # If there are several dwell times with the same value for spp * osr, pick the largest osr + # (which might be slightly short dwell time, but will be more efficient) + int_counts = spp * osr + dt_same_counts = [dt for dt, (spp, osr) in dt_to_params.items() if spp * osr == int_counts] + best_dt = min(dt_same_counts, key=lambda dt: dt_to_params[dt][1]) + spp, osr = dt_to_params[best_dt] + logging.debug("Best dwell time found for %s: %s, with spp = %s, osr = %s", + req_dt, best_dt, spp, osr) + except ValueError: # No dt < req_dt + best_dt = min(dt_to_params.keys()) + spp, osr = dt_to_params[best_dt] + logging.debug("No dwell time found below the requested %s s, picking %s s with spp = %s, osr = %s", + req_dt,best_dt, spp, osr) + + return best_dt, spp, osr + + async def _guess_samples_per_pixel(self, req_dt: float, oversampling: int, channels: int, delay: float + ) -> Tuple[float, int]: + """ + Compute the best samples per pixel (SPP) + samples per pixel is the number of consecutive measurements corresponding to the same pixel, + which will be averaged in the result. + It will *always* return dwell time shorter or equal to the requested dwell time, unless + the requested dwell time is below the minimum possible dwell time. + :param req_dt: The requested dwell time in seconds. + :param oversampling: The oversampling rate that is applied. 
+ :return: + dt: actual dwell time accepted (s) + spp: number of samples per pixel for reaching the dwell time + """ + # The server doesn't provide information about the best SPP for a given dwell time. It only + # provides the reverse: for a given SPP, what is the dwell time. So we need to "play" a guess + # game to find the best SPP by asking with various values until we find the one matching the + # requested dwell time. + + req_dt_us = req_dt * 1e6 + attempts = 0 + + # read the lowest dt (spp = 1) -> dt_min + dt_min = await self._calculate_dwell_time(oversampling, channels, 1, delay) + + if req_dt_us < dt_min: + logging.info(f"Requested dwell time {req_dt} µs is less than minimum dwell time {dt_min} µs " + f"at oversampling {oversampling}.") + return dt_min * 1e-6, 1 + + # guesstimate by assuming it's linear (so spp == req_dt / dt_min). + # It's usually not too bad, but might be a little too small. + spp_min = 1 + spp_max = min(math.ceil(req_dt_us / dt_min), MAX_SAMPLES_PER_PIXEL) + dt_max = await self._calculate_dwell_time(oversampling, channels, spp_max, delay) + logging.debug("Starting with an estimate of dt = %f (spp = %s, osr = %d)", + dt_max, spp_max, oversampling) + + # Make sure we have an upper bound on the dwell time: double until > requested dt + while dt_max < req_dt_us and spp_max < MAX_SAMPLES_PER_PIXEL: + attempts += 1 + spp_min = spp_max + spp_max = min(spp_max * 2, MAX_SAMPLES_PER_PIXEL) + dt_max = await self._calculate_dwell_time(oversampling, channels, spp_max, delay) + logging.debug(f"Updated spp_max to {spp_max} due to dt_max < req_dt.") + + # Dichotomy between spp_min and spp_max: + # need to find spp so that dt <= req_dt but spp+1 -> dt_p1 > req_dt + logging.debug("Will search for dt between %f and %f", dt_min, dt_max) + while spp_min < spp_max: + attempts += 1 + spp = (spp_min + spp_max) // 2 + dt = await self._calculate_dwell_time(oversampling, channels, spp, delay) + dt_p1 = await self._calculate_dwell_time(oversampling, channels, spp 
+ 1, delay) + + if dt <= req_dt_us < dt_p1: + break + elif dt > req_dt_us: + spp_max = spp + else: + spp_min = spp + 1 + else: + logging.debug("Returning spp_min %s as spp_min == spp_max.", spp_min) + spp = spp_min + dt = await self._calculate_dwell_time(oversampling, channels, spp, delay) + + logging.info("Guessing samples per pixel for dt_req = %s µs as dt %s µs = %s spp * %s osr (in %d attempts)", + req_dt_us, dt, spp, oversampling, attempts) + return dt * 1e-6, spp + + @property + def controller_state(self) -> str: + controller_state = self.read_controller_state() + state_name = controller_state.Text + if state_name != self._prev_state: + logging.debug("Controller state changed to %s", state_name) + self._prev_state = state_name + return state_name + + def read_controller_state(self) -> LocalizedText: + """ + :return: The state of the MightyEBIC controller (see STATE_NAME_*) + """ + f = asyncio.run_coroutine_threadsafe(self._read_controller_state(), self._loop) + return f.result() + + @retry_on_connection_error + async def _read_controller_state(self) -> LocalizedText: + ret_val = await self._ebic_state_node.read_value() + return ret_val + + def set_controller_state(self, new_state: State): + f = asyncio.run_coroutine_threadsafe(self._set_controller_state(new_state), self._loop) + f.result() + + @retry_on_connection_error + async def _set_controller_state(self, new_state: State): + await self._ebic_controller_node.call_method(f"{NAMESPACE_INDEX}:set_controller_state", + new_state) + + def calculate_scan_time(self, dt: float, p_fast: int, p_slow: int) -> float: + f = asyncio.run_coroutine_threadsafe(self._calculate_scan_time(round(dt * 1e6), p_fast, p_slow), + self._loop) + return f.result() + + @retry_on_connection_error + async def _calculate_scan_time(self, dt: int, p_fast: int, p_slow: int) -> float: + """ + :param dt: dwell time in μs (not "ms" as the documentation claims!) 
+ :param p_fast: number of pixels in the fast dimension (X) + :param p_slow: number of pixels in the slow dimension (Y) + :return: scan time in s + """ + st = await self._ebic_info_node.call_method(f"{NAMESPACE_INDEX}:calculate_scan_time", + dt, p_fast, p_slow) + return st + + def calculate_dwell_time(self, oversampling: int, channels: int, spp: int, delay: float) -> float: + """ + Computes how long the measurement of one pixel will take, for the given settings. + See _calculate_dwell_time() + :param oversampling: Must be within {0, 2, 4, 8, 16, 32, 64}. Number of times the signal is sampled. + The result is averaged. + :param channels: The number of channels used simultaneously. The server will only use the first N. + :param spp: The number of samples per pixel, which will be averaged in the result. Similar in + behaviour to the "oversampling", but this is done at a higher level, and so is more flexible, + while requiring extra memory on the ephemeron computer. + :param delay: time to wait for each pixel before starting the acquisition (in s). + allows for the signal to reach a steady state before it is measured. + :return: dwell time in s + """ + f = asyncio.run_coroutine_threadsafe(self._calculate_dwell_time(oversampling, channels, spp, delay), + self._loop) + dt_us = f.result() + # If some incorrect parameters are sent, it returns an int <= 0. + if dt_us <= 0: + raise ValueError(f"Invalid calculate_dwell_time() returned error {dt_us}.") + return dt_us * 1e-6 + + @retry_on_connection_error + async def _calculate_dwell_time(self, oversampling: int, channels: int, spp: int, delay: float) -> int: + """ + Wrapper around the OPC-UA method to calculate the dwell time. + See calculate_dwell_time() for more info. 
(the only difference is that this function returns + the dwell time in μs, not in s, and it's a coroutine) + :return: dwell time in μs + """ + if not oversampling in OVERSAMPLING_VALUES: + raise ValueError(f"Oversampling value {oversampling} is not valid.") + # oversampling, channels, spp, delay -> returns dt (int) in μs + dt = await self._ebic_info_node.call_method(f"{NAMESPACE_INDEX}:calculate_dwell_time", + oversampling, channels, spp, delay) + return dt + + def start_trigger_scan(self, oversampling: int, channels: int, spp: int, delay: float, + p_fast: int, p_slow: int, + sim: bool = False, timeout: int = 10): + f = asyncio.run_coroutine_threadsafe( + self._start_trigger_scan(oversampling, channels, spp, delay, p_fast, p_slow, sim, timeout), + self._loop) + return f.result() + + @retry_on_connection_error + async def _start_trigger_scan(self, oversampling: int, channels: int, spp: int, delay: float, + p_fast: int, p_slow: int, + sim: bool = False, timeout: int = 10): + """ + Starts a scan with the MightyEBIC scan controller. + Non-blocking: it returns as soon as the request to start the scan is accepted. + :param oversampling: Must be within {0, 2, 4, 8, 16, 32, 64}. Number of times the signal is sampled. + The result is averaged. + :param channels: The number of channels used simultaneously. The server will only use the first N. + :param spp: The number of samples per pixel, which will be averaged in the result. Similar in + behaviour to the "oversampling", but this is done at a higher level, and so is more flexible, + while requiring extra memory on the ephemeron computer. + :param delay: time to wait for each pixel before starting the acquisition (in s). + allows for the signal to reach a steady state before it is measured. 
+ :param p_fast: number of pixels in the fast dimension (X) + :param p_slow: number of pixels in the slow dimension (Y) + :param sim: If True, the scan will be simulated (by the server) + :param timeout: The maximum time to wait for the trigger for the scan to start (in s). If the trigger + doesn't arrive within this time, the scan will be aborted by the server. + """ + # run the start scan method on the server + logging.debug("Starting EBIC scan, with os=%s, channels=%s, spp=%s, delay=%s, shape = (%s, %s), delay=%s", + oversampling, channels, spp, delay, p_fast, p_slow, sim) + + # oversampling, channels, samples, delay, points_fast, points_slow, sim + await self._ebic_controller_node.call_method(f"{NAMESPACE_INDEX}:start_trigger_scan", + oversampling, channels, spp, delay, + p_fast, p_slow, + sim, timeout) + + def stop_scan(self) -> None: + """ + Cancels the scan, if it's running. + """ + f = asyncio.run_coroutine_threadsafe(self._stop_scan(), self._loop) + return f.result() + + @retry_on_connection_error + async def _stop_scan(self) -> None: + # run the stop scan method on the server + logging.info(f"Stopping EBIC scan..") + await self._ebic_controller_node.call_method(f"{NAMESPACE_INDEX}:stop_scan") + + def get_scan_result(self) -> List[float]: + """ + :return: the raw data of the latest scan result. The order of the data is C (channels), slow, fast, + (listed in order from the fastest to the slowest). So it is *not* in the same order as it was acquired. + It is possible to reconstruct a numpy array by using the shape returned by get_scan_result_shape(). + """ + f = asyncio.run_coroutine_threadsafe(self._get_scan_result(), self._loop) + return f.result() + + @retry_on_connection_error + async def _get_scan_result(self) -> List[float]: + """ + Note: this function can be called several times, and the data will still be available. 
+ :return: See get_scan_result() + """ + scan_result_node = await self._ebic_info_node.get_child(f"{NAMESPACE_INDEX}:scan_result") + raw_data = await scan_result_node.read_value() + return raw_data + + def get_scan_result_shape(self) -> Tuple[int, int, int]: + """ + Returns the shape of the scan result, as used in numpy. + :return: p_fast, p_slow, channels + """ + f = asyncio.run_coroutine_threadsafe(self._get_scan_result_shape(), self._loop) + return tuple(f.result()) + + @retry_on_connection_error + async def _get_scan_result_shape(self) -> List[int]: + """ + :return: p_fast, p_slow, channels + """ + scan_result_shape_node = await self._ebic_info_node.get_child(f"{NAMESPACE_INDEX}:scan_result_shape") + data_shape = await scan_result_shape_node.read_value() + return data_shape + + def get_version(self) -> str: + """ + :return: The software version of the MightyEBIC (server) + """ + f = asyncio.run_coroutine_threadsafe(self._get_version(), self._loop) + return f.result() + + @retry_on_connection_error + async def _get_version(self) -> str: + """ + :return: The software version of the MightyEBIC (server) + """ + v = await self._ebic_info_node.call_method(f"{NAMESPACE_INDEX}:version") + return v + + +# Simulated OPC-UA Server constants +SCAN_ARGS = [ + Argument(Name="oversampling", + DataType=NodeId(ObjectIds.Int64), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Oversampling Value")), + Argument(Name="channels", + DataType=NodeId(ObjectIds.Int64), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Number of Scan Channels")), + Argument(Name="samples", + DataType=NodeId(ObjectIds.Int64), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Number of Samples")), + Argument(Name="delay", + DataType=NodeId(ObjectIds.Float), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Delay time (ms)")), + Argument(Name="points_fast", + DataType=NodeId(ObjectIds.Int64), + ValueRank=-1, + ArrayDimensions=[], + 
Description=LocalizedText("Points Fast")), + Argument(Name="points_slow", + DataType=NodeId(ObjectIds.Int64), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Points Slow")), + Argument(Name="simulate", + DataType=NodeId(ObjectIds.Boolean), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Simulate Scan")), + Argument(Name="timeout", + DataType=NodeId(ObjectIds.Int64), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Timeout (s)")), +] + +DWELLTIME_ARGS = [ + Argument(Name="oversampling", + DataType=NodeId(ObjectIds.Int64), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Oversampling Value")), + Argument(Name="channels", + DataType=NodeId(ObjectIds.Int64), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Number of Scan Channels")), + Argument(Name="samples", + DataType=NodeId(ObjectIds.Int64), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Number of Samples")), + Argument(Name="delay", + DataType=NodeId(ObjectIds.Float), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Delay time (ms)")) +] + +SCANTIME_ARGS = [ + Argument(Name="dwell_time", + DataType=NodeId(ObjectIds.Int64), + ValueRank=-1, ArrayDimensions=[], + Description=LocalizedText("Dwell Time (ms)")), + Argument(Name="points_fast", + DataType=NodeId(ObjectIds.Int64), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Points Fast")), + Argument(Name="points_slow", + DataType=NodeId(ObjectIds.Int64), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Points Slow")) +] + +STATE_ARGS = [ + Argument(Name="new_state", + DataType=NodeId(ObjectIds.String), + ValueRank=-1, + ArrayDimensions=[], + Description=LocalizedText("Requested State")) +] + + +class MightyEBICSimulator(Server): + """ OPC Server class: This class is required for setting up a simulated server and the state machine. 
""" + def __init__(self, url: str, parent_det: MightyEBIC): + super().__init__() + self.state_machine: Optional[StateMachine] = None + self.ebic_state_node: Optional[Node] = None + self.ebic_controller_node: Optional[Node] = None + self.ebic_info_node: Optional[Node] = None + self.scan_time: Optional[float] = None + self.ready = False + self.states = {} + self.terminated = False + self._url = url + self._parent_det = parent_det + self._data_var: Optional[Node] = None + self._data_shape_var: Optional[Node] = None + self._stop_scan = threading.Event() # Set when a request to stop the scan (early) is received + self._dt = 1e-5 + self._server_exception: Optional[str] = None + + self._t_simserver = threading.Thread(target=self._start_opc_simserver) + self._t_simserver.start() + # start the simulated server threaded but wait for it to be ready (running) + while not self.ready: + time.sleep(0.1) + if self._server_exception: + raise self._server_exception + + def terminate(self): + # the opcServer is simulated, stop the controlling thread first + self.terminated = True + self._t_simserver.join() + + def _start_opc_simserver(self): + try: + asyncio.run(self.connect_to_server()) + except ConnectionError: + self._server_exception = ConnectionError(f"Unable to start up the simulated server") + except Exception as ex: + self._server_exception = ex + + async def setup(self) -> None: + """ Set up the server StateMachine, nodes, events, methods and variables. """ + await self.init() + self.set_endpoint(self._url) + + await self.register_namespace(NAMESPACE_ADDRESS) + await self.setup_state_machine() + await self.setup_info_node() + await self.setup_controller() + + async def setup_controller(self) -> None: + """ Set up the EBIC scan controller node and its own methods. 
""" + self.ebic_controller_node = await self.nodes.objects.add_object( + NAMESPACE_INDEX, + EBIC_CONTROLLER_NODE, + ) + + # Add the required methods to the EBIC controller node + await self.ebic_controller_node.add_method(NAMESPACE_INDEX, "start_trigger_scan", self.request_scan_start, SCAN_ARGS, []) + await self.ebic_controller_node.add_method(NAMESPACE_INDEX, "set_controller_state", self.change_state, STATE_ARGS, []) + await self.ebic_controller_node.add_method(NAMESPACE_INDEX, "stop_scan", self.request_scan_stop, [], []) + + # Info section holds updated read-only state variables from the EBIC GUI as well as all "query functions" that + # clients use to compute properties of device scans without actually running them + await self.ebic_info_node.add_method(NAMESPACE_INDEX, "calculate_dwell_time", self.calculate_dwell_time, DWELLTIME_ARGS, + [Argument(Name="dwell_time", + DataType = NodeId(ObjectIds.Int64), + ValueRank = -1, ArrayDimensions = [], + Description = LocalizedText("Dwell Time (ms)"))]) + await self.ebic_info_node.add_method(NAMESPACE_INDEX, "calculate_scan_time", self.calculate_scan_time, SCANTIME_ARGS, + [Argument(Name="scan_time", + DataType = NodeId(ObjectIds.Int64), + ValueRank = -1, ArrayDimensions = [], + Description = LocalizedText("Scan Time (s)")),]) + await self.ebic_info_node.add_method(NAMESPACE_INDEX, "version", self.get_version, [], + [Argument(Name = "version", + DataType = NodeId(ObjectIds.String), + ValueRank = -1, ArrayDimensions = [], + Description = LocalizedText("MightyEBIC Version")),]) + + async def setup_info_node(self): + """ Set up the EBIC info node which will contain data after a successful scan. 
""" + self.ebic_info_node = await self.nodes.objects.add_object( + NAMESPACE_INDEX, + EBIC_INFO_NODE) + + self._data_var = await self.ebic_info_node.add_variable( + NAMESPACE_INDEX, + "scan_result", + (numpy.zeros((1, 1), dtype=numpy.float64)).tolist()) + + self._data_shape_var = await self.ebic_info_node.add_variable( + NAMESPACE_INDEX, + "scan_result_shape", + [1, 1, 1]) + + async def setup_state_machine(self) -> None: + """ Set up the state machine for the server. """ + self.state_machine = StateMachine( + self, + self.nodes.objects, + NAMESPACE_INDEX, + EBIC_STATE_NODE, + ) + + # install the state machine + await self.state_machine.install() + + # create all the states that will be used + self.states[STATE_NAME_IDLE] = State(STATE_NAME_IDLE, STATE_NAME_IDLE, 1, node=None) + await self.state_machine.add_state(self.states[STATE_NAME_IDLE], state_type=ua.NodeId(2309, 0)) + + self.states[STATE_NAME_BUSY] = State(STATE_NAME_BUSY, STATE_NAME_BUSY, 2, node=None) + await self.state_machine.add_state(self.states[STATE_NAME_BUSY]) + + self.states[STATE_NAME_TRIGGER] = State(STATE_NAME_TRIGGER, STATE_NAME_TRIGGER, 2, node=None) + await self.state_machine.add_state(self.states[STATE_NAME_TRIGGER]) + + self.states[STATE_NAME_ERROR] = State(STATE_NAME_ERROR, STATE_NAME_ERROR, 2, node=None) + await self.state_machine.add_state(self.states[STATE_NAME_ERROR]) + + # set the state to IDLE after installation + await self.state_machine.change_state(self.states[STATE_NAME_IDLE]) + + async def connect_to_server(self): + """ + Set up a very basic operational example of an OPC server, it only needs to be alive as long as it is necessary. 
+
+ logging.info("Starting OPC-UA MightyEBIC server simulator")
+ await self.setup()
+
+ async with self:
+ self.ready = True
+ while not self.terminated:
+ await asyncio.sleep(2)
+
+ @uamethod
+ async def get_version(self, parent):
+ return "0.0.1-sim"
+
+ @uamethod
+ async def calculate_dwell_time(self, parent, oversampling: int, channels: int, spp: int, delay: float) -> int:
+ """
+ Calculates the dwell time of each pixel based on channels, samples, delay and oversampling.
+ Time constants are based on the PRU code that drives the AD5764 DAC and AD7608 ADC.
+ This method is a copy of the method Ephemeron uses in their example server code.
+ (see https://bitbucket.org/delmic/delmicephemeron/ )
+ :param parent: NodeId
+ :param oversampling: The oversampling rate that is applied.
+ :param channels: The number of channels used simultaneously.
+ :param spp: The number of samples per pixel used, this value is determined by the requested dt.
+ :param delay: this is a variable delay that allows for the signal to
+ reach a steady state before it is measured.
+ :return: the calculated dwell time in microseconds. 
+ """ + return self._calculate_dwell_time(oversampling, channels, spp, delay) + + def _calculate_dwell_time(self, oversampling: int, channels: int, spp: int, delay: float) -> int: + # All values are in ns + # calculation of delay cycles (delay step is set at 10e-9 default) + delay = numpy.uint32(delay / 10e-9) + trigger = True # Only simulate the trigger mode + + # ADC Time constants + WAIT = 4000 # default conversion rate + OS_scalar = 4500 # Oversampling scalar per OS multiple 0,2,4,8,16,32,64 + CH_scalar = 1365 # Scalar for each channel we need to clock out + samples_scalar = 20 # + + # DAC time constants + DAC_var = 10 # variable delay multiplier in ns + + # ADC sampling Overhead + sample_Overhead = 110 # + + # DAC write Overhead + LOADDAC_OV = 5345 # overhead + DACUP_OV = 200 + + # Calculate how long it takes to do an ADC read with + # if OS is greater than 2 we need to use different scalar + if oversampling >= 2: + WAIT = OS_scalar * oversampling + # for each conversion amount of time it takes clock out each channel + CH_T = CH_scalar * oversampling * channels + + # total ADC READ based on CH and OS + ADCREAD = 120 + CH_T + WAIT + + # total number of conversions at a pixel + LOOP3 = (sample_Overhead + samples_scalar * spp + ADCREAD * spp) + + # time for each DAC update + # DacUpdate not needed in Trigger mode + if trigger: + DACUPDATE = 0 + else: + DACUPDATE = LOADDAC_OV + DAC_var * delay + DACUP_OV + + Dwell_us = (LOOP3 + DACUPDATE) / 1000.0 # convert to us + + return math.ceil(Dwell_us) + + @uamethod + async def calculate_scan_time(self, parent, dt: float, res_fast: int, res_slow: int) -> float: + """ + Calculate and return the scan time based on the dwell time and the resolution. + :param parent: NodeId + :param dt: The requested dwell time. + :param res_fast: The horizontal points of the resolution. + :param res_slow: The vertical points of the resolution. + :return: Scan time in s. 
+ """ + return self._calculate_scan_time(dt, res_fast, res_slow) + + def _calculate_scan_time(self, dt: float, res_fast: int, res_slow: int) -> float: + self._dt = dt + + LOOP2_OV = 40 + LOOP1_OV = 40 + SETUP = 1025 # Overhead to set up scan + + LOOP2 = (dt * 1000 + LOOP2_OV) * res_fast + 5 + LOOP1 = (LOOP2 + LOOP1_OV) * res_slow + + ScanTime_ns = SETUP + LOOP1 # scan time in nanoseconds + ScanTime_s = ScanTime_ns / 1.0e9 + + return ScanTime_s + + @uamethod + async def change_state(self, parent, new_state): + logging.debug(f"Setting StateMachine CurrentState to new state -> {new_state}") + await self.state_machine.change_state(self.states[new_state], transition=None) + + @uamethod + async def request_scan_start(self, parent, + oversampling: int, channels: int, spp: int, delay: float, + points_fast: int, points_slow: int, simulate: bool, timeout: int): + """ + The actual scan implementation on simulated server. + :param parent: NodeId + :param oversampling: The oversampling rate that is applied. + :param channels: The number of channels used simultaneously. + :param spp: The number of samples per pixel used. + :param delay: variable delay that allows for the signal to reach a steady state before it is measured (ms) + :param points_fast: The horizontal points of the resolution. + :param points_slow: The vertical points of the resolution. + :param simulate: Simulate the scan. 
+ :param timeout: maximum time to wait for the trigger (in s) + """ + await self.state_machine.change_state(self.states[STATE_NAME_TRIGGER]) + + # Estimate the scan time, to simulate + dt = self._calculate_dwell_time(oversampling, channels, spp, delay) + scan_time = self._calculate_scan_time(dt, points_fast, points_slow) + self._stop_scan.clear() + acquisition_thread = threading.Thread(target=self.start_trigger_scan, + name="Simulated EBIC acquisition thread", + args=(scan_time, channels, points_fast, points_slow)) + acquisition_thread.start() + + def start_trigger_scan(self, scan_time: float, channels: int, points_fast: int, points_slow: int): + try: + # as the scan time can be tiny, add a 0.1 s overhead + if self._stop_scan.wait(scan_time + 0.1): + # if stop scan is requested return without updating the data + logging.debug("Scan stopped before it was completed") + else: + asyncio.run(self.update_data((points_fast, points_slow, channels))) + + asyncio.run(self.state_machine.change_state(self.states[STATE_NAME_IDLE])) + except Exception as ex: + logging.exception(f"Simulated scan failed") + finally: + logging.debug("Simulated scan thread completed") + + async def update_data(self, shape): + scan_result = numpy.random.rand(*shape) * 10 # between 0 and 10 (mA), as the device typically does + logging.debug(f"Simulating data with shape {scan_result.shape}") + await self._data_shape_var.write_value(shape) + await self._data_var.write_value(scan_result.flatten().tolist()) + + @uamethod + async def request_scan_stop(self, parent): + logging.debug(f"stop_scan requested from client") + self._stop_scan.set() diff --git a/src/odemis/driver/test/ephemeron_test.py b/src/odemis/driver/test/ephemeron_test.py new file mode 100644 index 0000000000..e83a660a0c --- /dev/null +++ b/src/odemis/driver/test/ephemeron_test.py @@ -0,0 +1,278 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on 9 Oct 2024 + +Copyright © 2024-2025 Stefan Sneep & Éric Piel, Delmic + +This 
file is part of Odemis. + +Odemis is free software: you can redistribute it and/or modify it under the terms +of the GNU General Public License version 2 as published by the Free Software +Foundation. + +Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +Odemis. If not, see http://www.gnu.org/licenses/. +""" +import asyncio +import logging +import os +import threading +import time +import unittest +from typing import Optional + +import numpy + +from odemis import model +from odemis.dataio import hdf5 +from odemis.driver import ephemeron, semnidaq +from odemis.driver.ephemeron import MightyEBICSimulator + +logging.basicConfig(format="%(asctime)s %(levelname)-7s %(module)s:%(lineno)d %(message)s") +logging.getLogger().setLevel(logging.DEBUG) + +TEST_NOHW = (os.environ.get("TEST_NOHW", "0") != "0") # Default to Hw testing + +KWARGS_EBIC = { + "name": "EBIC Scan Controller", + "role": "ebic-detector", + "channel": 0, + # If testing is done with the MightyEBIC Software on a VM, use this + # "url": "opc.tcp://192.168.56.2:4840/mightyebic/server/" + "url": "opc.tcp://172.16.0.1:4840/mightyebic/server/" +} + +if TEST_NOHW: + KWARGS_EBIC["url"] = "fake" + +# For the semnidaq driver +CONFIG_SED = { + "name": "sed", + "role": "sed", + "channel": 0, + # "channel": "ao0", # Loopback from the AO0, for testing + "limits": [-3, 6.2] +} + +CONFIG_SCANNER = { + "name": "scanner", + "role": "ebeam", + "channels": [0, 1], + "max_res": [4096, 3072], # px, to force 4:3 ratio + "limits": [[-2.2333, 2.2333], [-1.675, 1.675]], # V + "park": [-5, -5], # V + "settle_time": 120e-6, # s + "scan_active_delay": 0.001, # s + "hfw_nomag": 0.112, + "scanning_ttl": { + 3: [True, True, "external"], # High when scanning, High when VA set to True + 
4: [True, True, None], + }, + "image_ttl": { + "pixel": { + "ports": [16], + "inverted": [True], + }, + }, +} + +CONFIG_SEM = { + "name": "sem", + "role": "sem", + "device": "Dev1", + "multi_detector_min_period": 2e-6, # s, + "children": { + "scanner": CONFIG_SCANNER, + "detector0": CONFIG_SED, + } +} + +class TestMightyEBICSyncAcq(unittest.TestCase): + """ + Test case to test the MightyEBIC detector in a synchronous acquisition with the e-beam + """ + @classmethod + def setUpClass(cls): + cls.ebic = ephemeron.MightyEBIC(**KWARGS_EBIC) + + cls.sem = semnidaq.AnalogSEM(**CONFIG_SEM) + for child in cls.sem.children.value: + if child.name == CONFIG_SED["name"]: + cls.sed = child + elif child.name == CONFIG_SCANNER["name"]: + cls.scanner = child + + @classmethod + def tearDownClass(cls): + cls.ebic.terminate() + cls.sem.terminate() + + def test_acquisition(self): + res = (900, 700) # X,Y + dt = 10e-6 # s + self.ebic.resolution.value = res + self.ebic.dwellTime.value = dt + act_dt_ebic = self.ebic.dwellTime.value + self.scanner.scale.value = (1, 1) + self.scanner.resolution.value = res + self.scanner.dwellTime.value = act_dt_ebic + act_dt_ebeam = self.scanner.dwellTime.value + logging.debug("EBIC will use dt = %s, e-beam will use dt = %s", act_dt_ebic, act_dt_ebeam) + assert act_dt_ebic <= act_dt_ebeam + + # Start acquisition + # Add 10% to the scan time to account roughly for the "flyback time" and other scanning overhead + expected_duration = res[0] * res[1] * act_dt_ebeam * 1.1 + self.ebic_data = None + self.ebeam_data = None + self.ebic_received = threading.Event() + self.ebeam_received = threading.Event() + self.ebic.data.subscribe(self.receive_ebic_data) + time.sleep(0.1) # Gives a bit of time to be ready + self.sed.data.subscribe(self.receive_ebeam_data) + + # Data should arrive approximately at the same time + self.ebeam_received.wait(expected_duration * 1.5) # 50% margin for time-out + self.ebic_received.wait(10) # Gives a bit of margin to the EBIC to receive 
the data + assert self.ebeam_received.is_set() and self.ebic_received.is_set() + + assert self.ebeam_data.shape == self.ebic_data.shape + + # Simulate the acquisition code, and confirm the data can be stored + self.ebic_data.metadata[model.MD_PIXEL_SIZE] = self.ebeam_data.metadata[model.MD_PIXEL_SIZE] + self.ebic_data.metadata[model.MD_POS] = self.ebeam_data.metadata[model.MD_POS] + hdf5.export("test_ebeam.h5", [self.ebeam_data, self.ebic_data]) + + def receive_ebic_data(self, df, d: model.DataArray): + logging.debug("Received EBIC data") + self.ebic_data = d + self.ebic_received.set() + df.unsubscribe(self.receive_ebic_data) + + def receive_ebeam_data(self, df, d: model.DataArray): + logging.debug("Received e-beam data") + self.ebeam_data = d + self.ebeam_received.set() + df.unsubscribe(self.receive_ebeam_data) + + +class TestMightyEBICDetector(unittest.TestCase): + """ + Test case to test the functionality of the driver for an Ephemeron MightyEBIC detector. + """ + @classmethod + def setUpClass(cls): + cls.ebic_det = ephemeron.MightyEBIC(**KWARGS_EBIC) + cls.acquired_data: Optional[model.DataArray] = None + cls.dwell_time_values = [cls.ebic_det.dwellTime.range[0], 6e-6, 10e-6, 11.9e-6, 12e-6, 50e-6, 1.995e-3, 1.996e-3] + + @classmethod + def tearDownClass(cls): + cls.ebic_det.terminate() + + def test_dwell_time(self): + """ + Test for setting the dwell time of the detector, and check that the actual value set + is always less or equal to the requested value. + """ + for dt in self.dwell_time_values: + self.ebic_det.dwellTime.value = dt + self.assertLessEqual(self.ebic_det.dwellTime.value, dt) + + def test_acquisition(self): + """ + Test acquisitions with various dwell times and resolutions. + Note: when testing on the real hardware, there should be a pixel trigger sent (eg, by using + a waveform generator). 
+ """ + for i, dt in enumerate(self.dwell_time_values): + # Set the resolution of the region of acquisition to be non-squared, and not (always) a + # multiple of 2. + if dt < 1e-3: + # Dwell time is short, so we can afford a larger image + res = (140, 500 + i) + else: + # Don't make it too long for the long dwell time + res = (100 + i, 100) + self.ebic_det.resolution.value = res + self.ebic_det.dwellTime.value = dt + + act_dt = self.ebic_det.dwellTime.value + exp_dur = res[0] * res[1] * act_dt # Very minimum duration that it should take + logging.debug("Expected duration for %s px @ %s s: %s s", res, act_dt, exp_dur) + + start_time = time.time() + da = self.ebic_det.data.get() + duration = time.time() - start_time + + # check if the data is in the right shape + self.assertEqual(da.shape[::-1], self.ebic_det.resolution.value) + self.assertEqual(da.dtype.type, numpy.float64) + self.assertGreater(da.metadata[model.MD_ACQ_DATE], start_time) # Should be a tiny bit later + self.assertGreater(duration, exp_dur) + + def test_request_very_high_resolution(self): + """ + Check that a large resolution also works (4096 x 3072) + """ + # set the resolution very high to force a time-out for the start method request on the server + self.ebic_det.resolution.value = (4096, 3072) + self.ebic_det.dwellTime.value = self.ebic_det.dwellTime.range[0] + + da = self.ebic_det.data.get() + + # check if the data is in the right shape + self.assertEqual(da.shape[::-1], self.ebic_det.resolution.value) + self.assertEqual(da.dtype.type, numpy.float64) + + def test_acquisition_stop(self): + # set the resolution of the region of acquisition to 500 x 480, force it non-squared + self.ebic_det.resolution.value = (500, 480) + self.ebic_det.dwellTime.value = 100e-6 + # => ~20s + + self.acquired_data = None + self.ebic_det.data.subscribe(self.receive_ebic_data) + + # stop the acquisition after a few seconds + time.sleep(4) + self.ebic_det.data.unsubscribe(self.receive_ebic_data) + 
self.assertIsNone(self.acquired_data) # there should be no data acquired + + # check if the state is changed to stopped + time.sleep(1) + self.assertEqual(self.ebic_det._opc_client.controller_state, ephemeron.STATE_NAME_IDLE) + + def receive_ebic_data(self, df, d): + self.acquired_data = d + + def test_disconnection(self): + """ + Check the driver can automatically reconnect to the server after a disconnection. + """ + # Can only be automatically tested via the simulator + if not TEST_NOHW: + self.skipTest("Cannot automatically check disconnection with real hardware") + + # Check it works fine before. Changing the dwell time requires (multiple) remote calls, so + # it's a good check on the connection + self.ebic_det.dwellTime.value = 10e-6 + self.assertLessEqual(self.ebic_det.dwellTime.value, 10e-6) + + # Simulate disconnection + self.ebic_det._opc_server_sim.terminate() + time.sleep(1) + logging.debug("Restarting simulator") + self.ebic_det._opc_server_sim = MightyEBICSimulator(self.ebic_det._url, self.ebic_det) + time.sleep(1) + + self.ebic_det.dwellTime.value = 10e-6 + self.assertLessEqual(self.ebic_det.dwellTime.value, 10e-6) + +if __name__ == "__main__": + unittest.main() diff --git a/src/odemis/gui/comp/miccanvas.py b/src/odemis/gui/comp/miccanvas.py index 904750ebea..8ac4765e02 100644 --- a/src/odemis/gui/comp/miccanvas.py +++ b/src/odemis/gui/comp/miccanvas.py @@ -907,6 +907,8 @@ def on_motion(self, evt): if CAN_FOCUS in self.abilities and self.right_dragging: if evt.ShiftDown(): softener = 0.1 # softer + elif evt.ControlDown(): + softener = 10 # faster else: softener = 1 diff --git a/src/odemis/gui/comp/stream_bar.py b/src/odemis/gui/comp/stream_bar.py index 35b39aefd9..89526801b2 100644 --- a/src/odemis/gui/comp/stream_bar.py +++ b/src/odemis/gui/comp/stream_bar.py @@ -68,7 +68,6 @@ class StreamBar(wx.Panel): acq.stream.MonochromatorSettingsStream, acq.stream.CameraCountStream, acq.stream.ScannedTCSettingsStream, - acq.stream.IndependentEBICStream, ) def 
__init__(self, *args, **kwargs): diff --git a/src/odemis/gui/conf/data.py b/src/odemis/gui/conf/data.py index 2f2f7a15a6..fe181f8abd 100644 --- a/src/odemis/gui/conf/data.py +++ b/src/odemis/gui/conf/data.py @@ -167,7 +167,11 @@ "event": wx.EVT_SCROLL_CHANGED # only affects when it's a slider }), ("probeCurrent", { - "event": wx.EVT_SCROLL_CHANGED # only affects when it's a slider + "label": "Beam Current", + "control_type": odemis.gui.CONTROL_SLIDER, + "type": "float", + "scale": "linear", + "event": wx.EVT_SCROLL_CHANGED }), ("spotSize", { "tooltip": "Electron-beam Spot size", @@ -235,6 +239,46 @@ "control_type": odemis.gui.CONTROL_NONE, }), )), + "ion-beam": + OrderedDict(( + ("accelVoltage", { + "label": "Accel. Voltage", + "tooltip": "Accelerating voltage", + "event": wx.EVT_SCROLL_CHANGED # only affects when it's a slider + }), + ("probeCurrent", { + "label": "Beam Current", + "control_type": odemis.gui.CONTROL_SLIDER, + "type": "float", + "scale": "linear", + "event": wx.EVT_SCROLL_CHANGED + }), + ("resolution", { + "label": "Resolution", + "control_type": odemis.gui.CONTROL_COMBO, + "tooltip": "Number of pixels in the image", + "choices": None, + "accuracy": None, # never simplify the numbers + }), + ("dwellTime", { + "control_type": odemis.gui.CONTROL_SLIDER, + "tooltip": "Pixel integration time", + "type": "float", + "accuracy": 3, + "event": wx.EVT_SCROLL_CHANGED + }), + ("horizontalFoV", { + "label": "HFW", + "tooltip": "Horizontal Field Width", + "control_type": odemis.gui.CONTROL_COMBO, + "choices": util.hfw_choices, + }), + ("scale", { + # same as binning (but accepts floats) + "control_type": odemis.gui.CONTROL_NONE, + }), + + )), "ebeam-blanker": OrderedDict(( ("period", { @@ -524,22 +568,12 @@ }, }, "ebic-detector": { - "numberOfChannels": { - "label": "Number of channels", - "control_type": odemis.gui.CONTROL_COMBO, - }, - "spp": { - "label": "Samples per pixel", - "control_type": odemis.gui.CONTROL_RADIO, - "tooltip": "Number of samples per 
pixel", - }, - # For the "independent" detector type (otherwise, these VA's don't exists, so that has no effect) + # For the "independent" detector type (otherwise, these VA's don't exist, so that has no effect) "resolution": { - # It should not be changed directtly, but via the emitter + # It should not be changed directly, but via the emitter "control_type": odemis.gui.CONTROL_NONE, }, "dwellTime": { - # It should not be changed directly, but via the emitter "control_type": odemis.gui.CONTROL_NONE, }, }, @@ -589,10 +623,47 @@ # Keep the same `contrast` and `brigtness` slider order as in the TFS UI OrderedDict(( ("contrast", { + "label": "Contrast", "control_type": odemis.gui.CONTROL_SLIDER, + "tooltip": "Contrast of the electron detector", }), ("brightness", { + "label": "Brightness", "control_type": odemis.gui.CONTROL_SLIDER, + "tooltip": "Brightness of the electron detector", + }), + ("mode", { + "label": "Detector Mode", + "control_type": odemis.gui.CONTROL_COMBO, + "tooltip": "Mode of the electron detector", + }), + ("type", { + "label": "Detector Type", + "control_type": odemis.gui.CONTROL_COMBO, + "tooltip": "Type of the electron detector", + }), + )), + "se-detector-ion": + OrderedDict(( + ("brightness", { + "label": "Brightness", + "control_type": odemis.gui.CONTROL_SLIDER, + "tooltip": "Brightness of the ion detector", + }), + ("contrast", { + "label": "Contrast", + "control_type": odemis.gui.CONTROL_SLIDER, + "tooltip": "Contrast of the ion detector", + }), + ("mode", { + "label": "Detector Mode", + "control_type": odemis.gui.CONTROL_COMBO, + "tooltip": "Mode of the ion detector", + }), + ("type", { + "label": "Detector Type", + "control_type": odemis.gui.CONTROL_COMBO, + "tooltip": "Type of the ion detector", }), )), } @@ -772,121 +843,18 @@ }, }, "meteor" : { - "e-beam": - OrderedDict(( - ("accelVoltage", { - "label": "Accel. 
Voltage", - "tooltip": "Accelerating voltage", - "event": wx.EVT_SCROLL_CHANGED # only affects when it's a slider - }), - ("probeCurrent", { - "label": "Beam Current", - "control_type": odemis.gui.CONTROL_SLIDER, - "type": "float", - "scale": "linear", - "event": wx.EVT_SCROLL_CHANGED - }), - ("horizontalFoV", { - "label": "HFW", - "tooltip": "Horizontal Field Width", - "control_type": odemis.gui.CONTROL_COMBO, - "choices": util.hfw_choices, - }), - ("dwellTime", { - "control_type": odemis.gui.CONTROL_SLIDER, - "tooltip": "Pixel integration time", - "type": "float", - "accuracy": 3, - "event": wx.EVT_SCROLL_CHANGED - }), - ("scale", { - # same as binning (but accepts floats) + "e-beam": { + "scale": { "control_type": odemis.gui.CONTROL_NONE, - }), - ("resolution", { - "label": "Resolution", - "control_type": odemis.gui.CONTROL_COMBO, - "tooltip": "Number of pixels in the image", - "choices": None, - "accuracy": None, # never simplify the numbers - }), - )), - "ion-beam": - OrderedDict(( - ("accelVoltage", { - "label": "Accel. 
Voltage", - "tooltip": "Accelerating voltage", - "event": wx.EVT_SCROLL_CHANGED # only affects when it's a slider - }), - ("probeCurrent", { - "label": "Beam Current", - "control_type": odemis.gui.CONTROL_SLIDER, - "type": "float", - "scale": "linear", - "event": wx.EVT_SCROLL_CHANGED - }), - ("resolution", { + }, + "resolution": { "label": "Resolution", "control_type": odemis.gui.CONTROL_COMBO, "tooltip": "Number of pixels in the image", "choices": None, "accuracy": None, # never simplify the numbers - }), - ("dwellTime", { - "control_type": odemis.gui.CONTROL_SLIDER, - "tooltip": "Pixel integration time", - # "range": (1e-9, 1), - # "scale": "log", - "type": "float", - "accuracy": 3, - "event": wx.EVT_SCROLL_CHANGED - }), - ("horizontalFoV", { - "label": "HFW", - "tooltip": "Horizontal Field Width", - "control_type": odemis.gui.CONTROL_COMBO, - "choices": util.hfw_choices, - # "accuracy": 3, - }), - ("scale", { - # same as binning (but accepts floats) - "control_type": odemis.gui.CONTROL_NONE, - # "tooltip": "Pixel resolution preset", - # means will make sure both dimensions are treated as one - # "choices": util.binning_1d_from_2d, - }), - - )), - "se-detector": - OrderedDict(( - ("brightness", { - "label": "Brightness", - }), - ("contrast", { - "label": "Contrast", - }), - ("detector_mode", { - "label": "Detector Mode", - }), - ("detector_type", { - "label": "Detector Type", - }), - )), - "se-detector-ion": - OrderedDict(( - ("brightness", { - "label": "Brightness", - }), - ("contrast", { - "label": "Contrast", - }), - ("detector_mode", { - "label": "Detector Mode", - }), - ("detector_type", { - "label": "Detector Type", - }), - )), + } + }, } } diff --git a/src/odemis/gui/cont/stream_bar.py b/src/odemis/gui/cont/stream_bar.py index a504542329..abc923ce5a 100644 --- a/src/odemis/gui/cont/stream_bar.py +++ b/src/odemis/gui/cont/stream_bar.py @@ -1667,7 +1667,7 @@ def addAR(self): def addEBIC(self, **kwargs): main_data = self._main_data_model - if 
model.hasVA(main_data.ebic, "resolution"): + if model.hasVA(main_data.ebic, "resolution") and model.hasVA(main_data.ebic, "dwellTime"): ebic_stream = acqstream.IndependentEBICStream( "EBIC", main_data.ebic, @@ -2241,7 +2241,6 @@ def clear_feature_streams(self): # Remove the panels, and indirectly it will clear the view v = self._feature_view for sc in self.stream_controllers.copy(): - logging.warning(f"attempting to remove stream: {sc.stream}") if not isinstance(sc.stream, StaticSEMStream): logging.warning("Unexpected non static stream: %s", sc.stream) continue @@ -2263,7 +2262,7 @@ def clear(self, clear_model=True): """ Remove all the streams, from the GUI (view, stream panels) Must be called in the main GUI thread. - :param clear_model: unused, but required because of external api + :param clear_model: unused, but required because of StreamBarController api """ # clear the graphical part self._stream_bar.clear() diff --git a/src/odemis/gui/model/_constants.py b/src/odemis/gui/model/_constants.py index e762f30eaf..29288afba9 100644 --- a/src/odemis/gui/model/_constants.py +++ b/src/odemis/gui/model/_constants.py @@ -20,6 +20,9 @@ Odemis. If not, see http://www.gnu.org/licenses/. 
""" + +from enum import Enum + # The different states of a microscope STATE_OFF = 0 STATE_ON = 1 @@ -96,3 +99,7 @@ CALIBRATION_1 = "Calibration 1" CALIBRATION_2 = "Calibration 2" CALIBRATION_3 = "Calibration 3" + +class AcquisitionMode(Enum): + FLM = 1 + FIBSEM = 2 diff --git a/src/odemis/gui/model/stream_view.py b/src/odemis/gui/model/stream_view.py index 23d4c34edd..41f19c57c5 100644 --- a/src/odemis/gui/model/stream_view.py +++ b/src/odemis/gui/model/stream_view.py @@ -25,7 +25,8 @@ import queue import threading import time -from typing import Tuple, Dict +from concurrent.futures import Future +from typing import Dict, Optional, Tuple from odemis import model from odemis.acq.stream import DataProjection, RGBSpatialProjection, Stream, StreamTree @@ -432,7 +433,7 @@ def moveStageToView(self): shift = (view_pos[0] - prev_pos["x"], view_pos[1] - prev_pos["y"]) return self.moveStageBy(shift) - def moveStageTo(self, pos: Tuple[float, float]): + def moveStageTo(self, pos: Tuple[float, float]) -> Optional[Future]: """ Request an absolute move of the stage to a given position @@ -452,7 +453,7 @@ def moveStageTo(self, pos: Tuple[float, float]): f.add_done_callback(self._on_stage_move_done) return f - def clipToStageLimits(self, pos: Tuple[float, float]) -> Dict[str, float]: + def clipToStageLimits(self, pos: Dict[str, float]) -> Dict[str, float]: """ Clip current position in x/y direction to the maximum allowed stage limits. 
diff --git a/src/odemis/gui/model/tab_gui_data.py b/src/odemis/gui/model/tab_gui_data.py index 52c3244227..1b53e2be1d 100644 --- a/src/odemis/gui/model/tab_gui_data.py +++ b/src/odemis/gui/model/tab_gui_data.py @@ -442,8 +442,6 @@ def __init__(self, main): self.patterns = model.ListVA() self.view_posture = model.VigilantAttribute(SEM_IMAGING) - self.is_sem_active_view: bool = False - self.is_fib_active_view: bool = False def _on_project_path_change(self, _): config = conf.get_acqui_conf() diff --git a/src/odemis/gui/win/acquisition.py b/src/odemis/gui/win/acquisition.py index 884fd883e5..0a8e128cda 100644 --- a/src/odemis/gui/win/acquisition.py +++ b/src/odemis/gui/win/acquisition.py @@ -1420,11 +1420,12 @@ def LoadProjectFileDialog( projectname: str, message: str = "Choose a project directory to load", ) -> Optional[str]: + """ :param parent (wx.Frame): parent window :param projectname (string): project name to propose by default :param message (string): message to display in the dialog - :return (string or none): the project directory name to load (or the none if the user cancelled) + :return (string or None): the project directory name to load (or None if the user cancelled) """ # current project name dialog = wx.DirDialog( @@ -1433,30 +1434,11 @@ def LoadProjectFileDialog( defaultPath=projectname, style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST, ) - - # Show the dialog and check whether is was accepted or cancelled - if dialog.ShowModal() != wx.ID_OK: - return None - - # project path have been selected... 
- return dialog.GetPath() - -def SelectFileDialog( - parent: wx.Frame, - message: str, - default_path: str, -) -> Optional[str]: - """ - :param parent (wx.Frame): parent window - :param message (string): message to display in the dialog - :param default_path (string): default path to open the dialog - :return (string or none): the selected file name (or the none if the user cancelled) - """ - dialog = wx.FileDialog( + dialog = wx.DirDialog( parent, message=message, - defaultDir=default_path, - style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST, + defaultPath=projectname, + style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST, ) # Show the dialog and check whether is was accepted or cancelled diff --git a/src/odemis/model/_metadata.py b/src/odemis/model/_metadata.py index ae82b69b49..8b02e53a64 100644 --- a/src/odemis/model/_metadata.py +++ b/src/odemis/model/_metadata.py @@ -231,6 +231,9 @@ MD_FAV_SEM_POS_ACTIVE = "Favourite SEM position active" # dict -> float representing the position required for SEM imaging MD_FAV_FIB_POS_ACTIVE = "Favourite FIB position active" # dict -> float representing the position required for FIB imaging MD_FAV_MILL_POS_ACTIVE = "Favourite Milling position active" # dict -> float representing the position required for milling +# NOTE: The milling angle is the angle measured in clockwise direction between the sample plane and FIB axes. +# Assuming sample plane axis is positive from left to right and FIB imaging axis is positive from up to down. +# This rx is converted to the stage tilt when the user moves to the milling position. # The following metadata is used to store the destination components of the # specific known positions for the actuators.