diff --git a/nidn/fdtd_integration/compute_fdtd_grid_scaling.py b/nidn/fdtd_integration/compute_fdtd_grid_scaling.py
new file mode 100644
index 0000000..10a2a92
--- /dev/null
+++ b/nidn/fdtd_integration/compute_fdtd_grid_scaling.py
@@ -0,0 +1,22 @@
+import torch
+from .constants import FDTD_GRID_SCALE
+from ..utils.global_constants import UNIT_MAGNITUDE
+
+
+def _compute_fdtd_grid_scaling(cfg):
+    """Compute the scaling of the FDTD grid.
+
+    Args:
+        cfg (DotMap): Run config
+
+    Returns:
+        torch.Tensor: The grid scaling, i.e. the number of grid points per unit magnitude. It is the maximum of the ratio between the unit magnitude
+        and 1/10th of the smallest wavelength, and a configurable minimum which defaults to 10. If the scaling becomes too low, i.e. below 2,
+        creating the grid may fail because there are too few grid points for certain elements to be placed correctly.
+    """
+    return torch.maximum(
+        torch.tensor(
+            UNIT_MAGNITUDE / (cfg.physical_wavelength_range[0] * FDTD_GRID_SCALE)
+        ),
+        torch.tensor(cfg.FDTD_min_gridpoints_per_unit_magnitude),
+    )
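For reference, a minimal worked example of the helper above. The config values are hypothetical, and it is assumed that UNIT_MAGNITUDE is 1e-6 m (one micrometer) and FDTD_GRID_SCALE is 0.1, i.e. the target of ten grid points per smallest wavelength described in the docstring:

    from dotmap import DotMap
    from nidn.fdtd_integration.compute_fdtd_grid_scaling import _compute_fdtd_grid_scaling

    cfg = DotMap()
    cfg.physical_wavelength_range = [3e-7, 1e-6]  # smallest wavelength: 300 nm (example value)
    cfg.FDTD_min_gridpoints_per_unit_magnitude = 10.0

    # max(1e-6 / (3e-7 * 0.1), 10) = max(33.3, 10), i.e. roughly 33 grid points per
    # unit magnitude, corresponding to a grid spacing of about 30 nm.
    scaling = _compute_fdtd_grid_scaling(cfg)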
diff --git a/nidn/fdtd_integration/compute_spectrum_fdtd.py b/nidn/fdtd_integration/compute_spectrum_fdtd.py
index ae3b7b5..772ed26 100644
--- a/nidn/fdtd_integration/compute_spectrum_fdtd.py
+++ b/nidn/fdtd_integration/compute_spectrum_fdtd.py
@@ -4,6 +4,7 @@ import torch
 
 from nidn.fdtd_integration.constants import FDTD_GRID_SCALE
+from nidn.fdtd_integration.compute_fdtd_grid_scaling import _compute_fdtd_grid_scaling
 from nidn.utils.global_constants import UNIT_MAGNITUDE
 
 from ..trcwa.get_frequency_points import get_frequency_points
 
@@ -31,9 +32,17 @@ def compute_spectrum_fdtd(permittivity, cfg: DotMap):
     logger.debug(physical_wavelengths)
     logger.debug("Number of layers: " + str(len(permittivity[0, 0, :, 0])))
 
+    if (len(cfg.PER_LAYER_THICKNESS) == 1) and (cfg.N_layers > 1):
+        cfg.PER_LAYER_THICKNESS = cfg.PER_LAYER_THICKNESS * cfg.N_layers
+
+    cfg.FDTD_grid_scaling = _compute_fdtd_grid_scaling(cfg)
     # For each wavelength, calculate transmission and reflection coefficents
     disable_progress_bar = logger._core.min_level >= 20
     for i in tqdm(range(len(physical_wavelengths)), disable=disable_progress_bar):
+
+        _check_if_enough_timesteps(
+            cfg, physical_wavelengths[i], permittivity[:, :, :, i]
+        )
         logger.debug("Simulating for wavelenght: {}".format(physical_wavelengths[i]))
         transmission_signal = []
         reflection_signal = []
@@ -153,3 +162,57 @@ def _average_along_detector(signal):
             s[2] += p[2] / len(signal[i])
         avg[i] = s
     return avg
+
+
+def _summed_thickness_times_sqrt_permittivity(thicknesses, permittivity):
+    """
+    Helper function to calculate the sum of the product of the thickness and
+    the square root of the permittivity for each layer in a material stack.
+
+    Args:
+        thicknesses (tensor): tensor with the thickness of each layer of the material stack
+        permittivity (tensor): tensor with the relative permittivity for each layer of the material stack
+
+    Returns:
+        tensor: sum of thickness times sqrt(e_r) over all layers
+    """
+    summed_permittivity = 0
+    for i in range(len(thicknesses)):
+        summed_permittivity += thicknesses[i] * torch.sqrt(permittivity.real.max())
+    return summed_permittivity
+
+
+def _check_if_enough_timesteps(cfg: DotMap, wavelength, permittivity):
+    """
+    Function to find the recommended minimum number of timesteps.
+    The signal should have passed through the material, been reflected back to the start of the material, and been reflected again to the
+    rear detector before the signal is assumed to have reached steady state. The maximum permittivity is used for all layers, in case patterned layers are introduced in the future.
+
+    Args:
+        cfg (DotMap): config
+        wavelength (float): wavelength of the simulation
+        permittivity (tensor): tensor of relative permittivities for each layer
+    """
+    wavelengths_of_steady_state_signal = 5
+    number_of_internal_reflections = 3
+    recommended_timesteps = int(
+        (
+            (
+                cfg.FDTD_free_space_distance
+                + number_of_internal_reflections
+                * _summed_thickness_times_sqrt_permittivity(
+                    cfg.PER_LAYER_THICKNESS, permittivity
+                )
+                + wavelengths_of_steady_state_signal * wavelength / UNIT_MAGNITUDE
+            )
+            * torch.sqrt(torch.tensor(2.0))
+            * cfg.FDTD_grid_scaling
+        ).item()
+    )
+    logger.debug("Minimum recommended timesteps: {}".format(recommended_timesteps))
+    if cfg.FDTD_niter < recommended_timesteps:
+        logger.warning(
+            "The number of timesteps should be increased to at least {} to ensure that the simulation result remains physically accurate.".format(
+                recommended_timesteps
+            )
+        )
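To make the recommendation above concrete, a rough worked example with hypothetical values: FDTD_free_space_distance = 1 (in units of UNIT_MAGNITUDE), a single layer of thickness 1 whose maximum relative permittivity is 4, a wavelength of 1e-6 m with UNIT_MAGNITUDE assumed to be 1e-6 m, and a grid scaling of 10. The sqrt(2) factor presumably reflects the 2D Courant limit, under which a wave needs roughly sqrt(2) timesteps to advance one grid cell:

    optical_path = 1 + 3 * (1 * 4 ** 0.5) + 5 * (1e-6 / 1e-6)  # free space + internal reflections + steady-state margin = 12
    recommended_timesteps = int(optical_path * 2 ** 0.5 * 10)   # = 169, so any FDTD_niter below this triggers the warning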
diff --git a/nidn/fdtd_integration/init_fdtd.py b/nidn/fdtd_integration/init_fdtd.py
index ccb8064..ef4ab8f 100644
--- a/nidn/fdtd_integration/init_fdtd.py
+++ b/nidn/fdtd_integration/init_fdtd.py
@@ -13,7 +13,6 @@
     PointSource,
 )
 from ..utils.global_constants import EPS_0, PI, SPEED_OF_LIGHT, UNIT_MAGNITUDE
-from .constants import FDTD_GRID_SCALE
 
 
 def init_fdtd(cfg: DotMap, include_object, wavelength, permittivity):
@@ -29,50 +28,40 @@ def init_fdtd(cfg: DotMap, include_object, wavelength, permittivity):
         fdtd:Grid: Grid with all the added object, ready to be run
     """
     set_backend("torch")
-    if (len(cfg.PER_LAYER_THICKNESS) == 1) and (cfg.N_layers > 1):
-        cfg.PER_LAYER_THICKNESS = cfg.PER_LAYER_THICKNESS * cfg.N_layers
-    thicknesses = torch.tensor(cfg.PER_LAYER_THICKNESS)
-    # The scaling is the number of grid points per unit magnitude. This is the maximum of the relation between the unit magnitude and 1/10th of the smallest wavelength,
-    # and a constant which is defaulted to 10. If this scaling becomes too low, i.e. below 2, there might be some errors in creating the grid,
-    # as there are too few grid points for certain elements to be placed correctly.
-    scaling = torch.maximum(
-        torch.tensor(
-            UNIT_MAGNITUDE / (cfg.physical_wavelength_range[0] * FDTD_GRID_SCALE)
-        ),
-        torch.tensor(cfg.FDTD_min_gridpoints_per_unit_magnitude),
-    )
     x_grid_size = (
-        scaling
+        cfg.FDTD_grid_scaling
         * (
             cfg.FDTD_pml_thickness * 2
             + cfg.FDTD_free_space_distance * 2
-            + torch.sum(thicknesses)
+            + torch.sum(torch.tensor(cfg.PER_LAYER_THICKNESS))
         )
     ).int()
     y_grid_size = 3
     logger.debug(
         "Initializing FDTD grid with size {} by {} grid points, with a scaling factor of {} grid points per um".format(
-            x_grid_size, y_grid_size, scaling
+            x_grid_size, y_grid_size, cfg.FDTD_grid_scaling
         )
     )
     grid = Grid(
         (x_grid_size.item(), y_grid_size, 1),
-        grid_spacing=UNIT_MAGNITUDE / scaling,
+        grid_spacing=UNIT_MAGNITUDE / cfg.FDTD_grid_scaling,
         permittivity=1.0,
         permeability=1.0,
     )
-    grid = _add_boundaries(grid, int(cfg.FDTD_pml_thickness * scaling))
+    grid = _add_boundaries(grid, int(cfg.FDTD_pml_thickness * cfg.FDTD_grid_scaling))
 
     # Determine detector positions
     detector_x = (
-        cfg.FDTD_pml_thickness + cfg.FDTD_free_space_distance + torch.sum(thicknesses)
+        cfg.FDTD_pml_thickness
+        + cfg.FDTD_free_space_distance
+        + torch.sum(torch.tensor(cfg.PER_LAYER_THICKNESS))
     )
     detector_y = torch.tensor(cfg.FDTD_pml_thickness + cfg.FDTD_free_space_distance)
-    detector_x *= scaling
-    detector_y *= scaling
+    detector_x *= cfg.FDTD_grid_scaling
+    detector_y *= cfg.FDTD_grid_scaling
     detector_x += cfg.FDTD_gridpoints_from_material_to_detector
     detector_y -= cfg.FDTD_gridpoints_from_material_to_detector
@@ -89,8 +78,8 @@ def init_fdtd(cfg: DotMap, include_object, wavelength, permittivity):
         detector_y.int().item(),
     )
 
-    source_x = (cfg.FDTD_source_position[0] * scaling).int()
-    source_y = (cfg.FDTD_source_position[1] * scaling).int()
+    source_x = (cfg.FDTD_source_position[0] * cfg.FDTD_grid_scaling).int()
+    source_y = (cfg.FDTD_source_position[1] * cfg.FDTD_grid_scaling).int()
 
     logger.trace(
         "Placing source at "
@@ -110,20 +99,20 @@ def init_fdtd(cfg: DotMap, include_object, wavelength, permittivity):
     if include_object:
         x_start = torch.tensor(cfg.FDTD_pml_thickness + cfg.FDTD_free_space_distance)
-        x_end = x_start + thicknesses[0]
+        x_end = x_start + cfg.PER_LAYER_THICKNESS[0]
         for i in range(permittivity.shape[2]):
             # TODO: Implement possibility for patterned grid, currently uniform layer is used
             grid = _add_object(
                 grid,
-                (scaling * x_start).int(),
-                (scaling * x_end).int(),
+                (cfg.FDTD_grid_scaling * x_start).int(),
+                (cfg.FDTD_grid_scaling * x_end).int(),
                 permittivity[0][0][i],
                 frequency=SPEED_OF_LIGHT / wavelength,
             )
             if i < permittivity.shape[2] - 1:
-                x_start += thicknesses[i]
-                x_end += thicknesses[i + 1]
+                x_start += cfg.PER_LAYER_THICKNESS[i]
+                x_end += cfg.PER_LAYER_THICKNESS[i + 1]
             else:
                 break
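As a sanity check on the grid construction above, a small worked example with hypothetical config values, assuming UNIT_MAGNITUDE is 1e-6 m: FDTD_pml_thickness = 1.5, FDTD_free_space_distance = 1.0 and a single layer of thickness 1.0 (all in units of UNIT_MAGNITUDE), with cfg.FDTD_grid_scaling = 10:

    x_grid_size = int(10 * (1.5 * 2 + 1.0 * 2 + 1.0))  # = 60 grid points along x
    y_grid_size = 3                                     # the grid is only a few cells wide in y
    grid_spacing = 1e-6 / 10                            # = 100 nm between grid points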
diff --git a/nidn/tests/fdtd_test.py b/nidn/tests/fdtd_test.py
index cfc1793..de3ece9 100644
--- a/nidn/tests/fdtd_test.py
+++ b/nidn/tests/fdtd_test.py
@@ -1,8 +1,8 @@
-from re import T
-from numpy import NaN, require
 import torch
 
-from nidn.utils.global_constants import EPS_0, PI, SPEED_OF_LIGHT
+from nidn.utils.global_constants import EPS_0, PI, SPEED_OF_LIGHT, UNIT_MAGNITUDE
+from nidn.fdtd_integration.constants import FDTD_GRID_SCALE
+from nidn.fdtd_integration.compute_fdtd_grid_scaling import _compute_fdtd_grid_scaling
 
 from ..fdtd_integration.init_fdtd import init_fdtd
 from ..fdtd_integration.compute_spectrum_fdtd import _get_detector_values
@@ -28,6 +28,9 @@
         cfg.N_freq,
         "linear",
     )
+
+    cfg.FDTD_grid_scaling = _compute_fdtd_grid_scaling(cfg)
+
     layer_builder = LayerBuilder(cfg)
     eps_grid[:, :, 0, :] = layer_builder.build_uniform_layer("titanium_oxide")
     eps_grid[:, :, 1, :] = layer_builder.build_uniform_layer("germanium")
@@ -186,6 +189,7 @@ def test_deviation_from_original_fdtd():
         cfg.N_freq,
         cfg.freq_distribution,
     )
+    cfg.FDTD_grid_scaling = _compute_fdtd_grid_scaling(cfg)
     eps_grid = torch.zeros(cfg.Nx, cfg.Ny, cfg.N_layers, cfg.N_freq, dtype=torch.cfloat)
     layer_builder = LayerBuilder(cfg)
     eps_grid[:, :, 0, :] = layer_builder.build_uniform_layer("titanium_oxide")
diff --git a/nidn/tests/training_test.py b/nidn/tests/training_test.py
index 01e52e1..55d58d6 100644
--- a/nidn/tests/training_test.py
+++ b/nidn/tests/training_test.py
@@ -1,3 +1,5 @@
+from nidn.fdtd_integration.compute_fdtd_grid_scaling import _compute_fdtd_grid_scaling
+
 from ..training.run_training import run_training
 from ..utils.load_default_cfg import load_default_cfg
@@ -52,6 +54,7 @@ def test_rcwa_training():
 def test_fdtd_training():
     cfg = _setup()
     cfg.solver = "FDTD"
+    cfg.FDTD_grid_scaling = _compute_fdtd_grid_scaling(cfg)
     run_training(cfg)
     # Should have at least learned something
     assert cfg.results.loss_log[-1] < 0.3
diff --git a/nidn/utils/validate_config.py b/nidn/utils/validate_config.py
index 7b2225c..de54dc8 100644
--- a/nidn/utils/validate_config.py
+++ b/nidn/utils/validate_config.py
@@ -56,7 +56,7 @@ def _check_for_keys(cfg: DotMap):
     # Some keys that may be set in the cfg during runtime
     optional_keys = ["target_frequencies","FDTD_grid","model","out_features","results","best_model_state_dict","material_collection",
-                    "N_materials"]
+                    "N_materials", "thicknesses","FDTD_grid_scaling"]
     # fmt: on
     for key in required_keys:
         if key not in cfg:
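For completeness, the end-to-end flow after this change, mirroring the updated test_fdtd_training and assuming the top-level nidn API exposes load_default_cfg and run_training as in the project README:

    import nidn
    from nidn.fdtd_integration.compute_fdtd_grid_scaling import _compute_fdtd_grid_scaling

    cfg = nidn.load_default_cfg()
    cfg.solver = "FDTD"
    # Precompute the grid scaling so that config validation and direct calls to
    # init_fdtd can rely on it; compute_spectrum_fdtd also sets this key itself at runtime.
    cfg.FDTD_grid_scaling = _compute_fdtd_grid_scaling(cfg)
    nidn.run_training(cfg)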