diff --git a/ImageD11/nbGui/S3DXRD/0_segment_and_label.ipynb b/ImageD11/nbGui/S3DXRD/0_segment_and_label.ipynb index 2ab4308d..722d8e05 100755 --- a/ImageD11/nbGui/S3DXRD/0_segment_and_label.ipynb +++ b/ImageD11/nbGui/S3DXRD/0_segment_and_label.ipynb @@ -23,30 +23,24 @@ }, "outputs": [], "source": [ - "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n", - "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )" + "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())" ] }, { "cell_type": "code", "execution_count": null, - "id": "5726795e-91cf-40cf-b3a9-b114de84e017", + "id": "c3bddb80-39f9-4cd7-9cc1-59fc8d240c24", "metadata": { - "tags": [] + "tags": [ + "parameters" + ] }, "outputs": [], "source": [ - "# Import needed packages\n", - "%matplotlib ipympl\n", - "import pprint\n", - "import numpy as np\n", - "import ImageD11.sinograms.dataset\n", - "import ImageD11.sinograms.lima_segmenter\n", - "import ImageD11.sinograms.assemble_label\n", - "import ImageD11.sinograms.properties\n", - "import ImageD11.nbGui.nb_utils as utils\n", - "from ImageD11.nbGui import segmenter_gui\n", + "# this cell is tagged with 'parameters'\n", + "# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n", "\n", + "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n", "\n", "# Experts : update these files for your detector if you need to\n", "maskfile = \"/data/id11/nanoscope/Eiger/eiger_mask_E-08-0144_20240205.edf\"\n", @@ -57,7 +51,36 @@ "dtymotor = 'dty'\n", "\n", "# Default segmentation options\n", - "options = { 'cut' : 1, 'pixels_in_spot' : 3, 'howmany' : 100000 }" + "options = { 'cut' : 1, 'pixels_in_spot' : 3, 'howmany' : 100000 }\n", + "\n", + "# EXPERTS: These can be provided as papermill parameters. Users, leave these as None for now...\n", + "dataroot = None\n", + "analysisroot = None\n", + "sample = None\n", + "dataset = None\n", + "\n", + "dset_prefix = \"top_\" # some common string in the names of the datasets if processing multiple scans" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5726795e-91cf-40cf-b3a9-b114de84e017", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Import needed packages\n", + "%matplotlib ipympl\n", + "import pprint\n", + "import numpy as np\n", + "import ImageD11.sinograms.dataset\n", + "import ImageD11.sinograms.lima_segmenter\n", + "import ImageD11.sinograms.assemble_label\n", + "import ImageD11.sinograms.properties\n", + "import ImageD11.nbGui.nb_utils as utils\n", + "from ImageD11.nbGui import segmenter_gui" ] }, { @@ -70,7 +93,8 @@ "outputs": [], "source": [ "# Set up the file paths. 
Edit this if you are not at ESRF or not using the latest data policy.\n", - "dataroot, analysisroot = segmenter_gui.guess_ESRF_paths() \n", + "if dataroot is None:\n", + " dataroot, analysisroot = segmenter_gui.guess_ESRF_paths() \n", "\n", "if len(dataroot)==0:\n", " print(\"Please fix in the dataroot and analysisroot folder names above!!\")\n", @@ -102,7 +126,8 @@ "outputs": [], "source": [ "# USER: Decide which sample\n", - "sample = 'FeAu_0p5_tR_nscope'" + "if sample is None:\n", + " sample = 'FeAu_0p5_tR_nscope'" ] }, { @@ -128,7 +153,8 @@ "outputs": [], "source": [ "# USER: Decide which dataset\n", - "dataset = \"top_100um\"" + "if dataset is None:\n", + " dataset = \"top_100um\"" ] }, { @@ -258,19 +284,6 @@ "Therefore notebooks 4 and onwards should work from either the tomo or pbp route." ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "703d22d0-ef82-4e08-8087-c57e76e16de1", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "if 1:\n", - " raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")" - ] - }, { "cell_type": "code", "execution_count": null, @@ -280,13 +293,16 @@ }, "outputs": [], "source": [ + "# you can optionally skip samples\n", + "# skips_dict = {\n", + "# \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + "# }\n", + "# otherwise by default skip nothing:\n", "skips_dict = {\n", - " \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + " ds.sample: []\n", "}\n", "\n", - "dset_prefix = \"top_\" # some common string in the names of the datasets (*?)\n", - "\n", - "sample_list = [\"FeAu_0p5_tR_nscope\"]\n", + "sample_list = [ds.sample, ]\n", " \n", "samples_dict = utils.find_datasets_to_process(dataroot, skips_dict, dset_prefix, sample_list)\n", "\n", diff --git a/ImageD11/nbGui/S3DXRD/4_visualise.ipynb b/ImageD11/nbGui/S3DXRD/4_visualise.ipynb index 6511fa53..914a5899 100755 --- a/ImageD11/nbGui/S3DXRD/4_visualise.ipynb +++ b/ImageD11/nbGui/S3DXRD/4_visualise.ipynb @@ -28,10 +28,43 @@ "\n", "os.environ['OMP_NUM_THREADS'] = '1'\n", "os.environ['OPENBLAS_NUM_THREADS'] = '1'\n", - "os.environ['MKL_NUM_THREADS'] = '1'\n", + "os.environ['MKL_NUM_THREADS'] = '1'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# this cell is tagged with 'parameters'\n", + "# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n", + "\n", + "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n", "\n", - "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n", - "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )" + "# dataset file to import\n", + "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", + "\n", + "# which phase to index\n", + "phase_str = 'Si'\n", + "\n", + "# the minimum number of peaks you want a pixel to have to be counted\n", + "min_unique = 400\n", + "\n", + "dset_prefix = \"top_\" # some common string in the names of the datasets if processing multiple scans" ] }, { @@ -69,8 +102,6 @@ "source": [ "# USER: Pass path to dataset file\n", "\n", - "dset_file = 
'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", - "\n", "ds = ImageD11.sinograms.dataset.load(dset_file)\n", " \n", "sample = ds.sample\n", @@ -105,7 +136,6 @@ "outputs": [], "source": [ "# now let's select a phase to index from our parameters json\n", - "phase_str = 'Fe'\n", "\n", "ref_ucell = ds.phases.unitcells[phase_str]\n", "\n", @@ -122,7 +152,8 @@ "source": [ "# import refinement manager\n", "\n", - "refine = PBPRefine.from_h5(ds.refmanfile)" + "refmanpath = os.path.splitext(ds.refmanfile)[0] + f'_{phase_str}.h5'\n", + "refine = PBPRefine.from_h5(refmanpath)" ] }, { @@ -148,8 +179,6 @@ "source": [ "# choose the minimum number of peaks you want a pixel to have to be counted\n", "\n", - "min_unique = 400\n", - "\n", "refine.refinedmap.choose_best(min_unique)\n", "\n", "# refine.refinedmap.choose_best(min_unique)" @@ -187,7 +216,7 @@ "\n", "for i in range(3):\n", " for j in range(3):\n", - " axs[i,j].imshow(refine.refinedmap.best_eps[:, :, i, j], origin=\"lower\", cmap=cmap, norm=normalizer)\n", + " axs[i,j].imshow(refine.refinedmap.best_eps[:, :, i, j], origin=\"lower\", cmap=cmap, norm=normalizer, interpolation='nearest')\n", " axs[i,j].set_title(f'eps_{i+1}{j+1}')\n", "fig.supxlabel('< Lab Y axis')\n", "fig.supylabel('Lab X axis')\n", @@ -274,7 +303,7 @@ "\n", "for i in range(3):\n", " for j in range(3):\n", - " axs[i,j].imshow(tmap.eps_sample[0, ..., i, j], origin=\"lower\", cmap=cmap, norm=normalizer)\n", + " axs[i,j].imshow(tmap.eps_sample[0, ..., i, j], origin=\"lower\", cmap=cmap, norm=normalizer, interpolation='nearest')\n", " axs[i,j].set_title(f'eps_{i+1}{j+1}')\n", "fig.supxlabel('Lab X axis --->')\n", "fig.supylabel('Lab Y axis --->')\n", @@ -330,25 +359,27 @@ "metadata": {}, "outputs": [], "source": [ - "# save the refined TensorMap to disk\n", - "\n", - "tmap.to_h5(os.path.join(ds.analysispath, 'pbp_tensormap_refined.h5'))\n", - "tmap.to_paraview(os.path.join(ds.analysispath, 'pbp_tensormap_refined.h5'))" + "# if we have a previous tomographic TensorMap, we can try to get the labels map too:\n", + "try:\n", + " tmap_tomo = TensorMap.from_h5(ds.grainsfile, h5group='TensorMap_' + phase_str)\n", + " tmap.add_map('labels', tmap_tomo.labels)\n", + "except (FileNotFoundError, OSError, KeyError):\n", + " # couldn't find one, continue anyway\n", + " pass" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ - "# you can also do an MTEX export if you like:\n", + "# save the refined TensorMap to disk\n", "\n", - "ctf_path = os.path.join(ds.analysispath, 'pbp_tensormap_refined.ctf')\n", + "refined_tmap_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_{phase_str}.h5')\n", "\n", - "tmap.to_ctf_mtex(ctf_path, z_index=0)" + "tmap.to_h5(refined_tmap_path)\n", + "tmap.to_paraview(refined_tmap_path)" ] }, { @@ -359,17 +390,22 @@ }, "outputs": [], "source": [ - "ds.save()" + "# you can also do an MTEX export if you like:\n", + "\n", + "refined_ctf_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_{phase_str}.ctf')\n", + "\n", + "tmap.to_ctf_mtex(refined_ctf_path, z_index=0)" ] }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [], "source": [ - "if 1:\n", - " raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")" + "ds.save()" ] }, { @@ -382,15 +418,18 @@ "# by default this will do all samples in sample_list, all 
datasets with a prefix of dset_prefix\n", "# you can add samples and datasets to skip in skips_dict\n", "\n", + "# you can optionally skip samples\n", + "# skips_dict = {\n", + "# \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + "# }\n", + "# otherwise by default skip nothing:\n", "skips_dict = {\n", - " \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + " ds.sample: []\n", "}\n", "\n", - "dset_prefix = \"top\"\n", + "sample_list = [ds.sample, ]\n", "\n", - "sample_list = [\"FeAu_0p5_tR_nscope\"]\n", - " \n", - "samples_dict = utils.find_datasets_to_process(ds.dataroot, skips_dict, dset_prefix, sample_list)\n", + "samples_dict = utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list)\n", " \n", "# manual override:\n", "# samples_dict = {\"FeAu_0p5_tR_nscope\": [\"top_100um\", \"top_150um\"]}\n", @@ -410,15 +449,19 @@ " ds = ImageD11.sinograms.dataset.load(dset_path)\n", " print(f\"I have a DataSet {ds.dset} in sample {ds.sample}\")\n", " \n", - " if not os.path.exists(ds.refoutfile):\n", + " refoutpath = os.path.splitext(ds.refoutfile)[0] + f'_{phase_str}.h5'\n", + " refmanpath = os.path.splitext(ds.refmanfile)[0] + f'_{phase_str}.h5'\n", + "\n", + " if not os.path.exists(refoutpath):\n", " print(f\"Couldn't find PBP refinement output file for {dataset} in sample {sample}, skipping\")\n", " continue\n", " \n", - " if os.path.exists(os.path.join(ds.analysispath, 'pbp_tensormap_refined.h5')):\n", + " refined_tmap_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_{phase_str}.h5')\n", + " if os.path.exists(refined_tmap_path):\n", " print(f\"Already have refined TensorMap output file for {dataset} in sample {sample}, skipping\")\n", " continue\n", " \n", - " refine = PBPRefine.from_h5(ds.refmanfile)\n", + " refine = PBPRefine.from_h5(refmanpath)\n", " refine.refinedmap.choose_best(min_unique)\n", " \n", " # first let's work out what phase we have\n", @@ -437,10 +480,18 @@ " tmap.get_ipf_maps()\n", " eul = tmap.euler\n", " \n", - " tmap.to_h5(os.path.join(ds.analysispath, 'pbp_tensormap_refined.h5'))\n", - " tmap.to_paraview(os.path.join(ds.analysispath, 'pbp_tensormap_refined.h5'))\n", - " ctf_path = os.path.join(ds.analysispath, 'pbp_tensormap_refined.ctf')\n", - " tmap.to_ctf_mtex(ctf_path, z_index=0)\n", + " # if we have a previous tomographic TensorMap, we can try to get the labels map too:\n", + " try:\n", + " tmap_tomo = TensorMap.from_h5(ds.grainsfile, h5group='TensorMap_' + phase_str)\n", + " tmap.add_map('labels', tmap_tomo.labels)\n", + " except (FileNotFoundError, OSError, KeyError):\n", + " # couldn't find one, continue anyway\n", + " pass\n", + " \n", + " tmap.to_h5(refined_tmap_path)\n", + " tmap.to_paraview(refined_tmap_path)\n", + " refined_ctf_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_{phase_str}.ctf')\n", + " tmap.to_ctf_mtex(refined_ctf_path, z_index=0)\n", "\n", " ds.save()\n", "\n", diff --git a/ImageD11/nbGui/S3DXRD/5_combine_phases.ipynb b/ImageD11/nbGui/S3DXRD/5_combine_phases.ipynb index eb32bb25..38b7188d 100755 --- a/ImageD11/nbGui/S3DXRD/5_combine_phases.ipynb +++ b/ImageD11/nbGui/S3DXRD/5_combine_phases.ipynb @@ -13,12 +13,39 @@ { "cell_type": "code", "execution_count": null, - "id": "94b89030-fdb2-47d2-bc26-3e5cfb0d6509", + "id": "636849a2-54fd-44ce-aca3-cb8e7e945e59", "metadata": {}, "outputs": [], "source": [ - "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n", - "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( 
os.environ['HOME'],'Code'), 'ImageD11_git' )" + "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "94b89030-fdb2-47d2-bc26-3e5cfb0d6509", + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# this cell is tagged with 'parameters'\n", + "# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n", + "\n", + "# python environment stuff\n", + "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n", + "\n", + "# dataset file to import\n", + "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", + "\n", + "phase_strs = ['Fe', 'Au']\n", + "\n", + "# whether or not we are combining refined tensormaps (changes where we look for them)\n", + "combine_refined = True\n", + "\n", + "dset_prefix = \"top_\" # some common string in the names of the datasets if processing multiple scans" ] }, { @@ -63,12 +90,11 @@ "source": [ "# USER: Pass path to dataset file\n", "\n", - "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", - "\n", "ds = ImageD11.sinograms.dataset.load(dset_file)\n", " \n", "sample = ds.sample\n", "dataset = ds.dsname\n", + "rawdata_path = ds.dataroot\n", "processed_data_root_dir = ds.analysisroot\n", "\n", "print(ds)\n", @@ -93,21 +119,24 @@ { "cell_type": "code", "execution_count": null, - "id": "6e51945b-3baa-4a2b-99bc-b97972a99081", - "metadata": { - "tags": [] - }, + "id": "58bababb-461c-41a1-898d-378418fdc4f4", + "metadata": {}, "outputs": [], "source": [ - "# now let's select a phase to index from our parameters json\n", - "major_phase_str = 'Fe'\n", - "minor_phase_str = 'Au'\n", - "\n", - "major_phase_unitcell = ds.phases.unitcells[major_phase_str]\n", - "minor_phase_unitcell = ds.phases.unitcells[minor_phase_str]\n", + "# what phases are we merging?\n", "\n", - "print(major_phase_str, major_phase_unitcell.lattice_parameters, major_phase_unitcell.spacegroup)\n", - "print(minor_phase_str, minor_phase_unitcell.lattice_parameters, minor_phase_unitcell.spacegroup)" + "print(*[ds.phases.unitcells[phase_str].lattice_parameters for phase_str in phase_strs], sep='\\n')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9e458287-439b-4dff-ac5b-2855a1f94f19", + "metadata": {}, + "outputs": [], + "source": [ + "# choose where to import your TensorMaps from\n", + "# if you refined them, you'll need to change the below paths to point to the separate refined tensormap h5 files." 
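
For context on the "parameters" cell tags introduced throughout this patch: these notebooks are designed to be driven headlessly, with the tagged cell overridden at execution time. A minimal sketch, assuming papermill is installed; the notebook filenames and parameter values below are illustrative only, mirroring the tagged cell above:

```python
# Minimal papermill driver sketch (assumes papermill is available).
# Values passed in `parameters` replace the variables defined in the
# notebook cell tagged 'parameters'; the paths here are illustrative.
import papermill as pm

pm.execute_notebook(
    "5_combine_phases.ipynb",          # input notebook
    "5_combine_phases_out.ipynb",      # executed copy, with outputs
    parameters={
        "dset_file": "processed/sample/sample_dataset.h5",  # illustrative
        "phase_strs": ["Fe", "Au"],
        "combine_refined": True,
        "dset_prefix": "top_",
    },
)
```
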
] }, { @@ -119,21 +148,25 @@ }, "outputs": [], "source": [ - "tensor_map_major = TensorMap.from_h5(ds.grainsfile, h5group='TensorMap_' + major_phase_str)\n", - "tensor_map_minor = TensorMap.from_h5(ds.grainsfile, h5group='TensorMap_' + minor_phase_str)" + "if combine_refined:\n", + " tensor_maps = [TensorMap.from_h5(os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_{phase_str}.h5')) for phase_str in phase_strs]\n", + "else:\n", + " tensor_maps = [TensorMap.from_h5(ds.grainsfile, h5group='TensorMap_' + phase_str) for phase_str in phase_strs]" ] }, { "cell_type": "code", "execution_count": null, - "id": "3e547336-885e-490f-96ab-8ab6626c04e3", - "metadata": { - "tags": [] - }, + "id": "1cf5e081-feda-4dee-a260-36f4f286ec1a", + "metadata": {}, "outputs": [], "source": [ - "tensor_map_major.plot('labels')\n", - "tensor_map_minor.plot('labels')" + "try:\n", + " for tmap in tensor_maps:\n", + " tmap.plot('labels')\n", + "except KeyError:\n", + " # no labels field\n", + " pass" ] }, { @@ -145,7 +178,7 @@ }, "outputs": [], "source": [ - "tensor_map_combined = TensorMap.from_combine_phases([tensor_map_major, tensor_map_minor])" + "tensor_map_combined = TensorMap.from_combine_phases(tensor_maps)" ] }, { @@ -158,7 +191,11 @@ "outputs": [], "source": [ "tensor_map_combined.plot('phase_ids')\n", - "tensor_map_combined.plot('labels')\n", + "try:\n", + " tensor_map_combined.plot('labels')\n", + "except KeyError:\n", + " # no labels field\n", + " pass\n", "tensor_map_combined.plot('ipf_z')" ] }, @@ -171,8 +208,12 @@ }, "outputs": [], "source": [ - "tensor_map_combined.to_h5(os.path.join(ds.analysispath, 'combined_map.h5'))\n", - "tensor_map_combined.to_paraview(os.path.join(ds.analysispath, 'combined_map.h5'))" + "if combine_refined:\n", + " tensor_map_combined.to_h5(os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_combined.h5'))\n", + " tensor_map_combined.to_paraview(os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_combined.h5'))\n", + "else:\n", + " tensor_map_combined.to_h5(os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_tmap_combined.h5'))\n", + " tensor_map_combined.to_paraview(os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_tmap_combined.h5'))" ] }, { @@ -181,7 +222,64 @@ "id": "319bf9ac-16ca-4492-9bd4-e5eb1979fd86", "metadata": {}, "outputs": [], - "source": [] + "source": [ + "# We can run the below cell to do this in bulk for many samples/datasets\n", + "# by default this will do all samples in sample_list, all datasets with a prefix of dset_prefix\n", + "# you can add samples and datasets to skip in skips_dict\n", + "\n", + "# you can optionally skip samples\n", + "# skips_dict = {\n", + "# \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + "# }\n", + "# otherwise by default skip nothing:\n", + "skips_dict = {\n", + " ds.sample: []\n", + "}\n", + "\n", + "sample_list = [ds.sample, ]\n", + "\n", + "samples_dict = utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list)\n", + " \n", + "# manual override:\n", + "# samples_dict = {\"FeAu_0p5_tR_nscope\": [\"top_100um\", \"top_150um\"]}\n", + " \n", + "# now we have our samples_dict, we can process our data:\n", + "\n", + "for sample, datasets in samples_dict.items():\n", + " for dataset in datasets:\n", + " print(f\"Processing dataset {dataset} in sample {sample}\")\n", + " dset_path = os.path.join(ds.analysisroot, sample, f\"{sample}_{dataset}\", f\"{sample}_{dataset}_dataset.h5\")\n", + " if not os.path.exists(dset_path):\n", + " print(f\"Missing 
DataSet file for {dataset} in sample {sample}, skipping\")\n", + " continue\n", + " \n", + " print(\"Importing DataSet object\")\n", + " \n", + " ds = ImageD11.sinograms.dataset.load(dset_path)\n", + " print(f\"I have a DataSet {ds.dset} in sample {ds.sample}\")\n", + " \n", + " if combine_refined:\n", + " combined_tmap_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_combined.h5')\n", + " else:\n", + " combined_tmap_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_tmap_combined.h5')\n", + " \n", + " if os.path.exists(combined_tmap_path):\n", + " print(f\"Already have combined TensorMap output file for {dataset} in sample {sample}, skipping\")\n", + " continue\n", + " \n", + " if combine_refined:\n", + " tensor_maps = [TensorMap.from_h5(os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_{phase_str}.h5')) for phase_str in phase_strs]\n", + " else:\n", + " tensor_maps = [TensorMap.from_h5(ds.grainsfile, h5group='TensorMap_' + phase_str) for phase_str in phase_strs]\n", + " tensor_map_combined = TensorMap.from_combine_phases(tensor_maps)\n", + " \n", + " tensor_map_combined.to_h5(combined_tmap_path)\n", + " tensor_map_combined.to_paraview(combined_tmap_path)\n", + " \n", + " ds.save()\n", + "\n", + "print(\"Done!\")" + ] } ], "metadata": { diff --git a/ImageD11/nbGui/S3DXRD/6_stack_layers.ipynb b/ImageD11/nbGui/S3DXRD/6_stack_layers.ipynb index d843781b..1b4ec193 100644 --- a/ImageD11/nbGui/S3DXRD/6_stack_layers.ipynb +++ b/ImageD11/nbGui/S3DXRD/6_stack_layers.ipynb @@ -21,8 +21,40 @@ }, "outputs": [], "source": [ - "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n", - "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )" + "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e744a4a3-53f9-4fe5-a452-90a1b8dd8d6d", + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# this cell is tagged with 'parameters'\n", + "# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n", + "\n", + "# python environment stuff\n", + "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n", + "\n", + "# dataset file to import\n", + "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", + "\n", + "dset_prefix = \"top_\"\n", + "\n", + "# are you looking to stack phase-combined layers?\n", + "stack_combined = True\n", + "# otherwise what is the phase you're interested in stacking?\n", + "phase_str = 'Fe'\n", + "# are the layers strain refined?\n", + "stack_refined = True\n", + "\n", + "# what's the z step in microns?\n", + "zstep = None" ] }, { @@ -68,10 +100,6 @@ "metadata": {}, "outputs": [], "source": [ - "# USER: Pass path to dataset file\n", - "\n", - "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", - "\n", "ds = ImageD11.sinograms.dataset.load(dset_file)\n", " \n", "sample = ds.sample\n", @@ -111,8 +139,6 @@ " ds.sample: []\n", "}\n", "\n", - "dset_prefix = \"top\"\n", - "\n", "sample_list = [ds.sample]\n", "\n", "for sample in sample_list:\n", @@ -139,11 +165,7 @@ "source": [ "# pick a specific sample to continue\n", "\n", - "sample = 'FeAu_0p5_tR_nscope'\n", - "\n", - "# pick the name of the H5 group to import the TensorMap 
from\n",
-    "\n",
-    "tmap_h5group = 'TensorMap_YSZ_refined'\n",
+    "sample = ds.sample\n",
    "\n",
    "from collections import OrderedDict\n",
    "\n",
@@ -155,7 +177,17 @@
    "    ds = ImageD11.sinograms.dataset.load(dset_path)\n",
    "    \n",
    "    # read the tensormaps\n",
-    "    ds.tensor_map = TensorMap.from_h5(ds.grainsfile, h5group=tmap_h5group)\n",
+    "    # choose where to import your TensorMaps from\n",
+    "    \n",
+    "    if stack_combined and stack_refined:\n",
+    "        ds.tensor_map = TensorMap.from_h5(os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_combined.h5'))\n",
+    "    elif stack_combined:\n",
+    "        ds.tensor_map = TensorMap.from_h5(os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_tmap_combined.h5'))\n",
+    "    elif stack_refined:\n",
+    "        ds.tensor_map = TensorMap.from_h5(os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_refined_tmap_{phase_str}.h5'))\n",
+    "    else:\n",
+    "        ds.tensor_map = TensorMap.from_h5(os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_tmap_{phase_str}.h5'))\n",
+    "\n",
    "    ds_dict[dataset] = ds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
-    "# in this case, first scan has the Z motor at its lowest value\n",
+    "# in this case, the first scan has the Z motor at its lowest value\n",
    "# you should double-check this for your data!\n",
    "# this means we're scanning the highest region on the sample first\n",
    "# then moving down in the sample as we increase the Z number\n",
    "\n",
    "# in our case we assume it's the same as the y step\n",
    "# this may not be true for you!!!\n",
    "\n",
-    "zstep = ds.ystep\n",
+    "if zstep is None:\n",
+    "    zstep = ds.ystep\n",
    "\n",
    "combined_tensormap = TensorMap.from_stack([ds.tensor_map for ds in list(ds_dict.values())][::-1], zstep=zstep)"
   ]
  },
+  {
+   "cell_type": "markdown",
+   "id": "e7a2f626-258d-4c53-93f5-92e6411f9846",
+   "metadata": {},
+   "source": [
+    "# note: if a labels array makes it into the combined tensormap, it is not modified during the stacking procedure!\n",
+    "# labels are original to their own layer"
+   ]
+  },
  {
   "cell_type": "code",
   "execution_count": null,
@@ -207,8 +249,10 @@
   "source": [
    "# export to file\n",
    "\n",
-    "combined_tensormap.to_h5('combined.h5')\n",
-    "combined_tensormap.to_paraview('combined.h5')"
+    "output_path = os.path.join(ds.analysisroot, f'{ds.sample}_stacked.h5')\n",
+    "\n",
+    "combined_tensormap.to_h5(output_path)\n",
+    "combined_tensormap.to_paraview(output_path)"
   ]
  },
  {
diff --git a/ImageD11/nbGui/S3DXRD/import_test_data.ipynb b/ImageD11/nbGui/S3DXRD/import_test_data.ipynb
index 53fc2ec5..b1eacf22 100644
--- a/ImageD11/nbGui/S3DXRD/import_test_data.ipynb
+++ b/ImageD11/nbGui/S3DXRD/import_test_data.ipynb
@@ -28,13 +28,27 @@
 {
  "cell_type": "code",
  "execution_count": null,
- "id": "6c9f1867-2a3c-4b01-83d4-431a7177ebcc",
+ "id": "6f943f39-15ab-4bda-b0d9-38ed46f06b5a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+  "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())"
+ ]
+},
+{
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7630a0b8-d900-47ca-bb87-6f54df591887",
 "metadata": {
-  "tags": []
+  "tags": [
+   "parameters"
+  ]
 },
 "outputs": [],
 "source": [
-  "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n",
+  "# this cell is tagged with 'parameters'\n",
+  "# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n",
+  "download_dir = '/data/id11/inhouse2/test_data_3DXRD/S3DXRD/Si_cube'\n",
  "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( 
os.environ['HOME'],'Code'), 'ImageD11_git' )" ] }, @@ -82,8 +96,6 @@ "source": [ "# USER: change this as you like!\n", "\n", - "download_dir = 'si_cube_test'\n", - "\n", "if not os.path.exists(download_dir):\n", " os.mkdir(download_dir)" ] @@ -158,7 +170,8 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.6" - } + }, + "toc-showcode": false }, "nbformat": 4, "nbformat_minor": 5 diff --git a/ImageD11/nbGui/S3DXRD/pbp_1_indexing.ipynb b/ImageD11/nbGui/S3DXRD/pbp_1_indexing.ipynb index ac38b4d4..2a72ac47 100755 --- a/ImageD11/nbGui/S3DXRD/pbp_1_indexing.ipynb +++ b/ImageD11/nbGui/S3DXRD/pbp_1_indexing.ipynb @@ -36,10 +36,60 @@ "\n", "os.environ['OMP_NUM_THREADS'] = '1'\n", "os.environ['OPENBLAS_NUM_THREADS'] = '1'\n", - "os.environ['MKL_NUM_THREADS'] = '1'\n", + "os.environ['MKL_NUM_THREADS'] = '1'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# this cell is tagged with 'parameters'\n", + "# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n", + "\n", + "# python environment stuff\n", + "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n", + "\n", + "# dataset file to import\n", + "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", + "\n", + "# which phase to index\n", + "phase_str = 'Si'\n", + "\n", + "# filter the columnfile to discard weak peaks\n", + "minpkint = 5\n", "\n", - "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n", - "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )" + "# point-by-point parameters\n", + "hkl_tol = 0.025\n", + "fpks = 0.9\n", + "ds_tol = 0.004\n", + "etacut = 0.1\n", + "ifrac = 5e-3\n", + "y0 = 0.0\n", + "symmetry = \"cubic\"\n", + "foridx = [0, 1, 3, 5, 7]\n", + "forgen = [1, 5, 7]\n", + "uniqcut = 0.85\n", + "use_cluster = False\n", + "\n", + "# EXPERTS: Can specify par_file as a parameter if you want\n", + "par_file = None\n", + "\n", + "dset_prefix = \"top_\" # some common string in the names of the datasets if processing multiple scans" ] }, { @@ -72,8 +122,6 @@ "source": [ "# USER: Pass path to dataset file\n", "\n", - "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", - "\n", "ds = ImageD11.sinograms.dataset.load(dset_file)\n", " \n", "sample = ds.sample\n", @@ -95,7 +143,8 @@ "source": [ "# USER: specify the path to the parameter file\n", "\n", - "par_file = os.path.join(processed_data_root_dir, 'pars.json')\n", + "if par_file is None:\n", + " par_file = os.path.join(processed_data_root_dir, 'pars.json')\n", "\n", "# add them to the dataset\n", "\n", @@ -127,7 +176,6 @@ "outputs": [], "source": [ "# now let's select a phase to index from our parameters json\n", - "phase_str = 'Si'\n", "\n", "ucell = ds.phases.unitcells[phase_str]\n", "\n", @@ -163,8 +211,6 @@ "source": [ "# filter the columnfile to discard weak peaks\n", "\n", - "minpkint = 5\n", - "\n", "cf_2d.filter(cf_2d.Number_of_pixels > minpkint)" ] }, @@ -176,22 +222,28 @@ }, "outputs": [], "source": [ + "cosine_tol=np.cos(np.radians(90 - ds.ostep))\n", + 
"\n", "pbp_object = ImageD11.sinograms.point_by_point.PBP(ds.parfile,\n", " ds,\n", - " hkl_tol=0.025,\n", - " fpks=0.9,\n", - " ds_tol=0.004,\n", - " etacut=0.1,\n", - " ifrac=5e-3,\n", - " cosine_tol=np.cos(np.radians(90 - ds.ostep)),\n", - " y0=0.0,\n", - " symmetry=\"cubic\",\n", - " foridx=[0, 1, 3, 5, 7],\n", - " forgen=[1, 5, 7],\n", - " uniqcut=0.85,\n", + " hkl_tol=hkl_tol,\n", + " fpks=fpks,\n", + " ds_tol=ds_tol,\n", + " etacut=etacut,\n", + " ifrac=ifrac,\n", + " cosine_tol=cosine_tol,\n", + " y0=y0,\n", + " symmetry=symmetry,\n", + " foridx=foridx,\n", + " forgen=forgen,\n", + " uniqcut=uniqcut,\n", " phase_name=phase_str)\n", "\n", - "pbp_object.setpeaks(cf_2d)" + "# make icolf filename phase-aware\n", + "icolf_filename = ds.icolfile.replace('.h5', f'_{phase_str}.h5')\n", + "grains_filename = ds.pbpfile.replace('.txt', f'_{phase_str}.txt')\n", + "\n", + "pbp_object.setpeaks(cf_2d, icolf_filename=icolf_filename)" ] }, { @@ -213,13 +265,11 @@ }, "outputs": [], "source": [ - "use_cluster = True\n", - "\n", "if use_cluster:\n", " bash_script_path = utils.prepare_pbp_bash(pbp_object, PYTHONPATH, minpkint)\n", " utils.slurm_submit_and_wait(bash_script_path, 15)\n", "else:\n", - " pbp_object.point_by_point(ds.pbpfile, loglevel=3)" + " pbp_object.point_by_point(grains_filename, loglevel=3)" ] }, { @@ -233,16 +283,6 @@ "ds.save()" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "if 1:\n", - " raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")" - ] - }, { "cell_type": "code", "execution_count": null, @@ -255,23 +295,24 @@ "# by default this will do all samples in sample_list, all datasets with a prefix of dset_prefix\n", "# you can add samples and datasets to skip in skips_dict\n", "\n", + "# you can optionally skip samples\n", + "# skips_dict = {\n", + "# \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + "# }\n", + "# otherwise by default skip nothing:\n", "skips_dict = {\n", - " \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + " ds.sample: []\n", "}\n", "\n", - "dset_prefix = \"top\"\n", + "sample_list = [ds.sample, ]\n", + "\n", + "samples_dict = utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list)\n", "\n", - "sample_list = [\"FeAu_0p5_tR_nscope\"]\n", - " \n", - "samples_dict = utils.find_datasets_to_process(ds.dataroot, skips_dict, dset_prefix, sample_list)\n", - " \n", "# manual override:\n", "# samples_dict = {\"FeAu_0p5_tR_nscope\": [\"top_100um\", \"top_150um\"]}\n", " \n", "# now we have our samples_dict, we can process our data:\n", "\n", - "first_pbp_object = pbp_object\n", - "\n", "sbats = []\n", "for sample, datasets in samples_dict.items():\n", " for dataset in datasets:\n", @@ -304,21 +345,23 @@ " \n", " pbp_object = ImageD11.sinograms.point_by_point.PBP(ds.parfile,\n", " ds,\n", - " hkl_tol=first_pbp_object.hkl_tol,\n", - " fpks=first_pbp_object.fpks,\n", - " ds_tol=first_pbp_object.ds_tol,\n", - " etacut=first_pbp_object.etacut,\n", - " ifrac=first_pbp_object.ifrac,\n", - " cosine_tol=first_pbp_object.cosine_tol,\n", - " y0=first_pbp_object.y0,\n", - " symmetry=first_pbp_object.symmetry,\n", - " foridx=first_pbp_object.foridx,\n", - " forgen=first_pbp_object.forgen,\n", - " uniqcut=first_pbp_object.uniqcut,\n", - " phase_name=first_pbp_object.phase_name)\n", + " hkl_tol=hkl_tol,\n", + " fpks=fpks,\n", + " ds_tol=ds_tol,\n", + " etacut=etacut,\n", + " ifrac=ifrac,\n", + " cosine_tol=cosine_tol,\n", + " y0=y0,\n", + " 
symmetry=symmetry,\n", + " foridx=foridx,\n", + " forgen=forgen,\n", + " uniqcut=uniqcut,\n", + " phase_name=phase_str)\n", + " \n", + " icolf_filename = ds.icolfile.replace('.h5', f'_{phase_str}.h5')\n", + " grains_filename = ds.pbpfile.replace('.txt', f'_{phase_str}.txt')\n", + " pbp_object.setpeaks(cf_2d, icolf_filename=icolf_filename)\n", " \n", - " pbp_object.setpeaks(cf_2d)\n", - "\n", " if use_cluster:\n", " # get the sbat and submit them all at once\n", " bash_script_path = utils.prepare_pbp_bash(pbp_object, PYTHONPATH, minpkint)\n", @@ -326,7 +369,7 @@ " # utils.slurm_submit_and_wait(bash_script_path, 15)\n", " else:\n", " # do it locally\n", - " pbp_object.point_by_point(ds.pbpfile, loglevel=3)\n", + " pbp_object.point_by_point(grains_filename, loglevel=3)\n", " \n", " ds.save()\n", " if use_cluster:\n", diff --git a/ImageD11/nbGui/S3DXRD/pbp_2_visualise.ipynb b/ImageD11/nbGui/S3DXRD/pbp_2_visualise.ipynb index cb18f519..86aabebd 100755 --- a/ImageD11/nbGui/S3DXRD/pbp_2_visualise.ipynb +++ b/ImageD11/nbGui/S3DXRD/pbp_2_visualise.ipynb @@ -26,8 +26,35 @@ }, "outputs": [], "source": [ - "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n", - "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )" + "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# this cell is tagged with 'parameters'\n", + "# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n", + "\n", + "# python environment stuff\n", + "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n", + "\n", + "# dataset file to import\n", + "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", + "\n", + "# which phase to index\n", + "phase_str = 'Si'\n", + "\n", + "# the minimum number of peaks you want a pixel to have to be counted\n", + "min_unique = 20\n", + "\n", + "dset_prefix = \"top_\" # some common string in the names of the datasets if processing multiple scans" ] }, { @@ -65,8 +92,6 @@ "source": [ "# USER: Pass path to dataset file\n", "\n", - "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", - "\n", "ds = ImageD11.sinograms.dataset.load(dset_file)\n", " \n", "sample = ds.sample\n", @@ -101,7 +126,6 @@ "outputs": [], "source": [ "# now let's select a phase to index from our parameters json\n", - "phase_str = 'Fe'\n", "\n", "ref_ucell = ds.phases.unitcells[phase_str]\n", "\n", @@ -118,7 +142,7 @@ "source": [ "# load the point-by-point map\n", "\n", - "pmap = PBPMap(ds.pbpfile)" + "pmap = PBPMap(ds.pbpfile.replace('.txt', f'_{phase_str}.txt'))" ] }, { @@ -144,8 +168,6 @@ "source": [ "# choose the minimum number of peaks you want a pixel to have to be counted\n", "\n", - "min_unique = 20\n", - "\n", "pmap.choose_best(min_unique)" ] }, @@ -270,8 +292,12 @@ "source": [ "# save the TensorMap to disk\n", "\n", - "tmap.to_h5(os.path.join(ds.analysispath, 'pbp_tensormap.h5'))\n", - "tmap.to_paraview(os.path.join(ds.analysispath, 'pbp_tensormap.h5'))" + "tmap_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_tmap_{phase_str}.h5')\n", + "\n", + "tmap.to_h5(tmap_path)\n", + "tmap.to_paraview(tmap_path)\n", + "\n", + "print(tmap_path)" ] }, { @@ 
-282,7 +308,7 @@ "source": [ "# you can also do an MTEX export if you like:\n", "\n", - "ctf_path = os.path.join(ds.analysispath, 'pbp_tensormap.ctf')\n", + "ctf_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_tmap_{phase_str}.ctf')\n", "\n", "tmap.to_ctf_mtex(ctf_path, z_index=0)" ] @@ -292,7 +318,66 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "# We can run the below cell to do this in bulk for many samples/datasets\n", + "# by default this will do all samples in sample_list, all datasets with a prefix of dset_prefix\n", + "# you can add samples and datasets to skip in skips_dict\n", + "\n", + "# you can optionally skip samples\n", + "# skips_dict = {\n", + "# \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + "# }\n", + "# otherwise by default skip nothing:\n", + "skips_dict = {\n", + " ds.sample: []\n", + "}\n", + "\n", + "sample_list = [ds.sample, ]\n", + "\n", + "samples_dict = utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list)\n", + " \n", + "# manual override:\n", + "# samples_dict = {\"FeAu_0p5_tR_nscope\": [\"top_100um\", \"top_150um\"]}\n", + " \n", + "# now we have our samples_dict, we can process our data:\n", + "\n", + "for sample, datasets in samples_dict.items():\n", + " for dataset in datasets:\n", + " print(f\"Processing dataset {dataset} in sample {sample}\")\n", + " dset_path = os.path.join(ds.analysisroot, sample, f\"{sample}_{dataset}\", f\"{sample}_{dataset}_dataset.h5\")\n", + " if not os.path.exists(dset_path):\n", + " print(f\"Missing DataSet file for {dataset} in sample {sample}, skipping\")\n", + " continue\n", + "\n", + " print(\"Importing DataSet object\")\n", + " ds = ImageD11.sinograms.dataset.load(dset_path)\n", + " \n", + " tmap_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_tmap_{phase_str}.h5')\n", + " if os.path.exists(tmap_path):\n", + " print(f\"Already have PBP TensorMap output file for {dataset} in sample {sample}, skipping\")\n", + " continue\n", + " \n", + " pbppath = ds.pbpfile.replace('.txt', f'_{phase_str}.txt')\n", + " if not os.path.exists(pbppath):\n", + " print(f\"Can't find pbpfile for {dataset} in sample {sample}, skipping\")\n", + " continue\n", + " \n", + " ds.phases = ds.get_phases_from_disk()\n", + " ref_ucell = ds.phases.unitcells[phase_str]\n", + " pmap = PBPMap(pbppath)\n", + " pmap.choose_best(min_unique)\n", + " phases = {0: ref_ucell}\n", + " phase_ids = TensorMap.recon_order_to_map_order(np.where(pmap.best_nuniq > min_unique, 0, -1))\n", + " tmap = TensorMap.from_pbpmap(pmap, steps=(1, ds.ystep, ds.ystep), phases=phases)\n", + " tmap['phase_ids'] = phase_ids\n", + " tmap.get_ipf_maps()\n", + " tmap.to_h5(tmap_path)\n", + " tmap.to_paraview(tmap_path)\n", + " ctf_path = os.path.join(ds.analysispath, f'{ds.sample}_{ds.dset}_tmap_{phase_str}.ctf')\n", + " tmap.to_ctf_mtex(ctf_path, z_index=0)\n", + "\n", + "print(\"Done!\")" + ] } ], "metadata": { diff --git a/ImageD11/nbGui/S3DXRD/pbp_3_refinement.ipynb b/ImageD11/nbGui/S3DXRD/pbp_3_refinement.ipynb index a2e6a7b1..49e28e05 100755 --- a/ImageD11/nbGui/S3DXRD/pbp_3_refinement.ipynb +++ b/ImageD11/nbGui/S3DXRD/pbp_3_refinement.ipynb @@ -36,10 +36,58 @@ "\n", "os.environ['OMP_NUM_THREADS'] = '1'\n", "os.environ['OPENBLAS_NUM_THREADS'] = '1'\n", - "os.environ['MKL_NUM_THREADS'] = '1'\n", + "os.environ['MKL_NUM_THREADS'] = '1'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + 
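
For context on the filename pattern this patch applies everywhere (pbp map, icolf, refinement manager, TensorMap outputs): the phase name is spliced in before the extension so that per-phase results do not overwrite each other. A small standalone sketch of the pattern; the helper name is hypothetical, as the notebooks inline the expression instead:

```python
import os

# Hypothetical helper equivalent to the inlined pattern used in the patch:
#   os.path.splitext(path)[0] + f'_{phase_str}.h5'
def with_phase_suffix(path, phase_str):
    root, ext = os.path.splitext(path)
    return f"{root}_{phase_str}{ext}"

# matches the renaming applied to the .txt and .h5 outputs above
assert with_phase_suffix("pbp_index.txt", "Si") == "pbp_index_Si.txt"
assert with_phase_suffix("refine_manager.h5", "Si") == "refine_manager_Si.h5"
```
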
"exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# this cell is tagged with 'parameters'\n", + "# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n", "\n", - "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n", - "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )" + "# python environment stuff\n", + "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n", + "\n", + "# dataset file to import\n", + "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", + "\n", + "# which phase to refine\n", + "phase_str = 'Si'\n", + "\n", + "# the minimum number of peaks you want a pixel to have to be counted\n", + "min_unique = 20\n", + "\n", + "# threshold for whole-sample mask binarisation\n", + "manual_threshold = None\n", + "\n", + "# refinement parameters\n", + "y0 = 0.0\n", + "hkl_tol_origins = 0.05\n", + "hkl_tol_refine = 0.1\n", + "hkl_tol_refine_merged = 0.05\n", + "ds_tol = 0.006\n", + "ifrac = 6e-3\n", + "rings_to_refine = None # can be a list of rings\n", + "set_mask_from_input = False # do we mask just from the min_unique value?\n", + "use_cluster = False\n", + "\n", + "dset_prefix = \"top_\" # some common string in the names of the datasets if processing multiple scans" ] }, { @@ -77,8 +125,6 @@ "source": [ "# USER: Pass path to dataset file\n", "\n", - "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", - "\n", "ds = ImageD11.sinograms.dataset.load(dset_file)\n", " \n", "sample = ds.sample\n", @@ -113,7 +159,6 @@ "outputs": [], "source": [ "# now let's select a phase to index from our parameters json\n", - "phase_str = 'Fe'\n", "\n", "ref_ucell = ds.phases.unitcells[phase_str]\n", "\n", @@ -130,7 +175,7 @@ "source": [ "# let's load our point-by-point map from disk\n", "\n", - "pmap = PBPMap(ds.pbpfile)" + "pmap = PBPMap(ds.pbpfile.replace('.txt', f'_{phase_str}.txt'))" ] }, { @@ -156,8 +201,6 @@ "source": [ "# choose the minimum number of peaks you want a pixel to have to be counted\n", "\n", - "min_unique = 20\n", - "\n", "pmap.choose_best(min_unique)" ] }, @@ -200,14 +243,22 @@ "source": [ "# set up a refinement manager object\n", "\n", - "y0 = 0.0\n", - "hkl_tol_origins = 0.05\n", - "hkl_tol_refine = 0.1\n", - "hkl_tol_refine_merged = 0.05\n", - "ds_tol = 0.006\n", - "ifrac = 6e-3\n", + "refine = PBPRefine(dset=ds, y0=y0, hkl_tol_origins=hkl_tol_origins, hkl_tol_refine=hkl_tol_refine, hkl_tol_refine_merged=hkl_tol_refine_merged, ds_tol=ds_tol, ifrac=ifrac, phase_name=phase_str, forref=rings_to_refine)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# change the default paths of the refinement manager to append the phase name\n", + "# so we don't conflict\n", "\n", - "refine = PBPRefine(dset=ds, y0=y0, hkl_tol_origins=hkl_tol_origins, hkl_tol_refine=hkl_tol_refine, hkl_tol_refine_merged=hkl_tol_refine_merged, ds_tol=ds_tol, ifrac=ifrac, phase_name=phase_str)" + "refine.own_filename = os.path.splitext(refine.own_filename)[0] + f'_{phase_str}.h5'\n", + "refine.icolf_filename = os.path.splitext(refine.icolf_filename)[0] + 
f'_{phase_str}.h5'\n", + "refine.pbpmap_filename = os.path.splitext(refine.pbpmap_filename)[0] + f'_{phase_str}.h5'\n", + "refine.refinedmap_filename = os.path.splitext(refine.refinedmap_filename)[0] + f'_{phase_str}.h5'" ] }, { @@ -263,11 +314,9 @@ }, "outputs": [], "source": [ - "# set whole-sample mask to choose where to refine\n", - "\n", - "manual_threshold = None\n", + "# generate a single-valued map to refine on\n", "\n", - "refine.setmask(manual_threshold=manual_threshold, doplot=True)" + "refine.setsingle(refine.pbpmap, minpeaks=min_unique)" ] }, { @@ -278,9 +327,10 @@ }, "outputs": [], "source": [ - "# generate a single-valued map to refine on\n", + "# set whole-sample mask to choose where to refine\n", + "# if use_singlemap is true, we will generate a mask simply based on where self.singlemap > min_unique\n", "\n", - "refine.setsingle(refine.pbpmap, minpeaks=min_unique)" + "refine.setmask(manual_threshold=manual_threshold, doplot=True, use_singlemap=set_mask_from_input)" ] }, { @@ -309,8 +359,6 @@ "# if compute_origins took more than a couple of minutes to run, I suggest setting use_cluster=True below\n", "# otherwise if you asked for lots of cores and RAM on this Jupyter instance, you can run it locally (use_cluster=False)\n", "\n", - "use_cluster = False\n", - "\n", "refine.run_refine(use_cluster=use_cluster, pythonpath=PYTHONPATH)" ] }, @@ -329,16 +377,6 @@ "ds.save()" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "if 1:\n", - " raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")" - ] - }, { "cell_type": "code", "execution_count": null, @@ -349,15 +387,18 @@ "# by default this will do all samples in sample_list, all datasets with a prefix of dset_prefix\n", "# you can add samples and datasets to skip in skips_dict\n", "\n", + "# you can optionally skip samples\n", + "# skips_dict = {\n", + "# \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + "# }\n", + "# otherwise by default skip nothing:\n", "skips_dict = {\n", - " \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + " ds.sample: []\n", "}\n", "\n", - "dset_prefix = \"top\"\n", + "sample_list = [ds.sample, ]\n", "\n", - "sample_list = [\"FeAu_0p5_tR_nscope\"]\n", - " \n", - "samples_dict = utils.find_datasets_to_process(ds.dataroot, skips_dict, dset_prefix, sample_list)\n", + "samples_dict = utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list)\n", " \n", "# manual override:\n", "# samples_dict = {\"FeAu_0p5_tR_nscope\": [\"top_100um\", \"top_150um\"]}\n", @@ -376,11 +417,12 @@ " \n", " ds = ImageD11.sinograms.dataset.load(dset_path)\n", " print(f\"I have a DataSet {ds.dset} in sample {ds.sample}\")\n", - " if os.path.exists(ds.refoutfile):\n", + " if os.path.exists(os.path.splitext(ds.refoutfile)[0] + f'_{phase_str}.h5'):\n", " print(f\"Already have PBP refinement output file for {dataset} in sample {sample}, skipping\")\n", " continue\n", " \n", - " if not os.path.exists(ds.pbpfile):\n", + " pbpfile = ds.pbpfile.replace('.txt', f'_{phase_str}.txt')\n", + " if not os.path.exists(pbpfile):\n", " print(f\"Can't find PBP indexing file for {dataset} in sample {sample}, skipping\")\n", " continue\n", " \n", @@ -389,16 +431,24 @@ "\n", " if not os.path.exists(ds.col2dfile):\n", " ImageD11.columnfile.colfile_to_hdf(cf_2d, ds.col2dfile)\n", - " \n", - " pmap = PBPMap(ds.pbpfile)\n", + " \n", + " pmap = PBPMap(pbpfile)\n", " pmap.choose_best(min_unique)\n", " \n", - " refine = 
PBPRefine(dset=ds, y0=y0, hkl_tol_origins=hkl_tol_origins, hkl_tol_refine=hkl_tol_refine, hkl_tol_refine_merged=hkl_tol_refine_merged, ds_tol=ds_tol, ifrac=ifrac, phase_name=phase_str)\n", + " refine = PBPRefine(dset=ds, y0=y0, hkl_tol_origins=hkl_tol_origins, hkl_tol_refine=hkl_tol_refine, hkl_tol_refine_merged=hkl_tol_refine_merged, ds_tol=ds_tol, ifrac=ifrac, phase_name=phase_str, forref=rings_to_refine)\n", + " \n", + " # change the default paths of the refinement manager to append the phase name\n", + " # so we don't conflict\n", + "\n", + " refine.own_filename = os.path.splitext(refine.own_filename)[0] + f'_{phase_str}.h5'\n", + " refine.icolf_filename = os.path.splitext(refine.icolf_filename)[0] + f'_{phase_str}.h5'\n", + " refine.pbpmap_filename = os.path.splitext(refine.pbpmap_filename)[0] + f'_{phase_str}.h5'\n", + " refine.refinedmap_filename = os.path.splitext(refine.refinedmap_filename)[0] + f'_{phase_str}.h5'\n", " \n", " refine.setmap(pmap)\n", " refine.setpeaks(cf_2d)\n", - " refine.setmask(manual_threshold=manual_threshold, doplot=False)\n", " refine.setsingle(refine.pbpmap, minpeaks=min_unique)\n", + " refine.setmask(manual_threshold=manual_threshold, doplot=False, use_singlemap=set_mask_from_input)\n", " refine.get_origins()\n", " refine.run_refine(use_cluster=use_cluster, pythonpath=PYTHONPATH)\n", " if not use_cluster:\n", diff --git a/ImageD11/nbGui/S3DXRD/run_pbp_recon.py b/ImageD11/nbGui/S3DXRD/run_pbp_recon.py index 61cd6342..0c84d1a0 100755 --- a/ImageD11/nbGui/S3DXRD/run_pbp_recon.py +++ b/ImageD11/nbGui/S3DXRD/run_pbp_recon.py @@ -27,6 +27,9 @@ print('Loading dset') ds = ImageD11.sinograms.dataset.load(dsfile) + icolf_filename = ds.icolfile.replace('.h5', "_" + phase_name + ".h5") + grains_filename = ds.pbpfile.replace(".txt", "_" + phase_name + ".txt") + print('Loading peaks') ImageD11.cImageD11.cimaged11_omp_set_num_threads(ImageD11.cImageD11.cores_available()) cf_2d = ds.get_cf_2d() @@ -49,7 +52,7 @@ uniqcut=uniqcut, phase_name=phase_name) - pbp_object.setpeaks(cf_2d) + pbp_object.setpeaks(cf_2d, icolf_filename=icolf_filename) print('Go for pbp') - pbp_object.point_by_point(ds.pbpfile, loglevel=3) + pbp_object.point_by_point(grains_filename, loglevel=3) diff --git a/ImageD11/nbGui/S3DXRD/tomo_1_index.ipynb b/ImageD11/nbGui/S3DXRD/tomo_1_index.ipynb index 4cbc0dd1..0db2e72a 100755 --- a/ImageD11/nbGui/S3DXRD/tomo_1_index.ipynb +++ b/ImageD11/nbGui/S3DXRD/tomo_1_index.ipynb @@ -27,8 +27,58 @@ }, "outputs": [], "source": [ - "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n", - "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )" + "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# this cell is tagged with 'parameters'\n", + "# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n", + "\n", + "# python environment stuff\n", + "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n", + "\n", + "# dataset file to import\n", + "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", + "\n", + "# which phase to index\n", + "phase_str = 'Si'\n", + "\n", + "# peak filtration parameters\n", + "cf_strong_frac = 0.9939\n", + "cf_strong_dsmax = 1.594\n", + 
"cf_strong_dstol = 0.005\n", + "\n", + "# indexing parameters\n", + "indexer_ds_tol = 0.01\n", + "# we have to choose which rings we want to generate orientations on\n", + "# generally we want two or three low-multiplicity rings that are isolated from other phases\n", + "# take a look at the ring assignment output from a few cells above, and choose two or three\n", + "rings_for_gen = [0, 1, 3]\n", + "# now we want to decide which rings to score our found orientations against\n", + "# generally we can just exclude dodgy rings (close to other phases, only a few peaks in etc)\n", + "rings_for_scoring = [0, 1, 2, 3, 4]\n", + "# the sequence of hkl tolerances the indexer will iterate through\n", + "hkl_tols_seq = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.075]\n", + "# the sequence of minpks fractions the indexer will iterate through\n", + "fracs = [0.9, 0.7]\n", + "# the max number of UBIs we can find per pair of rings\n", + "max_grains = 1000\n", + "\n", + "peak_assign_tol = 0.05\n", + "\n", + "# EXPERTS: Can specify par_file as a parameter if you want\n", + "par_file = None\n", + "\n", + "dset_prefix = \"top_\" # some common string in the names of the datasets if processing multiple scans" ] }, { @@ -67,8 +117,6 @@ "source": [ "# USER: Pass path to dataset file\n", "\n", - "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", - "\n", "ds = ImageD11.sinograms.dataset.load(dset_file)\n", " \n", "sample = ds.sample\n", @@ -89,9 +137,9 @@ "outputs": [], "source": [ "# USER: specify the path to the parameter file\n", - "# you can find an example json in the same folder as this notebook\n", "\n", - "par_file = os.path.join(processed_data_root_dir, 'pars.json')\n", + "if par_file is None:\n", + " par_file = os.path.join(processed_data_root_dir, 'pars.json')\n", "\n", "# add them to the dataset\n", "\n", @@ -123,7 +171,6 @@ "outputs": [], "source": [ "# now let's select a phase to index from our parameters json\n", - "phase_str = 'Si'\n", "\n", "ucell = ds.phases.unitcells[phase_str]\n", "\n", @@ -221,14 +268,10 @@ "# here we are filtering our peaks (cf_4d) to select only the strongest ones for indexing purposes only!\n", "# dsmax is being set to limit rings given to the indexer - 6-8 rings is normally good\n", "\n", - "# USER: modify the \"frac\" parameter below and re-run the cell until the orange dot sits nicely on the \"elbow\" of the blue line\n", + "# USER: modify the \"frac\" parameter and re-run the cell until the orange dot sits nicely on the \"elbow\" of the blue line\n", "# this indicates the fractional intensity cutoff we will select\n", "# if the blue line does not look elbow-shaped in the logscale plot, try changing the \"doplot\" parameter (the y scale of the logscale plot) until it does\n", "\n", - "cf_strong_frac = 0.9939\n", - "cf_strong_dsmax = 1.594\n", - "cf_strong_dstol = 0.005\n", - "\n", "cf_strong = select_ring_peaks_by_intensity(cf_4d, frac=cf_strong_frac, dsmax=cf_strong_dsmax, dstol=cf_strong_dstol, doplot=0.95)\n", "print(cf_4d.nrows)\n", "print(cf_strong.nrows)" @@ -298,7 +341,6 @@ "source": [ "# USER: set a tolerance in d-space (for assigning peaks to powder rings)\n", "\n", - "indexer_ds_tol = 0.01\n", "indexer.ds_tol = indexer_ds_tol\n", "\n", "# change the log level so we can see what the ring assigments look like\n", @@ -346,23 +388,8 @@ "outputs": [], "source": [ "# now we are indexing!\n", - "# we have to choose which rings we want to generate orientations on\n", - "# generally we want two or three 
low-multiplicity rings that are isolated from other phases\n", - "# take a look at the ring assignment output from a few cells above, and choose two or three\n", - "rings_for_gen = [0, 1, 3]\n", - "\n", - "# now we want to decide which rings to score our found orientations against\n", - "# generally we can just exclude dodgy rings (close to other phases, only a few peaks in etc)\n", - "rings_for_scoring = [0, 1, 2, 3, 4]\n", - "\n", - "# the sequence of hkl tolerances the indexer will iterate through\n", - "hkl_tols_seq = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.075]\n", - "# the sequence of minpks fractions the indexer will iterate through\n", - "fracs = [0.9, 0.7]\n", - "# the tolerance in g-vector angle\n", + "# the tolerance in g-vector angle - automatically computed from dataset\n", "cosine_tol = np.cos(np.radians(90 - ds.ostep))\n", - "# the max number of UBIs we can find per pair of rings\n", - "max_grains = 1000\n", "\n", "grains, indexer = utils.do_index(cf=cf_strong,\n", " unitcell=ds.phases.unitcells[phase_str],\n", @@ -423,8 +450,6 @@ "source": [ "# assign peaks to grains\n", "\n", - "peak_assign_tol = 0.05\n", - "\n", "utils.assign_peaks_to_grains(grains, cf_strong, tol=peak_assign_tol)" ] }, @@ -476,16 +501,6 @@ "ds.save()" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "if 1:\n", - " raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")" - ] - }, { "cell_type": "code", "execution_count": null, @@ -498,14 +513,17 @@ "# by default this will do all samples in sample_list, all datasets with a prefix of dset_prefix\n", "# you can add samples and datasets to skip in skips_dict\n", "\n", + "# you can optionally skip samples\n", + "# skips_dict = {\n", + "# \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + "# }\n", + "# otherwise by default skip nothing:\n", "skips_dict = {\n", - " \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + " ds.sample: []\n", "}\n", "\n", - "dset_prefix = \"top\"\n", + "sample_list = [ds.sample, ]\n", "\n", - "sample_list = [\"FeAu_0p5_tR_nscope\"]\n", - " \n", "samples_dict = utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list)\n", " \n", "# manual override:\n", diff --git a/ImageD11/nbGui/S3DXRD/tomo_1_index_minor_phase.ipynb b/ImageD11/nbGui/S3DXRD/tomo_1_index_minor_phase.ipynb index 8a3946b3..20e43e46 100755 --- a/ImageD11/nbGui/S3DXRD/tomo_1_index_minor_phase.ipynb +++ b/ImageD11/nbGui/S3DXRD/tomo_1_index_minor_phase.ipynb @@ -28,8 +28,62 @@ }, "outputs": [], "source": [ - "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n", - "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )" + "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# this cell is tagged with 'parameters'\n", + "# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n", + "\n", + "# python environment stuff\n", + "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n", + "\n", + "# dataset file to import\n", + "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", + "\n", + "# phase names\n", + "major_phase_str = 'Fe'\n", + 
"minor_phase_str = 'Au'\n", + "\n", + "# peak filtering\n", + "major_phase_cf_frac = 0.99418\n", + "major_phase_cf_dsmax = 1.594\n", + "major_phase_cf_dstol = 0.0035\n", + "\n", + "minor_phase_cf_frac = 0.9975\n", + "minor_phase_cf_dsmax = 1.594\n", + "minor_phase_cf_dstol = 0.0045\n", + "\n", + "# indexing\n", + "indexer_ds_tol = 0.0045\n", + "\n", + "rings_for_gen = [0, 4, 5]\n", + "\n", + "# now we want to decide which rings to score our found orientations against\n", + "# generally we can just exclude dodgy rings (close to other phases, only a few peaks in etc)\n", + "rings_for_scoring = [0, 2, 3, 4, 5, 6, 7, 8, 10, 12, 13]\n", + "\n", + "# the sequence of hkl tolerances the indexer will iterate through\n", + "hkl_tols_seq = [0.01, 0.02, 0.03, 0.04, 0.05]\n", + "# the sequence of minpks fractions the indexer will iterate through\n", + "fracs = [0.9]\n", + "# the tolerance in g-vector angle\n", + "\n", + "# the max number of UBIs we can find per pair of rings\n", + "max_grains = 1000\n", + "\n", + "peak_assign_tol = 0.05\n", + "\n", + "dset_prefix = \"top_\" # some common string in the names of the datasets if processing multiple scans" ] }, { @@ -67,8 +121,6 @@ "source": [ "# USER: Pass path to dataset file\n", "\n", - "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", - "\n", "ds = ImageD11.sinograms.dataset.load(dset_file)\n", " \n", "sample = ds.sample\n", @@ -103,8 +155,6 @@ "outputs": [], "source": [ "# now let's select a phase to index from our parameters json\n", - "major_phase_str = 'Fe'\n", - "minor_phase_str = 'Au'\n", "\n", "major_phase_unitcell = ds.phases.unitcells[major_phase_str]\n", "minor_phase_unitcell = ds.phases.unitcells[minor_phase_str]\n", @@ -154,10 +204,6 @@ "# this indicates the fractional intensity cutoff we will select\n", "# if the blue line does not look elbow-shaped in the logscale plot, try changing the \"doplot\" parameter (the y scale of the logscale plot) until it does\n", "\n", - "major_phase_cf_frac = 0.99418\n", - "major_phase_cf_dsmax = 1.594\n", - "major_phase_cf_dstol = 0.0035\n", - "\n", "cf_major_phase = select_ring_peaks_by_intensity(cf_4d, frac=major_phase_cf_frac, dsmax=major_phase_cf_dsmax, dstol=major_phase_cf_dstol, doplot=0.95)\n", "print(cf_4d.nrows)\n", "print(cf_major_phase.nrows)" @@ -184,10 +230,6 @@ }, "outputs": [], "source": [ - "minor_phase_cf_frac = 0.9975\n", - "minor_phase_cf_dsmax = 1.594\n", - "minor_phase_cf_dstol = 0.0045\n", - "\n", "cf_minor_phase = select_ring_peaks_by_intensity(cf_4d, frac=minor_phase_cf_frac, dsmax=minor_phase_cf_dsmax, dstol=minor_phase_cf_dstol, doplot=0.95)\n", "print(cf_4d.nrows)\n", "print(cf_minor_phase.nrows)" @@ -267,7 +309,6 @@ "source": [ "# USER: set a tolerance in d-space (for assigning peaks to powder rings)\n", "\n", - "indexer_ds_tol = 0.0045\n", "indexer.ds_tol = indexer_ds_tol\n", "\n", "# change the log level so we can see what the ring assigments look like\n", @@ -318,21 +359,8 @@ "# now we are indexing!\n", "# we have to choose which rings we want to generate orientations on\n", "# generally we want two or three low-multiplicity rings that are isolated from other phases\n", - "# take a look at the ring assignment output from a few cells above, and choose two or three\n", - "rings_for_gen = [0, 4, 5]\n", - "\n", - "# now we want to decide which rings to score our found orientations against\n", - "# generally we can just exclude dodgy rings (close to other phases, only a few peaks in etc)\n", - "rings_for_scoring = 
[0, 2, 3, 4, 5, 6, 7, 8, 10, 12, 13]\n", - "\n", - "# the sequence of hkl tolerances the indexer will iterate through\n", - "hkl_tols_seq = [0.01, 0.02, 0.03, 0.04, 0.05]\n", - "# the sequence of minpks fractions the indexer will iterate through\n", - "fracs = [0.9]\n", - "# the tolerance in g-vector angle\n", - "cosine_tol = np.cos(np.radians(90 - 0.25))\n", - "# the max number of UBIs we can find per pair of rings\n", - "max_grains = 1000\n", + "# take a look at the ring assignment output from a few cells above, and choose two or three in 'rings_for_gen'\n", + "cosine_tol = np.cos(np.radians(90 - ds.ostep))\n", "\n", "grains, indexer = utils.do_index(cf=cf_strong,\n", " unitcell=minor_phase_unitcell,\n", @@ -391,8 +419,6 @@ "source": [ "# assign peaks to grains\n", "\n", - "peak_assign_tol = 0.05\n", - "\n", "utils.assign_peaks_to_grains(grains, cf_strong, tol=peak_assign_tol)" ] }, @@ -438,16 +464,6 @@ "ds.save()" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "if 1:\n", - " raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")" - ] - }, { "cell_type": "code", "execution_count": null, @@ -458,13 +474,16 @@ "# by default this will do all samples in sample_list, all datasets with a prefix of dset_prefix\n", "# you can add samples and datasets to skip in skips_dict\n", "\n", + "# you can optionally skip samples\n", + "# skips_dict = {\n", + "# \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + "# }\n", + "# otherwise by default skip nothing:\n", "skips_dict = {\n", - " \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + " ds.sample: []\n", "}\n", "\n", - "dset_prefix = \"top\"\n", - "\n", - "sample_list = [\"FeAu_0p5_tR_nscope\"]\n", + "sample_list = [ds.sample, ]\n", " \n", "samples_dict = utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list)\n", " \n", diff --git a/ImageD11/nbGui/S3DXRD/tomo_2_map.ipynb b/ImageD11/nbGui/S3DXRD/tomo_2_map.ipynb index 3e5acc61..e61e53b6 100644 --- a/ImageD11/nbGui/S3DXRD/tomo_2_map.ipynb +++ b/ImageD11/nbGui/S3DXRD/tomo_2_map.ipynb @@ -21,19 +21,78 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "import os\n", "\n", "os.environ['OMP_NUM_THREADS'] = '1'\n", "os.environ['OPENBLAS_NUM_THREADS'] = '1'\n", - "os.environ['MKL_NUM_THREADS'] = '1'\n", + "os.environ['MKL_NUM_THREADS'] = '1'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# this cell is tagged with 'parameters'\n", + "# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n", + "\n", + "# python environment stuff\n", + "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n", + "\n", + "# dataset file to import\n", + "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", + "\n", + "# which phase to index\n", + "phase_str = 'Si'\n", + "\n", + "# peak filtration parameters\n", + "cf_strong_frac = 0.993\n", + "cf_strong_dstol = 0.005\n", + "\n", + "# If the sinograms are only half-sinograms (we scanned dty across half the 
sample rather than the full sample), set the below to true:\n", + "is_half_scan = False\n", + "# If we did halfmask, choose the radius to mask in the centre of the reconstruction (normally hot pixels)\n", + "halfmask_radius = 25\n", + "\n", + "# assign peaks to the grains with hkl tolerance peak_assign_tol\n", + "peak_assign_tol = 0.25\n", + "\n", + "# we can override the whole-sample-mask reconstruction segmentation threshold if we don't like it:\n", + "# e.g. manual_threshold = 0.006\n", + "manual_threshold = None\n", + "\n", + "# tolerance for building sinograms from assigned peaks\n", + "hkltol = 0.25\n", + "\n", + "# We can optionally correct each row of the sinogram by the ring current of that rotation\n", + "# This helps remove artifacts in the reconstruction\n", + "correct_sinos_with_ring_current = True\n", + "\n", + "first_tmap_cutoff_level = 0.4\n", + "\n", + "# how many iterations for Astra reconstruction?\n", + "niter = 500\n", + "\n", + "second_tmap_cutoff_level = 0.05\n", "\n", - "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n", - "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )" + "dset_prefix = \"top_\" # some common string in the names of the datasets if processing multiple scans" ] }, { @@ -81,8 +140,6 @@ "source": [ "# USER: Pass path to dataset file\n", "\n", - "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", - "\n", "ds = ImageD11.sinograms.dataset.load(dset_file)\n", " \n", "sample = ds.sample\n", @@ -117,23 +174,10 @@ "outputs": [], "source": [ "# pick a phase\n", - "phase_str = 'Fe'\n", "\n", "ucell = ds.phases.unitcells[phase_str]" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# If the sinograms are only half-sinograms (we scanned dty across half the sample rather than the full sample), set the below to true:\n", - "is_half_scan = False" - ] - }, { "cell_type": "code", "execution_count": null, @@ -163,13 +207,10 @@ "# this time as opposed to indexing, our frac is slightly weaker but we are NOT filtering in dstar!!!!!\n", "# this means many more peaks per grain = stronger sinograms\n", "\n", - "# USER: modify the \"frac\" parameter below and re-run the cell until the orange dot sits nicely on the \"elbow\" of the blue line\n", + "# USER: modify the \"cf_strong_frac\" parameter and re-run the cell until the orange dot sits nicely on the \"elbow\" of the blue line\n", "# this indicates the fractional intensity cutoff we will select\n", "# if the blue line does not look elbow-shaped in the logscale plot, try changing the \"doplot\" parameter (the y scale of the logscale plot) until it does\n", "\n", - "cf_strong_frac = 0.993\n", - "cf_strong_dstol = 0.005\n", - "\n", "cf_strong = select_ring_peaks_by_intensity(cf_4d, frac=cf_strong_frac, dstol=cf_strong_dstol, dsmax=cf_4d.ds.max(), doplot=0.9)\n", "print(cf_4d.nrows)\n", "cf_strong.nrows" @@ -222,9 +263,8 @@ }, "outputs": [], "source": [ - "# assign peaks to the grains\n", + "# assign peaks to the grains with hkl tolerance peak_assign_tol\n", "\n", - "peak_assign_tol = 0.25\n", "utils.assign_peaks_to_grains(grains, cf_strong, peak_assign_tol)\n", "\n", "for grain_label, g in enumerate(grains):\n", @@ -355,6 +395,15 @@ " gs.update_recon_parameters(y0=y0, shift=shift, pad=pad)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Our next task is to determine a reconstruction 
mask for the entire sample.\n", + "\n", + "This should adequately differentiate between sample and air." + ] + }, { "cell_type": "code", "execution_count": null, @@ -363,14 +412,10 @@ }, "outputs": [], "source": [ - "# now let's do a whole-sample tomographic reconstruction\n", - "# generate sinogram for whole sample\n", - "\n", - "whole_sample_sino, xedges, yedges = np.histogram2d(cf_4d.dty, cf_4d.omega, bins=[ds.ybinedges, ds.obinedges])\n", + "whole_sample_sino = ds.sinohist(omega=ds.pk2d['omega'], dty=ds.pk2d['dty'], weights=np.power(ds.pk2d['sum_intensity'], 0.1)).T\n", "\n", "fig, ax = plt.subplots()\n", - "ax.imshow(whole_sample_sino, interpolation=\"nearest\", vmin=0, aspect='auto', norm='log')\n", - "#ax.set_aspect(4)\n", + "ax.imshow(whole_sample_sino, aspect='auto', vmin=0)\n", "plt.show()" ] }, @@ -382,7 +427,7 @@ }, "outputs": [], "source": [ - "# \"quick\" whole-sample reconstruction\n", + "# now perform the tomographic reconstruction:\n", "\n", "nthreads = len(os.sched_getaffinity(os.getpid()))\n", "\n", @@ -413,10 +458,6 @@ "# we should be able to easily segment this using scikit-image\n", "recon_man_mask = apply_manual_mask(whole_sample_recon)\n", "\n", - "# we can also override the threshold if we don't like it:\n", - "# manual_threshold = 0.006\n", - "manual_threshold = None\n", - "\n", "if manual_threshold is None:\n", " thresh = threshold_otsu(recon_man_mask)\n", "else:\n", @@ -456,8 +497,6 @@ "# before we do this, we need to determine our 2D peaks that will be used for the sinogram\n", "# here we can get them from the 4D peaks:\n", "\n", - "hkltol = 0.25\n", - "\n", "gord, inds = get_2d_peaks_from_4d_peaks(ds.pk2d, cf_strong)\n", "\n", "for grain_label, gs in enumerate(tqdm(grainsinos)):\n", @@ -524,7 +563,6 @@ "# We can optionally correct each row of the sinogram by the ring current of that rotation\n", "# This helps remove artifacts in the reconstruction\n", "\n", - "correct_sinos_with_ring_current = True\n", "if correct_sinos_with_ring_current:\n", " ds.get_ring_current_per_scan()\n", " \n", @@ -573,7 +611,7 @@ "gs.recon()\n", "\n", "if is_half_scan:\n", - " halfmask_radius = 25\n", + "\n", " gs.mask_central_zingers(\"iradon\", radius=halfmask_radius)\n", "\n", "# view the result\n", @@ -669,7 +707,7 @@ "source": [ "# let's assemble all the recons together into a TensorMap\n", "\n", - "tensor_map = TensorMap.from_grainsinos(grainsinos, cutoff_level=0.4)" + "tensor_map = TensorMap.from_grainsinos(grainsinos, cutoff_level=first_tmap_cutoff_level)" ] }, { @@ -712,8 +750,6 @@ "# choose the number of iterations\n", "# experience shows 500 is good, and pretty quick on the GPU\n", "\n", - "niter = 500\n", - "\n", "for gs in grainsinos:\n", " gs.update_recon_parameters(pad=pad, shift=shift, mask=whole_sample_mask, niter=niter, y0=y0)" ] @@ -785,21 +821,26 @@ "source": [ "# look at all our ASTRA recons in a grid\n", "\n", - "n_grains_to_plot = 25\n", + "n_grains_to_plot = min(25, len(grainsinos))\n", "\n", "grains_step = len(grainsinos)//n_grains_to_plot\n", "\n", "grid_size = np.ceil(np.sqrt(len(grainsinos[::grains_step]))).astype(int)\n", "nrows = (len(grainsinos[::grains_step])+grid_size-1)//grid_size\n", "\n", - "fig, axs = plt.subplots(grid_size, nrows, figsize=(10,10), layout=\"constrained\", sharex=True, sharey=True)\n", - "for i, ax in enumerate(axs.ravel()):\n", - " if i < len(grainsinos[::grains_step]):\n", - " # get corresponding grain for this axis\n", - " gs = grainsinos[::grains_step][i]\n", - " ax.imshow(gs.recons[\"astra\"], vmin=0, origin=\"lower\")\n", - " 
# ax.invert_yaxis()\n", - " ax.set_title(i)\n", + "if grid_size == 1:\n", + " fig, ax = plt.subplots(figsize=(10,10), layout=\"constrained\")\n", + " gs = grainsinos[0]\n", + " ax.imshow(gs.recons[\"astra\"], vmin=0, origin=\"lower\")\n", + "else:\n", + " fig, axs = plt.subplots(grid_size, nrows, figsize=(10,10), layout=\"constrained\", sharex=True, sharey=True)\n", + " for i, ax in enumerate(axs.ravel()):\n", + " if i < len(grainsinos[::grains_step]):\n", + " # get corresponding grain for this axis\n", + " gs = grainsinos[::grains_step][i]\n", + " ax.imshow(gs.recons[\"astra\"], vmin=0, origin=\"lower\")\n", + " # ax.invert_yaxis()\n", + " ax.set_title(i)\n", " \n", "plt.show()" ] @@ -812,9 +853,8 @@ }, "outputs": [], "source": [ - "cutoff_level = 0.05\n", "\n", - "tensor_map_astra = TensorMap.from_grainsinos(grainsinos, cutoff_level=cutoff_level, method=\"astra\")" + "tensor_map_astra = TensorMap.from_grainsinos(grainsinos, cutoff_level=second_tmap_cutoff_level, method=\"astra\")" ] }, { @@ -878,16 +918,6 @@ "ds.save()" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "if 1:\n", - " raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")" - ] - }, { "cell_type": "code", "execution_count": null, @@ -900,13 +930,16 @@ "# by default this will do all samples in sample_list, all datasets with a prefix of dset_prefix\n", "# you can add samples and datasets to skip in skips_dict\n", "\n", + "# you can optionally skip samples\n", + "# skips_dict = {\n", + "# \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + "# }\n", + "# otherwise by default skip nothing:\n", "skips_dict = {\n", - " \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + " ds.sample: []\n", "}\n", "\n", - "dset_prefix = \"top\"\n", - "\n", - "sample_list = [\"FeAu_0p5_tR_nscope\"]\n", + "sample_list = [ds.sample, ]\n", " \n", "samples_dict = utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list)\n", " \n", @@ -1024,7 +1057,7 @@ " print(\"Final save\")\n", " write_h5(ds.grainsfile, grainsinos, overwrite_grains=True, group_name=phase_str)\n", " \n", - " tensor_map_astra = TensorMap.from_grainsinos(grainsinos, cutoff_level=cutoff_level, method=\"astra\")\n", + " tensor_map_astra = TensorMap.from_grainsinos(grainsinos, cutoff_level=second_tmap_cutoff_level, method=\"astra\")\n", " tensor_map_astra.to_h5(ds.grainsfile, h5group='TensorMap_' + phase_str)\n", " tensor_map_astra.to_paraview(ds.grainsfile, h5group='TensorMap_' + phase_str)\n", " \n", diff --git a/ImageD11/nbGui/S3DXRD/tomo_2_map_minor_phase.ipynb b/ImageD11/nbGui/S3DXRD/tomo_2_map_minor_phase.ipynb index 7fca4143..42cfd467 100755 --- a/ImageD11/nbGui/S3DXRD/tomo_2_map_minor_phase.ipynb +++ b/ImageD11/nbGui/S3DXRD/tomo_2_map_minor_phase.ipynb @@ -31,10 +31,76 @@ "\n", "os.environ['OMP_NUM_THREADS'] = '1'\n", "os.environ['OPENBLAS_NUM_THREADS'] = '1'\n", - "os.environ['MKL_NUM_THREADS'] = '1'\n", + "os.environ['MKL_NUM_THREADS'] = '1'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# this cell is tagged with 'parameters'\n", + "# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n", + "\n", + 
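The cell tags introduced throughout this diff are what make the notebooks batch-runnable: papermill scans for the cell tagged `parameters` and injects caller-supplied values in a new cell directly after it. A minimal driving sketch, assuming papermill is installed; the output path and parameter values below are placeholders, not part of this diff:

```python
# hedged sketch: re-executing one of the parameterised notebooks with
# papermill, which injects these values after the cell tagged 'parameters'
import papermill as pm

pm.execute_notebook(
    "tomo_2_map_minor_phase.ipynb",               # notebook from this diff
    "tomo_2_map_minor_phase_FeAu.ipynb",          # executed copy (placeholder)
    parameters={
        "dset_file": "/path/to/your_dataset.h5",  # placeholder path
        "major_phase_str": "Fe",
        "minor_phase_str": "Au",
        "is_half_scan": False,
    },
)
```

Papermill records what it injected in a separate `injected-parameters` cell, so the defaults in the tagged cell double as documentation for interactive users.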
"# python environment stuff\n", + "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n", + "\n", + "# dataset file to import\n", + "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", + "\n", + "# phase names to look at\n", + "major_phase_str = 'Fe'\n", + "minor_phase_str = 'Au'\n", "\n", - "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n", - "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )" + "# peak filtering\n", + "major_phase_cf_frac = 0.994\n", + "major_phase_cf_dstol = 0.005\n", + "\n", + "minor_phase_cf_frac = 0.9975\n", + "minor_phase_cf_dstol = 0.005\n", + "\n", + "# If the sinograms are only half-sinograms (we scanned dty across half the sample rather than the full sample), set the below to true:\n", + "is_half_scan = False\n", + "# If we did halfmask, choose the radius to mask in the centre of the reconstruction (normally hot pixels)\n", + "halfmask_radius = 25\n", + "\n", + "# assign peaks to the grains with hkl tolerance peak_assign_tol\n", + "peak_assign_tol = 0.25\n", + "\n", + "# tolerance for building sinograms from assigned peaks\n", + "hkltol = 0.25\n", + "\n", + "# We can optionally correct each row of the sinogram by the ring current of that rotation\n", + "# This helps remove artifacts in the reconstruction\n", + "correct_sinos_with_ring_current = True\n", + "\n", + "first_tmap_cutoff_level = 0.4\n", + "\n", + "# how many iterations for Astra reconstruction?\n", + "niter = 500\n", + "\n", + "second_tmap_cutoff_level = 0.5\n", + "\n", + "# filter out grains with more than grain_too_many_px pixels in the label map\n", + "# this normally indicates a dodgy reconstruction for this grain\n", + "# only really applies if the grains are very small!\n", + "grain_too_many_px = 10\n", + "\n", + "dset_prefix = \"top_\" # some common string in the names of the datasets if processing multiple scans" ] }, { @@ -81,8 +147,6 @@ "source": [ "# USER: Pass path to dataset file\n", "\n", - "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", - "\n", "ds = ImageD11.sinograms.dataset.load(dset_file)\n", " \n", "sample = ds.sample\n", @@ -117,9 +181,6 @@ "outputs": [], "source": [ "# now let's select a phase to index from our parameters json\n", - "major_phase_str = 'Fe'\n", - "minor_phase_str = 'Au'\n", - "\n", "major_phase_unitcell = ds.phases.unitcells[major_phase_str]\n", "minor_phase_unitcell = ds.phases.unitcells[minor_phase_str]\n", "\n", @@ -127,18 +188,6 @@ "print(minor_phase_str, minor_phase_unitcell.lattice_parameters, minor_phase_unitcell.spacegroup)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# If the sinograms are only half-sinograms (we scanned dty across half the sample rather than the full sample), set the below to true:\n", - "is_half_scan = False" - ] - }, { "cell_type": "code", "execution_count": null, @@ -170,9 +219,6 @@ "# this indicates the fractional intensity cutoff we will select\n", "# if the blue line does not look elbow-shaped in the logscale plot, try changing the \"doplot\" parameter (the y scale of the logscale plot) until it does\n", "\n", - "major_phase_cf_frac = 0.994\n", - "major_phase_cf_dstol = 0.005\n", - "\n", "cf_major_phase = select_ring_peaks_by_intensity(cf_4d, frac=major_phase_cf_frac, 
dstol=major_phase_cf_dstol, doplot=0.95)\n", "print(cf_4d.nrows)\n", "print(cf_major_phase.nrows)" @@ -199,9 +245,6 @@ }, "outputs": [], "source": [ - "minor_phase_cf_frac = 0.9975\n", - "minor_phase_cf_dstol = 0.005\n", - "\n", "cf_minor_phase = select_ring_peaks_by_intensity(cf_4d, frac=minor_phase_cf_frac, dstol=minor_phase_cf_dstol, doplot=0.95)\n", "print(cf_4d.nrows)\n", "print(cf_minor_phase.nrows)" @@ -308,7 +351,6 @@ "source": [ "# assign peaks to the grains\n", "\n", - "peak_assign_tol = 0.25\n", "utils.assign_peaks_to_grains(grains, cf_strong, peak_assign_tol)\n", "\n", "for grain_label, g in enumerate(grains):\n", @@ -412,8 +454,6 @@ "# before we do this, we need to determine our 2D peaks that will be used for the sinogram\n", "# here we can get them from the 4D peaks:\n", "\n", - "hkltol = 0.25\n", - "\n", "gord, inds = get_2d_peaks_from_4d_peaks(ds.pk2d, cf_strong)\n", "\n", "for grain_label, gs in enumerate(tqdm(grainsinos)):\n", @@ -476,7 +516,6 @@ "# We can optionally correct each row of the sinogram by the ring current of that rotation\n", "# This helps remove artifacts in the reconstruction\n", "\n", - "correct_sinos_with_ring_current = True\n", "if correct_sinos_with_ring_current:\n", " \n", " ds.get_ring_current_per_scan()\n", @@ -526,7 +565,7 @@ "gs.recon()\n", "\n", "if is_half_scan:\n", - " halfmask_radius = 25\n", + "\n", " gs.mask_central_zingers(\"iradon\", radius=halfmask_radius)\n", "\n", "# view the result\n", @@ -649,7 +688,7 @@ "source": [ "# let's assemble all the recons together into a TensorMap\n", "\n", - "tensor_map = TensorMap.from_grainsinos(grainsinos, cutoff_level=0.4)" + "tensor_map = TensorMap.from_grainsinos(grainsinos, cutoff_level=first_tmap_cutoff_level)" ] }, { @@ -678,12 +717,6 @@ "# There will likely be many streaks, indicating a few grains have dodgy reconstructions and are probably not to be trusted\n", "# You could optionally run ASTRA:\n", "\n", - "\n", - "# choose the number of iterations\n", - "# experience shows 500 is good, and pretty quick on the GPU\n", - "\n", - "niter = 500\n", - "\n", "for gs in grainsinos:\n", " gs.update_recon_parameters(pad=pad, shift=shift, mask=whole_sample_mask, niter=niter, y0=y0)\n", "\n", @@ -716,11 +749,9 @@ }, "outputs": [], "source": [ - "# Let's assemble all the recons into one map\n", + "# Let's assemble all the astra recons into one map\n", "\n", - "cutoff_level = 0.5\n", - "\n", - "tensor_map_astra = TensorMap.from_grainsinos(grainsinos, cutoff_level=cutoff_level, method=\"astra\")" + "tensor_map_astra = TensorMap.from_grainsinos(grainsinos, cutoff_level=second_tmap_cutoff_level, method=\"astra\")" ] }, { @@ -762,12 +793,10 @@ }, "outputs": [], "source": [ - "# filter out grains with more than 10 pixels in the label map\n", + "# filter out grains with more than grain_too_many_px pixels in the label map\n", "# this normally indicates a dodgy reconstruction for this grain\n", "# only really applies if the grains are very small!\n", "\n", - "grain_too_many_px = 10\n", - "\n", "bad_gids = [int(label) for (label, count) in zip(labels, counts) if count > grain_too_many_px and label > 0]" ] }, @@ -811,9 +840,7 @@ }, "outputs": [], "source": [ - "cutoff_level = 0.5\n", - "\n", - "tensor_map_astra = TensorMap.from_grainsinos(grainsinos_clean, cutoff_level=cutoff_level, method=\"astra\")" + "tensor_map_astra = TensorMap.from_grainsinos(grainsinos_clean, cutoff_level=second_tmap_cutoff_level, method=\"astra\")" ] }, { @@ -932,16 +959,6 @@ "ds.save()" ] }, - { - "cell_type": "code", - "execution_count": 
null, - "metadata": {}, - "outputs": [], - "source": [ - "if 1:\n", - " raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")" - ] - }, { "cell_type": "code", "execution_count": null, @@ -952,13 +969,16 @@ "# by default this will do all samples in sample_list, all datasets with a prefix of dset_prefix\n", "# you can add samples and datasets to skip in skips_dict\n", "\n", + "# you can optionally skip samples\n", + "# skips_dict = {\n", + "# \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + "# }\n", + "# otherwise by default skip nothing:\n", "skips_dict = {\n", - " \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + " ds.sample: []\n", "}\n", "\n", - "dset_prefix = \"top\"\n", - "\n", - "sample_list = [\"FeAu_0p5_tR_nscope\"]\n", + "sample_list = [ds.sample, ]\n", " \n", "samples_dict = utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list)\n", " \n", @@ -1066,7 +1086,7 @@ " gs.ds = ds\n", " \n", " print(\"Filtering noisy recons\")\n", - " tensor_map_astra = TensorMap.from_grainsinos(grainsinos, cutoff_level=cutoff_level, method=\"astra\")\n", + " tensor_map_astra = TensorMap.from_grainsinos(grainsinos, cutoff_level=second_tmap_cutoff_level, method=\"astra\")\n", " labels, counts = np.unique(tensor_map_astra[\"labels\"], return_counts=True)\n", " bad_gids = [int(label) for (label, count) in zip(labels, counts) if count > grain_too_many_px and label > 0]\n", "\n", @@ -1076,7 +1096,7 @@ " for gs, label in zip(grainsinos_clean, grain_labels_clean):\n", " gs.grain.gid = label\n", "\n", - " tensor_map_astra = TensorMap.from_grainsinos(grainsinos_clean, cutoff_level=cutoff_level, method=\"astra\")\n", + " tensor_map_astra = TensorMap.from_grainsinos(grainsinos_clean, cutoff_level=second_tmap_cutoff_level, method=\"astra\")\n", " \n", " for gs in tqdm(grainsinos):\n", " gs.update_lab_position_from_recon()\n", diff --git a/ImageD11/nbGui/S3DXRD/tomo_3_refinement.ipynb b/ImageD11/nbGui/S3DXRD/tomo_3_refinement.ipynb index 96cdc9d3..5f28aa5d 100755 --- a/ImageD11/nbGui/S3DXRD/tomo_3_refinement.ipynb +++ b/ImageD11/nbGui/S3DXRD/tomo_3_refinement.ipynb @@ -33,10 +33,58 @@ "\n", "os.environ['OMP_NUM_THREADS'] = '1'\n", "os.environ['OPENBLAS_NUM_THREADS'] = '1'\n", - "os.environ['MKL_NUM_THREADS'] = '1'\n", + "os.environ['MKL_NUM_THREADS'] = '1'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "324210ec-acd1-49de-aed0-0ec90b119249", + "metadata": {}, + "outputs": [], + "source": [ + "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "80809290-53e9-48a0-bb4a-6a5ed3ef5b1f", + "metadata": { + "tags": [ + "parameters" + ] + }, + "outputs": [], + "source": [ + "# this cell is tagged with 'parameters'\n", + "# to view the tag, select the cell, then find the settings gear icon (right or left sidebar) and look for Cell Tags\n", + "\n", + "# python environment stuff\n", + "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )\n", + "\n", + "# dataset file to import\n", + "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", + "\n", + "# which phase to refine\n", + "phase_str = 'Si'\n", + "\n", + "# default options for the single-valued map (shouldn't need to modify this)\n", + "default_npks = 20\n", + "default_nuniq = 20\n", + "\n", + "# refinement tolerances\n", + "hkl_tol_origins = 0.05\n", + 
"hkl_tol_refine = 0.1\n", + "hkl_tol_refine_merged = 0.05\n", + "ds_tol = 0.004\n", + "ifrac = 7e-3\n", + "rings_to_refine = None # can be a list of rings\n", + "\n", + "# use cluster for refinement or run locally?\n", + "use_cluster = False\n", "\n", - "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n", - "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )" + "dset_prefix = \"top_\" # some common string in the names of the datasets if processing multiple scans" ] }, { @@ -87,8 +135,6 @@ "source": [ "# USER: Pass path to dataset file\n", "\n", - "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", - "\n", "ds = ImageD11.sinograms.dataset.load(dset_file)\n", " \n", "sample = ds.sample\n", @@ -125,7 +171,6 @@ "outputs": [], "source": [ "# now let's select a phase to index from our parameters json\n", - "phase_str = 'Si'\n", "\n", "ucell = ds.phases.unitcells[phase_str]\n", "\n", @@ -201,7 +246,7 @@ "source": [ "# make a PBPMap from our TensorMap\n", "\n", - "pmap = tensor_map.to_pbpmap(z_layer=0, default_npks=20, default_nuniq=20)\n", + "pmap = tensor_map.to_pbpmap(z_layer=0, default_npks=default_npks, default_nuniq=default_nuniq)\n", "# fills voxels that have grains with npks = 20 and nuniq = 20" ] }, @@ -230,13 +275,24 @@ "# set up a refinement manager object\n", "\n", "y0 = grainsinos[0].recon_y0\n", - "hkl_tol_origins = 0.05\n", - "hkl_tol_refine = 0.1\n", - "hkl_tol_refine_merged = 0.05\n", - "ds_tol = 0.004\n", - "ifrac = 7e-3\n", "\n", - "refine = PBPRefine(dset=ds, y0=y0, hkl_tol_origins=hkl_tol_origins, hkl_tol_refine=hkl_tol_refine, hkl_tol_refine_merged=hkl_tol_refine_merged, ds_tol=ds_tol, ifrac=ifrac, phase_name=phase_str)" + "refine = PBPRefine(dset=ds, y0=y0, hkl_tol_origins=hkl_tol_origins, hkl_tol_refine=hkl_tol_refine, hkl_tol_refine_merged=hkl_tol_refine_merged, ds_tol=ds_tol, ifrac=ifrac, phase_name=phase_str, forref=rings_to_refine)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bd7b947d-32d2-45a5-96da-fd8adab58476", + "metadata": {}, + "outputs": [], + "source": [ + "# change the default paths of the refinement manager to append the phase name\n", + "# so we don't conflict\n", + "\n", + "refine.own_filename = os.path.splitext(refine.own_filename)[0] + f'_{phase_str}.h5'\n", + "refine.icolf_filename = os.path.splitext(refine.icolf_filename)[0] + f'_{phase_str}.h5'\n", + "refine.pbpmap_filename = os.path.splitext(refine.pbpmap_filename)[0] + f'_{phase_str}.h5'\n", + "refine.refinedmap_filename = os.path.splitext(refine.refinedmap_filename)[0] + f'_{phase_str}.h5'" ] }, { @@ -346,8 +402,6 @@ "# if compute_origins took more than a couple of minutes to run, I suggest setting use_cluster=True below\n", "# otherwise if you asked for lots of cores and RAM on this Jupyter instance, you can run it locally (use_cluster=False)\n", "\n", - "use_cluster = True\n", - "\n", "refine.run_refine(use_cluster=use_cluster, pythonpath=PYTHONPATH)" ] }, @@ -368,17 +422,6 @@ "ds.save()" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "0c559091-97bd-4b2d-9dc6-d99eb1b6e038", - "metadata": {}, - "outputs": [], - "source": [ - "if 1:\n", - " raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")" - ] - }, { "cell_type": "code", "execution_count": null, @@ -392,15 +435,18 @@ "# by default this will do all samples in sample_list, all datasets with a prefix of dset_prefix\n", "# you can 
add samples and datasets to skip in skips_dict\n", "\n", + "# you can optionally skip samples\n", + "# skips_dict = {\n", + "# \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + "# }\n", + "# otherwise by default skip nothing:\n", "skips_dict = {\n", - " \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + " ds.sample: []\n", "}\n", "\n", - "dset_prefix = \"top\"\n", - "\n", - "sample_list = [\"FeAu_0p5_tR_nscope\"]\n", + "sample_list = [ds.sample, ]\n", " \n", - "samples_dict = utils.find_datasets_to_process(ds.dataroot, skips_dict, dset_prefix, sample_list)\n", + "samples_dict = utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list)\n", " \n", "# manual override:\n", "# samples_dict = {\"FeAu_0p5_tR_nscope\": [\"top_100um\", \"top_150um\"]}\n", @@ -419,11 +465,13 @@ " \n", " ds = ImageD11.sinograms.dataset.load(dset_path)\n", " print(f\"I have a DataSet {ds.dset} in sample {ds.sample}\")\n", - " if os.path.exists(ds.refoutfile):\n", + " \n", + " refinedmap_filename = os.path.splitext(ds.refoutfile)[0] + f'_{phase_str}.h5'\n", + " if os.path.exists(refinedmap_filename):\n", " print(f\"Already have PBP refinement output file for {dataset} in sample {sample}, skipping\")\n", " continue\n", " \n", - " if not os.path.exists(ds.pbpfile):\n", + " if not os.path.exists(ds.grainsfile):\n", " print(f\"Can't find PBP indexing file for {dataset} in sample {sample}, skipping\")\n", " continue\n", " \n", @@ -437,10 +485,15 @@ " y0 = grainsinos[0].recon_y0\n", " \n", " tensor_map = TensorMap.from_h5(ds.grainsfile, h5group='TensorMap_' + phase_str)\n", - " pmap = tensor_map.to_pbpmap(z_layer=0, default_npks=20, default_nuniq=20)\n", + " pmap = tensor_map.to_pbpmap(z_layer=0, default_npks=default_npks, default_nuniq=default_nuniq)\n", " pmap.choose_best(1)\n", "\n", - " refine = PBPRefine(dset=ds, y0=y0, hkl_tol_origins=hkl_tol_origins, hkl_tol_refine=hkl_tol_refine, hkl_tol_refine_merged=hkl_tol_refine_merged, ds_tol=ds_tol, ifrac=ifrac, phase_name=phase_str)\n", + " refine = PBPRefine(dset=ds, y0=y0, hkl_tol_origins=hkl_tol_origins, hkl_tol_refine=hkl_tol_refine, hkl_tol_refine_merged=hkl_tol_refine_merged, ds_tol=ds_tol, ifrac=ifrac, phase_name=phase_str, forref=rings_to_refine)\n", + " \n", + " refine.own_filename = os.path.splitext(refine.own_filename)[0] + f'_{phase_str}.h5'\n", + " refine.icolf_filename = os.path.splitext(refine.icolf_filename)[0] + f'_{phase_str}.h5'\n", + " refine.pbpmap_filename = os.path.splitext(refine.pbpmap_filename)[0] + f'_{phase_str}.h5'\n", + " refine.refinedmap_filename = refinedmap_filename\n", " \n", " refine.setmap(pmap)\n", " refine.setpeaks(cf_2d)\n", diff --git a/ImageD11/nbGui/S3DXRD/tomo_3_refinement_minor_phase.ipynb b/ImageD11/nbGui/S3DXRD/tomo_3_refinement_minor_phase.ipynb deleted file mode 100755 index 35de7f74..00000000 --- a/ImageD11/nbGui/S3DXRD/tomo_3_refinement_minor_phase.ipynb +++ /dev/null @@ -1,614 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "bbc4bc96-cbc7-436a-a174-c99388869cbb", - "metadata": {}, - "source": [ - "# Jupyter notebook based on ImageD11 to process scanning 3DXRD data\n", - "# Written by Haixing Fang, Jon Wright and James Ball\n", - "## Date: 12/10/2024" - ] - }, - { - "cell_type": "markdown", - "id": "831932fc-8a7f-4509-8042-47a8b542a68c", - "metadata": {}, - "source": [ - "This notebook will try to perform a point-by-point strain refinement from your tomographic-derived grain shapes. 
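The bulk-processing cells above now derive `skips_dict` and `sample_list` from the loaded dataset instead of hard-coding the FeAu sample. For orientation, a hedged usage sketch of `utils.find_datasets_to_process` (paths and folder names are placeholders; the return value maps each sample to its sorted dataset names, minus the skips):

```python
# hedged usage sketch; the folders must exist under rawdata_path for
# anything to be returned (see the guarded nb_utils.py hunk further down)
import ImageD11.nbGui.nb_utils as utils

samples_dict = utils.find_datasets_to_process(
    rawdata_path="/data/visitor/ma0000/id11/RAW_DATA",  # placeholder
    skips_dict={"FeAu_0p5_tR_nscope": ["top_-50um"]},   # datasets to leave out
    dset_prefix="top_",             # substring shared by dataset folder names
    sample_list=["FeAu_0p5_tR_nscope"],
)
# e.g. {"FeAu_0p5_tR_nscope": ["top_100um", "top_150um"]}
```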
\n", - "\n", - "### NOTE: It is highly recommended to run this notebook on a Jupyter server with many cores and a lot of RAM. \n", - "The compute_origins() function in particular runs locally and can be compute-intensive for large datasets. \n", - "If this is a big scan (e.g 100 million + 2D peaks), you should definitely refine on the cluster rather than locally." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "94b89030-fdb2-47d2-bc26-3e5cfb0d6509", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "\n", - "os.environ['OMP_NUM_THREADS'] = '1'\n", - "os.environ['OPENBLAS_NUM_THREADS'] = '1'\n", - "os.environ['MKL_NUM_THREADS'] = '1'\n", - "\n", - "exec(open('/data/id11/nanoscope/install_ImageD11_from_git.py').read())\n", - "PYTHONPATH = setup_ImageD11_from_git( ) # ( os.path.join( os.environ['HOME'],'Code'), 'ImageD11_git' )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8f4b91e0-7a83-462b-85cb-27f65721ff93", - "metadata": {}, - "outputs": [], - "source": [ - "# import functions we need\n", - "\n", - "import os\n", - "import concurrent.futures\n", - "import timeit\n", - "\n", - "import matplotlib\n", - "%matplotlib ipympl\n", - "\n", - "import h5py\n", - "from tqdm.notebook import tqdm\n", - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "import matplotlib.cm as cm\n", - "from matplotlib.colors import Normalize\n", - "\n", - "from xfab.symmetry import Umis\n", - "\n", - "import ImageD11.columnfile\n", - "from ImageD11.sinograms.tensor_map import TensorMap\n", - "from ImageD11.sinograms.point_by_point import PBPRefine\n", - "from ImageD11.peakselect import select_ring_peaks_by_intensity\n", - "from ImageD11.sinograms import properties, roi_iradon\n", - "from ImageD11.sinograms.sinogram import GrainSinogram, build_slice_arrays, write_slice_recon, read_slice_recon, write_h5, read_h5, write_pbp_strain\n", - "from ImageD11.grain import grain\n", - "from ImageD11 import cImageD11\n", - "\n", - "import ImageD11.nbGui.nb_utils as utils" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "20ff052b-cca8-4310-8b29-4c82e0e513c8", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# USER: Pass path to dataset file\n", - "\n", - "dset_file = 'si_cube_test/processed/Si_cube/Si_cube_S3DXRD_nt_moves_dty/Si_cube_S3DXRD_nt_moves_dty_dataset.h5'\n", - "\n", - "ds = ImageD11.sinograms.dataset.load(dset_file)\n", - " \n", - "sample = ds.sample\n", - "dataset = ds.dsname\n", - "rawdata_path = ds.dataroot\n", - "processed_data_root_dir = ds.analysisroot\n", - "\n", - "print(ds)\n", - "print(ds.shape)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2d1b9af4-89a7-4dff-b258-cc2f77db5ee3", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# load phases from parameter file\n", - "\n", - "ds.phases = ds.get_phases_from_disk()\n", - "ds.phases.unitcells" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8722e04a-a23f-4af3-8530-a90874e27e64", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# now let's select a phase to index from our parameters json\n", - "major_phase_str = 'Fe'\n", - "minor_phase_str = 'Au'\n", - "\n", - "major_phase_unitcell = ds.phases.unitcells[major_phase_str]\n", - "minor_phase_unitcell = ds.phases.unitcells[minor_phase_str]\n", - "\n", - "print(major_phase_str, major_phase_unitcell.lattice_parameters, major_phase_unitcell.spacegroup)\n", - "print(minor_phase_str, 
minor_phase_unitcell.lattice_parameters, minor_phase_unitcell.spacegroup)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dd01cb0d-4fa3-4701-8d6a-52f644b954f6", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# load 4d peaks\n", - "\n", - "cf_4d = ds.get_cf_4d()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "84edd6e6-2094-445d-9e79-22bdcc4ed29f", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# for now - set parameters with major phase\n", - "\n", - "ds.update_colfile_pars(cf_4d, phase_name=major_phase_str)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2833aa86-d346-4e8d-aedc-17987c4469b4", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "cf_major_phase = select_ring_peaks_by_intensity(cf_4d, frac=1, dsmax=cf_4d.ds.max(), dstol=0.005, doplot=None)\n", - "cf_minor_phase = select_ring_peaks_by_intensity(cf_4d, frac=1, dsmax=cf_4d.ds.max(), dstol=0.005, doplot=None)\n", - "\n", - "major_phase_unitcell.makerings(cf_major_phase.ds.max())\n", - "minor_phase_unitcell.makerings(cf_minor_phase.ds.max())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b69b044c-4b63-4219-9295-924630bbc075", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# now we can take a look at the intensities of the remaining peaks\n", - "\n", - "fig, ax = plt.subplots(figsize=(16, 9), constrained_layout=True)\n", - "\n", - "ax.plot(cf_4d.ds, cf_4d.sum_intensity,',', label='cf_4d',c='blue')\n", - "ax.plot(cf_major_phase.ds, cf_major_phase.sum_intensity,',', label='major phase',c='orange')\n", - "ax.plot(cf_minor_phase.ds, cf_minor_phase.sum_intensity,',', label='minor phase',c='green')\n", - "ax.plot(major_phase_unitcell.ringds, [5e4,]*len(major_phase_unitcell.ringds), '|', ms=90, c=\"red\")\n", - "ax.plot(minor_phase_unitcell.ringds, [1e4,]*len(minor_phase_unitcell.ringds), '|', ms=90, c=\"brown\")\n", - "ax.semilogy()\n", - "\n", - "ax.set_xlabel(\"Dstar\")\n", - "ax.set_ylabel(\"Intensity\")\n", - "ax.legend()\n", - "\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aa403e18-fa6c-41cf-9b98-0134ce623232", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# you should choose the rings that you want to refine off from the plot above\n", - "\n", - "rings_to_refine = [0, 2, 3, 4, 5, 6, 7, 8, 10, 12, 13]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "01a2b143-ed90-4817-92ac-bd78dea2c73c", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# Import 2D peaks\n", - "\n", - "cf_2d = ds.get_cf_2d()\n", - "ds.update_colfile_pars(cf_2d, phase_name=minor_phase_str)\n", - "\n", - "print(f\"Read {cf_2d.nrows} 2D peaks\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "55bf5dc8-a25d-4b09-b1d8-e55b1c6d07b6", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# import grainsinos\n", - "\n", - "grainsinos = read_h5(ds.grainsfile, ds, minor_phase_str)\n", - "grains = [gs.grain for gs in grainsinos]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ec5ff57a-0a7f-44cd-b437-eb4cc4e2ea25", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# import slice reconstructions\n", - "\n", - "tensor_map = TensorMap.from_h5(ds.grainsfile, h5group='TensorMap_' + minor_phase_str)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "094fef49-3c33-4d23-8f6e-8605b72f1b78", - 
"metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "tensor_map.plot('phase_ids')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "08715524-4a8e-41bb-9d67-165523980f6b", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# make a PBPMap from our TensorMap\n", - "\n", - "pmap = tensor_map.to_pbpmap(z_layer=0, default_npks=20, default_nuniq=20)\n", - "# fills voxels that have grains with npks = 20 and nuniq = 20" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "33282e98-4e2c-4805-a2b2-468d6949e554", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "pmap.choose_best(1)\n", - "pmap.plot_best(1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f8feef60-367b-478a-9ce4-8a94e3cedd60", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# set up a refinement manager object\n", - "\n", - "y0 = grainsinos[0].recon_y0\n", - "hkl_tol_origins = 0.05\n", - "hkl_tol_refine = 0.1\n", - "hkl_tol_refine_merged = 0.05\n", - "ds_tol = 0.004\n", - "ifrac = 1e-3\n", - "forref = [0, 2, 3, 4, 5, 6, 7, 8, 10, 12, 13]\n", - "phase_str = minor_phase_str\n", - "\n", - "refine = PBPRefine(dset=ds, y0=y0, hkl_tol_origins=hkl_tol_origins, hkl_tol_refine=hkl_tol_refine, hkl_tol_refine_merged=hkl_tol_refine_merged, ds_tol=ds_tol, ifrac=ifrac, phase_name=phase_str, forref=forref)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ac204c73-58f3-4c32-9e86-289390c6ec4b", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# change the default paths of the refinement manager to append the phase name\n", - "# so we don't conflict\n", - "\n", - "refine.own_filename = os.path.splitext(refine.own_filename)[0] + f'_{phase_str}.h5'\n", - "refine.icolf_filename = os.path.splitext(refine.icolf_filename)[0] + f'_{phase_str}.h5'\n", - "refine.pbpmap_filename = os.path.splitext(refine.pbpmap_filename)[0] + f'_{phase_str}.h5'\n", - "refine.refinedmap_filename = os.path.splitext(refine.refinedmap_filename)[0] + f'_{phase_str}.h5'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15efa1c4-3acb-435b-9955-ff0526338bd1", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# choose 2D peaks to refine with\n", - "\n", - "refine.setpeaks(cf_2d)\n", - "\n", - "# or load from disk:\n", - "# refine.loadpeaks()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aba21183-1f72-4970-a6b8-c058afd4c11b", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# plot the peaks you selected\n", - "\n", - "refine.iplot()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9b4fe817-5794-459b-9974-9d70624af3a8", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# tell it which point-by-point map we are refining\n", - "\n", - "refine.setmap(pmap)\n", - "\n", - "# or load from disk:\n", - "# refine.loadmap()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9372bfa0-4dfc-4727-a927-0d3b55c46875", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# set the mask from minimum peak values\n", - "# anything greater than 0 should be accepted\n", - "\n", - "refine.mask = pmap.best_npks > 0" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9804f2d4-b522-4f3d-aa1a-5ceada3b3b6d", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "fig, ax = plt.subplots()\n", - "ax.imshow(refine.mask, origin='lower')\n", - 
"plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "839ddf32-a6eb-46c5-8725-d1b400acc44c", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# generate a single-valued map to refine on\n", - "\n", - "refine.setsingle(refine.pbpmap, minpeaks=1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22f5c297-a8b9-4d89-9f86-366cb7144adb", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# compute diffraction origins - these will be added as a column to refine.icolf\n", - "# will then save the new column to disk to avoid re-computation\n", - "\n", - "refine.get_origins()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7cde5eb5-ce0f-49b5-a6cb-4761f16cee32", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# run the refinement\n", - "# if compute_origins took more than a couple of minutes to run, I suggest setting use_cluster=True below\n", - "# otherwise if you asked for lots of cores and RAM on this Jupyter instance, you can run it locally (use_cluster=False)\n", - "\n", - "use_cluster = False\n", - "\n", - "refine.run_refine(use_cluster=use_cluster, pythonpath=PYTHONPATH)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "acbd5197-268c-48ab-b676-ac0c186b4533", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# save refinement results to disk\n", - "\n", - "if not use_cluster:\n", - " refine.to_h5()\n", - "\n", - "ds.save()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0c559091-97bd-4b2d-9dc6-d99eb1b6e038", - "metadata": {}, - "outputs": [], - "source": [ - "if 1:\n", - " raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "32f475c8-968b-48b6-9840-83ef517144ac", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# Now that we're happy with our refinement parameters, we can run the below cell to do this in bulk for many samples/datasets\n", - "# by default this will do all samples in sample_list, all datasets with a prefix of dset_prefix\n", - "# you can add samples and datasets to skip in skips_dict\n", - "\n", - "skips_dict = {\n", - " \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", - "}\n", - "\n", - "dset_prefix = \"top\"\n", - "\n", - "sample_list = [\"FeAu_0p5_tR_nscope\"]\n", - " \n", - "samples_dict = utils.find_datasets_to_process(ds.dataroot, skips_dict, dset_prefix, sample_list)\n", - " \n", - "# manual override:\n", - "# samples_dict = {\"FeAu_0p5_tR_nscope\": [\"top_100um\", \"top_150um\"]}\n", - " \n", - "# now we have our samples_dict, we can process our data:\n", - "\n", - "for sample, datasets in samples_dict.items():\n", - " for dataset in datasets:\n", - " print(f\"Processing dataset {dataset} in sample {sample}\")\n", - " dset_path = os.path.join(ds.analysisroot, sample, f\"{sample}_{dataset}\", f\"{sample}_{dataset}_dataset.h5\")\n", - " if not os.path.exists(dset_path):\n", - " print(f\"Missing DataSet file for {dataset} in sample {sample}, skipping\")\n", - " continue\n", - " \n", - " print(\"Importing DataSet object\")\n", - " \n", - " ds = ImageD11.sinograms.dataset.load(dset_path)\n", - " print(f\"I have a DataSet {ds.dset} in sample {ds.sample}\")\n", - " if os.path.exists(ds.refoutfile):\n", - " print(f\"Already have PBP refinement output file for {dataset} in sample {sample}, skipping\")\n", - " continue\n", - " \n", - " if not 
os.path.exists(ds.pbpfile):\n", - " print(f\"Can't find PBP indexing file for {dataset} in sample {sample}, skipping\")\n", - " continue\n", - " \n", - " cf_2d = ds.get_cf_2d()\n", - " ds.update_colfile_pars(cf_2d, phase_name=phase_str)\n", - "\n", - " if not os.path.exists(ds.col2dfile):\n", - " ImageD11.columnfile.colfile_to_hdf(cf_2d, ds.col2dfile)\n", - " \n", - " grainsinos = read_h5(ds.grainsfile, ds, phase_str)\n", - " y0 = grainsinos[0].recon_y0\n", - " \n", - " tensor_map = TensorMap.from_h5(ds.grainsfile, h5group='TensorMap_' + phase_str)\n", - " pmap = tensor_map.to_pbpmap(z_layer=0, default_npks=20, default_nuniq=20)\n", - " pmap.choose_best(1)\n", - "\n", - " refine = PBPRefine(dset=ds, y0=y0, hkl_tol_origins=hkl_tol_origins, hkl_tol_refine=hkl_tol_refine, hkl_tol_refine_merged=hkl_tol_refine_merged, ds_tol=ds_tol, ifrac=ifrac, phase_name=minor_phase_str, forref=forref)\n", - " \n", - " refine.own_filename = os.path.splitext(refine.own_filename)[0] + f'_{phase_str}.h5'\n", - " refine.icolf_filename = os.path.splitext(refine.icolf_filename)[0] + f'_{phase_str}.h5'\n", - " refine.pbpmap_filename = os.path.splitext(refine.pbpmap_filename)[0] + f'_{phase_str}.h5'\n", - " refine.refinedmap_filename = os.path.splitext(refine.refinedmap_filename)[0] + f'_{phase_str}.h5'\n", - " \n", - " refine.setmap(pmap)\n", - " refine.setpeaks(cf_2d)\n", - " refine.mask = pmap.best_npks > 0\n", - " refine.setsingle(refine.pbpmap, minpeaks=1)\n", - " refine.get_origins()\n", - " refine.run_refine(use_cluster=use_cluster, pythonpath=PYTHONPATH)\n", - " if not use_cluster:\n", - " # wait to complete locally, then save\n", - " refine.to_h5()\n", - " ds.save()\n", - "\n", - "print(\"Done!\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "58ebe56b-916c-4a57-a0ae-4471c09b80a0", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (main)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.6" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/ImageD11/nbGui/nb_utils.py b/ImageD11/nbGui/nb_utils.py index af3274b7..8c40a75b 100644 --- a/ImageD11/nbGui/nb_utils.py +++ b/ImageD11/nbGui/nb_utils.py @@ -297,21 +297,27 @@ def save_array(grp, name, ary): def find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list): + # check rawdata path exists, return empty dict otherwise + if not os.path.exists(rawdata_path): + return {} + samples_dict = {} for sample in sample_list: - all_dset_folders_for_sample = os.listdir(os.path.join(rawdata_path, sample)) - dsets_list = [] - for folder in all_dset_folders_for_sample: - if dset_prefix in folder: - dset_name = folder.split(sample + "_")[1] - if sample in skips_dict.keys(): - if dset_name not in skips_dict[sample]: + sample_path = os.path.join(rawdata_path, sample) + if os.path.exists(sample_path): + all_dset_folders_for_sample = os.listdir(sample_path) + dsets_list = [] + for folder in all_dset_folders_for_sample: + if dset_prefix in folder: + dset_name = folder.split(sample + "_")[1] + if sample in skips_dict.keys(): + if dset_name not in skips_dict[sample]: + dsets_list.append(dset_name) + else: dsets_list.append(dset_name) - else: - dsets_list.append(dset_name) - samples_dict[sample] = sorted(dsets_list) 
+ samples_dict[sample] = sorted(dsets_list) return samples_dict diff --git a/ImageD11/parameters.py b/ImageD11/parameters.py index c3c3f2b0..140bc84f 100644 --- a/ImageD11/parameters.py +++ b/ImageD11/parameters.py @@ -108,12 +108,13 @@ def get_xfab_pars_dict(self, phase_name=None): if phase_name is not None: # get parameters for a specific phase phase_pars_dict = self.phase_pars_obj_dict[phase_name].get_parameters() + # copy the geometry dict to a new dict + pars_dict = geometry_pars_dict.copy() + # add in the phase pars + pars_dict.update(phase_pars_dict) else: - # get any phase pars as a dict - phase_pars_dict = self.get_any_phase_pars_obj().get_parameters() - # combine dicts together - pars_dict = phase_pars_dict.copy() - pars_dict.update(geometry_pars_dict) + # just copy the geometry dict + pars_dict = geometry_pars_dict.copy() return pars_dict def load_json(self, filename):
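The parameters.py change above is a merge-order fix: `dict.update` gives precedence to the second dict, so copying the geometry parameters first and then updating with the phase parameters lets phase-specific values (and nothing else) override the geometry. A small illustration with made-up keys and values:

```python
# illustrative values only; the key names loosely mimic ImageD11 parameter
# files and are not taken from this diff
geometry_pars_dict = {"distance": 150000.0, "wavelength": 0.1858}
phase_pars_dict = {"cell__a": 4.0649, "wavelength": 0.1857}  # phase override

pars_dict = geometry_pars_dict.copy()   # never mutate the geometry dict
pars_dict.update(phase_pars_dict)       # phase keys win on collision
assert pars_dict["wavelength"] == 0.1857
```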
diff --git a/ImageD11/sinograms/point_by_point.py b/ImageD11/sinograms/point_by_point.py index 4d606599..612a0e2d 100644 --- a/ImageD11/sinograms/point_by_point.py +++ b/ImageD11/sinograms/point_by_point.py @@ -878,40 +878,48 @@ def iplot(self, skip=1): ax[1].set(ylabel="dty", xlabel="omega") return f, ax - def setmask(self, manual_threshold=None, doplot=False, use_icolf=True): + def setmask(self, manual_threshold=None, doplot=False, use_icolf=True, use_singlemap=False): """Set a mask for choosing what to refine or not. You can choose whether to use self.colf (all peaks) or self.icolf (selected peaks) At the moment it does an iradon on the sinogram of all the 2D peaks in self.colf""" - if use_icolf: - dty = self.icolf.dty - omega = self.icolf.omega - else: - dty = self.colf.dty - omega = self.colf.omega - whole_sample_sino, xedges, yedges = np.histogram2d(dty, omega, - bins=[self.dset.ybinedges, self.dset.obinedges]) - shift, _ = geometry.sino_shift_and_pad(self.y0, len(self.ybincens), self.ybincens.min(), self.ystep) - nthreads = len(os.sched_getaffinity(os.getpid())) - # make sure the shape is the same as sx_grid - pad = self.sx_grid.shape[0] - whole_sample_sino.shape[0] - whole_sample_recon = run_iradon(whole_sample_sino, self.dset.obincens, pad, shift, workers=nthreads) - - # we should be able to easily segment this using scikit-image - recon_man_mask = whole_sample_recon - - # we can also override the threshold if we don't like it: - # manual_threshold = 0.025 - - if manual_threshold is None: - thresh = threshold_otsu(recon_man_mask) + if use_singlemap: + # take all non-nan singlemap values + whole_sample_mask = ~np.isnan(self.singlemap[:, :, 0, 0]) + recon_man_mask = whole_sample_mask.astype(float) + binary = recon_man_mask + chull = recon_man_mask + else: + if use_icolf: + dty = self.icolf.dty + omega = self.icolf.omega + else: + dty = self.colf.dty + omega = self.colf.omega + whole_sample_sino, xedges, yedges = np.histogram2d(dty, omega, + bins=[self.dset.ybinedges, self.dset.obinedges]) + shift, _ = geometry.sino_shift_and_pad(self.y0, len(self.ybincens), self.ybincens.min(), self.ystep) + nthreads = len(os.sched_getaffinity(os.getpid())) + # make sure the shape is the same as sx_grid + pad = self.sx_grid.shape[0] - whole_sample_sino.shape[0] + whole_sample_recon = run_iradon(whole_sample_sino, self.dset.obincens, pad, shift, workers=nthreads) + + # we should be able to easily segment this using scikit-image + recon_man_mask = whole_sample_recon + + # we can also override the threshold if we don't like it: + # manual_threshold = 0.025 + + if manual_threshold is None: + thresh = threshold_otsu(recon_man_mask) + else: + thresh = manual_threshold - binary = recon_man_mask > thresh + binary = recon_man_mask > thresh - chull = convex_hull_image(binary) + chull = convex_hull_image(binary) - whole_sample_mask = chull + whole_sample_mask = chull if doplot: from matplotlib import pyplot as plt @@ -1463,6 +1471,7 @@ def refine_map(refine_points, all_pbpmap_ubis, ri_col, rj_col, sx_grid, sy_grid, for refine_idx in numba.prange(npoints): ri, rj = refine_points[refine_idx] if mask[ri, rj]: + # mask is valid at this pixel # print('at ri rj', ri, rj) # mask all_ubis by the pbpmap points @@ -1471,6 +1480,9 @@ def refine_map(refine_points, all_pbpmap_ubis, ri_col, rj_col, sx_grid, sy_grid, # get ubis at this point ubis_here = all_pbpmap_ubis[:, :, pbpmap_mask] + # check that not all ubis are nan + if np.all(np.isnan(ubis_here[0, 0, :])): + continue # get xi0, xi0 at this point xi0 = sx_grid[ri, rj] @@ -1504,6 +1516,8 @@ def refine_map(refine_points, all_pbpmap_ubis, ri_col, rj_col, sx_grid, sy_grid, # iterate through the ubis at this voxel for ubi_idx in np.arange(ubis_here.shape[2]): ubi = ubis_here[:, :, ubi_idx] + if np.isnan(ubi[0, 0]): + continue # we're scoring and assigning one UBI to a bunch of gves # all we need is to generate a mask @@ -1578,10 +1592,18 @@ def refine_map(refine_points, all_pbpmap_ubis, ri_col, rj_col, sx_grid, sy_grid, if grain_npks > min_grain_npks: w, ubifit, residuals, rank, sing_vals = weighted_lstsq_ubi_fit(grain_ydist, gve_grain, hkl) + + problem_fitting = False + try: + # get U from UBI without using ImageD11 grain class + ucell = ubi_to_unitcell(ubifit) + U = ubi_and_ucell_to_u(ubifit, ucell) + except: + problem_fitting = True # check the quality of the fit worth_fitting = (ubifit is not None) and (rank == 3) and (np.linalg.cond(ubifit) < 1e14) and ( - np.linalg.det(ubifit) > 0) and (np.linalg.matrix_rank(ubifit) == 3) + np.linalg.det(ubifit) > 0) and (np.linalg.matrix_rank(ubifit) == 3) and (not np.any(np.isnan(U))) and (not problem_fitting) # do we like the quality? if worth_fitting: @@ -1617,9 +1639,6 @@ def refine_map(refine_points, all_pbpmap_ubis, ri_col, rj_col, sx_grid, sy_grid, hkl = hkli[:, grain_peak_mask] ydist = ydist_grain[grain_peak_mask] - # get U from UBI without using ImageD11 grain class - ucell = ubi_to_unitcell(ubifit) - U = ubi_and_ucell_to_u(ubi_out, ucell) gve0 = U.dot(B0).dot(hkl.astype(np.float64)) gTg0 = np.sum(gve_grain_strainfit * gve0, axis=0) gTg = np.sum(gve_grain_strainfit * gve_grain_strainfit, axis=0)
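The point_by_point.py hunks above harden the numba-compiled `refine_map` loop: voxels whose candidate UBI stack is entirely NaN are skipped, NaN-padded slots are skipped inside the per-voxel loop, and a failing `ubi_to_unitcell` / `ubi_and_ucell_to_u` call now disqualifies the fit instead of crashing it. A plain-numpy sketch of the guard pattern, with illustrative shapes and names rather than the real kernel:

```python
import numpy as np

def first_valid_ubi(ubis_here):
    """Return the first non-NaN 3x3 UBI from a (3, 3, n) stack, else None."""
    if np.all(np.isnan(ubis_here[0, 0, :])):  # nothing was indexed here
        return None
    for k in range(ubis_here.shape[2]):
        if np.isnan(ubis_here[0, 0, k]):      # skip NaN-padded slots
            continue
        return ubis_here[:, :, k]
    return None

stack = np.full((3, 3, 2), np.nan)
stack[:, :, 1] = np.eye(3)                    # one valid candidate
assert np.allclose(first_valid_ubi(stack), np.eye(3))
```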
This is potentially useful for a -heterogenous cluster (like at ESRF): +heterogeneous cluster (like at ESRF): ``` # on ppc64le: python3 -m pip install dist/ImageD11-1.9.8-cp38-cp38-linux_ppc64le.whl --user --ignore-installed diff --git a/docs/notebooks/S3DXRD/4_visualise.html b/docs/notebooks/S3DXRD/4_visualise.html new file mode 100644 index 00000000..05e20dc8 --- /dev/null +++ b/docs/notebooks/S3DXRD/4_visualise.html @@ -0,0 +1,8372 @@ + + + + + +4_visualise + + + + + + + + + + + + +
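A note on the parameters.py hunk above: the merge order is now geometry first, phase second, so phase-specific values win whenever the two dicts share a key (previously the geometry values overwrote the phase ones). A minimal sketch of that behaviour, using made-up parameter names rather than real ImageD11 keys:

```python
# Hypothetical illustration of the new merge order in get_xfab_pars_dict().
geometry_pars_dict = {"distance": 150000.0, "wavelength": 0.28}  # made-up geometry pars
phase_pars_dict = {"cell__a": 4.0493, "wavelength": 0.29}        # made-up phase pars

pars_dict = geometry_pars_dict.copy()  # copy the geometry dict to a new dict
pars_dict.update(phase_pars_dict)      # phase pars override any shared keys

assert pars_dict["wavelength"] == 0.29  # the phase value survives, not the geometry one
```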
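Similarly, here is a self-contained sketch of the segmentation stage that the reworked setmask() above still performs on its default (non-singlemap) path: Otsu-threshold the iradon reconstruction, then take its convex hull. The synthetic array below merely stands in for whole_sample_recon; the scikit-image calls are the same ones the hunk uses:

```python
import numpy as np
from skimage.filters import threshold_otsu
from skimage.morphology import convex_hull_image

# synthetic stand-in for the iradon reconstruction of the whole-sample sinogram
rng = np.random.default_rng(42)
whole_sample_recon = rng.random((64, 64)) * 0.01  # low-level background noise
whole_sample_recon[20:45, 15:40] += 1.0           # bright block standing in for the sample

thresh = threshold_otsu(whole_sample_recon)  # automatic threshold (or pass a manual one)
binary = whole_sample_recon > thresh         # segment sample from background
chull = convex_hull_image(binary)            # fill concavities and holes in the outline
whole_sample_mask = chull                    # the pixels that will be refined
print(whole_sample_mask.sum(), "pixels inside the mask")
```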
diff --git a/docs/notebooks/S3DXRD/import_test_data.html b/docs/notebooks/S3DXRD/import_test_data.html
new file mode 100644
index 00000000..aecc41d8
--- /dev/null
+++ b/docs/notebooks/S3DXRD/import_test_data.html
@@ -0,0 +1,7746 @@
+[7746 lines of rendered notebook HTML omitted]
diff --git a/docs/notebooks/S3DXRD/pbp_1_indexing.html b/docs/notebooks/S3DXRD/pbp_1_indexing.html
new file mode 100644
index 00000000..5869d297
--- /dev/null
+++ b/docs/notebooks/S3DXRD/pbp_1_indexing.html
@@ -0,0 +1,8187 @@
+[8187 lines of rendered notebook HTML omitted]
diff --git a/docs/notebooks/S3DXRD/pbp_2_visualise.html b/docs/notebooks/S3DXRD/pbp_2_visualise.html
new file mode 100644
index 00000000..5d38ff7a
--- /dev/null
+++ b/docs/notebooks/S3DXRD/pbp_2_visualise.html
@@ -0,0 +1,8215 @@
+[8215 lines of rendered notebook HTML omitted]
diff --git a/docs/notebooks/S3DXRD/pbp_3_refinement.html b/docs/notebooks/S3DXRD/pbp_3_refinement.html
new file mode 100644
index 00000000..8a94e038
--- /dev/null
+++ b/docs/notebooks/S3DXRD/pbp_3_refinement.html
@@ -0,0 +1,8360 @@
+[8360 lines of rendered notebook HTML omitted]
diff --git a/docs/notebooks/S3DXRD/tomo_1_index.html b/docs/notebooks/S3DXRD/tomo_1_index.html
new file mode 100644
index 00000000..dc6d3d80
--- /dev/null
+++ b/docs/notebooks/S3DXRD/tomo_1_index.html
@@ -0,0 +1,8556 @@
+[8556 lines of rendered notebook HTML omitted]
diff --git a/docs/notebooks/S3DXRD/tomo_2_map.html b/docs/notebooks/S3DXRD/tomo_2_map.html
new file mode 100644
index 00000000..de3779d5
--- /dev/null
+++ b/docs/notebooks/S3DXRD/tomo_2_map.html
@@ -0,0 +1,9310 @@
+[9310 lines of rendered notebook HTML omitted]
diff --git a/docs/notebooks/S3DXRD/tomo_3_refinement.html b/docs/notebooks/S3DXRD/tomo_3_refinement.html
new file mode 100644
index 00000000..31e57a52
--- /dev/null
+++ b/docs/notebooks/S3DXRD/tomo_3_refinement.html
@@ -0,0 +1,8402 @@
+[8402 lines of rendered notebook HTML omitted]
diff --git a/setup.py b/setup.py
index e7186d8d..8629c3b1 100644
--- a/setup.py
+++ b/setup.py
@@ -178,6 +178,7 @@ def build_extension(self, ext):
 
 more = [
     # Used in sandbox / test / not completely essential, but should work for CI
+    "papermill", # in test for notebook testing
    "pillow", # in sandbox
    "lmfit", # in sandbox
    "sympy", # for maths
diff --git a/test/papermill_test_notebooks.py b/test/papermill_test_notebooks.py
new file mode 100644
index 00000000..abe02bfd
--- /dev/null
+++ b/test/papermill_test_notebooks.py
@@ -0,0 +1,503 @@
+# flake8: noqa
+"""
+Python script to automatically end-to-end test our Jupyter notebooks.
+Currently implemented: the tomographic and point-by-point S3DXRD routes below.
+To run this script, you need papermill in your Python environment.
+As of 2025/01/21, this is not available in the default Jupyter environment.
+
+I suggest doing the following:
+cd to your ImageD11 git checkout folder
+$ pip install papermill ansicolors -t . --no-deps
+
+This file adds its parent folder (../) to the system path so it can be imported,
+giving you the local ImageD11 and the local papermill.
+"""
+import sys
+sys.path.insert(0, '../')
+
+import os
+
+
+import nbformat
+import pytest
+import papermill
+from nbconvert.preprocessors import ExecutePreprocessor
+
+nb_base_prefix = os.path.join('..', 'ImageD11', 'nbGui')
+scan_nb_prefix = os.path.join(nb_base_prefix, 'S3DXRD')
+
+# there are two levels of testing:
+# does the notebook run without errors?
+# does the notebook give you the output you expect?
+
+
+def noteboook_exec_pmill(nb_input_path, nb_output_path, params_dict):
+    print('Executing notebook', nb_input_path)
+    # change the output path if it already exists, in case we run the same notebook twice
+    if os.path.exists(nb_output_path):
+        nb_output_path = nb_output_path.replace('.ipynb', '_2.ipynb')
+    papermill.execute_notebook(
+        nb_input_path,
+        nb_output_path,
+        parameters=params_dict
+    )
+
+def notebook_route(base_dir, notebook_paths, notebook_param_dicts, notebook_out_dir=None):
+    """
+    Execute multiple notebooks in the order they are given.
+    base_dir: The path to the output folder for the test. Must not already exist.
+ notebook_paths: Ordered list of paths to the notebooks, to be deployed one after the other + notebook_param_dicts: Ordered list of dictionaries of parameters, one dict per notebook to be executed + """ + if os.path.exists(base_dir): + raise ValueError('output test directory already exists:', base_dir) + os.mkdir(base_dir) + if notebook_out_dir is None: + notebook_out_dir = os.path.join(base_dir, 'nb_out') + os.mkdir(notebook_out_dir) + for notebook_in_path, notebook_param_dict in zip(notebook_paths, notebook_param_dicts): + notebook_out_path = os.path.join(notebook_out_dir, os.path.split(notebook_in_path)[1].replace('.ipynb', '_out.ipynb')) + noteboook_exec_pmill(notebook_in_path, notebook_out_path, notebook_param_dict) + + +# test the full tomographic route from start to finish +def test_tomographic_route(): + tomo_dir = 'tomo_route' + scan_nb_names = [ + 'import_test_data.ipynb', + 'tomo_1_index.ipynb', + 'tomo_2_map.ipynb', + 'tomo_3_refinement.ipynb', + '4_visualise.ipynb' + ] + dset_file = os.path.join(tomo_dir, 'processed', 'Si_cube', 'Si_cube_S3DXRD_nt_moves_dty', 'Si_cube_S3DXRD_nt_moves_dty_dataset.h5') + scan_nb_params = [ + {'download_dir': tomo_dir, # import_test_data.ipynb + 'PYTHONPATH': sys.path[0]}, + {'PYTHONPATH': sys.path[0], # tomo_1_index.ipynb + 'dset_file': dset_file, + 'phase_str': 'Si', + 'cf_strong_frac': 0.9939, + 'cf_strong_dsmax': 1.594, + 'cf_strong_dstol': 0.005, + 'indexer_ds_tol': 0.01, + 'rings_for_gen': [0, 1, 3], + 'rings_for_scoring': [0, 1, 2, 3, 4], + 'hkl_tols_seq': [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.075], + 'fracs': [0.9, 0.7], + 'max_grains': 1000, + 'peak_assign_tol': 0.05, + }, + {'PYTHONPATH': sys.path[0], # tomo_2_map.ipynb + 'dset_file': dset_file, + 'phase_str': 'Si', + 'cf_strong_frac': 0.9939, + 'cf_strong_dstol': 0.005, + 'is_half_scan': False, + 'halfmask_radius': 25, + 'peak_assign_tol': 0.25, + 'manual_threshold': None, + 'hkltol': 0.25, + 'correct_sinos_with_ring_current': False, + 'first_tmap_cutoff_level': 0.4, + 'niter': 500, + 'second_tmap_cutoff_level': 0.05 + }, + {'PYTHONPATH': sys.path[0], # tomo_3_refinement.ipynb + 'dset_file': dset_file, + 'phase_str': 'Si', + 'default_npks': 20, + 'default_nuniq': 20, + 'hkl_tol_origins': 0.05, + 'hkl_tol_refine': 0.1, + 'hkl_tol_refine_merged': 0.05, + 'ds_tol': 0.004, + 'ifrac': 7e-3, + 'rings_to_refine': None, + 'use_cluster': False + }, + {'PYTHONPATH': sys.path[0], # 4_visualise.ipynb + 'dset_file': dset_file, + 'phase_str': 'Si', + 'min_unique': 400, + } + ] + scan_nb_paths = [os.path.join(scan_nb_prefix, name) for name in scan_nb_names] + notebook_route(tomo_dir, scan_nb_paths, scan_nb_params) + + + +# test the full point-by-point route from start to finish +def test_pbp_route(): + tomo_dir = 'pbp_route' + scan_nb_names = [ + 'import_test_data.ipynb', + 'pbp_1_indexing.ipynb', + 'pbp_2_visualise.ipynb', + 'pbp_3_refinement.ipynb', + '4_visualise.ipynb' + ] + dset_file = os.path.join(tomo_dir, 'processed', 'Si_cube', 'Si_cube_S3DXRD_nt_moves_dty', 'Si_cube_S3DXRD_nt_moves_dty_dataset.h5') + scan_nb_params = [ + {'download_dir': tomo_dir, # import_test_data.ipynb + 'PYTHONPATH': sys.path[0]}, + {'PYTHONPATH': sys.path[0], # pbp_1_indexing.ipynb + 'dset_file': dset_file, + 'phase_str': 'Si', + 'minpkint': 0, + 'hkl_tol': 0.025, + 'fpks': 0.9, + 'ds_tol': 0.004, + 'etacut': 0.1, + 'ifrac': 5e-3, + 'y0': 24.24, + 'symmetry': "cubic", + 'foridx': [0, 1, 3, 5, 7], + 'forgen': [1, 5, 7], + 'uniqcut': 0.85, + 'use_cluster': False + }, + {'PYTHONPATH': sys.path[0], # 
pbp_2_visualise.ipynb + 'dset_file': dset_file, + 'phase_str': 'Si', + 'min_unique': 20 + }, + {'PYTHONPATH': sys.path[0], # pbp_3_refinement.ipynb + 'dset_file': dset_file, + 'phase_str': 'Si', + 'min_unique': 20, + 'manual_threshold': None, + 'y0': 24.24, + 'hkl_tol_origins': 0.05, + 'hkl_tol_refine': 0.1, + 'hkl_tol_refine_merged': 0.05, + 'ds_tol': 0.004, + 'ifrac': 7e-3, + 'rings_to_refine': None, + 'set_mask_from_input': False, + 'use_cluster': False + }, + {'PYTHONPATH': sys.path[0], # 4_visualise.ipynb + 'dset_file': dset_file, + 'phase_str': 'Si', + 'min_unique': 400, + } + ] + scan_nb_paths = [os.path.join(scan_nb_prefix, name) for name in scan_nb_names] + notebook_route(tomo_dir, scan_nb_paths, scan_nb_params) + + +def test_FeAu_JADB_tomo(): + tomo_dir = '/data/id11/inhouse2/test_data_3DXRD/S3DXRD/FeAu/PROCESSED_DATA/20250123_JADB/tomo_route' + scan_nb_names = [ + '0_segment_and_label.ipynb', + 'tomo_1_index.ipynb', + 'tomo_1_index_minor_phase.ipynb', + 'tomo_2_map.ipynb', + 'tomo_2_map_minor_phase.ipynb', + 'tomo_3_refinement.ipynb', # for major phase + 'tomo_3_refinement.ipynb', # for minor phase + '4_visualise.ipynb', # for major phase + '4_visualise.ipynb', # for minor phase + '5_combine_phases.ipynb', + '6_stack_layers.ipynb' + + ] + sample = 'FeAu_0p5_tR_nscope' + dataset = 'top_200um' # first of two layers + dset_file = os.path.join(tomo_dir, sample, f'{sample}_{dataset}', f'{sample}_{dataset}_dataset.h5') + scan_nb_params = [ + {'PYTHONPATH': sys.path[0], # 0_segment_and_label.ipynb + 'maskfile': '/data/id11/inhouse2/test_data_3DXRD/S3DXRD/FeAu/pars/mask_with_gaps_E-08-0173.edf', + 'e2dxfile': '/data/id11/inhouse2/test_data_3DXRD/S3DXRD/FeAu/pars/e2dx_E-08-0173_20231127.edf', + 'e2dyfile': '/data/id11/inhouse2/test_data_3DXRD/S3DXRD/FeAu/pars/e2dy_E-08-0173_20231127.edf', + 'detector': 'eiger', + 'omegamotor': 'rot_center', + 'dtymotor': 'dty', + 'options': { 'cut' : 1, 'pixels_in_spot' : 3, 'howmany' : 100000 }, + 'dataroot': '/data/id11/inhouse2/test_data_3DXRD/S3DXRD/FeAu/RAW_DATA/', + 'analysisroot': tomo_dir, + 'sample': 'FeAu_0p5_tR_nscope', + 'dataset': 'top_200um', + 'dset_prefix': "top_" + }, + {'PYTHONPATH': sys.path[0], # tomo_1_index.ipynb + 'dset_file': dset_file, + 'par_file': '/data/id11/inhouse2/test_data_3DXRD/S3DXRD/FeAu/pars/pars.json', + 'phase_str': 'Fe', + 'cf_strong_frac': 0.9939, + 'cf_strong_dsmax': 1.594, + 'cf_strong_dstol': 0.005, + 'indexer_ds_tol': 0.01, + 'rings_for_gen': [0, 1, 3], + 'rings_for_scoring': [0, 1, 2, 3, 4], + 'hkl_tols_seq': [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.075], + 'fracs': [0.9, 0.7], + 'max_grains': 1000, + 'peak_assign_tol': 0.05, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # tomo_1_index_minor_phase.ipynb + 'dset_file': dset_file, + 'major_phase_str': 'Fe', + 'minor_phase_str': 'Au', + 'major_phase_cf_frac': 0.99418, + 'major_phase_cf_dsmax': 1.594, + 'major_phase_cf_dstol': 0.0035, + 'minor_phase_cf_frac': 0.9975, + 'minor_phase_cf_dsmax': 1.594, + 'minor_phase_cf_dstol': 0.0045, + 'indexer_ds_tol': 0.0045, + 'rings_for_gen': [0, 4, 5], + 'rings_for_scoring': [0, 2, 3, 4, 5, 6, 7, 8, 10, 12, 13], + 'hkl_tols_seq': [0.01, 0.02, 0.03, 0.04, 0.05], + 'fracs': [0.9], + 'max_grains': 1000, + 'peak_assign_tol': 0.05, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # tomo_2_map.ipynb + 'dset_file': dset_file, + 'phase_str': 'Fe', + 'cf_strong_frac': 0.9939, + 'cf_strong_dstol': 0.005, + 'is_half_scan': False, + 'halfmask_radius': 25, + 'peak_assign_tol': 0.25, + 'manual_threshold': None, + 
'hkltol': 0.25, + 'correct_sinos_with_ring_current': True, + 'first_tmap_cutoff_level': 0.4, + 'niter': 500, + 'second_tmap_cutoff_level': 0.05, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # tomo_2_map_minor_phase.ipynb + 'dset_file': dset_file, + 'major_phase_str': 'Fe', + 'minor_phase_str': 'Au', + 'major_phase_cf_frac': 0.994, + 'major_phase_cf_dstol': 0.005, + 'minor_phase_cf_frac': 0.9975, + 'minor_phase_cf_dstol': 0.005, + 'is_half_scan': False, + 'halfmask_radius': 25, + 'peak_assign_tol': 0.25, + 'hkltol': 0.25, + 'correct_sinos_with_ring_current': True, + 'first_tmap_cutoff_level': 0.4, + 'niter': 500, + 'second_tmap_cutoff_level': 0.5, + 'grain_too_many_px': 10, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # tomo_3_refinement.ipynb - major phase + 'dset_file': dset_file, + 'phase_str': 'Fe', + 'default_npks': 20, + 'default_nuniq': 20, + 'hkl_tol_origins': 0.05, + 'hkl_tol_refine': 0.1, + 'hkl_tol_refine_merged': 0.05, + 'ds_tol': 0.004, + 'ifrac': 7e-3, + 'rings_to_refine': None, + 'use_cluster': False, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # tomo_3_refinement.ipynb - minor phase + 'dset_file': dset_file, + 'phase_str': 'Au', + 'default_npks': 20, + 'default_nuniq': 20, + 'hkl_tol_origins': 0.05, + 'hkl_tol_refine': 0.1, + 'hkl_tol_refine_merged': 0.05, + 'ds_tol': 0.006, + 'ifrac': 1e-3, + 'rings_to_refine': [0, 2, 3, 4, 5, 6, 7, 8, 10, 12, 13], + 'use_cluster': False, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # 4_visualise.ipynb - major phase + 'dset_file': dset_file, + 'phase_str': 'Fe', + 'min_unique': 250, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # 4_visualise.ipynb - minor phase + 'dset_file': dset_file, + 'phase_str': 'Au', + 'min_unique': 0, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # 5_combine_phases.ipynb + 'dset_file': dset_file, + 'phase_strs': ['Fe', 'Au'], + 'combine_refined': True, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # 6_stack_layers.ipynb + 'dset_file': dset_file, + 'stack_combined': True, + 'stack_refined': True, + 'zstep': 50.0, + 'dset_prefix': "top_", + }, + ] + if len(scan_nb_names) != len(scan_nb_params): + raise ValueError('Mismatch between number of notebooks and param dicts!') + scan_nb_paths = [os.path.join(scan_nb_prefix, name) for name in scan_nb_names] + notebook_route(tomo_dir, scan_nb_paths, scan_nb_params) + + +def test_FeAu_JADB_pbp(): + tomo_dir = '/data/id11/inhouse2/test_data_3DXRD/S3DXRD/FeAu/PROCESSED_DATA/20250123_JADB/pbp_route' + scan_nb_names = [ + '0_segment_and_label.ipynb', + 'pbp_1_indexing.ipynb', # for major phase + 'pbp_1_indexing.ipynb', # for minor phase + 'pbp_2_visualise.ipynb', # for major phase + 'pbp_2_visualise.ipynb', # for minor phase + 'pbp_3_refinement.ipynb', # for major phase + 'pbp_3_refinement.ipynb', # for minor phase + '4_visualise.ipynb', # for major phase + '4_visualise.ipynb', # for minor phase + '5_combine_phases.ipynb', + '6_stack_layers.ipynb' + + ] + sample = 'FeAu_0p5_tR_nscope' + dataset = 'top_200um' # first of two layers + dset_file = os.path.join(tomo_dir, sample, f'{sample}_{dataset}', f'{sample}_{dataset}_dataset.h5') + scan_nb_params = [ + {'PYTHONPATH': sys.path[0], # 0_segment_and_label.ipynb + 'maskfile': '/data/id11/inhouse2/test_data_3DXRD/S3DXRD/FeAu/pars/mask_with_gaps_E-08-0173.edf', + 'e2dxfile': '/data/id11/inhouse2/test_data_3DXRD/S3DXRD/FeAu/pars/e2dx_E-08-0173_20231127.edf', + 'e2dyfile': 
'/data/id11/inhouse2/test_data_3DXRD/S3DXRD/FeAu/pars/e2dy_E-08-0173_20231127.edf', + 'detector': 'eiger', + 'omegamotor': 'rot_center', + 'dtymotor': 'dty', + 'options': { 'cut' : 1, 'pixels_in_spot' : 3, 'howmany' : 100000 }, + 'dataroot': '/data/id11/inhouse2/test_data_3DXRD/S3DXRD/FeAu/RAW_DATA/', + 'analysisroot': tomo_dir, + 'sample': 'FeAu_0p5_tR_nscope', + 'dataset': 'top_200um', + 'dset_prefix': "top_" + }, + {'PYTHONPATH': sys.path[0], # pbp_1_indexing.ipynb - major phase + 'dset_file': dset_file, + 'par_file': '/data/id11/inhouse2/test_data_3DXRD/S3DXRD/FeAu/pars/pars.json', + 'phase_str': 'Fe', + 'minpkint': 5, + 'hkl_tol': 0.03, + 'fpks': 30, + 'ds_tol': 0.008, + 'etacut': 0.1, + 'ifrac': 2e-3, + 'y0': -16.0, + 'symmetry': 'cubic', + 'foridx': [0, 1, 3, 5, 7], + 'forgen': [1, 5, 7], + 'uniqcut': 0.85, + 'use_cluster': False, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # pbp_1_indexing.ipynb - minor phase + 'dset_file': dset_file, + 'par_file': '/data/id11/inhouse2/test_data_3DXRD/S3DXRD/FeAu/pars/pars.json', + 'phase_str': 'Au', + 'minpkint': 5, + 'hkl_tol': 0.03, + 'fpks': 30, + 'ds_tol': 0.008, + 'etacut': 0.1, + 'ifrac': 2e-3, + 'y0': -16.0, + 'symmetry': 'cubic', + 'foridx': [0, 1, 3, 5, 7], + 'forgen': [1, 5, 7], + 'uniqcut': 0.85, + 'use_cluster': False, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # pbp_2_visualise.ipynb - major phase + 'dset_file': dset_file, + 'phase_str': 'Fe', + 'min_unique': 20, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # pbp_2_visualise.ipynb - minor phase + 'dset_file': dset_file, + 'phase_str': 'Au', + 'min_unique': 10, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # pbp_3_refinement.ipynb - major phase + 'dset_file': dset_file, + 'phase_str': 'Fe', + 'min_unique': 20, + 'manual_threshold': None, + 'y0': -16.0, + 'hkl_tol_origins': 0.05, + 'hkl_tol_refine': 0.1, + 'hkl_tol_refine_merged': 0.05, + 'ds_tol': 0.004, + 'ifrac': 7e-3, + 'rings_to_refine': None, + 'set_mask_from_input': True, + 'use_cluster': False, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # pbp_3_refinement.ipynb - minor phase + 'dset_file': dset_file, + 'phase_str': 'Au', + 'min_unique': 10, + 'manual_threshold': None, + 'y0': -16.0, + 'hkl_tol_origins': 0.05, + 'hkl_tol_refine': 0.1, + 'hkl_tol_refine_merged': 0.05, + 'ds_tol': 0.004, + 'ifrac': 7e-3, + 'rings_to_refine': None, + 'set_mask_from_input': True, + 'use_cluster': False, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # 4_visualise.ipynb - major phase + 'dset_file': dset_file, + 'phase_str': 'Fe', + 'min_unique': 250, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # 4_visualise.ipynb - minor phase + 'dset_file': dset_file, + 'phase_str': 'Au', + 'min_unique': 100, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # 5_combine_phases.ipynb + 'dset_file': dset_file, + 'phase_strs': ['Fe', 'Au'], + 'combine_refined': True, + 'dset_prefix': "top_", + }, + {'PYTHONPATH': sys.path[0], # 6_stack_layers.ipynb + 'dset_file': dset_file, + 'stack_combined': True, + 'stack_refined': True, + 'zstep': 50.0, + 'dset_prefix': "top_", + }, + ] + if len(scan_nb_names) != len(scan_nb_params): + raise ValueError('Mismatch between number of notebooks and param dicts!') + scan_nb_paths = [os.path.join(scan_nb_prefix, name) for name in scan_nb_names] + notebook_route(tomo_dir, scan_nb_paths, scan_nb_params) + + +if __name__=='__main__': + print(papermill.__path__) + test_FeAu_JADB_tomo() + test_FeAu_JADB_pbp() \ No newline at end 
of file
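For context on how the parameter dicts above reach the notebooks: papermill injects each dict into the notebook cell tagged 'parameters', executes the notebook top to bottom, and writes the executed copy to the output path. A minimal sketch of one such run outside the test harness; the output location and parameter values here are placeholders, not taken from the test suite:

```python
# Minimal papermill run (illustrative paths and values).
import papermill

papermill.execute_notebook(
    'ImageD11/nbGui/S3DXRD/4_visualise.ipynb',    # input notebook from the repo
    '/tmp/4_visualise_out.ipynb',                 # executed copy written here (hypothetical)
    parameters={
        'dset_file': '/path/to/your_dataset.h5',  # placeholder dataset path
        'phase_str': 'Si',
        'min_unique': 400,
    },
)
```

Run directly, the module executes the two FeAu routes via its __main__ block; the test_* functions can also be driven by pytest if the file is passed explicitly, since its filename does not match pytest's default test_*.py collection pattern.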