diff --git a/abipy/abilab.py b/abipy/abilab.py
index 37dd3a2e1..754689327 100644
--- a/abipy/abilab.py
+++ b/abipy/abilab.py
@@ -544,7 +544,7 @@ def install_config_files(workdir: Optional[str] = None, force_reinstall: Optiona
       num_nodes: 1
       sockets_per_node: 1
       cores_per_socket: 2
-      mem_per_node: 4 Gb
+      mem_per_node: 4 GB
 """

     # Write configuration files.
diff --git a/abipy/core/mixins.py b/abipy/core/mixins.py
index a7d1f5432..baa4919fc 100644
--- a/abipy/core/mixins.py
+++ b/abipy/core/mixins.py
@@ -575,12 +575,12 @@ def dump(self, filepath: str) -> str:


 _ABBREVS = [
-    (1 << 50, 'Pb'),
-    (1 << 40, 'Tb'),
-    (1 << 30, 'Gb'),
-    (1 << 20, 'Mb'),
-    (1 << 10, 'kb'),
-    (1, 'b'),
+    (1 << 50, 'PB'),
+    (1 << 40, 'TB'),
+    (1 << 30, 'GB'),
+    (1 << 20, 'MB'),
+    (1 << 10, 'kB'),
+    (1, 'B'),
 ]

diff --git a/abipy/data/managers/dragon1_manager.yml b/abipy/data/managers/dragon1_manager.yml
index 91b2949e2..1c98f1125 100644
--- a/abipy/data/managers/dragon1_manager.yml
+++ b/abipy/data/managers/dragon1_manager.yml
@@ -3,7 +3,7 @@ hardware: &hardware
    num_nodes: 26
    sockets_per_node: 2
    cores_per_socket: 8
-   mem_per_node: 112Gb
+   mem_per_node: 112GB

 job: &job
    mpi_runner: mpirun
diff --git a/abipy/data/managers/gmac_manager.yml b/abipy/data/managers/gmac_manager.yml
index 46bf64687..72465d003 100644
--- a/abipy/data/managers/gmac_manager.yml
+++ b/abipy/data/managers/gmac_manager.yml
@@ -16,7 +16,7 @@ qadapters:
       num_nodes: 1
       sockets_per_node: 1
       cores_per_socket: 2
-      mem_per_node: 4 Gb
+      mem_per_node: 4 GB

     # Optional
     #condition: {"$eq": {omp_threads: 2}}
diff --git a/abipy/data/managers/hercules_manager.yml b/abipy/data/managers/hercules_manager.yml
index 95321d4e2..cb93647a8 100644
--- a/abipy/data/managers/hercules_manager.yml
+++ b/abipy/data/managers/hercules_manager.yml
@@ -3,7 +3,7 @@ hardware: &hardware
    num_nodes: 65
    sockets_per_node: 2
    cores_per_socket: 8
-   mem_per_node: 54Gb
+   mem_per_node: 54GB

 job: &job
    mpi_runner: mpirun
diff --git a/abipy/data/managers/hmem_manager.yml b/abipy/data/managers/hmem_manager.yml
index f5f206093..27fbbaedb 100644
--- a/abipy/data/managers/hmem_manager.yml
+++ b/abipy/data/managers/hmem_manager.yml
@@ -4,19 +4,19 @@ high: &high
    num_nodes: 2
    sockets_per_node: 4
    cores_per_socket: 12
-   mem_per_node: 512Gb
+   mem_per_node: 512GB

 middle: &middle
    num_nodes: 7
    sockets_per_node: 4
    cores_per_socket: 12
-   mem_per_node: 256Gb
+   mem_per_node: 256GB

 low: &low
    num_nodes: 7
    sockets_per_node: 4
    cores_per_socket: 12
-   mem_per_node: 128Gb
+   mem_per_node: 128GB

 job: &job
    mpi_runner: mpirun
diff --git a/abipy/data/managers/juqueen_manager.yml b/abipy/data/managers/juqueen_manager.yml
index cdd556aa1..98b70b89c 100644
--- a/abipy/data/managers/juqueen_manager.yml
+++ b/abipy/data/managers/juqueen_manager.yml
@@ -2,7 +2,7 @@ batch: &batch
    num_nodes: 128
    sockets_per_node: 1
    cores_per_socket: 16
-   mem_per_node: 128Gb
+   mem_per_node: 128GB

 job: &job
    mpi_runner: runjob
@@ -48,7 +48,7 @@ qadapters:
       num_nodes: 1
       sockets_per_node: 1
       cores_per_socket: 1
-      mem_per_node: 12Gb
+      mem_per_node: 12GB
     job:
       #mpi_runner: runjob
       shell_env:
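Note: the `_ABBREVS` fix above corrects the suffix table used when pretty-printing file sizes. A minimal sketch of how such a table is typically consumed (the `size2str` helper below is hypothetical, not part of this patch):

```python
# Hypothetical consumer of the corrected _ABBREVS table: walk the factors
# from largest to smallest and format with the first one that fits.
_ABBREVS = [
    (1 << 50, 'PB'),
    (1 << 40, 'TB'),
    (1 << 30, 'GB'),
    (1 << 20, 'MB'),
    (1 << 10, 'kB'),
    (1, 'B'),
]

def size2str(size: int) -> str:
    """Return a human-readable string for a size given in bytes."""
    for factor, suffix in _ABBREVS:
        if size >= factor:
            return "%.2f %s" % (size / factor, suffix)
    return "0 B"

print(size2str(3 * (1 << 30)))  # 3.00 GB
```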
diff --git a/abipy/data/managers/jureca_manager.yml b/abipy/data/managers/jureca_manager.yml
index 2ec8e2b2c..7853e1179 100644
--- a/abipy/data/managers/jureca_manager.yml
+++ b/abipy/data/managers/jureca_manager.yml
@@ -5,13 +5,13 @@ devel: &devel
    num_nodes: 8
    sockets_per_node: 2
    cores_per_socket: 12
-   mem_per_node: 128Gb
+   mem_per_node: 128GB

 batch: &batch
    num_nodes: 128
    sockets_per_node: 2
    cores_per_socket: 12
-   mem_per_node: 128Gb
+   mem_per_node: 128GB

 job: &job
    # mpirun is not available on jureca.
diff --git a/abipy/data/managers/lemaitre2_manager.yml b/abipy/data/managers/lemaitre2_manager.yml
index 6231826d1..b85ce68c5 100644
--- a/abipy/data/managers/lemaitre2_manager.yml
+++ b/abipy/data/managers/lemaitre2_manager.yml
@@ -3,7 +3,7 @@ hardware: &hardware
    num_nodes: 112
    sockets_per_node: 2
    cores_per_socket: 6
-   mem_per_node: 48Gb
+   mem_per_node: 48GB

 job: &job
    mpi_runner: mpirun
diff --git a/abipy/data/managers/lemaitre3_manager.yml b/abipy/data/managers/lemaitre3_manager.yml
index 65d610d01..fa9c50995 100644
--- a/abipy/data/managers/lemaitre3_manager.yml
+++ b/abipy/data/managers/lemaitre3_manager.yml
@@ -5,7 +5,7 @@ hardware: &hardware
    num_nodes: 80
    sockets_per_node: 2
    cores_per_socket: 12
-   mem_per_node: 95Gb
+   mem_per_node: 95GB

 job: &job
    mpi_runner: mpirun
diff --git a/abipy/data/managers/lumi_manager.yml b/abipy/data/managers/lumi_manager.yml
index 3f27b7061..da6564294 100644
--- a/abipy/data/managers/lumi_manager.yml
+++ b/abipy/data/managers/lumi_manager.yml
@@ -5,7 +5,7 @@ hardware: &hardware
    num_nodes: 1376
    sockets_per_node: 2
    cores_per_socket: 64
-   mem_per_node: 256Gb
+   mem_per_node: 256GB

 job: &job
    mpi_runner: srun
diff --git a/abipy/data/managers/manneback_manager.yml b/abipy/data/managers/manneback_manager.yml
index ba2fef568..857810294 100644
--- a/abipy/data/managers/manneback_manager.yml
+++ b/abipy/data/managers/manneback_manager.yml
@@ -3,19 +3,19 @@ Def: &Def
    num_nodes: 672
    sockets_per_node: 2
    cores_per_socket: 4
-   mem_per_node: 24 Gb
+   mem_per_node: 24 GB

 ObanAMD: &ObanAMD
    num_nodes: 6
    sockets_per_node: 4
    cores_per_socket: 8
-   mem_per_node: 128 Gb
+   mem_per_node: 128 GB

 ObanIntel: &ObanIntel
    num_nodes: 3
    sockets_per_node: 4
    cores_per_socket: 8
-   mem_per_node: 256 Gb
+   mem_per_node: 256 GB

 # Environment, modules, and parameters used to launch jobs.
 job: &job
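Note: these manager files feed `mem_per_node` straight into pymatgen's `Memory`, and recent pymatgen releases reject the legacy `Gb`/`Mb` spellings, which is what motivates the renames. A quick sanity check, hedged since the exact exception depends on the pymatgen version (the patch itself catches both `KeyError` and `UnitError`):

```python
# Parse a mem_per_node spec the way the qadapter machinery does: "112GB"
# converts cleanly on recent pymatgen, while the old "112Gb" spelling is
# rejected (the behavior this patch works around).
from pymatgen.core.units import Memory, UnitError

for spec in ("112GB", "112Gb"):
    try:
        print(spec, "->", int(Memory.from_str(spec).to("MB")), "MB")
    except (KeyError, UnitError) as exc:
        print(spec, "-> rejected:", exc)
```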
diff --git a/abipy/data/managers/nic4_manager.yml b/abipy/data/managers/nic4_manager.yml
index c09dc24cd..9d3b2f557 100644
--- a/abipy/data/managers/nic4_manager.yml
+++ b/abipy/data/managers/nic4_manager.yml
@@ -3,7 +3,7 @@ hardware: &hardware
    num_nodes: 120
    sockets_per_node: 2
    cores_per_socket: 8
-   mem_per_node: 64Gb
+   mem_per_node: 64GB

 job: &job
    mpi_runner: "mpirun"
diff --git a/abipy/data/managers/shell_manager.yml b/abipy/data/managers/shell_manager.yml
index a90b28190..cf6fc833d 100644
--- a/abipy/data/managers/shell_manager.yml
+++ b/abipy/data/managers/shell_manager.yml
@@ -15,4 +15,4 @@ qadapters:
       num_nodes: 1
       sockets_per_node: 1
       cores_per_socket: 2
-      mem_per_node: 4 Gb
+      mem_per_node: 4 GB
diff --git a/abipy/data/managers/shell_nompi_manager.yml b/abipy/data/managers/shell_nompi_manager.yml
index 6cc5838cd..e7f26376e 100644
--- a/abipy/data/managers/shell_nompi_manager.yml
+++ b/abipy/data/managers/shell_nompi_manager.yml
@@ -15,4 +15,4 @@ qadapters:
       num_nodes: 1
       sockets_per_node: 1
       cores_per_socket: 2
-      mem_per_node: 4 Gb
+      mem_per_node: 4 GB
diff --git a/abipy/data/managers/travis_manager.yml b/abipy/data/managers/travis_manager.yml
index 70f5902c8..7aa9f1fcf 100644
--- a/abipy/data/managers/travis_manager.yml
+++ b/abipy/data/managers/travis_manager.yml
@@ -16,4 +16,4 @@ qadapters:
       num_nodes: 1
       sockets_per_node: 1
       cores_per_socket: 2
-      mem_per_node: 4 Gb
+      mem_per_node: 4 GB
diff --git a/abipy/data/managers/ubu_manager.yml b/abipy/data/managers/ubu_manager.yml
index 10228cd2e..5e273d6e5 100644
--- a/abipy/data/managers/ubu_manager.yml
+++ b/abipy/data/managers/ubu_manager.yml
@@ -17,4 +17,4 @@ qadapters:
       num_nodes: 1
       sockets_per_node: 1
       cores_per_socket: 24
-      mem_per_node: 4 Gb
+      mem_per_node: 4 GB
diff --git a/abipy/data/managers/vega_manager.yml b/abipy/data/managers/vega_manager.yml
index a75f7fa8c..99ffd260f 100644
--- a/abipy/data/managers/vega_manager.yml
+++ b/abipy/data/managers/vega_manager.yml
@@ -3,7 +3,7 @@ hardware: &hardware
    num_nodes: 44
    sockets_per_node: 4
    cores_per_socket: 16
-   mem_per_node: 256Gb
+   mem_per_node: 256GB

 job: &job
    mpi_runner: mpirun
diff --git a/abipy/data/managers/viper_manager.yml b/abipy/data/managers/viper_manager.yml
index afd2f5e3c..80e1e2b7d 100644
--- a/abipy/data/managers/viper_manager.yml
+++ b/abipy/data/managers/viper_manager.yml
@@ -2,7 +2,7 @@ hardware: &hardware
    num_nodes: 1
    sockets_per_node: 2
    cores_per_socket: 4
-   mem_per_node: 32Gb
+   mem_per_node: 32GB

 job: &job
    mpi_runner: ~/bin/mpirun.openmpi
diff --git a/abipy/data/managers/zenobe_manager.yml b/abipy/data/managers/zenobe_manager.yml
index 5a16c0b68..71a408462 100644
--- a/abipy/data/managers/zenobe_manager.yml
+++ b/abipy/data/managers/zenobe_manager.yml
@@ -3,13 +3,13 @@ westmere: &westmere
    num_nodes: 274
    sockets_per_node: 2
    cores_per_socket: 6
-   mem_per_node: 24 Gb
+   mem_per_node: 24 GB

 ivybridge: &ivybridge
    num_nodes: 342
    sockets_per_node: 2
    cores_per_socket: 12
-   mem_per_node: 64 Gb
+   mem_per_node: 64 GB

 # Environment, modules, and parameters used to launch jobs.
 job: &job
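Note: all of these `hardware` sections share the same shape, and the scheduler logic derives per-core memory from them. A small illustration using the zenobe `westmere` numbers above (plain PyYAML parsing here is an assumption for the sketch; abipy has its own loader, and 1 GB = 1024 MB follows pymatgen's binary convention):

```python
# Derive MB-per-core from a hardware section, using the westmere numbers
# above (2 sockets x 6 cores, 24 GB per node).
import yaml  # assumption: PyYAML is available

hw = yaml.safe_load("""
num_nodes: 274
sockets_per_node: 2
cores_per_socket: 6
mem_per_node: 24 GB
""")

cores_per_node = hw["sockets_per_node"] * hw["cores_per_socket"]
mem_mb = int(hw["mem_per_node"].split()[0]) * 1024  # "24 GB" -> 24576 MB
print(mem_mb // cores_per_node)  # 2048 MB per core
```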
diff --git a/abipy/electrons/arpes.py b/abipy/electrons/arpes.py
index 7074ba29c..5e6a1abd7 100644
--- a/abipy/electrons/arpes.py
+++ b/abipy/electrons/arpes.py
@@ -36,7 +36,7 @@ def model_from_ebands(cls, ebands, tmesh=(0, 300, 600), poorman_polaron=False):
         #aw: [nwr, ntemp, max_nbcalc, nkcalc, nsppol] array
         #aw_meshes: [max_nbcalc, nkcalc, nsppol] array with energy mesh in eV
         from abipy.tools.numtools import lorentzian
-        try :
+        try:
             from scipy.integrate import cumulative_trapezoid as cumtrapz
         except ImportError:
             from scipy.integrate import cumtrapz
diff --git a/abipy/electrons/lobster.py b/abipy/electrons/lobster.py
index 75d7d2bdf..479e74b79 100644
--- a/abipy/electrons/lobster.py
+++ b/abipy/electrons/lobster.py
@@ -771,7 +771,9 @@ def plot(self, ax=None, **kwargs) -> Figure:
         """Barplot with average values."""
         ax, fig, plt = get_ax_fig_plt(ax=ax)
         import seaborn as sns
-        sns.barplot(x="average", y="pair", hue="spin", data=self.dataframe, ax=ax)
+        df = self.dataframe.copy()
+        df["pair"] = df["type0"] + "-" + df["type1"]
+        sns.barplot(x="average", y="pair", hue="spin", data=df, ax=ax)
         return fig

     def yield_figs(self, **kwargs):  # pragma: no cover
diff --git a/abipy/flowtk/flows.py b/abipy/flowtk/flows.py
index 863fbf72a..126c2de11 100644
--- a/abipy/flowtk/flows.py
+++ b/abipy/flowtk/flows.py
@@ -30,6 +30,7 @@
 from monty.termcolor import cprint, colored, cprint_map, get_terminal_size
 from monty.inspect import find_top_pyfile
 from monty.json import MSONable
+from pymatgen.core.units import Memory, UnitError
 from abipy.tools.iotools import AtomicFile
 from abipy.tools.serialization import pmg_pickle_load, pmg_pickle_dump, pmg_serialize
 from abipy.tools.typing import Figure, TYPE_CHECKING
@@ -1290,8 +1291,12 @@ def show_status(self, return_df=False, **kwargs):
             if report is not None:
                 events = '{:>4}|{:>3}'.format(*map(str, (report.num_warnings, report.num_comments)))

-            para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
-                task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("Gb"))))
+            try:
+                para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
+                    task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("GB"))))
+            except (KeyError, UnitError):
+                para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
+                    task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("Gb"))))

             task_info = list(map(str, [task.__class__.__name__,
                              (task.num_launches, task.num_restarts, task.num_corrections), stime, task.node_id]))
@@ -2478,6 +2483,94 @@ def make_light_tarfile(self, name=None):
         name = os.path.basename(self.workdir) + "-light.tar.gz" if name is None else name
         return self.make_tarfile(name=name, exclude_dirs=["outdata", "indata", "tmpdata"])

+    def make_tarfile(self, name=None, max_filesize=None, exclude_exts=None, exclude_dirs=None, verbose=0, **kwargs):
+        """
+        Create a tarball file.
+
+        Args:
+            name: Name of the tarball file. Set to os.path.basename(`flow.workdir`) + ".tar.gz" if name is None.
+            max_filesize (int or string with unit): a file is included in the tar file if its size <= max_filesize.
+                Can be specified in bytes e.g. `max_filesize=1024` or with a string with unit e.g. `max_filesize="1 MB"`.
+                No check is done if max_filesize is None.
+            exclude_exts: List of file extensions to be excluded from the tar file.
+            exclude_dirs: List of directory basenames to be excluded.
+            verbose (int): Verbosity level.
+            kwargs: keyword arguments passed to the :class:`TarFile` constructor.
+
+        Returns: The name of the tarfile.
+ """ + def any2bytes(s): + """Convert string or number to memory in bytes.""" + if is_string(s): + try: + # latest pymatgen version (as of july 2024) + mem = int(Memory.from_str(s.upper()).to("B")) + except (KeyError, UnitError): # For backward compatibility with older pymatgen versions + try: + mem = int(Memory.from_str(s.replace("B", "b")).to("b")) + except AttributeError: # For even older pymatgen versions + mem = int(Memory.from_string(s.replace("B", "b")).to("b")) + return mem + else: + return int(s) + + if max_filesize is not None: + max_filesize = any2bytes(max_filesize) + + if exclude_exts: + # Add/remove ".nc" so that we can simply pass "GSR" instead of "GSR.nc" + # Moreover this trick allows one to treat WFK.nc and WFK file on the same footing. + exts = [] + for e in list_strings(exclude_exts): + exts.append(e) + if e.endswith(".nc"): + exts.append(e.replace(".nc", "")) + else: + exts.append(e + ".nc") + exclude_exts = exts + + def filter(tarinfo): + """ + Function that takes a TarInfo object argument and returns the changed TarInfo object. + If it instead returns None the TarInfo object will be excluded from the archive. + """ + # Skip links. + if tarinfo.issym() or tarinfo.islnk(): + if verbose: print("Excluding link: %s" % tarinfo.name) + return None + + # Check size in bytes + if max_filesize is not None and tarinfo.size > max_filesize: + if verbose: print("Excluding %s due to max_filesize" % tarinfo.name) + return None + + # Filter filenames. + if exclude_exts and any(tarinfo.name.endswith(ext) for ext in exclude_exts): + if verbose: print("Excluding %s due to extension" % tarinfo.name) + return None + + # Exlude directories (use dir basenames). + if exclude_dirs and any(dir_name in exclude_dirs for dir_name in tarinfo.name.split(os.path.sep)): + if verbose: print("Excluding %s due to exclude_dirs" % tarinfo.name) + return None + + return tarinfo + + back = os.getcwd() + os.chdir(os.path.join(self.workdir, "..")) + + import tarfile + name = os.path.basename(self.workdir) + ".tar.gz" if name is None else name + with tarfile.open(name=name, mode='w:gz', **kwargs) as tar: + tar.add(os.path.basename(self.workdir), arcname=None, recursive=True, filter=filter) + + # Add the script used to generate the flow. + if self.pyfile is not None and os.path.exists(self.pyfile): + tar.add(self.pyfile) + + os.chdir(back) + return name + def explain(self, what="all", nids=None, verbose=0) -> str: """ Return string with the docstrings of the works/tasks in the Flow grouped by class. diff --git a/abipy/flowtk/qadapters.py b/abipy/flowtk/qadapters.py index 7df90b711..87fe2f640 100644 --- a/abipy/flowtk/qadapters.py +++ b/abipy/flowtk/qadapters.py @@ -32,11 +32,12 @@ from monty.inspect import all_subclasses from monty.io import FileLock from monty.json import MSONable -from pymatgen.core.units import Memory +from pymatgen.core.units import Memory, UnitError from abipy.tools.iotools import AtomicFile from .utils import Condition from .launcher import ScriptEditor from .qjobs import QueueJob +from .qutils import any2mb import logging logger = logging.getLogger(__name__) @@ -215,19 +216,7 @@ def __init__(self, **kwargs): # Convert memory to megabytes. 
diff --git a/abipy/flowtk/qadapters.py b/abipy/flowtk/qadapters.py
index 7df90b711..87fe2f640 100644
--- a/abipy/flowtk/qadapters.py
+++ b/abipy/flowtk/qadapters.py
@@ -32,11 +32,12 @@
 from monty.inspect import all_subclasses
 from monty.io import FileLock
 from monty.json import MSONable
-from pymatgen.core.units import Memory
+from pymatgen.core.units import Memory, UnitError
 from abipy.tools.iotools import AtomicFile
 from .utils import Condition
 from .launcher import ScriptEditor
 from .qjobs import QueueJob
+from .qutils import any2mb

 import logging
 logger = logging.getLogger(__name__)
@@ -215,19 +216,7 @@ def __init__(self, **kwargs):

         # Convert memory to megabytes.
         m = str(kwargs.pop("mem_per_node"))
-
-        try:
-            # Support for old pymatgen API
-            if hasattr(Memory, "from_string"):
-                self.mem_per_node = int(Memory.from_string(m).to("Mb"))
-            else:
-                self.mem_per_node = int(Memory.from_str(m).to("Mb"))
-        except:
-            d = {"Kb": "KB", "Mb": "MB", "Gb": "GB", "Tb": "TB"}
-            for old, new in d.items():
-                m = m.replace(old, new)
-
-            self.mem_per_node = int(Memory.from_str(m).to("MB"))
+        self.mem_per_node = any2mb(m)

         if self.mem_per_node <= 0 or self.sockets_per_node <= 0 or self.cores_per_socket <= 0:
             raise ValueError("invalid parameters: %s" % kwargs)
@@ -267,7 +256,6 @@ def divmod_node(self, mpi_procs: int, omp_threads: int) -> tuple[int, int]:
         return divmod(mpi_procs * omp_threads, self.cores_per_node)

     def as_dict(self) -> dict:
-
         try:
             # old pymatgen
             mem_per_node = str(Memory(val=self.mem_per_node, unit='Mb'))
@@ -454,9 +442,9 @@ def autodoc(cls) -> str:
                       # it's the limit beyond which the scheduler will not accept the job (MANDATORY).
    hint_cores:        # The limit used in the initial setup of jobs.
                       # Fix_Critical method may increase this number until max_cores is reached
-   min_mem_per_proc:  # Minimum memory per MPI process in Mb, units can be specified e.g. 1.4 Gb
+   min_mem_per_proc:  # Minimum memory per MPI process in MB, units can be specified e.g. 1.4 GB
                       # (DEFAULT: hardware.mem_per_core)
-   max_mem_per_proc:  # Maximum memory per MPI process in Mb, units can be specified e.g. `1.4Gb`
+   max_mem_per_proc:  # Maximum memory per MPI process in MB, units can be specified e.g. `1.4GB`
                       # (DEFAULT: hardware.mem_per_node)
    timelimit:         # Initial time-limit. Accepts time according to slurm-syntax i.e:
                       # "days-hours" or "days-hours:minutes" or "days-hours:minutes:seconds" or
@@ -479,7 +467,7 @@ def autodoc(cls) -> str:
    #
    # limits_for_task_class: {
    #    NscfTask: {min_cores: 1, max_cores: 10},
-   #    KerangeTask: {min_cores: 1, max_cores: 1, max_mem_per_proc: 1 Gb},
+   #    KerangeTask: {min_cores: 1, max_cores: 1, max_mem_per_proc: 1 GB},
    #    }
 """
@@ -917,9 +905,10 @@ def set_master_mem_overhead(self, mem_mb):
     def total_mem(self) -> Memory:
         """Total memory required by the job in megabytes."""
         try:
-            return Memory(self.mem_per_proc * self.mpi_procs + self.master_mem_overhead, "Mb")
-        except:
-            return Memory(self.mem_per_proc * self.mpi_procs + self.master_mem_overhead, "MB")
+            mem = Memory(self.mem_per_proc * self.mpi_procs + self.master_mem_overhead, "MB")
+        except UnitError:
+            mem = Memory(self.mem_per_proc * self.mpi_procs + self.master_mem_overhead, "Mb")
+        return mem

     @abc.abstractmethod
     def cancel(self, job_id: int) -> int:
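Note: the `total_mem` change above settles on the pattern used throughout this patch: try the new unit spelling first, fall back on `UnitError`. Isolated for clarity (the helper name is illustrative, not part of the patch):

```python
# Construct a pymatgen Memory in megabytes on both old and new releases:
# recent pymatgen spells the unit "MB", older releases "Mb".
from pymatgen.core.units import Memory, UnitError

def memory_mb(value: float) -> Memory:
    try:
        return Memory(value, "MB")
    except UnitError:  # older pymatgen
        return Memory(value, "Mb")

# e.g. total memory for 4 MPI procs at 2048 MB each plus a 512 MB overhead.
print(memory_mb(2048.0 * 4 + 512.0))
```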
diff --git a/abipy/flowtk/qutils.py b/abipy/flowtk/qutils.py
index 303631d42..00200ffcf 100644
--- a/abipy/flowtk/qutils.py
+++ b/abipy/flowtk/qutils.py
@@ -12,7 +12,7 @@
 from subprocess import Popen, PIPE, run

 from monty.string import is_string
-from pymatgen.core.units import Time, Memory
+from pymatgen.core.units import Time, Memory, UnitError
 from abipy.tools.typing import PathLike
 from abipy.tools import duck
 from abipy.tools.text import rm_multiple_spaces
@@ -132,7 +132,15 @@ def timelimit_parser(s):

 def any2mb(s):
     """Convert string or number to memory in megabytes."""
     if is_string(s):
-        return int(Memory.from_str(s).to("Mb"))
+        try:
+            # latest pymatgen version (as of July 2024)
+            mem = int(Memory.from_str(s.upper()).to("MB"))
+        except (KeyError, UnitError):  # For backward compatibility with older pymatgen versions
+            try:
+                mem = int(Memory.from_str(s.replace("B", "b")).to("Mb"))
+            except AttributeError:  # For even older pymatgen versions
+                mem = int(Memory.from_string(s.replace("B", "b")).to("Mb"))
+        return mem
     else:
         return int(s)
diff --git a/abipy/flowtk/tasks.py b/abipy/flowtk/tasks.py
index 5ee1c170e..46c6b5880 100644
--- a/abipy/flowtk/tasks.py
+++ b/abipy/flowtk/tasks.py
@@ -23,7 +23,7 @@
 from monty.functools import lazy_property, return_none_if_raise
 from monty.json import MSONable
 from monty.fnmatch import WildCard
-from pymatgen.core.units import Memory
+from pymatgen.core.units import Memory, UnitError
 from abipy.core.globals import get_workdir
 from abipy.core.structure import Structure
 from abipy.tools.serialization import json_pretty_dump, pmg_serialize
@@ -209,7 +209,7 @@ def speedup(self) -> float:

     @property
     def tot_mem(self) -> float:
-        """Estimated total memory in Mbs (computed from mem_per_proc)"""
+        """Estimated total memory in MBs (computed from mem_per_proc)"""
         return self.mem_per_proc * self.mpi_procs

@@ -609,7 +609,7 @@ def get_simple_manager(cls) -> str:
       num_nodes: 1
       sockets_per_node: 1
       cores_per_socket: 2
-      mem_per_node: 4 Gb
+      mem_per_node: 4 GB
 """

     @classmethod
@@ -1881,7 +1881,11 @@ def omp_threads(self) -> int:
     @property
     def mem_per_proc(self) -> Memory:
         """Memory per MPI process."""
-        return Memory(self.manager.mem_per_proc, "Mb")
+        try:
+            mem = Memory(self.manager.mem_per_proc, "MB")
+        except UnitError:  # For older versions of pymatgen
+            mem = Memory(self.manager.mem_per_proc, "Mb")
+        return mem

     @property
     def status(self):
@@ -1947,8 +1951,12 @@ def set_status(self, status: Status, msg: str) -> Status:
         if changed:
             if status == self.S_SUB:
                 self.datetimes.submission = datetime.datetime.now()
-                self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s " % (
-                    self.mpi_procs, self.omp_threads, self.mem_per_proc.to("Gb"), msg))
+                try:
+                    self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [GB] %s " % (
+                        self.mpi_procs, self.omp_threads, self.mem_per_proc.to("GB"), msg))
+                except (KeyError, UnitError):
+                    self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s " % (
+                        self.mpi_procs, self.omp_threads, self.mem_per_proc.to("Gb"), msg))

             elif status == self.S_OK:
                 self.history.info("Task completed %s", msg)
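Note: with the patched `any2mb` in place, callers such as `Hardware.__init__` no longer need their own unit juggling. Expected behaviour (values assume pymatgen's binary 1 GB = 1024 MB convention):

```python
# any2mb normalizes unit strings through pymatgen and passes bare numbers
# through unchanged (they are already megabytes).
from abipy.flowtk.qutils import any2mb

print(any2mb("4 GB"))   # 4096
print(any2mb("512MB"))  # 512
print(any2mb(2048))     # 2048
```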
diff --git a/abipy/flowtk/tests/test_flows.py b/abipy/flowtk/tests/test_flows.py
index 760eac37a..b6ea271f6 100644
--- a/abipy/flowtk/tests/test_flows.py
+++ b/abipy/flowtk/tests/test_flows.py
@@ -36,13 +36,13 @@ class FlowUnitTest(AbipyTest):
     #condition: {"$eq": {omp_threads: 2}}
     limits_for_task_class: {
         DdkTask: {min_cores: 2, max_cores: 30},
-        KerangeTask: {timelimit: 0:10:00, max_mem_per_proc: 1 Gb},
+        KerangeTask: {timelimit: 0:10:00, max_mem_per_proc: 1 GB},
         }

 hardware:
    num_nodes: 10
    sockets_per_node: 1
    cores_per_socket: 2
-   mem_per_node: 4 Gb
+   mem_per_node: 4 GB

 job:
    modules:
       - intel/compilerpro/13.0.1.117
diff --git a/abipy/flowtk/tests/test_qadapters.py b/abipy/flowtk/tests/test_qadapters.py
index 6cbc479e4..7a4240170 100644
--- a/abipy/flowtk/tests/test_qadapters.py
+++ b/abipy/flowtk/tests/test_qadapters.py
@@ -65,7 +65,7 @@ class QadapterTest(AbipyTest):
            num_nodes: 3
            sockets_per_node: 2
            cores_per_socket: 4
-           mem_per_node: 8 Gb
+           mem_per_node: 8 GB
 """)

     def test_base(self):
@@ -123,7 +123,7 @@ def test_base(self):
         aequal(new_script, script)

         # Test can_run and distribute
-        # The hardware has num_nodes=3, sockets_per_node=2, cores_per_socket=4, mem_per_node="8 Gb"
+        # The hardware has num_nodes=3, sockets_per_node=2, cores_per_socket=4, mem_per_node="8 GB"
         afalse(qad.can_run_pconf(ParalConf(mpi_ncpus=hw.num_cores+1, omp_ncpus=1, mem_per_cpu=0.1)))
         afalse(qad.can_run_pconf(ParalConf(mpi_ncpus=4, omp_ncpus=9, mem_per_cpu=0.1)))
         afalse(qad.can_run_pconf(ParalConf(mpi_ncpus=4, omp_ncpus=1, mem_per_cpu=10 * giga)))
@@ -192,7 +192,7 @@ class ShellAdapterTest(AbipyTest):
            num_nodes: 1
            sockets_per_node: 1
            cores_per_socket: 1
-           mem_per_node: 4 Gb
+           mem_per_node: 4 GB
 """)

     def test_methods(self):
         qad = make_qadapter(**self.QDICT)
@@ -257,7 +257,7 @@ class SlurmAdapterTest(AbipyTest):
            num_nodes: 2
            sockets_per_node: 2
            cores_per_socket: 4
-           mem_per_node: 8 Gb
+           mem_per_node: 8 GB
 """)

     def test_methods(self):
@@ -342,7 +342,7 @@ class PbsProadapterTest(AbipyTest):
            num_nodes: 100
            sockets_per_node: 2
            cores_per_socket: 4
-           mem_per_node: 8 Gb""")
+           mem_per_node: 8 GB""")
     QDICT_SHARED = safe_load("""\
 priority: 1
 queue:
@@ -363,7 +363,7 @@ class PbsProadapterTest(AbipyTest):
            num_nodes: 100
            sockets_per_node: 2
            cores_per_socket: 12
-           mem_per_node: 48000 Mb""")
+           mem_per_node: 48000 MB""")
     QDICT_EXCLUSIVE = safe_load("""\
 priority: 1
 queue:
@@ -384,7 +384,7 @@ class PbsProadapterTest(AbipyTest):
            num_nodes: 100
            sockets_per_node: 2
            cores_per_socket: 12
-           mem_per_node: 48000 Mb""")
+           mem_per_node: 48000 MB""")

     def test_methods(self):
         self.maxDiff = None
diff --git a/abipy/flowtk/tests/test_tasks.py b/abipy/flowtk/tests/test_tasks.py
index 401f27a76..759914d46 100644
--- a/abipy/flowtk/tests/test_tasks.py
+++ b/abipy/flowtk/tests/test_tasks.py
@@ -28,7 +28,7 @@ class TaskManagerTest(AbipyTest):
    num_nodes: 10
    sockets_per_node: 1
    cores_per_socket: 2
-   mem_per_node: 4 Gb
+   mem_per_node: 4 GB
 job:
    modules:
       - intel/compilerpro/13.0.1.117
diff --git a/abipy/scripts/abirun.py b/abipy/scripts/abirun.py
index c135bf05b..86697625d 100755
--- a/abipy/scripts/abirun.py
+++ b/abipy/scripts/abirun.py
@@ -683,7 +683,7 @@ def parse_wslice(s):
     # Subparser for tar.
     p_tar = subparsers.add_parser('tar', parents=[copts_parser], help="Create tarball file.")
     p_tar.add_argument("-s", "--max-filesize", default=None,
-                       help="Exclude file whose size > max-filesize bytes. Accept integer or string e.g `1Mb`.")
+                       help="Exclude file whose size > max-filesize bytes. Accept integer or string e.g `1MB`.")
     p_tar.add_argument("-e", "--exclude-exts", default=None, type=parse_strings,
                        help="Exclude file extensions. Accept string or comma-separated strings. Ex: -eWFK or --exclude-exts=WFK,GSR")
     p_tar.add_argument("-d", "--exclude-dirs", default=None, type=parse_strings,
diff --git a/abipy/test_files/taskmanager.yml b/abipy/test_files/taskmanager.yml
index f5d5d7f23..edfad6f17 100644
--- a/abipy/test_files/taskmanager.yml
+++ b/abipy/test_files/taskmanager.yml
@@ -6,7 +6,7 @@ hardware: &hardware
    num_nodes: 1
    sockets_per_node: 1
    cores_per_socket: 2
-   mem_per_node: 4 Gb
+   mem_per_node: 4 GB

 job: &job
    mpi_runner: mpirun
diff --git a/abipy/tools/cli_parsers.py b/abipy/tools/cli_parsers.py
index dfaafb05a..6a67799ab 100644
--- a/abipy/tools/cli_parsers.py
+++ b/abipy/tools/cli_parsers.py
@@ -53,7 +53,7 @@ def pn_serve_parser(**kwargs) -> argparse.ArgumentParser:
                    help="Public hostnames which may connect to the Bokeh websocket.\n Syntax: " +
                         "HOST[:PORT] or *. Default: None")
     p.add_argument('--max_size_mb', default=150, type=int,
-                   help="Maximum message size in Mb allowed by Bokeh and Tornado. Default: 150")
+                   help="Maximum message size in MB allowed by Bokeh and Tornado. Default: 150")
     p.add_argument('--no-browser', action='store_true', default=False,
                    help=("Start the jupyter server to serve the notebook "
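Note: the reporting paths in `show_status` and `set_status` use the same fallback when converting to gigabytes for display. The pattern, extracted as a standalone sketch (the function name is illustrative):

```python
# Format a per-process Memory for log messages: prefer the "GB" unit,
# fall back to the legacy "Gb" spelling when the conversion raises.
from pymatgen.core.units import Memory, UnitError

def mem_gb_label(mem: Memory) -> str:
    try:
        return "%.1f [GB]" % mem.to("GB")
    except (KeyError, UnitError):
        return "%.1f [Gb]" % mem.to("Gb")

try:
    mem = Memory(4096, "MB")
except UnitError:  # older pymatgen
    mem = Memory(4096, "Mb")
print(mem_gb_label(mem))  # 4.0 [GB] on recent pymatgen
```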