Merge pull request #5747 from jenshnielsen/pylint_ple_plr
Ruff: Enable most pylint checks and upgrade to ruff 0.2.2
jenshnielsen authored Feb 19, 2024
2 parents 9f2884d + 04d1450 commit 854973b
Showing 28 changed files with 128 additions and 148 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -1,7 +1,7 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: 'v0.2.0'
rev: 'v0.2.2'
hooks:
- id: ruff
types_or: [python, pyi, jupyter, toml]
2 changes: 1 addition & 1 deletion docs/examples/logging/logging_example.ipynb
@@ -193,7 +193,7 @@
"source": [
"import logging\n",
"\n",
"import qcodes.logger as logger\n",
"from qcodes import logger\n",
"\n",
"log = logging.getLogger('example_logger')\n",
"\n",
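An aside on this notebook change: `import qcodes.logger as logger` and `from qcodes import logger` bind the same submodule to the same local name; ruff's PLR0402 (manual-from-import) prefers the second spelling. A minimal sketch, assuming qcodes is importable:

```python
# Before -- flagged by PLR0402, since the alias merely repeats
# the submodule's own name:
#   import qcodes.logger as logger

# After -- the equivalent from-import applied in this commit:
from qcodes import logger

# Both forms bind the qcodes.logger submodule to the name "logger";
# only the spelling differs.
print(logger.__name__)  # prints "qcodes.logger"
```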
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -245,15 +245,15 @@ extend-exclude = ["typings"]
# I isort
# ISC flake8-implicit-str-concat
# TID253 banned-module-level-imports
select = ["E", "F", "PT025", "UP", "RUF010", "RUF012", "RUF200", "I", "G", "ISC", "TID253", "NPY"]
select = ["E", "F", "PT025", "UP", "RUF010", "RUF012", "RUF200", "I", "G", "ISC", "TID253", "NPY", "PLE", "PLR", "PLC", "PLW"]
# darker will fix this as code is
# reformatted when it is changed.
# G004 We have a lot of use of f strings in log messages
# so disable that lint for now
# NPY002 We have a lot of use of the legacy
# random number generator. Eventually we should port this
# code.
ignore = ["E501", "G004", "NPY002"]
ignore = ["E501", "G004", "NPY002", "PLR2004", "PLR0913", "PLR0911", "PLR0912", "PLR0915", "PLW0602", "PLW0603", "PLW2901"]

[tool.ruff.lint.isort]

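For context: `PLE`, `PLR`, `PLC`, and `PLW` are ruff's pylint error, refactor, convention, and warning groups, and the enlarged `ignore` list opts out of the noisiest refactor checks (magic values, the too-many-arguments/returns/branches/statements complexity limits, and the `global` and loop-variable warnings). A small illustrative snippet, with made-up names, of code the ignored rules would otherwise flag:

```python
# PLR2004 (magic-value-comparison): a bare numeric literal in a
# comparison; left enabled, the rule would ask for a named constant.
def is_hot(temperature_k: float) -> bool:
    return temperature_k > 300.0

# PLW0603 (global-statement): rebinding a module-level name.
_call_count = 0

def bump() -> int:
    global _call_count
    _call_count += 1
    return _call_count
```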
5 changes: 2 additions & 3 deletions src/qcodes/dataset/data_set.py
@@ -749,9 +749,8 @@ def _ensure_dataset_written(self) -> None:
{'keys': 'finalize', 'values': self.run_id})
while self.run_id in writer_status.active_datasets:
time.sleep(self.background_sleep_time)
else:
if self.run_id in writer_status.active_datasets:
writer_status.active_datasets.remove(self.run_id)
elif self.run_id in writer_status.active_datasets:
writer_status.active_datasets.remove(self.run_id)
if len(writer_status.active_datasets) == 0:
writer_status.write_in_background = None
if writer_status.bg_writer is not None:
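The data_set.py change above, and most of the Python hunks below, are one mechanical refactor: ruff's PLR5501 (collapsible-else-if) folds an `if` that is the only statement of an `else` block into an `elif`, removing a nesting level without changing behaviour. The same rewrite recurs in the Keithley, Keysight, Agilent, Rohde & Schwarz, and Tektronix drivers and in the parameter, plotting, and test helpers. A minimal sketch with placeholder names:

```python
def sign(x: int) -> str:
    # Before (flagged by PLR5501): the else block only wraps another if.
    #   if x > 0:
    #       return "positive"
    #   else:
    #       if x == 0:
    #           return "zero"
    #       else:
    #           return "negative"

    # After: the nested if is promoted to elif; behaviour is identical.
    if x > 0:
        return "positive"
    elif x == 0:
        return "zero"
    else:
        return "negative"
```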
6 changes: 3 additions & 3 deletions src/qcodes/dataset/data_set_cache.py
@@ -30,12 +30,12 @@
from .data_set_in_memory import DataSetInMem
from .data_set_protocol import DataSetProtocol, ParameterData

DatasetType = TypeVar("DatasetType", bound="DataSetProtocol", covariant=True)
DatasetType_co = TypeVar("DatasetType_co", bound="DataSetProtocol", covariant=True)

log = logging.getLogger(__name__)


class DataSetCache(Generic[DatasetType]):
class DataSetCache(Generic[DatasetType_co]):
"""
The DataSetCache contains a in memory representation of the
data in this dataset as well a a method to progressively read data
@@ -47,7 +47,7 @@ class DataSetCache(Generic[DatasetType]):
:py:class:`.DataSet.to_pandas_dataframe_dict`
"""

def __init__(self, dataset: DatasetType):
def __init__(self, dataset: DatasetType_co):
self._dataset = dataset
self._data: ParameterData = {}
#: number of rows read per parameter tree (by the name of the dependent parameter)
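The `DatasetType` → `DatasetType_co` rename follows the pylint-style TypeVar naming checks in the `PLC` group (PLC0105, typevar-name-incorrect-variance): a covariant `TypeVar` should end in `_co` (a contravariant one in `_contra`) so the variance is visible at every use site. A self-contained sketch of why the suffix matters, using illustrative classes:

```python
from typing import Generic, TypeVar

class Animal: ...
class Dog(Animal): ...

# Declared covariant, so the "_co" suffix is expected by the rule.
AnimalT_co = TypeVar("AnimalT_co", bound=Animal, covariant=True)

class Cage(Generic[AnimalT_co]):
    def __init__(self, resident: AnimalT_co) -> None:
        self.resident = resident

# Covariance means a Cage[Dog] is acceptable where a Cage[Animal]
# is expected -- mirroring DataSetCache[DatasetType_co] above.
def release(cage: Cage[Animal]) -> Animal:
    return cage.resident

release(Cage(Dog()))
```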
2 changes: 1 addition & 1 deletion src/qcodes/dataset/database_fix_functions.py
@@ -12,8 +12,8 @@
from tqdm import tqdm

import qcodes.dataset.descriptions.versioning.serialization as serial
import qcodes.dataset.descriptions.versioning.v0 as v0
from qcodes.dataset.descriptions.rundescriber import RunDescriber
from qcodes.dataset.descriptions.versioning import v0
from qcodes.dataset.descriptions.versioning.converters import old_to_new
from qcodes.dataset.descriptions.versioning.rundescribertypes import RunDescriberV1Dict
from qcodes.dataset.sqlite.connection import ConnectionPlus, atomic, atomic_transaction
Expand Down
2 changes: 1 addition & 1 deletion src/qcodes/dataset/sqlite/database.py
@@ -304,7 +304,7 @@ def conn_from_dbpath_or_conn(
if conn is None and path_to_db is not None:
conn = connect(path_to_db, get_DB_debug())
elif conn is not None:
conn = conn
pass
else:
# this should be impossible but left here to keep mypy happy.
raise RuntimeError("Could not obtain a connection from"
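The removed `conn = conn` is a no-op that ruff's PLW0127 (self-assigning-variable) flags; `pass` states the intent explicitly while keeping the branch, so the final `else` still guards the case the source comment calls impossible. A runnable sketch of the same structure, with a stand-in helper:

```python
def open_connection(path: str) -> object:
    # stand-in for qcodes' connect(); purely illustrative
    return f"connection to {path}"

def resolve(conn: object | None, path_to_db: str | None) -> object:
    if conn is None and path_to_db is not None:
        conn = open_connection(path_to_db)
    elif conn is not None:
        pass  # was "conn = conn" -- a self-assignment with no effect
    else:
        raise RuntimeError("need either a connection or a database path")
    return conn
```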
2 changes: 1 addition & 1 deletion src/qcodes/instrument_drivers/AlazarTech/dll_wrapper.py
@@ -62,7 +62,7 @@ def _mark_params_as_updated(*args: Any) -> None:
def _check_error_code(
return_code: int, func: Callable[..., Any], arguments: tuple[Any, ...]
) -> tuple[Any, ...]:
if (return_code != API_SUCCESS) and (return_code != API_DMA_IN_PROGRESS):
if return_code not in {API_SUCCESS, API_DMA_IN_PROGRESS}:
argrepr = repr(arguments)
if len(argrepr) > 100:
argrepr = argrepr[:96] + '...]'
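Chained `!=` comparisons against the same value collapse into a single membership test (ruff's PLR1714, repeated-equality-comparison), which reads as one condition instead of two. A sketch with illustrative constants (not necessarily the real Alazar return codes):

```python
API_SUCCESS = 512          # illustrative values only
API_DMA_IN_PROGRESS = 518

def is_error(return_code: int) -> bool:
    # Before: (return_code != API_SUCCESS) and (return_code != API_DMA_IN_PROGRESS)
    # After: one membership test with the same truth table.
    return return_code not in {API_SUCCESS, API_DMA_IN_PROGRESS}
```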
12 changes: 5 additions & 7 deletions src/qcodes/instrument_drivers/Keithley/Keithley_2450.py
@@ -86,13 +86,11 @@ def __init__(
"buffer() missing 1 required positional argument: 'size'"
)
self.write(f":TRACe:MAKE '{self.buffer_name}', {self._size}, {self.style}")
else:
# when referring to default buffer, "size" parameter is not needed.
if size is not None:
self.log.warning(
f"Please use method 'size()' to resize default buffer "
f"{self.buffer_name} size to {self._size}."
)
elif size is not None:
self.log.warning(
f"Please use method 'size()' to resize default buffer "
f"{self.buffer_name} size to {self._size}."
)

self.add_parameter(
"size",
11 changes: 5 additions & 6 deletions src/qcodes/instrument_drivers/Keithley/Keithley_7510.py
@@ -114,13 +114,12 @@ def __init__(
"buffer() missing 1 required positional argument: 'size'"
)
self.write(f":TRACe:MAKE '{self.short_name}', {self._size}, {self.style}")
else:
elif size is not None:
# when referring to default buffer, "size" parameter is not needed.
if size is not None:
self.log.warning(
f"Please use method 'size()' to resize default buffer "
f"{self.short_name} size to {self._size}."
)
self.log.warning(
f"Please use method 'size()' to resize default buffer "
f"{self.short_name} size to {self._size}."
)

self.add_parameter(
"size",
5 changes: 2 additions & 3 deletions src/qcodes/instrument_drivers/Keysight/Infiniium.py
@@ -446,9 +446,8 @@ def _create_query(self, cmd: str, pre_cmd: str = "", post_cmd: str = "") -> str:
chan_str = f",{chan_str}"
if post_cmd:
chan_str = f"{chan_str},"
else:
if pre_cmd and post_cmd:
pre_cmd = f"{pre_cmd},"
elif pre_cmd and post_cmd:
pre_cmd = f"{pre_cmd},"
return f":MEAS:{cmd}? {pre_cmd}{chan_str}{post_cmd}".strip()


@@ -100,7 +100,7 @@ def compliance(self) -> list[int]:
if total_count == normal_count:
print('All measurements are normal')
else:
indices = [i for i, x in enumerate(data.status) if x == "C" or x == "T"]
indices = [i for i, x in enumerate(data.status) if x in {"C", "T"}]
warnings.warn(
f"{exception_count!s} measurements were "
f"out of compliance at {indices!s}"
2 changes: 1 addition & 1 deletion src/qcodes/instrument_drivers/Minicircuits/USBHIDMixin.py
@@ -7,7 +7,7 @@
from typing import Any, Optional

try:
import pywinusb.hid as hid # pyright: ignore[reportMissingModuleSource]
from pywinusb import hid # pyright: ignore[reportMissingModuleSource]

imported_hid = True
except ImportError:
11 changes: 5 additions & 6 deletions src/qcodes/instrument_drivers/agilent/Agilent_E8257D.py
@@ -80,14 +80,13 @@ def __init__(
else:
self._min_power = -110
self._max_power = 5
elif frequency_option in ["513", "520", "521", "532", "540"]:
self._min_power = -20
self._max_power = 10
else:
# default minimal power is -20 dBm
if frequency_option in ["513", "520", "521", "532", "540"]:
self._min_power = -20
self._max_power = 10
else:
self._min_power = -20
self._max_power = 5
self._min_power = -20
self._max_power = 5

self.add_parameter(
name="frequency",
11 changes: 6 additions & 5 deletions src/qcodes/instrument_drivers/rohde_schwarz/RTO1000.py
@@ -488,12 +488,13 @@ def __init__(self, name: str, address: str,
"does not match the instrument's response."
" I am going to assume that this oscilloscope "
f"is a model {self.model}")
elif model is None:
raise ValueError(
"No model number provided. Please provide "
'a model number (eg. "RTO1024").'
)
else:
if model is None:
raise ValueError('No model number provided. Please provide '
'a model number (eg. "RTO1024").')
else:
self.model = model
self.model = model

self.HD = HD

21 changes: 10 additions & 11 deletions src/qcodes/instrument_drivers/tektronix/AWG5014.py
@@ -844,19 +844,18 @@ def _pack_record(
Allowed values: 'h', 'd', 's'.
"""
if len(dtype) == 1:
record_data = struct.pack('<' + dtype, value)
record_data = struct.pack("<" + dtype, value)
elif dtype[-1] == "s":
assert isinstance(value, str)
record_data = value.encode("ASCII")
else:
if dtype[-1] == 's':
assert isinstance(value, str)
record_data = value.encode('ASCII')
assert isinstance(value, (abc.Sequence, np.ndarray))
if dtype[-1] == "H" and isinstance(value, np.ndarray):
# numpy conversion is fast
record_data = value.astype("<u2").tobytes()
else:
assert isinstance(value, (abc.Sequence, np.ndarray))
if dtype[-1] == 'H' and isinstance(value, np.ndarray):
# numpy conversion is fast
record_data = value.astype('<u2').tobytes()
else:
# argument unpacking is slow
record_data = struct.pack('<' + dtype, *value)
# argument unpacking is slow
record_data = struct.pack("<" + dtype, *value)

# the zero byte at the end the record name is the "(Include NULL.)"
record_name = name.encode('ASCII') + b'\x00'
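A note on the comments this hunk preserves ("numpy conversion is fast", "argument unpacking is slow"): `struct.pack("<" + dtype, *value)` must splat the sequence into individual Python arguments, while `value.astype("<u2").tobytes()` casts and serialises the whole buffer in C. Both paths produce identical little-endian uint16 bytes, as this self-contained sketch checks:

```python
import struct

import numpy as np

values = np.arange(10_000, dtype=np.int64)

# Fast path: cast to little-endian uint16 and dump the buffer in C.
fast = values.astype("<u2").tobytes()

# Slow path: struct.pack receives 10,000 separate Python arguments.
slow = struct.pack("<" + "H" * len(values), *values)

assert fast == slow
```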
10 changes: 4 additions & 6 deletions src/qcodes/math_utils/field_vector.py
@@ -73,12 +73,10 @@ def _set_attribute_value(self, attr_name: str, value: float | None) -> None:

if attr_value is None:
setattr(self, "_" + attr_name, value)
else:
if not np.isclose(attr_value, value):
raise ValueError(
f"Computed value of {attr_name} inconsistent with given "
f"value"
)
elif not np.isclose(attr_value, value):
raise ValueError(
f"Computed value of {attr_name} inconsistent with given value"
)

def _set_attribute_values(
self, attr_names: Sequence[str], values: Sequence[float | None]
15 changes: 7 additions & 8 deletions src/qcodes/parameters/cache.py
@@ -214,15 +214,14 @@ def get(self, get_if_invalid: bool = True) -> ParamDataType:

if cache_valid:
return self._value
else:
if get_if_invalid:
if gettable:
return self._parameter.get()
else:
error_msg = self._construct_error_msg()
raise RuntimeError(error_msg)
elif get_if_invalid:
if gettable:
return self._parameter.get()
else:
return self._value
error_msg = self._construct_error_msg()
raise RuntimeError(error_msg)
else:
return self._value

def _construct_error_msg(self) -> str:
if self._timestamp is None:
9 changes: 4 additions & 5 deletions src/qcodes/parameters/parameter_base.py
@@ -462,12 +462,11 @@ def __call__(self, *args: Any, **kwargs: Any) -> ParamDataType | None:
return self.get()
else:
raise NotImplementedError(f"no get cmd found in Parameter {self.name}")
elif self.settable:
self.set(*args, **kwargs)
return None
else:
if self.settable:
self.set(*args, **kwargs)
return None
else:
raise NotImplementedError(f"no set cmd found in Parameter {self.name}")
raise NotImplementedError(f"no set cmd found in Parameter {self.name}")

def snapshot_base(
self,
17 changes: 8 additions & 9 deletions src/qcodes/plotting/matplotlib_helpers.py
@@ -97,16 +97,15 @@ def apply_color_scale_limits(
if data_array is None:
data_array = cast(np.ndarray, colorbar.mappable.get_array())
data_lim = np.nanmin(data_array), np.nanmax(data_array)
elif data_array is not None:
raise RuntimeError(
"You may not specify `data_lim` and `data_array` "
"at the same time. Please refer to the docstring of "
"`apply_color_scale_limits for details:\n\n`"
f"{apply_color_scale_limits.__doc__!s}"
)
else:
if data_array is not None:
raise RuntimeError(
"You may not specify `data_lim` and `data_array` "
"at the same time. Please refer to the docstring of "
"`apply_color_scale_limits for details:\n\n`"
+ str(apply_color_scale_limits.__doc__)
)
else:
data_lim = cast(tuple[float, float], tuple(sorted(data_lim)))
data_lim = cast(tuple[float, float], tuple(sorted(data_lim)))
# if `None` is provided in the new limits don't change this limit
vlim = [new or old for new, old in zip(new_lim, colorbar.mappable.get_clim())]
# sort limits in case they were given in a wrong order
61 changes: 30 additions & 31 deletions src/qcodes/tests/common.py
@@ -203,38 +203,37 @@ def compare_dictionaries(
path = old_path + "[%s]" % k
if k not in dict_2.keys():
key_err += f"Key {dict_1_name}{path} not in {dict_2_name}\n"
elif isinstance(dict_1[k], dict) and isinstance(dict_2[k], dict):
err += compare_dictionaries(
dict_1[k], dict_2[k], dict_1_name, dict_2_name, path
)[1]
else:
if isinstance(dict_1[k], dict) and isinstance(dict_2[k], dict):
err += compare_dictionaries(
dict_1[k], dict_2[k], dict_1_name, dict_2_name, path
)[1]
else:
match = dict_1[k] == dict_2[k]

# if values are equal-length numpy arrays, the result of
# "==" is a bool array, so we need to 'all' it.
# In any other case "==" returns a bool
# TODO(alexcjohnson): actually, if *one* is a numpy array
# and the other is another sequence with the same entries,
# this will compare them as equal. Do we want this, or should
# we require exact type match?
if hasattr(match, "all"):
match = match.all()

if not match:
value_err += (
'Value of "{}{}" ("{}", type"{}") not same as\n'
' "{}{}" ("{}", type"{}")\n\n'
).format(
dict_1_name,
path,
dict_1[k],
type(dict_1[k]),
dict_2_name,
path,
dict_2[k],
type(dict_2[k]),
)
match = dict_1[k] == dict_2[k]

# if values are equal-length numpy arrays, the result of
# "==" is a bool array, so we need to 'all' it.
# In any other case "==" returns a bool
# TODO(alexcjohnson): actually, if *one* is a numpy array
# and the other is another sequence with the same entries,
# this will compare them as equal. Do we want this, or should
# we require exact type match?
if hasattr(match, "all"):
match = match.all()

if not match:
value_err += (
'Value of "{}{}" ("{}", type"{}") not same as\n'
' "{}{}" ("{}", type"{}")\n\n'
).format(
dict_1_name,
path,
dict_1[k],
type(dict_1[k]),
dict_2_name,
path,
dict_2[k],
type(dict_2[k]),
)

for k in dict_2.keys():
path = old_path + f"[{k}]"