chore: update pre-commit hooks #672

Merged · 2 commits · Jan 13, 2025
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -17,7 +17,7 @@ repos:
      - id: trailing-whitespace

  - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.8.6"
+    rev: "v0.9.1"
    hooks:
      - id: ruff
        args: ["--fix", "--show-fixes"]
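Context: bumping `rev` pins the astral-sh/ruff-pre-commit hook to the ruff v0.9.1 release; the formatting-only Python changes in the files below are presumably the output of re-running the updated hooks against the repository (for example with `pre-commit run --all-files`).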
2 changes: 1 addition & 1 deletion autograd/core.py
@@ -293,7 +293,7 @@ def vspace(value):
            return vspace(getval(value))
        else:
            raise TypeError(
-                "Can't find vector space for value {} of type {}. " "Valid types are {}".format(
+                "Can't find vector space for value {} of type {}. Valid types are {}".format(
                    value, type(value), VSpace.mappings.keys()
                )
            )
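Aside on why these string edits are behavior-preserving: Python joins adjacent string literals at compile time, so collapsing the split literals into a single literal (as the updated formatter does throughout this PR) yields exactly the same runtime string. A minimal sketch, reusing the message from core.py above:

    # Illustrative only, not part of the diff: adjacent literals are concatenated
    # by the parser, so both spellings evaluate to the same string.
    old_style = "Can't find vector space for value {} of type {}. " "Valid types are {}"
    new_style = "Can't find vector space for value {} of type {}. Valid types are {}"
    assert old_style == new_style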
12 changes: 4 additions & 8 deletions autograd/numpy/linalg.py
@@ -81,11 +81,9 @@ def check_implemented():

        if matrix_norm:
            if not (ord is None or ord == "fro" or ord == "nuc"):
-                raise NotImplementedError(
-                    "Gradient of matrix norm not " "implemented for ord={}".format(ord)
-                )
+                raise NotImplementedError("Gradient of matrix norm not implemented for ord={}".format(ord))
        elif not (ord is None or ord > 1):
-            raise NotImplementedError("Gradient of norm not " "implemented for ord={}".format(ord))
+            raise NotImplementedError("Gradient of norm not implemented for ord={}".format(ord))

    if axis is None:
        expand = lambda a: a
@@ -139,11 +137,9 @@ def check_implemented():

        if matrix_norm:
            if not (ord is None or ord == "fro" or ord == "nuc"):
-                raise NotImplementedError(
-                    "Gradient of matrix norm not " "implemented for ord={}".format(ord)
-                )
+                raise NotImplementedError("Gradient of matrix norm not implemented for ord={}".format(ord))
        elif not (ord is None or ord > 1):
-            raise NotImplementedError("Gradient of norm not " "implemented for ord={}".format(ord))
+            raise NotImplementedError("Gradient of norm not implemented for ord={}".format(ord))

    if axis is None:
        contract = lambda a: anp.sum(a)
8 changes: 3 additions & 5 deletions autograd/numpy/numpy_vjps.py
@@ -251,11 +251,11 @@
def grad_rollaxis(ans, a, axis, start=0):
    if axis < 0:
        raise NotImplementedError(
-            "Gradient of rollaxis not implemented for axis < 0. " "Please use moveaxis instead."
+            "Gradient of rollaxis not implemented for axis < 0. Please use moveaxis instead."
        )
    elif start < 0:
        raise NotImplementedError(
-            "Gradient of rollaxis not implemented for start < 0. " "Please use moveaxis instead."
+            "Gradient of rollaxis not implemented for start < 0. Please use moveaxis instead."
        )
    return lambda g: anp.rollaxis(g, start - 1, axis) if start > axis else anp.rollaxis(g, start, axis + 1)

@@ -293,9 +293,7 @@ def helper(g, n):
def grad_gradient(ans, x, *vargs, **kwargs):
    axis = kwargs.pop("axis", None)
    if vargs or kwargs:
-        raise NotImplementedError(
-            "The only optional argument currently supported for np.gradient " "is axis."
-        )
+        raise NotImplementedError("The only optional argument currently supported for np.gradient is axis.")
    if axis is None:
        axis = range(x.ndim)
    elif type(axis) is int:
2 changes: 1 addition & 1 deletion autograd/numpy/numpy_wrapper.py
@@ -76,7 +76,7 @@ def array(A, *args, **kwargs):
def wrap_if_boxes_inside(raw_array, slow_op_name=None):
    if raw_array.dtype is _np.dtype("O"):
        if slow_op_name:
-            warnings.warn("{} is slow for array inputs. " "np.concatenate() is faster.".format(slow_op_name))
+            warnings.warn("{} is slow for array inputs. np.concatenate() is faster.".format(slow_op_name))
        return array_from_args((), {}, *raw_array.ravel()).reshape(raw_array.shape)
    else:
        return raw_array
2 changes: 1 addition & 1 deletion autograd/scipy/stats/multivariate_normal.py
@@ -25,7 +25,7 @@ def generalized_outer_product(x):
def covgrad(x, mean, cov, allow_singular=False):
    if allow_singular:
        raise NotImplementedError(
-            "The multivariate normal pdf is not " "differentiable w.r.t. a singular covariance matix"
+            "The multivariate normal pdf is not differentiable w.r.t. a singular covariance matix"
        )
    J = np.linalg.inv(cov)
    solved = np.matmul(J, np.expand_dims(x - mean, -1))
2 changes: 1 addition & 1 deletion autograd/test_util.py
@@ -39,7 +39,7 @@ def check_vjp(f, x):
    vjv_exact = x_vs.inner_prod(x_v, vjp_y)
    vjv_numeric = y_vs.inner_prod(y_v, jvp(x_v))
    assert scalar_close(vjv_numeric, vjv_exact), (
-        "Derivative (VJP) check of {} failed with arg {}:\n" "analytic: {}\nnumeric: {}".format(
+        "Derivative (VJP) check of {} failed with arg {}:\nanalytic: {}\nnumeric: {}".format(
            get_name(f), x, vjv_exact, vjv_numeric
        )
    )
4 changes: 1 addition & 3 deletions examples/fluidsim/fluidsim.py
@@ -143,7 +143,5 @@ def callback(params):
    simulate(init_vx, init_vy, init_smoke, simulation_timesteps, ax, render=True)

    print("Converting frames to an animated GIF...")
-    os.system(
-        "convert -delay 5 -loop 0 step*.png" " -delay 250 step100.png surprise.gif"
-    )  # Using imagemagick.
+    os.system("convert -delay 5 -loop 0 step*.png -delay 250 step100.png surprise.gif")  # Using imagemagick.
    os.system("rm step*.png")
4 changes: 1 addition & 3 deletions examples/fluidsim/wing.py
@@ -183,8 +183,6 @@ def callback(weights):

    print("Converting frames to an animated GIF...")  # Using imagemagick.
    os.system(
-        "convert -delay 5 -loop 0 step*.png " "-delay 250 step{:03d}.png wing.gif".format(
-            simulation_timesteps
-        )
+        "convert -delay 5 -loop 0 step*.png -delay 250 step{:03d}.png wing.gif".format(simulation_timesteps)
    )
    os.system("rm step*.png")