refactor: Update ruff and enable pyupgrade rules (#1367)
hoxbro authored Oct 31, 2024
1 parent 9cf38ff commit ebd2561
Showing 29 changed files with 75 additions and 82 deletions.
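The pyupgrade rules are ruff's UP group. Judging by the hunks below, the fixes at work are UP010 (drop from __future__ imports that are no-ops on Python 3), UP008 (call super() without arguments), UP025 (strip redundant u'' string prefixes), UP026 (import Mock from unittest.mock), UP028 (collapse re-yield loops into yield from), UP018 (native literals), and UP034 (remove extraneous parentheses). A toy module, illustrative only and not taken from this codebase, showing the idioms being removed:

    # Illustrative sketch only -- not from datashader.
    from __future__ import absolute_import   # UP010: deleted, a no-op on Python 3


    class Greeter(dict):
        def __init__(self):
            super(Greeter, self).__init__()   # UP008: becomes super().__init__()
            self.name = u'world'              # UP025: becomes 'world'

        def letters(self):
            for ch in self.name:              # UP028: a loop that only re-yields
                yield ch                      # becomes: yield from self.name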
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -13,7 +13,7 @@ repos:
   - id: end-of-file-fixer
   - id: trailing-whitespace
 - repo: https://github.com/astral-sh/ruff-pre-commit
-  rev: v0.6.9
+  rev: v0.7.1
   hooks:
   - id: ruff
     files: datashader/
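Note that the rev bump only updates the ruff version that pre-commit runs; enabling the UP rules themselves happens in ruff's own configuration (most likely an extend-select entry under [tool.ruff.lint] in pyproject.toml), which is among the changed files not visible in this view.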
3 changes: 1 addition & 2 deletions datashader/compiler.py
@@ -272,8 +272,7 @@ def traverse_aggregation(agg):
     """Yield a left->right traversal of an aggregation"""
     if isinstance(agg, summary):
         for a in agg.values:
-            for a2 in traverse_aggregation(a):
-                yield a2
+            yield from traverse_aggregation(a)
     else:
         yield agg
 
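yield from is more than shorthand for the deleted two-line loop: it also forwards send() and throw() and propagates the subgenerator's return value. A minimal standalone sketch of the same traversal pattern, using nested lists in place of datashader's aggregation objects:

    def leaves(node):
        """Yield leaf values of arbitrarily nested lists, left to right."""
        if isinstance(node, list):
            for child in node:
                yield from leaves(child)   # was: for x in leaves(child): yield x
        else:
            yield node


    assert list(leaves([1, [2, 3], [[4]]])) == [1, 2, 3, 4]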
1 change: 0 additions & 1 deletion datashader/datashape/__init__.py
@@ -1,4 +1,3 @@
-from __future__ import absolute_import
 
 from . import lexer, parser # noqa (API import)
 from .coretypes import * # noqa (API import)
35 changes: 17 additions & 18 deletions datashader/datashape/coretypes.py
@@ -2,7 +2,6 @@
 This defines the DataShape type system, with unified
 shape and data type.
 """
-from __future__ import print_function, division, absolute_import
 
 import ctypes
 import operator

@@ -26,7 +25,7 @@ class Type(type):
     _registry = {}
 
     def __new__(meta, name, bases, dct):
-        cls = super(Type, meta).__new__(meta, name, bases, dct)
+        cls = super(Type, meta).__new__(meta, name, bases, dct)  # noqa: UP008
         # Don't register abstract classes
         if not dct.get('abstract'):
             Type._registry[name] = cls
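This metaclass __new__ is the one place the commit silences UP008 rather than applying it: type.__new__ still needs the metaclass passed explicitly, and the authors evidently preferred the explicit super(Type, meta) form over a zero-argument super() here, so the # noqa: UP008 comment tells ruff to leave the call as is.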
@@ -223,7 +222,7 @@ def __init__(self, tz=None):
         self.tz = tz
 
     def __str__(self):
-        basename = super(Time, self).__str__()
+        basename = super().__str__()
         if self.tz is None:
             return basename
         else:

@@ -244,7 +243,7 @@ def __init__(self, tz=None):
         self.tz = tz
 
     def __str__(self):
-        basename = super(DateTime, self).__str__()
+        basename = super().__str__()
         if self.tz is None:
             return basename
         else:

@@ -340,20 +339,20 @@ class Bytes(Unit):
 
 
 _canonical_string_encodings = {
-    u'A': u'A',
-    u'ascii': u'A',
-    u'U8': u'U8',
-    u'utf-8': u'U8',
-    u'utf_8': u'U8',
-    u'utf8': u'U8',
-    u'U16': u'U16',
-    u'utf-16': u'U16',
-    u'utf_16': u'U16',
-    u'utf16': u'U16',
-    u'U32': u'U32',
-    u'utf-32': u'U32',
-    u'utf_32': u'U32',
-    u'utf32': u'U32',
+    'A': 'A',
+    'ascii': 'A',
+    'U8': 'U8',
+    'utf-8': 'U8',
+    'utf_8': 'U8',
+    'utf8': 'U8',
+    'U16': 'U16',
+    'utf-16': 'U16',
+    'utf_16': 'U16',
+    'utf16': 'U16',
+    'U32': 'U32',
+    'utf-32': 'U32',
+    'utf_32': 'U32',
+    'utf32': 'U32',
 }
6 changes: 1 addition & 5 deletions datashader/datashape/discovery.py
@@ -1,4 +1,3 @@
-from __future__ import print_function, division, absolute_import
 
 from collections import OrderedDict
 from datetime import datetime, date, time, timedelta

@@ -435,10 +434,7 @@ def descendents(d, x):
 try:
     from unittest.mock import Mock
 except ImportError:
-    try:
-        from mock import Mock
-    except ImportError:
-        pass
+    pass
 
 if Mock is not None:
     @dispatch(Mock)
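UP026 is behind this cleanup: unittest.mock has been in the standard library since Python 3.3, so the nested fallback to the third-party mock backport is dead code. The same fix appears in test_discovery.py below.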
3 changes: 1 addition & 2 deletions datashader/datashape/internal_utils.py
@@ -4,7 +4,6 @@
 Do not import datashape modules into this module. See util.py in that case
 """
 
-from __future__ import print_function, division, absolute_import
 
 import keyword
 import re

@@ -76,7 +75,7 @@ def _toposort(edges):
     """
     incoming_edges = reverse_dict(edges)
     incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items())
-    S = set((v for v in edges if v not in incoming_edges))
+    S = {v for v in edges if v not in incoming_edges}
     L = []
 
     while S:
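The second hunk is a comprehension rewrite (ruff's C401, from the flake8-comprehensions group rather than the UP rules, assuming that group is also enabled): a generator expression fed to set() becomes a set comprehension. A standalone equivalent of the rewritten line:

    edges = {'a': ['b'], 'b': ['c'], 'c': []}
    incoming_edges = {'b': {'a'}, 'c': {'b'}}   # toy stand-in for reverse_dict(edges)
    S = {v for v in edges if v not in incoming_edges}   # was: set((v for v in ...))
    assert S == {'a'}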
1 change: 0 additions & 1 deletion datashader/datashape/lexer.py
@@ -2,7 +2,6 @@
 Lexer for the datashape grammar.
 """
 
-from __future__ import absolute_import, division, print_function
 
 import re
 import ast
1 change: 0 additions & 1 deletion datashader/datashape/parser.py
@@ -2,7 +2,6 @@
 Parser for the datashape grammar.
 """
 
-from __future__ import absolute_import, division, print_function
 
 from . import lexer, error
 # TODO: Remove coretypes dependency, make 100% of interaction through
1 change: 0 additions & 1 deletion datashader/datashape/promote.py
@@ -1,4 +1,3 @@
-from __future__ import absolute_import
 
 import numpy as np
 from datashader import datashape
4 changes: 2 additions & 2 deletions datashader/datashape/tests/test_coretypes.py
@@ -337,7 +337,7 @@ def test_record_string():
 
 
 def test_record_with_unicode_name_as_numpy_dtype():
-    r = Record([(str('a'), 'int32')])
+    r = Record([('a', 'int32')])
     assert r.to_numpy_dtype() == np.dtype([('a', 'i4')])

@@ -651,7 +651,7 @@ def test_invalid_record_literal(invalid):
     [
         (['foo', b'\xc4\x87'.decode('utf8')], str),
         (['foo', 'bar'], str),
-        (list(u'ab'), str)
+        (list('ab'), str)
     ]
 )
 def test_unicode_record_names(names, typ):
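Both fixes are behavior-preserving on Python 3, which is why the tests still assert the same things: UP018 drops a redundant str() call around what is already a string literal, and UP025 drops the u prefix, a no-op since Python 3.0. Both are checkable directly:

    assert str('a') == 'a'    # UP018: str() around a literal is redundant
    assert u'ab' == 'ab'      # UP025: the u-prefix changes nothing on Python 3
    assert list('ab') == ['a', 'b']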
1 change: 0 additions & 1 deletion datashader/datashape/tests/test_creation.py
@@ -1,4 +1,3 @@
-from __future__ import absolute_import, division, print_function
 
 import ctypes
 import unittest
5 changes: 1 addition & 4 deletions datashader/datashape/tests/test_discovery.py
@@ -336,10 +336,7 @@ def test_lowest_common_dshape_varlen_strings():
 
 
 def test_discover_mock():
-    try:
-        from unittest.mock import Mock
-    except ImportError:
-        from mock import Mock
+    from unittest.mock import Mock
 
     # This used to segfault because we were sending mocks into numpy
     with pytest.raises(NotImplementedError):
9 changes: 4 additions & 5 deletions datashader/datashape/tests/test_lexer.py
@@ -2,7 +2,6 @@
 Test the DataShape lexer.
 """
 
-from __future__ import absolute_import, division, print_function
 
 import unittest

@@ -81,9 +80,9 @@ def test_string(self):
         self.check_isolated_token("'test'", 'STRING', 'test')
         # Valid escaped characters
         self.check_isolated_token(r'"\"\b\f\n\r\t\ub155"', 'STRING',
-                                  u'"\b\f\n\r\t\ub155')
+                                  '"\b\f\n\r\t\ub155')
         self.check_isolated_token(r"'\'\b\f\n\r\t\ub155'", 'STRING',
-                                  u"'\b\f\n\r\t\ub155")
+                                  "'\b\f\n\r\t\ub155")
         # A sampling of invalid escaped characters
         self.check_failing_token(r'''"\'"''')
         self.check_failing_token(r"""'\"'""")

@@ -95,8 +94,8 @@
         self.check_failing_token(r"'\u123g'")
         self.check_failing_token(r"'\u123'")
         # Some unescaped and escapted unicode characters
-        self.check_isolated_token(u'"\uc548\ub155 \\uc548\\ub155"', 'STRING',
-                                  u'\uc548\ub155 \uc548\ub155')
+        self.check_isolated_token('"\uc548\ub155 \\uc548\\ub155"', 'STRING',
+                                  '\uc548\ub155 \uc548\ub155')
 
     def test_failing_tokens(self):
         self.check_failing_token('~')
9 changes: 4 additions & 5 deletions datashader/datashape/tests/test_parser.py
@@ -2,7 +2,6 @@
 Test the DataShape parser.
 """
 
-from __future__ import absolute_import, division, print_function
 
 import unittest
 import pytest

@@ -162,8 +161,8 @@ def assertExpectedParse(ds_str, expected):
     # String parameter (positional)
     assertExpectedParse('unary["test"]', 'test')
     assertExpectedParse("unary['test']", 'test')
-    assertExpectedParse('unary["\\uc548\\ub155"]', u'\uc548\ub155')
-    assertExpectedParse(u'unary["\uc548\ub155"]', u'\uc548\ub155')
+    assertExpectedParse('unary["\\uc548\\ub155"]', '\uc548\ub155')
+    assertExpectedParse('unary["\uc548\ub155"]', '\uc548\ub155')
     # DataShape parameter (positional)
     assertExpectedParse('unary[int8]', ct.DataShape(ct.int8))
     assertExpectedParse('unary[X]', ct.DataShape(ct.TypeVar('X')))

@@ -185,8 +184,8 @@
     # String parameter (keyword)
     assertExpectedParse('unary[blah="test"]', 'test')
     assertExpectedParse("unary[blah='test']", 'test')
-    assertExpectedParse('unary[blah="\\uc548\\ub155"]', u'\uc548\ub155')
-    assertExpectedParse(u'unary[blah="\uc548\ub155"]', u'\uc548\ub155')
+    assertExpectedParse('unary[blah="\\uc548\\ub155"]', '\uc548\ub155')
+    assertExpectedParse('unary[blah="\uc548\ub155"]', '\uc548\ub155')
     # DataShape parameter (keyword)
     assertExpectedParse('unary[blah=int8]', ct.DataShape(ct.int8))
     assertExpectedParse('unary[blah=X]', ct.DataShape(ct.TypeVar('X')))
1 change: 0 additions & 1 deletion datashader/datashape/type_symbol_table.py
@@ -2,7 +2,6 @@
 A symbol table object to hold types for the parser.
 """
 
-from __future__ import absolute_import, division, print_function
 import ctypes
 from itertools import chain
 
1 change: 0 additions & 1 deletion datashader/datashape/user.py
@@ -1,4 +1,3 @@
-from __future__ import print_function, division, absolute_import
 from .dispatch import dispatch
 from .coretypes import (
     CType, Date, DateTime, DataShape, Record, String, Time, Var, from_numpy, to_numpy_dtype)
1 change: 0 additions & 1 deletion datashader/datashape/util/__init__.py
@@ -1,4 +1,3 @@
-from __future__ import print_function, division, absolute_import
 
 from itertools import chain
 import operator
2 changes: 1 addition & 1 deletion datashader/glyphs/trimesh.py
@@ -18,7 +18,7 @@ class _PolygonLike(_PointLike):
     * interp (bool): Whether to interpolate (True), or to have one color per shape (False)
     """
     def __init__(self, x, y, z=None, weight_type=True, interp=True):
-        super(_PolygonLike, self).__init__(x, y)
+        super().__init__(x, y)
         if z is None:
             self.z = []
         else:
4 changes: 2 additions & 2 deletions datashader/macros.py
@@ -13,7 +13,7 @@ class NameVisitor(ast.NodeVisitor):
     NodeVisitor that builds a set of all of the named identifiers in an AST
     """
     def __init__(self, *args, **kwargs):
-        super(NameVisitor, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.names = set()
 
     def visit_Name(self, node):

@@ -71,7 +71,7 @@ def __init__(self, starred_name, expand_names, *args, **kwargs):
         variable
         """
-        super(ExpandVarargTransformer, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.starred_name = starred_name
         self.expand_names = expand_names
8 changes: 4 additions & 4 deletions datashader/mpl_ext.py
@@ -92,7 +92,7 @@ def alpha_colormap(color, min_alpha=40, max_alpha=255, N=256):
 
 class EqHistNormalize(mpl.colors.Normalize):
     def __init__(self, vmin=None, vmax=None, clip=False, nbins=256 ** 2, ncolors=256):
-        super(EqHistNormalize, self).__init__(vmin, vmax, clip)
+        super().__init__(vmin, vmax, clip)
         self._nbins = nbins
         self._bin_edges = None
         self._ncolors = ncolors

@@ -164,15 +164,15 @@ def inverse(self, value):
         return np.interp([value], self._color_bins, self._bin_edges)[0]
 
     def autoscale(self, A):
-        super(EqHistNormalize, self).autoscale(A)
+        super().autoscale(A)
         self._bin_edges = self._binning(A, self._ncolors)
 
     def autoscale_None(self, A):
-        super(EqHistNormalize, self).autoscale_None(A)
+        super().autoscale_None(A)
         self._bin_edges = self._binning(A, self._ncolors)
 
     def scaled(self):
-        return super(EqHistNormalize, self).scaled() and self._bin_edges is not None
+        return super().scaled() and self._bin_edges is not None
 
 
 class DSArtist(_ImageBase):
4 changes: 2 additions & 2 deletions datashader/reductions.py
@@ -1105,7 +1105,7 @@ def uses_cuda_mutex(self) -> UsesCudaMutex:
         return UsesCudaMutex.Global
 
     def _build_append(self, dshape, schema, cuda, antialias, self_intersect):
-        return super(m2, self)._build_append(dshape, schema, cuda, antialias, self_intersect)
+        return super()._build_append(dshape, schema, cuda, antialias, self_intersect)
 
     def _build_create(self, required_dshape):
         return self._create_float64_zero

@@ -1263,7 +1263,7 @@ class count_cat(by):
     categories present.
     """
     def __init__(self, column):
-        super(count_cat, self).__init__(column, count())
+        super().__init__(column, count())
 
 
 class mean(Reduction):
2 changes: 0 additions & 2 deletions datashader/tests/test_colors.py
@@ -15,10 +15,8 @@ def test_hex_to_rgb():
 
 
 def test_rgb():
-    assert rgb(u'#FAFBFC') == (250, 251, 252)
     assert rgb('#FAFBFC') == (250, 251, 252)
     assert rgb('blue') == (0, 0, 255)
-    assert rgb(u'blue') == (0, 0, 255)
     assert rgb((255, 255, 255)) == (255, 255, 255)
     with pytest.raises(ValueError):
         rgb((255, 256, 255))
6 changes: 4 additions & 2 deletions datashader/tests/test_geopandas.py
@@ -14,6 +14,8 @@
     pytest.param(False, id="dask"),
 ]
 
+_extras = ["spatialpandas.dask", "dask_geopandas.backends", "dask_geopandas"]
+
 with contextlib.suppress(ImportError):
     import dask_geopandas
 

@@ -23,12 +25,12 @@
 
 @pytest.fixture(params=_backends)
 def dask_both(request):
-    with dask_switcher(query=request.param, extras=["spatialpandas.dask", "dask_geopandas.backends", "dask_geopandas"]): ...
+    with dask_switcher(query=request.param, extras=_extras): ...
     return request.param
 
 @pytest.fixture
 def dask_classic(request):
-    with dask_switcher(query=False, extras=["spatialpandas.dask", "dask_geopandas.backends", "dask_geopandas"]): ...
+    with dask_switcher(query=False, extras=_extras): ...
 
 try:
     import dask_geopandas
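This change is ordinary de-duplication rather than a pyupgrade fix: the extras list shared by both fixtures moves to a module-level _extras constant, which presumably also brings the previously very long dask_switcher(...) lines under ruff's line-length limit.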
4 changes: 2 additions & 2 deletions datashader/tests/test_mpl_ext.py
@@ -16,8 +16,8 @@
 
 df = pd.DataFrame(
     {
-        "x": np.array(([0.0] * 10 + [1] * 10)),
-        "y": np.array(([0.0] * 5 + [1] * 5 + [0] * 5 + [1] * 5)),
+        "x": np.array([0.0] * 10 + [1] * 10),
+        "y": np.array([0.0] * 5 + [1] * 5 + [0] * 5 + [1] * 5),
     }
 )
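UP034 is responsible here and in test_pandas.py below: the inner pair of parentheses around the np.array argument is extraneous. The constructed test frame is identical either way:

    import numpy as np

    x = np.array([0.0] * 10 + [1] * 10)   # was: np.array(([0.0] * 10 + [1] * 10))
    assert x.shape == (20,) and x[0] == 0.0 and x[-1] == 1.0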
8 changes: 4 additions & 4 deletions datashader/tests/test_pandas.py
@@ -34,10 +34,10 @@ def _pandas():
     plusminus 0 -1 nan -3 4 -5 6 -7 8 -9 10 -11 12 -13 14 -15 16 -17 18 -19
     cat2 a b c d a b c d a b c d a b c d a b c d
     """
-    df_pd = pd.DataFrame({'x': np.array(([0.] * 10 + [1] * 10)),
-                          'y': np.array(([0.] * 5 + [1] * 5 + [0] * 5 + [1] * 5)),
-                          'log_x': np.array(([1.] * 10 + [10] * 10)),
-                          'log_y': np.array(([1.] * 5 + [10] * 5 + [1] * 5 + [10] * 5)),
+    df_pd = pd.DataFrame({'x': np.array([0.] * 10 + [1] * 10),
+                          'y': np.array([0.] * 5 + [1] * 5 + [0] * 5 + [1] * 5),
+                          'log_x': np.array([1.] * 10 + [10] * 10),
+                          'log_y': np.array([1.] * 5 + [10] * 5 + [1] * 5 + [10] * 5),
                           'i32': np.arange(20, dtype='i4'),
                           'i64': np.arange(20, dtype='i8'),
                           'f32': np.arange(20, dtype='f4'),
[Diff truncated: the remaining changed files are not shown in this view.]
