From b0e9a3276d62d87813e6a4d26ed3525d372d7174 Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Thu, 12 Oct 2023 19:57:07 +0100 Subject: [PATCH] Fix typos using codespell (#1288) --- CHANGELOG.rst | 2 +- datashader/colors.py | 2 +- datashader/datashape/discovery.py | 2 +- datashader/datashape/type_symbol_table.py | 2 +- datashader/datatypes.py | 6 +-- datashader/reductions.py | 6 +-- datashader/tests/test_antialias.py | 2 +- datashader/tests/test_datatypes.py | 4 +- datashader/transfer_functions/__init__.py | 2 +- examples/README.md | 14 +++---- examples/filetimes.py | 2 +- examples/tiling.ipynb | 8 ++-- examples/user_guide/1_Plotting_Pitfalls.ipynb | 16 ++++---- examples/user_guide/3_Timeseries.ipynb | 8 ++-- examples/user_guide/5_Grids.ipynb | 10 ++--- examples/user_guide/7_Networks.ipynb | 39 +++++++++++-------- 16 files changed, 66 insertions(+), 59 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a202ea9f8..c0bdf1531 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -724,7 +724,7 @@ Minor bugfix release to support Bokeh 0.12: Version 0.3.0 (2016-06-23) -------------------------- -The major feature of this release is support of raster data via ``Canvas.raster``. To use this feature, you must install the optional dependencies via ``conda install rasterio scikit-image``. Rasterio relies on ``gdal`` whose conda package has some known bugs, including a missing dependancy for ``conda install krb5``. InteractiveImage in this release requires bokeh 0.11.1 or earlier, and will not work with bokeh 0.12. +The major feature of this release is support of raster data via ``Canvas.raster``. To use this feature, you must install the optional dependencies via ``conda install rasterio scikit-image``. Rasterio relies on ``gdal`` whose conda package has some known bugs, including a missing dependency for ``conda install krb5``. InteractiveImage in this release requires bokeh 0.11.1 or earlier, and will not work with bokeh 0.12. 
- **PR #160 #187** Improved example notebooks and dashboard - **PR #186 #184 #178** Add datashader-download-data cli command for grabbing example datasets diff --git a/datashader/colors.py b/datashader/colors.py index 55b0d7282..ee60b5fe5 100644 --- a/datashader/colors.py +++ b/datashader/colors.py @@ -133,7 +133,7 @@ def rgb(x): # Adapted from matplotlib.cm.hot to be more uniform at the high end Hot = ["black", "maroon", "darkred", "red", "orangered", "darkorange", "orange", "gold", "yellow", "white"] -# pseudo terrestial elevation ramp +# pseudo terrestrial elevation ramp Elevation = ["aqua", "sandybrown", "limegreen", "green", "green", "darkgreen", "saddlebrown", "gray", "white"] # Qualitative color maps, for use in colorizing categories diff --git a/datashader/datashape/discovery.py b/datashader/datashape/discovery.py index c33be5e5a..fa0d0eb79 100644 --- a/datashader/datashape/discovery.py +++ b/datashader/datashape/discovery.py @@ -58,7 +58,7 @@ def discover(obj, **kwargs): warn( dedent( """\ - array-like discovery is deperecated. + array-like discovery is deprecated. Please write an explicit discover function for type '%s'. 
""" % type_name, ), diff --git a/datashader/datashape/type_symbol_table.py b/datashader/datashape/type_symbol_table.py index ef6df0b29..21a4b985c 100644 --- a/datashader/datashape/type_symbol_table.py +++ b/datashader/datashape/type_symbol_table.py @@ -22,7 +22,7 @@ def _complex(tp): return ct.complex_float64 else: raise TypeError( - 'Cannot contruct a complex type with real component %s' % tp) + 'Cannot construct a complex type with real component %s' % tp) def _struct(names, dshapes): diff --git a/datashader/datatypes.py b/datashader/datatypes.py index 1ec02f5e8..11e0f356a 100644 --- a/datashader/datatypes.py +++ b/datashader/datatypes.py @@ -33,7 +33,7 @@ def _validate_ragged_properties(start_indices, flat_array): flat_array: numpy array containing concatenation of all nested arrays to be represented by this ragged array - start_indices: unsiged integer numpy array the same + start_indices: unsigned integer numpy array the same length as the ragged array where values represent the index into flat_array where the corresponding ragged array element @@ -231,7 +231,7 @@ def __init__(self, data, dtype=None, copy=False): - flat_array: numpy array containing concatenation of all nested arrays to be represented by this ragged array - - start_indices: unsiged integer numpy array the same + - start_indices: unsigned integer numpy array the same length as the ragged array where values represent the index into flat_array where the corresponding ragged array element @@ -385,7 +385,7 @@ def flat_array(self): @property def start_indices(self): """ - unsiged integer numpy array the same length as the ragged array where + unsigned integer numpy array the same length as the ragged array where values represent the index into flat_array where the corresponding ragged array element begins diff --git a/datashader/reductions.py b/datashader/reductions.py index dd1b3ab8a..985927018 100644 --- a/datashader/reductions.py +++ b/datashader/reductions.py @@ -480,7 +480,7 @@ def 
_finalize(bases, cuda=False, **kwargs): class SelfIntersectingOptionalFieldReduction(OptionalFieldReduction): """ Base class for optional field reductions for which self-intersecting - geometry may or may not be desireable. + geometry may or may not be desirable. Ignored if not using antialiasing. """ def __init__(self, column=None, self_intersect=True): @@ -946,8 +946,8 @@ def _combine(aggs): class SelfIntersectingFloatingReduction(FloatingReduction): """ - Base class fo floating reductions for which self-intersecting geometry - may or may not be desireable. + Base class for floating reductions for which self-intersecting geometry + may or may not be desirable. Ignored if not using antialiasing. """ def __init__(self, column=None, self_intersect=True): diff --git a/datashader/tests/test_antialias.py b/datashader/tests/test_antialias.py index 11de629cd..e73af2629 100644 --- a/datashader/tests/test_antialias.py +++ b/datashader/tests/test_antialias.py @@ -75,7 +75,7 @@ # line whereas for 006 it is a multi-segment line, and each vertex is listed # only a single time. Datashader then "connects the dots" as it were. # -# Test 007 tests the edge case, where we draw an almost staright line between +# Test 007 tests the edge case, where we draw an almost straight line between # corners with only a single pixel offset. This is to ensure that anti-aliasing # does not try to draw pixels that are out of bounds. 
Importantly, this needs # to be run with Numba disabled, since Numba does not do OOB checking by diff --git a/datashader/tests/test_datatypes.py b/datashader/tests/test_datatypes.py index 33fedac7b..5206e9574 100644 --- a/datashader/tests/test_datatypes.py +++ b/datashader/tests/test_datatypes.py @@ -115,7 +115,7 @@ def test_validate_ragged_array_fastpath(): RaggedArray(dict(valid_dict, start_indices=25)) ve.match('start_indices property of a RaggedArray') - # not unsiged int + # not unsigned int with pytest.raises(ValueError) as ve: RaggedArray(dict(valid_dict, start_indices=start_indices.astype('float32'))) @@ -148,7 +148,7 @@ def test_validate_ragged_array_fastpath(): def test_start_indices_dtype(): - # The start_indices dtype should be an unsiged int that is only as large + # The start_indices dtype should be an unsigned int that is only as large # as needed to handle the length of the flat array # Empty diff --git a/datashader/transfer_functions/__init__.py b/datashader/transfer_functions/__init__.py index 28796ba07..6c7e8869e 100755 --- a/datashader/transfer_functions/__init__.py +++ b/datashader/transfer_functions/__init__.py @@ -672,7 +672,7 @@ def shade(agg, cmap=["lightblue", "darkblue"], color_key=Sets1to3, in proportion to how much each category contributes to the final sum. However, if values can be negative or if they are on an interval scale where values e.g. twice as far from zero - are not twice as high (such as temperature in Farenheit), then + are not twice as high (such as temperature in Fahrenheit), then you will need to provide a suitable baseline value for use in calculating color mixing. A value of None (the default) means to take the minimum across the entire aggregate array, which diff --git a/examples/README.md b/examples/README.md index 31b0bb07e..ae1ffafe8 100644 --- a/examples/README.md +++ b/examples/README.md @@ -2,16 +2,16 @@ The best way to understand how Datashader works is to try out our extensive set of examples. 
[Datashader.org](http://datashader.org) -includes static versions of the -[getting started guide](http://datashader.org/getting_started), +includes static versions of the +[getting started guide](http://datashader.org/getting_started), [user manual](http://datashader.org/user_guide), and [topic examples](http://datashader.org/topics), but for the full experience with dynamic updating you will need to install them on a -live server. +live server. -These instructions assume you are using -[conda](https://conda.io/docs/install/quick.html), but they can be -adapted as needed to use [pip](https://pip.pypa.io/en/stable/installing/) +These instructions assume you are using +[conda](https://conda.io/docs/install/quick.html), but they can be +adapted as needed to use [pip](https://pip.pypa.io/en/stable/installing/) and [virtualenv](https://virtualenv.pypa.io) if desired. To get started, first go to your home directory and @@ -71,7 +71,7 @@ jupyter notebook If you want the generated notebooks to work without an internet connection or with an unreliable connection (e.g. 
if you see `Loading BokehJS ...` but never -`BokehJS sucessfully loaded`), then restart the Jupyter notebook server using: +`BokehJS successfully loaded`), then restart the Jupyter notebook server using: ``` BOKEH_RESOURCES=inline jupyter notebook --NotebookApp.iopub_data_rate_limit=100000000 diff --git a/examples/filetimes.py b/examples/filetimes.py index 5aa15a832..f3ba431ad 100755 --- a/examples/filetimes.py +++ b/examples/filetimes.py @@ -25,7 +25,7 @@ from datashader import transfer_functions as tf #from multiprocessing.pool import ThreadPool -#dask.set_options(pool=ThreadPool(3)) # select a pecific number of threads +#dask.set_options(pool=ThreadPool(3)) # select a specific number of threads from dask import distributed # Toggled by command-line arguments diff --git a/examples/tiling.ipynb b/examples/tiling.ipynb index 67516e79e..5f7bd282a 100644 --- a/examples/tiling.ipynb +++ b/examples/tiling.ipynb @@ -90,7 +90,7 @@ " xs = np.concatenate([np.random.wald(10000000, 10000000, size=10000000) * offset for offset in xoffsets])\n", " ys = np.concatenate([np.random.wald(10000000, 10000000, size=10000000) * offset for offset in yoffsets])\n", " df = pd.DataFrame(dict(x=xs, y=ys))\n", - " \n", + "\n", " return df.loc[df['x'].between(*x_range) & df['y'].between(*y_range)]" ] }, @@ -152,7 +152,7 @@ "metadata": {}, "source": [ "### Create `post_render_func`\n", - "- accepts `img `, `extras` arguments which correspond to the output PIL.Image before it is write to disk (or S3), and addtional image properties.\n", + "- accepts `img `, `extras` arguments which correspond to the output PIL.Image before it is written to disk (or S3), and additional image properties.\n", "- returns image `(PIL.Image)`\n", "- this is a good place to run any non-datashader-specific logic on each output tile."
] @@ -238,7 +238,7 @@ "\n", "xmin, ymin, xmax, ymax = full_extent_of_data\n", "\n", - "p = figure(width=800, height=800, \n", + "p = figure(width=800, height=800,\n", " x_range=(int(-20e6), int(20e6)),\n", " y_range=(int(-20e6), int(20e6)),\n", " tools=\"pan,wheel_zoom,reset\")\n", @@ -338,7 +338,7 @@ "source": [ "xmin, ymin, xmax, ymax = full_extent_of_data\n", "\n", - "p = figure(width=800, height=800, \n", + "p = figure(width=800, height=800,\n", " x_range=(int(-20e6), int(20e6)),\n", " y_range=(int(-20e6), int(20e6)),\n", " tools=\"pan,wheel_zoom,reset\")\n", diff --git a/examples/user_guide/1_Plotting_Pitfalls.ipynb b/examples/user_guide/1_Plotting_Pitfalls.ipynb index a01031cb9..bde76107a 100644 --- a/examples/user_guide/1_Plotting_Pitfalls.ipynb +++ b/examples/user_guide/1_Plotting_Pitfalls.ipynb @@ -164,7 +164,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "As you can see, it is very difficult to find settings for the dotsize and alpha parameters that correctly reveal the data, even for relatively small and obvious datasets like these. With larger datasets with unknown contents, it is difficult to detect that such problems are occuring, leading to false conclusions based on inappropriately visualized data.\n", + "As you can see, it is very difficult to find settings for the dotsize and alpha parameters that correctly reveal the data, even for relatively small and obvious datasets like these. With larger datasets with unknown contents, it is difficult to detect that such problems are occurring, leading to false conclusions based on inappropriately visualized data.\n", "\n", "### 3. 
Undersampling\n", "\n", @@ -187,7 +187,7 @@ " np.random.seed(1)\n", " dists = [(np.random.normal(x,s,num), np.random.normal(y,s,num)) for x,y,s in specs]\n", " return np.hstack([d[0] for d in dists]), np.hstack([d[1] for d in dists])\n", - " \n", + "\n", "points = (hv.Points(gaussians(num=600), label=\"600 points\", group=\"Small dots\") +\n", " hv.Points(gaussians(num=60000), label=\"60000 points\", group=\"Small dots\") +\n", " hv.Points(gaussians(num=600), label=\"600 points\", group=\"Tiny dots\") +\n", @@ -221,7 +221,7 @@ " \"\"\"\n", " Given a set of coordinates, bins them into a 2d histogram grid\n", " of the specified size, and optionally transforms the counts\n", - " and/or compresses them into a visible range starting at a \n", + " and/or compresses them into a visible range starting at a\n", " specified offset between 0 and 1.0.\n", " \"\"\"\n", " hist,xs,ys = np.histogram2d(coords[0], coords[1], bins=bins)\n", @@ -354,7 +354,7 @@ "except ImportError:\n", " eq_hist = lambda d,m: d\n", " print(\"scikit-image not installed; skipping histogram equalization\")\n", - " \n", + "\n", "hv.Layout([heatmap(dist,bins,transform=eq_hist) for bins in [8,20,200]])" ] }, @@ -410,10 +410,10 @@ "metadata": {}, "outputs": [], "source": [ - "layout = (hv.Points(dist,label=\"1. Overplotting\") + \n", - " hv.Points(dist,label=\"2. Oversaturation\").opts(s=0.1,alpha=0.5) + \n", - " hv.Points((dist[0][::200],dist[1][::200]),label=\"3. Undersampling\").opts(s=2,alpha=0.5) + \n", - " hv.Points(dist,label=\"4. Undersaturation\").opts(s=0.01,alpha=0.05) + \n", + "layout = (hv.Points(dist,label=\"1. Overplotting\") +\n", + " hv.Points(dist,label=\"2. Oversaturation\").opts(s=0.1,alpha=0.5) +\n", + " hv.Points((dist[0][::200],dist[1][::200]),label=\"3. Undersampling\").opts(s=2,alpha=0.5) +\n", + " hv.Points(dist,label=\"4. Undersaturation\").opts(s=0.01,alpha=0.05) +\n", " heatmap(dist,200,offset=0.2,label=\"5. 
Underutilized dynamic range\") +\n", " heatmap(dist,200,transform=eq_hist,label=\"6. Nonuniform colormapping\").opts(cmap=\"hot\"))\n", "\n", diff --git a/examples/user_guide/3_Timeseries.ipynb b/examples/user_guide/3_Timeseries.ipynb index 1c2b98455..767929ecc 100644 --- a/examples/user_guide/3_Timeseries.ipynb +++ b/examples/user_guide/3_Timeseries.ipynb @@ -51,7 +51,7 @@ "noise = lambda var, bias, n: np.random.normal(bias, var, n)\n", "data = {c: signal + noise(1, 10*(np.random.random() - 0.5), n) for c in cols}\n", "\n", - "# Add some \"rogue lines\" that differ from the rest \n", + "# Add some \"rogue lines\" that differ from the rest\n", "cols += ['x'] ; data['x'] = signal + np.random.normal(0, 0.02, size=n).cumsum() # Gradually diverges\n", "cols += ['y'] ; data['y'] = signal + noise(1, 20*(np.random.random() - 0.5), n) # Much noisier\n", "cols += ['z'] ; data['z'] = signal # No noise at all\n", @@ -174,7 +174,7 @@ "metadata": {}, "outputs": [], "source": [ - "cvs2 = ds.Canvas(x_range=(12879023 * 1E11, 12879070 * 1E11), \n", + "cvs2 = ds.Canvas(x_range=(12879023 * 1E11, 12879070 * 1E11),\n", " y_range=(37, 50), plot_height=200, plot_width=500)\n", "\n", "w0 = tf.shade(cvs2.line(df, 'ITime', 'a', line_width=0), name=\"line_width 0\")\n", @@ -425,7 +425,7 @@ " np.random.normal(0, 0.3, size=n).cumsum() + 50,\n", " np.random.normal(0, 0.3, size=n).cumsum() + 50]\n", "data = {c: signals[i%3] + noise(1+i, 5*(np.random.random() - 0.5), n) for (i,c) in enumerate(cols)}\n", - "y_range = (1.2*min([s.min() for s in signals]), 1.2*max([s.max() for s in signals])) \n", + "y_range = (1.2*min([s.min() for s in signals]), 1.2*max([s.max() for s in signals]))\n", "\n", "data['Time'] = df['Time']\n", "dfm = pd.DataFrame(data)" @@ -525,7 +525,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Here, each line represents an independent trial of this random walk process. All lines start from the same point at time 0 (all the way to the left). 
At each subsquent time step, each line moves upward or downward from its prior position by a distance drawn from a normal distribution. Thanks to the nonlinear `eq-hist` colorization, you can see the dispersion in the density of the overall distribution as time advances, at the same time as you can see the individual outliers at the extremes of the distribution. You'll see a similar plot for 1,000,000 or 10,000,000 curves, and much more interesting plots if you have real data to show!" + "Here, each line represents an independent trial of this random walk process. All lines start from the same point at time 0 (all the way to the left). At each subsequent time step, each line moves upward or downward from its prior position by a distance drawn from a normal distribution. Thanks to the nonlinear `eq-hist` colorization, you can see the dispersion in the density of the overall distribution as time advances, at the same time as you can see the individual outliers at the extremes of the distribution. You'll see a similar plot for 1,000,000 or 10,000,000 curves, and much more interesting plots if you have real data to show!" ] } ], diff --git a/examples/user_guide/5_Grids.ipynb b/examples/user_guide/5_Grids.ipynb index 1673bfcc5..504e64dfb 100644 --- a/examples/user_guide/5_Grids.ipynb +++ b/examples/user_guide/5_Grids.ipynb @@ -139,7 +139,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Here the default downsampling function ``mean`` renders a faithful size-reduced version of the original, with all the raster grid points that overlap a given pixel being averaged to create the final pixel value. 
The ``min`` and ``max`` aggregation functions take the minimum or maxiumum, respectively, of the values overlapping each pixel, and you can see here that the ``min`` version has larger light-blue regions towards the upper right (with each pixel reflecting the minimum of all grid cells it overlaps), while the ``max`` version has larger dark-blue regions towards the upper right. The ``mode`` version computes the most common value that overlaps this pixel (not very useful for floating-point data as here, but important for categorical data where ``mean`` would not be valid; in that case you can also use `first` or `last` to take the first or last value found for a given pixel). The ``std`` version reports the standard deviation of the grid cells in each pixel, which is low towards the lower left where the function is smooth, and increases towards the upper right, where the function value varies significantly per pixel (i.e., has many samples in the original grid with different values).\n", + "Here the default downsampling function ``mean`` renders a faithful size-reduced version of the original, with all the raster grid points that overlap a given pixel being averaged to create the final pixel value. The ``min`` and ``max`` aggregation functions take the minimum or maximum, respectively, of the values overlapping each pixel, and you can see here that the ``min`` version has larger light-blue regions towards the upper right (with each pixel reflecting the minimum of all grid cells it overlaps), while the ``max`` version has larger dark-blue regions towards the upper right. The ``mode`` version computes the most common value that overlaps this pixel (not very useful for floating-point data as here, but important for categorical data where ``mean`` would not be valid; in that case you can also use `first` or `last` to take the first or last value found for a given pixel). 
The ``std`` version reports the standard deviation of the grid cells in each pixel, which is low towards the lower left where the function is smooth, and increases towards the upper right, where the function value varies significantly per pixel (i.e., has many samples in the original grid with different values).\n", "\n", "The differences between min and max are clearer if we look at a regime where the function varies so much that it can only barely be faithfully be represented in a grid of this size:" ] @@ -179,7 +179,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Here you can see that the ``mean`` downsampling looks like a good approximation to the original array, locally averaging the original function values in each portion of the array. However, if you were to zoom in and adjust for contrast, you would be able to see some of the inevitable aliasing artifacts that occur for any such representation in a too-small array. These aliasing effects are clearly visible in the ``min`` and ``max`` aggregation, because they keep the local minimum or maxiumum rather than averaging out the artifacts. Comparing ``mean`` and ``min`` or ``max`` (or subtracting ``min`` from ``max``) can help find regions of the array that are being poorly represented in the current view.\n", + "Here you can see that the ``mean`` downsampling looks like a good approximation to the original array, locally averaging the original function values in each portion of the array. However, if you were to zoom in and adjust for contrast, you would be able to see some of the inevitable aliasing artifacts that occur for any such representation in a too-small array. These aliasing effects are clearly visible in the ``min`` and ``max`` aggregation, because they keep the local minimum or maximum rather than averaging out the artifacts. 
Comparing ``mean`` and ``min`` or ``max`` (or subtracting ``min`` from ``max``) can help find regions of the array that are being poorly represented in the current view.\n", "\n", "## Collections of raster data\n", "\n", @@ -224,8 +224,8 @@ "def h(x,y): return np.sin((x**2+y**2)**2)\n", "def m(x,y): return np.exp(x+y)\n", "\n", - "dd = xr.Dataset({'cos': sample(f, n=150), \n", - " 'sin': sample(h, n=150), \n", + "dd = xr.Dataset({'cos': sample(f, n=150),\n", + " 'sin': sample(h, n=150),\n", " 'exp': sample(m, n=150)})\n", "\n", "tf.Images(tf.shade(ds.Canvas().raster(dd, agg=rd.mean('cos')), name='cos ((x^2+y^2)^2)'),\n", @@ -385,7 +385,7 @@ " Qy = np.sin(Y) + np.sin(X)\n", " Z = np.sqrt(X**2 + Y**2)\n", "\n", - " return xr.DataArray(Z, name='Z', dims=['Y', 'X'], \n", + " return xr.DataArray(Z, name='Z', dims=['Y', 'X'],\n", " coords={'Qx': (['Y', 'X'], Qx),\n", " 'Qy': (['Y', 'X'], Qy)})\n", "\n", diff --git a/examples/user_guide/7_Networks.ipynb b/examples/user_guide/7_Networks.ipynb index 7ee9a4110..d7a5dba98 100644 --- a/examples/user_guide/7_Networks.ipynb +++ b/examples/user_guide/7_Networks.ipynb @@ -140,13 +140,13 @@ "def edgesplot(edges, name=None, canvas=None):\n", " canvas = ds.Canvas(**cvsopts) if canvas is None else canvas\n", " return tf.shade(canvas.line(edges, 'x','y', agg=ds.count()), name=name)\n", - " \n", + "\n", "def graphplot(nodes, edges, name=\"\", canvas=None, cat=None):\n", " if canvas is None:\n", " xr = nodes.x.min(), nodes.x.max()\n", " yr = nodes.y.min(), nodes.y.max()\n", " canvas = ds.Canvas(x_range=xr, y_range=yr, **cvsopts)\n", - " \n", + "\n", " np = nodesplot(nodes, name + \" nodes\", canvas, cat)\n", " ep = edgesplot(edges, name + \" edges\", canvas)\n", " return tf.stack(ep, np, how=\"over\", name=name)" @@ -162,9 +162,9 @@ "fd = forcedirected\n", "\n", "%time cd_d = graphplot(cd, connect_edges(cd,edges), \"Circular layout\")\n", - "%time fd_d = graphplot(fd, connect_edges(fd,edges), \"Force-directed\") \n", + "%time fd_d = 
graphplot(fd, connect_edges(fd,edges), \"Force-directed\")\n", "%time cd_b = graphplot(cd, hammer_bundle(cd,edges), \"Circular layout, bundled\")\n", - "%time fd_b = graphplot(fd, hammer_bundle(fd,edges), \"Force-directed, bundled\") \n", + "%time fd_b = graphplot(fd, hammer_bundle(fd,edges), \"Force-directed, bundled\")\n", "\n", "tf.Images(cd_d,fd_d,cd_b,fd_b).cols(2)" ] @@ -180,6 +180,13 @@ "" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "code", "execution_count": null, @@ -211,7 +218,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Here you can see the bundling algorithm forms groups of nearby connnections, which helps make the structure at a particular scale clear. The scale of this structure, i.e., how much bundling is done, is determined by an effective \"bandwidth\", which is a combination of an `initial_bandwidth` parameter and a `decay` time constant for annealing this bandwidth over time:" + "Here you can see the bundling algorithm forms groups of nearby connections, which helps make the structure at a particular scale clear. 
The scale of this structure, i.e., how much bundling is done, is determined by an effective \"bandwidth\", which is a combination of an `initial_bandwidth` parameter and a `decay` time constant for annealing this bandwidth over time:" ] }, { @@ -221,7 +228,7 @@ "outputs": [], "source": [ "%%time\n", - "grid = [graphplot(snodes, \n", + "grid = [graphplot(snodes,\n", " hammer_bundle(*star, iterations=5, decay=decay, initial_bandwidth=bw),\n", " \"d={:0.2f}, bw={:0.2f}\".format(decay, bw))\n", " for decay in [0.1, 0.25, 0.5, 0.9] for bw in [0.1, 0.2, 0.5, 1]]" @@ -277,13 +284,13 @@ "cats,n,m = 4,80,1000\n", "\n", "cnodes = pd.concat([\n", - " pd.DataFrame.from_records([(\"node\"+str(i+100*c),\"c\"+str(c)) for i in range(n)], \n", - " columns=['name','cat']) \n", + " pd.DataFrame.from_records([(\"node\"+str(i+100*c),\"c\"+str(c)) for i in range(n)],\n", + " columns=['name','cat'])\n", " for c in range(cats)], ignore_index=True)\n", "cnodes.cat=cnodes.cat.astype('category')\n", "\n", "cedges = pd.concat([\n", - " pd.DataFrame(np.random.randint(n*c,n*(c+1), size=(m, 2)), \n", + " pd.DataFrame(np.random.randint(n*c,n*(c+1), size=(m, 2)),\n", " columns=['source', 'target'])\n", " for c in range(cats)], ignore_index=True)" ] @@ -305,9 +312,9 @@ "fd = forceatlas2_layout(cnodes, cedges)\n", "\n", "%time rd_d = graphplot(rd, connect_edges(rd,cedges), \"Random layout\", cat=\"cat\")\n", - "%time fd_d = graphplot(fd, connect_edges(fd,cedges), \"Force-directed\", cat=\"cat\") \n", + "%time fd_d = graphplot(fd, connect_edges(fd,cedges), \"Force-directed\", cat=\"cat\")\n", "%time rd_b = graphplot(rd, hammer_bundle(rd,cedges), \"Random layout, bundled\", cat=\"cat\")\n", - "%time fd_b = graphplot(fd, hammer_bundle(fd,cedges), \"Force-directed, bundled\",cat=\"cat\") \n", + "%time fd_b = graphplot(fd, hammer_bundle(fd,cedges), \"Force-directed, bundled\",cat=\"cat\")\n", "\n", "tf.Images(rd_d,fd_d,rd_b,fd_b).cols(2)" ] @@ -353,7 +360,7 @@ "def nx_plot(graph, name=\"\"):\n", " 
print(graph.name, len(graph.edges))\n", " nodes, edges = nx_layout(graph)\n", - " \n", + "\n", " direct = connect_edges(nodes, edges)\n", " bundled_bw005 = hammer_bundle(nodes, edges)\n", " bundled_bw030 = hammer_bundle(nodes, edges, initial_bandwidth=0.30)\n", @@ -371,11 +378,11 @@ "source": [ "n=50\n", "plots = [nx_plot(g) for g in\n", - " [ng(nx.complete_graph(n), name=\"Complete\"), \n", - " ng(nx.lollipop_graph(n, 5), name=\"Lollipop\"), \n", + " [ng(nx.complete_graph(n), name=\"Complete\"),\n", + " ng(nx.lollipop_graph(n, 5), name=\"Lollipop\"),\n", " ng(nx.barbell_graph(n,2), name=\"Barbell\"),\n", - " ng(nx.ladder_graph(n), name=\"Ladder\"), \n", - " ng(nx.circular_ladder_graph(n), name=\"Circular Ladder\"), \n", + " ng(nx.ladder_graph(n), name=\"Ladder\"),\n", + " ng(nx.circular_ladder_graph(n), name=\"Circular Ladder\"),\n", " ng(nx.star_graph(n), name=\"Star\"),\n", " ng(nx.cycle_graph(n), name=\"Cycle\")]]\n", "\n",