diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 81563ba20e..519e489f61 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -6,107 +6,140 @@ on: - "v[0-9]+.[0-9]+.[0-9]+a[0-9]+" - "v[0-9]+.[0-9]+.[0-9]+b[0-9]+" - "v[0-9]+.[0-9]+.[0-9]+rc[0-9]+" - # Dry-run only workflow_dispatch: schedule: - cron: "0 14 * * SUN" +defaults: + run: + shell: bash -el {0} + +env: + SETUPTOOLS_ENABLE_FEATURES: "legacy-editable" + PYTHON_VERSION: "3.11" + PACKAGE: "holoviews" + jobs: + waiting_room: + name: Waiting Room + runs-on: ubuntu-latest + needs: [conda_build, pip_install] + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') + environment: + name: publish + steps: + - run: echo "All builds have finished, have been approved, and ready to publish" + conda_build: - name: Build Conda Packages + name: Build Conda runs-on: "ubuntu-latest" - defaults: - run: - shell: bash -l {0} - env: - CHANS_DEV: "-c pyviz/label/dev -c bokeh" - PKG_TEST_PYTHON: "--test-python=py39" - PYTHON_VERSION: "3.9" - CHANS: "-c pyviz" - MPLBACKEND: "Agg" - CONDA_UPLOAD_TOKEN: ${{ secrets.CONDA_UPLOAD_TOKEN }} - SETUPTOOLS_ENABLE_FEATURES: "legacy-editable" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: "100" - - uses: conda-incubator/setup-miniconda@v2 - with: - miniconda-version: "latest" - python-version: ${{ env.PYTHON_VERSION }} - name: Fetch unshallow run: git fetch --prune --tags --unshallow -f - - name: Set output - id: vars - run: echo "tag=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT + - uses: conda-incubator/setup-miniconda@v3 + with: + miniconda-version: "latest" - name: conda setup run: | - conda config --set always_yes True - conda config --append channels pyviz/label/dev - conda config --append channels conda-forge # pyct is for running setup.py - conda install -y conda-build anaconda-client build pyct + conda install -y conda-build build pyct -c pyviz/label/dev - name: conda build run: 
| - bash ./scripts/build_conda.sh + source ./scripts/build_conda.sh + echo "CONDA_FILE="$CONDA_PREFIX/conda-bld/noarch/$PACKAGE-$VERSION-py_0.tar.bz2"" >> $GITHUB_ENV + - uses: actions/upload-artifact@v4 + if: always() + with: + name: conda + path: ${{ env.CONDA_FILE }} + if-no-files-found: error + + conda_publish: + name: Publish Conda + runs-on: ubuntu-latest + needs: [conda_build, waiting_room] + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') + steps: + - uses: actions/download-artifact@v4 + with: + name: conda + path: dist/ + - name: Set environment variables + run: | + echo "TAG=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV + echo "CONDA_FILE=$(ls dist/*.tar.bz2)" >> $GITHUB_ENV + - uses: conda-incubator/setup-miniconda@v3 + with: + miniconda-version: "latest" + - name: conda setup + run: | + conda install -y anaconda-client - name: conda dev upload - if: (github.event_name == 'push' && (contains(steps.vars.outputs.tag, 'a') || contains(steps.vars.outputs.tag, 'b') || contains(steps.vars.outputs.tag, 'rc'))) + if: contains(env.TAG, 'a') || contains(env.TAG, 'b') || contains(env.TAG, 'rc') run: | - VERSION="$(echo "$(ls dist/*.whl)" | cut -d- -f2)" - FILE="$CONDA_PREFIX/conda-bld/noarch/holoviews-$VERSION-py_0.tar.bz2" - anaconda --token $CONDA_UPLOAD_TOKEN upload --user pyviz --label=dev $FILE + anaconda --token ${{ secrets.CONDA_UPLOAD_TOKEN }} upload --user pyviz --label=dev $CONDA_FILE - name: conda main upload - if: (github.event_name == 'push' && !(contains(steps.vars.outputs.tag, 'a') || contains(steps.vars.outputs.tag, 'b') || contains(steps.vars.outputs.tag, 'rc'))) + if: (!(contains(env.TAG, 'a') || contains(env.TAG, 'b') || contains(env.TAG, 'rc'))) run: | - VERSION="$(echo "$(ls dist/*.whl)" | cut -d- -f2)" - FILE="$CONDA_PREFIX/conda-bld/noarch/holoviews-$VERSION-py_0.tar.bz2" - anaconda --token $CONDA_UPLOAD_TOKEN upload --user pyviz --label=dev --label=main $FILE + anaconda --token ${{ secrets.CONDA_UPLOAD_TOKEN }} upload --user 
pyviz --label=dev --label=main $CONDA_FILE + pip_build: - name: Build PyPI Packages + name: Build PyPI runs-on: "ubuntu-latest" - defaults: - run: - shell: bash -l {0} - env: - CHANS_DEV: "-c pyviz/label/dev -c bokeh" - PKG_TEST_PYTHON: "--test-python=py39" - PYTHON_VERSION: "3.9" - CHANS: "-c pyviz" - MPLBACKEND: "Agg" - PPU: ${{ secrets.PPU }} - PPP: ${{ secrets.PPP }} - PYPI: "https://upload.pypi.org/legacy/" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: "100" - - uses: conda-incubator/setup-miniconda@v2 - with: - miniconda-version: "latest" - python-version: 3.9 - name: Fetch unshallow run: git fetch --prune --tags --unshallow -f - - name: conda setup - run: | - conda install -c pyviz "pyctdev>=0.5" - doit ecosystem_setup - doit env_create $CHANS_DEV --python=$PYTHON_VERSION - - name: env setup - run: | - conda activate test-environment - doit develop_install $CHANS_DEV - pip uninstall -y holoviews - doit pip_on_conda - - name: doit env_capture - run: | - conda activate test-environment - doit env_capture - - name: pip build - run: | - conda activate test-environment - doit ecosystem=pip package_build --test-group=simple - - name: pip upload - if: github.event_name == 'push' + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Install build run: | - conda activate test-environment - doit ecosystem=pip package_upload -u $PPU -p $PPP -r $PYPI + python -m pip install build + - name: Build package + run: python -m build . 
+ - uses: actions/upload-artifact@v4 + if: always() + with: + name: pip + path: dist/ + if-no-files-found: error + + pip_install: + name: Install PyPI + runs-on: "ubuntu-latest" + needs: [pip_build] + steps: + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - uses: actions/download-artifact@v4 + with: + name: pip + path: dist/ + - name: Install package + run: python -m pip install dist/*.whl + - name: Test package + run: python -c "import $PACKAGE; print($PACKAGE.__version__)" + + pip_publish: + name: Publish PyPI + runs-on: ubuntu-latest + needs: [pip_build, waiting_room] + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') + steps: + - uses: actions/download-artifact@v4 + with: + name: pip + path: dist/ + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + user: ${{ secrets.PPU }} + password: ${{ secrets.PPP }} + repository-url: "https://upload.pypi.org/legacy/" diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 1ccee6aed1..578df685c2 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -40,11 +40,11 @@ jobs: PANEL_EMBED_JSON: "true" PANEL_EMBED_JSON_PREFIX: "json" steps: - - uses: holoviz-dev/holoviz_tasks/install@v0.1a19 + - uses: holoviz-dev/holoviz_tasks/install@v0 with: name: Documentation python-version: "3.10" - channel-priority: strict + channel-priority: flexible channels: pyviz/label/dev,conda-forge,nodefaults envs: "-o doc" cache: true @@ -52,10 +52,10 @@ jobs: - name: Set output id: vars run: echo "tag=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT - - name: bokeh sampledata + - name: Download data run: | conda activate test-environment - bokeh sampledata + bash scripts/download_data.sh - name: generate rst run: | conda activate test-environment diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index ad67149396..57e3226018 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ 
-50,7 +50,7 @@ jobs: name: Run pre-commit runs-on: "ubuntu-latest" steps: - - uses: holoviz-dev/holoviz_tasks/pre-commit@v0.1a19 + - uses: holoviz-dev/holoviz_tasks/pre-commit@v0 setup: name: Setup workflow @@ -64,7 +64,7 @@ jobs: - uses: actions/checkout@v3 if: github.event_name != 'pull_request' - name: Check for code changes - uses: dorny/paths-filter@v2.11.1 + uses: dorny/paths-filter@v3 id: filter with: filters: | @@ -91,10 +91,7 @@ jobs: run: | MATRIX=$(jq -nsc '{ "os": ["ubuntu-latest", "macos-latest", "windows-latest"], - "python-version": ["3.9", "3.11"], - "include": [ - {"os": "ubuntu-latest", "python-version": "3.10"} - ] + "python-version": ["3.9", "3.12"] }') echo "MATRIX=$MATRIX" >> $GITHUB_ENV - name: Set test matrix with 'full' option @@ -102,7 +99,7 @@ jobs: run: | MATRIX=$(jq -nsc '{ "os": ["ubuntu-latest", "macos-latest", "windows-latest"], - "python-version": ["3.9", "3.10", "3.11"] + "python-version": ["3.9", "3.10", "3.11", "3.12"] }') echo "MATRIX=$MATRIX" >> $GITHUB_ENV - name: Set test matrix with 'downstream' option @@ -126,22 +123,27 @@ jobs: DESC: "Python ${{ matrix.python-version }}, ${{ matrix.os }} unit tests" PYTHON_VERSION: ${{ matrix.python-version }} steps: - - uses: holoviz-dev/holoviz_tasks/install@v0.1a19 + - uses: holoviz-dev/holoviz_tasks/install@v0 if: needs.setup.outputs.code_change == 'true' with: name: unit_test_suite python-version: ${{ matrix.python-version }} - channel-priority: strict + channel-priority: flexible channels: pyviz/label/dev,conda-forge,nodefaults envs: "-o flakes -o tests -o examples_tests -o tests_ci" cache: ${{ github.event.inputs.cache || github.event.inputs.cache == '' }} conda-update: true id: install - - name: bokeh sampledata + - name: Check packages latest version if: needs.setup.outputs.code_change == 'true' run: | conda activate test-environment - bokeh sampledata + python scripts/check_latest_packages.py bokeh panel param datashader + - name: Download data + if: 
needs.setup.outputs.code_change == 'true' + run: | + conda activate test-environment + bash scripts/download_data.sh - name: doit test_unit if: needs.setup.outputs.code_change == 'true' run: | @@ -176,12 +178,12 @@ jobs: # it as one of the sources. PYCTDEV_SELF_CHANNEL: "pyviz/label/dev" steps: - - uses: holoviz-dev/holoviz_tasks/install@v0.1a19 + - uses: holoviz-dev/holoviz_tasks/install@v0 if: needs.setup.outputs.code_change == 'true' with: name: ui_test_suite python-version: ${{ matrix.python-version }} - channels: pyviz/label/dev,bokeh,conda-forge,nodefaults + channels: pyviz/label/dev,conda-forge,nodefaults envs: "-o recommended -o tests -o build -o tests_ci" cache: ${{ github.event.inputs.cache || github.event.inputs.cache == '' }} playwright: true @@ -213,45 +215,28 @@ jobs: DESC: "Python ${{ matrix.python-version }}, ${{ matrix.os }} core tests" PYTHON_VERSION: ${{ matrix.python-version }} steps: - # Add back when this works on Python 3.12 - # - uses: holoviz-dev/holoviz_tasks/install@v0.1a19 - # if: needs.setup.outputs.code_change == 'true' - # with: - # name: core_test_suite - # python-version: ${{ matrix.python-version }} - # # channel-priority: strict - # channels: pyviz/label/dev,conda-forge,nodefaults - # envs: "-o tests_core -o tests_ci" - # cache: ${{ github.event.inputs.cache || github.event.inputs.cache == '' }} - # conda-update: true - # id: install - - uses: actions/checkout@v3 - if: needs.setup.outputs.code_change == 'true' - with: - fetch-depth: "100" - - name: Fetch unshallow - if: needs.setup.outputs.code_change == 'true' - run: git fetch --prune --tags --unshallow -f - - uses: actions/setup-python@v4 + - uses: holoviz-dev/holoviz_tasks/install@v0 if: needs.setup.outputs.code_change == 'true' with: - python-version: 3.12 - - name: install - if: needs.setup.outputs.code_change == 'true' - run: | - python -m pip install -ve '.[tests_core, tests_ci]' - - name: bokeh sampledata + name: core_test_suite + python-version: ${{ matrix.python-version 
}} + # channel-priority: strict + channels: pyviz/label/dev,conda-forge,nodefaults + envs: "-o tests_core -o tests_ci" + cache: ${{ github.event.inputs.cache || github.event.inputs.cache == '' }} + id: install + - name: Download data if: needs.setup.outputs.code_change == 'true' run: | - # conda activate test-environment - bokeh sampledata + conda activate test-environment + bash scripts/download_data.sh - name: Check packages latest version if: needs.setup.outputs.code_change == 'true' run: | - # conda activate test-environment - python scripts/check_latest_packages.py + conda activate test-environment + python scripts/check_latest_packages.py numpy pandas bokeh panel param - name: doit test_unit if: needs.setup.outputs.code_change == 'true' run: | - # conda activate test-environment + conda activate test-environment pytest holoviews diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c0da1e2c85..602951f640 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -20,7 +20,7 @@ repos: - id: check-json - id: detect-private-key - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.2.0 + rev: v0.3.4 hooks: - id: ruff files: holoviews/|scripts/ @@ -42,7 +42,7 @@ repos: - id: rst-directive-colons - id: rst-inline-touching-normal - repo: https://github.com/shellcheck-py/shellcheck-py - rev: v0.9.0.6 + rev: v0.10.0.1 hooks: - id: shellcheck - repo: https://github.com/pre-commit/mirrors-prettier diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml index 24f412d2a7..087efe849e 100644 --- a/conda.recipe/meta.yaml +++ b/conda.recipe/meta.yaml @@ -32,6 +32,13 @@ requirements: - {{ dep }} {% endfor %} +test: + imports: + - {{ sdata['name'] }} + commands: + - pip check + requires: + - pip about: home: https://holoviews.org diff --git a/examples/conftest.py b/examples/conftest.py index b33cbeff2f..22dced7136 100644 --- a/examples/conftest.py +++ b/examples/conftest.py @@ -1,3 +1,4 @@ +import os import platform import sys @@ -55,6 
+56,12 @@ "reference/elements/bokeh/VSpans.ipynb", ] +# 2024-03-27: ffmpeg errors on Windows CI +if system == "Windows" and os.environ.get("GITHUB_RUN_ID"): + collect_ignore_glob += [ + "user_guide/Plotting_with_Matplotlib.ipynb", + ] + def pytest_runtest_makereport(item, call): """ diff --git a/examples/gallery/demos/bokeh/html_hover_tooltips.ipynb b/examples/gallery/demos/bokeh/html_hover_tooltips.ipynb new file mode 100644 index 0000000000..767ed2b223 --- /dev/null +++ b/examples/gallery/demos/bokeh/html_hover_tooltips.ipynb @@ -0,0 +1,113 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import holoviews as hv\n", + "\n", + "hv.extension(\"bokeh\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This demo demonstrates how to build custom hover tooltips using HTML. The\n", + "tooltips are displayed when the user hovers over a point in the plot." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Declare data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df = pd.DataFrame(\n", + " dict(\n", + " x=[1, 2, 3, 4, 5],\n", + " y=[2, 5, 8, 2, 7],\n", + " desc=[\"A\", \"b\", \"C\", \"d\", \"E\"],\n", + " imgs=[\n", + " \"https://docs.bokeh.org/static/snake.jpg\",\n", + " \"https://docs.bokeh.org/static/snake2.png\",\n", + " \"https://docs.bokeh.org/static/snake3D.png\",\n", + " \"https://docs.bokeh.org/static/snake4_TheRevenge.png\",\n", + " \"https://docs.bokeh.org/static/snakebite.jpg\",\n", + " ],\n", + " fonts=[\n", + " \"italics\",\n", + " \"
pre
\",\n", + " \"bold\",\n", + " \"small\",\n", + " \"del\",\n", + " ],\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Declare plot\n", + "\n", + "Having declared the tooltips' columns, we can reference them in the tooltips with `@`. Just be sure to pass *all the relevant columns* as extra `vdims` ." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "TOOLTIPS = \"\"\"\n", + "
\n", + " $label\n", + "
\n", + " \n", + "
\n", + "
\n", + " @desc\n", + " [$index]\n", + "
\n", + "
\n", + " @fonts{safe}\n", + "
\n", + "
\n", + " Location\n", + " ($x, $y)\n", + "
\n", + "
\n", + "\"\"\"\n", + "\n", + "hv.Scatter(df, kdims=[\"x\"], vdims=[\"y\", \"desc\", \"imgs\", \"fonts\"], label=\"Pictures\").opts(\n", + " hover_tooltips=TOOLTIPS, size=20\n", + ")" + ] + } + ], + "metadata": { + "language_info": { + "name": "python", + "pygments_lexer": "ipython3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/user_guide/Customizing_Plots.ipynb b/examples/user_guide/Customizing_Plots.ipynb index ac76ff7839..2b519bee67 100644 --- a/examples/user_guide/Customizing_Plots.ipynb +++ b/examples/user_guide/Customizing_Plots.ipynb @@ -506,7 +506,7 @@ "source": [ "##### Dimension.soft_range\n", "\n", - "Declaringa ``soft_range`` on the other hand combines the data range and the supplied range, i.e. it will pick whichever extent is wider. Using the same example as above we can see it uses the -10 value supplied in the soft_range but also extends to 100, which is the upper bound of the actual data:" + "Declaring a ``soft_range`` on the other hand combines the data range and the supplied range, i.e. it will pick whichever extent is wider. Using the same example as above we can see it uses the -10 value supplied in the soft_range but also extends to 100, which is the upper bound of the actual data:" ] }, { diff --git a/examples/user_guide/Plotting_with_Bokeh.ipynb b/examples/user_guide/Plotting_with_Bokeh.ipynb index 1e7fa587b6..31e42ef7cf 100644 --- a/examples/user_guide/Plotting_with_Bokeh.ipynb +++ b/examples/user_guide/Plotting_with_Bokeh.ipynb @@ -296,6 +296,78 @@ " img.options(data_aspect=2, frame_width=300).relabel('data_aspect=2')).cols(2)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Navigable Bounds\n", + "\n", + "Users may set the `apply_hard_bounds` option to constrain the navigable range (extent one could zoom or pan to). If `True`, the navigable bounds of the plot will be constrained to the range of the data. 
Go ahead and try to zoom in and out in the plot below, you should find that you cannot zoom beyond the extents of the data.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "x_values = np.linspace(0, 10, 100)\n", + "y_values = np.sin(x_values)\n", + "\n", + "hv.Curve((x_values, y_values)).opts(apply_hard_bounds=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If `xlim` or `ylim` is set for an element, the navigable bounds of the plot will be set based\n", + "on the combined extremes of extents between the data and xlim/ylim ranges. In the plot below, the `xlim` constrains the initial view, but you should be able to pan to the x-range between 0 and 12 - the combined extremes of ranges between the data (0,10) and `xlim` (2,12)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "x_values = np.linspace(0, 10, 100)\n", + "y_values = np.sin(x_values)\n", + "\n", + "hv.Curve((x_values, y_values)).opts(\n", + " apply_hard_bounds=True,\n", + " xlim=(2, 12),\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If a dimension range is specified (e.g. with `.redim.range`), this range will be used as the hard bounds, regardless of the data range or xlim/ylim. This is because the dimension range is intended to be an override on the minimum and maximum allowable values for the dimension. Read more in [Annotating your Data](./01-Annotating_Data.ipynb)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "x_values = np.linspace(0, 10, 100)\n", + "y_values = np.sin(x_values)\n", + "\n", + "hv.Curve((x_values, y_values)).opts(\n", + " apply_hard_bounds=True,\n", + ").redim.range(x=(4, 6))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the plot above, you should not be able to navigate beyond the specified dimension ranges of `x` (4, 6). " + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -652,7 +724,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Additionally, you can provide `'vline'`, the equivalent of passing `HoverTool(mode='vline')`, or `'hline'` to set the hit-testing behavior" + "Moreover, you can provide `'vline'`, the equivalent of passing `HoverTool(mode='vline')`, or `'hline'` to set the hit-testing behavior." ] }, { @@ -661,27 +733,142 @@ "metadata": {}, "outputs": [], "source": [ - "error = np.random.rand(100, 3)\n", - "heatmap_data = {(chr(65+i), chr(97+j)):i*j for i in range(5) for j in range(5) if i!=j}\n", - "data = [np.random.normal() for i in range(10000)]\n", - "hist = np.histogram(data, 20)\n", + "hv.Curve(np.arange(100)).opts(tools=[\"vline\"]) + hv.Curve(np.arange(100)).opts(tools=[\"hline\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Equivalently, you may say `tools=[\"hover\"]` alongside `hover_mode`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "(\n", + " hv.Curve(np.arange(100)).opts(tools=[\"hover\"], hover_mode=\"vline\")\n", + " + hv.Curve(np.arange(100)).opts(tools=[\"hover\"], hover_mode=\"hline\")\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you'd like finer control on the formatting, you may use `hover_tooltips` to declare the tooltips as a list of tuples of the labels and a specification of the dimension name and how to display it.\n", "\n", - "points = hv.Points(error)\n", - "heatmap = hv.HeatMap(heatmap_data).sort()\n", - "histogram = hv.Histogram(hist)\n", - "image = hv.Image(np.random.rand(50,50))\n", + "Behind the scenes, the `hover_tooltips` feature extends the capabilities of Bokeh's `HoverTool` tooltips by providing additional flexibility and customization options, so for a reference see the [bokeh user guide](https://bokeh.pydata.org/en/latest/docs/user_guide/tools.html#hovertool)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "hover_tooltips = [\n", + " ('Name', '@name'),\n", + " ('Symbol', '@symbol'),\n", + " ('CPK', '$color[hex, swatch]:CPK')\n", + "]\n", "\n", - "(points + heatmap + histogram + image).opts(\n", - " opts.Points(tools=['hline'], size=5), opts.HeatMap(tools=['hover']),\n", - " opts.Image(tools=['vline']), opts.Histogram(tools=['hover']),\n", - " opts.Layout(shared_axes=False)).cols(2)" + "points.clone().opts(\n", + " tools=[\"hover\"], hover_tooltips=hover_tooltips, color='metal', cmap='Category20',\n", + " line_color='black', size=dim('atomic radius')/10,\n", + " width=600, height=400, show_grid=True,\n", + " title='Chemical Elements by Type (scaled by atomic radius)')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Unique from Bokeh's `HoverTool`, the HoloViews' `hover_tooltips` also supports a mix of string and tuple formats for defining tooltips, allowing for both direct references to data columns and customized display options.\n", + "\n", + "Additionally, you can include as many, or as few, dimension names as desired." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "hover_tooltips = [\n", + " \"name\", # will assume @name\n", + " (\"Symbol\", \"@symbol\"), # @ still required if tuple\n", + " ('CPK', '$color[hex, swatch]:CPK'),\n", + " \"density\"\n", + "]\n", + "\n", + "points.clone().opts(\n", + " tools=[\"hover\"], hover_tooltips=hover_tooltips, color='metal', cmap='Category20',\n", + " line_color='black', size=dim('atomic radius')/10,\n", + " width=600, height=400, show_grid=True,\n", + " title='Chemical Elements by Type (scaled by atomic radius)')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`hover_tooltips` also support displaying the HoloViews element's `label` and `group`.\n", + "\n", + "Keep in mind, to reference these special variables that are not based on the data, a prefix of `$` is required!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "a_curve = hv.Curve([0, 1, 2], label=\"A\", group=\"C\")\n", + "b_curve = hv.Curve([2, 1, 0], label=\"B\", group=\"C\")\n", + "(a_curve * b_curve).opts(\"Curve\", hover_tooltips=[\"$label\", \"$group\", \"@x\", \"y\"]) # $ is required, @ is not needed for string" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you need special formatting, you may also specify the formats inside `hover_tooltips` alongside `hover_formatters`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def datetime(x):\n", + " return np.array(x, dtype=np.datetime64)\n", + "\n", + "\n", + "df = pd.DataFrame(\n", + " {\n", + " \"date\": [\"2019-01-01\", \"2019-01-02\", \"2019-01-03\"],\n", + " \"adj_close\": [100, 101, 100000],\n", + " }\n", + ")\n", + "\n", + "curve = hv.Curve((datetime(df[\"date\"]), df[\"adj_close\"]), \"date\", \"adj close\")\n", + "curve.opts(\n", + " hover_tooltips=[\"date\", (\"Close\", \"$@{adj close}{0.2f}\")], # use @{ } for dims with spaces\n", + " hover_formatters={\"@{adj close}\": \"printf\"}, # use 'printf' formatter for '@{adj close}' field\n", + " hover_mode=\"vline\",\n", + ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "It is also possible to explicitly declare the columns to display by manually constructing a `HoverTool` and declaring the tooltips as a list of tuples of the labels and a specification of the dimension name and how to display it (for a complete reference see the [bokeh user guide](https://bokeh.pydata.org/en/latest/docs/user_guide/tools.html#hovertool))." + "You can provide HTML strings too! See a demo [here](../gallery/demos/bokeh/html_hover_tooltips.ipynb), or explicitly declare the columns to display by manually constructing a Bokeh [`HoverTool`](https://bokeh.pydata.org/en/latest/docs/user_guide/tools.html#hovertool)." 
] }, { @@ -695,7 +882,7 @@ "\n", "points = hv.Points(\n", " elements, ['electronegativity', 'density'],\n", - " ['name', 'symbol', 'metal', 'CPK', 'atomic radius']\n", + " ['name', 'symbol', 'metal', 'CPK', 'atomic radius'],\n", ").sort('metal')\n", "\n", "tooltips = [\n", diff --git a/holoviews/__init__.py b/holoviews/__init__.py index eaf4a5ccba..e3231dece9 100644 --- a/holoviews/__init__.py +++ b/holoviews/__init__.py @@ -173,6 +173,8 @@ def help(obj, visualization=True, ansi=True, backend=None, pydoc.help(obj) +del os, rcfile, warnings + def __getattr__(name): if name == "annotate": # Lazy loading Panel @@ -180,5 +182,13 @@ def __getattr__(name): return annotate raise AttributeError(f"module {__name__!r} has no attribute {name!r}") +__all__ = [k for k in locals() if not k.startswith('_')] +__all__ += ['annotate', '__version__'] -del os, rcfile, warnings +def __dir__(): + return __all__ + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from .annotators import annotate diff --git a/holoviews/core/dimension.py b/holoviews/core/dimension.py index 6ff601295c..59218185f9 100644 --- a/holoviews/core/dimension.py +++ b/holoviews/core/dimension.py @@ -109,10 +109,12 @@ def process_dimensions(kdims, vdims): elif isinstance(dims, (tuple, str, Dimension, dict)): dims = [dims] elif not isinstance(dims, list): - raise ValueError("{} argument expects a Dimension or list of dimensions, " - "specified as tuples, strings, dictionaries or Dimension " - "instances, not a {} type. Ensure you passed the data as the " - "first argument.".format(group, type(dims).__name__)) + raise ValueError( + f"{group} argument expects a Dimension or list of dimensions, " + "specified as tuples, strings, dictionaries or Dimension " + f"instances, not a {type(dims).__name__} type. " + "Ensure you passed the data as the first argument." 
+ ) dimensions[group] = [asdim(d) for d in dims] return dimensions diff --git a/holoviews/core/io.py b/holoviews/core/io.py index 13ddd9b45e..774238ece4 100644 --- a/holoviews/core/io.py +++ b/holoviews/core/io.py @@ -370,8 +370,7 @@ def save(self_or_cls, obj, filename, key=None, info=None, **kwargs): components = list(obj.data.values()) entries = entries if len(entries) > 1 else [entries[0]+'(L)'] else: - entries = ['{}.{}'.format(group_sanitizer(obj.group, False), - label_sanitizer(obj.label, False))] + entries = [f'{group_sanitizer(obj.group, False)}.{label_sanitizer(obj.label, False)}'] components = [obj] for component, entry in zip(components, entries): diff --git a/holoviews/element/raster.py b/holoviews/element/raster.py index 19cf97e28e..8075bfff3b 100644 --- a/holoviews/element/raster.py +++ b/holoviews/element/raster.py @@ -278,10 +278,12 @@ def __init__(self, data, kdims=None, vdims=None, bounds=None, extents=None, Dataset.__init__(self, data, kdims=kdims, vdims=vdims, extents=extents, **params) if not self.interface.gridded: - raise DataError("{} type expects gridded data, {} is columnar. " - "To display columnar data as gridded use the HeatMap " - "element or aggregate the data (e.g. using rasterize " - "or np.histogram2d).".format(type(self).__name__, self.interface.__name__)) + raise DataError( + f"{type(self).__name__} type expects gridded data, " + f"{self.interface.__name__} is columnar. " + "To display columnar data as gridded use the HeatMap " + "element or aggregate the data (e.g. using np.histogram2d)." + ) dim2, dim1 = self.interface.shape(self, gridded=True)[:2] if bounds is None: @@ -800,10 +802,12 @@ def __init__(self, data, kdims=None, vdims=None, **params): data = ([], [], np.zeros((0, 0))) super().__init__(data, kdims, vdims, **params) if not self.interface.gridded: - raise DataError("{} type expects gridded data, {} is columnar. " - "To display columnar data as gridded use the HeatMap " - "element or aggregate the data (e.g. 
using " - "np.histogram2d).".format(type(self).__name__, self.interface.__name__)) + raise DataError( + f"{type(self).__name__} type expects gridded data, " + f"{self.interface.__name__} is columnar. " + "To display columnar data as gridded use the HeatMap " + "element or aggregate the data (e.g. using np.histogram2d)." + ) def trimesh(self): """ diff --git a/holoviews/ipython/archive.py b/holoviews/ipython/archive.py index 5eefa4f873..e6acd4d67f 100644 --- a/holoviews/ipython/archive.py +++ b/holoviews/ipython/archive.py @@ -147,8 +147,7 @@ def export(self, timestamp=None): tstamp = time.strftime(self.timestamp_format, self._timestamp) export_name = self._format(self.export_name, {'timestamp':tstamp, 'notebook':self.notebook_name}) - print(('Export name: {!r}\nDirectory {!r}'.format(export_name, - os.path.join(os.path.abspath(self.root)))) + print((f'Export name: {export_name!r}\nDirectory {os.path.join(os.path.abspath(self.root))!r}') + '\n\nIf no output appears, please check holoviews.archive.last_export_status()') display(Javascript(cmd)) diff --git a/holoviews/operation/datashader.py b/holoviews/operation/datashader.py index 08f2688661..cd7ac77a2a 100644 --- a/holoviews/operation/datashader.py +++ b/holoviews/operation/datashader.py @@ -194,9 +194,10 @@ def _get_agg_params(self, element, x, y, agg_fn, bounds): elif column: dims = [d for d in element.dimensions('ranges') if d == column] if not dims: - raise ValueError("Aggregation column '{}' not found on '{}' element. " - "Ensure the aggregator references an existing " - "dimension.".format(column,element)) + raise ValueError( + f"Aggregation column '{column}' not found on '{element}' element. " + "Ensure the aggregator references an existing dimension." 
+ ) if isinstance(agg_fn, (ds.count, ds.count_cat)): if vdim_prefix: vdim_name = f'{vdim_prefix}{column} Count' @@ -405,7 +406,7 @@ def _process(self, element, key=None): eldata = agg if ds_version > Version('0.5.0') else (xs, ys, agg.data) return self.p.element_type(eldata, **params) else: - params['vdims'] = list(agg.coords[agg_fn.column].data) + params['vdims'] = list(map(str, agg.coords[agg_fn.column].data)) return ImageStack(agg, **params) def _apply_datashader(self, dfdata, cvs_fn, agg_fn, agg_kwargs, x, y): diff --git a/holoviews/operation/downsample.py b/holoviews/operation/downsample.py index 616434d386..50cdd7ff65 100644 --- a/holoviews/operation/downsample.py +++ b/holoviews/operation/downsample.py @@ -165,7 +165,7 @@ def _min_max(x, y, n_out, **kwargs): from tsdownsample import MinMaxDownsampler except ModuleNotFoundError: raise NotImplementedError( - 'The min-max downsampling algorithm requires the tsdownsampler ' + 'The min-max downsampling algorithm requires the tsdownsample ' 'library to be installed.' ) from None return MinMaxDownsampler().downsample(x, y, n_out=n_out, **kwargs) @@ -175,7 +175,7 @@ def _min_max_lttb(x, y, n_out, **kwargs): from tsdownsample import MinMaxLTTBDownsampler except ModuleNotFoundError: raise NotImplementedError( - 'The minmax-lttb downsampling algorithm requires the tsdownsampler ' + 'The minmax-lttb downsampling algorithm requires the tsdownsample ' 'library to be installed.' ) from None return MinMaxLTTBDownsampler().downsample(x, y, n_out=n_out, **kwargs) @@ -185,9 +185,10 @@ def _m4(x, y, n_out, **kwargs): from tsdownsample import M4Downsampler except ModuleNotFoundError: raise NotImplementedError( - 'The m4 downsampling algorithm requires the tsdownsampler ' + 'The m4 downsampling algorithm requires the tsdownsample ' 'library to be installed.' 
) from None + n_out = n_out - (n_out % 4) # n_out must be a multiple of 4 return M4Downsampler().downsample(x, y, n_out=n_out, **kwargs) @@ -204,7 +205,7 @@ class downsample1d(ResampleOperation1D): """ Implements downsampling of a regularly sampled 1D dataset. - If available uses the `tsdownsampler` library to perform massively + If available uses the `tsdownsample` library to perform massively accelerated downsampling. """ @@ -214,14 +215,14 @@ class downsample1d(ResampleOperation1D): - `lttb`: Largest Triangle Three Buckets downsample algorithm. - `nth`: Selects every n-th point. - `viewport`: Selects all points in a given viewport. - - `minmax`: Selects the min and max value in each bin (requires tsdownsampler). - - `m4`: Selects the min, max, first and last value in each bin (requires tsdownsampler). + - `minmax`: Selects the min and max value in each bin (requires tsdownsample). + - `m4`: Selects the min, max, first and last value in each bin (requires tsdownsample). - `minmax-lttb`: First selects n_out * minmax_ratio min and max values, then further reduces these to n_out values using the - Largest Triangle Three Buckets algorithm (requires tsdownsampler).""") + Largest Triangle Three Buckets algorithm (requires tsdownsample).""") parallel = param.Boolean(default=False, doc=""" - The number of threads to use (if tsdownsampler is available).""") + The number of threads to use (if tsdownsample is available).""") minmax_ratio = param.Integer(default=4, bounds=(0, None), doc=""" For the minmax-lttb algorithm determines the ratio of candidate diff --git a/holoviews/operation/element.py b/holoviews/operation/element.py index 0d8bc13246..131a14a08d 100644 --- a/holoviews/operation/element.py +++ b/holoviews/operation/element.py @@ -3,6 +3,7 @@ examples. 
""" import warnings +from functools import partial import numpy as np import param @@ -754,6 +755,9 @@ class histogram(Operation): groupby = param.ClassSelector(default=None, class_=(str, Dimension), doc=""" Defines a dimension to group the Histogram returning an NdOverlay of Histograms.""") + groupby_range = param.Selector(default="shared", objects=["shared", "separated"], doc=""" + Whether to group the histograms along the same range or separate them.""") + log = param.Boolean(default=False, doc=""" Whether to use base 10 logarithmic samples for the bin edges.""") @@ -781,15 +785,7 @@ class histogram(Operation): style_prefix = param.String(default=None, allow_None=None, doc=""" Used for setting a common style for histograms in a HoloMap or AdjointLayout.""") - def _process(self, element, key=None): - if self.p.groupby: - if not isinstance(element, Dataset): - raise ValueError('Cannot use histogram groupby on non-Dataset Element') - grouped = element.groupby(self.p.groupby, group_type=Dataset, container_type=NdOverlay) - self.p.groupby = None - return grouped.map(self._process, Dataset) - - normed = False if self.p.mean_weighted and self.p.weight_dimension else self.p.normed + def _get_dim_and_data(self, element): if self.p.dimension: selected_dim = self.p.dimension else: @@ -800,6 +796,21 @@ def _process(self, element, key=None): data = element.interface.values(element, selected_dim, compute=False) else: data = element.dimension_values(selected_dim) + return dim, data + + def _process(self, element, key=None, groupby=False): + if self.p.groupby: + if not isinstance(element, Dataset): + raise ValueError('Cannot use histogram groupby on non-Dataset Element') + grouped = element.groupby(self.p.groupby, group_type=Dataset, container_type=NdOverlay) + if self.p.groupby_range == 'shared' and not self.p.bin_range: + _, data = self._get_dim_and_data(element) + self.p.bin_range = (data.min(), data.max()) + self.p.groupby = None + return grouped.map(partial(self._process, 
groupby=True), Dataset) + + normed = False if self.p.mean_weighted and self.p.weight_dimension else self.p.normed + dim, data = self._get_dim_and_data(element) is_datetime = isdatetime(data) if is_datetime: @@ -859,7 +870,7 @@ def _process(self, element, key=None): if isdatetime(edges): edges = edges.astype('datetime64[ns]').astype('int64') else: - hist_range = self.p.bin_range or element.range(selected_dim) + hist_range = self.p.bin_range or element.range(dim) # Suppress a warning emitted by Numpy when datetime or timedelta scalars # are compared. See https://github.com/numpy/numpy/issues/10095 and # https://github.com/numpy/numpy/issues/9210. @@ -939,8 +950,9 @@ def _process(self, element, key=None): # Save off the computed bin edges so that if this operation instance # is used to compute another histogram, it will default to the same # bin edges. - self.bins = list(edges) - return Histogram((edges, hist), kdims=[element.get_dimension(selected_dim)], + if not groupby: + self.bins = list(edges) + return Histogram((edges, hist), kdims=[dim], label=element.label, **params) diff --git a/holoviews/plotting/bokeh/chart.py b/holoviews/plotting/bokeh/chart.py index 0f481f2940..db03780ef5 100644 --- a/holoviews/plotting/bokeh/chart.py +++ b/holoviews/plotting/bokeh/chart.py @@ -61,7 +61,7 @@ class PointPlot(LegendPlot, ColorbarPlot): selection_display = BokehOverlaySelectionDisplay() - style_opts = (['cmap', 'palette', 'marker', 'size', 'angle'] + + style_opts = (['cmap', 'palette', 'marker', 'size', 'angle', 'hit_dilation'] + base_properties + line_properties + fill_properties) _plot_methods = dict(single='scatter', batched='scatter') diff --git a/holoviews/plotting/bokeh/element.py b/holoviews/plotting/bokeh/element.py index 2bea93bba5..b4e6fec90a 100644 --- a/holoviews/plotting/bokeh/element.py +++ b/holoviews/plotting/bokeh/element.py @@ -1,4 +1,5 @@ import warnings +from collections import defaultdict from itertools import chain from types import FunctionType @@ 
-105,6 +106,11 @@ class ElementPlot(BokehPlot, GenericElementPlot): align = param.ObjectSelector(default='start', objects=['start', 'center', 'end'], doc=""" Alignment (vertical or horizontal) of the plot in a layout.""") + apply_hard_bounds = param.Boolean(default=False, doc=""" + If True, the navigable bounds of the plot will be set based + on the more extreme of extents between the data or xlim/ylim ranges. + If dim ranges are set, the hard bounds will be set to the dim ranges.""") + autorange = param.ObjectSelector(default=None, objects=['x', 'y', None], doc=""" Whether to auto-range along either the x- or y-axis, i.e. when panning or zooming along the orthogonal axis it will @@ -230,6 +236,15 @@ class ElementPlot(BokehPlot, GenericElementPlot): tools = param.List(default=[], doc=""" A list of plugin tools to use on the plot.""") + hover_tooltips = param.ClassSelector(class_=(list, str), doc=""" + A list of dimensions to be displayed in the hover tooltip.""") + + hover_formatters = param.Dict(doc=""" + A dict of formatting options for the hover tooltip.""") + + hover_mode = param.ObjectSelector(default='mouse', objects=['mouse', 'vline', 'hline'], doc=""" + The hover mode determines how the hover tool is activated.""") + toolbar = param.ObjectSelector(default='right', objects=["above", "below", "left", "right", "disable", None], @@ -286,16 +301,139 @@ def _hover_opts(self, element): dims += element.dimensions() return list(util.unique_iterator(dims)), {} + def _replace_hover_label_group(self, element, tooltip): + if isinstance(tooltip, tuple): + has_label = hasattr(element, 'label') and element.label + has_group = hasattr(element, 'group') and element.group != element.param.group.default + if not has_label and not has_group: + return tooltip + + if ("$label" in tooltip or "${label}" in tooltip): + tooltip = (tooltip[0], element.label) + elif ("$group" in tooltip or "${group}" in tooltip): + tooltip = (tooltip[0], element.group) + elif isinstance(tooltip, str): 
+ if "$label" in tooltip: + tooltip = tooltip.replace("$label", element.label) + elif "${label}" in tooltip: + tooltip = tooltip.replace("${label}", element.label) + + if "$group" in tooltip: + tooltip = tooltip.replace("$group", element.group) + elif "${group}" in tooltip: + tooltip = tooltip.replace("${group}", element.group) + return tooltip + + def _replace_hover_value_aliases(self, tooltip, tooltips_dict): + for name, tuple_ in tooltips_dict.items(): + # some elements, like image, rename the tooltip, e.g. @y -> $y + # let's replace those, so the hover tooltip is discoverable + # ensure it works for `(@x, @y)` -> `($x, $y)` too + if isinstance(tooltip, tuple): + value_alias = tuple_[1] + if f"@{name}" in tooltip[1]: + tooltip = (tooltip[0], tooltip[1].replace(f"@{name}", value_alias)) + elif f"@{{{name}}}" in tooltip[1]: + tooltip = (tooltip[0], tooltip[1].replace(f"@{{{name}}}", value_alias)) + elif isinstance(tooltip, str): + if f"@{name}" in tooltip: + tooltip = tooltip.replace(f"@{name}", tuple_[1]) + elif f"@{{{name}}}" in tooltip: + tooltip = tooltip.replace(f"@{{{name}}}", tuple_[1]) + return tooltip + + def _prepare_hover_kwargs(self, element): + tooltips, hover_opts = self._hover_opts(element) + + dim_aliases = { + f"{dim.label} ({dim.unit})" if dim.unit else dim.label: dim.name + for dim in element.kdims + element.vdims + } + + # make dict so it's easy to get the tooltip for a given dimension; + tooltips_dict = {} + units_dict = {} + for ttp in tooltips: + if isinstance(ttp, tuple): + name = ttp[0] + tuple_ = (ttp[0], ttp[1]) + elif isinstance(ttp, Dimension): + name = ttp.name + # three brackets means replacing variable, + # and then wrapping in brackets, like @{air} + unit = f" ({ttp.unit})" if ttp.unit else "" + tuple_ = ( + ttp.pprint_label, + f"@{{{util.dimension_sanitizer(ttp.name)}}}" + ) + units_dict[name] = unit + elif isinstance(ttp, str): + name = ttp + # three brackets means replacing variable, + # and then wrapping in brackets, like 
@{air} + tuple_ = (ttp.name, f"@{{{util.dimension_sanitizer(ttp)}}}") + + if name in dim_aliases: + name = dim_aliases[name] + + # key is the vanilla data column/dimension name + # value should always be a tuple (label, value) + tooltips_dict[name] = tuple_ + + # subset the tooltips to only the ones user wants + if self.hover_tooltips: + # If hover tooltips are defined as a list of strings or tuples + if isinstance(self.hover_tooltips, list): + new_tooltips = [] + for tooltip in self.hover_tooltips: + if isinstance(tooltip, str): + # make into a tuple + new_tooltip = tooltips_dict.get(tooltip.lstrip("@")) + if new_tooltip is None: + label = tooltip.lstrip("$").lstrip("@") + value = tooltip if "$" in tooltip else f"@{{{tooltip.lstrip('@')}}}" + new_tooltip = (label, value) + new_tooltips.append(new_tooltip) + elif isinstance(tooltip, tuple): + unit = units_dict.get(tooltip[0]) + tooltip = self._replace_hover_value_aliases(tooltip, tooltips_dict) + if unit: + tooltip = (f"{tooltip[0]}{unit}", tooltip[1]) + new_tooltips.append(tooltip) + else: + raise ValueError('Hover tooltips must be a list with items of strings or tuples.') + tooltips = new_tooltips + else: + # Likely HTML str + tooltips = self._replace_hover_value_aliases(self.hover_tooltips, tooltips_dict) + else: + tooltips = list(tooltips_dict.values()) + + # replace the label and group in the tooltips + if isinstance(tooltips, list): + tooltips = [self._replace_hover_label_group(element, ttp) for ttp in tooltips] + elif isinstance(tooltips, str): + tooltips = self._replace_hover_label_group(element, tooltips) + + if self.hover_formatters: + hover_opts['formatters'] = self.hover_formatters + + if self.hover_mode: + hover_opts["mode"] = self.hover_mode + + return tooltips, hover_opts + def _init_tools(self, element, callbacks=None): """ Processes the list of tools to be supplied to the plot. 
""" if callbacks is None: callbacks = [] - tooltips, hover_opts = self._hover_opts(element) - tooltips = [(ttp.pprint_label, '@{%s}' % util.dimension_sanitizer(ttp.name)) - if isinstance(ttp, Dimension) else ttp for ttp in tooltips] - if not tooltips: tooltips = None + + tooltips, hover_opts = self._prepare_hover_kwargs(element) + + if not tooltips: + tooltips = None callbacks = callbacks+self.callbacks cb_tools, tool_names = [], [] @@ -314,13 +452,23 @@ def _init_tools(self, element, callbacks=None): cb_tools.append(tool) self.handles[handle] = tool + all_tools = cb_tools + self.default_tools + self.tools + if self.hover_tooltips: + no_hover = ( + "hover" not in all_tools and + not (any(isinstance(tool, tools.HoverTool) for tool in all_tools)) + ) + if no_hover: + all_tools.append("hover") + tool_list = [] - for tool in cb_tools + self.default_tools + self.tools: + for tool in all_tools: if tool in tool_names: continue if tool in ['vline', 'hline']: + tool_opts = dict(hover_opts, mode=tool) tool = tools.HoverTool( - tooltips=tooltips, tags=['hv_created'], mode=tool, **hover_opts + tooltips=tooltips, tags=['hv_created'], **tool_opts ) elif bokeh32 and isinstance(tool, str) and tool.endswith( ('wheel_zoom', 'zoom_in', 'zoom_out') @@ -392,9 +540,7 @@ def _init_tools(self, element, callbacks=None): def _update_hover(self, element): tool = self.handles['hover'] if 'hv_created' in tool.tags: - tooltips, hover_opts = self._hover_opts(element) - tooltips = [(ttp.pprint_label, '@{%s}' % util.dimension_sanitizer(ttp.name)) - if isinstance(ttp, Dimension) else ttp for ttp in tooltips] + tooltips, hover_opts = self._prepare_hover_kwargs(element) tool.tooltips = tooltips else: plot_opts = element.opts.get('plot', 'bokeh') @@ -1332,7 +1478,6 @@ def _setup_autorange(self): else: p0, p1 = self.padding, self.padding - # Clean this up in bokeh 3.0 using View.find_one API callback = CustomJS(code=f""" const cb = function() {{ @@ -1354,30 +1499,10 @@ def _setup_autorange(self): 
return invert ? [upper, lower] : [lower, upper] }} - const ref = plot.id - - const find = (view) => {{ - let iterable = view.child_views === undefined ? [] : view.child_views - for (const sv of iterable) {{ - if (sv.model.id == ref) - return sv - const obj = find(sv) - if (obj !== null) - return obj - }} - return null - }} - let plot_view = null; - for (const root of plot.document.roots()) {{ - const root_view = window.Bokeh.index[root.id] - if (root_view === undefined) - return - plot_view = find(root_view) - if (plot_view != null) - break - }} - if (plot_view == null) + let plot_view = Bokeh.index.find_one(plot) + if (plot_view == null) {{ return + }} let range_limits = {{}} for (const dr of plot.data_renderers) {{ @@ -1398,20 +1523,23 @@ def _setup_autorange(self): }} }} - if (y_range_name) {{ + if (y_range_name in range_limits) {{ + const [vmin_old, vmax_old] = range_limits[y_range_name] + range_limits[y_range_name] = [Math.min(vmin, vmin_old), Math.max(vmax, vmax_old)] + }} else {{ range_limits[y_range_name] = [vmin, vmax] }} }} - let range_tags_extras = plot.{dim}_range.tags[1] - if (range_tags_extras['autorange']) {{ - let lowerlim = range_tags_extras['y-lowerlim'] ?? null - let upperlim = range_tags_extras['y-upperlim'] ?? null - let [start, end] = get_padded_range('default', lowerlim, upperlim, range_tags_extras['invert_yaxis']) - if ((start != end) && window.Number.isFinite(start) && window.Number.isFinite(end)) {{ - plot.{dim}_range.setv({{start, end}}) - }} - }} + let range_tags_extras = plot.{dim}_range.tags[1] + if (range_tags_extras['autorange']) {{ + let lowerlim = range_tags_extras['y-lowerlim'] ?? null + let upperlim = range_tags_extras['y-upperlim'] ?? 
null + let [start, end] = get_padded_range('default', lowerlim, upperlim, range_tags_extras['invert_yaxis']) + if ((start != end) && window.Number.isFinite(start) && window.Number.isFinite(end)) {{ + plot.{dim}_range.setv({{start, end}}) + }} + }} for (let key in plot.extra_{dim}_ranges) {{ const extra_range = plot.extra_{dim}_ranges[key] @@ -1582,14 +1710,12 @@ def _apply_transforms(self, element, data, ranges, style, group=None): if not util.isscalar(val): if k in self._nonvectorized_styles: element = type(element).__name__ - raise ValueError('Mapping a dimension to the "{style}" ' + raise ValueError(f'Mapping a dimension to the "{k}" ' 'style option is not supported by the ' - '{element} element using the {backend} ' - 'backend. To map the "{dim}" dimension ' - 'to the {style} use a groupby operation ' - 'to overlay your data along the dimension.'.format( - style=k, dim=v.dimension, element=element, - backend=self.renderer.backend)) + f'{element} element using the {self.renderer.backend} ' + f'backend. To map the "{v.dimension}" dimension ' + f'to the {k} use a groupby operation ' + 'to overlay your data along the dimension.') elif data and len(val) != len(next(iter(data.values()))): if isinstance(element, VectorField): val = np.tile(val, 3) @@ -1919,6 +2045,10 @@ def initialize_plot(self, ranges=None, plot=None, plots=None, source=None): if self._subcoord_overlaid: if style_element.label in plot.extra_y_ranges: self.handles['y_range'] = plot.extra_y_ranges.pop(style_element.label) + + if self.apply_hard_bounds: + self._apply_hard_bounds(element, ranges) + self.handles['plot'] = plot if self.autorange: @@ -1944,6 +2074,32 @@ def initialize_plot(self, ranges=None, plot=None, plots=None, source=None): return plot + def _apply_hard_bounds(self, element, ranges): + """ + Apply hard bounds to the x and y ranges of the plot. 
If xlim/ylim is set, limit the + initial viewable range to xlim/ylim, but allow navigation up to the abs max between + the data range and xlim/ylim. If dim range is set (e.g. via redim.range), enforce + as hard bounds. + + """ + + def validate_bound(bound): + return bound if util.isfinite(bound) else None + + min_extent_x, min_extent_y, max_extent_x, max_extent_y = map( + validate_bound, self.get_extents(element, ranges, range_type='combined', lims_as_soft_ranges=True) + ) + + def set_bounds(axis, min_extent, max_extent): + """Set the bounds for a given axis, using None if both extents are None or identical""" + try: + self.handles[axis].bounds = None if min_extent == max_extent else (min_extent, max_extent) + except ValueError: + self.handles[axis].bounds = None + + set_bounds('x_range', min_extent_x, max_extent_x) + set_bounds('y_range', min_extent_y, max_extent_y) + def _setup_data_callbacks(self, plot): if not self._js_on_data_callbacks: return @@ -2069,6 +2225,9 @@ def update_frame(self, key, ranges=None, plot=None, element=None): cds = self.handles['cds'] self._postprocess_hover(renderer, cds) + if self.apply_hard_bounds: + self._apply_hard_bounds(element, ranges) + self._update_glyphs(element, ranges, self.style[self.cyclic_index]) self._execute_hooks(element) @@ -2226,7 +2385,6 @@ def _init_glyph(self, plot, mapping, properties, key): return renderer, renderer.glyph - class ColorbarPlot(ElementPlot): """ ColorbarPlot provides methods to create colormappers and colorbar @@ -2642,7 +2800,6 @@ def _process_legend(self, plot=None): r.muted = self.legend_muted - class AnnotationPlot: """ Mix-in plotting subclass for AnnotationPlots which do not have a legend. 
@@ -2672,7 +2829,7 @@ class OverlayPlot(GenericOverlayPlot, LegendPlot): 'min_height', 'max_height', 'min_width', 'min_height', 'margin', 'aspect', 'data_aspect', 'frame_width', 'frame_height', 'responsive', 'fontscale', 'subcoordinate_y', - 'subcoordinate_scale'] + 'subcoordinate_scale', 'autorange'] def __init__(self, overlay, **kwargs): self._multi_y_propagation = self.lookup_options(overlay, 'plot').options.get('multi_y', False) @@ -2807,7 +2964,6 @@ def _process_legend(self, overlay): for r in item.renderers: r.muted = self.legend_muted or r.muted - def _init_tools(self, element, callbacks=None): """ Processes the list of tools to be supplied to the plot. @@ -2850,7 +3006,6 @@ def _init_tools(self, element, callbacks=None): self.handles['hover_tools'] = hover_tools return init_tools - def _merge_tools(self, subplot): """ Merges tools on the overlay with those on the subplots. @@ -2876,9 +3031,77 @@ def _merge_tools(self, subplot): subplot.handles['zooms_subcoordy'].values(), self.handles['zooms_subcoordy'].values(), ): - renderers = list(util.unique_iterator(subplot_zoom.renderers + overlay_zoom.renderers)) + renderers = list(util.unique_iterator(overlay_zoom.renderers + subplot_zoom.renderers)) overlay_zoom.renderers = renderers + def _postprocess_subcoordinate_y_groups(self, overlay, plot): + """ + Add a zoom tool per group to the overlay. + """ + # First, just process and validate the groups and their content. + groups = defaultdict(list) + + # If there are groups AND there are subcoordinate_y elements without a group. + if any(el.group != type(el).__name__ for el in overlay) and any( + el.opts.get('plot').kwargs.get('subcoordinate_y', False) + and el.group == type(el).__name__ + for el in overlay + ): + raise ValueError( + 'The subcoordinate_y overlay contains elements with a defined group, each ' + 'subcoordinate_y element in the overlay must have a defined group.' + ) + + for el in overlay: + # group is the Element type per default (e.g. Curve, Spike). 
+ if el.group == type(el).__name__: + continue + if not el.opts.get('plot').kwargs.get('subcoordinate_y', False): + raise ValueError( + f"All elements in group {el.group!r} must set the option " + f"'subcoordinate_y=True'. Not found for: {el}" + ) + groups[el.group].append(el) + + # No need to go any further if there's just one group. + if len(groups) <= 1: + return + + # At this stage, there's only one zoom tool (e.g. 1 wheel_zoom) that + # has all the renderers (e.g. all the curves in the overlay). + # We want to create as many zoom tools as groups, for each group + # the zoom tool must have the renderers of the elements of the group. + zoom_tools = self.handles['zooms_subcoordy'] + for zoom_tool_name, zoom_tool in zoom_tools.items(): + renderers_per_group = defaultdict(list) + # We loop through each overlay sub-elements and empty the list of + # renderers of the initial tool. + for el in overlay: + if el.group not in groups: + continue + renderers_per_group[el.group].append(zoom_tool.renderers.pop(0)) + + if zoom_tool.renderers: + raise RuntimeError(f'Found unexpected zoom renderers {zoom_tool.renderers}') + + new_ztools = [] + # Create a new tool per group with the right renderers and a custom description. + for grp, grp_renderers in renderers_per_group.items(): + new_tool = zoom_tool.clone() + new_tool.renderers = grp_renderers + new_tool.description = f"{zoom_tool_name.replace('_', ' ').title()} ({grp})" + new_ztools.append(new_tool) + # Revert tool order so the upper tool in the toolbar corresponds to the + # upper group in the overlay. + new_ztools = new_ztools[::-1] + + # Update the handle for good measure. 
+ zoom_tools[zoom_tool_name] = new_ztools + + # Replace the original tool by the new ones + idx = plot.tools.index(zoom_tool) + plot.tools[idx:idx+1] = new_ztools + def _get_dimension_factors(self, overlay, ranges, dimension): factors = [] for k, sp in self.subplots.items(): @@ -2948,6 +3171,7 @@ def initialize_plot(self, ranges=None, plot=None, plots=None): self._update_ranges(element, ranges) panels = [] + subcoord_y_glyph_renderers = [] for key, subplot in self.subplots.items(): frame = None if self.tabs: @@ -2966,6 +3190,24 @@ def initialize_plot(self, ranges=None, plot=None, plots=None): title = get_tab_title(key, frame, self.hmap.last) panels.append(TabPanel(child=child, title=title)) self._merge_tools(subplot) + if getattr(subplot, "subcoordinate_y", False) and ( + glyph_renderer := subplot.handles.get("glyph_renderer") + ): + subcoord_y_glyph_renderers.append(glyph_renderer) + + if self.subcoordinate_y: + # Reverse the subcoord-y renderers only. + reversed_renderers = subcoord_y_glyph_renderers[::-1] + reordered = [] + for item in plot.renderers: + if item not in subcoord_y_glyph_renderers: + reordered.append(item) + else: + reordered.append(reversed_renderers.pop(0)) + plot.renderers = reordered + + if self.subcoordinate_y: + self._postprocess_subcoordinate_y_groups(element, plot) if self.tabs: self.handles['plot'] = Tabs( @@ -2993,6 +3235,9 @@ def initialize_plot(self, ranges=None, plot=None, plots=None): if self.top_level: self.init_links() + if self.autorange: + self._setup_autorange() + self._execute_hooks(element) return self.handles['plot'] diff --git a/holoviews/plotting/bokeh/plot.py b/holoviews/plotting/bokeh/plot.py index 37771afb1b..6a97643d63 100644 --- a/holoviews/plotting/bokeh/plot.py +++ b/holoviews/plotting/bokeh/plot.py @@ -614,8 +614,7 @@ def initialize_plot(self, ranges=None, plots=None): sync_legends(plot) plot = self._make_axes(plot) if hasattr(plot, "toolbar") and self.merge_tools: - plot.toolbar = merge_tools(plots) - + plot.toolbar 
= merge_tools(plots, hide_toolbar=True) title = self._get_title_div(self.keys[-1]) if title: plot = Column(title, plot) diff --git a/holoviews/plotting/bokeh/raster.py b/holoviews/plotting/bokeh/raster.py index 9245bc86b6..8f281cc12b 100644 --- a/holoviews/plotting/bokeh/raster.py +++ b/holoviews/plotting/bokeh/raster.py @@ -258,13 +258,31 @@ def _get_cmapper_opts(self, low, high, factors, colors): def _get_colormapper(self, eldim, element, ranges, style, factors=None, colors=None, group=None, name='color_mapper'): + indices = None + vdims = element.vdims + if isinstance(style.get("cmap"), dict): + dict_cmap = style["cmap"] + missing = [vd.name for vd in vdims if vd.name not in dict_cmap] + if missing: + missing_str = "', '".join(sorted(missing)) + raise ValueError( + "The supplied cmap dictionary must have the same " + f"value dimensions as the element. Missing: '{missing_str}'" + ) + keys, values = zip(*dict_cmap.items()) + style["cmap"] = list(values) + indices = [keys.index(vd.name) for vd in vdims] + cmapper = super()._get_colormapper( eldim, element, ranges, style, factors=factors, colors=colors, group=group, name=name ) - num_elements = len(element.vdims) - step_size = len(cmapper.palette) // num_elements - indices = np.arange(num_elements) * step_size + + if indices is None: + num_elements = len(vdims) + step_size = len(cmapper.palette) // num_elements + indices = np.arange(num_elements) * step_size + cmapper.palette = np.array(cmapper.palette)[indices].tolist() return cmapper diff --git a/holoviews/plotting/bokeh/stats.py b/holoviews/plotting/bokeh/stats.py index 4251e4ddda..cc2adcccbb 100644 --- a/holoviews/plotting/bokeh/stats.py +++ b/holoviews/plotting/bokeh/stats.py @@ -72,6 +72,9 @@ class BoxWhiskerPlot(MultiDistributionMixin, CompositeElementPlot, ColorbarPlot, show_legend = param.Boolean(default=False, doc=""" Whether to show legend for the plot.""") + outlier_radius = param.Number(default=0.01, doc=""" + The radius of the circle marker for the 
outliers.""") + # Deprecated options color_index = param.ClassSelector(default=None, class_=(str, int), @@ -191,11 +194,11 @@ def get_data(self, element, ranges, style): if self.invert_axes: vbar_map = {'y': 'index', 'left': 'top', 'right': 'bottom', 'height': width} seg_map = {'y0': 'x0', 'y1': 'x1', 'x0': 'y0', 'x1': 'y1'} - out_map = {'y': 'index', 'x': vdim} + out_map = {'y': 'index', 'x': vdim, 'radius': self.outlier_radius} else: vbar_map = {'x': 'index', 'top': 'top', 'bottom': 'bottom', 'width': width} seg_map = {'x0': 'x0', 'x1': 'x1', 'y0': 'y0', 'y1': 'y1'} - out_map = {'x': 'index', 'y': vdim} + out_map = {'x': 'index', 'y': vdim, 'radius': self.outlier_radius} vbar2_map = dict(vbar_map) # Get color values @@ -206,7 +209,7 @@ def get_data(self, element, ranges, style): cdim, cidx = None, None factors = [] - vdim = element.vdims[0].name + vdim = dimension_sanitizer(element.vdims[0].name) for key, g in groups.items(): # Compute group label if element.kdims: diff --git a/holoviews/plotting/bokeh/util.py b/holoviews/plotting/bokeh/util.py index 490fbf9035..1bc7427fa2 100644 --- a/holoviews/plotting/bokeh/util.py +++ b/holoviews/plotting/bokeh/util.py @@ -60,7 +60,7 @@ from ...util.warnings import warn from ..util import dim_axis_label -bokeh_version = Version(bokeh.__version__) +bokeh_version = Version(Version(bokeh.__version__).base_version) bokeh32 = bokeh_version >= Version("3.2") bokeh33 = bokeh_version >= Version("3.3") bokeh34 = bokeh_version >= Version("3.4") @@ -384,7 +384,7 @@ def compute_layout_properties( return aspect_info, dimension_info -def merge_tools(plot_grid, disambiguation_properties=None): +def merge_tools(plot_grid, *, disambiguation_properties=None, hide_toolbar=False): """ Merges tools defined on a grid of plots into a single toolbar. 
All tools of the same type are merged unless they define one @@ -397,6 +397,8 @@ def merge_tools(plot_grid, disambiguation_properties=None): if isinstance(item, LayoutDOM): for p in item.select(dict(type=Plot)): tools.extend(p.toolbar.tools) + if hide_toolbar and hasattr(item, 'toolbar_location'): + item.toolbar_location = None if isinstance(item, GridPlot): item.toolbar_location = None @@ -415,7 +417,7 @@ def merge(tool, group): if p not in disambiguation_properties: ignore.add(p) - return Toolbar(tools=group_tools(tools, merge=merge, ignore=ignore) if merge_tools else tools) + return Toolbar(tools=group_tools(tools, merge=merge, ignore=ignore)) if tools else Toolbar() def sync_legends(bokeh_layout): diff --git a/holoviews/plotting/plot.py b/holoviews/plotting/plot.py index d8b4c94d5b..bcaedd2a50 100644 --- a/holoviews/plotting/plot.py +++ b/holoviews/plotting/plot.py @@ -925,7 +925,7 @@ def lookup(x): for opt, v in opts.items(): if opt not in options[key]: options[key][opt] = v - return options if keyfn else options[None] + return options if keyfn else options.get(None, {}) def _get_projection(cls, obj): """ @@ -1423,7 +1423,7 @@ def _get_range_extents(self, element, ranges, range_type, xdim, ydim, zdim): return (x0, y0, x1, y1) - def get_extents(self, element, ranges, range_type='combined', dimension=None, xdim=None, ydim=None, zdim=None, **kwargs): + def get_extents(self, element, ranges, range_type='combined', dimension=None, xdim=None, ydim=None, zdim=None, lims_as_soft_ranges=False, **kwargs): """ Gets the extents for the axes from the current Element. The globally computed ranges can optionally override the extents. @@ -1444,6 +1444,12 @@ def get_extents(self, element, ranges, range_type='combined', dimension=None, xd This allows Overlay plots to obtain each range and combine them appropriately for all the objects in the overlay. 
+ + If lims_as_soft_ranges is set to True, the xlim and ylim will be treated as + soft ranges instead of the default case as hard ranges while computing the extents. + This is used e.g. when apply_hard_bounds is True and xlim/ylim is set, in which + case we limit the initial viewable range to xlim/ylim, but allow navigation up to + the abs max between the data range and xlim/ylim. """ num = 6 if (isinstance(self.projection, str) and self.projection == '3d') else 4 if self.apply_extents and range_type in ('combined', 'extents'): @@ -1486,8 +1492,15 @@ def get_extents(self, element, ranges, range_type='combined', dimension=None, xd else: x0, y0, x1, y1 = combined - x0, x1 = util.dimension_range(x0, x1, self.xlim, (None, None)) - y0, y1 = util.dimension_range(y0, y1, self.ylim, (None, None)) + if lims_as_soft_ranges: + # run x|ylim through max_range to ensure datetime-dtype matching with ranges + xlim_soft_ranges = util.max_range([self.xlim]) + ylim_soft_ranges = util.max_range([self.ylim]) + x0, x1 = util.dimension_range(x0, x1, (None, None), xlim_soft_ranges) + y0, y1 = util.dimension_range(y0, y1, (None, None), ylim_soft_ranges) + else: + x0, x1 = util.dimension_range(x0, x1, self.xlim, (None, None)) + y0, y1 = util.dimension_range(y0, y1, self.ylim, (None, None)) if not self.drawn: x_range, y_range = ((y0, y1), (x0, x1)) if self.invert_axes else ((x0, x1), (y0, y1)) @@ -1828,8 +1841,9 @@ def _create_subplot(self, key, obj, streams, ranges): plottype = registry.get(vtype, None) if plottype is None: self.param.warning( - "No plotting class for {} type and {} backend " - "found. ".format(vtype.__name__, self.renderer.backend)) + f"No plotting class for {vtype.__name__} type " + f"and {self.renderer.backend} backend found. 
" + ) return None # Get zorder and style counter diff --git a/holoviews/plotting/plotly/element.py b/holoviews/plotting/plotly/element.py index 504c7dcdea..564f67139f 100644 --- a/holoviews/plotting/plotly/element.py +++ b/holoviews/plotting/plotly/element.py @@ -174,11 +174,8 @@ def generate_plot(self, key, ranges, element=None, is_geo=False): ] if unsupported_opts: raise ValueError( - "The following {typ} style options are not supported by the Plotly " - "backend when overlaid on Tiles:\n" - " {unsupported_opts}".format( - typ=type(element).__name__, unsupported_opts=unsupported_opts - ) + f"The following {type(element).__name__} style options are not supported by the Plotly " + f"backend when overlaid on Tiles:\n {unsupported_opts}" ) # Get data and options and merge them @@ -352,8 +349,8 @@ def _apply_transforms(self, element, ranges, style): continue elif (not v.applies(element) and v.dimension not in self.overlay_dims): new_style.pop(k) - self.param.warning('Specified {} dim transform {!r} could not be applied, as not all ' - 'dimensions could be resolved.'.format(k, v)) + self.param.warning(f'Specified {k} dim transform {v!r} could not be applied, as not all ' + 'dimensions could be resolved.') continue if len(v.ops) == 0 and v.dimension in self.overlay_dims: @@ -368,14 +365,12 @@ def _apply_transforms(self, element, ranges, style): if not util.isscalar(val): if k in self._nonvectorized_styles: element = type(element).__name__ - raise ValueError('Mapping a dimension to the "{style}" ' + raise ValueError(f'Mapping a dimension to the "{k}" ' 'style option is not supported by the ' - '{element} element using the {backend} ' - 'backend. To map the "{dim}" dimension ' - 'to the {style} use a groupby operation ' - 'to overlay your data along the dimension.'.format( - style=k, dim=v.dimension, element=element, - backend=self.renderer.backend)) + f'{element} element using the {self.renderer.backend} ' + f'backend. 
To map the "{v.dimension}" dimension ' + f'to the {k} use a groupby operation ' + 'to overlay your data along the dimension.') # If color is not valid colorspec add colormapper numeric = isinstance(val, np.ndarray) and val.dtype.kind in 'uifMm' diff --git a/holoviews/plotting/util.py b/holoviews/plotting/util.py index d03e9bbe2f..3cc2947bae 100644 --- a/holoviews/plotting/util.py +++ b/holoviews/plotting/util.py @@ -83,12 +83,12 @@ def collate(obj): return obj.collate() if isinstance(obj, HoloMap): display_warning.param.warning( - "Nesting {0}s within a {1} makes it difficult to access " - "your data or control how it appears; we recommend " - "calling .collate() on the {1} in order to follow the " - "recommended nesting structure shown in the Composing " - "Data user guide (https://goo.gl/2YS8LJ)".format( - obj.type.__name__, type(obj).__name__)) + f"Nesting {obj.type.__name__}s within a {type(obj).__name__} " + "makes it difficult to access your data or control how it appears; " + f"we recommend calling .collate() on the {type(obj).__name__} " + "in order to follow the recommended nesting structure shown " + "in the Composing Data user guide (https://goo.gl/2YS8LJ)" + ) return obj.collate() elif isinstance(obj, (Layout, NdLayout)): try: diff --git a/holoviews/streams.py b/holoviews/streams.py index 75f66af7af..7a61a00cdf 100644 --- a/holoviews/streams.py +++ b/holoviews/streams.py @@ -240,12 +240,13 @@ def _process_streams(cls, streams): if overlap: pname = type(s.parameterized).__name__ param.main.param.warning( - 'The {} parameter(s) on the {} object have ' + f'The {sorted([p.name for p in overlap])} parameter(s) ' + f'on the {pname} object have ' 'already been supplied in another stream. ' 'Ensure that the supplied streams only specify ' 'each parameter once, otherwise multiple ' - 'events will be triggered when the parameter ' - 'changes.'.format(sorted([p.name for p in overlap]), pname)) + 'events will be triggered when the parameter changes.' 
+ ) parameterizeds[pid] |= set(s.parameters) valid.append(s) return valid, invalid diff --git a/holoviews/tests/conftest.py b/holoviews/tests/conftest.py index f4d9533400..7d79f2379b 100644 --- a/holoviews/tests/conftest.py +++ b/holoviews/tests/conftest.py @@ -2,14 +2,18 @@ import sys from collections.abc import Callable +import panel as pn import pytest -from panel.tests.conftest import ( # noqa +from panel.tests.conftest import ( # noqa: F401 optional_markers, port, pytest_addoption, pytest_configure, server_cleanup, ) +from panel.tests.util import serve_and_wait + +import holoviews as hv def pytest_collection_modifyitems(config, items): @@ -32,14 +36,19 @@ def pytest_collection_modifyitems(config, items): with contextlib.suppress(ImportError): import matplotlib as mpl - mpl.use('agg') + + mpl.use("agg") with contextlib.suppress(Exception): - # From Dask 2023.7,1 they now automatic convert strings + # From Dask 2023.7.1 they now automatically convert strings # https://docs.dask.org/en/stable/changelog.html#v2023-7-1 + # From Dask 2024.3.0 they now use `dask_expr` by default + # https://github.com/dask/dask/issues/10995 import dask + dask.config.set({"dataframe.convert-string": False}) + dask.config.set({"dataframe.query-planning": False}) @pytest.fixture @@ -49,40 +58,38 @@ def ibis_sqlite_backend(): except ImportError: yield None else: - ibis.set_backend('sqlite') + ibis.set_backend("sqlite") yield ibis.set_backend(None) @pytest.fixture def bokeh_backend(): - import holoviews as hv - hv.renderer('bokeh') + hv.renderer("bokeh") prev_backend = hv.Store.current_backend - hv.Store.current_backend = 'bokeh' + hv.Store.current_backend = "bokeh" yield hv.Store.current_backend = prev_backend @pytest.fixture def mpl_backend(): - import holoviews as hv - hv.renderer('matplotlib') + hv.renderer("matplotlib") prev_backend = hv.Store.current_backend - hv.Store.current_backend = 'matplotlib' + hv.Store.current_backend = "matplotlib" yield hv.Store.current_backend = 
prev_backend @pytest.fixture def plotly_backend(): - import holoviews as hv - hv.renderer('plotly') + hv.renderer("plotly") prev_backend = hv.Store.current_backend - hv.Store.current_backend = 'plotly' + hv.Store.current_backend = "plotly" yield hv.Store.current_backend = prev_backend + @pytest.fixture def unimport(monkeypatch: pytest.MonkeyPatch) -> Callable[[str], None]: """ @@ -98,3 +105,13 @@ def unimport_module(modname: str) -> None: monkeypatch.setattr(sys, "path", []) return unimport_module + + +@pytest.fixture +def serve_hv(page, port): # noqa: F811 + def serve_and_return_page(hv_obj): + serve_and_wait(pn.pane.HoloViews(hv_obj), port=port) + page.goto(f"http://localhost:{port}") + return page + + return serve_and_return_page diff --git a/holoviews/tests/operation/test_datashader.py b/holoviews/tests/operation/test_datashader.py index ee4302649b..531d869a09 100644 --- a/holoviews/tests/operation/test_datashader.py +++ b/holoviews/tests/operation/test_datashader.py @@ -1560,3 +1560,10 @@ def test_imagestack_datashader_color_key(): color_key=cc.glasbey_light, ) render(op) # should not error out + + +def test_imagestack_datashade_count_cat(): + # Test for https://github.com/holoviz/holoviews/issues/6154 + df = pd.DataFrame({"x": range(3), "y": range(3), "c": range(3)}) + op = datashade(Points(df), aggregator=ds.count_cat("c")) + render(op) # should not error out diff --git a/holoviews/tests/operation/test_operation.py b/holoviews/tests/operation/test_operation.py index 0746560418..763c5f81ad 100644 --- a/holoviews/tests/operation/test_operation.py +++ b/holoviews/tests/operation/test_operation.py @@ -331,6 +331,49 @@ def test_dataset_histogram_empty_explicit_bins(self): hist = Histogram(([0, 1, 2], [0, 0]), vdims=('x_count', 'Count')) self.assertEqual(op_hist, hist) + def test_dataset_histogram_groupby_range_shared(self): + x = np.arange(10) + y = np.arange(10) + 10 + xy = np.concatenate([x, y]) + label = ["x"] * 10 + ["y"] * 10 + + ds = 
Dataset(pd.DataFrame([xy, label], index=["xy", "label"]).T, vdims=["xy", "label"]) + hist = histogram(ds, groupby="label", groupby_range="shared") + exp = np.linspace(0, 19, 21) + for k, v in hist.items(): + np.testing.assert_equal(exp, v.data["xy"]) + sel = np.asarray(label) == k + assert sel.sum() == 10 + assert (v.data["xy_count"][sel] == 1).all() + assert (v.data["xy_count"][~sel] == 0).all() + + def test_dataset_histogram_groupby_range_separated(self): + x = np.arange(10) + y = np.arange(10) + 10 + xy = np.concatenate([x, y]) + label = ["x"] * 10 + ["y"] * 10 + + ds = Dataset(pd.DataFrame([xy, label], index=["xy", "label"]).T, vdims=["xy", "label"]) + hist = histogram(ds, groupby="label", groupby_range="separated") + + for idx, v in enumerate(hist): + exp = np.linspace(idx * 10, 10 * idx + 9, 21) + np.testing.assert_equal(exp, v.data["xy"]) + assert v.data["xy_count"].sum() == 10 + + def test_dataset_histogram_groupby_datetime(self): + x = pd.date_range("2020-01-01", periods=100) + y = pd.date_range("2020-01-01", periods=100) + xy = np.concatenate([x, y]) + label = ["x"] * 100 + ["y"] * 100 + ds = Dataset(pd.DataFrame([xy, label], index=["xy", "label"]).T, vdims=["xy", "label"]) + hist = histogram(ds, groupby="label") + + exp = pd.date_range("2020-01-01", '2020-04-09', periods=21) + for h in hist: + np.testing.assert_equal(exp, h.data["xy"]) + assert (h.data["xy_count"] == 5).all() + @da_skip def test_dataset_histogram_dask(self): import dask.array as da diff --git a/holoviews/tests/plotting/bokeh/test_annotationplot.py b/holoviews/tests/plotting/bokeh/test_annotationplot.py index ab955a8131..4a60a3e823 100644 --- a/holoviews/tests/plotting/bokeh/test_annotationplot.py +++ b/holoviews/tests/plotting/bokeh/test_annotationplot.py @@ -17,7 +17,7 @@ VSpan, VSpans, ) -from holoviews.plotting.bokeh.util import bokeh32, bokeh33 +from holoviews.plotting.bokeh.util import bokeh32, bokeh33, bokeh34 from .test_plot import TestBokehPlot, bokeh_renderer @@ -29,7 +29,9 @@ 
VStrip as BkVStrip, ) -if bokeh33: +if bokeh34: + from bokeh.models import Node +elif bokeh33: from bokeh.models.coordinates import Node diff --git a/holoviews/tests/plotting/bokeh/test_elementplot.py b/holoviews/tests/plotting/bokeh/test_elementplot.py index ab6e210382..9cef2f3b6c 100644 --- a/holoviews/tests/plotting/bokeh/test_elementplot.py +++ b/holoviews/tests/plotting/bokeh/test_elementplot.py @@ -3,6 +3,7 @@ import numpy as np import panel as pn +import param import pytest from bokeh.document import Document from bokeh.models import ( @@ -16,7 +17,8 @@ tools, ) -from holoviews.core import DynamicMap, HoloMap, NdOverlay +from holoviews import opts +from holoviews.core import DynamicMap, HoloMap, NdOverlay, Overlay from holoviews.core.util import dt_to_int from holoviews.element import Curve, HeatMap, Image, Labels, Scatter from holoviews.plotting.util import process_cmap @@ -993,3 +995,96 @@ def test_clim_percentile(self): low, high = plot.ranges[('Image',)]['z']['robust'] assert low > 0 assert high < 1 + +class TestApplyHardBounds(TestBokehPlot): + def test_apply_hard_bounds(self): + """Test `apply_hard_bounds` with a single element.""" + x_values = np.linspace(10, 50, 5) + y_values = np.array([10, 20, 30, 40, 50]) + curve = Curve((x_values, y_values)).opts(apply_hard_bounds=True) + plot = bokeh_renderer.get_plot(curve) + assert plot.handles['x_range'].bounds == (10, 50) + + def test_apply_hard_bounds_overlay(self): + """Test `apply_hard_bounds` with an overlay of curves.""" + x1_values = np.linspace(10, 50, 5) + x2_values = np.linspace(10, 90, 5) + y_values = np.array([10, 20, 30, 40, 50]) + curve1 = Curve((x1_values, y_values)) + curve2 = Curve((x2_values, y_values)) + overlay = Overlay([curve1, curve2]).opts(opts.Curve(apply_hard_bounds=True)) + plot = bokeh_renderer.get_plot(overlay) + # Check if the large of the data range can be navigated to + assert plot.handles['x_range'].bounds == (10, 90) + + def test_apply_hard_bounds_with_xlim(self): + """Test 
`apply_hard_bounds` with `xlim` set. Initial view should be within xlim but allow panning to data range.""" + x_values = np.linspace(10, 50, 5) + y_values = np.array([10, 20, 30, 40, 50]) + curve = Curve((x_values, y_values)).opts(apply_hard_bounds=True, xlim=(15, 35)) + plot = bokeh_renderer.get_plot(curve) + initial_view_range = (plot.handles['x_range'].start, plot.handles['x_range'].end) + assert initial_view_range == (15, 35) + # Check if data beyond xlim can be navigated to + assert plot.handles['x_range'].bounds == (10, 50) + + def test_apply_hard_bounds_with_redim_range(self): + """Test `apply_hard_bounds` with `.redim.range(x=...)`. Hard bounds should strictly apply.""" + x_values = np.linspace(10, 50, 5) + y_values = np.array([10, 20, 30, 40, 50]) + curve = Curve((x_values, y_values)).redim.range(x=(25, None)).opts(apply_hard_bounds=True) + plot = bokeh_renderer.get_plot(curve) + # Expected to strictly adhere to any redim.range bounds, otherwise the data range + assert (plot.handles['x_range'].start, plot.handles['x_range'].end) == (25, 50) + assert plot.handles['x_range'].bounds == (25, 50) + + def test_apply_hard_bounds_datetime(self): + """Test datetime axes with hard bounds.""" + target_xlim_l = dt.datetime(2020, 1, 3) + target_xlim_h = dt.datetime(2020, 1, 7) + dates = [dt.datetime(2020, 1, i) for i in range(1, 11)] + values = np.linspace(0, 100, 10) + curve = Curve((dates, values)).opts( + apply_hard_bounds=True, + xlim=(target_xlim_l, target_xlim_h) + ) + plot = bokeh_renderer.get_plot(curve) + initial_view_range = (dt_to_int(plot.handles['x_range'].start), dt_to_int(plot.handles['x_range'].end)) + assert initial_view_range == (dt_to_int(target_xlim_l), dt_to_int(target_xlim_h)) + # Validate navigation bounds include entire data range + hard_bounds = (dt_to_int(plot.handles['x_range'].bounds[0]), dt_to_int(plot.handles['x_range'].bounds[1])) + assert hard_bounds == (dt_to_int(dt.datetime(2020, 1, 1)), dt_to_int(dt.datetime(2020, 1, 10))) + + def 
test_dynamic_map_bounds_update(self): + """Test that `apply_hard_bounds` applies correctly when DynamicMap is updated.""" + + def curve_data(choice): + datasets = { + 'set1': (np.linspace(0, 5, 100), np.random.rand(100)), + 'set2': (np.linspace(0, 20, 100), np.random.rand(100)), + } + x, y = datasets[choice] + return Curve((x, y)) + + ChoiceStream = Stream.define( + 'Choice', + choice=param.ObjectSelector(default='set1', objects=['set1', 'set2']) + ) + choice_stream = ChoiceStream() + dmap = DynamicMap(curve_data, kdims=[], streams=[choice_stream]) + dmap = dmap.opts(opts.Curve(apply_hard_bounds=True, xlim=(2,3), framewise=True)) + dmap = dmap.redim.values(choice=['set1', 'set2']) + plot = bokeh_renderer.get_plot(dmap) + + # Keeping the xlim consistent between updates, and change data range bounds + # Initially select 'set1' + dmap.event(choice='set1') + assert plot.handles['x_range'].start == 2 + assert plot.handles['x_range'].end == 3 + assert plot.handles['x_range'].bounds == (0, 5) + + # Update to 'set2' + dmap.event(choice='set2') + assert plot.handles['x_range'].start == 2 + assert plot.handles['x_range'].end == 3 + assert plot.handles['x_range'].bounds == (0, 20) diff --git a/holoviews/tests/plotting/bokeh/test_rasterplot.py b/holoviews/tests/plotting/bokeh/test_rasterplot.py index 56dd684f90..c5c404f6f9 100644 --- a/holoviews/tests/plotting/bokeh/test_rasterplot.py +++ b/holoviews/tests/plotting/bokeh/test_rasterplot.py @@ -408,6 +408,34 @@ def test_image_stack_tuple_single_3darray(self): assert source.data["dh"][0] == self.ysize assert isinstance(plot, ImageStackPlot) + def test_image_stack_dict_cmap(self): + x = np.arange(0, 3) + y = np.arange(5, 8) + a = np.array([[np.nan, np.nan, 1], [np.nan] * 3, [np.nan] * 3]) + b = np.array([[np.nan] * 3, [1, 1, np.nan], [np.nan] * 3]) + c = np.array([[np.nan] * 3, [np.nan] * 3, [1, 1, 1]]) + + img_stack = ImageStack((x, y, a, b, c), kdims=["x", "y"], vdims=["b", "a", "c"]) + img_stack.opts(cmap={"c": "yellow", "a": 
"red", "b": "green"}) + plot = bokeh_renderer.get_plot(img_stack) + source = plot.handles["source"] + np.testing.assert_equal(source.data["image"][0][:, :, 0], a) + np.testing.assert_equal(source.data["image"][0][:, :, 1], b) + np.testing.assert_equal(source.data["image"][0][:, :, 2], c) + assert plot.handles["color_mapper"].palette == ["green", "red", "yellow"] + + def test_image_stack_dict_cmap_missing(self): + x = np.arange(0, 3) + y = np.arange(5, 8) + a = np.array([[np.nan, np.nan, 1], [np.nan] * 3, [np.nan] * 3]) + b = np.array([[np.nan] * 3, [1, 1, np.nan], [np.nan] * 3]) + c = np.array([[np.nan] * 3, [np.nan] * 3, [1, 1, 1]]) + + img_stack = ImageStack((x, y, a, b, c), kdims=["x", "y"], vdims=["b", "a", "c"]) + with pytest.raises(ValueError, match="must have the same value dimensions"): + img_stack.opts(cmap={"c": "yellow", "a": "red"}) + bokeh_renderer.get_plot(img_stack) + class TestImageStackEven(_ImageStackBase): __test__ = True diff --git a/holoviews/tests/plotting/bokeh/test_server.py b/holoviews/tests/plotting/bokeh/test_server.py index 4f9f21cf8e..5686bfbff3 100644 --- a/holoviews/tests/plotting/bokeh/test_server.py +++ b/holoviews/tests/plotting/bokeh/test_server.py @@ -163,6 +163,9 @@ def test_server_dynamicmap_with_stream(self): cds = session.document.roots[0].select_one({'type': ColumnDataSource}) self.assertEqual(cds.data['y'][2], 2) + def loaded(): + state._schedule_on_load(doc, None) + doc.add_next_tick_callback(loaded) def run(): stream.event(y=3) doc.add_next_tick_callback(run) @@ -180,6 +183,9 @@ def test_server_dynamicmap_with_stream_dims(self): orig_cds = session.document.roots[0].select_one({'type': ColumnDataSource}) self.assertEqual(orig_cds.data['y'][2], 2) + def loaded(): + state._schedule_on_load(doc, None) + doc.add_next_tick_callback(loaded) def run(): stream.event(y=3) doc.add_next_tick_callback(run) diff --git a/holoviews/tests/plotting/bokeh/test_subcoordy.py b/holoviews/tests/plotting/bokeh/test_subcoordy.py index 
033fc4c040..0920c195e1 100644 --- a/holoviews/tests/plotting/bokeh/test_subcoordy.py +++ b/holoviews/tests/plotting/bokeh/test_subcoordy.py @@ -41,6 +41,17 @@ def test_bool_base(self): assert plot.state.yaxis.ticker.ticks == [0, 1] assert plot.state.yaxis.major_label_overrides == {0: 'Data 0', 1: 'Data 1'} + def test_renderers_reversed(self): + overlay = Overlay([Curve(range(10), label=f'Data {i}').opts(subcoordinate_y=True) for i in range(2)]) + overlay = VSpan(0, 1, label='back') * overlay * VSpan(2, 3, label='front') + plot = bokeh_renderer.get_plot(overlay) + renderers = plot.handles['plot'].renderers + assert (renderers[0].left, renderers[0].right) == (0, 1) + # Only the subcoord-y renderers are reversed by default. + assert renderers[1].name == 'Data 1' + assert renderers[2].name == 'Data 0' + assert (renderers[3].left, renderers[3].right) == (2, 3) + def test_bool_scale(self): test_data = [ (0.5, (-0.25, 0.25), (0.75, 1.25), (-0.25, 1.25)), @@ -252,6 +263,136 @@ def test_tools_instance_zoom_untouched(self): else: raise AssertionError('Provided zoom not found.') + def test_single_group(self): + # Same as test_bool_base, to check nothing is affected by defining + # a single group. 
+ + overlay = Overlay([Curve(range(10), label=f'Data {i}', group='Group').opts(subcoordinate_y=True) for i in range(2)]) + plot = bokeh_renderer.get_plot(overlay) + # subcoordinate_y is propagated to the overlay + assert plot.subcoordinate_y is True + # the figure has only one yaxis + assert len(plot.state.yaxis) == 1 + # the overlay has two subplots + assert len(plot.subplots) == 2 + assert ('Group', 'Data_0') in plot.subplots + assert ('Group', 'Data_1') in plot.subplots + # the range per subplots are correctly computed + sp1 = plot.subplots[('Group', 'Data_0')] + assert sp1.handles['glyph_renderer'].coordinates.y_target.start == -0.5 + assert sp1.handles['glyph_renderer'].coordinates.y_target.end == 0.5 + sp2 = plot.subplots[('Group', 'Data_1')] + assert sp2.handles['glyph_renderer'].coordinates.y_target.start == 0.5 + assert sp2.handles['glyph_renderer'].coordinates.y_target.end == 1.5 + # y_range is correctly computed + assert plot.handles['y_range'].start == -0.5 + assert plot.handles['y_range'].end == 1.5 + # extra_y_range is empty + assert plot.handles['extra_y_ranges'] == {} + # the ticks show the labels + assert plot.state.yaxis.ticker.ticks == [0, 1] + assert plot.state.yaxis.major_label_overrides == {0: 'Data 0', 1: 'Data 1'} + + def test_multiple_groups(self): + overlay = Overlay([ + Curve(range(10), label=f'{group} / {i}', group=group).opts(subcoordinate_y=True) + for group in ['A', 'B'] + for i in range(2) + ]) + plot = bokeh_renderer.get_plot(overlay) + # subcoordinate_y is propagated to the overlay + assert plot.subcoordinate_y is True + # the figure has only one yaxis + assert len(plot.state.yaxis) == 1 + # the overlay has two subplots + assert len(plot.subplots) == 4 + assert ('A', 'A_over_0') in plot.subplots + assert ('A', 'A_over_1') in plot.subplots + assert ('B', 'B_over_0') in plot.subplots + assert ('B', 'B_over_1') in plot.subplots + # the range per subplots are correctly computed + sp1 = plot.subplots[('A', 'A_over_0')] + assert 
sp1.handles['glyph_renderer'].coordinates.y_target.start == -0.5 + assert sp1.handles['glyph_renderer'].coordinates.y_target.end == 0.5 + sp2 = plot.subplots[('A', 'A_over_1')] + assert sp2.handles['glyph_renderer'].coordinates.y_target.start == 0.5 + assert sp2.handles['glyph_renderer'].coordinates.y_target.end == 1.5 + sp3 = plot.subplots[('B', 'B_over_0')] + assert sp3.handles['glyph_renderer'].coordinates.y_target.start == 1.5 + assert sp3.handles['glyph_renderer'].coordinates.y_target.end == 2.5 + sp4 = plot.subplots[('B', 'B_over_1')] + assert sp4.handles['glyph_renderer'].coordinates.y_target.start == 2.5 + assert sp4.handles['glyph_renderer'].coordinates.y_target.end == 3.5 + # y_range is correctly computed + assert plot.handles['y_range'].start == -0.5 + assert plot.handles['y_range'].end == 3.5 + # extra_y_range is empty + assert plot.handles['extra_y_ranges'] == {} + # the ticks show the labels + assert plot.state.yaxis.ticker.ticks == [0, 1, 2, 3] + assert plot.state.yaxis.major_label_overrides == { + 0: 'A / 0', 1: 'A / 1', + 2: 'B / 0', 3: 'B / 1', + } + + def test_multiple_groups_wheel_zoom_configured(self): + # Same as test_tools_default_wheel_zoom_configured + + groups = ['A', 'B'] + overlay = Overlay([ + Curve(range(10), label=f'{group} / {i}', group=group).opts(subcoordinate_y=True) + for group in groups + for i in range(2) + ]) + plot = bokeh_renderer.get_plot(overlay) + zoom_tools = [tool for tool in plot.state.tools if isinstance(tool, WheelZoomTool)] + assert zoom_tools == plot.handles['zooms_subcoordy']['wheel_zoom'] + assert len(zoom_tools) == len(groups) + for zoom_tool, group in zip(zoom_tools, reversed(groups)): + assert len(zoom_tool.renderers) == 2 + assert len(set(zoom_tool.renderers)) == 2 + assert zoom_tool.dimensions == 'height' + assert zoom_tool.level == 1 + assert zoom_tool.description == f'Wheel Zoom ({group})' + + def test_single_group_overlaid_no_error(self): + overlay = Overlay([Curve(range(10), label=f'Data {i}', 
group='Group').opts(subcoordinate_y=True) for i in range(2)]) + with_span = VSpan(1, 2) * overlay * VSpan(3, 4) + bokeh_renderer.get_plot(with_span) + + def test_multiple_groups_overlaid_no_error(self): + overlay = Overlay([ + Curve(range(10), label=f'{group} / {i}', group=group).opts(subcoordinate_y=True) + for group in ['A', 'B'] + for i in range(2) + ]) + with_span = VSpan(1, 2) * overlay * VSpan(3, 4) + bokeh_renderer.get_plot(with_span) + + def test_missing_group_error(self): + curves = [] + for i, group in enumerate(['A', 'B', 'C']): + for i in range(2): + label = f'{group}{i}' + if group == "B": + curve = Curve(range(10), label=label, group=group).opts( + subcoordinate_y=True + ) + else: + curve = Curve(range(10), label=label).opts( + subcoordinate_y=True + ) + curves.append(curve) + + with pytest.raises( + ValueError, + match=( + 'The subcoordinate_y overlay contains elements with a defined group, each ' + 'subcoordinate_y element in the overlay must have a defined group.' + ) + ): + bokeh_renderer.get_plot(Overlay(curves)) + def test_norm_subcoordinate_group_ranges(self): x = np.linspace(0, 10 * np.pi, 21) curves = [] diff --git a/holoviews/tests/plotting/matplotlib/test_renderer.py b/holoviews/tests/plotting/matplotlib/test_renderer.py index 07d7523ebf..0c8f55aa26 100644 --- a/holoviews/tests/plotting/matplotlib/test_renderer.py +++ b/holoviews/tests/plotting/matplotlib/test_renderer.py @@ -1,12 +1,15 @@ """ Test cases for rendering exporters """ +import os import subprocess +import sys from unittest import SkipTest import numpy as np import panel as pn import param +import pytest from matplotlib import style from panel.widgets import DiscreteSlider, FloatSlider, Player from pyviz_comms import CommManager @@ -80,6 +83,7 @@ def test_render_gif(self): data, metadata = self.renderer.components(self.map1, 'gif') self.assertIn("= Version(latest) - date_check = current_release_date >= allowed_date + date_check = latest_release_date >= allowed_date is_latest = 
version_check or date_check all_latest &= is_latest @@ -38,4 +44,4 @@ def main(*packages): if __name__ == "__main__": - main("numpy", "pandas") + main(*sys.argv[1:]) diff --git a/scripts/download_data.sh b/scripts/download_data.sh new file mode 100755 index 0000000000..16754fee73 --- /dev/null +++ b/scripts/download_data.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +set -euxo pipefail + +bokeh sampledata + +python -c " +try: + import pooch + import scipy + import xarray as xr +except ImportError: + pass +else: + xr.tutorial.open_dataset('air_temperature') + xr.tutorial.open_dataset('rasm') +" diff --git a/setup.py b/setup.py index 10fd2ea55d..0edb3fd2ad 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ install_requires = [ "param >=1.12.0,<3.0", "numpy >=1.0", - "pyviz_comms >=0.7.4", + "pyviz_comms >=2.1", "panel >=1.0", "colorcet", "packaging", @@ -57,6 +57,7 @@ 'spatialpandas', 'datashader >=0.11.1', 'dash >=1.16', + 'xyzservices >=2022.9.0', ] if os.name != "nt":