From e8881bef17a4666ac5fee65a73bf337cdc8ca547 Mon Sep 17 00:00:00 2001
From: pnunna93 <104791500+pnunna93@users.noreply.github.com>
Date: Fri, 20 Sep 2024 15:54:58 -0500
Subject: [PATCH] Enable packaging for ROCm 6.2 (#1367)

* Enable 6.2 build

* Update documentation for 6.2.0 pip install
---
 .github/workflows/python-package.yml | 2 +-
 docs/source/installation.mdx         | 2 +-
 tests/test_functional.py             | 1 +
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index d2da82501..21c4c1895 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -107,7 +107,7 @@ jobs:
         os: [ubuntu-latest]
         arch: [x86_64]
         rocm_version:
-          ["6.1.2"]
+          ["6.1.2", "6.2"]
     runs-on: ${{ matrix.os }} # One day, we could run them on native agents. Azure supports this now but it's planned only for Q3 2023 for hosted agents
     steps:
       - uses: actions/checkout@v4
diff --git a/docs/source/installation.mdx b/docs/source/installation.mdx
index 60419b38a..146fb0ddd 100644
--- a/docs/source/installation.mdx
+++ b/docs/source/installation.mdx
@@ -167,7 +167,7 @@ apt-get update && apt-get install -y git && cd home
 pip install torch --index-url https://download.pytorch.org/whl/rocm6.1/

 # Install bitsandbytes from PyPI
-# (This is supported on Ubuntu 22.04, Python 3.10, ROCm 6.1.0/6.1.1/6.1.2 and gpu arch - gfx90a, gfx942, gfx1100
+# (This is supported on Ubuntu 22.04, Python 3.10, ROCm 6.1.0/6.1.1/6.1.2/6.2.0 and gpu arch - gfx90a, gfx942, gfx1100
 # Please install from source if your configuration doesn't match with these)
 pip install bitsandbytes

diff --git a/tests/test_functional.py b/tests/test_functional.py
index a9d926b89..35187db78 100644
--- a/tests/test_functional.py
+++ b/tests/test_functional.py
@@ -2303,6 +2303,7 @@ def test_gemv_4bit(dtype, storage_type, quant_storage, double_quant, kind):
     assert maxratio < 1.02 and maxratio > 0.98


+@pytest.mark.skipif(HIP_ENVIRONMENT, reason="this test is not supported on ROCm yet")
 @pytest.mark.parametrize("kind", ["fc1", "fc2", "attn", "attn_packed"])
 @pytest.mark.parametrize("quant_type", ["nf4", "fp4"])
 @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32], ids=describe_dtype)
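
For context on the tests/test_functional.py hunk: the added skipif marker gates test_gemv_4bit off on ROCm builds, where the kernel path is not yet supported. The following is a minimal sketch of that pattern only; deriving the flag from torch.version.hip is an illustrative assumption here, not necessarily how bitsandbytes defines HIP_ENVIRONMENT.

import pytest
import torch

# Illustrative assumption: ROCm/HIP builds of PyTorch expose a version string in
# torch.version.hip, while CUDA builds expose None. bitsandbytes' own
# HIP_ENVIRONMENT flag may be defined elsewhere in the test helpers.
HIP_ENVIRONMENT = torch.version.hip is not None

@pytest.mark.skipif(HIP_ENVIRONMENT, reason="this test is not supported on ROCm yet")
def test_gemv_4bit_placeholder():
    # Placeholder body; the real test exercises the 4-bit GEMV path.
    assert True

With this marker in place, pytest reports the test as skipped (with the given reason) on ROCm machines and runs it unchanged on CUDA machines.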