From 71612a6b07d1a7224ffb65e5768a083163725944 Mon Sep 17 00:00:00 2001
From: lucylq
Date: Thu, 14 Nov 2024 10:50:12 -0800
Subject: [PATCH] Install requirements for llama vision (#6854)

install requirements for llama vision
---
 .ci/scripts/test_model.sh   | 4 ++++
 .github/workflows/pull.yml  | 4 ----
 .github/workflows/trunk.yml | 4 ----
 3 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/.ci/scripts/test_model.sh b/.ci/scripts/test_model.sh
index cd9583ff97..1f42880b8f 100755
--- a/.ci/scripts/test_model.sh
+++ b/.ci/scripts/test_model.sh
@@ -87,6 +87,10 @@ test_model() {
     bash examples/models/llava/install_requirements.sh
     STRICT="--no-strict"
   fi
+  if [[ "$MODEL_NAME" == "llama3_2_vision_encoder" ]]; then
+    # Install requirements for llama vision
+    bash examples/models/llama3_2_vision/install_requirements.sh
+  fi
   # python3 -m examples.portable.scripts.export --model_name="llama2" should works too
   "${PYTHON_EXECUTABLE}" -m examples.portable.scripts.export --model_name="${MODEL_NAME}" "${STRICT}"
   run_portable_executor_runner
diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml
index d056ab866a..1f5da06a92 100644
--- a/.github/workflows/pull.yml
+++ b/.github/workflows/pull.yml
@@ -72,10 +72,6 @@ jobs:
           conda activate "${CONDA_ENV}"

           MODEL_NAME=${{ matrix.model }}
-          # Install requirements for llama vision
-          if [[ "$MODEL_NAME" == "llama3_2_vision_encoder" ]]; then
-            bash examples/models/llama3_2_vision/install_requirements.sh
-          fi
           BUILD_TOOL=${{ matrix.build-tool }}
           BACKEND=${{ matrix.backend }}
           DEMO_BACKEND_DELEGATION=${{ matrix.demo_backend_delegation }}
diff --git a/.github/workflows/trunk.yml b/.github/workflows/trunk.yml
index fd78b20537..7e8769aa77 100644
--- a/.github/workflows/trunk.yml
+++ b/.github/workflows/trunk.yml
@@ -58,10 +58,6 @@ jobs:
           bash .ci/scripts/setup-conda.sh
           # Setup MacOS dependencies as there is no Docker support on MacOS atm
           PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}"
-          # Install requirements for llama vision
-          if [[ "$MODEL_NAME" == "llama3_2_vision_encoder" ]]; then
-            ${CONDA_RUN} bash examples/models/llama3_2_vision/install_requirements.sh
-          fi
           # Build and test executorch
           PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}" "${DEMO_BACKEND_DELEGATION}"