From d13006cca857323814c09f5123b87c1a005bba74 Mon Sep 17 00:00:00 2001
From: V3n3RiX
Date: Tue, 4 Mar 2025 01:46:23 +0000
Subject: gentoo auto-resync : 04:03:2025 - 01:46:22

---
 sci-libs/caffe2/Manifest               |   2 +-
 sci-libs/caffe2/caffe2-2.5.1-r7.ebuild | 326 ---------------------------------
 sci-libs/caffe2/caffe2-2.5.1-r8.ebuild | 326 +++++++++++++++++++++++++++++++++
 3 files changed, 327 insertions(+), 327 deletions(-)
 delete mode 100644 sci-libs/caffe2/caffe2-2.5.1-r7.ebuild
 create mode 100644 sci-libs/caffe2/caffe2-2.5.1-r8.ebuild
(limited to 'sci-libs/caffe2')

diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index 29dd81aeb415..573a9f8b161c 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -13,6 +13,6 @@ DIST pytorch-2.4.1.tar.gz 115029469 BLAKE2B c2909ff27d527bc57cba56b780d3b8cd07a0
 DIST pytorch-2.5.1.tar.gz 116091366 BLAKE2B 7838b17562b94ffc7d798031348689db607dd5eae2a3c35be365972e2b52a2c1b12067068d5aca5ab00cf0977d9c2c3c9ae5337d69534c864c732e6256cbeef6 SHA512 a913a466324a65fa3d79c5e9ad4d605fc7976f0134fda2f81aaa3cea29d56926604999b8a238759646d211e63b47bbb446cdffa86ca8defd8159f11e30301289
 DIST pytorch-2.6.0.tar.gz 119594438 BLAKE2B 3152eb341cf42295e147e59625beb9c06608aa4b78f9618c1c0024b10c1c767715d07fe8c4be52d029ac47f808cd0d5e65c9530ec90d951a64b993083b4067ad SHA512 a70da80ff09d226085e18228132cf6bb236ad8cc47eed52375d0d2a615f09dd33849da947270b5670c184eab60cb8e2adf11d801babfbda7aa621400501d07b0
 EBUILD caffe2-2.4.1-r4.ebuild 8557 BLAKE2B 2cefd7d0c818fb431b3eb6bc2be0a1b225790271b1108169256f8dccac99707fac3d36dc98b7e19a688c66053627d0a222d67e7f65eb6a600ed251677df5490a SHA512 ded17bc1e912f4691bfeee75c66676d331d03c21fe9c9c7416c874ef435e624bdc6109c128f0b5dbe9a5a5be81e648767c6020cd570b94e74f47061e5839f9b3
-EBUILD caffe2-2.5.1-r7.ebuild 8710 BLAKE2B 425e59df62e884643f9ea7024f942b6fd42948d4fb9d8e446dbba3c920d6f8efad2e78bd7a7ff54c04b08483dcb00572596a848f36d22dbdb0e1c6723e05cfec SHA512 3f056ebc8db1501e210f96a075286b788916de89099b3f2ded0e124220848dc6268ba5f1702e81b0c373f944ec53e0494ce862940b0bfa76925ef7ef55d8b678
+EBUILD caffe2-2.5.1-r8.ebuild 8709 BLAKE2B bc51fd52e4cc86d50c1793a592bb1b5a38466f0332c7c9c0010eb051e42db694049237fe9b5dabdfa6f179437708a3cca7cc45fe79991828044698484cdfc237 SHA512 9576827a35e5b380b7142dd8cb7ffe4a85aa30693011425c999790919865e541eac41fd481337019af584b07e21397e0095f576197185940c59a054cfa5ef216
 EBUILD caffe2-2.6.0.ebuild 8745 BLAKE2B 0880979760a4c2ea297bdad4a130bbe706dc74780ddada5d93a78f9fd51634e2fe745caf8ef395e2fb223b1c0de4c647669bab727de7482ad1f80817f7f0c771 SHA512 e93d9a21d7ac83e97e934d3b91c9f62fffbea3eb19ae26a154d9375853a567d0cf7f98f5586b01784bef9f9356c6ec00f4904c14e42d6b7e60b361ae5d3c68eb
 MISC metadata.xml 1081 BLAKE2B 57800165612fd555263cab4cf0bb0521acd7d41f0023da501a50ccc5a479d25ad7087661a6eec500333c3cf9cc3a85d7be8e9385869c14d92f351d8d9df68556 SHA512 71cc785a5b1688b6a0ac971d31d5fa85bef86966aa80009c34e97caa385c81a7c2fb64ae34d0a39c21548d8238efd008da24bd838a47886bfa3f0d952246b76e

diff --git a/sci-libs/caffe2/caffe2-2.5.1-r7.ebuild b/sci-libs/caffe2/caffe2-2.5.1-r7.ebuild
deleted file mode 100644
index 85a3c940b097..000000000000
--- a/sci-libs/caffe2/caffe2-2.5.1-r7.ebuild
+++ /dev/null
@@ -1,326 +0,0 @@
-# Copyright 2022-2025 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-PYTHON_COMPAT=( python3_{10..12} )
-ROCM_VERSION=6.1
-inherit python-single-r1 cmake cuda flag-o-matic prefix rocm toolchain-funcs
-
-MYPN=pytorch
-MYP=${MYPN}-${PV}
-
-DESCRIPTION="A deep learning framework"
-HOMEPAGE="https://pytorch.org/"
-SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
-	-> ${MYP}.tar.gz"
-
-S="${WORKDIR}"/${MYP}
-
-LICENSE="BSD"
-SLOT="0"
-KEYWORDS="~amd64"
-IUSE="cuda distributed fbgemm flash gloo mkl mpi nnpack +numpy onednn openblas opencl openmp qnnpack rocm xnnpack"
-RESTRICT="test"
-REQUIRED_USE="
-	${PYTHON_REQUIRED_USE}
-	mpi? ( distributed )
-	gloo? ( distributed )
-	?? ( cuda rocm )
-	rocm? (
-		|| ( ${ROCM_REQUIRED_USE} )
-		!flash
-	)
-"
-
-RDEPEND="
-	${PYTHON_DEPS}
-	dev-cpp/abseil-cpp:=
-	dev-cpp/gflags:=
-	>=dev-cpp/glog-0.5.0
-	dev-cpp/nlohmann_json
-	dev-cpp/opentelemetry-cpp
-	dev-libs/cpuinfo
-	dev-libs/libfmt
-	dev-libs/protobuf:=
-	dev-libs/pthreadpool
-	dev-libs/sleef
-	virtual/lapack
-	sci-libs/onnx
-	sci-libs/foxi
-	cuda? (
-		dev-libs/cudnn
-		>=dev-libs/cudnn-frontend-1.0.3:0/8
-		dev-util/nvidia-cuda-toolkit:=[profiler]
-	)
-	fbgemm? ( >=dev-libs/FBGEMM-2023.12.01 )
-	gloo? ( sci-libs/gloo[cuda?] )
-	mpi? ( virtual/mpi )
-	nnpack? ( sci-libs/NNPACK )
-	numpy? ( $(python_gen_cond_dep '
-		dev-python/numpy[${PYTHON_USEDEP}]
-	') )
-	onednn? ( =dev-libs/oneDNN-3.5* )
-	opencl? ( virtual/opencl )
-	qnnpack? (
-		!sci-libs/QNNPACK
-		dev-cpp/gemmlowp
-	)
-	rocm? (
-		=dev-util/hip-6.1*
-		=dev-libs/rccl-6.1*[${ROCM_USEDEP}]
-		=sci-libs/rocThrust-6.1*[${ROCM_USEDEP}]
-		=sci-libs/rocPRIM-6.1*[${ROCM_USEDEP}]
-		=sci-libs/hipBLAS-6.1*[${ROCM_USEDEP}]
-		=sci-libs/hipFFT-6.1*[${ROCM_USEDEP}]
-		=sci-libs/hipSPARSE-6.1*[${ROCM_USEDEP}]
-		=sci-libs/hipRAND-6.1*[${ROCM_USEDEP}]
-		=sci-libs/hipCUB-6.1*[${ROCM_USEDEP}]
-		=sci-libs/hipSOLVER-6.1*[${ROCM_USEDEP}]
-		=sci-libs/miopen-6.1*[${ROCM_USEDEP}]
-		=dev-util/roctracer-6.1*[${ROCM_USEDEP}]
-
-		=sci-libs/hipBLASLt-6.1*
-		amdgpu_targets_gfx90a? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx90a] )
-		amdgpu_targets_gfx940? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx940] )
-		amdgpu_targets_gfx941? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx941] )
-		amdgpu_targets_gfx942? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx942] )
-	)
-	distributed? (
-		sci-libs/tensorpipe[cuda?]
-		dev-cpp/cpp-httplib
-	)
-	xnnpack? ( >=sci-libs/XNNPACK-2024.02.29 )
-	mkl? ( sci-libs/mkl )
-	openblas? ( sci-libs/openblas )
-"
-
-DEPEND="
-	${RDEPEND}
-	qnnpack? ( dev-libs/clog )
-	cuda? ( <=dev-libs/cutlass-3.4.1 )
-	onednn? ( sci-libs/ideep )
-	dev-libs/psimd
-	dev-libs/FP16
-	dev-libs/FXdiv
-	dev-libs/pocketfft
-	dev-libs/flatbuffers
-	>=sci-libs/kineto-0.4.0_p20240525
-	$(python_gen_cond_dep '
-		dev-python/pyyaml[${PYTHON_USEDEP}]
-		dev-python/pybind11[${PYTHON_USEDEP}]
-		dev-python/typing-extensions[${PYTHON_USEDEP}]
-	')
-"
-
-PATCHES=(
-	"${FILESDIR}"/${P}-unbundle_fmt.patch
-	"${FILESDIR}"/${P}-unbundle_kineto.patch
-	"${FILESDIR}"/${P}-cudnn_include_fix.patch
-	"${FILESDIR}"/${P}-gentoo.patch
-	"${FILESDIR}"/${PN}-2.4.0-cpp-httplib.patch
-	"${FILESDIR}"/${P}-glog-0.6.0.patch
-	"${FILESDIR}"/${P}-newfix-functorch-install.patch
-)
-
-src_prepare() {
-	filter-lto #bug 862672
-
-	# Unbundle fmt
-	sed -i \
-		-e 's|::fmt-header-only||' \
-		c10/CMakeLists.txt \
-		cmake/Dependencies.cmake \
-		torch/CMakeLists.txt \
-		|| die
-
-	# Drop third_party from CMake tree
-	sed -i \
-		-e '/add_subdirectory.*third_party/d' \
-		CMakeLists.txt \
-		cmake/Dependencies.cmake \
-		cmake/ProtoBuf.cmake \
-		aten/src/ATen/CMakeLists.txt \
-		|| die
-	# Change libc10* path
-	sed -i \
-		-e "/EXPORT/s|DESTINATION lib)|DESTINATION $(get_libdir))|" \
-		c10/cuda/CMakeLists.txt \
-		c10/CMakeLists.txt \
-		c10/hip/CMakeLists.txt \
-		|| die
-
-	cmake_src_prepare
-	pushd torch/csrc/jit/serialization || die
-	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
-	popd
-
-	# prefixify the hardcoded paths, after all patches are applied
-	hprefixify \
-		aten/CMakeLists.txt \
-		caffe2/CMakeLists.txt \
-		cmake/Metal.cmake \
-		cmake/Modules/*.cmake \
-		cmake/Modules_CUDA_fix/FindCUDNN.cmake \
-		cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
-		cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
-		cmake/public/LoadHIP.cmake \
-		cmake/public/cuda.cmake \
-		cmake/Dependencies.cmake \
-		torch/CMakeLists.txt \
-		CMakeLists.txt
-
-	if use rocm; then
-		sed -e "s:/opt/rocm:/usr:" \
-			-e "s:lib/cmake:$(get_libdir)/cmake:g" \
-			-e "s/HIP 1.0/HIP 1.0 REQUIRED/" \
-			-i cmake/public/LoadHIP.cmake || die
-
-		ebegin "HIPifying cuda sources"
-		${EPYTHON} tools/amd_build/build_amd.py || die
-		eend $?
-	fi
-}
-
-src_configure() {
-	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
-		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
-		ewarn "These may not be optimal for your GPU."
-		ewarn ""
-		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
-		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
- ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5 3.5" - ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell" - ewarn "" - ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus" - ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'" - fi - - local mycmakeargs=( - -DBUILD_CUSTOM_PROTOBUF=OFF - -DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir) - -DPython_EXECUTABLE="${PYTHON}" - -DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir) - -DUSE_CCACHE=OFF - -DUSE_CUDA=$(usex cuda) - -DUSE_DISTRIBUTED=$(usex distributed) - -DUSE_FAKELOWP=OFF - -DUSE_FBGEMM=$(usex fbgemm) - -DUSE_FLASH_ATTENTION=$(usex flash) - -DUSE_GFLAGS=ON - -DUSE_GLOG=ON - -DUSE_GLOO=$(usex gloo) - -DUSE_ITT=OFF - -DUSE_KINETO=OFF # TODO - -DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma - -DUSE_MEM_EFF_ATTENTION=OFF - -DUSE_MKLDNN=$(usex onednn) - -DUSE_MPI=$(usex mpi) - -DUSE_NCCL=OFF - -DUSE_NNPACK=$(usex nnpack) - -DUSE_NUMA=OFF - -DUSE_NUMPY=$(usex numpy) - -DUSE_OPENCL=$(usex opencl) - -DUSE_OPENMP=$(usex openmp) - -DUSE_PYTORCH_QNNPACK=$(usex qnnpack) - -DUSE_PYTORCH_METAL=OFF - -DUSE_ROCM=$(usex rocm) - -DUSE_SYSTEM_CPUINFO=ON - -DUSE_SYSTEM_EIGEN_INSTALL=ON - -DUSE_SYSTEM_FP16=ON - -DUSE_SYSTEM_FXDIV=ON - -DUSE_SYSTEM_GLOO=ON - -DUSE_SYSTEM_ONNX=ON - -DUSE_SYSTEM_PSIMD=ON - -DUSE_SYSTEM_PSIMD=ON - -DUSE_SYSTEM_PTHREADPOOL=ON - -DUSE_SYSTEM_PYBIND11=ON - -DUSE_SYSTEM_SLEEF=ON - -DUSE_SYSTEM_XNNPACK=$(usex xnnpack) - -DUSE_TENSORPIPE=$(usex distributed) - -DUSE_UCC=OFF - -DUSE_VALGRIND=OFF - -DUSE_XNNPACK=$(usex xnnpack) - -DUSE_XPU=OFF - -Wno-dev - ) - - if use mkl; then - mycmakeargs+=(-DBLAS=MKL) - elif use openblas; then - mycmakeargs+=(-DBLAS=OpenBLAS) - else - mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=) - fi - - if use cuda; then - addpredict "/dev/nvidiactl" # bug 867706 - addpredict "/dev/char" - addpredict "/proc/self/task" # bug 926116 - - mycmakeargs+=( - -DUSE_CUDNN=ON - -DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}" - -DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library - -DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")" - ) - elif use rocm; then - export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)" - - mycmakeargs+=( - -DUSE_NCCL=ON - -DUSE_SYSTEM_NCCL=ON - ) - - # ROCm libraries produce too much warnings - append-cxxflags -Wno-deprecated-declarations -Wno-unused-result - - if tc-is-clang; then - # fix mangling in LLVM: https://github.com/llvm/llvm-project/issues/85656 - append-cxxflags -fclang-abi-compat=17 - fi - fi - - if use onednn; then - mycmakeargs+=( - -DMKLDNN_FOUND=ON - -DMKLDNN_LIBRARIES=dnnl - -DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl" - ) - fi - - cmake_src_configure -} - -src_compile() { - PYTORCH_BUILD_VERSION=${PV} \ - PYTORCH_BUILD_NUMBER=0 \ - cmake_src_compile -} - -src_install() { - cmake_src_install - - # Used by pytorch ebuild - insinto "/var/lib/${PN}" - doins "${BUILD_DIR}"/CMakeCache.txt - dostrip -x /var/lib/${PN}/functorch.so - - rm -rf python - mkdir -p python/torch || die - cp torch/version.py python/torch/ || die - python_domodule python/torch - - mkdir "${D}"$(python_get_sitedir)/torch/bin || die - mkdir "${D}"$(python_get_sitedir)/torch/lib || die - mkdir "${D}"$(python_get_sitedir)/torch/include || die - - ln -s ../../../../../include/torch \ - "${D}$(python_get_sitedir)"/torch/include/torch || die # bug 923269 - - mv "${D}"/usr/bin/torch_shm_manager \ - 
"${D}"/$(python_get_sitedir)/torch/bin/ || die - - mv "${D}"/usr/$(get_libdir)/libtorch_global_deps.so \ - "${D}"/$(python_get_sitedir)/torch/lib/ || die -} diff --git a/sci-libs/caffe2/caffe2-2.5.1-r8.ebuild b/sci-libs/caffe2/caffe2-2.5.1-r8.ebuild new file mode 100644 index 000000000000..0e476ce00275 --- /dev/null +++ b/sci-libs/caffe2/caffe2-2.5.1-r8.ebuild @@ -0,0 +1,326 @@ +# Copyright 2022-2025 Gentoo Authors +# Distributed under the terms of the GNU General Public License v2 + +EAPI=8 + +PYTHON_COMPAT=( python3_{10..12} ) +ROCM_VERSION=6.1 +inherit python-single-r1 cmake cuda flag-o-matic prefix rocm toolchain-funcs + +MYPN=pytorch +MYP=${MYPN}-${PV} + +DESCRIPTION="A deep learning framework" +HOMEPAGE="https://pytorch.org/" +SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz + -> ${MYP}.tar.gz" + +S="${WORKDIR}"/${MYP} + +LICENSE="BSD" +SLOT="0" +KEYWORDS="~amd64" +IUSE="cuda distributed fbgemm flash gloo mkl mpi nnpack +numpy onednn openblas opencl openmp qnnpack rocm xnnpack" +RESTRICT="test" +REQUIRED_USE=" + ${PYTHON_REQUIRED_USE} + mpi? ( distributed ) + gloo? ( distributed ) + ?? ( cuda rocm ) + rocm? ( + || ( ${ROCM_REQUIRED_USE} ) + !flash + ) +" + +RDEPEND=" + ${PYTHON_DEPS} + dev-cpp/abseil-cpp:= + dev-cpp/gflags:= + >=dev-cpp/glog-0.5.0 + dev-cpp/nlohmann_json + dev-cpp/opentelemetry-cpp + dev-libs/cpuinfo + dev-libs/libfmt + dev-libs/protobuf:= + dev-libs/pthreadpool + dev-libs/sleef + virtual/lapack + sci-libs/onnx + sci-libs/foxi + cuda? ( + dev-libs/cudnn + >=dev-libs/cudnn-frontend-1.0.3:0/8 + dev-util/nvidia-cuda-toolkit:=[profiler] + ) + fbgemm? ( >=dev-libs/FBGEMM-2023.12.01 ) + gloo? ( sci-libs/gloo[cuda?] ) + mpi? ( virtual/mpi ) + nnpack? ( sci-libs/NNPACK ) + numpy? ( $(python_gen_cond_dep ' + dev-python/numpy[${PYTHON_USEDEP}] + ') ) + onednn? ( =dev-libs/oneDNN-3.5* ) + opencl? ( virtual/opencl ) + qnnpack? ( + !sci-libs/QNNPACK + dev-cpp/gemmlowp + ) + rocm? ( + =dev-util/hip-6.1* + =dev-libs/rccl-6.1*[${ROCM_USEDEP}] + =sci-libs/rocThrust-6.1*[${ROCM_USEDEP}] + =sci-libs/rocPRIM-6.1*[${ROCM_USEDEP}] + =sci-libs/hipBLAS-6.1*[${ROCM_USEDEP}] + =sci-libs/hipFFT-6.1*[${ROCM_USEDEP}] + =sci-libs/hipSPARSE-6.1*[${ROCM_USEDEP}] + =sci-libs/hipRAND-6.1*[${ROCM_USEDEP}] + =sci-libs/hipCUB-6.1*[${ROCM_USEDEP}] + =sci-libs/hipSOLVER-6.1*[${ROCM_USEDEP}] + =sci-libs/miopen-6.1*[${ROCM_USEDEP}] + =dev-util/roctracer-6.1*[${ROCM_USEDEP}] + + =sci-libs/hipBLASLt-6.1* + amdgpu_targets_gfx90a? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx90a] ) + amdgpu_targets_gfx940? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx940] ) + amdgpu_targets_gfx941? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx941] ) + amdgpu_targets_gfx942? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx942] ) + ) + distributed? ( + sci-libs/tensorpipe[cuda?] + dev-cpp/cpp-httplib + ) + xnnpack? ( ~sci-libs/XNNPACK-2024.02.29 ) + mkl? ( sci-libs/mkl ) + openblas? ( sci-libs/openblas ) +" + +DEPEND=" + ${RDEPEND} + qnnpack? ( dev-libs/clog ) + cuda? ( <=dev-libs/cutlass-3.4.1 ) + onednn? 
+	dev-libs/psimd
+	dev-libs/FP16
+	dev-libs/FXdiv
+	dev-libs/pocketfft
+	dev-libs/flatbuffers
+	>=sci-libs/kineto-0.4.0_p20240525
+	$(python_gen_cond_dep '
+		dev-python/pyyaml[${PYTHON_USEDEP}]
+		dev-python/pybind11[${PYTHON_USEDEP}]
+		dev-python/typing-extensions[${PYTHON_USEDEP}]
+	')
+"
+
+PATCHES=(
+	"${FILESDIR}"/${P}-unbundle_fmt.patch
+	"${FILESDIR}"/${P}-unbundle_kineto.patch
+	"${FILESDIR}"/${P}-cudnn_include_fix.patch
+	"${FILESDIR}"/${P}-gentoo.patch
+	"${FILESDIR}"/${PN}-2.4.0-cpp-httplib.patch
+	"${FILESDIR}"/${P}-glog-0.6.0.patch
+	"${FILESDIR}"/${P}-newfix-functorch-install.patch
+)
+
+src_prepare() {
+	filter-lto #bug 862672
+
+	# Unbundle fmt
+	sed -i \
+		-e 's|::fmt-header-only||' \
+		c10/CMakeLists.txt \
+		cmake/Dependencies.cmake \
+		torch/CMakeLists.txt \
+		|| die
+
+	# Drop third_party from CMake tree
+	sed -i \
+		-e '/add_subdirectory.*third_party/d' \
+		CMakeLists.txt \
+		cmake/Dependencies.cmake \
+		cmake/ProtoBuf.cmake \
+		aten/src/ATen/CMakeLists.txt \
+		|| die
+	# Change libc10* path
+	sed -i \
+		-e "/EXPORT/s|DESTINATION lib)|DESTINATION $(get_libdir))|" \
+		c10/cuda/CMakeLists.txt \
+		c10/CMakeLists.txt \
+		c10/hip/CMakeLists.txt \
+		|| die
+
+	cmake_src_prepare
+	pushd torch/csrc/jit/serialization || die
+	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+	popd
+
+	# prefixify the hardcoded paths, after all patches are applied
+	hprefixify \
+		aten/CMakeLists.txt \
+		caffe2/CMakeLists.txt \
+		cmake/Metal.cmake \
+		cmake/Modules/*.cmake \
+		cmake/Modules_CUDA_fix/FindCUDNN.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
+		cmake/public/LoadHIP.cmake \
+		cmake/public/cuda.cmake \
+		cmake/Dependencies.cmake \
+		torch/CMakeLists.txt \
+		CMakeLists.txt
+
+	if use rocm; then
+		sed -e "s:/opt/rocm:/usr:" \
+			-e "s:lib/cmake:$(get_libdir)/cmake:g" \
+			-e "s/HIP 1.0/HIP 1.0 REQUIRED/" \
+			-i cmake/public/LoadHIP.cmake || die
+
+		ebegin "HIPifying cuda sources"
+		${EPYTHON} tools/amd_build/build_amd.py || die
+		eend $?
+	fi
+}
+
+src_configure() {
+	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
+		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
+		ewarn "These may not be optimal for your GPU."
+		ewarn ""
+		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
+		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
+ ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5 3.5" + ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell" + ewarn "" + ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus" + ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'" + fi + + local mycmakeargs=( + -DBUILD_CUSTOM_PROTOBUF=OFF + -DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir) + -DPython_EXECUTABLE="${PYTHON}" + -DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir) + -DUSE_CCACHE=OFF + -DUSE_CUDA=$(usex cuda) + -DUSE_DISTRIBUTED=$(usex distributed) + -DUSE_FAKELOWP=OFF + -DUSE_FBGEMM=$(usex fbgemm) + -DUSE_FLASH_ATTENTION=$(usex flash) + -DUSE_GFLAGS=ON + -DUSE_GLOG=ON + -DUSE_GLOO=$(usex gloo) + -DUSE_ITT=OFF + -DUSE_KINETO=OFF # TODO + -DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma + -DUSE_MEM_EFF_ATTENTION=OFF + -DUSE_MKLDNN=$(usex onednn) + -DUSE_MPI=$(usex mpi) + -DUSE_NCCL=OFF + -DUSE_NNPACK=$(usex nnpack) + -DUSE_NUMA=OFF + -DUSE_NUMPY=$(usex numpy) + -DUSE_OPENCL=$(usex opencl) + -DUSE_OPENMP=$(usex openmp) + -DUSE_PYTORCH_QNNPACK=$(usex qnnpack) + -DUSE_PYTORCH_METAL=OFF + -DUSE_ROCM=$(usex rocm) + -DUSE_SYSTEM_CPUINFO=ON + -DUSE_SYSTEM_EIGEN_INSTALL=ON + -DUSE_SYSTEM_FP16=ON + -DUSE_SYSTEM_FXDIV=ON + -DUSE_SYSTEM_GLOO=ON + -DUSE_SYSTEM_ONNX=ON + -DUSE_SYSTEM_PSIMD=ON + -DUSE_SYSTEM_PSIMD=ON + -DUSE_SYSTEM_PTHREADPOOL=ON + -DUSE_SYSTEM_PYBIND11=ON + -DUSE_SYSTEM_SLEEF=ON + -DUSE_SYSTEM_XNNPACK=$(usex xnnpack) + -DUSE_TENSORPIPE=$(usex distributed) + -DUSE_UCC=OFF + -DUSE_VALGRIND=OFF + -DUSE_XNNPACK=$(usex xnnpack) + -DUSE_XPU=OFF + -Wno-dev + ) + + if use mkl; then + mycmakeargs+=(-DBLAS=MKL) + elif use openblas; then + mycmakeargs+=(-DBLAS=OpenBLAS) + else + mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=) + fi + + if use cuda; then + addpredict "/dev/nvidiactl" # bug 867706 + addpredict "/dev/char" + addpredict "/proc/self/task" # bug 926116 + + mycmakeargs+=( + -DUSE_CUDNN=ON + -DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}" + -DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library + -DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")" + ) + elif use rocm; then + export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)" + + mycmakeargs+=( + -DUSE_NCCL=ON + -DUSE_SYSTEM_NCCL=ON + ) + + # ROCm libraries produce too much warnings + append-cxxflags -Wno-deprecated-declarations -Wno-unused-result + + if tc-is-clang; then + # fix mangling in LLVM: https://github.com/llvm/llvm-project/issues/85656 + append-cxxflags -fclang-abi-compat=17 + fi + fi + + if use onednn; then + mycmakeargs+=( + -DMKLDNN_FOUND=ON + -DMKLDNN_LIBRARIES=dnnl + -DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl" + ) + fi + + cmake_src_configure +} + +src_compile() { + PYTORCH_BUILD_VERSION=${PV} \ + PYTORCH_BUILD_NUMBER=0 \ + cmake_src_compile +} + +src_install() { + cmake_src_install + + # Used by pytorch ebuild + insinto "/var/lib/${PN}" + doins "${BUILD_DIR}"/CMakeCache.txt + dostrip -x /var/lib/${PN}/functorch.so + + rm -rf python + mkdir -p python/torch || die + cp torch/version.py python/torch/ || die + python_domodule python/torch + + mkdir "${D}"$(python_get_sitedir)/torch/bin || die + mkdir "${D}"$(python_get_sitedir)/torch/lib || die + mkdir "${D}"$(python_get_sitedir)/torch/include || die + + ln -s ../../../../../include/torch \ + "${D}$(python_get_sitedir)"/torch/include/torch || die # bug 923269 + + mv "${D}"/usr/bin/torch_shm_manager \ + 
"${D}"/$(python_get_sitedir)/torch/bin/ || die + + mv "${D}"/usr/$(get_libdir)/libtorch_global_deps.so \ + "${D}"/$(python_get_sitedir)/torch/lib/ || die +} -- cgit v1.2.3