From 518b45bffd19d0b75715f338985f96c459f9d129 Mon Sep 17 00:00:00 2001
From: V3n3RiX
Date: Wed, 4 Sep 2024 12:28:34 +0100
Subject: gentoo auto-resync : 04:09:2024 - 12:28:34

---
 sci-libs/caffe2/Manifest                           |   3 +-
 sci-libs/caffe2/caffe2-2.4.0-r1.ebuild             | 295 +++++++++++++++++++++
 sci-libs/caffe2/caffe2-2.4.0.ebuild                | 291 --------------------
 .../caffe2/files/caffe2-2.4.0-cpp-httplib.patch    |  13 +
 4 files changed, 310 insertions(+), 292 deletions(-)
 create mode 100644 sci-libs/caffe2/caffe2-2.4.0-r1.ebuild
 delete mode 100644 sci-libs/caffe2/caffe2-2.4.0.ebuild
 create mode 100644 sci-libs/caffe2/files/caffe2-2.4.0-cpp-httplib.patch

(limited to 'sci-libs/caffe2')

diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index 8f6ab50abdad..e3d93a4c00cf 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1,3 +1,4 @@
+AUX caffe2-2.4.0-cpp-httplib.patch 414 BLAKE2B fabe646c86f4c6736600fa44733ab6ea246a7a007a01b1a558a42b32f42ccd9ce63a14ac5a949a07258c8ba06d464ea5bd1de3ab1fe265792e527365f9dbb546 SHA512 c206df54f285bbd7fe5a216f906e6c545e145a21322bd817bf5cee1190d7c3381f626991889cac8b8f586f277e80bf26f393abd01a0e3b5ded97f8c355c46bac
 AUX caffe2-2.4.0-libfmt-11.patch 1560 BLAKE2B 816201b6abc3ad4c4924c649905068306f3a0c674eab36fb6a6772b84d2bf0806dd55d586d6873952b0eac56afae00513966071a380b9e60eff3c668e6aec737 SHA512 acdc879f2ed8ae546a6ce4db1b63faba3b6672374c61940d58e0c2b747c4b0d8d38fb34a2e66f3805f9f328fd4e0115dd705b541871b2fd99a2782756ca583e5
 DIST caffe2-patches-20240809.tar.gz 15242 BLAKE2B 77503c61487e7d85cca5afcab9a6e638f9833a70861845638cf1b62bc492d7b6650e6db81d53ebb2f39c6313509250d339f725f04d03ec6dd23dd0cf70843d8c SHA512 74b3b0b6671b655ecac93f7436c4ed7cb0157a83aafbf6afcc0811e11cef341cd8f638db1a111bcbb01e1a6dd4daf3a36b96d7a8ce90f04c2fa091bd6e3a142b
 DIST pytorch-2.2.2.tar.gz 116367503 BLAKE2B 0be22f2ec4b9aac6f5e976664cae01facf07929a32565cd57d7cc5b2d9888e9ae71ca301853752fe8f31d174d04c9974eb9ed2f3d452360a50ccf024f200726a SHA512 7990e0f9484038c3458c0bda2c863bf2b19e56edab81fc5938c6e0f08b17558287f853bb67350e8cca8f42bec0f1d4ba0e94e50a145db8da44bdd4bd703d91d0
@@ -7,5 +8,5 @@ DIST pytorch-2.4.0.tar.gz 115031093 BLAKE2B d206477963977011627df284efa01482fbf5
 EBUILD caffe2-2.2.2-r1.ebuild 7494 BLAKE2B f09d96b6a6ab71366a3b30ac7f8e6d89a20bf6034fb0d055a59f7f1d31f2dd08742307790982ed1f2768305ccea094e5d1655a2ac7d6e91e24b5620c1ed3f0cd SHA512 a10afef393275946a0fa51b2303a1ceaf567e7df30ff96bdf3343d929c575038945f404cd75118d7f40f49ed8d03ef66a2d8a3aef10ede1b4b411f0efee2e56e
 EBUILD caffe2-2.3.0-r3.ebuild 8468 BLAKE2B b3e5d3bdd08fb7c2beabb0de3244b9de125b283ebd2a1d8dbccdc863b912555726eb40649d67af8593eb64dc9e23f9d021174a0868751fb3a5daf26d98884740 SHA512 6123ad17b0b1de6bd8d8ae3a52a7f7205385e3d54faa5f2fdf4006a9255ac69413caf08f4ba2c79679edadbe5c94cb7c187d5b5f2dc3ed6682d7ae601881e86b
 EBUILD caffe2-2.3.1.ebuild 8489 BLAKE2B 64b0ff8a65f46be8d3a80b4b4c47fb513eaaf78208e2d831ced3fcfa27f39cfa89d8648a64dea73a8b516476867f10cd04a4cb1873149659abe6114f88657ef4 SHA512 4bcb6fe1c3ab47879e57a453649866f8e5bf09da7cce6778de909447d6deb0ae0671512a1d182f5e275d246bb05c4553ff95bed490d4fd3f723653feae95b47e
-EBUILD caffe2-2.4.0.ebuild 8237 BLAKE2B 5072962bb295d6e0aa63e4e5d2f97c33820c7a70f3c71070df6cdf6a7d1f784f42e87072d6540b5db67ded9d56510731482927ac594246f9808743d7f26c13ce SHA512 d321924fd8794744f173a5571aa4adc59183abc1fb792381e81afc094675a170fe6129378e19da46df14c4638d33b72a18f19e4003918d8d5293188adc802537
+EBUILD caffe2-2.4.0-r1.ebuild 8300 BLAKE2B 5da1e3fdaa97ae769d71202317024469dd7a4bcc338963c342d4a879bb50b0e5666dcbbef9dfa7057c0c679a07ff4778abb0c51bfa4b4ba9090255c3f4c5e3c1 SHA512 ab040a6cabc2464d87bfd4237f43fe0edc172071f658e4e396b35a85b1b0ab3ab45fbc897c7bb9b4ac3f2c60e088ecd321d636bb8ea19f39e64f5538cf76da33
 MISC metadata.xml 1225 BLAKE2B ab7fb0bf8b2d37ddaa1a9ecc815eb094e85465d20d3a30af081b42e0b60ade9858d0053b101ba0e7750a90cb48b5b79db9bdc2729bf66d0420732489da62fe54 SHA512 dfb58597fb4bcdd7df0fcc3f2514518e118e8fc9b1cd24868aab60c32a62ff419b8b72a7c294925eff4c8871cc8df606af7fa60bfa99901091d8195101ee1153
diff --git a/sci-libs/caffe2/caffe2-2.4.0-r1.ebuild b/sci-libs/caffe2/caffe2-2.4.0-r1.ebuild
new file mode 100644
index 000000000000..d8e42de52185
--- /dev/null
+++ b/sci-libs/caffe2/caffe2-2.4.0-r1.ebuild
@@ -0,0 +1,295 @@
+# Copyright 2022-2024 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+PYTHON_COMPAT=( python3_{10..12} )
+ROCM_VERSION=6.1
+inherit python-single-r1 cmake cuda flag-o-matic prefix rocm toolchain-funcs
+
+MYPN=pytorch
+MYP=${MYPN}-${PV}
+
+DESCRIPTION="A deep learning framework"
+HOMEPAGE="https://pytorch.org/"
+SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
+	-> ${MYP}.tar.gz
+	https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
+
+S="${WORKDIR}"/${MYP}
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64"
+IUSE="cuda distributed fbgemm flash gloo mkl mpi nnpack +numpy onednn openblas opencl openmp qnnpack rocm xnnpack"
+RESTRICT="test"
+REQUIRED_USE="
+	${PYTHON_REQUIRED_USE}
+	mpi? ( distributed )
+	gloo? ( distributed )
+	?? ( cuda rocm )
+	rocm? (
+		|| ( ${ROCM_REQUIRED_USE} )
+		!flash
+	)
+"
+
+# CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
+RDEPEND="
+	${PYTHON_DEPS}
+	dev-cpp/gflags:=
+	>=dev-cpp/glog-0.5.0
+	dev-libs/cpuinfo
+	dev-libs/libfmt
+	dev-cpp/opentelemetry-cpp
+	dev-libs/protobuf:=
+	dev-libs/pthreadpool
+	dev-libs/sleef
+	virtual/lapack
+	sci-libs/onnx
+	sci-libs/foxi
+	cuda? (
+		dev-libs/cudnn
+		>=dev-libs/cudnn-frontend-1.0.3:0/8
+	)
+	fbgemm? ( >=dev-libs/FBGEMM-2023.12.01 )
+	gloo? ( sci-libs/gloo[cuda?] )
+	mpi? ( virtual/mpi )
+	nnpack? ( sci-libs/NNPACK )
+	numpy? ( $(python_gen_cond_dep '
+		dev-python/numpy[${PYTHON_USEDEP}]
+	') )
+	onednn? ( dev-libs/oneDNN )
+	opencl? ( virtual/opencl )
+	qnnpack? (
+		!sci-libs/QNNPACK
+		dev-cpp/gemmlowp
+	)
+	rocm? (
+		=dev-util/hip-6.1*
+		=dev-libs/rccl-6.1*[${ROCM_USEDEP}]
+		=sci-libs/rocThrust-6.1*[${ROCM_USEDEP}]
+		=sci-libs/rocPRIM-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipBLAS-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipFFT-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipSPARSE-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipRAND-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipCUB-6.1*[${ROCM_USEDEP}]
+		=sci-libs/hipSOLVER-6.1*[${ROCM_USEDEP}]
+		=sci-libs/miopen-6.1*[${ROCM_USEDEP}]
+		=dev-util/roctracer-6.1*[${ROCM_USEDEP}]
+
+		=sci-libs/hipBLASLt-6.1*
+		amdgpu_targets_gfx90a? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx90a] )
+		amdgpu_targets_gfx940? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx940] )
+		amdgpu_targets_gfx941? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx941] )
+		amdgpu_targets_gfx942? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx942] )
+	)
+	distributed? (
+		sci-libs/tensorpipe[cuda?]
+		dev-cpp/cpp-httplib
+	)
+	xnnpack? ( >=sci-libs/XNNPACK-2024.02.29 )
+	mkl? ( sci-libs/mkl )
+	openblas? ( sci-libs/openblas )
+"
+DEPEND="
+	${RDEPEND}
+	cuda? ( >=dev-libs/cutlass-3.4.1 )
+	onednn? ( sci-libs/ideep )
+	dev-libs/psimd
+	dev-libs/FP16
+	dev-libs/FXdiv
+	dev-libs/pocketfft
+	dev-libs/flatbuffers
+	>=sci-libs/kineto-0.4.0_p20240525
+	$(python_gen_cond_dep '
+		dev-python/pyyaml[${PYTHON_USEDEP}]
+		dev-python/pybind11[${PYTHON_USEDEP}]
+		dev-python/typing-extensions[${PYTHON_USEDEP}]
+	')
+"
+
+PATCHES=(
+	../patches/${PN}-2.4.0-gentoo.patch
+	../patches/${PN}-2.4.0-install-dirs.patch
+	../patches/${PN}-1.12.0-glog-0.6.0.patch
+	../patches/${PN}-1.13.1-tensorpipe.patch
+	../patches/${PN}-2.3.0-cudnn_include_fix.patch
+	../patches/${PN}-2.1.2-fix-rpath.patch
+	../patches/${PN}-2.4.0-fix-openmp-link.patch
+	../patches/${PN}-2.4.0-rocm-fix-std-cpp17.patch
+	../patches/${PN}-2.2.2-musl.patch
+	../patches/${PN}-2.4.0-exclude-aotriton.patch
+	../patches/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
+	../patches/${PN}-2.3.0-fix-libcpp.patch
+	"${FILESDIR}"/${PN}-2.4.0-libfmt-11.patch
+	"${FILESDIR}"/${P}-cpp-httplib.patch
+)
+
+src_prepare() {
+	filter-lto #bug 862672
+	sed -i \
+		-e "/third_party\/gloo/d" \
+		cmake/Dependencies.cmake \
+		|| die
+	cmake_src_prepare
+	pushd torch/csrc/jit/serialization || die
+	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+	popd
+	# prefixify the hardcoded paths, after all patches are applied
+	hprefixify \
+		aten/CMakeLists.txt \
+		caffe2/CMakeLists.txt \
+		cmake/Metal.cmake \
+		cmake/Modules/*.cmake \
+		cmake/Modules_CUDA_fix/FindCUDNN.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
+		cmake/public/LoadHIP.cmake \
+		cmake/public/cuda.cmake \
+		cmake/Dependencies.cmake \
+		torch/CMakeLists.txt \
+		CMakeLists.txt
+
+	if use rocm; then
+		sed -e "s:/opt/rocm:/usr:" \
+			-e "s:lib/cmake:$(get_libdir)/cmake:g" \
+			-e "s/HIP 1.0/HIP 1.0 REQUIRED/" \
+			-i cmake/public/LoadHIP.cmake || die
+
+		ebegin "HIPifying cuda sources"
+		${EPYTHON} tools/amd_build/build_amd.py || die
+		eend $?
+	fi
+}
+
+src_configure() {
+	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
+		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
+		ewarn "These may not be optimal for your GPU."
+		ewarn ""
+		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
+		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
+ ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5 3.5" + ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell" + ewarn "" + ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus" + ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'" + fi + + local mycmakeargs=( + -DBUILD_CUSTOM_PROTOBUF=OFF + -DBUILD_SHARED_LIBS=ON + + -DUSE_CCACHE=OFF + -DUSE_CUDA=$(usex cuda) + -DUSE_DISTRIBUTED=$(usex distributed) + -DUSE_MPI=$(usex mpi) + -DUSE_FAKELOWP=OFF + -DUSE_FBGEMM=$(usex fbgemm) + -DUSE_FLASH_ATTENTION=$(usex flash) + -DUSE_MEM_EFF_ATTENTION=OFF + -DUSE_GFLAGS=ON + -DUSE_GLOG=ON + -DUSE_GLOO=$(usex gloo) + -DUSE_KINETO=OFF # TODO + -DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma + -DUSE_MKLDNN=$(usex onednn) + -DUSE_NNPACK=$(usex nnpack) + -DUSE_XNNPACK=$(usex xnnpack) + -DUSE_SYSTEM_XNNPACK=$(usex xnnpack) + -DUSE_TENSORPIPE=$(usex distributed) + -DUSE_PYTORCH_QNNPACK=$(usex qnnpack) + -DUSE_NUMPY=$(usex numpy) + -DUSE_OPENCL=$(usex opencl) + -DUSE_OPENMP=$(usex openmp) + -DUSE_ROCM=$(usex rocm) + -DUSE_SYSTEM_CPUINFO=ON + -DUSE_SYSTEM_PYBIND11=ON + -DUSE_UCC=OFF + -DUSE_VALGRIND=OFF + -DPython_EXECUTABLE="${PYTHON}" + -DUSE_ITT=OFF + -DUSE_SYSTEM_PTHREADPOOL=ON + -DUSE_SYSTEM_PSIMD=ON + -DUSE_SYSTEM_FXDIV=ON + -DUSE_SYSTEM_FP16=ON + -DUSE_SYSTEM_GLOO=ON + -DUSE_SYSTEM_ONNX=ON + -DUSE_SYSTEM_SLEEF=ON + -DUSE_PYTORCH_METAL=OFF + -DUSE_XPU=OFF + + -Wno-dev + -DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir) + -DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir) + ) + + if use mkl; then + mycmakeargs+=(-DBLAS=MKL) + elif use openblas; then + mycmakeargs+=(-DBLAS=OpenBLAS) + else + mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=) + fi + + if use cuda; then + addpredict "/dev/nvidiactl" # bug 867706 + addpredict "/dev/char" + addpredict "/proc/self/task" # bug 926116 + + mycmakeargs+=( + -DUSE_CUDNN=ON + -DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}" + -DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library + -DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")" + ) + elif use rocm; then + export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)" + + mycmakeargs+=( + -DUSE_NCCL=ON + -DUSE_SYSTEM_NCCL=ON + ) + + # ROCm libraries produce too much warnings + append-cxxflags -Wno-deprecated-declarations -Wno-unused-result + + if tc-is-clang; then + # fix mangling in LLVM: https://github.com/llvm/llvm-project/issues/85656 + append-cxxflags -fclang-abi-compat=17 + fi + fi + + if use onednn; then + mycmakeargs+=( + -DUSE_MKLDNN=ON + -DMKLDNN_FOUND=ON + -DMKLDNN_LIBRARIES=dnnl + -DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl" + ) + fi + + cmake_src_configure + + # do not rerun cmake and the build process in src_install + sed '/RERUN/,+1d' -i "${BUILD_DIR}"/build.ninja || die +} + +src_install() { + cmake_src_install + + insinto "/var/lib/${PN}" + doins "${BUILD_DIR}"/CMakeCache.txt + + rm -rf python + mkdir -p python/torch/include || die + mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die + cp torch/version.py python/torch/ || die + python_domodule python/caffe2 + python_domodule python/torch + ln -s ../../../../../include/torch \ + "${D}$(python_get_sitedir)"/torch/include/torch || die # bug 923269 +} diff --git a/sci-libs/caffe2/caffe2-2.4.0.ebuild b/sci-libs/caffe2/caffe2-2.4.0.ebuild deleted file mode 100644 index 81a8906ea669..000000000000 --- a/sci-libs/caffe2/caffe2-2.4.0.ebuild +++ /dev/null @@ -1,291 +0,0 @@ 
-# Copyright 2022-2024 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-PYTHON_COMPAT=( python3_{10..12} )
-ROCM_VERSION=6.1
-inherit python-single-r1 cmake cuda flag-o-matic prefix rocm toolchain-funcs
-
-MYPN=pytorch
-MYP=${MYPN}-${PV}
-
-DESCRIPTION="A deep learning framework"
-HOMEPAGE="https://pytorch.org/"
-SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
-	-> ${MYP}.tar.gz
-	https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
-
-S="${WORKDIR}"/${MYP}
-
-LICENSE="BSD"
-SLOT="0"
-KEYWORDS="~amd64"
-IUSE="cuda distributed fbgemm flash gloo mkl mpi nnpack +numpy onednn openblas opencl openmp qnnpack rocm xnnpack"
-RESTRICT="test"
-REQUIRED_USE="
-	${PYTHON_REQUIRED_USE}
-	mpi? ( distributed )
-	gloo? ( distributed )
-	?? ( cuda rocm )
-	rocm? (
-		|| ( ${ROCM_REQUIRED_USE} )
-		!flash
-	)
-"
-
-# CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
-RDEPEND="
-	${PYTHON_DEPS}
-	dev-cpp/gflags:=
-	>=dev-cpp/glog-0.5.0
-	dev-libs/cpuinfo
-	dev-libs/libfmt
-	dev-cpp/opentelemetry-cpp
-	dev-libs/protobuf:=
-	dev-libs/pthreadpool
-	dev-libs/sleef
-	virtual/lapack
-	sci-libs/onnx
-	sci-libs/foxi
-	cuda? (
-		dev-libs/cudnn
-		>=dev-libs/cudnn-frontend-1.0.3:0/8
-	)
-	fbgemm? ( >=dev-libs/FBGEMM-2023.12.01 )
-	gloo? ( sci-libs/gloo[cuda?] )
-	mpi? ( virtual/mpi )
-	nnpack? ( sci-libs/NNPACK )
-	numpy? ( $(python_gen_cond_dep '
-		dev-python/numpy[${PYTHON_USEDEP}]
-	') )
-	onednn? ( dev-libs/oneDNN )
-	opencl? ( virtual/opencl )
-	qnnpack? (
-		!sci-libs/QNNPACK
-		dev-cpp/gemmlowp
-	)
-	rocm? (
-		=dev-util/hip-6.1*
-		=dev-libs/rccl-6.1*[${ROCM_USEDEP}]
-		=sci-libs/rocThrust-6.1*[${ROCM_USEDEP}]
-		=sci-libs/rocPRIM-6.1*[${ROCM_USEDEP}]
-		=sci-libs/hipBLAS-6.1*[${ROCM_USEDEP}]
-		=sci-libs/hipFFT-6.1*[${ROCM_USEDEP}]
-		=sci-libs/hipSPARSE-6.1*[${ROCM_USEDEP}]
-		=sci-libs/hipRAND-6.1*[${ROCM_USEDEP}]
-		=sci-libs/hipCUB-6.1*[${ROCM_USEDEP}]
-		=sci-libs/hipSOLVER-6.1*[${ROCM_USEDEP}]
-		=sci-libs/miopen-6.1*[${ROCM_USEDEP}]
-		=dev-util/roctracer-6.1*[${ROCM_USEDEP}]
-
-		=sci-libs/hipBLASLt-6.1*
-		amdgpu_targets_gfx90a? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx90a] )
-		amdgpu_targets_gfx940? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx940] )
-		amdgpu_targets_gfx941? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx941] )
-		amdgpu_targets_gfx942? ( =sci-libs/hipBLASLt-6.1*[amdgpu_targets_gfx942] )
-	)
-	distributed? ( sci-libs/tensorpipe[cuda?] )
-	xnnpack? ( >=sci-libs/XNNPACK-2024.02.29 )
-	mkl? ( sci-libs/mkl )
-	openblas? ( sci-libs/openblas )
-"
-DEPEND="
-	${RDEPEND}
-	cuda? ( >=dev-libs/cutlass-3.4.1 )
-	onednn? ( sci-libs/ideep )
-	dev-libs/psimd
-	dev-libs/FP16
-	dev-libs/FXdiv
-	dev-libs/pocketfft
-	dev-libs/flatbuffers
-	>=sci-libs/kineto-0.4.0_p20240525
-	$(python_gen_cond_dep '
-		dev-python/pyyaml[${PYTHON_USEDEP}]
-		dev-python/pybind11[${PYTHON_USEDEP}]
-		dev-python/typing-extensions[${PYTHON_USEDEP}]
-	')
-"
-
-PATCHES=(
-	../patches/${PN}-2.4.0-gentoo.patch
-	../patches/${PN}-2.4.0-install-dirs.patch
-	../patches/${PN}-1.12.0-glog-0.6.0.patch
-	../patches/${PN}-1.13.1-tensorpipe.patch
-	../patches/${PN}-2.3.0-cudnn_include_fix.patch
-	../patches/${PN}-2.1.2-fix-rpath.patch
-	../patches/${PN}-2.4.0-fix-openmp-link.patch
-	../patches/${PN}-2.4.0-rocm-fix-std-cpp17.patch
-	../patches/${PN}-2.2.2-musl.patch
-	../patches/${PN}-2.4.0-exclude-aotriton.patch
-	../patches/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
-	../patches/${PN}-2.3.0-fix-libcpp.patch
-	"${FILESDIR}"/${PN}-2.4.0-libfmt-11.patch
-)
-
-src_prepare() {
-	filter-lto #bug 862672
-	sed -i \
-		-e "/third_party\/gloo/d" \
-		cmake/Dependencies.cmake \
-		|| die
-	cmake_src_prepare
-	pushd torch/csrc/jit/serialization || die
-	flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
-	popd
-	# prefixify the hardcoded paths, after all patches are applied
-	hprefixify \
-		aten/CMakeLists.txt \
-		caffe2/CMakeLists.txt \
-		cmake/Metal.cmake \
-		cmake/Modules/*.cmake \
-		cmake/Modules_CUDA_fix/FindCUDNN.cmake \
-		cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
-		cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
-		cmake/public/LoadHIP.cmake \
-		cmake/public/cuda.cmake \
-		cmake/Dependencies.cmake \
-		torch/CMakeLists.txt \
-		CMakeLists.txt
-
-	if use rocm; then
-		sed -e "s:/opt/rocm:/usr:" \
-			-e "s:lib/cmake:$(get_libdir)/cmake:g" \
-			-e "s/HIP 1.0/HIP 1.0 REQUIRED/" \
-			-i cmake/public/LoadHIP.cmake || die
-
-		ebegin "HIPifying cuda sources"
-		${EPYTHON} tools/amd_build/build_amd.py || die
-		eend $?
-	fi
-}
-
-src_configure() {
-	if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
-		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
-		ewarn "These may not be optimal for your GPU."
-		ewarn ""
-		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
-		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
- ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5 3.5" - ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell" - ewarn "" - ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus" - ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'" - fi - - local mycmakeargs=( - -DBUILD_CUSTOM_PROTOBUF=OFF - -DBUILD_SHARED_LIBS=ON - - -DUSE_CCACHE=OFF - -DUSE_CUDA=$(usex cuda) - -DUSE_DISTRIBUTED=$(usex distributed) - -DUSE_MPI=$(usex mpi) - -DUSE_FAKELOWP=OFF - -DUSE_FBGEMM=$(usex fbgemm) - -DUSE_FLASH_ATTENTION=$(usex flash) - -DUSE_MEM_EFF_ATTENTION=OFF - -DUSE_GFLAGS=ON - -DUSE_GLOG=ON - -DUSE_GLOO=$(usex gloo) - -DUSE_KINETO=OFF # TODO - -DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma - -DUSE_MKLDNN=$(usex onednn) - -DUSE_NNPACK=$(usex nnpack) - -DUSE_XNNPACK=$(usex xnnpack) - -DUSE_SYSTEM_XNNPACK=$(usex xnnpack) - -DUSE_TENSORPIPE=$(usex distributed) - -DUSE_PYTORCH_QNNPACK=$(usex qnnpack) - -DUSE_NUMPY=$(usex numpy) - -DUSE_OPENCL=$(usex opencl) - -DUSE_OPENMP=$(usex openmp) - -DUSE_ROCM=$(usex rocm) - -DUSE_SYSTEM_CPUINFO=ON - -DUSE_SYSTEM_PYBIND11=ON - -DUSE_UCC=OFF - -DUSE_VALGRIND=OFF - -DPython_EXECUTABLE="${PYTHON}" - -DUSE_ITT=OFF - -DUSE_SYSTEM_PTHREADPOOL=ON - -DUSE_SYSTEM_PSIMD=ON - -DUSE_SYSTEM_FXDIV=ON - -DUSE_SYSTEM_FP16=ON - -DUSE_SYSTEM_GLOO=ON - -DUSE_SYSTEM_ONNX=ON - -DUSE_SYSTEM_SLEEF=ON - -DUSE_PYTORCH_METAL=OFF - -DUSE_XPU=OFF - - -Wno-dev - -DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir) - -DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir) - ) - - if use mkl; then - mycmakeargs+=(-DBLAS=MKL) - elif use openblas; then - mycmakeargs+=(-DBLAS=OpenBLAS) - else - mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=) - fi - - if use cuda; then - addpredict "/dev/nvidiactl" # bug 867706 - addpredict "/dev/char" - addpredict "/proc/self/task" # bug 926116 - - mycmakeargs+=( - -DUSE_CUDNN=ON - -DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}" - -DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library - -DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")" - ) - elif use rocm; then - export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)" - - mycmakeargs+=( - -DUSE_NCCL=ON - -DUSE_SYSTEM_NCCL=ON - ) - - # ROCm libraries produce too much warnings - append-cxxflags -Wno-deprecated-declarations -Wno-unused-result - - if tc-is-clang; then - # fix mangling in LLVM: https://github.com/llvm/llvm-project/issues/85656 - append-cxxflags -fclang-abi-compat=17 - fi - fi - - if use onednn; then - mycmakeargs+=( - -DUSE_MKLDNN=ON - -DMKLDNN_FOUND=ON - -DMKLDNN_LIBRARIES=dnnl - -DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl" - ) - fi - - cmake_src_configure - - # do not rerun cmake and the build process in src_install - sed '/RERUN/,+1d' -i "${BUILD_DIR}"/build.ninja || die -} - -src_install() { - cmake_src_install - - insinto "/var/lib/${PN}" - doins "${BUILD_DIR}"/CMakeCache.txt - - rm -rf python - mkdir -p python/torch/include || die - mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die - cp torch/version.py python/torch/ || die - python_domodule python/caffe2 - python_domodule python/torch - ln -s ../../../../../include/torch \ - "${D}$(python_get_sitedir)"/torch/include/torch || die # bug 923269 -} diff --git a/sci-libs/caffe2/files/caffe2-2.4.0-cpp-httplib.patch b/sci-libs/caffe2/files/caffe2-2.4.0-cpp-httplib.patch new file mode 100644 index 000000000000..5d684a4a4738 --- /dev/null +++ 
b/sci-libs/caffe2/files/caffe2-2.4.0-cpp-httplib.patch @@ -0,0 +1,13 @@ +--- a/torch/lib/libshm/CMakeLists.txt 2024-09-04 06:09:51.943752841 +0200 ++++ b/torch/lib/libshm/CMakeLists.txt 2024-09-04 06:10:52.243821438 +0200 +@@ -24,6 +24,10 @@ + CXX_STANDARD 17) + target_link_libraries(shm PRIVATE ${TORCH_CPU_LIB}) + ++if (USE_DISTRIBUTED) ++ target_link_libraries(shm PRIVATE cpp-httplib) ++endif() ++ + if(UNIX AND NOT APPLE) + include(CheckLibraryExists) + find_package(Threads REQUIRED) -- cgit v1.2.3