# PyTorch 0.4.1: tensors and dynamic neural networks in Python, with
# optional CUDA acceleration (cudaSupport). Pass cudatoolkit/cudnn from
# the caller when enabling CUDA.
{ buildPythonPackage, pythonOlder,
  cudaSupport ? false, cudatoolkit ? null, cudnn ? null,
  fetchFromGitHub, lib, numpy, pyyaml, cffi, typing, cmake,
  linkFarm, symlinkJoin,
  utillinux, which }:

# cuDNN is only usable on top of the CUDA toolkit, and enabling CUDA
# support without a toolkit cannot work.
assert cudnn == null || cudatoolkit != null;
assert !cudaSupport || cudatoolkit != null;

let
  # cudatoolkit is a multi-output derivation; the build expects a single
  # prefix holding both the binaries and the libraries, so join them.
  # (Lazy: only forced when cudaSupport is enabled.)
  cudatoolkit_joined = symlinkJoin {
    name = "${cudatoolkit.name}-unsplit";
    paths = [ cudatoolkit.out cudatoolkit.lib ];
  };

  # Normally libcuda.so.1 is provided at runtime by nvidia-x11 via
  # LD_LIBRARY_PATH=/run/opengl-driver/lib. We only use the stub
  # libcuda.so from cudatoolkit for running tests, so that we don't have
  # to recompile pytorch on every update to nvidia-x11 or the kernel.
  cudaStub = linkFarm "cuda-stub" [{
    name = "libcuda.so.1";
    path = "${cudatoolkit}/lib/stubs/libcuda.so";
  }];

  # Environment prefix for the test runner: point the dynamic linker at
  # the stub only when CUDA support is enabled (empty string otherwise).
  # The trailing space separates it from the command it prefixes.
  cudaStubEnv = lib.optionalString cudaSupport
    "LD_LIBRARY_PATH=${cudaStub}\${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} ";

in buildPythonPackage rec {
  version = "0.4.1";
  pname = "pytorch";

  src = fetchFromGitHub {
    owner = "pytorch";
    repo = "pytorch";
    rev = "v${version}";
    fetchSubmodules = true;
    sha256 = "1cr8h47jxgfar5bamyvlayvqymnb2qvp7rr0ka2d2d4rdldf9lrp";
  };

  # Build with the compilers the CUDA toolkit was built against, and
  # expose the cuDNN headers when cuDNN is available.
  preConfigure = lib.optionalString cudaSupport ''
    export CC=${cudatoolkit.cc}/bin/gcc CXX=${cudatoolkit.cc}/bin/g++
  '' + lib.optionalString (cudaSupport && cudnn != null) ''
    export CUDNN_INCLUDE_DIR=${cudnn}/include
  '';

  # Rewrite the RPATH of the caffe2 libraries: drop the first two
  # entries and prepend $ORIGIN so the libraries resolve their siblings
  # relative to their own location.
  preFixup = ''
    function join_by { local IFS="$1"; shift; echo "$*"; }
    function strip2 {
      IFS=':'
      read -ra RP <<< $(patchelf --print-rpath $1)
      IFS=' '
      RP_NEW=$(join_by : ''${RP[@]:2})
      patchelf --set-rpath \$ORIGIN:''${RP_NEW} "$1"
    }

    for f in $(find ''${out} -name 'libcaffe2*.so')
    do
      strip2 $f
    done
  '';

  buildInputs = [
    cmake
    numpy.blas
    utillinux
    which
  ] ++ lib.optionals cudaSupport [ cudatoolkit_joined cudnn ];

  propagatedBuildInputs = [
    cffi
    numpy
    pyyaml
  ] ++ lib.optional (pythonOlder "3.5") typing; # typing backport pre-3.5

  # The excluded test suites fail with sandboxing.
  checkPhase = ''
    ${cudaStubEnv}python test/run_test.py --exclude dataloader sparse torch utils distributed
  '';

  meta = {
    description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration";
    homepage = "https://pytorch.org/";
    license = lib.licenses.bsd3;
    platforms = lib.platforms.linux;
    maintainers = with lib.maintainers; [ teh ];
  };
}