# Nix derivation for PyTorch 1.0.0, built from the upstream GitHub source
# (with submodules) as a Python package. CUDA support is opt-in via
# `cudaSupport`; cuDNN is optional on top of that.
{ stdenv, fetchurl, buildPythonPackage, pythonOlder,
  cudaSupport ? false, cudatoolkit ? null, cudnn ? null,
  fetchFromGitHub, lib, numpy, pyyaml, cffi, typing, cmake, hypothesis, numactl,
  linkFarm, symlinkJoin,
  utillinux, which }:

# cudnn is only meaningful when a cudatoolkit is supplied.
assert cudnn == null || cudatoolkit != null;
# Enabling cudaSupport without a cudatoolkit would fail later anyway;
# fail fast at evaluation time instead.
assert !cudaSupport || cudatoolkit != null;

let
  # cudatoolkit is a multi-output derivation (`out` and `lib`); the build
  # expects a single prefix, so merge both outputs into one tree.
  cudatoolkit_joined = symlinkJoin {
    name = "${cudatoolkit.name}-unsplit";
    paths = [ cudatoolkit.out cudatoolkit.lib ];
  };

  # Normally libcuda.so.1 is provided at runtime by nvidia-x11 via
  # LD_LIBRARY_PATH=/run/opengl-driver/lib. We only use the stub
  # libcuda.so from cudatoolkit for running tests, so that we don't have
  # to recompile pytorch on every update to nvidia-x11 or the kernel.
  cudaStub = linkFarm "cuda-stub" [{
    name = "libcuda.so.1";
    path = "${cudatoolkit}/lib/stubs/libcuda.so";
  }];
  # Prefix (note trailing space) prepended to the checkPhase command line;
  # empty when cudaSupport is off. The `\${...}` escape keeps the existing
  # LD_LIBRARY_PATH expansion for the shell rather than Nix.
  cudaStubEnv = lib.optionalString cudaSupport
    "LD_LIBRARY_PATH=${cudaStub}\${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} ";

in buildPythonPackage rec {
  version = "1.0.0";
  pname = "pytorch";

  # fetchSubmodules is required: pytorch vendors third_party/ as submodules.
  src = fetchFromGitHub {
    owner = "pytorch";
    repo = "pytorch";
    rev = "v${version}";
    fetchSubmodules = true;
    sha256 = "076cpbig4sywn9vv674c0xdg832sdrd5pk1d0725pjkm436kpvlm";
  };

  patches =
    [ # Skips two tests that are only meant to run on multi GPUs
      (fetchurl {
        url = "https://github.com/pytorch/pytorch/commit/bfa666eb0deebac21b03486e26642fd70d66e478.patch";
        sha256 = "1fgblcj02gjc0y62svwc5gnml879q3x2z7m69c9gax79dpr37s9i";
      })
    ];

  # Build with the compiler that matches the CUDA toolkit (nvcc is only
  # compatible with specific gcc versions); cuDNN headers are pointed to
  # explicitly when present.
  preConfigure = lib.optionalString cudaSupport ''
    export CC=${cudatoolkit.cc}/bin/gcc CXX=${cudatoolkit.cc}/bin/g++
  '' + lib.optionalString (cudaSupport && cudnn != null) ''
    export CUDNN_INCLUDE_DIR=${cudnn}/include
  '';

  # Rewrite the RPATH of every libcaffe2*.so in $out: `strip2` drops the
  # first two entries of the existing rpath (RP[@]:2) and prepends a
  # literal $ORIGIN so the libraries resolve siblings relative to their
  # own location. `''${...}` is a Nix escape producing a shell ${...}.
  # NOTE(review): which two rpath entries are being dropped is not visible
  # here — presumably build-tree paths; confirm against the build log.
  preFixup = ''
    function join_by { local IFS="$1"; shift; echo "$*"; }
    function strip2 {
      IFS=':'
      read -ra RP <<< $(patchelf --print-rpath $1)
      IFS=' '
      RP_NEW=$(join_by : ''${RP[@]:2})
      patchelf --set-rpath \$ORIGIN:''${RP_NEW} "$1"
    }

    for f in $(find ''${out} -name 'libcaffe2*.so')
    do
      strip2 $f
    done
  '';

  # Override the (weirdly) wrong version set by default. See
  # https://github.com/NixOS/nixpkgs/pull/52437#issuecomment-449718038
  # https://github.com/pytorch/pytorch/blob/v1.0.0/setup.py#L267
  PYTORCH_BUILD_VERSION = version;
  PYTORCH_BUILD_NUMBER = 0;

  # Suppress a weird warning in mkl-dnn, part of ideep in pytorch
  # (upstream seems to have fixed this in the wrong place?)
  # https://github.com/intel/mkl-dnn/commit/8134d346cdb7fe1695a2aa55771071d455fae0bc
  NIX_CFLAGS_COMPILE = lib.optionals (numpy.blasImplementation == "mkl") [ "-Wno-error=array-bounds" ];

  nativeBuildInputs = [
    cmake
    utillinux
    which
  ];

  # Link against the same BLAS implementation numpy was built with;
  # numactl is Linux-only.
  buildInputs = [
    numpy.blas
  ] ++ lib.optionals cudaSupport [ cudatoolkit_joined cudnn ]
    ++ lib.optionals stdenv.isLinux [ numactl ];

  # `typing` was added to the stdlib in Python 3.5; only older
  # interpreters need the backport.
  propagatedBuildInputs = [
    cffi
    numpy
    pyyaml
  ] ++ lib.optional (pythonOlder "3.5") typing;

  checkInputs = [ hypothesis ];

  # Run the upstream test driver, excluding suites that need hardware or
  # features unavailable in the sandbox; cudaStubEnv injects the stub
  # libcuda.so.1 (see above) when CUDA is enabled.
  checkPhase = ''
    ${cudaStubEnv}python test/run_test.py --exclude dataloader sparse torch utils thd_distributed distributed cpp_extensions
  '';

  meta = {
    description = "Open source, prototype-to-production deep learning platform";
    homepage = https://pytorch.org/;
    license = lib.licenses.bsd3;
    platforms = lib.platforms.linux;
    maintainers = with lib.maintainers; [ teh thoughtpolice ];
  };
}