Merge pull request #30433 from abbradar/cudatoolkit

CUDA-related updates and cleanups
Nikolay Amiantov 2017-10-19 13:18:43 +03:00 committed by GitHub
commit 15bfc8c12a
20 changed files with 400 additions and 561 deletions

View File

@ -1,4 +1,4 @@
{ stdenv, stdenv_gcc5, lib, fetchurl, boost, cmake, ffmpeg, gettext, glew
{ stdenv, lib, fetchurl, boost, cmake, ffmpeg, gettext, glew
, ilmbase, libXi, libX11, libXext, libXrender
, libjpeg, libpng, libsamplerate, libsndfile
, libtiff, mesa, openal, opencolorio, openexr, openimageio, openjpeg_1, python
@ -10,7 +10,7 @@
with lib;
(if cudaSupport then stdenv_gcc5 else stdenv).mkDerivation rec {
stdenv.mkDerivation rec {
name = "blender-2.79";
src = fetchurl {
@ -57,9 +57,8 @@ with lib;
++ optional jackaudioSupport "-DWITH_JACK=ON"
++ optionals cudaSupport
[ "-DWITH_CYCLES_CUDA_BINARIES=ON"
# Disable the sm_20 architecture to work around a segfault in
# ptxas, as suggested on #blendercoders.
"-DCYCLES_CUDA_BINARIES_ARCH=sm_21;sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61"
# Disable architectures before sm_30 to support new CUDA toolkits.
"-DCYCLES_CUDA_BINARIES_ARCH=sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61"
]
++ optional colladaSupport "-DWITH_OPENCOLLADA=ON";
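Blender now builds with the default stdenv, and the Cycles CUDA kernels are toggled purely through the nixpkgs config (the all-packages.nix hunk further down wires cudaSupport = config.cudaSupport or false). A minimal sketch of a user config that would turn this on, assuming the usual ~/.config/nixpkgs/config.nix location:

# Hypothetical ~/.config/nixpkgs/config.nix enabling CUDA builds globally
{
  cudaSupport = true;   # picked up via `config.cudaSupport or false`
  allowUnfree = true;   # cudatoolkit and cudnn are unfree
}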

View File

@ -22,22 +22,23 @@ assert pythonSupport -> (python != null && numpy != null);
stdenv.mkDerivation rec {
name = "caffe-${version}";
version = "1.0-rc5";
version = "1.0";
src = fetchFromGitHub {
owner = "BVLC";
repo = "caffe";
rev = "rc5";
sha256 = "0lfmmc0n6xvkpygvxclzrvd0zigb4yfc5612anv2ahlxpfi9031c";
rev = version;
sha256 = "104jp3cm823i3cdph7hgsnj6l77ygbwsy35mdmzhmsi4jxprd9j3";
};
enableParallelBuilding = true;
nativeBuildInputs = [ cmake doxygen ];
cmakeFlags = [ "-DCUDA_ARCH_NAME=All" ]
++ lib.optional (!cudaSupport) "-DCPU_ONLY=ON"
++ lib.optional (!pythonSupport) "-DBUILD_python=OFF";
cmakeFlags = [
"-DCUDA_ARCH_NAME=All"
(if pythonSupport then "-Dpython_version=${python.version}" else "-DBUILD_python=OFF")
] ++ lib.optional (!cudaSupport) "-DCPU_ONLY=ON";
buildInputs = [ boost google-gflags glog protobuf hdf5-cpp lmdb leveldb snappy opencv atlas ]
++ lib.optional cudaSupport cudatoolkit
@ -49,6 +50,16 @@ stdenv.mkDerivation rec {
outputs = [ "bin" "out"];
propagatedBuildOutputs = []; # otherwise propagates out -> bin cycle
preConfigure = lib.optionalString (cudaSupport && lib.versionAtLeast cudatoolkit.version "9.0") ''
# CUDA 9.0 doesn't support sm_20
sed -i 's,20 21(20) ,,' cmake/Cuda.cmake
'' + lib.optionalString (python.isPy3 or false) ''
sed -i \
-e 's,"python-py''${boost_py_version}",python3,g' \
-e 's,''${Boost_PYTHON-PY''${boost_py_version}_FOUND},''${Boost_PYTHON3_FOUND},g' \
cmake/Dependencies.cmake
'';
postInstall = ''
# Internal static library.
rm $out/lib/libproto.a
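Caffe reads cudaSupport and cudnnSupport from the nixpkgs config (see the all-packages.nix hunk below), but both can also be forced per package. A sketch of an overlay doing that; the caffe-cuda attribute name is made up:

# Hypothetical overlay forcing a CUDA + cuDNN Caffe regardless of global config
self: super: {
  caffe-cuda = super.caffe.override {
    cudaSupport = true;
    cudnnSupport = true;
  };
}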

View File

@ -1,30 +1,44 @@
{ lib, stdenv, fetchurl, patchelf, perl, ncurses, expat, python27, zlib
{ lib, stdenv, makeWrapper, fetchurl, requireFile, patchelf, perl, ncurses, expat, python27, zlib
, gcc48, gcc49, gcc5, gcc6
, xorg, gtk2, glib, fontconfig, freetype, unixODBC, alsaLib, glibc
}:
let
common =
{ version, url, sha256
args@{ gcc, version, sha256
, url ? ""
, name ? ""
, developerProgram ? false
, python ? python27
}:
stdenv.mkDerivation rec {
name = "cudatoolkit-${version}";
inherit (args) version;
dontPatchELF = true;
dontStrip = true;
src =
if stdenv.system == "x86_64-linux" then
fetchurl {
inherit url sha256;
if developerProgram then
requireFile {
message = ''
This nix expression requires that ${args.name} is already part of the store.
Register yourself to NVIDIA Accelerated Computing Developer Program, retrieve the CUDA toolkit
at https://developer.nvidia.com/cuda-toolkit, and run the following command in the download directory:
nix-prefetch-url file://${args.name}
'';
inherit (args) name sha256;
}
else throw "cudatoolkit does not support platform ${stdenv.system}";
else
fetchurl {
inherit (args) url sha256;
};
outputs = [ "out" "lib" "doc" ];
buildInputs = [ perl ];
nativeBuildInputs = [ perl makeWrapper ];
runtimeDependencies = [
ncurses expat python zlib glibc
@ -37,8 +51,8 @@ let
unpackPhase = ''
sh $src --keep --noexec
cd pkg/run_files
sh cuda-linux64-rel-${version}-*.run --keep --noexec
sh cuda-samples-linux-${version}-*.run --keep --noexec
sh cuda-linux*.run --keep --noexec
sh cuda-samples*.run --keep --noexec
cd pkg
'';
@ -92,15 +106,25 @@ let
# Remove OpenCL libraries as they are provided by ocl-icd and driver.
rm -f $out/lib64/libOpenCL*
# Set compiler for NVCC.
wrapProgram $out/bin/nvcc \
--prefix PATH : ${gcc}/bin
'' + lib.optionalString (lib.versionOlder version "8.0") ''
# Hack to fix building against recent Glibc/GCC.
echo "NIX_CFLAGS_COMPILE+=' -D_FORCE_INLINES'" >> $out/nix-support/setup-hook
'';
passthru = {
cc = gcc;
majorVersion =
let versionParts = lib.splitString "." version;
in "${lib.elemAt versionParts 0}.${lib.elemAt versionParts 1}";
};
meta = with stdenv.lib; {
description = "A compiler for NVIDIA GPUs, math libraries, and tools";
homepage = https://developer.nvidia.com/cuda-toolkit;
platforms = platforms.linux;
homepage = "https://developer.nvidia.com/cuda-toolkit";
platforms = [ "x86_64-linux" ];
license = licenses.unfree;
};
};
@ -109,32 +133,44 @@ in {
cudatoolkit6 = common {
version = "6.0.37";
url = http://developer.download.nvidia.com/compute/cuda/6_0/rel/installers/cuda_6.0.37_linux_64.run;
url = "http://developer.download.nvidia.com/compute/cuda/6_0/rel/installers/cuda_6.0.37_linux_64.run";
sha256 = "991e436c7a6c94ec67cf44204d136adfef87baa3ded270544fa211179779bc40";
gcc = gcc48;
};
cudatoolkit65 = common {
version = "6.5.19";
url = http://developer.download.nvidia.com/compute/cuda/6_5/rel/installers/cuda_6.5.19_linux_64.run;
url = "http://developer.download.nvidia.com/compute/cuda/6_5/rel/installers/cuda_6.5.19_linux_64.run";
sha256 = "1x9zdmk8z784d3d35vr2ak1l4h5v4jfjhpxfi9fl9dvjkcavqyaj";
gcc = gcc48;
};
cudatoolkit7 = common {
version = "7.0.28";
url = http://developer.download.nvidia.com/compute/cuda/7_0/Prod/local_installers/cuda_7.0.28_linux.run;
url = "http://developer.download.nvidia.com/compute/cuda/7_0/Prod/local_installers/cuda_7.0.28_linux.run";
sha256 = "1km5hpiimx11jcazg0h3mjzk220klwahs2vfqhjavpds5ff2wafi";
gcc = gcc49;
};
cudatoolkit75 = common {
version = "7.5.18";
url = http://developer.download.nvidia.com/compute/cuda/7.5/Prod/local_installers/cuda_7.5.18_linux.run;
url = "http://developer.download.nvidia.com/compute/cuda/7.5/Prod/local_installers/cuda_7.5.18_linux.run";
sha256 = "1v2ylzp34ijyhcxyh5p6i0cwawwbbdhni2l5l4qm21s1cx9ish88";
gcc = gcc49;
};
cudatoolkit8 = common {
version = "8.0.61";
url = https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/cuda_8.0.61_375.26_linux-run;
url = "https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/cuda_8.0.61_375.26_linux-run";
sha256 = "1i4xrsqbad283qffvysn88w2pmxzxbbby41lw0j1113z771akv4w";
gcc = gcc5;
};
cudatoolkit9 = common {
version = "9.0.176";
url = "https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda_9.0.176_384.81_linux-run";
sha256 = "0308rmmychxfa4inb1ird9bpgfppgr9yrfg1qp0val5azqik91ln";
gcc = gcc6;
};
}
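Each toolkit now carries passthru.cc (the GCC release it officially supports) and passthru.majorVersion, so consumers no longer need a separate gcc5 argument; the OpenCV hunk below passes cudatoolkit.cc as the CUDA host compiler this way. A sketch of an imaginary downstream derivation using the same attributes:

# Sketch only: an imaginary derivation consuming the new passthru attributes
{ stdenv, cmake, cudatoolkit }:

stdenv.mkDerivation {
  name = "cuda-demo-${cudatoolkit.majorVersion}";   # e.g. "cuda-demo-9.0"
  src = ./.;                                        # placeholder source
  nativeBuildInputs = [ cmake ];
  buildInputs = [ cudatoolkit ];
  # nvcc has to use a GCC release the toolkit supports; cudatoolkit.cc is
  # exactly that compiler, so CMake-based builds can pass it through directly.
  cmakeFlags = [ "-DCUDA_HOST_COMPILER=${cudatoolkit.cc}/bin/gcc" ];
}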

View File

@ -16,7 +16,7 @@
, enableGStreamer ? false, gst_all_1
, enableEigen ? true, eigen
, enableOpenblas ? true, openblas
, enableCuda ? false, cudatoolkit, gcc5
, enableCuda ? false, cudatoolkit
, enableTesseract ? false, tesseract, leptonica
, AVFoundation, Cocoa, QTKit
}:
@ -145,7 +145,7 @@ stdenv.mkDerivation rec {
# simply enabled automatically if contrib is built, and it detects
# tesseract & leptonica.
++ lib.optionals enableTesseract [ tesseract leptonica ]
++ lib.optionals enableCuda [ cudatoolkit gcc5 ]
++ lib.optional enableCuda cudatoolkit
++ lib.optional buildContrib protobuf
++ lib.optionals stdenv.isDarwin [ AVFoundation Cocoa QTKit ];
@ -165,8 +165,10 @@ stdenv.mkDerivation rec {
(opencvFlag "OPENEXR" enableEXR)
(opencvFlag "CUDA" enableCuda)
(opencvFlag "CUBLAS" enableCuda)
] ++ lib.optionals enableCuda [ "-DCUDA_FAST_MATH=ON" ]
++ lib.optional buildContrib "-DBUILD_PROTOBUF=off"
] ++ lib.optionals enableCuda [
"-DCUDA_FAST_MATH=ON"
"-DCUDA_HOST_COMPILER=${cudatoolkit.cc}/bin/gcc"
] ++ lib.optional buildContrib "-DBUILD_PROTOBUF=off"
++ lib.optionals stdenv.isDarwin ["-DWITH_OPENCL=OFF" "-DWITH_LAPACK=OFF"];
enableParallelBuilding = true;
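With the gcc5 pin gone, getting a CUDA-enabled OpenCV only needs enableCuda; the host compiler now comes from the toolkit's passthru. A sketch as an overlay, with a made-up opencv3-cuda attribute name:

# Hypothetical overlay: CUDA-enabled OpenCV 3 using the default cudatoolkit
self: super: {
  opencv3-cuda = super.opencv3.override { enableCuda = true; };
}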

View File

@ -1,17 +1,17 @@
{ lib, stdenv, stdenv_gcc5, fetchurl, fetchFromGitHub, cmake, pkgconfig, xorg, mesa_glu
{ lib, stdenv, fetchurl, fetchFromGitHub, cmake, pkgconfig, xorg, mesa_glu
, mesa_noglu, glew, ocl-icd, python3
, cudaSupport ? false, cudatoolkit
}:
(if cudaSupport then stdenv_gcc5 else stdenv).mkDerivation rec {
stdenv.mkDerivation rec {
name = "opensubdiv-${version}";
version = "3.2.0";
version = "3.3.0";
src = fetchFromGitHub {
owner = "PixarAnimationStudios";
repo = "OpenSubdiv";
rev = "v${lib.replaceChars ["."] ["_"] version}";
sha256 = "0wk12n1s8za3sz8d6bmfm3rfjyx20j48gy1xp57dvbnjvlvzqy3w";
sha256 = "0wpjwfik4q9s4r30hndhzmfyzv968mmg5lgng0123l07mn47d2yl";
};
outputs = [ "out" "dev" ];
@ -30,7 +30,7 @@
"-DNO_EXAMPLES=1"
"-DGLEW_INCLUDE_DIR=${glew.dev}/include"
"-DGLEW_LIBRARY=${glew.dev}/lib"
];
] ++ lib.optional cudaSupport "-DOSD_CUDA_NVCC_FLAGS=--gpu-architecture=compute_30";
enableParallelBuilding = true;

View File

@ -1,73 +0,0 @@
{ stdenv
, fetchFromGitHub
, cmake
, gfortran
, blas
, boost
, python
, ocl-icd
, cudatoolkit
, nvidia_x11
, gtest
}:
stdenv.mkDerivation rec {
name = "clblas-cuda-${version}";
version = "git-20160505";
src = fetchFromGitHub {
owner = "clMathLibraries";
repo = "clBLAS";
rev = "d20977ec4389c6b3751e318779410007c5e272f8";
sha256 = "1jna176cxznv7iz43svd6cjrbbf0fc2lrbpfpg4s08vc7xnwp0n4";
};
patches = [ ./platform.patch ];
postPatch = ''
sed -i -re 's/(set\(\s*Boost_USE_STATIC_LIBS\s+).*/\1OFF\ \)/g' src/CMakeLists.txt
'';
configurePhase = ''
findInputs ${boost.dev} boost_dirs propagated-native-build-inputs
export BOOST_INCLUDEDIR=$(echo $boost_dirs | sed -e s/\ /\\n/g - | grep '\-dev')/include
export BOOST_LIBRARYDIR=$(echo $boost_dirs | sed -e s/\ /\\n/g - | grep -v '\-dev')/lib
mkdir -p Build
pushd Build
export LD_LIBRARY_PATH="${stdenv.lib.makeLibraryPath [ blas nvidia_x11 ]}"
cmake ../src -DCMAKE_INSTALL_PREFIX=$out \
-DCMAKE_BUILD_TYPE=Release \
-DOPENCL_ROOT=${cudatoolkit} \
-DUSE_SYSTEM_GTEST=ON
'';
dontStrip = true;
buildInputs = [
cmake
gfortran
blas
python
ocl-icd
cudatoolkit
nvidia_x11
gtest
];
meta = with stdenv.lib; {
homepage = https://github.com/clMathLibraries/clBLAS;
description = "A software library containing BLAS functions written in OpenCL";
longDescription = ''
This package contains a library of BLAS functions on top of OpenCL.
The current version is linked to the NVIDIA OpenCL implementation provided by the CUDA toolkit.
'';
license = licenses.asl20;
maintainers = with maintainers; [ artuuge ];
platforms = platforms.linux;
};
}

View File

@ -0,0 +1,62 @@
{ stdenv
, fetchFromGitHub
, cmake
, gfortran
, blas
, boost
, python
, ocl-icd
, opencl-headers
, gtest
}:
stdenv.mkDerivation rec {
name = "clblas-${version}";
version = "2.12";
src = fetchFromGitHub {
owner = "clMathLibraries";
repo = "clBLAS";
rev = "v${version}";
sha256 = "154mz52r5hm0jrp5fqrirzzbki14c1jkacj75flplnykbl36ibjs";
};
patches = [ ./platform.patch ];
postPatch = ''
sed -i -re 's/(set\(\s*Boost_USE_STATIC_LIBS\s+).*/\1OFF\ \)/g' src/CMakeLists.txt
'';
preConfigure = ''
cd src
'';
cmakeFlags = [
"-DUSE_SYSTEM_GTEST=ON"
];
buildInputs = [
cmake
gfortran
blas
python
ocl-icd
opencl-headers
boost
gtest
];
enableParallelBuilding = true;
meta = with stdenv.lib; {
homepage = "https://github.com/clMathLibraries/clBLAS";
description = "A software library containing BLAS functions written in OpenCL";
longDescription = ''
This package contains a library of BLAS functions on top of OpenCL.
'';
license = licenses.asl20;
maintainers = with maintainers; [ artuuge ];
platforms = platforms.linux;
};
}
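The replacement clBLAS expression is OpenCL-only and relies on ocl-icd plus the vendor ICD at run time instead of linking the CUDA toolkit. A sketch of a shell environment for developing against it; the shell.nix framing is an assumption, not part of this change:

# Hypothetical shell.nix for developing against the OpenCL clBLAS
with import <nixpkgs> {};

stdenv.mkDerivation {
  name = "clblas-env";
  buildInputs = [ clblas ocl-icd opencl-headers ];
}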

View File

@ -1,46 +0,0 @@
{ stdenv
, requireFile
, cudatoolkit
}:
stdenv.mkDerivation rec {
version = "5.0";
cudatoolkit_version = "7.5";
name = "cudatoolkit-${cudatoolkit_version}-cudnn-${version}";
src = requireFile rec {
name = "cudnn-${cudatoolkit_version}-linux-x64-v${version}-ga.tgz";
message = ''
This nix expression requires that ${name} is already part of the store.
Register yourself to NVIDIA Accelerated Computing Developer Program, retrieve the cuDNN library
at https://developer.nvidia.com/cudnn, and run the following command in the download directory:
nix-prefetch-url file://${name}
'';
sha256 = "c4739a00608c3b66a004a74fc8e721848f9112c5cb15f730c1be4964b3a23b3a";
};
phases = "unpackPhase installPhase fixupPhase";
installPhase = ''
function fixRunPath {
p=$(patchelf --print-rpath $1)
patchelf --set-rpath "$p:${stdenv.lib.makeLibraryPath [ stdenv.cc.cc ]}" $1
}
fixRunPath lib64/libcudnn.so
mkdir -p $out
cp -a include $out/include
cp -a lib64 $out/lib64
'';
propagatedBuildInputs = [
cudatoolkit
];
meta = {
description = "NVIDIA CUDA Deep Neural Network library (cuDNN)";
homepage = https://developer.nvidia.com/cudnn;
license = stdenv.lib.licenses.unfree;
};
}

View File

@ -1,40 +0,0 @@
{ stdenv
, requireFile
, cudatoolkit
, fetchurl
}:
stdenv.mkDerivation rec {
version = "5.1";
cudatoolkit_version = "8.0";
name = "cudatoolkit-${cudatoolkit_version}-cudnn-${version}";
src = fetchurl {
url = "http://developer.download.nvidia.com/compute/redist/cudnn/v5.1/cudnn-8.0-linux-x64-v5.1.tgz";
sha256 = "1kj50smlkm347wfbfqvy09ylvad1zapqjc9yqvfykmiddyrij1y1";
};
installPhase = ''
function fixRunPath {
p=$(patchelf --print-rpath $1)
patchelf --set-rpath "$p:${stdenv.lib.makeLibraryPath [ stdenv.cc.cc ]}" $1
}
fixRunPath lib64/libcudnn.so
mkdir -p $out
cp -a include $out/include
cp -a lib64 $out/lib64
'';
propagatedBuildInputs = [
cudatoolkit
];
meta = with stdenv.lib; {
description = "NVIDIA CUDA Deep Neural Network library (cuDNN)";
homepage = https://developer.nvidia.com/cudnn;
license = stdenv.lib.licenses.unfree;
maintainers = with maintainers; [ mdaiter ];
};
}

View File

@ -1,40 +0,0 @@
{ stdenv
, requireFile
, cudatoolkit
, fetchurl
}:
stdenv.mkDerivation rec {
version = "6.0";
cudatoolkit_version = "8.0";
name = "cudatoolkit-${cudatoolkit_version}-cudnn-${version}";
src = fetchurl {
url = "http://developer.download.nvidia.com/compute/redist/cudnn/v6.0/cudnn-8.0-linux-x64-v6.0.tgz";
sha256 = "173zpgrk55ri8if7s5yngsc89ajd6hz4pss4cdxlv6lcyh5122cv";
};
installPhase = ''
function fixRunPath {
p=$(patchelf --print-rpath $1)
patchelf --set-rpath "$p:${stdenv.lib.makeLibraryPath [ stdenv.cc.cc ]}" $1
}
fixRunPath lib64/libcudnn.so
mkdir -p $out
cp -a include $out/include
cp -a lib64 $out/lib64
'';
propagatedBuildInputs = [
cudatoolkit
];
meta = with stdenv.lib; {
description = "NVIDIA CUDA Deep Neural Network library (cuDNN)";
homepage = https://developer.nvidia.com/cudnn;
license = stdenv.lib.licenses.unfree;
maintainers = with maintainers; [ jyp ];
};
}

View File

@ -1,34 +1,45 @@
{ stdenv, requireFile, cudatoolkit }:
stdenv.mkDerivation rec {
version = "4.0";
name = "cudnn-${version}";
src = requireFile rec {
name = "cudnn-7.0-linux-x64-v${version}-prod.tgz";
message = ''
This nix expression requires that ${name} is
already part of the store. Register yourself to NVIDIA Accelerated Computing Developer Program
and download cuDNN library at https://developer.nvidia.com/cudnn, and store it to the nix store with nix-store --add-fixed sha256 <FILE>.
'';
sha256 = "0zgr6qdbc29qw6sikhrh6diwwz7150rqc8a49f2qf37j2rvyyr2f";
{ callPackage, cudatoolkit7, cudatoolkit75, cudatoolkit8, cudatoolkit9 }:
let
generic = args: callPackage (import ./generic.nix (removeAttrs args ["cudatoolkit"])) {
inherit (args) cudatoolkit;
};
phases = "unpackPhase installPhase fixupPhase";
in
propagatedBuildInputs = [ cudatoolkit ];
{
cudnn_cudatoolkit7 = generic rec {
version = "4.0";
cudatoolkit = cudatoolkit7;
srcName = "cudnn-${cudatoolkit.majorVersion}-linux-x64-v${version}-prod.tgz";
sha256 = "0zgr6qdbc29qw6sikhrh6diwwz7150rqc8a49f2qf37j2rvyyr2f";
};
installPhase = ''
mkdir -p $out
cp -a include $out/include
cp -a lib64 $out/lib64
'';
cudnn_cudatoolkit75 = generic rec {
version = "6.0";
cudatoolkit = cudatoolkit75;
srcName = "cudnn-${cudatoolkit.majorVersion}-linux-x64-v${version}.tgz";
sha256 = "0b68hv8pqcvh7z8xlgm4cxr9rfbjs0yvg1xj2n5ap4az1h3lp3an";
};
meta = {
description = "NVIDIA CUDA Deep Neural Network library (cuDNN)";
homepage = https://developer.nvidia.com/cudnn;
license = stdenv.lib.licenses.unfree;
cudnn6_cudatoolkit8 = generic rec {
version = "6.0";
cudatoolkit = cudatoolkit8;
srcName = "cudnn-${cudatoolkit.majorVersion}-linux-x64-v${version}.tgz";
sha256 = "173zpgrk55ri8if7s5yngsc89ajd6hz4pss4cdxlv6lcyh5122cv";
};
cudnn_cudatoolkit8 = generic rec {
version = "7.0";
cudatoolkit = cudatoolkit8;
srcName = "cudnn-${cudatoolkit.majorVersion}-linux-x64-v7.tgz";
sha256 = "19yjdslrslwv5ic4vgpzb0fa0mqbgi6a66b7gc66vdc9n9589398";
};
cudnn_cudatoolkit9 = generic rec {
version = "7.0";
cudatoolkit = cudatoolkit9;
srcName = "cudnn-${cudatoolkit.majorVersion}-linux-x64-v7.tgz";
sha256 = "1ld5x819vya6p2ppmr7i3lz9ac2y81kssgbzgd0lsign7r2qjapc";
};
}
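All cuDNN variants are now generated from a single generic expression and named after the toolkit they target, so a matching toolkit/cuDNN pair can be pinned explicitly. A sketch of an environment combining CUDA 9 with its cuDNN build (the tarballs still have to be added to the store first, as the requireFile message in generic.nix explains):

# Hypothetical env pairing CUDA 9 with its cuDNN build; both attribute names
# come from the set above, and unfree packages must be allowed.
with import <nixpkgs> { config.allowUnfree = true; };

stdenv.mkDerivation {
  name = "cudnn-env";
  buildInputs = [ cudatoolkit9 cudnn_cudatoolkit9 ];
}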

View File

@ -1,29 +1,34 @@
{ version
, srcName
, sha256
}:
{ stdenv
, lib
, requireFile
, cudatoolkit
}:
stdenv.mkDerivation rec {
version = "5.0";
cudatoolkit_version = "8.0";
name = "cudatoolkit-${cudatoolkit.majorVersion}-cudnn-${version}";
name = "cudatoolkit-${cudatoolkit_version}-cudnn-${version}";
inherit version;
src = requireFile rec {
name = "cudnn-${cudatoolkit_version}-linux-x64-v${version}-ga.tgz";
name = srcName;
inherit sha256;
message = ''
This nix expression requires that ${name} is already part of the store.
Register yourself to NVIDIA Accelerated Computing Developer Program, retrieve the cuDNN library
at https://developer.nvidia.com/cudnn, and run the following command in the download directory:
nix-prefetch-url file://${name}
'';
sha256 = "af80eb1ce0cb51e6a734b2bdc599e6d50b676eab3921e5bddfe5443485df86b6";
};
installPhase = ''
function fixRunPath {
p=$(patchelf --print-rpath $1)
patchelf --set-rpath "$p:${stdenv.lib.makeLibraryPath [ stdenv.cc.cc ]}" $1
patchelf --set-rpath "$p:${lib.makeLibraryPath [ stdenv.cc.cc ]}" $1
}
fixRunPath lib64/libcudnn.so
@ -36,10 +41,16 @@ stdenv.mkDerivation rec {
cudatoolkit
];
passthru = {
inherit cudatoolkit;
majorVersion = lib.head (lib.splitString "." version);
};
meta = with stdenv.lib; {
description = "NVIDIA CUDA Deep Neural Network library (cuDNN)";
homepage = https://developer.nvidia.com/cudnn;
license = stdenv.lib.licenses.unfree;
homepage = "https://developer.nvidia.com/cudnn";
license = licenses.unfree;
platforms = [ "x86_64-linux" ];
maintainers = with maintainers; [ mdaiter ];
};
}

View File

@ -0,0 +1,75 @@
{ stdenv
, lib
, fetchPypi
, gcc
, writeScriptBin
, buildPythonPackage
, isPyPy
, pythonOlder
, isPy3k
, nose
, numpy
, pydot_ng
, scipy
, six
, libgpuarray
, cudaSupport ? false, cudatoolkit
, cudnnSupport ? false, cudnn
}:
assert cudnnSupport -> cudaSupport;
let
extraFlags =
lib.optionals cudaSupport [ "-I ${cudatoolkit}/include" "-L ${cudatoolkit}/lib" ]
++ lib.optionals cudnnSupport [ "-I ${cudnn}/include" "-L ${cudnn}/lib" ];
gcc_ = writeScriptBin "g++" ''
#!${stdenv.shell}
export NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE ${toString extraFlags}"
exec ${gcc}/bin/g++ "$@"
'';
libgpuarray_ = libgpuarray.override { inherit cudaSupport; };
in buildPythonPackage rec {
name = "${pname}-${version}";
pname = "Theano";
version = "0.9.0";
disabled = isPyPy || pythonOlder "2.6" || (isPy3k && pythonOlder "3.3");
src = fetchPypi {
inherit pname version;
sha256 = "05xwg00da8smkvkh6ywbywqzj8dw7x840jr74wqhdy9icmqncpbl";
};
postPatch = ''
sed -i 's,g++,${gcc_}/bin/g++,g' theano/configdefaults.py
'' + lib.optionalString cudnnSupport ''
sed -i \
-e "s,ctypes.util.find_library('cudnn'),'${cudnn}/lib/libcudnn.so',g" \
-e "s/= _dnn_check_compile()/= (True, None)/g" \
theano/gpuarray/dnn.py
'';
preCheck = ''
mkdir -p check-phase
export HOME=$(pwd)/check-phase
'';
doCheck = false;
# takes far too long, also throws "TypeError: sort() missing 1 required positional argument: 'a'"
# when run from the installer, and testing with Python 3.5 hits github.com/Theano/Theano/issues/4276,
# the fix for which hasn't been merged yet.
# keep Nose around since running the tests by hand is possible from Python or bash
checkInputs = [ nose ];
propagatedBuildInputs = [ numpy numpy.blas scipy six libgpuarray_ ];
meta = with stdenv.lib; {
homepage = http://deeplearning.net/software/theano/;
description = "A Python library for large-scale array computation";
license = licenses.bsd3;
maintainers = with maintainers; [ maintainers.bcdarwin ];
};
}
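Theano now takes cudaSupport and cudnnSupport as arguments and wraps g++ so the CUDA and cuDNN paths are always visible to the compiler. The python-packages.nix hunk further down exposes a pre-overridden TheanoWithCuda; a sketch of pulling it into a Python environment:

# Hypothetical expression for a Python environment with CUDA-enabled Theano
with import <nixpkgs> { config.allowUnfree = true; };

python3.withPackages (ps: [ ps.TheanoWithCuda ])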

View File

@ -1,65 +0,0 @@
{ buildPythonPackage
, fetchFromGitHub
, pythonOlder
, future
, numpy
, six
, scipy
, nose
, nose-parameterized
, pydot_ng
, sphinx
, pygments
, libgpuarray
, python
, pycuda
, cudatoolkit
, cudnn
, stdenv
}:
buildPythonPackage rec {
name = "Theano-cuda-${version}";
version = "0.8.2";
src = fetchFromGitHub {
owner = "Theano";
repo = "Theano";
rev = "46fbfeb628220b5e42bf8277a5955c52d153e874";
sha256 = "1sl91gli3jaw5gpjqqab4fiq4x6282spqciaid1s65pjsf3k55sc";
};
doCheck = false;
patchPhase = ''
pushd theano/sandbox/gpuarray
sed -i -re '2s/^/from builtins import bytes\n/g' subtensor.py
sed -i -re "s/(b'2')/int(bytes(\1))/g" subtensor.py
sed -i -re "s/(ctx.bin_id\[\-2\])/int(\1)/g" subtensor.py
sed -i -re '2s/^/from builtins import bytes\n/g' dnn.py
sed -i -re "s/(b'30')/int(bytes(\1))/g" dnn.py
sed -i -re "s/(ctx.bin_id\[\-2:\])/int(\1)/g" dnn.py
popd
'';
dontStrip = true;
propagatedBuildInputs = [
numpy.blas
numpy
six
scipy
nose
nose-parameterized
pydot_ng
sphinx
pygments
pycuda
cudatoolkit
libgpuarray
cudnn
] ++ (stdenv.lib.optional (pythonOlder "3.0") future);
passthru.cudaSupport = true;
}

View File

@ -1,44 +0,0 @@
{ stdenv
, fetchurl
, buildPythonPackage
, isPyPy
, pythonOlder
, isPy3k
, nose
, numpy
, pydot_ng
, scipy
, six
}:
buildPythonPackage rec {
name = "Theano-0.9.0";
disabled = isPyPy || pythonOlder "2.6" || (isPy3k && pythonOlder "3.3");
src = fetchurl {
url = "mirror://pypi/T/Theano/${name}.tar.gz";
sha256 = "05xwg00da8smkvkh6ywbywqzj8dw7x840jr74wqhdy9icmqncpbl";
};
#preCheck = ''
# mkdir -p check-phase
# export HOME=$(pwd)/check-phase
#'';
doCheck = false;
# takes far too long, also throws "TypeError: sort() missing 1 required positional argument: 'a'"
# when run from the installer, and testing with Python 3.5 hits github.com/Theano/Theano/issues/4276,
# the fix for which hasn't been merged yet.
# keep Nose around since running the tests by hand is possible from Python or bash
propagatedBuildInputs = [ nose numpy numpy.blas pydot_ng scipy six ];
meta = {
homepage = http://deeplearning.net/software/theano/;
description = "A Python library for large-scale array computation";
license = stdenv.lib.licenses.bsd3;
maintainers = [ stdenv.lib.maintainers.bcdarwin ];
};
passthru.cudaSupport = false;
}

View File

@ -1,129 +0,0 @@
{ stdenv
, buildPythonPackage
, fetchFromGitHub
, cmake
, cython
, numpy
, Mako
, six
, nose
, beaker
, memcached
, pkgconfig
, glibc
, clblas
, Babel
, pygments
, scipy
, python
, cudatoolkit
, nvidia_x11
}:
buildPythonPackage rec {
name = "libgpuarray-cuda-${version}";
version = "-9998.0";
src = fetchFromGitHub {
owner = "Theano";
repo = "libgpuarray";
rev = "fc36a40526c0a8303ace6c574ffdefba7feafe17";
sha256 = "1kb0k42addqjxiahlcbv6v6271yhsmz71j12186fpy60870i7zm7";
};
doCheck = true;
configurePhase = ''
mkdir -p Build/Install
pushd Build
cmake .. -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=./Install \
-DCLBLAS_ROOT_DIR=${clblas}
popd
'';
preBuild = ''
pushd Build
make
make install
function fixRunPath {
p=$(patchelf --print-rpath $1)
patchelf --set-rpath "$p:${stdenv.lib.makeLibraryPath [ cudatoolkit clblas nvidia_x11 ]}" $1
}
fixRunPath Install/lib/libgpuarray.so
popd
'';
setupPyBuildFlags = [ "-L $(pwd)/Build/Install/lib" "-I $(pwd)/Build/Install/include" ];
preInstall = ''
cp -r Build/Install $out
'';
postInstall = ''
pushd $out/${python.sitePackages}/pygpu
for f in $(find $out/pygpu -name "*.h"); do
ln -s $f $(basename $f)
done
popd
'';
checkPhase = ''
mkdir -p my_bin
pushd my_bin
cat > libgpuarray_run_tests << EOF
#!/bin/sh
if [ \$# -eq 0 ]; then
echo "No argument provided."
echo "Available tests:"
ls $out/${python.sitePackages}/pygpu/tests | grep "test_"
exit 1
else
nosetests -v "$out/${python.sitePackages}/pygpu/tests/\$@"
fi
EOF
chmod +x libgpuarray_run_tests
popd
cp -r my_bin $out/bin
'';
dontStrip = true;
propagatedBuildInputs = [
numpy
scipy
nose
six
Mako
];
buildInputs = [
cmake
cython
beaker
memcached
pkgconfig
glibc
Babel
pygments
numpy.blas
cudatoolkit
nvidia_x11
clblas
];
meta = with stdenv.lib; {
homepage = https://github.com/Theano/libgpuarray;
description = "Library to manipulate tensors on GPU.";
license = licenses.free;
maintainers = with maintainers; [ artuuge ];
platforms = platforms.linux;
};
}

View File

@ -0,0 +1,81 @@
{ stdenv
, lib
, buildPythonPackage
, fetchFromGitHub
, cmake
, cython
, numpy
, six
, nose
, Mako
, python
, cudaSupport ? false, cudatoolkit
, openclSupport ? true, ocl-icd, clblas
}:
buildPythonPackage rec {
name = "libgpuarray-${version}";
version = "0.6.9";
src = fetchFromGitHub {
owner = "Theano";
repo = "libgpuarray";
rev = "v${version}";
sha256 = "06z47ls42a37gbv0x7f3l1qvils7q0hvy02s95l530klgibp19s0";
};
# requires a GPU
doCheck = false;
configurePhase = "cmakeConfigurePhase";
libraryPath = lib.makeLibraryPath (
[]
++ lib.optionals cudaSupport [ cudatoolkit.lib cudatoolkit.out ]
++ lib.optionals openclSupport [ ocl-icd clblas ]
);
preBuild = ''
make -j$NIX_BUILD_CORES
make install
ls $out/lib
export NIX_CFLAGS_COMPILE="-L $out/lib -I $out/include $NIX_CFLAGS_COMPILE"
cd ..
'';
postFixup = ''
rm $out/lib/libgpuarray-static.a
function fixRunPath {
p=$(patchelf --print-rpath $1)
patchelf --set-rpath "$p:$libraryPath" $1
}
fixRunPath $out/lib/libgpuarray.so
'';
propagatedBuildInputs = [
numpy
six
Mako
];
enableParallelBuilding = true;
buildInputs = [
cmake
cython
nose
];
meta = with stdenv.lib; {
homepage = "https://github.com/Theano/libgpuarray";
description = "Library to manipulate tensors on GPU.";
license = licenses.free;
maintainers = with maintainers; [ artuuge ];
platforms = platforms.linux;
};
}
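libgpuarray now exposes cudaSupport and openclSupport flags, which is how the new Theano expression obtains a CUDA build (libgpuarray.override { inherit cudaSupport; }). A sketch of requesting the CUDA variant directly from the Python package set:

# Hypothetical: build the CUDA variant of libgpuarray on its own
with import <nixpkgs> { config.allowUnfree = true; };

pythonPackages.libgpuarray.override {
  cudaSupport = true;
  openclSupport = false;
}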

View File

@ -1577,27 +1577,19 @@ with pkgs;
cudatoolkit65
cudatoolkit7
cudatoolkit75
cudatoolkit8;
cudatoolkit8
cudatoolkit9;
cudatoolkit = cudatoolkit8;
cudatoolkit = cudatoolkit9;
cudnn = callPackage ../development/libraries/science/math/cudnn/default.nix {};
inherit (callPackages ../development/libraries/science/math/cudnn { })
cudnn_cudatoolkit7
cudnn_cudatoolkit75
cudnn6_cudatoolkit8
cudnn_cudatoolkit8
cudnn_cudatoolkit9;
cudnn5_cudatoolkit75 = callPackage ../development/libraries/science/math/cudnn/7.5-5.0 {
cudatoolkit = cudatoolkit75;
};
cudnn5_cudatoolkit80 = callPackage ../development/libraries/science/math/cudnn/8.0-5.0 {
cudatoolkit = cudatoolkit8;
};
cudnn51_cudatoolkit80 = callPackage ../development/libraries/science/math/cudnn/8.0-5.1 {
cudatoolkit = cudatoolkit8;
};
cudnn60_cudatoolkit80 = callPackage ../development/libraries/science/math/cudnn/8.0-6.0 {
cudatoolkit = cudatoolkit8;
};
cudnn = cudnn_cudatoolkit9;
curlFull = curl.override {
idnSupport = true;
@ -10033,6 +10025,8 @@ with pkgs;
};
opencv3 = callPackage ../development/libraries/opencv/3.x.nix {
enableCuda = config.cudaSupport or false;
cudatoolkit = cudatoolkit8;
inherit (darwin.apple_sdk.frameworks) AVFoundation Cocoa QTKit;
};
@ -10095,7 +10089,7 @@ with pkgs;
};
opensubdiv = callPackage ../development/libraries/opensubdiv {
stdenv_gcc5 = overrideCC stdenv gcc5;
cudaSupport = config.cudaSupport or false;
cmake = cmake_2_8;
};
@ -13832,7 +13826,7 @@ with pkgs;
bleachbit = callPackage ../applications/misc/bleachbit { };
blender = callPackage ../applications/misc/blender {
stdenv_gcc5 = overrideCC stdenv gcc5;
cudaSupport = config.cudaSupport or false;
python = python35;
};
@ -18365,10 +18359,7 @@ with pkgs;
blas = callPackage ../development/libraries/science/math/blas { };
clblas-cuda = callPackage ../development/libraries/science/math/clblas/cuda {
cudatoolkit = pkgs.cudatoolkit75;
inherit (linuxPackages) nvidia_x11;
};
clblas = callPackage ../development/libraries/science/math/clblas { };
jags = callPackage ../applications/science/math/jags { };
@ -18750,8 +18741,7 @@ with pkgs;
caffe = callPackage ../applications/science/math/caffe rec {
cudaSupport = config.caffe.cudaSupport or config.cudaSupport or false;
# CUDA 8 doesn't support GCC 6.
stdenv = if cudaSupport then overrideCC pkgs.stdenv gcc5 else pkgs.stdenv;
cudnnSupport = cudaSupport;
};
ecm = callPackage ../applications/science/math/ecm { };
@ -18775,7 +18765,9 @@ with pkgs;
sbcl = null;
};
mxnet = callPackage ../applications/science/math/mxnet {
mxnet = callPackage ../applications/science/math/mxnet rec {
cudaSupport = config.cudaSupport or false;
cudnnSupport = cudaSupport;
inherit (linuxPackages) nvidia_x11;
};
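Caffe additionally honours a package-specific knob, config.caffe.cudaSupport, so CUDA can be enabled for it alone without flipping the global cudaSupport. A sketch of such a config, again assuming the usual config.nix location:

# Hypothetical config.nix: CUDA for Caffe only, everything else stays CPU
{
  allowUnfree = true;
  caffe.cudaSupport = true;
}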

View File

@ -11129,14 +11129,12 @@ in {
};
});
libgpuarray-cuda = callPackage ../development/python-modules/libgpuarray/cuda/default.nix rec {
inherit (self) numpy scipy;
inherit (pkgs.linuxPackages) nvidia_x11;
cudatoolkit = pkgs.cudatoolkit75;
clblas = pkgs.clblas-cuda;
libgpuarray = callPackage ../development/python-modules/libgpuarray {
clblas = pkgs.clblas.override { boost = self.boost; };
cudaSupport = pkgs.config.cudaSupport or false;
};
libnacl = callPackage ../development/python-modules/libnacl/default.nix {
libnacl = callPackage ../development/python-modules/libnacl {
inherit (pkgs) libsodium;
};
@ -19835,24 +19833,20 @@ in {
stevedore = callPackage ../development/python-modules/stevedore {};
Theano = self.TheanoWithoutCuda;
Theano = callPackage ../development/python-modules/Theano rec {
cudaSupport = pkgs.config.cudaSupport or false;
cudnnSupport = cudaSupport;
};
TheanoWithoutCuda = callPackage ../development/python-modules/Theano/theano-without-cuda { };
TheanoWithoutCuda = self.Theano.override {
cudaSupport = false;
cudnnSupport = false;
};
TheanoWithCuda = callPackage ../development/python-modules/Theano/theano-with-cuda (
let
boost = pkgs.boost159.override {
inherit (self) python numpy scipy;
};
in rec {
cudatoolkit = pkgs.cudatoolkit75;
cudnn = pkgs.cudnn5_cudatoolkit75;
inherit (self) numpy scipy;
pycuda = self.pycuda.override { inherit boost; };
libgpuarray = self.libgpuarray-cuda.override {
clblas = pkgs.clblas-cuda.override { inherit boost; };
};
});
TheanoWithCuda = self.Theano.override {
cudaSupport = true;
cudnnSupport = true;
};
tidylib = buildPythonPackage rec {
version = "0.2.4";
@ -25980,14 +25974,16 @@ EOF
tensorflow-tensorboard = callPackage ../development/python-modules/tensorflow-tensorboard { };
tensorflow = self.tensorflowWithoutCuda;
tensorflow = callPackage ../development/python-modules/tensorflow {
cudaSupport = pkgs.config.cudaSupport or false;
};
tensorflowWithoutCuda = callPackage ../development/python-modules/tensorflow { };
tensorflowWithoutCuda = self.tensorflow.override {
cudaSupport = false;
};
tensorflowWithCuda = callPackage ../development/python-modules/tensorflow {
tensorflowWithCuda = self.tensorflow.override {
cudaSupport = true;
cudatoolkit = pkgs.cudatoolkit8;
cudnn = pkgs.cudnn60_cudatoolkit80;
};
tflearn = buildPythonPackage rec {
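tensorflowWithCuda and tensorflowWithoutCuda are now plain overrides of a single tensorflow expression, so choosing the GPU build no longer means pinning a toolkit and cuDNN by hand. A sketch of a Python environment using the CUDA variant:

# Hypothetical expression for a Python environment with GPU TensorFlow
with import <nixpkgs> { config.allowUnfree = true; };

python3.withPackages (ps: [ ps.tensorflowWithCuda ])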