{ lib
, glibc
, fetchFromGitHub
, makeWrapper
, buildGoPackage
, linkFarm
, writeShellScript
, containerRuntimePath
, configTemplate
}:
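
# A hypothetical call site, showing how the two non-package arguments are
# expected to be wired up. The attribute names and paths here are purely
# illustrative, not taken from this file:
#
#   nvidia-container-runtime = callPackage ./default.nix {
#     containerRuntimePath = "${runc}/bin/runc";
#     configTemplate = ./config.toml;
#   };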

let
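  # linkFarm builds a store directory containing a single symlink, "runc",
  # pointing at containerRuntimePath; prepending that directory to PATH below
  # exposes exactly one container runtime and nothing else.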
  isolatedContainerRuntimePath = linkFarm "isolated_container_runtime_path" [
    {
      name = "runc";
      path = containerRuntimePath;
    }
  ];
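
  # writeShellScript yields an executable file in the store; wrapProgram runs
  # it (via --run below) before the real binary starts, so users get a visible
  # warning when $XDG_CONFIG_HOME overrides the packaged configuration.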
  warnIfXdgConfigHomeIsSet = writeShellScript "warn_if_xdg_config_home_is_set" ''
    set -eo pipefail

    if [ -n "$XDG_CONFIG_HOME" ]; then
      echo >&2 "$(tput setaf 3)warning: \$XDG_CONFIG_HOME=$XDG_CONFIG_HOME$(tput sgr 0)"
    fi
  '';
in
buildGoPackage rec {
  pname = "nvidia-container-runtime";
  version = "3.5.0";

  src = fetchFromGitHub {
    owner = "NVIDIA";
    repo = pname;
    rev = "v${version}";
    sha256 = "sha256-+LZjsN/tKqsPJamoI8xo9LFv14c3e9vVlSP4NJhElcs=";
  };
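
  # buildGoPackage does a GOPATH-style (pre-modules) build; goPackagePath is
  # the import path under which the source is placed inside GOPATH.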
  goPackagePath = "github.com/nvidia/nvidia-container-runtime";
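
  # -s and -w are Go linker flags: omit the symbol table and DWARF debug info
  # to shrink the binary.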
  ldflags = [ "-s" "-w" ];

  nativeBuildInputs = [ makeWrapper ];

  postInstall = ''
    mkdir -p $out/etc/nvidia-container-runtime

    # nvidia-container-runtime invokes docker-runc, falling back to runc if
    # docker-runc isn't available on PATH.
    #
    # Also set XDG_CONFIG_HOME if it isn't already set, to allow overriding
    # the configuration. This in turn lets users enable the nvidia container
    # runtime for any number of higher-level runtimes like docker and podman;
    # i.e., there's no need for mutual exclusivity over which high-level
    # runtime enables the nvidia runtime, because each high-level runtime has
    # its own config.toml file.
    wrapProgram $out/bin/nvidia-container-runtime \
      --run "${warnIfXdgConfigHomeIsSet}" \
      --prefix PATH : ${isolatedContainerRuntimePath} \
      --set-default XDG_CONFIG_HOME $out/etc
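
    # The wrapper defaults XDG_CONFIG_HOME to $out/etc, so the copy below is
    # the configuration the runtime reads unless a user overrides it.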
    cp ${configTemplate} $out/etc/nvidia-container-runtime/config.toml
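
    # --subst-var-by replaces the literal token @glibcbin@ in the copied
    # template with glibc's bin output path.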
    substituteInPlace $out/etc/nvidia-container-runtime/config.toml \
      --subst-var-by glibcbin ${lib.getBin glibc}
  '';

  meta = with lib; {
    homepage = "https://github.com/NVIDIA/nvidia-container-runtime";
    description = "NVIDIA container runtime";
    license = licenses.asl20;
    platforms = platforms.linux;
    maintainers = with maintainers; [ cpcloud ];
  };
}