Merge pull request #275947 from SomeoneSerge/fix/cuda-cc-wrapper

cudaPackages.backendStdenv.cc: gccForLibs
Someone 2024-01-13 09:36:02 +00:00 committed by GitHub
commit bc5ed95a04
7 changed files with 91 additions and 107 deletions

View File

@@ -110,6 +110,9 @@ let
   gccForLibs_solib = getLib gccForLibs
     + optionalString (targetPlatform != hostPlatform) "/${targetPlatform.config}";

+  # Analogously to cc_solib and gccForLibs_solib
+  libcxx_solib = "${lib.getLib libcxx}/lib";
+
   # The following two functions, `isGccArchSupported` and
   # `isGccTuneSupported`, only handle those situations where a flag
   # (`-march` or `-mtune`) is accepted by one compiler but rejected
@@ -261,6 +264,25 @@ stdenv.mkDerivation {
     inherit bintools;
     inherit cc libc libcxx nativeTools nativeLibc nativePrefix isGNU isClang;

+    # Expose the C++ standard library we're using. See the comments on "General
+    # libc++ support". This is also relevant when using an older gcc than the
+    # stdenv's, as may be required e.g. by CUDAToolkit's nvcc.
+    cxxStdlib =
+      let
+        givenLibcxx = libcxx.isLLVM or false;
+        givenGccForLibs = useGccForLibs && gccForLibs.langCC or false;
+      in
+      if (!givenLibcxx) && givenGccForLibs then
+        { kind = "libstdc++"; package = gccForLibs; solib = gccForLibs_solib; }
+      else if givenLibcxx then
+        { kind = "libc++"; package = libcxx; solib = libcxx_solib; }
+      else
+        # We're probably using the `libstdc++` that came with our `gcc`.
+        # TODO: this is maybe not always correct?
+        # TODO: what happens when `nativeTools = true`?
+        { kind = "libstdc++"; package = cc; solib = cc_solib; }
+      ;

     emacsBufferSetup = pkgs: ''
       ; We should handle propagation here too
       (mapc
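The new `cxxStdlib` passthru gives consumers a single place to ask which C++ standard library a wrapped compiler links against. A minimal sketch of how a downstream expression might inspect it — the attribute names `kind`, `package`, and `solib` come from the hunk above, and a nixpkgs checkout containing this change is assumed:

  # sketch.nix — evaluate with: nix-instantiate --eval --strict sketch.nix
  let
    pkgs = import <nixpkgs> { };
    inherit (pkgs.stdenv.cc) cxxStdlib;
  in
  {
    kind = cxxStdlib.kind;             # "libstdc++" or "libc++"
    provider = cxxStdlib.package.name; # the package supplying the runtime
    solib = cxxStdlib.solib;           # directory used for -L/runpath entries
  }
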
@@ -440,6 +462,13 @@ stdenv.mkDerivation {
     echo "-L${gccForLibs}/lib/gcc/${targetPlatform.config}/${gccForLibs.version}" >> $out/nix-support/cc-ldflags
     echo "-L${gccForLibs_solib}/lib" >> $out/nix-support/cc-ldflags
   ''
+  # The above "fix" may be incorrect; gcc.cc.lib doesn't contain a
+  # `target-triple` dir but the correct fix may be to just remove the above?
+  #
+  # For clang it's not necessary (see `--gcc-toolchain` below) and for other
+  # situations adding in the above will bring in lots of other gcc libraries
+  # (i.e. sanitizer libraries, `libatomic`, `libquadmath`) besides just
+  # `libstdc++`; this may actually break clang.
   # TODO We would like to connect this to `useGccForLibs`, but we cannot yet
   # because `libcxxStdenv` on linux still needs this. Maybe someday we'll
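For a concrete sense of what this branch appends, an illustrative `$out/nix-support/cc-ldflags` written by the two echo lines above might read as follows — the store hashes and versions are invented placeholders, not output from a real build:

  -L/nix/store/...-gcc-12.3.0/lib/gcc/x86_64-unknown-linux-gnu/12.3.0
  -L/nix/store/...-gcc-12.3.0-lib/lib

As the new comment warns, that second directory holds not just libstdc++ but also libatomic, libquadmath, and the sanitizer runtimes.
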
@@ -564,7 +593,7 @@ stdenv.mkDerivation {
     echo "$ccLDFlags" >> $out/nix-support/cc-ldflags
     echo "$ccCFlags" >> $out/nix-support/cc-cflags
   '' + optionalString (targetPlatform.isDarwin && (libcxx != null) && (cc.isClang or false)) ''
-    echo " -L${lib.getLib libcxx}/lib" >> $out/nix-support/cc-ldflags
+    echo " -L${libcxx_solib}" >> $out/nix-support/cc-ldflags
   ''
   ##

View File

@@ -1,79 +0,0 @@
-final: prev: let
-  ### Cuda Toolkit
-
-  # Function to build the classic cudatoolkit package
-  buildCudaToolkitPackage = final.callPackage ./common.nix;
-
-  # Version info for the classic cudatoolkit packages that contain everything that is in redist.
-  cudatoolkitVersions = final.lib.importTOML ./versions.toml;
-
-  finalVersion = cudatoolkitVersions.${final.cudaVersion};
-
-  # Exposed as cudaPackages.backendStdenv.
-  # This is what nvcc uses as a backend,
-  # and it has to be an officially supported one (e.g. gcc11 for cuda11).
-  #
-  # It, however, propagates the current stdenv's libstdc++ to avoid
-  # "GLIBCXX_* not found" errors when linked with other C++ libraries.
-  # E.g. for cudaPackages_11_8 we use gcc11 with gcc12's libstdc++.
-  # Cf. https://github.com/NixOS/nixpkgs/pull/218265 for context
-  backendStdenv = final.callPackage ./stdenv.nix {
-    # We use buildPackages (= pkgsBuildHost) because we look for a gcc that
-    # runs on our build platform, and that produces executables for the host
-    # platform (= platform on which we deploy and run the downstream packages).
-    # The target platform of buildPackages.gcc is our host platform, so its
-    # .lib output should be the libstdc++ we want to be writing in the runpaths
-    # Cf. https://github.com/NixOS/nixpkgs/pull/225661#discussion_r1164564576
-    nixpkgsCompatibleLibstdcxx = final.pkgs.buildPackages.gcc.cc.lib;
-    nvccCompatibleCC = final.pkgs.buildPackages."${finalVersion.gcc}".cc;
-  };
-
-  ### Add classic cudatoolkit package
-  cudatoolkit =
-    let
-      attrs = builtins.removeAttrs finalVersion [ "gcc" ];
-      attrs' = attrs // { inherit backendStdenv; };
-    in
-    buildCudaToolkitPackage attrs';
-
-  cudaFlags = final.callPackage ./flags.nix {};
-
-  # Internal hook, used by cudatoolkit and cuda redist packages
-  # to accommodate automatic CUDAToolkit_ROOT construction
-  markForCudatoolkitRootHook = (final.callPackage
-    ({ makeSetupHook }:
-      makeSetupHook
-        { name = "mark-for-cudatoolkit-root-hook"; }
-        ./hooks/mark-for-cudatoolkit-root-hook.sh)
-    { });
-
-  # Currently propagated by cuda_nvcc or cudatoolkit, rather than used directly
-  setupCudaHook = (final.callPackage
-    ({ makeSetupHook, backendStdenv }:
-      makeSetupHook
-        {
-          name = "setup-cuda-hook";
-          substitutions.setupCudaHook = placeholder "out";
-          # Point NVCC at a compatible compiler
-          substitutions.ccRoot = "${backendStdenv.cc}";
-          # Required in addition to ccRoot as otherwise bin/gcc is looked up
-          # when building CMakeCUDACompilerId.cu
-          substitutions.ccFullPath = "${backendStdenv.cc}/bin/${backendStdenv.cc.targetPrefix}c++";
-        }
-        ./hooks/setup-cuda-hook.sh)
-    { });
-in
-{
-  inherit
-    backendStdenv
-    cudatoolkit
-    cudaFlags
-    markForCudatoolkitRootHook
-    setupCudaHook;
-
-  saxpy = final.callPackage ./saxpy { };
-}
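Everything this deleted extension provided now lives elsewhere; in particular, `nixpkgsCompatibleLibstdcxx` survives only as a deprecated alias (see `stdenv.nix` below). A hypothetical overlay migrating off the old attribute — `mypkg` is an invented placeholder, and appending `/lib` to `solib` mirrors the `cuda_nvcc` hunk further down:

  final: prev: {
    mypkg = prev.mypkg.overrideAttrs (old: {
      # Before (now warns, slated for removal after 24.05):
      #   "-L${final.cudaPackages.backendStdenv.nixpkgsCompatibleLibstdcxx}/lib"
      # After: ask the compiler wrapper which C++ runtime it actually links.
      NIX_LDFLAGS = "-L${final.cudaPackages.backendStdenv.cc.cxxStdlib.solib}/lib";
    });
  }
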

View File

@@ -2,38 +2,29 @@
   lib,
   nvccCompatibilities,
   cudaVersion,
-  buildPackages,
+  pkgs,
-  overrideCC,
   stdenv,
-  wrapCCWith,
+  stdenvAdapters,
 }:
 let
   gccMajorVersion = nvccCompatibilities.${cudaVersion}.gccMaxMajorVersion;
-  # We use buildPackages (= pkgsBuildHost) because we look for a gcc that
-  # runs on our build platform, and that produces executables for the host
-  # platform (= platform on which we deploy and run the downstream packages).
-  # The target platform of buildPackages.gcc is our host platform, so its
-  # .lib output should be the libstdc++ we want to be writing in the runpaths
-  # Cf. https://github.com/NixOS/nixpkgs/pull/225661#discussion_r1164564576
-  nixpkgsCompatibleLibstdcxx = buildPackages.gcc.cc.lib;
-  nvccCompatibleCC = buildPackages."gcc${gccMajorVersion}".cc;
-  cc = wrapCCWith {
-    cc = nvccCompatibleCC;
-    # This option is for clang's libcxx, but we (ab)use it for gcc's libstdc++.
-    # Note that libstdc++ maintains forward-compatibility: if we load a newer
-    # libstdc++ into the process, we can still use libraries built against an
-    # older libstdc++. This, in practice, means that we should use libstdc++ from
-    # the same stdenv that the rest of nixpkgs uses.
-    # We currently do not try to support anything other than gcc and linux.
-    libcxx = nixpkgsCompatibleLibstdcxx;
-  };
-  cudaStdenv = overrideCC stdenv cc;
+  cudaStdenv = stdenvAdapters.useLibsFrom stdenv pkgs."gcc${gccMajorVersion}Stdenv";
   passthruExtra = {
-    inherit nixpkgsCompatibleLibstdcxx;
+    nixpkgsCompatibleLibstdcxx = lib.warn "cudaPackages.backendStdenv.nixpkgsCompatibleLibstdcxx is misnamed, deprecated, and will be removed after 24.05" cudaStdenv.cc.cxxStdlib.package;
     # cc already exposed
   };
   assertCondition = true;
 in
+# We should use libstdc++ at least as new as nixpkgs' stdenv's one.
+assert let
+  cxxStdlibCuda = cudaStdenv.cc.cxxStdlib.package;
+  cxxStdlibNixpkgs = stdenv.cc.cxxStdlib.package;
+in
+((stdenv.cc.cxxStdlib.kind or null) == "libstdc++")
+-> lib.versionAtLeast cxxStdlibCuda.version cxxStdlibNixpkgs.version;
 lib.extendDerivation assertCondition passthruExtra cudaStdenv
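The new assert encodes the forward-compatibility rule from the old comments: the process may load a libstdc++ at least as new as the one nixpkgs built against, never an older one. A standalone sketch of the same check — the version strings are invented for illustration:

  # guard.nix — evaluate with: nix-instantiate --eval guard.nix
  let
    lib = (import <nixpkgs> { }).lib;
    cxxStdlibNixpkgs = "12.3.0"; # hypothetical: nixpkgs stdenv's gcc
    cxxStdlibCuda = "13.2.0";    # hypothetical: gcc providing cuda's libstdc++
  in
  assert lib.versionAtLeast cxxStdlibCuda cxxStdlibNixpkgs;
  "backendStdenv's libstdc++ is new enough"
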

View File

@@ -85,7 +85,20 @@ attrsets.filterAttrs (attr: _: (builtins.hasAttr attr prev)) {
   );
   cuda_nvcc = prev.cuda_nvcc.overrideAttrs (
-    oldAttrs: {
+    oldAttrs:
+    let
+      # This replicates the logic in stdenvAdapters.useLibsFrom, except we use
+      # gcc from pkgsHostTarget and not from buildPackages.
+      ccForLibs-wrapper = final.pkgs.stdenv.cc;
+      gccMajorVersion = final.nvccCompatibilities.${cudaVersion}.gccMaxMajorVersion;
+      cc = final.pkgs.wrapCCWith {
+        cc = final.pkgs."gcc${gccMajorVersion}".cc;
+        useCcForLibs = true;
+        gccForLibs = ccForLibs-wrapper.cc;
+      };
+      cxxStdlibDir = ccForLibs-wrapper.cxxStdlib.solib;
+    in
+    {
       outputs = oldAttrs.outputs ++ lists.optionals (!(builtins.elem "lib" oldAttrs.outputs)) [ "lib" ];
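Outside cuda-modules, the same wiring can be reproduced directly with `wrapCCWith`; a minimal sketch assuming an x86_64-linux nixpkgs whose default gcc is at least as new as gcc 12:

  let
    pkgs = import <nixpkgs> { };
    cc = pkgs.wrapCCWith {
      cc = pkgs.gcc12.cc;             # the (older) compiler nvcc would accept
      useCcForLibs = true;            # ...but take libraries from gccForLibs,
      gccForLibs = pkgs.stdenv.cc.cc; # i.e. the default stdenv's libstdc++
    };
  in
  pkgs.overrideCC pkgs.stdenv cc
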
@@ -119,8 +132,8 @@ attrsets.filterAttrs (attr: _: (builtins.hasAttr attr prev)) {
         cat << EOF >> bin/nvcc.profile
         # Fix a compatible backend compiler
-        PATH += ${lib.getBin final.backendStdenv.cc}/bin:
-        LIBRARIES += "-L${lib.getLib final.backendStdenv.nixpkgsCompatibleLibstdcxx}/lib"
+        PATH += ${lib.getBin cc}/bin:
+        LIBRARIES += "-L${cxxStdlibDir}/lib"

         # Expose the split-out nvvm
         LIBRARIES =+ -L''${!outputBin}/nvvm/lib
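After substitution, the appended `nvcc.profile` stanza would read roughly as follows — the store paths and versions are invented placeholders:

  # Fix a compatible backend compiler
  PATH += /nix/store/...-gcc-wrapper-12.3.0/bin:
  LIBRARIES += "-L/nix/store/...-gcc-13.2.0-lib/lib"

The PATH entry points at the pinned, nvcc-compatible gcc wrapper, while LIBRARIES points at the stdenv compiler's libstdc++ — exactly the split the let-block above sets up.
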

View File

@@ -282,7 +282,6 @@ let
   # loading multiple extensions in the same python program due to duplicate protobuf DBs.
   # 2) Patch python path in the compiler driver.
   preBuild = lib.optionalString cudaSupport ''
-    export NIX_LDFLAGS+=" -L${backendStdenv.nixpkgsCompatibleLibstdcxx}/lib"
     patchShebangs ../output/external/xla/third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc.tpl
   '' + lib.optionalString stdenv.isDarwin ''
     # Framework search paths aren't added by bintools hook

View File

@@ -237,6 +237,30 @@ rec {
     });
   });

+  useLibsFrom = modelStdenv: targetStdenv:
+    let
+      ccForLibs = modelStdenv.cc.cc;
+      cc = pkgs.wrapCCWith {
+        /* NOTE: cc.cc is the unwrapped compiler. Should we respect the old
+         * wrapper instead? */
+        cc = targetStdenv.cc.cc;
+        /* NOTE(originally by rrbutani):
+         * Normally the `useCcForLibs`/`gccForLibs` mechanism is used to get a
+         * clang based `cc` to use `libstdc++` (from gcc).
+         *
+         * Here we (ab)use it to use a `libstdc++` from a different `gcc` than
+         * our `cc`.
+         *
+         * Note that this does not inhibit our `cc`'s lib dir from being added
+         * to cflags/ldflags (see `cc_solib` in `cc-wrapper`) but this is okay:
+         * our `gccForLibs`'s paths should take precedence. */
+        useCcForLibs = true;
+        gccForLibs = ccForLibs;
+      };
+    in
+    overrideCC targetStdenv cc;
+
   useMoldLinker = stdenv: let
     bintools = stdenv.cc.bintools.override {
       extraBuildCommands = ''
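A usage sketch for the new adapter, evaluable against a nixpkgs containing this change: the compiler comes from the second argument and the C++ runtime from the first, so this stdenv builds with gcc 12 while linking the default stdenv's (newer) libstdc++:

  let
    pkgs = import <nixpkgs> { };
    myStdenv = pkgs.stdenvAdapters.useLibsFrom pkgs.stdenv pkgs.gcc12Stdenv;
  in
  myStdenv.cc.cxxStdlib.package.version  # reports the default stdenv's gcc
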

View File

@@ -14145,6 +14145,13 @@ self: super: with self; {
     grpc = compat.grpcTF;
     grpcio = compat.grpcioTF;
     tensorboard = compat.tensorboardTF;
+
+    # Tensorflow 2.13 doesn't support gcc13:
+    # https://github.com/tensorflow/tensorflow/issues/61289
+    #
+    # We use nixpkgs' default libstdc++ to stay compatible with other
+    # python modules
+    stdenv = pkgs.stdenvAdapters.useLibsFrom stdenv pkgs.gcc12Stdenv;
   };

   tensorflow-datasets = callPackage ../development/python-modules/tensorflow-datasets { };
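The same override is available to out-of-tree users. A hypothetical spot-fix pinning a package to an older compiler while keeping nixpkgs' libstdc++ for ABI compatibility with the rest of the Python set — the choice of `gcc11Stdenv` here is illustrative, not from this PR:

  let
    pkgs = import <nixpkgs> { };
  in
  pkgs.python3Packages.tensorflow.override {
    # hypothetical: any gccNNStdenv the package's toolchain actually supports
    stdenv = pkgs.stdenvAdapters.useLibsFrom pkgs.stdenv pkgs.gcc11Stdenv;
  }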