Merge pull request #323331 from xddxdd/llama-cpp
llama-cpp: 3091 -> 3260
commit d8724afca4
@@ -71,13 +71,13 @@ let
 in
 effectiveStdenv.mkDerivation (finalAttrs: {
   pname = "llama-cpp";
-  version = "3091";
+  version = "3260";

   src = fetchFromGitHub {
     owner = "ggerganov";
     repo = "llama.cpp";
     rev = "refs/tags/b${finalAttrs.version}";
-    hash = "sha256-ppujag6Nrk/M9QMQ4mYe2iADsfKzmfKtOP8Ib7GZBmk=";
+    hash = "sha256-0KVwSzxfGinpv5KkDCgF2J+1ijDv87PlDrC+ldscP6s=";
     leaveDotGit = true;
     postFetch = ''
       git -C "$out" rev-parse --short HEAD > $out/COMMIT
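
Note: llama.cpp tags each release as b<BUILD_NUMBER>, so rev = "refs/tags/b${finalAttrs.version}" resolves to tag b3260 here. When bumping, the new hash can be obtained by building once with a placeholder and copying the value from the hash-mismatch error; a minimal sketch, assuming lib is in scope as usual for a nixpkgs package:

    src = fetchFromGitHub {
      owner = "ggerganov";
      repo = "llama.cpp";
      rev = "refs/tags/b3260";
      hash = lib.fakeHash; # the failed build prints the real sha256-... value to paste in
      leaveDotGit = true;  # keep .git so postFetch can record the short commit
    };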
@@ -86,12 +86,12 @@ effectiveStdenv.mkDerivation (finalAttrs: {
   };

   postPatch = ''
-    substituteInPlace ./ggml-metal.m \
+    substituteInPlace ./ggml/src/ggml-metal.m \
       --replace-fail '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"

     substituteInPlace ./scripts/build-info.cmake \
       --replace-fail 'set(BUILD_NUMBER 0)' 'set(BUILD_NUMBER ${finalAttrs.version})' \
       --replace-fail 'set(BUILD_COMMIT "unknown")' "set(BUILD_COMMIT \"$(cat COMMIT)\")"
     substituteInPlace ./scripts/build-info.sh \
       --replace-fail 'build_number="0"' 'build_number="${finalAttrs.version}"' \
       --replace-fail 'build_commit="unknown"' "build_commit=\"$(cat COMMIT)\""
   '';

   nativeBuildInputs = [ cmake ninja pkg-config git ]
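
Note: --replace-fail makes substituteInPlace abort the build when the pattern is absent, which is exactly how the upstream move of ggml-metal.m into ggml/src/ surfaced in this bump. A minimal sketch of the idiom, with a hypothetical file and pattern:

    postPatch = ''
      # fails loudly if 'old_text' is no longer present in the source tree,
      # instead of silently patching nothing
      substituteInPlace ./some/file.c \
        --replace-fail 'old_text' 'new_text'
    '';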
@@ -109,17 +109,16 @@ effectiveStdenv.mkDerivation (finalAttrs: {

   cmakeFlags = [
     # -march=native is non-deterministic; override with platform-specific flags if needed
-    (cmakeBool "LLAMA_NATIVE" false)
-    (cmakeBool "BUILD_SHARED_SERVER" true)
+    (cmakeBool "GGML_NATIVE" false)
+    (cmakeBool "LLAMA_BUILD_SERVER" true)
     (cmakeBool "BUILD_SHARED_LIBS" true)
-    (cmakeBool "BUILD_SHARED_LIBS" true)
-    (cmakeBool "LLAMA_BLAS" blasSupport)
-    (cmakeBool "LLAMA_CLBLAST" openclSupport)
-    (cmakeBool "LLAMA_CUDA" cudaSupport)
-    (cmakeBool "LLAMA_HIPBLAS" rocmSupport)
-    (cmakeBool "LLAMA_METAL" metalSupport)
-    (cmakeBool "LLAMA_RPC" rpcSupport)
-    (cmakeBool "LLAMA_VULKAN" vulkanSupport)
+    (cmakeBool "GGML_BLAS" blasSupport)
+    (cmakeBool "GGML_CLBLAST" openclSupport)
+    (cmakeBool "GGML_CUDA" cudaSupport)
+    (cmakeBool "GGML_HIPBLAS" rocmSupport)
+    (cmakeBool "GGML_METAL" metalSupport)
+    (cmakeBool "GGML_RPC" rpcSupport)
+    (cmakeBool "GGML_VULKAN" vulkanSupport)
   ]
   ++ optionals cudaSupport [
     (cmakeFeature "CMAKE_CUDA_ARCHITECTURES" cudaPackages.flags.cmakeCudaArchitecturesString)
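
Note: upstream renamed most backend build options from LLAMA_* to GGML_* around this release, hence the wholesale flag rename; the duplicated BUILD_SHARED_LIBS entry is also dropped. cmakeBool and cmakeFeature are the nixpkgs helpers lib.cmakeBool / lib.cmakeFeature, which render attribute-style flags into -D definitions roughly as follows (a simplified sketch, not the exact library code):

    # cmakeBool "GGML_CUDA" true                    ~> "-DGGML_CUDA:BOOL=TRUE"
    # cmakeFeature "CMAKE_CUDA_ARCHITECTURES" "86"  ~> "-DCMAKE_CUDA_ARCHITECTURES:STRING=86"
    cmakeBool = name: value: "-D${name}:BOOL=${if value then "TRUE" else "FALSE"}";
    cmakeFeature = name: value: "-D${name}:STRING=${value}";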
@@ -138,7 +137,6 @@ effectiveStdenv.mkDerivation (finalAttrs: {
     (cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
     (cmakeBool "LLAMA_METAL_EMBED_LIBRARY" true)
   ] ++ optionals rpcSupport [
-    "-DLLAMA_RPC=ON"
     # This is done so we can move rpc-server out of bin because llama.cpp doesn't
     # install rpc-server in their install target.
     "-DCMAKE_SKIP_BUILD_RPATH=ON"
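
Note: the raw -DLLAMA_RPC=ON is now redundant with the (cmakeBool "GGML_RPC" rpcSupport) entry above. CMAKE_SKIP_BUILD_RPATH=ON is kept because rpc-server is copied straight out of the build tree in postInstall (see the next hunk), and this prevents a build-tree RPATH from being baked into the copied binary; something like patchelf --print-rpath on the installed llama-rpc-server should never mention the cmake build directory.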
@@ -147,10 +145,11 @@ effectiveStdenv.mkDerivation (finalAttrs: {
   # upstream plans on adding targets at the cmakelevel, remove those
   # additional steps after that
   postInstall = ''
-    mv $out/bin/main $out/bin/llama
-    mv $out/bin/server $out/bin/llama-server
+    # Match previous binary name for this package
+    ln -sf $out/bin/llama-cli $out/bin/llama

     mkdir -p $out/include
-    cp $src/llama.h $out/include/
+    cp $src/include/llama.h $out/include/
   '' + optionalString rpcSupport "cp bin/rpc-server $out/bin/llama-rpc-server";

   passthru.updateScript = nix-update-script {
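
Note: upstream renamed its binaries in this release range (main became llama-cli, server became llama-server), so the package now ships upstream's names and keeps llama only as a compatibility symlink; llama.h likewise moved into include/. passthru.updateScript hooks the package into nix-update, which is how bumps like this one can be produced semi-automatically, e.g. with an invocation along the lines of nix-update llama-cpp --version 3260 (assuming the attribute name llama-cpp).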