llama-cpp: remove workaround for local-ai (#450840)

Author: Philip Taron
Date: 2025-10-10 23:40:04 +00:00
Committed by: GitHub

2 changed files with 1 addition and 14 deletions

File: llama-cpp/package.nix

@@ -91,19 +91,6 @@ effectiveStdenv.mkDerivation (finalAttrs: {
   patches = lib.optionals vulkanSupport [ ./disable_bfloat16.patch ];
 
-  postPatch = ''
-    # Workaround for local-ai package which overrides this package to an older llama-cpp
-    if [ -f ./ggml/src/ggml-metal.m ]; then
-      substituteInPlace ./ggml/src/ggml-metal.m \
-        --replace-fail '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
-    fi
-    if [ -f ./ggml/src/ggml-metal/ggml-metal.m ]; then
-      substituteInPlace ./ggml/src/ggml-metal/ggml-metal.m \
-        --replace-fail '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
-    fi
-  '';
-
   nativeBuildInputs = [
     cmake
     ninja
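
Context for the removal: local-ai pins this package to an older llama-cpp revision via overrideAttrs, and the older source tree keeps ggml-metal.m at a different path, which is why the deleted hook probed both locations before rewriting the Metal shader lookup to an absolute store path. A minimal sketch of that kind of pin, assuming only that the override swaps src (the rev and hash placeholders are illustrative, not local-ai's actual values):

  { llama-cpp, fetchFromGitHub }:
  llama-cpp.overrideAttrs (prev: {
    # Hypothetical pin to an older upstream revision; local-ai's real
    # override lives in its own package.nix (second file below).
    src = fetchFromGitHub {
      owner = "ggerganov";
      repo = "llama.cpp";
      rev = "<older-tag>";    # placeholder
      hash = "<sha256-hash>"; # placeholder
    };
  })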

File: local-ai/package.nix

@@ -112,7 +112,7 @@ let
hash = "sha256-b9B5I3EbBFrkWc6RLXMWcCRKayyWjlGuQrogUcrISrc="; hash = "sha256-b9B5I3EbBFrkWc6RLXMWcCRKayyWjlGuQrogUcrISrc=";
fetchSubmodules = true; fetchSubmodules = true;
}; };
postPatch = prev.postPatch + '' postPatch = ''
cd examples cd examples
cp -r --no-preserve=mode ${src}/backend/cpp/llama grpc-server cp -r --no-preserve=mode ${src}/backend/cpp/llama grpc-server
cp llava/clip* llava/llava.* grpc-server cp llava/clip* llava/llava.* grpc-server
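
With the upstream hook removed, referencing prev.postPatch would now fail evaluation (llama-cpp no longer passes that attribute), so local-ai defines its postPatch outright instead of appending to the previous value. A minimal self-contained sketch of the overrideAttrs shape, assuming src is local-ai's own fetched source tree and that assembling grpc-server is the only patch step (the real expression does more):

  { llama-cpp, src }:
  llama-cpp.overrideAttrs (prev: {
    # Replace, rather than extend, the upstream postPatch hook.
    postPatch = ''
      cd examples
      cp -r --no-preserve=mode ${src}/backend/cpp/llama grpc-server
      cp llava/clip* llava/llava.* grpc-server
    '';
  })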