llama-cpp: remove workaround for local-ai (#450840)
@@ -91,19 +91,6 @@ effectiveStdenv.mkDerivation (finalAttrs: {
   patches = lib.optionals vulkanSupport [ ./disable_bfloat16.patch ];

-  postPatch = ''
-    # Workaround for local-ai package which overrides this package to an older llama-cpp
-    if [ -f ./ggml/src/ggml-metal.m ]; then
-      substituteInPlace ./ggml/src/ggml-metal.m \
-        --replace-fail '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
-    fi
-
-    if [ -f ./ggml/src/ggml-metal/ggml-metal.m ]; then
-      substituteInPlace ./ggml/src/ggml-metal/ggml-metal.m \
-        --replace-fail '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
-    fi
-  '';
-
   nativeBuildInputs = [
     cmake
     ninja
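Context for the removal above: the deleted postPatch existed only so that local-ai, which overrides this package to an older llama-cpp, would get the Metal shader lookup rewritten to an absolute store path no matter whether ggml-metal.m lived at its old or its new location. A downstream override that still pins an older llama-cpp could re-apply the substitution on its own side instead; a minimal sketch, assuming the plain overrideAttrs form and the old file layout (the attribute name llama-cpp-old is illustrative, the substitution itself is copied from the lines removed above):

  # Hypothetical downstream override re-adding the dropped Metal path substitution.
  llama-cpp-old = llama-cpp.overrideAttrs (prevAttrs: {
    postPatch = (prevAttrs.postPatch or "") + ''
      if [ -f ./ggml/src/ggml-metal.m ]; then
        substituteInPlace ./ggml/src/ggml-metal.m \
          --replace-fail '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
      fi
    '';
  });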
@@ -112,7 +112,7 @@ let
       hash = "sha256-b9B5I3EbBFrkWc6RLXMWcCRKayyWjlGuQrogUcrISrc=";
       fetchSubmodules = true;
     };
-    postPatch = prev.postPatch + ''
+    postPatch = ''
       cd examples
       cp -r --no-preserve=mode ${src}/backend/cpp/llama grpc-server
       cp llava/clip* llava/llava.* grpc-server
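Dropping the prev.postPatch prefix here follows from the first hunk: llama-cpp no longer defines a postPatch at all, so there is nothing left to inherit and the concatenation would reference an attribute that is no longer set. If the override had to keep evaluating against both the old and the new llama-cpp, a fallback would be the usual pattern; a sketch, with only the attribute names and shell commands taken from the hunk above:

  # Hypothetical defensive variant: fall back to an empty string when the
  # overridden llama-cpp (as after this commit) defines no postPatch of its own.
  postPatch = (prev.postPatch or "") + ''
    cd examples
    cp -r --no-preserve=mode ${src}/backend/cpp/llama grpc-server
    cp llava/clip* llava/llava.* grpc-server
  '';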