From 3989b29a9b0a2c0de92e848957d9a47635957f84 Mon Sep 17 00:00:00 2001
From: Jhen-Jie Hong
Date: Tue, 7 Nov 2023 09:36:23 +0800
Subject: [PATCH] examples : fix n_gpu_layers usage in talk-llama (#1441)

---
 examples/talk-llama/talk-llama.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/talk-llama/talk-llama.cpp b/examples/talk-llama/talk-llama.cpp
index 6cc30c1..bc0119a 100644
--- a/examples/talk-llama/talk-llama.cpp
+++ b/examples/talk-llama/talk-llama.cpp
@@ -266,6 +266,9 @@ int main(int argc, char ** argv) {
     llama_backend_init(true);
 
     auto lmparams = llama_model_default_params();
+    if (!params.use_gpu) {
+        lmparams.n_gpu_layers = 0;
+    }
 
     struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);
 
@@ -276,9 +279,6 @@ int main(int argc, char ** argv) {
     lcparams.seed      = 1;
     lcparams.f16_kv    = true;
     lcparams.n_threads = params.n_threads;
-    if (!params.use_gpu) {
-        lcparams.n_gpu_layers = 0;
-    }
 
     struct llama_context * ctx_llama = llama_new_context_with_model(model_llama, lcparams);
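
Note: the fix moves the n_gpu_layers override from the context parameters to the
model parameters, since n_gpu_layers is a field of llama_model_params and is
consumed when the model file is loaded, not when the context is created (the old
placement on lcparams had no effect on model loading). Below is a minimal sketch
of the corrected initialization order, assuming the llama.cpp API as of this
commit; use_gpu, the model path, and the thread count are illustrative
placeholders rather than the example's actual values.

    #include "llama.h"

    // Sketch only: mirrors the flow in talk-llama.cpp after this patch.
    struct llama_context * init_llama(bool use_gpu) {
        llama_backend_init(true);

        // n_gpu_layers lives on the model params and takes effect when the
        // model file is loaded, so it must be set before loading.
        auto lmparams = llama_model_default_params();
        if (!use_gpu) {
            lmparams.n_gpu_layers = 0; // CPU-only: offload no layers
        }
        struct llama_model * model = llama_load_model_from_file("model.gguf", lmparams);

        // The context params no longer carry the n_gpu_layers override.
        auto lcparams = llama_context_default_params();
        lcparams.seed      = 1;
        lcparams.f16_kv    = true;
        lcparams.n_threads = 4;
        return llama_new_context_with_model(model, lcparams);
    }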