examples : fix n_gpu_layers usage in talk-llama (#1441)
jhen0409 authored Nov 7, 2023
1 parent 0463028 commit 3989b29
Showing 1 changed file with 3 additions and 3 deletions.

examples/talk-llama/talk-llama.cpp
@@ -266,6 +266,9 @@ int main(int argc, char ** argv) {
llama_backend_init(true);

auto lmparams = llama_model_default_params();
+if (!params.use_gpu) {
+    lmparams.n_gpu_layers = 0;
+}

struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);

@@ -276,9 +279,6 @@ int main(int argc, char ** argv) {
lcparams.seed = 1;
lcparams.f16_kv = true;
lcparams.n_threads = params.n_threads;
-if (!params.use_gpu) {
-    lcparams.n_gpu_layers = 0;
-}

struct llama_context * ctx_llama = llama_new_context_with_model(model_llama, lcparams);

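The change itself is small: the n_gpu_layers override is now applied to the model parameters (lmparams) passed to llama_load_model_from_file, instead of to the context parameters (lcparams), which do not carry that setting in this version of the llama.cpp API. A minimal sketch of the resulting initialization, reconstructed from the diff context above (the lcparams declaration and the exact surrounding code are assumptions, since they are not shown in these hunks):

    llama_backend_init(true);

    // model parameters: n_gpu_layers belongs here
    auto lmparams = llama_model_default_params();
    if (!params.use_gpu) {
        lmparams.n_gpu_layers = 0; // keep all layers on the CPU when GPU use is disabled
    }

    struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);

    // context parameters: no GPU-layer override here anymore
    llama_context_params lcparams = llama_context_default_params(); // assumed declaration, not part of the hunks
    lcparams.seed      = 1;
    lcparams.f16_kv    = true;
    lcparams.n_threads = params.n_threads;

    struct llama_context * ctx_llama = llama_new_context_with_model(model_llama, lcparams);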
