Skip to content

Commit

Permalink
llama : print devices used on model load
Browse files Browse the repository at this point in the history
  • Loading branch information
slaren committed Oct 7, 2024
1 parent 5f4e30d commit 9e04f2c
Showing 1 changed file with 5 additions and 0 deletions.
5 changes: 5 additions & 0 deletions src/llama.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19100,8 +19100,13 @@ struct llama_model * llama_load_model_from_file(

// GPU-type backend devices: log them and register them on the model.
// NOTE(review): this is a fragment of a switch inside llama_load_model_from_file;
// the switch subject and enclosing loop are outside this view.
case GGML_BACKEND_DEVICE_TYPE_GPU:
case GGML_BACKEND_DEVICE_TYPE_GPU_FULL:
{
size_t free, total; // NOLINT
// Query the device's memory state; only `free` is used (logged below),
// `total` is required by the API signature.
ggml_backend_dev_memory(dev, &free, &total);
// Report which device was selected and how much memory is currently free,
// so users can confirm at load time which GPU(s) the model will use.
LLAMA_LOG_INFO("%s: using device %s (%s) - %zu MiB free\n", __func__, ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), free/1024/1024);
// Record the device on the model — presumably consulted later for
// layer/tensor placement; confirm against the rest of llama.cpp.
model->devices.push_back(dev);
break;
}
}
}

Expand Down

0 comments on commit 9e04f2c

Please sign in to comment.