llama : use _impl suffix instead of _internal #11060

Merged (1 commit) on Jan 6, 2025
src/llama-quant.cpp (20 changes: 10 additions & 10 deletions)
@@ -22,7 +22,7 @@ static void zeros(std::ofstream & file, size_t n) {
}
}

-struct quantize_state_internal {
+struct quantize_state_impl {
const llama_model & model;
const llama_model_quantize_params * params;

@@ -43,13 +43,13 @@ struct quantize_state_internal {
// used to figure out if a model shares tok_embd with the output weight
bool has_output = false;

-quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
+quantize_state_impl(const llama_model & model, const llama_model_quantize_params * params)
: model(model)
, params(params)
{}
};

-static void llama_tensor_dequantize_internal(
+static void llama_tensor_dequantize_impl(
struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
const size_t nelements, const int nthread
) {
@@ -121,7 +121,7 @@ static void llama_tensor_dequantize_internal(
workers.clear();
}

-static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
+static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
const std::string name = ggml_get_name(tensor);

// TODO: avoid hardcoded tensor names - use the TN_* constants
@@ -410,7 +410,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
return new_type;
}

-static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
+static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
if (nthread < 2) {
// single-thread
size_t new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix);
@@ -464,7 +464,7 @@ static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const floa
return new_size;
}

-static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
+static void llama_model_quantize_impl(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
ggml_type default_type;
llama_ftype ftype = params->ftype;

@@ -534,7 +534,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
llm_load_hparams(ml, model);
llm_load_stats (ml, model);

-struct quantize_state_internal qs(model, params);
+struct quantize_state_impl qs(model, params);

if (params->only_copy) {
ftype = model.ftype;
@@ -837,7 +837,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
} else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
} else {
-llama_tensor_dequantize_internal(tensor, f32_conv_buf, workers, nelements, nthread);
+llama_tensor_dequantize_impl(tensor, f32_conv_buf, workers, nelements, nthread);
f32_data = (float *) f32_conv_buf.data();
}

@@ -866,7 +866,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
void * new_data_03 = (char *)new_data + ggml_row_size(new_type, n_per_row) * i03 * nrows;
const float * imatrix_03 = imatrix ? imatrix + i03 * n_per_row : nullptr;

-new_size += llama_tensor_quantize_internal(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
+new_size += llama_tensor_quantize_impl(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
}
LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
}
@@ -919,7 +919,7 @@ uint32_t llama_model_quantize(
const char * fname_out,
const llama_model_quantize_params * params) {
try {
-llama_model_quantize_internal(fname_inp, fname_out, params);
+llama_model_quantize_impl(fname_inp, fname_out, params);
} catch (const std::exception & err) {
LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
return 1;
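
The hunk above also shows the call-site convention: the public C function llama_model_quantize forwards to the static llama_model_quantize_impl and turns any exception into a non-zero return code, so exceptions never cross the C API boundary. Below is a minimal, self-contained sketch of that pattern; the names quantize_impl, quantize_safe, and the file names are placeholders for illustration, not symbols from llama.cpp.

    #include <cstdint>
    #include <cstdio>
    #include <stdexcept>
    #include <string>

    // Hypothetical internal worker: like llama_model_quantize_impl, it reports failure by throwing.
    static void quantize_impl(const std::string & fname_inp, const std::string & fname_out) {
        if (fname_inp.empty() || fname_out.empty()) {
            throw std::runtime_error("missing input or output file name");
        }
        // ... actual quantization work would go here ...
    }

    // Public entry point: exceptions are caught at the boundary and mapped to a status code.
    static uint32_t quantize_safe(const char * fname_inp, const char * fname_out) {
        try {
            quantize_impl(fname_inp ? fname_inp : "", fname_out ? fname_out : "");
        } catch (const std::exception & err) {
            fprintf(stderr, "failed to quantize: %s\n", err.what());
            return 1;
        }
        return 0;
    }

    int main() {
        // Succeeds with the stub above; a real run would read and write GGUF files.
        return (int) quantize_safe("model-f16.gguf", "model-q4_0.gguf");
    }
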
src/llama.cpp (16 changes: 8 additions & 8 deletions)
@@ -10517,7 +10517,7 @@ static enum ggml_status llama_graph_compute(
// return positive int on warning
// return negative int on error
//
-static int llama_decode_internal(
+static int llama_decode_impl(
llama_context & lctx,
llama_batch inp_batch) {

@@ -10852,7 +10852,7 @@ static int llama_decode_internal(
// return positive int on warning
// return negative int on error
//
-static int llama_encode_internal(
+static int llama_encode_impl(
llama_context & lctx,
llama_batch inp_batch) {

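The comment kept in this hunk (and in the matching llama_decode_impl hunk above) states the return-code contract that the public wrappers further down preserve: 0 for success, a positive value for a recoverable warning, a negative value for an error. A hedged caller-side sketch of handling that contract follows; the helper name and the example messages are illustrative and not part of the llama.cpp API.

    #include <cstdio>

    // Interprets the int returned by llama_decode()/llama_encode() under the convention
    // documented above: 0 = success, > 0 = warning (recoverable), < 0 = error.
    static bool handle_decode_status(int ret) {
        if (ret == 0) {
            return true;   // success: outputs for this batch are valid
        }
        if (ret > 0) {
            // warning: the batch was not processed as submitted (for example, no free
            // slot in the KV cache); the caller may retry with a smaller batch
            fprintf(stderr, "decode warning: ret = %d\n", ret);
            return true;
        }
        fprintf(stderr, "decode error: ret = %d\n", ret);
        return false;      // error: do not trust the outputs for this batch
    }

    int main() {
        handle_decode_status(0);
        handle_decode_status(1);
        handle_decode_status(-1);
        return 0;
    }
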
@@ -11034,7 +11034,7 @@ static int llama_encode_internal(
}

// find holes from the beginning of the KV cache and fill them by moving data from the end of the cache
-static void llama_kv_cache_defrag_internal(struct llama_context & lctx) {
+static void llama_kv_cache_defrag_impl(struct llama_context & lctx) {
auto & kv_self = lctx.kv_self;

const auto & hparams = lctx.model.hparams;
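
The comment above this function summarizes what llama_kv_cache_defrag_impl does: walk the KV cache from the front, find holes, and fill them with cells moved from the back so that the used cells become contiguous. The real routine operates on lctx.kv_self and the underlying KV tensors; the toy sketch below only illustrates the hole-filling idea on a plain vector of optional values and is not the actual llama.cpp logic.

    #include <cstdio>
    #include <optional>
    #include <vector>

    // Toy compaction: move occupied cells from the back of the buffer into the first
    // free slots found from the front, until all occupied cells are contiguous.
    static void defrag_toy(std::vector<std::optional<int>> & cells) {
        size_t hole = 0;               // next free slot, scanning from the front
        size_t last = cells.size();    // one past the last cell still worth moving
        while (true) {
            while (hole < last && cells[hole].has_value())      hole++;  // find next hole
            while (last > hole && !cells[last - 1].has_value()) last--;  // find last occupied cell
            if (hole + 1 >= last) break;      // nothing occupied beyond the first hole
            cells[hole] = cells[last - 1];    // move data from the end into the hole
            cells[last - 1].reset();
        }
    }

    int main() {
        std::vector<std::optional<int>> cells = {1, std::nullopt, 3, std::nullopt, 5, 6};
        defrag_toy(cells);
        for (const auto & c : cells) printf("%s ", c ? "used" : "free");
        printf("\n");   // prints: used used used used free free
        return 0;
    }
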
@@ -11254,7 +11254,7 @@ static void llama_kv_cache_defrag_internal(struct llama_context & lctx) {
//LLAMA_LOG_INFO("(tmp log) KV defrag time: %.3f ms\n", (t_end - t_start)/1000.0);
}

-static void llama_kv_cache_update_internal(struct llama_context & lctx) {
+static void llama_kv_cache_update_impl(struct llama_context & lctx) {
bool need_reserve = false;

if (lctx.kv_self.has_shift) {
@@ -11290,7 +11290,7 @@ static void llama_kv_cache_update_internal(struct llama_context & lctx) {

// defragment the KV cache if needed
if (lctx.kv_self.do_defrag) {
-llama_kv_cache_defrag_internal(lctx);
+llama_kv_cache_defrag_impl(lctx);

need_reserve = true;

@@ -11991,7 +11991,7 @@ void llama_kv_cache_defrag(struct llama_context * ctx) {
}

void llama_kv_cache_update(struct llama_context * ctx) {
-llama_kv_cache_update_internal(*ctx);
+llama_kv_cache_update_impl(*ctx);
}

bool llama_kv_cache_can_shift(struct llama_context * ctx) {
@@ -12003,7 +12003,7 @@ bool llama_kv_cache_can_shift(struct llama_context * ctx) {
int32_t llama_encode(
struct llama_context * ctx,
struct llama_batch batch) {
-const int ret = llama_encode_internal(*ctx, batch);
+const int ret = llama_encode_impl(*ctx, batch);
if (ret != 0) {
LLAMA_LOG_ERROR("%s: failed to encode, ret = %d\n", __func__, ret);
}
@@ -12014,7 +12014,7 @@ int32_t llama_encode(
int32_t llama_decode(
struct llama_context * ctx,
struct llama_batch batch) {
-const int ret = llama_decode_internal(*ctx, batch);
+const int ret = llama_decode_impl(*ctx, batch);
if (ret != 0) {
LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
}