examples : dedup simple
ggerganov committed Aug 16, 2023
1 parent c290f3e commit 795ec70
Showing 3 changed files with 12 additions and 140 deletions.
5 changes: 1 addition & 4 deletions Makefile
@@ -1,5 +1,5 @@
# Define the default target now so that it is always the first target
-BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple server embd-input-test gguf gguf-llama-simple gptneox-main
+BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple server embd-input-test gguf gptneox-main

# Binaries only useful for tests
TEST_TARGETS = tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0
@@ -388,9 +388,6 @@ embd-input-test: $(LIB_PRE)embdinput$(DSO_EXT) examples/embd-input/embd-input-te
gguf: examples/gguf/gguf.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

-gguf-llama-simple: examples/gguf/gguf-llama-simple.cpp build-info.h ggml.o llama.o common.o $(OBJS)
-	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
-
gptneox-main: gptneox-main.cpp ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

129 changes: 0 additions & 129 deletions examples/gguf/gguf-llama-simple.cpp

This file was deleted.

18 changes: 11 additions & 7 deletions examples/simple/simple.cpp
@@ -36,16 +36,17 @@ int main(int argc, char ** argv) {

    llama_backend_init(params.numa);

-    llama_model * model;
-    llama_context * ctx;
+    llama_context_params ctx_params = llama_context_default_params();

-    std::tie(model, ctx) = llama_init_from_gpt_params(params);
+    llama_model * model = llama_load_model_from_file(params.model.c_str(), ctx_params);

    if (model == NULL) {
-        fprintf(stderr, "%s: error: unable to load model\n", __func__);
+        fprintf(stderr , "%s: error: unable to load model\n" , __func__);
        return 1;
    }

+    llama_context * ctx = llama_new_context_with_model(model, ctx_params);
+
    // tokenize the prompt

    std::vector<llama_token> tokens_list;
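
The hunk above replaces the llama_init_from_gpt_params helper with explicit model and context creation. A minimal sketch of the new initialization flow in isolation, assuming the llama.h API at this revision (the function name and error message are illustrative only):

#include <cstdio>

#include "llama.h"

// illustrative helper: load a model, create a context, then tear everything down
static int init_and_free(const char * model_path, bool numa) {
    llama_backend_init(numa);

    llama_context_params ctx_params = llama_context_default_params();

    llama_model * model = llama_load_model_from_file(model_path, ctx_params);
    if (model == NULL) {
        fprintf(stderr, "error: unable to load model\n");
        return 1;
    }

    llama_context * ctx = llama_new_context_with_model(model, ctx_params);

    // ... tokenize, evaluate and sample here ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();

    return 0;
}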
@@ -54,7 +55,7 @@ int main(int argc, char ** argv) {
    const int max_context_size = llama_n_ctx(ctx);
    const int max_tokens_list_size = max_context_size - 4;

-    if ((int)tokens_list.size() > max_tokens_list_size) {
+    if ((int) tokens_list.size() > max_tokens_list_size) {
        fprintf(stderr, "%s: error: prompt too long (%d tokens, max %d)\n", __func__, (int) tokens_list.size(), max_tokens_list_size);
        return 1;
    }
@@ -74,7 +75,9 @@ int main(int argc, char ** argv) {
    // tokens (see "infinite text generation via context swapping" in the main example), but in this minimalist
    // example, we will just stop the loop once this cache is full or once an end of stream is detected.

-    while (llama_get_kv_cache_token_count( ctx ) < max_context_size) {
+    const int n_gen = std::min(32, max_context_size);
+
+    while (llama_get_kv_cache_token_count(ctx) < n_gen) {
        // evaluate the transformer

        if (llama_eval(ctx, tokens_list.data(), int(tokens_list.size()), llama_get_kv_cache_token_count(ctx), params.n_threads)) {
@@ -114,13 +117,14 @@ int main(int argc, char ** argv) {

        // push this new token for next evaluation
        tokens_list.push_back(new_token_id);
-
    }

    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

+    fprintf(stderr, "\n\n");
+
    return 0;
}
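
The last two hunks together cap generation at roughly 32 tokens instead of running until the KV cache is full. A minimal sketch of the resulting loop, assuming the llama.h API at this revision; the helper name is made up, and a plain greedy argmax stands in for the sampling code that this diff does not show:

#include <algorithm>
#include <cstdio>
#include <vector>

#include "llama.h"

// illustrative helper: generate up to n_gen tokens on top of the prompt in tokens_list
static void generate_capped(llama_context * ctx, std::vector<llama_token> & tokens_list, int n_threads) {
    const int max_context_size = llama_n_ctx(ctx);
    const int n_gen            = std::min(32, max_context_size);

    while (llama_get_kv_cache_token_count(ctx) < n_gen) {
        // evaluate the pending tokens at the current position in the KV cache
        if (llama_eval(ctx, tokens_list.data(), int(tokens_list.size()), llama_get_kv_cache_token_count(ctx), n_threads)) {
            fprintf(stderr, "llama_eval failed\n");
            return;
        }
        tokens_list.clear();

        // pick the most likely next token (the real example samples with the llama_sample_* API
        // and also stops on the end-of-stream token)
        const float * logits  = llama_get_logits(ctx);
        const int     n_vocab = llama_n_vocab(ctx);

        const llama_token new_token_id = (llama_token) (std::max_element(logits, logits + n_vocab) - logits);

        // push the new token so that only it is evaluated on the next iteration
        tokens_list.push_back(new_token_id);
    }
}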
