From 0209d39526db18501c5141b109b431f42b359b89 Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Fri, 13 Oct 2023 10:33:07 +0200 Subject: [PATCH 01/31] wip llava python bindings compatibility --- examples/llava/CMakeLists.txt | 3 +++ examples/llava/clip.cpp | 30 +++++++++++++++------ examples/llava/clip.h | 1 + examples/llava/llava.cpp | 49 ++++++++++++++++++++--------------- examples/llava/test-llava.cpp | 6 +++++ 5 files changed, 60 insertions(+), 29 deletions(-) create mode 100644 examples/llava/test-llava.cpp diff --git a/examples/llava/CMakeLists.txt b/examples/llava/CMakeLists.txt index d02e6ab461ff2..d04dcc5c53a23 100644 --- a/examples/llava/CMakeLists.txt +++ b/examples/llava/CMakeLists.txt @@ -18,3 +18,6 @@ target_compile_features(${TARGET} PRIVATE cxx_std_11) if(TARGET BUILD_INFO) add_dependencies(${TARGET} BUILD_INFO) endif() + +unset(TARGET) +llama_build_and_test_executable(test-llava.cpp) diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index f4258b34d7f16..5bb2e4c3718cb 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -682,25 +682,39 @@ clip_image_u8 * make_clip_image_u8() { return new clip_image_u8(); } clip_image_f32 * make_clip_image_f32() { return new clip_image_f32(); } -bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) { - int nx, ny, nc; - auto data = stbi_load(fname, &nx, &ny, &nc, 3); - if (!data) { - fprintf(stderr, "%s: failed to load '%s'\n", __func__, fname); - return false; - } - +static void build_clip_img_from_data(const stbi_uc * data, int nx, int ny, clip_image_u8 * img) { img->nx = nx; img->ny = ny; img->size = nx * ny * 3; img->data = new uint8_t[img->size](); memcpy(img->data, data, img->size); +} +bool clip_image_load_from_bytes(const unsigned char * bytes, int bytes_length, clip_image_u8 * img) { + int nx, ny, nc; + auto data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3); + if (!data) { + fprintf(stderr, "%s: failed to decode image bytes\n", __func__); + 
return false; + } + build_clip_img_from_data(data, nx, ny, img); stbi_image_free(data); + return true; +} +bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) { + int nx, ny, nc; + auto data = stbi_load(fname, &nx, &ny, &nc, 3); + if (!data) { + fprintf(stderr, "%s: failed to load image '%s'\n", __func__, fname); + return false; + } + build_clip_img_from_data(data, nx, ny, img); + stbi_image_free(data); return true; } + // normalize: x = (x - mean) / std // TODO: implement bicubic interpolation instead of linear. bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32 * res, const bool pad2square) { diff --git a/examples/llava/clip.h b/examples/llava/clip.h index 3d7261e299a35..c0b53d0b8dbdf 100644 --- a/examples/llava/clip.h +++ b/examples/llava/clip.h @@ -58,6 +58,7 @@ struct clip_image_f32_batch { struct clip_image_u8 * make_clip_image_u8(); struct clip_image_f32 * make_clip_image_f32(); bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img); +bool clip_image_load_from_bytes(const unsigned char * bytes, int bytes_length, clip_image_u8 * img); bool clip_image_preprocess(const struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32 * res, const bool pad2square); bool clip_image_encode(const struct clip_ctx * ctx, const int n_threads, struct clip_image_f32 * img, float * vec); diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index 14dacc7807e8d..c55d4f165e7db 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ -12,6 +12,28 @@ static void show_additional_info(int /*argc*/, char ** argv) { printf(" note: a lower temperature value like 0.1 is recommended for better quality.\n"); } +static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_embd, int * n_img_pos, float * t_img_enc_ms) { + clip_image_f32 img_res; + if (!clip_image_preprocess(ctx_clip, img, 
&img_res, /*pad2square =*/ true)) { + fprintf(stderr, "%s: unable to preprocess image\n", __func__); + + return false; + } + + *n_img_pos = clip_n_patches(ctx_clip); + *n_img_embd = clip_n_mmproj_embd(ctx_clip); + + const int64_t t_img_enc_start_us = ggml_time_us(); + if (!clip_image_encode(ctx_clip, n_threads, &img_res, image_embd)) { + fprintf(stderr, "Unable to encode image\n"); + + return false; + } + const int64_t t_img_enc_end_us = ggml_time_us(); + *t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0; + return true; +} + int main(int argc, char ** argv) { ggml_time_init(); @@ -39,40 +61,27 @@ int main(int argc, char ** argv) { // load and preprocess the image clip_image_u8 img; - clip_image_f32 img_res; if (!clip_image_load_from_file(img_path, &img)) { fprintf(stderr, "%s: is %s really an image file?\n", __func__, img_path); - clip_free(ctx_clip); return 1; } - if (!clip_image_preprocess(ctx_clip, &img, &img_res, /*pad2square =*/ true)) { - fprintf(stderr, "%s: unable to preprocess %s\n", __func__, img_path); - - clip_free(ctx_clip); - return 1; - } - - int n_img_pos = clip_n_patches(ctx_clip); - int n_img_embd = clip_n_mmproj_embd(ctx_clip); - float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)); - if (!image_embd) { fprintf(stderr, "Unable to allocate memory for image embeddings\n"); - return 1; } - const int64_t t_img_enc_start_us = ggml_time_us(); - if (!clip_image_encode(ctx_clip, params.n_threads, &img_res, image_embd)) { - fprintf(stderr, "Unable to encode image\n"); - + int n_img_embd; + int n_img_pos; + float t_img_enc_ms; + if (!encode_image_with_clip(ctx_clip, params.n_threads, &img, image_embd, &n_img_embd, &n_img_pos, &t_img_enc_ms)) { + fprintf(stderr, "%s: cannot encode image, aborting\n", __func__); + clip_free(ctx_clip); return 1; } - const int64_t t_img_enc_end_us = ggml_time_us(); // we get the embeddings, free up the memory required for CLIP clip_free(ctx_clip); @@ -140,8 +149,6 @@ int main(int argc, char ** argv) 
{ printf("\n"); { - const float t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0; - printf("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / n_img_pos); } diff --git a/examples/llava/test-llava.cpp b/examples/llava/test-llava.cpp new file mode 100644 index 0000000000000..6e8a0136797e2 --- /dev/null +++ b/examples/llava/test-llava.cpp @@ -0,0 +1,6 @@ +#include + +int main(int argc, char ** argv) { + printf("dummy llava test\n"); + return 0; +} From 3c10d9f3de3e79bdba18f3745e3cc56b0aa45e67 Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Fri, 13 Oct 2023 12:21:44 +0200 Subject: [PATCH 02/31] add external llava API --- examples/llava/llava.cpp | 121 ++++++++++++++++++++++++--------------- examples/llava/llava.h | 31 ++++++++++ 2 files changed, 106 insertions(+), 46 deletions(-) create mode 100644 examples/llava/llava.h diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index c55d4f165e7db..22e6252362b70 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ -2,6 +2,7 @@ #include "llava-utils.h" #include "common.h" #include "llama.h" +#include "llava.h" #include #include @@ -34,27 +35,13 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli return true; } -int main(int argc, char ** argv) { - ggml_time_init(); - - gpt_params params; - - if (!gpt_params_parse(argc, argv, params)) { - show_additional_info(argc, argv); - return 1; - } - - if (params.mmproj.empty() || params.image.empty()) { - gpt_print_usage(argc, argv, params); - show_additional_info(argc, argv); - return 1; - } +struct llava_context * llava_init(gpt_params * params) { - const char * clip_path = params.mmproj.c_str(); - const char * img_path = params.image.c_str(); + const char * clip_path = params->mmproj.c_str(); + const char * img_path = params->image.c_str(); - if (params.prompt.empty()) { - params.prompt = "describe the image in detail."; + if 
(params->prompt.empty()) { + params->prompt = "describe the image in detail."; } auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1); @@ -65,47 +52,48 @@ int main(int argc, char ** argv) { if (!clip_image_load_from_file(img_path, &img)) { fprintf(stderr, "%s: is %s really an image file?\n", __func__, img_path); clip_free(ctx_clip); - return 1; + return NULL; } float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)); if (!image_embd) { fprintf(stderr, "Unable to allocate memory for image embeddings\n"); - return 1; + return NULL; } int n_img_embd; int n_img_pos; float t_img_enc_ms; - if (!encode_image_with_clip(ctx_clip, params.n_threads, &img, image_embd, &n_img_embd, &n_img_pos, &t_img_enc_ms)) { + if (!encode_image_with_clip(ctx_clip, params->n_threads, &img, image_embd, &n_img_embd, &n_img_pos, &t_img_enc_ms)) { fprintf(stderr, "%s: cannot encode image, aborting\n", __func__); clip_free(ctx_clip); - return 1; + return NULL; } // we get the embeddings, free up the memory required for CLIP clip_free(ctx_clip); + ctx_clip = NULL; - llama_backend_init(params.numa); + llama_backend_init(params->numa); llama_model_params model_params = llama_model_default_params(); - llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params); + llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params); if (model == NULL) { fprintf(stderr , "%s: error: unable to load model\n" , __func__); - return 1; + return NULL; } llama_context_params ctx_params = llama_context_default_params(); - ctx_params.n_ctx = params.n_ctx < 2048 ? 2048 : params.n_ctx; // we need a longer context size to process image embeddings - ctx_params.n_threads = params.n_threads; - ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch; + ctx_params.n_ctx = params->n_ctx < 2048 ? 
2048 : params->n_ctx; // we need a longer context size to process image embeddings + ctx_params.n_threads = params->n_threads; + ctx_params.n_threads_batch = params->n_threads_batch == -1 ? params->n_threads : params->n_threads_batch; llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params); if (ctx_llama == NULL) { fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); - return 1; + return NULL; } // make sure that the correct mmproj was used, i.e., compare apples to apples @@ -118,28 +106,49 @@ int main(int argc, char ** argv) { llama_backend_free(); free(image_embd); - return 1; + return NULL; } - // process the prompt - // llava chat format is "USER: \n\nASSISTANT:" + { + printf("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / n_img_pos); + } + + + auto ctx_llava = (struct llava_context *)malloc(sizeof(llava_context)); + + ctx_llava->ctx_llama = ctx_llama; + ctx_llava->ctx_clip = ctx_clip; + ctx_llava->model = model; + ctx_llava->image_embd = image_embd; + ctx_llava->n_img_pos = n_img_pos; + return ctx_llava; +} + +void llava_free(struct llava_context * ctx_llava) { + llama_free(ctx_llava->ctx_llama); + llama_free_model(ctx_llava->model); + llama_backend_free(); + free(ctx_llava->image_embd); +} + +void llava_process_prompt(struct llava_context * ctx_llava, gpt_params * params, const char * prompt) { int n_past = 0; - const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict; + const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict; // GG: are we sure that the should be a trailing whitespace at the end of this string? - eval_string(ctx_llama, "A chat between a curious human and an artificial intelligence assistant. 
The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER: ", params.n_batch, &n_past); - eval_image_embd(ctx_llama, image_embd, n_img_pos, params.n_batch, &n_past); - eval_string(ctx_llama, params.prompt.c_str(), params.n_batch, &n_past); - eval_string(ctx_llama, "\nASSISTANT:", params.n_batch, &n_past); + eval_string(ctx_llava->ctx_llama, "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER: ", params->n_batch, &n_past); + eval_image_embd(ctx_llava->ctx_llama, ctx_llava->image_embd, ctx_llava->n_img_pos, params->n_batch, &n_past); + eval_string(ctx_llava->ctx_llama, prompt, params->n_batch, &n_past); + eval_string(ctx_llava->ctx_llama, "\nASSISTANT:", params->n_batch, &n_past); // generate the response printf("\n"); for (int i = 0; i < max_tgt_len; i++) { - const char * tmp = sample(ctx_llama, params, &n_past); + const char * tmp = sample(ctx_llava->ctx_llama, *params, &n_past); if (strcmp(tmp, "") == 0) break; printf("%s", tmp); @@ -148,16 +157,36 @@ int main(int argc, char ** argv) { printf("\n"); - { - printf("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / n_img_pos); +} + + +int main(int argc, char ** argv) { + ggml_time_init(); + + gpt_params params; + + if (!gpt_params_parse(argc, argv, params)) { + show_additional_info(argc, argv); + return 1; + } + if (params.mmproj.empty() || params.image.empty()) { + gpt_print_usage(argc, argv, params); + show_additional_info(argc, argv); + return 1; } - llama_print_timings(ctx_llama); + auto ctx_llava = llava_init(¶ms); + if (ctx_llava == NULL) { + fprintf(stderr, "%s: error: failed to init llava\n", __func__); + return 1; + } - llama_free(ctx_llama); - llama_free_model(model); - llama_backend_free(); - free(image_embd); + // process the prompt + // llava chat format is "USER: \n\nASSISTANT:" + 
llava_process_prompt(ctx_llava, ¶ms, params.prompt.c_str()); + + llama_print_timings(ctx_llava->ctx_llama); + llava_free(ctx_llava); return 0; } diff --git a/examples/llava/llava.h b/examples/llava/llava.h new file mode 100644 index 0000000000000..4f229a08cb9cb --- /dev/null +++ b/examples/llava/llava.h @@ -0,0 +1,31 @@ +#ifndef LLAVA_H +#define LLAVA_H + +#include "ggml.h" + +struct clip_ctx; + +#ifdef __cplusplus +extern "C" { +#endif + +struct llava_context { + struct clip_ctx * ctx_clip = NULL; + struct llama_context * ctx_llama = NULL; + struct llama_model * model = NULL; + + int n_img_pos = 0; + float * image_embd = NULL; +}; + +struct llava_context * llava_init(gpt_params * params); +void llava_free(struct llava_context * ctx_llava); + +void llava_process_prompt(struct llava_context * ctx_llava, gpt_params * params, const char * prompt); + + +#ifdef __cplusplus +} +#endif + +#endif From 770dc9da0d5724fc31dba1f6bf0ea93b201b137b Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Fri, 13 Oct 2023 17:53:17 +0200 Subject: [PATCH 03/31] add base64 in-prompt image support --- examples/llava/base64.hpp | 392 ++++++++++++++++++++++++++++++++++++++ examples/llava/llava.cpp | 84 +++++++- 2 files changed, 468 insertions(+), 8 deletions(-) create mode 100644 examples/llava/base64.hpp diff --git a/examples/llava/base64.hpp b/examples/llava/base64.hpp new file mode 100644 index 0000000000000..9a19238251faf --- /dev/null +++ b/examples/llava/base64.hpp @@ -0,0 +1,392 @@ +/* +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. 
We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to +*/ + +#ifndef PUBLIC_DOMAIN_BASE64_HPP_ +#define PUBLIC_DOMAIN_BASE64_HPP_ + +#include +#include +#include +#include + +class base64_error : public std::runtime_error +{ +public: + using std::runtime_error::runtime_error; +}; + +class base64 +{ +public: + enum class alphabet + { + /** the alphabet is detected automatically */ + auto_, + /** the standard base64 alphabet is used */ + standard, + /** like `standard` except that the characters `+` and `/` are replaced by `-` and `_` respectively*/ + url_filename_safe + }; + + enum class decoding_behavior + { + /** if the input is not padded, the remaining bits are ignored */ + moderate, + /** if a padding character is encounter decoding is finished */ + loose + }; + + /** + Encodes all the elements from `in_begin` to `in_end` to `out`. + + @warning The source and destination cannot overlap. The destination must be able to hold at least + `required_encode_size(std::distance(in_begin, in_end))`, otherwise the behavior depends on the output iterator. 
+ + @tparam Input_iterator the source; the returned elements are cast to `std::uint8_t` and should not be greater than + 8 bits + @tparam Output_iterator the destination; the elements written to it are from the type `char` + @param in_begin the beginning of the source + @param in_end the ending of the source + @param out the destination iterator + @param alphabet which alphabet should be used + @returns the iterator to the next element past the last element copied + @throws see `Input_iterator` and `Output_iterator` + */ + template + static Output_iterator encode(Input_iterator in_begin, Input_iterator in_end, Output_iterator out, + alphabet alphabet = alphabet::standard) + { + constexpr auto pad = '='; + const char* alpha = alphabet == alphabet::url_filename_safe + ? "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" + : "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + + while (in_begin != in_end) { + std::uint8_t i0 = 0, i1 = 0, i2 = 0; + + // first character + i0 = static_cast(*in_begin); + ++in_begin; + + *out = alpha[i0 >> 2 & 0x3f]; + ++out; + + // part of first character and second + if (in_begin != in_end) { + i1 = static_cast(*in_begin); + ++in_begin; + + *out = alpha[((i0 & 0x3) << 4) | (i1 >> 4 & 0x0f)]; + ++out; + } else { + *out = alpha[(i0 & 0x3) << 4]; + ++out; + + // last padding + *out = pad; + ++out; + + // last padding + *out = pad; + ++out; + + break; + } + + // part of second character and third + if (in_begin != in_end) { + i2 = static_cast(*in_begin); + ++in_begin; + + *out = alpha[((i1 & 0xf) << 2) | (i2 >> 6 & 0x03)]; + ++out; + } else { + *out = alpha[(i1 & 0xf) << 2]; + ++out; + + // last padding + *out = pad; + ++out; + + break; + } + + // rest of third + *out = alpha[i2 & 0x3f]; + ++out; + } + + return out; + } + /** + Encodes a string. 
+ + @param str the string that should be encoded + @param alphabet which alphabet should be used + @returns the encoded base64 string + @throws see base64::encode() + */ + static std::string encode(const std::string& str, alphabet alphabet = alphabet::standard) + { + std::string result; + + result.reserve(required_encode_size(str.length()) + 1); + + encode(str.begin(), str.end(), std::back_inserter(result), alphabet); + + return result; + } + /** + Encodes a char array. + + @param buffer the char array + @param size the size of the array + @param alphabet which alphabet should be used + @returns the encoded string + */ + static std::string encode(const char* buffer, std::size_t size, alphabet alphabet = alphabet::standard) + { + std::string result; + + result.reserve(required_encode_size(size) + 1); + + encode(buffer, buffer + size, std::back_inserter(result), alphabet); + + return result; + } + /** + Decodes all the elements from `in_begin` to `in_end` to `out`. `in_begin` may point to the same location as `out`, + in other words: inplace decoding is possible. + + @warning The destination must be able to hold at least `required_decode_size(std::distance(in_begin, in_end))`, + otherwise the behavior depends on the output iterator. 
+ + @tparam Input_iterator the source; the returned elements are cast to `char` + @tparam Output_iterator the destination; the elements written to it are from the type `std::uint8_t` + @param in_begin the beginning of the source + @param in_end the ending of the source + @param out the destination iterator + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @returns the iterator to the next element past the last element copied + @throws base64_error depending on the set behavior + @throws see `Input_iterator` and `Output_iterator` + */ + template + static Output_iterator decode(Input_iterator in_begin, Input_iterator in_end, Output_iterator out, + alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + //constexpr auto pad = '='; + std::uint8_t last = 0; + auto bits = 0; + + while (in_begin != in_end) { + auto c = *in_begin; + ++in_begin; + + if (c == '=') { + break; + } + + auto part = _base64_value(alphabet, c); + + // enough bits for one byte + if (bits + 6 >= 8) { + *out = (last << (8 - bits)) | (part >> (bits - 2)); + ++out; + + bits -= 2; + } else { + bits += 6; + } + + last = part; + } + + // check padding + if (behavior != decoding_behavior::loose) { + while (in_begin != in_end) { + auto c = *in_begin; + ++in_begin; + + if (c != '=') { + throw base64_error("invalid base64 character."); + } + } + } + + return out; + } + /** + Decodes a string. 
+ + @param str the base64 encoded string + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @returns the decoded string + @throws see base64::decode() + */ + static std::string decode(const std::string& str, alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + std::string result; + + result.reserve(max_decode_size(str.length())); + + decode(str.begin(), str.end(), std::back_inserter(result), alphabet, behavior); + + return result; + } + /** + Decodes a string. + + @param buffer the base64 encoded buffer + @param size the size of the buffer + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @returns the decoded string + @throws see base64::decode() + */ + static std::string decode(const char* buffer, std::size_t size, alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + std::string result; + + result.reserve(max_decode_size(size)); + + decode(buffer, buffer + size, std::back_inserter(result), alphabet, behavior); + + return result; + } + /** + Decodes a string inplace. + + @param[in,out] str the base64 encoded string + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @throws base64::decode_inplace() + */ + static void decode_inplace(std::string& str, alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + str.resize(decode(str.begin(), str.end(), str.begin(), alphabet, behavior) - str.begin()); + } + /** + Decodes a char array inplace. 
+ + @param[in,out] str the string array + @param size the length of the array + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @returns the pointer to the next element past the last element decoded + @throws base64::decode_inplace() + */ + static char* decode_inplace(char* str, std::size_t size, alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + return decode(str, str + size, str, alphabet, behavior); + } + /** + Returns the required decoding size for a given size. The value is calculated with the following formula: + + $$ + \lceil \frac{size}{4} \rceil \cdot 3 + $$ + + @param size the size of the encoded input + @returns the size of the resulting decoded buffer; this the absolute maximum + */ + static std::size_t max_decode_size(std::size_t size) noexcept + { + return (size / 4 + (size % 4 ? 1 : 0)) * 3; + } + /** + Returns the required encoding size for a given size. The value is calculated with the following formula: + + $$ + \lceil \frac{size}{3} \rceil \cdot 4 + $$ + + @param size the size of the decoded input + @returns the size of the resulting encoded buffer + */ + static std::size_t required_encode_size(std::size_t size) noexcept + { + return (size / 3 + (size % 3 ? 
1 : 0)) * 4; + } + +private: + static std::uint8_t _base64_value(alphabet& alphabet, char c) + { + if (c >= 'A' && c <= 'Z') { + return c - 'A'; + } else if (c >= 'a' && c <= 'z') { + return c - 'a' + 26; + } else if (c >= '0' && c <= '9') { + return c - '0' + 52; + } + + // comes down to alphabet + if (alphabet == alphabet::standard) { + if (c == '+') { + return 62; + } else if (c == '/') { + return 63; + } + } else if (alphabet == alphabet::url_filename_safe) { + if (c == '-') { + return 62; + } else if (c == '_') { + return 63; + } + } // auto detect + else { + if (c == '+') { + alphabet = alphabet::standard; + + return 62; + } else if (c == '/') { + alphabet = alphabet::standard; + + return 63; + } else if (c == '-') { + alphabet = alphabet::url_filename_safe; + + return 62; + } else if (c == '_') { + alphabet = alphabet::url_filename_safe; + + return 63; + } + } + + throw base64_error("invalid base64 character."); + } +}; + +#endif // !PUBLIC_DOMAIN_BASE64_HPP_ diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index 22e6252362b70..bfa2f72a55cc6 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ -8,6 +8,8 @@ #include #include +#include "base64.hpp" + static void show_additional_info(int /*argc*/, char ** argv) { printf("\n example usage: %s -m --mmproj --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]); printf(" note: a lower temperature value like 0.1 is recommended for better quality.\n"); @@ -35,24 +37,90 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli return true; } +static const char* IMG_BASE64_TAG_BEGIN = ""; + +static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) { + begin_out = prompt.find(IMG_BASE64_TAG_BEGIN); + end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 
0UL : begin_out); +} + +static bool prompt_contains_image(const std::string& prompt) { + size_t begin, end; + find_image_tag_in_prompt(prompt, begin, end); + return (begin != std::string::npos); +} + +// replaces the base64 image tag in the prompt with `replacement` +static bool get_image_from_prompt(const std::string& prompt, clip_image_u8 * img) { + size_t img_base64_str_start, img_base64_str_end; + find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end); + if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) { + fprintf(stderr, "%s: invalid base64 image tag. must be %s%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END); + return false; + } + + auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN); + auto base64_bytes_count = img_base64_str_end - base64_bytes_start; + auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count ); + printf("base64_str: '%s'\n", base64_str.c_str()); + + auto required_bytes = base64::required_encode_size(base64_str.size()); + auto img_bytes = std::vector(required_bytes); + auto img_bytes_end = base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin()); + auto img_bytes_len = img_bytes_end - img_bytes.begin(); + + auto img_loaded_ok = clip_image_load_from_bytes(img_bytes.data(), img_bytes_len, img); + if (!img_loaded_ok) { + fprintf(stderr, "%s: could not load image from base64 string.\n", __func__); + return false; + } + + return true; +} + +static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") { + size_t begin, end; + find_image_tag_in_prompt(prompt, begin, end); + if (begin == std::string::npos || end == std::string::npos) { + return prompt; + } + auto pre = prompt.substr(0, begin); + auto post = prompt.substr(end+1); + return pre + replacement + post; +} + struct llava_context * llava_init(gpt_params * params) { const char * clip_path = params->mmproj.c_str(); const char * 
img_path = params->image.c_str(); - if (params->prompt.empty()) { - params->prompt = "describe the image in detail."; + auto prompt = params->prompt; + if (prompt.empty()) { + prompt = "describe the image in detail."; } - + auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1); // load and preprocess the image clip_image_u8 img; - if (!clip_image_load_from_file(img_path, &img)) { - fprintf(stderr, "%s: is %s really an image file?\n", __func__, img_path); - clip_free(ctx_clip); - return NULL; + if (prompt_contains_image(prompt)) { + if (img_path) { + printf("using base64 encoded image instead of command line image path\n"); + } + if (!get_image_from_prompt(prompt, &img)) { + fprintf(stderr, "%s: can't load image from prompt\n", __func__); + clip_free(ctx_clip); + return NULL; + } + prompt = remove_image_from_prompt(prompt); + } else { + if (!clip_image_load_from_file(img_path, &img)) { + fprintf(stderr, "%s: is %s really an image file?\n", __func__, img_path); + clip_free(ctx_clip); + return NULL; + } } float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)); @@ -169,7 +237,7 @@ int main(int argc, char ** argv) { show_additional_info(argc, argv); return 1; } - if (params.mmproj.empty() || params.image.empty()) { + if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) { gpt_print_usage(argc, argv, params); show_additional_info(argc, argv); return 1; From 8224ca5775b7f09f088abf2379fcac25270085d4 Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sat, 14 Oct 2023 10:43:13 +0200 Subject: [PATCH 04/31] wip refactor image loading --- examples/llava/llava-utils.h | 52 +++++++++++++++ examples/llava/llava.cpp | 122 +++++++++++------------------------ 2 files changed, 91 insertions(+), 83 deletions(-) diff --git a/examples/llava/llava-utils.h b/examples/llava/llava-utils.h index 79e237c86f508..db8af6d6c04d4 100644 --- a/examples/llava/llava-utils.h +++ b/examples/llava/llava-utils.h @@ -143,3 +143,55 @@ inline const 
char * sample(struct llama_context * ctx_llama, gpt_params & params eval_id(ctx_llama, id, n_past); return ret.c_str(); } + +static const char* IMG_BASE64_TAG_BEGIN = ""; + +static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) { + begin_out = prompt.find(IMG_BASE64_TAG_BEGIN); + end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out); +} + +static bool prompt_contains_image(const std::string& prompt) { + size_t begin, end; + find_image_tag_in_prompt(prompt, begin, end); + return (begin != std::string::npos); +} + +// replaces the base64 image tag in the prompt with `replacement` +static bool get_image_from_prompt(const std::string& prompt, clip_image_u8 * img) { + size_t img_base64_str_start, img_base64_str_end; + find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end); + if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) { + fprintf(stderr, "%s: invalid base64 image tag. 
must be %s%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END); + return false; + } + + auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN); + auto base64_bytes_count = img_base64_str_end - base64_bytes_start; + auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count ); + + auto required_bytes = base64::required_encode_size(base64_str.size()); + auto img_bytes = std::vector(required_bytes); + auto img_bytes_end = base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin()); + auto img_bytes_len = img_bytes_end - img_bytes.begin(); + + auto img_loaded_ok = clip_image_load_from_bytes(img_bytes.data(), img_bytes_len, img); + if (!img_loaded_ok) { + fprintf(stderr, "%s: could not load image from base64 string.\n", __func__); + return false; + } + + return true; +} + +static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") { + size_t begin, end; + find_image_tag_in_prompt(prompt, begin, end); + if (begin == std::string::npos || end == std::string::npos) { + return prompt; + } + auto pre = prompt.substr(0, begin); + auto post = prompt.substr(end+1); + return pre + replacement + post; +} diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index bfa2f72a55cc6..7781d4222187b 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ -37,58 +37,28 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli return true; } -static const char* IMG_BASE64_TAG_BEGIN = ""; +bool llava_build_img_embed(struct llava_context * ctx_llava, const clip_image_u8 * img) { -static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) { - begin_out = prompt.find(IMG_BASE64_TAG_BEGIN); - end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 
0UL : begin_out); -} - -static bool prompt_contains_image(const std::string& prompt) { - size_t begin, end; - find_image_tag_in_prompt(prompt, begin, end); - return (begin != std::string::npos); -} - -// replaces the base64 image tag in the prompt with `replacement` -static bool get_image_from_prompt(const std::string& prompt, clip_image_u8 * img) { - size_t img_base64_str_start, img_base64_str_end; - find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end); - if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) { - fprintf(stderr, "%s: invalid base64 image tag. must be %s%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END); + float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)); + if (!image_embd) { + fprintf(stderr, "Unable to allocate memory for image embeddings\n"); + free(image_embd); return false; } - auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN); - auto base64_bytes_count = img_base64_str_end - base64_bytes_start; - auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count ); - printf("base64_str: '%s'\n", base64_str.c_str()); - - auto required_bytes = base64::required_encode_size(base64_str.size()); - auto img_bytes = std::vector(required_bytes); - auto img_bytes_end = base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin()); - auto img_bytes_len = img_bytes_end - img_bytes.begin(); - - auto img_loaded_ok = clip_image_load_from_bytes(img_bytes.data(), img_bytes_len, img); - if (!img_loaded_ok) { - fprintf(stderr, "%s: could not load image from base64 string.\n", __func__); + int n_img_embd; + int n_img_pos; + float t_img_enc_ms; + if (!encode_image_with_clip(ctx_clip, params->n_threads, &img, image_embd, &n_img_embd, &n_img_pos, &t_img_enc_ms)) { + fprintf(stderr, "%s: cannot encode image, aborting\n", __func__); + free(image_embd); return false; } - return true; + ctx_llava->image_embd = image_embd; + retur true; } -static 
std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") { - size_t begin, end; - find_image_tag_in_prompt(prompt, begin, end); - if (begin == std::string::npos || end == std::string::npos) { - return prompt; - } - auto pre = prompt.substr(0, begin); - auto post = prompt.substr(end+1); - return pre + replacement + post; -} struct llava_context * llava_init(gpt_params * params) { @@ -102,46 +72,6 @@ struct llava_context * llava_init(gpt_params * params) { auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1); - // load and preprocess the image - clip_image_u8 img; - - if (prompt_contains_image(prompt)) { - if (img_path) { - printf("using base64 encoded image instead of command line image path\n"); - } - if (!get_image_from_prompt(prompt, &img)) { - fprintf(stderr, "%s: can't load image from prompt\n", __func__); - clip_free(ctx_clip); - return NULL; - } - prompt = remove_image_from_prompt(prompt); - } else { - if (!clip_image_load_from_file(img_path, &img)) { - fprintf(stderr, "%s: is %s really an image file?\n", __func__, img_path); - clip_free(ctx_clip); - return NULL; - } - } - - float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)); - if (!image_embd) { - fprintf(stderr, "Unable to allocate memory for image embeddings\n"); - return NULL; - } - - int n_img_embd; - int n_img_pos; - float t_img_enc_ms; - if (!encode_image_with_clip(ctx_clip, params->n_threads, &img, image_embd, &n_img_embd, &n_img_pos, &t_img_enc_ms)) { - fprintf(stderr, "%s: cannot encode image, aborting\n", __func__); - clip_free(ctx_clip); - return NULL; - } - - // we get the embeddings, free up the memory required for CLIP - clip_free(ctx_clip); - ctx_clip = NULL; - llama_backend_init(params->numa); llama_model_params model_params = llama_model_default_params(); @@ -194,6 +124,11 @@ struct llava_context * llava_init(gpt_params * params) { } void llava_free(struct llava_context * ctx_llava) { + if (ctx_llava->ctx_clip) { + 
clip_free(ctx_clip); + ctx_llava->ctx_clip = NULL; + } + llama_free(ctx_llava->ctx_llama); llama_free_model(ctx_llava->model); llama_backend_free(); @@ -249,6 +184,27 @@ int main(int argc, char ** argv) { return 1; } + // load and preprocess the image + clip_image_u8 img; + if (prompt_contains_image(prompt)) { + if (img_path) { + printf("using base64 encoded image instead of command line image path\n"); + } + if (!get_image_from_prompt(prompt, &img)) { + fprintf(stderr, "%s: can't load image from prompt\n", __func__); + clip_free(ctx_clip); + return NULL; + } + prompt = remove_image_from_prompt(prompt); + } else { + if (!clip_image_load_from_file(img_path, &img)) { + fprintf(stderr, "%s: is %s really an image file?\n", __func__, img_path); + clip_free(ctx_clip); + return NULL; + } + } + llava_build_img_embed(ctx_llava, &img); + // process the prompt // llava chat format is "USER: \n\nASSISTANT:" llava_process_prompt(ctx_llava, ¶ms, params.prompt.c_str()); From c6932085febdd3f4794bf058e39afbe5dee6d952 Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sat, 14 Oct 2023 11:51:33 +0200 Subject: [PATCH 05/31] refactor image load out of llava init --- examples/llava/llava-utils.h | 2 + examples/llava/llava.cpp | 83 +++++++++++++++++------------------- examples/llava/llava.h | 6 +-- 3 files changed, 45 insertions(+), 46 deletions(-) diff --git a/examples/llava/llava-utils.h b/examples/llava/llava-utils.h index db8af6d6c04d4..b794c39cc9cef 100644 --- a/examples/llava/llava-utils.h +++ b/examples/llava/llava-utils.h @@ -5,6 +5,8 @@ #include "common.h" #include "llama.h" +#include "base64.hpp" + #include #include #include diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index 7781d4222187b..cf19a2f78d095 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ -15,7 +15,8 @@ static void show_additional_info(int /*argc*/, char ** argv) { printf(" note: a lower temperature value like 0.1 is recommended for better quality.\n"); } -static bool 
encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_embd, int * n_img_pos, float * t_img_enc_ms) { +static bool encode_image_with_clip(llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_embd, int * n_img_pos) { + auto ctx_clip = ctx_llava->ctx_clip; clip_image_f32 img_res; if (!clip_image_preprocess(ctx_clip, img, &img_res, /*pad2square =*/ true)) { fprintf(stderr, "%s: unable to preprocess image\n", __func__); @@ -26,6 +27,14 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli *n_img_pos = clip_n_patches(ctx_clip); *n_img_embd = clip_n_mmproj_embd(ctx_clip); + // make sure that the correct mmproj was used, i.e., compare apples to apples + int n_llama_embd = llama_n_embd(llama_get_model(ctx_llava->ctx_llama)); + if (*n_img_embd != n_llama_embd) { + printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). 
Make sure that you use the correct mmproj file.\n", __func__, *n_img_embd, n_llama_embd); + + return false; + } + const int64_t t_img_enc_start_us = ggml_time_us(); if (!clip_image_encode(ctx_clip, n_threads, &img_res, image_embd)) { fprintf(stderr, "Unable to encode image\n"); @@ -33,12 +42,18 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli return false; } const int64_t t_img_enc_end_us = ggml_time_us(); - *t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0; + float t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0; + + { + printf("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / *n_img_pos); + } + return true; } -bool llava_build_img_embed(struct llava_context * ctx_llava, const clip_image_u8 * img) { +static bool llava_build_img_embed(struct llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out) { + auto ctx_clip = ctx_llava->ctx_clip; float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)); if (!image_embd) { fprintf(stderr, "Unable to allocate memory for image embeddings\n"); @@ -46,24 +61,22 @@ bool llava_build_img_embed(struct llava_context * ctx_llava, const clip_image_u8 return false; } + int n_image_pos; int n_img_embd; - int n_img_pos; - float t_img_enc_ms; - if (!encode_image_with_clip(ctx_clip, params->n_threads, &img, image_embd, &n_img_embd, &n_img_pos, &t_img_enc_ms)) { + if (!encode_image_with_clip(ctx_llava, n_threads, img, image_embd, &n_img_embd, &n_image_pos)) { fprintf(stderr, "%s: cannot encode image, aborting\n", __func__); free(image_embd); return false; } - - ctx_llava->image_embd = image_embd; - retur true; + *image_embd_out = image_embd; + *n_image_pos_out = n_image_pos; + return true; } struct llava_context * llava_init(gpt_params * params) { const char * clip_path = params->mmproj.c_str(); - const char * img_path = params->image.c_str(); auto 
prompt = params->prompt; if (prompt.empty()) { @@ -94,55 +107,36 @@ struct llava_context * llava_init(gpt_params * params) { return NULL; } - // make sure that the correct mmproj was used, i.e., compare apples to apples - int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama)); - if (n_img_embd != n_llama_embd) { - printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_img_embd, n_llama_embd); - - llama_free(ctx_llama); - llama_free_model(model); - llama_backend_free(); - free(image_embd); - - return NULL; - } - - { - printf("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / n_img_pos); - } - auto ctx_llava = (struct llava_context *)malloc(sizeof(llava_context)); ctx_llava->ctx_llama = ctx_llama; ctx_llava->ctx_clip = ctx_clip; ctx_llava->model = model; - ctx_llava->image_embd = image_embd; - ctx_llava->n_img_pos = n_img_pos; return ctx_llava; } void llava_free(struct llava_context * ctx_llava) { if (ctx_llava->ctx_clip) { - clip_free(ctx_clip); + clip_free(ctx_llava->ctx_clip); ctx_llava->ctx_clip = NULL; } llama_free(ctx_llava->ctx_llama); llama_free_model(ctx_llava->model); llama_backend_free(); - free(ctx_llava->image_embd); } -void llava_process_prompt(struct llava_context * ctx_llava, gpt_params * params, const char * prompt) { +static void llava_process_prompt(struct llava_context * ctx_llava, float * image_embd, int n_img_pos, gpt_params * params, const char * prompt) { int n_past = 0; const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict; + // llava chat format is "USER: \n\nASSISTANT:" // GG: are we sure that the should be a trailing whitespace at the end of this string? eval_string(ctx_llava->ctx_llama, "A chat between a curious human and an artificial intelligence assistant. 
The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER: ", params->n_batch, &n_past); - eval_image_embd(ctx_llava->ctx_llama, ctx_llava->image_embd, ctx_llava->n_img_pos, params->n_batch, &n_past); + eval_image_embd(ctx_llava->ctx_llama, image_embd, n_img_pos, params->n_batch, &n_past); eval_string(ctx_llava->ctx_llama, prompt, params->n_batch, &n_past); eval_string(ctx_llava->ctx_llama, "\nASSISTANT:", params->n_batch, &n_past); @@ -186,31 +180,34 @@ int main(int argc, char ** argv) { // load and preprocess the image clip_image_u8 img; + auto prompt = params.prompt; if (prompt_contains_image(prompt)) { - if (img_path) { + if (!params.image.empty()) { printf("using base64 encoded image instead of command line image path\n"); } if (!get_image_from_prompt(prompt, &img)) { fprintf(stderr, "%s: can't load image from prompt\n", __func__); - clip_free(ctx_clip); - return NULL; + llava_free(ctx_llava); + return 1; } prompt = remove_image_from_prompt(prompt); } else { - if (!clip_image_load_from_file(img_path, &img)) { - fprintf(stderr, "%s: is %s really an image file?\n", __func__, img_path); - clip_free(ctx_clip); - return NULL; + if (!clip_image_load_from_file(params.image.c_str(), &img)) { + fprintf(stderr, "%s: is %s really an image file?\n", __func__, params.image.c_str()); + llava_free(ctx_llava); + return 1; } } - llava_build_img_embed(ctx_llava, &img); + float * image_embd; + int n_image_pos; + llava_build_img_embed(ctx_llava, params.n_threads, &img, &image_embd, &n_image_pos); // process the prompt - // llava chat format is "USER: \n\nASSISTANT:" - llava_process_prompt(ctx_llava, ¶ms, params.prompt.c_str()); + llava_process_prompt(ctx_llava, image_embd, n_image_pos, ¶ms, params.prompt.c_str()); llama_print_timings(ctx_llava->ctx_llama); + free(image_embd); llava_free(ctx_llava); return 0; } diff --git a/examples/llava/llava.h b/examples/llava/llava.h index 4f229a08cb9cb..ddbcc8d43205f 100644 --- a/examples/llava/llava.h +++ 
b/examples/llava/llava.h @@ -14,14 +14,14 @@ struct llava_context { struct llama_context * ctx_llama = NULL; struct llama_model * model = NULL; - int n_img_pos = 0; - float * image_embd = NULL; +// int n_img_pos = 0; +// float * image_embd = NULL; }; struct llava_context * llava_init(gpt_params * params); void llava_free(struct llava_context * ctx_llava); -void llava_process_prompt(struct llava_context * ctx_llava, gpt_params * params, const char * prompt); +//void llava_process_prompt(struct llava_context * ctx_llava, gpt_params * params, const char * prompt); #ifdef __cplusplus From 08891175738247a804a4e447e64b2e9dc0b4b3df Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sat, 14 Oct 2023 12:39:00 +0200 Subject: [PATCH 06/31] cleanup --- examples/llava/llava.cpp | 53 ++++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index cf19a2f78d095..ffdad9c99d57f 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ -128,7 +128,33 @@ void llava_free(struct llava_context * ctx_llava) { llama_backend_free(); } -static void llava_process_prompt(struct llava_context * ctx_llava, float * image_embd, int n_img_pos, gpt_params * params, const char * prompt) { + + +static bool load_image(llava_context * ctx_llava, gpt_params * params, float **image_embd, int * n_image_pos) { + // load and preprocess the image + clip_image_u8 img; + auto prompt = params->prompt; + if (prompt_contains_image(prompt)) { + if (!params->image.empty()) { + printf("using base64 encoded image instead of command line image path\n"); + } + if (!get_image_from_prompt(prompt, &img)) { + fprintf(stderr, "%s: can't load image from prompt\n", __func__); + return false; + } + prompt = remove_image_from_prompt(prompt); + } else { + if (!clip_image_load_from_file(params->image.c_str(), &img)) { + fprintf(stderr, "%s: is %s really an image file?\n", __func__, params->image.c_str()); + return 
false; + } + } + llava_build_img_embed(ctx_llava, params->n_threads, &img, image_embd, n_image_pos); + + return true; +} + +static void process_prompt(struct llava_context * ctx_llava, float * image_embd, int n_img_pos, gpt_params * params, const char * prompt) { int n_past = 0; const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict; @@ -156,7 +182,6 @@ static void llava_process_prompt(struct llava_context * ctx_llava, float * image } - int main(int argc, char ** argv) { ggml_time_init(); @@ -178,32 +203,12 @@ int main(int argc, char ** argv) { return 1; } - // load and preprocess the image - clip_image_u8 img; - auto prompt = params.prompt; - if (prompt_contains_image(prompt)) { - if (!params.image.empty()) { - printf("using base64 encoded image instead of command line image path\n"); - } - if (!get_image_from_prompt(prompt, &img)) { - fprintf(stderr, "%s: can't load image from prompt\n", __func__); - llava_free(ctx_llava); - return 1; - } - prompt = remove_image_from_prompt(prompt); - } else { - if (!clip_image_load_from_file(params.image.c_str(), &img)) { - fprintf(stderr, "%s: is %s really an image file?\n", __func__, params.image.c_str()); - llava_free(ctx_llava); - return 1; - } - } float * image_embd; int n_image_pos; - llava_build_img_embed(ctx_llava, params.n_threads, &img, &image_embd, &n_image_pos); + load_image(ctx_llava, ¶ms, &image_embd, &n_image_pos); // process the prompt - llava_process_prompt(ctx_llava, image_embd, n_image_pos, ¶ms, params.prompt.c_str()); + process_prompt(ctx_llava, image_embd, n_image_pos, ¶ms, params.prompt.c_str()); llama_print_timings(ctx_llava->ctx_llama); From f83c0606bd26ed14285832df154f4988b3d810de Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sat, 14 Oct 2023 12:58:40 +0200 Subject: [PATCH 07/31] further cleanup; move llava-cli into its own file and rename --- examples/llava/CMakeLists.txt | 16 +++++- examples/llava/README.md | 6 +- examples/llava/clip.cpp | 2 +- examples/llava/clip.h | 2 +- 
examples/llava/llava-cli.cpp | 101 ++++++++++++++++++++++++++++++++++ examples/llava/llava-utils.h | 10 ++-- examples/llava/llava.cpp | 95 +------------------------------- examples/llava/llava.h | 6 +- 8 files changed, 128 insertions(+), 110 deletions(-) create mode 100644 examples/llava/llava-cli.cpp diff --git a/examples/llava/CMakeLists.txt b/examples/llava/CMakeLists.txt index d04dcc5c53a23..7e05bb3bff20b 100644 --- a/examples/llava/CMakeLists.txt +++ b/examples/llava/CMakeLists.txt @@ -11,9 +11,21 @@ if(TARGET BUILD_INFO) endif() set(TARGET llava) -add_executable(${TARGET} llava.cpp) +add_library(${TARGET} llava.cpp llava.h) +install(TARGETS ${TARGET} LIBRARY) +target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_compile_features(${TARGET} PRIVATE cxx_std_11) +if (NOT MSVC) + target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h + endif() +if(TARGET BUILD_INFO) + add_dependencies(${TARGET} BUILD_INFO) +endif() + +set(TARGET llava-cli) +add_executable(${TARGET} llava-cli.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama clip ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common llama clip llava ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) if(TARGET BUILD_INFO) add_dependencies(${TARGET} BUILD_INFO) diff --git a/examples/llava/README.md b/examples/llava/README.md index fc3446b60fd7d..b1df8dd165425 100644 --- a/examples/llava/README.md +++ b/examples/llava/README.md @@ -9,12 +9,12 @@ models are available. After API is confirmed, more models will be supported / uploaded. ## Usage -Build with cmake or run `make llava` to build it. +Build with cmake or run `make llava-cli` to build it. -After building, run: `./llava` to see the usage. For example: +After building, run: `./llava-cli` to see the usage. 
For example: ```sh -./llava -m llava-v1.5-7b/ggml-model-q5_k.gguf --mmproj llava-v1.5-7b/mmproj-model-f16.gguf --image path/to/an/image.jpg +./llava-cli -m llava-v1.5-7b/ggml-model-q5_k.gguf --mmproj llava-v1.5-7b/mmproj-model-f16.gguf --image path/to/an/image.jpg ``` **note**: A lower temperature like 0.1 is recommended for better quality. add `--temp 0.1` to the command to do so. diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index 5bb2e4c3718cb..d8eb865fc08bc 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -690,7 +690,7 @@ static void build_clip_img_from_data(const stbi_uc * data, int nx, int ny, clip_ memcpy(img->data, data, img->size); } -bool clip_image_load_from_bytes(const unsigned char * bytes, int bytes_length, clip_image_u8 * img) { +bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, clip_image_u8 * img) { int nx, ny, nc; auto data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3); if (!data) { diff --git a/examples/llava/clip.h b/examples/llava/clip.h index c0b53d0b8dbdf..f161b738ecc6a 100644 --- a/examples/llava/clip.h +++ b/examples/llava/clip.h @@ -58,7 +58,7 @@ struct clip_image_f32_batch { struct clip_image_u8 * make_clip_image_u8(); struct clip_image_f32 * make_clip_image_f32(); bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img); -bool clip_image_load_from_bytes(const unsigned char * bytes, int bytes_length, clip_image_u8 * img); +bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, clip_image_u8 * img); bool clip_image_preprocess(const struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32 * res, const bool pad2square); bool clip_image_encode(const struct clip_ctx * ctx, const int n_threads, struct clip_image_f32 * img, float * vec); diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp new file mode 100644 index 0000000000000..84c37624662b3 --- /dev/null +++ 
b/examples/llava/llava-cli.cpp @@ -0,0 +1,101 @@ +#include +#include + +#include "ggml.h" +#include "common.h" +#include "clip.h" +#include "llava.h" +#include "llava-utils.h" + + +static void show_additional_info(int /*argc*/, char ** argv) { + printf("\n example usage: %s -m --mmproj --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]); + printf(" note: a lower temperature value like 0.1 is recommended for better quality.\n"); +} + +static bool load_image(llava_context * ctx_llava, gpt_params * params, float **image_embd, int * n_image_pos) { + // load and preprocess the image + clip_image_u8 img; + auto prompt = params->prompt; + if (prompt_contains_image(prompt)) { + if (!params->image.empty()) { + printf("using base64 encoded image instead of command line image path\n"); + } + if (!clip_image_load_from_prompt(prompt, &img)) { + fprintf(stderr, "%s: can't load image from prompt\n", __func__); + return false; + } + prompt = remove_image_from_prompt(prompt); + } else { + if (!clip_image_load_from_file(params->image.c_str(), &img)) { + fprintf(stderr, "%s: is %s really an image file?\n", __func__, params->image.c_str()); + return false; + } + } + llava_build_img_embed(ctx_llava, params->n_threads, &img, image_embd, n_image_pos); + + return true; +} + +static void process_prompt(struct llava_context * ctx_llava, float * image_embd, int n_img_pos, gpt_params * params, const char * prompt) { + int n_past = 0; + + const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict; + + // llava chat format is "USER: \n\nASSISTANT:" + // GG: are we sure that the should be a trailing whitespace at the end of this string? + eval_string(ctx_llava->ctx_llama, "A chat between a curious human and an artificial intelligence assistant. 
The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER: ", params->n_batch, &n_past); + eval_image_embd(ctx_llava->ctx_llama, image_embd, n_img_pos, params->n_batch, &n_past); + eval_string(ctx_llava->ctx_llama, prompt, params->n_batch, &n_past); + eval_string(ctx_llava->ctx_llama, "\nASSISTANT:", params->n_batch, &n_past); + + // generate the response + + printf("\n"); + + for (int i = 0; i < max_tgt_len; i++) { + const char * tmp = sample(ctx_llava->ctx_llama, *params, &n_past); + if (strcmp(tmp, "") == 0) break; + + printf("%s", tmp); + fflush(stdout); + } + + printf("\n"); + +} + +int main(int argc, char ** argv) { + ggml_time_init(); + + gpt_params params; + + if (!gpt_params_parse(argc, argv, params)) { + show_additional_info(argc, argv); + return 1; + } + if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) { + gpt_print_usage(argc, argv, params); + show_additional_info(argc, argv); + return 1; + } + + auto ctx_llava = llava_init(¶ms); + if (ctx_llava == NULL) { + fprintf(stderr, "%s: error: failed to init llava\n", __func__); + return 1; + } + + float * image_embd; + int n_image_pos; + load_image(ctx_llava, ¶ms, &image_embd, &n_image_pos); + + // process the prompt + process_prompt(ctx_llava, image_embd, n_image_pos, ¶ms, params.prompt.c_str()); + + llama_print_timings(ctx_llava->ctx_llama); + + free(image_embd); + llava_free(ctx_llava); + return 0; +} diff --git a/examples/llava/llava-utils.h b/examples/llava/llava-utils.h index b794c39cc9cef..3b4fa96ccfd35 100644 --- a/examples/llava/llava-utils.h +++ b/examples/llava/llava-utils.h @@ -149,19 +149,19 @@ inline const char * sample(struct llama_context * ctx_llama, gpt_params & params static const char* IMG_BASE64_TAG_BEGIN = ""; -static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) { +inline void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) { 
begin_out = prompt.find(IMG_BASE64_TAG_BEGIN); end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out); } -static bool prompt_contains_image(const std::string& prompt) { +inline bool prompt_contains_image(const std::string& prompt) { size_t begin, end; find_image_tag_in_prompt(prompt, begin, end); return (begin != std::string::npos); } // replaces the base64 image tag in the prompt with `replacement` -static bool get_image_from_prompt(const std::string& prompt, clip_image_u8 * img) { +inline bool clip_image_load_from_prompt(const std::string& prompt, clip_image_u8 * img) { size_t img_base64_str_start, img_base64_str_end; find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end); if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) { @@ -176,7 +176,7 @@ static bool get_image_from_prompt(const std::string& prompt, clip_image_u8 * img auto required_bytes = base64::required_encode_size(base64_str.size()); auto img_bytes = std::vector(required_bytes); auto img_bytes_end = base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin()); - auto img_bytes_len = img_bytes_end - img_bytes.begin(); + size_t img_bytes_len = img_bytes_end - img_bytes.begin(); auto img_loaded_ok = clip_image_load_from_bytes(img_bytes.data(), img_bytes_len, img); if (!img_loaded_ok) { @@ -187,7 +187,7 @@ static bool get_image_from_prompt(const std::string& prompt, clip_image_u8 * img return true; } -static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") { +inline std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") { size_t begin, end; find_image_tag_in_prompt(prompt, begin, end); if (begin == std::string::npos || end == std::string::npos) { diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index ffdad9c99d57f..522334c7c6eee 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ 
-10,11 +10,6 @@ #include "base64.hpp" -static void show_additional_info(int /*argc*/, char ** argv) { - printf("\n example usage: %s -m --mmproj --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]); - printf(" note: a lower temperature value like 0.1 is recommended for better quality.\n"); -} - static bool encode_image_with_clip(llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_embd, int * n_img_pos) { auto ctx_clip = ctx_llava->ctx_clip; clip_image_f32 img_res; @@ -51,7 +46,7 @@ static bool encode_image_with_clip(llava_context * ctx_llava, int n_threads, con return true; } -static bool llava_build_img_embed(struct llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out) { +bool llava_build_img_embed(struct llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out) { auto ctx_clip = ctx_llava->ctx_clip; float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)); @@ -128,91 +123,3 @@ void llava_free(struct llava_context * ctx_llava) { llama_backend_free(); } - - -static bool load_image(llava_context * ctx_llava, gpt_params * params, float **image_embd, int * n_image_pos) { - // load and preprocess the image - clip_image_u8 img; - auto prompt = params->prompt; - if (prompt_contains_image(prompt)) { - if (!params->image.empty()) { - printf("using base64 encoded image instead of command line image path\n"); - } - if (!get_image_from_prompt(prompt, &img)) { - fprintf(stderr, "%s: can't load image from prompt\n", __func__); - return false; - } - prompt = remove_image_from_prompt(prompt); - } else { - if (!clip_image_load_from_file(params->image.c_str(), &img)) { - fprintf(stderr, "%s: is %s really an image file?\n", __func__, params->image.c_str()); - return false; - } - } - llava_build_img_embed(ctx_llava, params->n_threads, &img, image_embd, n_image_pos); - - return true; -} - 
-static void process_prompt(struct llava_context * ctx_llava, float * image_embd, int n_img_pos, gpt_params * params, const char * prompt) { - int n_past = 0; - - const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict; - - // llava chat format is "USER: \n\nASSISTANT:" - // GG: are we sure that the should be a trailing whitespace at the end of this string? - eval_string(ctx_llava->ctx_llama, "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER: ", params->n_batch, &n_past); - eval_image_embd(ctx_llava->ctx_llama, image_embd, n_img_pos, params->n_batch, &n_past); - eval_string(ctx_llava->ctx_llama, prompt, params->n_batch, &n_past); - eval_string(ctx_llava->ctx_llama, "\nASSISTANT:", params->n_batch, &n_past); - - // generate the response - - printf("\n"); - - for (int i = 0; i < max_tgt_len; i++) { - const char * tmp = sample(ctx_llava->ctx_llama, *params, &n_past); - if (strcmp(tmp, "") == 0) break; - - printf("%s", tmp); - fflush(stdout); - } - - printf("\n"); - -} - -int main(int argc, char ** argv) { - ggml_time_init(); - - gpt_params params; - - if (!gpt_params_parse(argc, argv, params)) { - show_additional_info(argc, argv); - return 1; - } - if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) { - gpt_print_usage(argc, argv, params); - show_additional_info(argc, argv); - return 1; - } - - auto ctx_llava = llava_init(¶ms); - if (ctx_llava == NULL) { - fprintf(stderr, "%s: error: failed to init llava\n", __func__); - return 1; - } - - float * image_embd; - int n_image_pos; - load_image(ctx_llava, ¶ms, &image_embd, &n_image_pos); - - // process the prompt - process_prompt(ctx_llava, image_embd, n_image_pos, ¶ms, params.prompt.c_str()); - - llama_print_timings(ctx_llava->ctx_llama); - - free(image_embd); - llava_free(ctx_llava); - return 0; -} diff --git a/examples/llava/llava.h 
b/examples/llava/llava.h index ddbcc8d43205f..1d8b87a465596 100644 --- a/examples/llava/llava.h +++ b/examples/llava/llava.h @@ -2,6 +2,7 @@ #define LLAVA_H #include "ggml.h" +#include "common.h" struct clip_ctx; @@ -13,15 +14,12 @@ struct llava_context { struct clip_ctx * ctx_clip = NULL; struct llama_context * ctx_llama = NULL; struct llama_model * model = NULL; - -// int n_img_pos = 0; -// float * image_embd = NULL; }; struct llava_context * llava_init(gpt_params * params); void llava_free(struct llava_context * ctx_llava); -//void llava_process_prompt(struct llava_context * ctx_llava, gpt_params * params, const char * prompt); +bool llava_build_img_embed(struct llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out); #ifdef __cplusplus From e2cd07cf87c4e535afdab694542c600cb043e5f3 Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sat, 14 Oct 2023 13:03:13 +0200 Subject: [PATCH 08/31] move base64.hpp into common/ --- {examples/llava => common}/base64.hpp | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {examples/llava => common}/base64.hpp (100%) diff --git a/examples/llava/base64.hpp b/common/base64.hpp similarity index 100% rename from examples/llava/base64.hpp rename to common/base64.hpp From f8eddcf8e823c254280ca50ba04dd1154f59c6d7 Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sat, 14 Oct 2023 13:13:40 +0200 Subject: [PATCH 09/31] collapse clip and llava libraries --- examples/llava/CMakeLists.txt | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/examples/llava/CMakeLists.txt b/examples/llava/CMakeLists.txt index 7e05bb3bff20b..d451690ad678b 100644 --- a/examples/llava/CMakeLists.txt +++ b/examples/llava/CMakeLists.txt @@ -1,17 +1,5 @@ -set(TARGET clip) -add_library(${TARGET} clip.cpp clip.h) -install(TARGETS ${TARGET} LIBRARY) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} 
PRIVATE cxx_std_11) -if (NOT MSVC) - target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h - endif() -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() - set(TARGET llava) -add_library(${TARGET} llava.cpp llava.h) +add_library(${TARGET} llava.cpp llava.h clip.cpp clip.h) install(TARGETS ${TARGET} LIBRARY) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) @@ -22,10 +10,11 @@ if(TARGET BUILD_INFO) add_dependencies(${TARGET} BUILD_INFO) endif() + set(TARGET llava-cli) add_executable(${TARGET} llava-cli.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama clip llava ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) if(TARGET BUILD_INFO) add_dependencies(${TARGET} BUILD_INFO) From b9f533b997d0a27e11f3e6450b17c536db5c3da7 Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sat, 14 Oct 2023 17:11:32 +0200 Subject: [PATCH 10/31] move llava into its own subdir --- .gitignore | 2 +- CMakeLists.txt | 1 + common/CMakeLists.txt | 1 + examples/llava/CMakeLists.txt | 16 -------------- examples/llava/llava-cli.cpp | 6 +++++- llava/CMakeLists.txt | 13 ++++++++++++ {examples/llava => llava}/clip.cpp | 0 {examples/llava => llava}/clip.h | 0 {examples/llava => llava}/llava-utils.h | 0 {examples/llava => llava}/llava.cpp | 28 ++++++++++++------------- {examples/llava => llava}/llava.h | 3 ++- 11 files changed, 36 insertions(+), 34 deletions(-) create mode 100644 llava/CMakeLists.txt rename {examples/llava => llava}/clip.cpp (100%) rename {examples/llava => llava}/clip.h (100%) rename {examples/llava => llava}/llava-utils.h (100%) rename {examples/llava => llava}/llava.cpp (83%) rename {examples/llava => llava}/llava.h (57%) diff --git a/.gitignore b/.gitignore index 471cf90d5cb51..c96dc91af438b 100644 --- 
a/.gitignore +++ b/.gitignore @@ -44,7 +44,7 @@ models-mnt /infill /libllama.so /llama-bench -/llava +/llava-cli /main /metal /perplexity diff --git a/CMakeLists.txt b/CMakeLists.txt index 6af42a6c266c8..58547b6d51ba9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -793,6 +793,7 @@ endif() # add_subdirectory(common) +add_subdirectory(llava) if (LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION) include(CTest) diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index fbb0ff0952ac7..ac4ce09583083 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -3,6 +3,7 @@ set(TARGET common) add_library(${TARGET} OBJECT + base64.hpp common.h common.cpp sampling.h diff --git a/examples/llava/CMakeLists.txt b/examples/llava/CMakeLists.txt index d451690ad678b..6b41d157e7d4f 100644 --- a/examples/llava/CMakeLists.txt +++ b/examples/llava/CMakeLists.txt @@ -1,16 +1,3 @@ -set(TARGET llava) -add_library(${TARGET} llava.cpp llava.h clip.cpp clip.h) -install(TARGETS ${TARGET} LIBRARY) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) -if (NOT MSVC) - target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h - endif() -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() - - set(TARGET llava-cli) add_executable(${TARGET} llava-cli.cpp) install(TARGETS ${TARGET} RUNTIME) @@ -19,6 +6,3 @@ target_compile_features(${TARGET} PRIVATE cxx_std_11) if(TARGET BUILD_INFO) add_dependencies(${TARGET} BUILD_INFO) endif() - -unset(TARGET) -llama_build_and_test_executable(test-llava.cpp) diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index 84c37624662b3..173c2d93852fb 100644 --- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -32,7 +32,11 @@ static bool load_image(llava_context * ctx_llava, gpt_params * params, float **i return false; } } - llava_build_img_embed(ctx_llava, params->n_threads, &img, image_embd, n_image_pos); + 
bool image_embed_result = llava_build_img_embed(ctx_llava->ctx_llama, ctx_llava->ctx_clip, params->n_threads, &img, image_embd, n_image_pos); + if (!image_embed_result) { + fprintf(stderr, "%s: coulnd't embed the image\n", __func__); + return false; + } return true; } diff --git a/llava/CMakeLists.txt b/llava/CMakeLists.txt new file mode 100644 index 0000000000000..9f9f8871d4ce5 --- /dev/null +++ b/llava/CMakeLists.txt @@ -0,0 +1,13 @@ +set(TARGET llava) + +add_library(${TARGET} llava.cpp llava.h clip.cpp clip.h) + +target_include_directories(${TARGET} PUBLIC .) +target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_compile_features(${TARGET} PRIVATE cxx_std_11) +if (NOT MSVC) + target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h + endif() +if(TARGET BUILD_INFO) + add_dependencies(${TARGET} BUILD_INFO) +endif() diff --git a/examples/llava/clip.cpp b/llava/clip.cpp similarity index 100% rename from examples/llava/clip.cpp rename to llava/clip.cpp diff --git a/examples/llava/clip.h b/llava/clip.h similarity index 100% rename from examples/llava/clip.h rename to llava/clip.h diff --git a/examples/llava/llava-utils.h b/llava/llava-utils.h similarity index 100% rename from examples/llava/llava-utils.h rename to llava/llava-utils.h diff --git a/examples/llava/llava.cpp b/llava/llava.cpp similarity index 83% rename from examples/llava/llava.cpp rename to llava/llava.cpp index 522334c7c6eee..a20d34bfa7449 100644 --- a/examples/llava/llava.cpp +++ b/llava/llava.cpp @@ -10,8 +10,7 @@ #include "base64.hpp" -static bool encode_image_with_clip(llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_embd, int * n_img_pos) { - auto ctx_clip = ctx_llava->ctx_clip; +static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_embd, int * n_img_pos) { clip_image_f32 img_res; if (!clip_image_preprocess(ctx_clip, img, 
&img_res, /*pad2square =*/ true)) { fprintf(stderr, "%s: unable to preprocess image\n", __func__); @@ -22,14 +21,6 @@ static bool encode_image_with_clip(llava_context * ctx_llava, int n_threads, con *n_img_pos = clip_n_patches(ctx_clip); *n_img_embd = clip_n_mmproj_embd(ctx_clip); - // make sure that the correct mmproj was used, i.e., compare apples to apples - int n_llama_embd = llama_n_embd(llama_get_model(ctx_llava->ctx_llama)); - if (*n_img_embd != n_llama_embd) { - printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, *n_img_embd, n_llama_embd); - - return false; - } - const int64_t t_img_enc_start_us = ggml_time_us(); if (!clip_image_encode(ctx_clip, n_threads, &img_res, image_embd)) { fprintf(stderr, "Unable to encode image\n"); @@ -46,9 +37,8 @@ static bool encode_image_with_clip(llava_context * ctx_llava, int n_threads, con return true; } -bool llava_build_img_embed(struct llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out) { +bool llava_build_img_embed(const llama_context * ctx_llama, clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out) { - auto ctx_clip = ctx_llava->ctx_clip; float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)); if (!image_embd) { fprintf(stderr, "Unable to allocate memory for image embeddings\n"); @@ -58,13 +48,22 @@ bool llava_build_img_embed(struct llava_context * ctx_llava, int n_threads, cons int n_image_pos; int n_img_embd; - if (!encode_image_with_clip(ctx_llava, n_threads, img, image_embd, &n_img_embd, &n_image_pos)) { + if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_img_embd, &n_image_pos)) { fprintf(stderr, "%s: cannot encode image, aborting\n", __func__); free(image_embd); return false; } + // make sure that the correct mmproj was used, i.e., compare apples to apples + 
int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama)); + if (n_img_embd != n_llama_embd) { + printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_img_embd, n_llama_embd); + free(image_embd); + return false; + } + *image_embd_out = image_embd; *n_image_pos_out = n_image_pos; + return true; } @@ -102,16 +101,15 @@ struct llava_context * llava_init(gpt_params * params) { return NULL; } - auto ctx_llava = (struct llava_context *)malloc(sizeof(llava_context)); ctx_llava->ctx_llama = ctx_llama; ctx_llava->ctx_clip = ctx_clip; ctx_llava->model = model; return ctx_llava; - } + void llava_free(struct llava_context * ctx_llava) { if (ctx_llava->ctx_clip) { clip_free(ctx_llava->ctx_clip); diff --git a/examples/llava/llava.h b/llava/llava.h similarity index 57% rename from examples/llava/llava.h rename to llava/llava.h index 1d8b87a465596..a7789ad364522 100644 --- a/examples/llava/llava.h +++ b/llava/llava.h @@ -19,7 +19,8 @@ struct llava_context { struct llava_context * llava_init(gpt_params * params); void llava_free(struct llava_context * ctx_llava); -bool llava_build_img_embed(struct llava_context * ctx_llava, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out); +/** build a llava image embedding from the passed-in clip image `img`. 
result is returned as image_embd_out, size n_image_pos_out */ +bool llava_build_img_embed(const struct llama_context * ctx_llama, struct clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out); #ifdef __cplusplus From f21af512cd8176b5ea8eb81b722054d47d8b2c0a Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sat, 14 Oct 2023 17:20:21 +0200 Subject: [PATCH 11/31] wip --- examples/llava/llava-cli.cpp | 5 +++-- llava/clip.h | 5 +++-- llava/llava.cpp | 18 +++++++++--------- llava/llava.h | 2 +- 4 files changed, 16 insertions(+), 14 deletions(-) diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index 173c2d93852fb..5c61c67fd0902 100644 --- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -13,7 +13,7 @@ static void show_additional_info(int /*argc*/, char ** argv) { printf(" note: a lower temperature value like 0.1 is recommended for better quality.\n"); } -static bool load_image(llava_context * ctx_llava, gpt_params * params, float **image_embd, int * n_image_pos) { +static bool load_image(llava_context * ctx_llava, gpt_params * params, float **image_embd, int * n_img_pos) { // load and preprocess the image clip_image_u8 img; auto prompt = params->prompt; @@ -32,7 +32,7 @@ static bool load_image(llava_context * ctx_llava, gpt_params * params, float **i return false; } } - bool image_embed_result = llava_build_img_embed(ctx_llava->ctx_llama, ctx_llava->ctx_clip, params->n_threads, &img, image_embd, n_image_pos); + bool image_embed_result = llava_build_img_embed(ctx_llava->ctx_llama, ctx_llava->ctx_clip, params->n_threads, &img, image_embd, n_img_pos); if (!image_embed_result) { fprintf(stderr, "%s: coulnd't embed the image\n", __func__); return false; @@ -49,6 +49,7 @@ static void process_prompt(struct llava_context * ctx_llava, float * image_embd, // llava chat format is "USER: \n\nASSISTANT:" // GG: are we sure that the should be a trailing whitespace at the end of this 
string? eval_string(ctx_llava->ctx_llama, "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER: ", params->n_batch, &n_past); + printf("embedding image, n_img_pos is %d\n", n_img_pos); eval_image_embd(ctx_llava->ctx_llama, image_embd, n_img_pos, params->n_batch, &n_past); eval_string(ctx_llava->ctx_llama, prompt, params->n_batch, &n_past); eval_string(ctx_llava->ctx_llama, "\nASSISTANT:", params->n_batch, &n_past); diff --git a/llava/clip.h b/llava/clip.h index f161b738ecc6a..26851fafc2b66 100644 --- a/llava/clip.h +++ b/llava/clip.h @@ -2,6 +2,7 @@ #define CLIP_H #include "ggml.h" +#include "llama.h" struct clip_ctx; @@ -57,8 +58,8 @@ struct clip_image_f32_batch { struct clip_image_u8 * make_clip_image_u8(); struct clip_image_f32 * make_clip_image_f32(); -bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img); -bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, clip_image_u8 * img); +LLAMA_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img); +LLAMA_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, clip_image_u8 * img); bool clip_image_preprocess(const struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32 * res, const bool pad2square); bool clip_image_encode(const struct clip_ctx * ctx, const int n_threads, struct clip_image_f32 * img, float * vec); diff --git a/llava/llava.cpp b/llava/llava.cpp index a20d34bfa7449..e720c8ba432a5 100644 --- a/llava/llava.cpp +++ b/llava/llava.cpp @@ -10,7 +10,7 @@ #include "base64.hpp" -static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_embd, int * n_img_pos) { +static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_image_embd, int 
* n_img_pos) { clip_image_f32 img_res; if (!clip_image_preprocess(ctx_clip, img, &img_res, /*pad2square =*/ true)) { fprintf(stderr, "%s: unable to preprocess image\n", __func__); @@ -19,7 +19,7 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli } *n_img_pos = clip_n_patches(ctx_clip); - *n_img_embd = clip_n_mmproj_embd(ctx_clip); + *n_image_embd = clip_n_mmproj_embd(ctx_clip); const int64_t t_img_enc_start_us = ggml_time_us(); if (!clip_image_encode(ctx_clip, n_threads, &img_res, image_embd)) { @@ -37,7 +37,7 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli return true; } -bool llava_build_img_embed(const llama_context * ctx_llama, clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out) { +bool llava_build_img_embed(const llama_context * ctx_llama, clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out) { float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)); if (!image_embd) { @@ -46,23 +46,23 @@ bool llava_build_img_embed(const llama_context * ctx_llama, clip_ctx * ctx_clip, return false; } - int n_image_pos; - int n_img_embd; - if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_img_embd, &n_image_pos)) { + int n_img_pos; + int n_image_embd; + if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_image_embd, &n_img_pos)) { fprintf(stderr, "%s: cannot encode image, aborting\n", __func__); free(image_embd); return false; } // make sure that the correct mmproj was used, i.e., compare apples to apples int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama)); - if (n_img_embd != n_llama_embd) { - printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). 
Make sure that you use the correct mmproj file.\n", __func__, n_img_embd, n_llama_embd); + if (n_image_embd != n_llama_embd) { + printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_image_embd, n_llama_embd); free(image_embd); return false; } *image_embd_out = image_embd; - *n_image_pos_out = n_image_pos; + *n_img_pos_out = n_img_pos; return true; } diff --git a/llava/llava.h b/llava/llava.h index a7789ad364522..ba67103a4dfcb 100644 --- a/llava/llava.h +++ b/llava/llava.h @@ -20,7 +20,7 @@ struct llava_context * llava_init(gpt_params * params); void llava_free(struct llava_context * ctx_llava); /** build a llava image embedding from the passed-in clip image `img`. result is returned as image_embd_out, size n_image_pos_out */ -bool llava_build_img_embed(const struct llama_context * ctx_llama, struct clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out); +LLAMA_API bool llava_build_img_embed(const struct llama_context * ctx_llama, struct clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out); #ifdef __cplusplus From 708928c649880ac70df6adb5721ba1fd635e3627 Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sat, 14 Oct 2023 18:24:55 +0200 Subject: [PATCH 12/31] fix bug where base64 string was not removed from the prompt --- examples/llava/llava-cli.cpp | 67 ++++++++++++++++++++++++++++++++++-- llava/llava-utils.h | 22 ++---------- llava/llava.cpp | 65 ++++++++-------------------------- llava/llava.h | 16 ++++----- 4 files changed, 87 insertions(+), 83 deletions(-) diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index 5c61c67fd0902..603b552ca18cf 100644 --- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -7,6 +7,13 @@ #include "llava.h" #include "llava-utils.h" +struct llava_context { + struct clip_ctx * 
ctx_clip = NULL; + struct llama_context * ctx_llama = NULL; + struct llama_model * model = NULL; +}; + + static void show_additional_info(int /*argc*/, char ** argv) { printf("\n example usage: %s -m --mmproj --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]); @@ -25,7 +32,7 @@ static bool load_image(llava_context * ctx_llava, gpt_params * params, float **i fprintf(stderr, "%s: can't load image from prompt\n", __func__); return false; } - prompt = remove_image_from_prompt(prompt); + params->prompt = remove_image_from_prompt(prompt); } else { if (!clip_image_load_from_file(params->image.c_str(), &img)) { fprintf(stderr, "%s: is %s really an image file?\n", __func__, params->image.c_str()); @@ -49,8 +56,7 @@ static void process_prompt(struct llava_context * ctx_llava, float * image_embd, // llava chat format is "USER: \n\nASSISTANT:" // GG: are we sure that the should be a trailing whitespace at the end of this string? eval_string(ctx_llava->ctx_llama, "A chat between a curious human and an artificial intelligence assistant. 
The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER: ", params->n_batch, &n_past); - printf("embedding image, n_img_pos is %d\n", n_img_pos); - eval_image_embd(ctx_llava->ctx_llama, image_embd, n_img_pos, params->n_batch, &n_past); + llava_eval_image_embd(ctx_llava->ctx_llama, image_embd, n_img_pos, params->n_batch, &n_past); eval_string(ctx_llava->ctx_llama, prompt, params->n_batch, &n_past); eval_string(ctx_llava->ctx_llama, "\nASSISTANT:", params->n_batch, &n_past); @@ -70,6 +76,61 @@ static void process_prompt(struct llava_context * ctx_llava, float * image_embd, } + +static struct llava_context * llava_init(gpt_params * params) { + + const char * clip_path = params->mmproj.c_str(); + + auto prompt = params->prompt; + if (prompt.empty()) { + prompt = "describe the image in detail."; + } + + auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1); + + llama_backend_init(params->numa); + + llama_model_params model_params = llama_model_default_params(); + llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params); + if (model == NULL) { + fprintf(stderr , "%s: error: unable to load model\n" , __func__); + return NULL; + } + + llama_context_params ctx_params = llama_context_default_params(); + + ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings + ctx_params.n_threads = params->n_threads; + ctx_params.n_threads_batch = params->n_threads_batch == -1 ? 
params->n_threads : params->n_threads_batch; + + llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params); + + if (ctx_llama == NULL) { + fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); + return NULL; + } + + auto ctx_llava = (struct llava_context *)malloc(sizeof(llava_context)); + + ctx_llava->ctx_llama = ctx_llama; + ctx_llava->ctx_clip = ctx_clip; + ctx_llava->model = model; + return ctx_llava; +} + + +static void llava_free(struct llava_context * ctx_llava) { + if (ctx_llava->ctx_clip) { + clip_free(ctx_llava->ctx_clip); + ctx_llava->ctx_clip = NULL; + } + + llama_free(ctx_llava->ctx_llama); + llama_free_model(ctx_llava->model); + llama_backend_free(); +} + + int main(int argc, char ** argv) { ggml_time_init(); diff --git a/llava/llava-utils.h b/llava/llava-utils.h index 3b4fa96ccfd35..53beefd261467 100644 --- a/llava/llava-utils.h +++ b/llava/llava-utils.h @@ -11,24 +11,6 @@ #include #include -inline bool eval_image_embd(llama_context * ctx_llama, float * embd, int N, int n_batch, int * n_past) { - int n_embd = llama_n_embd(llama_get_model(ctx_llama)); - - for (int i = 0; i < N; i += n_batch) { - int n_eval = N - i; - if (n_eval > n_batch) { - n_eval = n_batch; - } - llama_batch batch = {int32_t(n_eval), nullptr, (embd+i*n_embd), nullptr, nullptr, nullptr, *n_past, 1, 0, }; - if (llama_decode(ctx_llama, batch)) { - fprintf(stderr, "%s : failed to eval\n", __func__); - return false; - } - *n_past += n_eval; - } - return true; -} - inline bool eval_tokens(struct llama_context * ctx_llama, std::vector tokens, int n_batch, int * n_past) { int N = (int) tokens.size(); for (int i = 0; i < N; i += n_batch) { @@ -37,7 +19,7 @@ inline bool eval_tokens(struct llama_context * ctx_llama, std::vectormmproj.c_str(); - - auto prompt = params->prompt; - if (prompt.empty()) { - prompt = "describe the image in detail."; - } - - auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1); - - llama_backend_init(params->numa); - 
- llama_model_params model_params = llama_model_default_params(); - llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params); - if (model == NULL) { - fprintf(stderr , "%s: error: unable to load model\n" , __func__); - return NULL; - } - - llama_context_params ctx_params = llama_context_default_params(); - - ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings - ctx_params.n_threads = params->n_threads; - ctx_params.n_threads_batch = params->n_threads_batch == -1 ? params->n_threads : params->n_threads_batch; - - llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params); - - if (ctx_llama == NULL) { - fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); - return NULL; +bool llava_eval_image_embd(llama_context * ctx_llama, float * image_embd, int n_image_pos, int n_batch, int * n_past) { + int n_embd = llama_n_embd(llama_get_model(ctx_llama)); + + for (int i = 0; i < n_image_pos; i += n_batch) { + int n_eval = n_image_pos - i; + if (n_eval > n_batch) { + n_eval = n_batch; + } + llama_batch batch = {int32_t(n_eval), nullptr, (image_embd+i*n_embd), nullptr, nullptr, nullptr, *n_past, 1, 0, }; + if (llama_decode(ctx_llama, batch)) { + fprintf(stderr, "%s : failed to eval\n", __func__); + return false; + } + *n_past += n_eval; } - - auto ctx_llava = (struct llava_context *)malloc(sizeof(llava_context)); - - ctx_llava->ctx_llama = ctx_llama; - ctx_llava->ctx_clip = ctx_clip; - ctx_llava->model = model; - return ctx_llava; -} - - -void llava_free(struct llava_context * ctx_llava) { - if (ctx_llava->ctx_clip) { - clip_free(ctx_llava->ctx_clip); - ctx_llava->ctx_clip = NULL; - } - - llama_free(ctx_llava->ctx_llama); - llama_free_model(ctx_llava->model); - llama_backend_free(); + return true; } - diff --git a/llava/llava.h b/llava/llava.h index ba67103a4dfcb..de3875e039f19 100644 --- a/llava/llava.h +++ b/llava/llava.h @@ -10,18 
+10,14 @@ struct clip_ctx; extern "C" { #endif -struct llava_context { - struct clip_ctx * ctx_clip = NULL; - struct llama_context * ctx_llama = NULL; - struct llama_model * model = NULL; -}; - -struct llava_context * llava_init(gpt_params * params); -void llava_free(struct llava_context * ctx_llava); - -/** build a llava image embedding from the passed-in clip image `img`. result is returned as image_embd_out, size n_image_pos_out */ +/** using ctx_clip, build a llava image embedding from the passed-in image `img` (see clip.h for methods to load img). + * result is returned as image_embd_out, size n_image_pos_out */ LLAMA_API bool llava_build_img_embed(const struct llama_context * ctx_llama, struct clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out); +/** write the image represented by image_embd (size n_image_pos) into the llama context with batch size n_batch, + * starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. 
*/ +LLAMA_API bool llava_eval_image_embd(struct llama_context * ctx_llama, float * image_embd, int n_image_pos, int n_batch, int * n_past); + #ifdef __cplusplus } From 09edb7ecdf8029f2293bc01f283badc4defaf806 Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sat, 14 Oct 2023 18:59:40 +0200 Subject: [PATCH 13/31] get libllava to output in the right place --- llava/CMakeLists.txt | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/llava/CMakeLists.txt b/llava/CMakeLists.txt index 9f9f8871d4ce5..d1118e9dbf133 100644 --- a/llava/CMakeLists.txt +++ b/llava/CMakeLists.txt @@ -1,9 +1,20 @@ set(TARGET llava) -add_library(${TARGET} llava.cpp llava.h clip.cpp clip.h) +if (BUILD_SHARED_LIBS) + add_library(${TARGET} SHARED llava.cpp llava.h clip.cpp clip.h) + set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON) + set_target_properties(${TARGET} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) + target_link_libraries(${TARGET} PUBLIC Threads::Threads common llama ggml ${LLAMA_EXTRA_LIBS}) + install(TARGETS ${TARGET} LIBRARY) +else() + add_library(${TARGET} llava.cpp llava.h clip.cpp clip.h) + target_link_libraries(${TARGET} PRIVATE common llama ggml ${CMAKE_THREAD_LIBS_INIT}) +endif() target_include_directories(${TARGET} PUBLIC .) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_include_directories(${TARGET} PUBLIC ..) 
+target_include_directories(${TARGET} PUBLIC ../common) + target_compile_features(${TARGET} PRIVATE cxx_std_11) if (NOT MSVC) target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h @@ -11,3 +22,4 @@ if (NOT MSVC) if(TARGET BUILD_INFO) add_dependencies(${TARGET} BUILD_INFO) endif() + From 2847ecf2ddac85755dfec8b07517aab5490e4147 Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sat, 14 Oct 2023 19:15:35 +0200 Subject: [PATCH 14/31] expose llava methods in libllama.dylib --- CMakeLists.txt | 17 +++++++++++++++++ llava/CMakeLists.txt | 12 ++---------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 58547b6d51ba9..09e28ff629407 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -698,6 +698,7 @@ target_include_directories(llama PUBLIC .) target_compile_features(llama PUBLIC cxx_std_11) # don't bump target_link_libraries(llama PRIVATE ggml + llava ${LLAMA_EXTRA_LIBS} ) @@ -707,6 +708,22 @@ if (BUILD_SHARED_LIBS) if (LLAMA_METAL) set_target_properties(llama PROPERTIES RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal") endif() + + # By default, symbols provided by the sublibs that are not used by mainlib (which is all of them in this case) + # are not used. This changes that. 
+ if (WIN32) + set_target_properties(llama PROPERTIES + LINK_FLAGS "/WHOLEARCHIVE" + ) + elseif (APPLE) + set_target_properties(llama PROPERTIES + LINK_FLAGS "-Wl,-all_load" + ) + else () + set_target_properties(llama PROPERTIES + LINK_FLAGS "-Wl,--whole-archive" + ) + endif () endif() diff --git a/llava/CMakeLists.txt b/llava/CMakeLists.txt index d1118e9dbf133..433ec6cfa5d49 100644 --- a/llava/CMakeLists.txt +++ b/llava/CMakeLists.txt @@ -1,15 +1,7 @@ set(TARGET llava) -if (BUILD_SHARED_LIBS) - add_library(${TARGET} SHARED llava.cpp llava.h clip.cpp clip.h) - set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON) - set_target_properties(${TARGET} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) - target_link_libraries(${TARGET} PUBLIC Threads::Threads common llama ggml ${LLAMA_EXTRA_LIBS}) - install(TARGETS ${TARGET} LIBRARY) -else() - add_library(${TARGET} llava.cpp llava.h clip.cpp clip.h) - target_link_libraries(${TARGET} PRIVATE common llama ggml ${CMAKE_THREAD_LIBS_INIT}) -endif() +add_library(${TARGET} STATIC llava.cpp llava.h clip.cpp clip.h) +target_link_libraries(${TARGET} PRIVATE ${CMAKE_THREAD_LIBS_INIT}) target_include_directories(${TARGET} PUBLIC .) target_include_directories(${TARGET} PUBLIC ..) 
From e3261ffad3b3b523a9b84463c8fb22c94c2b34cd Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sun, 15 Oct 2023 00:18:04 +0200 Subject: [PATCH 15/31] cleanup memory usage around clip_image_* --- examples/llava/llava-cli.cpp | 9 ++++--- llava/clip.cpp | 48 +++++++++++++++++++----------------- llava/clip.h | 8 ++++-- llava/llava.cpp | 10 +++++--- 4 files changed, 43 insertions(+), 32 deletions(-) diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index 603b552ca18cf..336b674a743e6 100644 --- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -22,25 +22,26 @@ static void show_additional_info(int /*argc*/, char ** argv) { static bool load_image(llava_context * ctx_llava, gpt_params * params, float **image_embd, int * n_img_pos) { // load and preprocess the image - clip_image_u8 img; + clip_image_u8 * img = make_clip_image_u8(); auto prompt = params->prompt; if (prompt_contains_image(prompt)) { if (!params->image.empty()) { printf("using base64 encoded image instead of command line image path\n"); } - if (!clip_image_load_from_prompt(prompt, &img)) { + if (!clip_image_load_from_prompt(prompt, img)) { fprintf(stderr, "%s: can't load image from prompt\n", __func__); return false; } params->prompt = remove_image_from_prompt(prompt); } else { - if (!clip_image_load_from_file(params->image.c_str(), &img)) { + if (!clip_image_load_from_file(params->image.c_str(), img)) { fprintf(stderr, "%s: is %s really an image file?\n", __func__, params->image.c_str()); return false; } } - bool image_embed_result = llava_build_img_embed(ctx_llava->ctx_llama, ctx_llava->ctx_clip, params->n_threads, &img, image_embd, n_img_pos); + bool image_embed_result = llava_build_img_embed(ctx_llava->ctx_llama, ctx_llava->ctx_clip, params->n_threads, img, image_embd, n_img_pos); if (!image_embed_result) { + clip_image_u8_free(img); fprintf(stderr, "%s: coulnd't embed the image\n", __func__); return false; } diff --git a/llava/clip.cpp b/llava/clip.cpp index 
d8eb865fc08bc..a2531de73d4a5 100644 --- a/llava/clip.cpp +++ b/llava/clip.cpp @@ -679,9 +679,11 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { } clip_image_u8 * make_clip_image_u8() { return new clip_image_u8(); } - clip_image_f32 * make_clip_image_f32() { return new clip_image_f32(); } +void clip_image_u8_free(clip_image_u8 * img) { if (img->data) { delete[] img->data; } delete img; } +void clip_image_f32_free(clip_image_f32 * img) { if (img->data) { delete[] img->data; } delete img; } + static void build_clip_img_from_data(const stbi_uc * data, int nx, int ny, clip_image_u8 * img) { img->nx = nx; img->ny = ny; @@ -726,39 +728,40 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104) // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156 - clip_image_u8 temp; // we will keep the input image data here temporarily + clip_image_u8 * temp = make_clip_image_u8(); // we will keep the input image data here temporarily if (pad2square && img->nx != img->ny) { int longer_side = std::max(img->nx, img->ny); - temp.nx = longer_side; - temp.ny = longer_side; - temp.size = 3 * longer_side * longer_side; - temp.data = new uint8_t[temp.size](); + temp->nx = longer_side; + temp->ny = longer_side; + temp->size = 3 * longer_side * longer_side; + temp->data = new uint8_t[temp->size](); uint8_t bc[3] = {122, 116, 104}; // bakground color in RGB from LLaVA // fill with background color - for (size_t i = 0; i < temp.size; i++) { - temp.data[i] = bc[i % 3]; + for (size_t i = 0; i < temp->size; i++) { + temp->data[i] = bc[i % 3]; } // copy from the input image for (int y = 0; y < img->ny; y++) { for (int x = 0; x < img->nx; x++) { const int i = 3 * (y * img->nx + x); - const int j = 3 * (y * temp.nx + x); - temp.data[j] = img->data[i]; - temp.data[j+1] 
= img->data[i+1]; - temp.data[j+2] = img->data[i+2]; + const int j = 3 * (y * temp->nx + x); + temp->data[j] = img->data[i]; + temp->data[j+1] = img->data[i+1]; + temp->data[j+2] = img->data[i+2]; } } } else { - temp.nx = img->nx; - temp.ny = img->ny; - temp.size = img->size; - temp.data = img->data; + temp->nx = img->nx; + temp->ny = img->ny; + temp->size = img->size; + temp->data = new uint8_t[temp->size](); + *temp->data = *img->data; // copy } - const int nx = temp.nx; - const int ny = temp.ny; + const int nx = temp->nx; + const int ny = temp->ny; const int nx2 = ctx->vision_model.hparams.image_size; const int ny2 = ctx->vision_model.hparams.image_size; @@ -797,10 +800,10 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip const int j10 = 3 * (y1 * nx + x0) + c; const int j11 = 3 * (y1 * nx + x1) + c; - const float v00 = temp.data[j00]; - const float v01 = temp.data[j01]; - const float v10 = temp.data[j10]; - const float v11 = temp.data[j11]; + const float v00 = temp->data[j00]; + const float v01 = temp->data[j01]; + const float v10 = temp->data[j10]; + const float v11 = temp->data[j11]; const float v0 = v00 * (1.0f - dx) + v01 * dx; const float v1 = v10 * (1.0f - dx) + v11 * dx; @@ -815,6 +818,7 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip } } } + clip_image_u8_free(temp); return true; } diff --git a/llava/clip.h b/llava/clip.h index 26851fafc2b66..2185f67b96ba9 100644 --- a/llava/clip.h +++ b/llava/clip.h @@ -33,7 +33,7 @@ int clip_n_mmproj_embd(struct clip_ctx * ctx); struct clip_image_u8 { int nx; int ny; - uint8_t * data; + uint8_t * data = NULL; size_t size; }; @@ -42,7 +42,7 @@ struct clip_image_u8 { struct clip_image_f32 { int nx; int ny; - float * data; + float * data = NULL; size_t size; }; @@ -58,8 +58,12 @@ struct clip_image_f32_batch { struct clip_image_u8 * make_clip_image_u8(); struct clip_image_f32 * make_clip_image_f32(); +LLAMA_API void clip_image_u8_free(clip_image_u8 * 
img); +LLAMA_API void clip_image_f32_free(clip_image_f32 * img); LLAMA_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img); +/** interpret bytes as an image file with length bytes_length, and use the result to populate img */ LLAMA_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, clip_image_u8 * img); + bool clip_image_preprocess(const struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32 * res, const bool pad2square); bool clip_image_encode(const struct clip_ctx * ctx, const int n_threads, struct clip_image_f32 * img, float * vec); diff --git a/llava/llava.cpp b/llava/llava.cpp index 8cbdda7d268bf..18cbc76aa496b 100644 --- a/llava/llava.cpp +++ b/llava/llava.cpp @@ -11,10 +11,10 @@ #include "base64.hpp" static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_image_embd, int * n_img_pos) { - clip_image_f32 img_res; - if (!clip_image_preprocess(ctx_clip, img, &img_res, /*pad2square =*/ true)) { + clip_image_f32 * img_res = make_clip_image_f32(); + if (!clip_image_preprocess(ctx_clip, img, img_res, /*pad2square =*/ true)) { fprintf(stderr, "%s: unable to preprocess image\n", __func__); - + clip_image_f32_free(img_res); return false; } @@ -22,7 +22,9 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli *n_image_embd = clip_n_mmproj_embd(ctx_clip); const int64_t t_img_enc_start_us = ggml_time_us(); - if (!clip_image_encode(ctx_clip, n_threads, &img_res, image_embd)) { + bool encoded = clip_image_encode(ctx_clip, n_threads, img_res, image_embd); + clip_image_f32_free(img_res); + if (!encoded) { fprintf(stderr, "Unable to encode image\n"); return false; From d64891b6cf993e29f1b9d3ebb3a6785fea28707d Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sun, 15 Oct 2023 17:17:18 +0200 Subject: [PATCH 16/31] cleanup and refactor *again* --- examples/llava/llava-cli.cpp | 39 
++++++------ llava/clip.cpp | 24 ++++---- llava/clip.h | 8 +-- llava/llava-utils.h | 16 ++--- llava/llava.cpp | 111 +++++++++++++++++++++++++++++------ llava/llava.h | 17 ++++-- 6 files changed, 150 insertions(+), 65 deletions(-) diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index 336b674a743e6..8ae5c41247f5e 100644 --- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -20,46 +20,47 @@ static void show_additional_info(int /*argc*/, char ** argv) { printf(" note: a lower temperature value like 0.1 is recommended for better quality.\n"); } -static bool load_image(llava_context * ctx_llava, gpt_params * params, float **image_embd, int * n_img_pos) { +static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_params * params) { + // load and preprocess the image - clip_image_u8 * img = make_clip_image_u8(); + llava_image_embed * embed = NULL; auto prompt = params->prompt; if (prompt_contains_image(prompt)) { if (!params->image.empty()) { printf("using base64 encoded image instead of command line image path\n"); } - if (!clip_image_load_from_prompt(prompt, img)) { + embed = llava_image_embed_make_with_prompt_base64(ctx_llava->ctx_clip, params->n_threads, prompt); + if (!embed) { fprintf(stderr, "%s: can't load image from prompt\n", __func__); - return false; + return NULL; } params->prompt = remove_image_from_prompt(prompt); } else { - if (!clip_image_load_from_file(params->image.c_str(), img)) { + embed = llava_image_embed_make_with_filename(ctx_llava->ctx_clip, params->n_threads, params->image.c_str()); + if (!embed) { fprintf(stderr, "%s: is %s really an image file?\n", __func__, params->image.c_str()); - return false; + return NULL; } } - bool image_embed_result = llava_build_img_embed(ctx_llava->ctx_llama, ctx_llava->ctx_clip, params->n_threads, img, image_embd, n_img_pos); - if (!image_embed_result) { - clip_image_u8_free(img); - fprintf(stderr, "%s: coulnd't embed the image\n", __func__); - return 
false; - } - return true; + return embed; } -static void process_prompt(struct llava_context * ctx_llava, float * image_embd, int n_img_pos, gpt_params * params, const char * prompt) { +static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, gpt_params * params, const char * prompt) { int n_past = 0; const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict; // llava chat format is "USER: \n\nASSISTANT:" // GG: are we sure that the should be a trailing whitespace at the end of this string? + printf("evaluating system prompt\n"); eval_string(ctx_llava->ctx_llama, "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER: ", params->n_batch, &n_past); - llava_eval_image_embd(ctx_llava->ctx_llama, image_embd, n_img_pos, params->n_batch, &n_past); + printf("evaluating image embed\n"); + llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past); + printf("evaluating prompt\n"); eval_string(ctx_llava->ctx_llama, prompt, params->n_batch, &n_past); eval_string(ctx_llava->ctx_llama, "\nASSISTANT:", params->n_batch, &n_past); + printf("awaiting response\n"); // generate the response @@ -153,16 +154,14 @@ int main(int argc, char ** argv) { return 1; } - float * image_embd; - int n_image_pos; - load_image(ctx_llava, ¶ms, &image_embd, &n_image_pos); + auto image_embed = load_image(ctx_llava, ¶ms); // process the prompt - process_prompt(ctx_llava, image_embd, n_image_pos, ¶ms, params.prompt.c_str()); + process_prompt(ctx_llava, image_embed, ¶ms, params.prompt.c_str()); llama_print_timings(ctx_llava->ctx_llama); - free(image_embd); + llava_image_embed_free(image_embed); llava_free(ctx_llava); return 0; } diff --git a/llava/clip.cpp b/llava/clip.cpp index a2531de73d4a5..ffb38b81cc8ca 100644 --- a/llava/clip.cpp +++ b/llava/clip.cpp @@ -678,7 +678,10 @@ struct clip_ctx * clip_model_load(const 
char * fname, const int verbosity = 1) { return new_clip; } -clip_image_u8 * make_clip_image_u8() { return new clip_image_u8(); } +clip_image_u8 * make_clip_image_u8() { + auto img = new clip_image_u8(); + return img; +} clip_image_f32 * make_clip_image_f32() { return new clip_image_f32(); } void clip_image_u8_free(clip_image_u8 * img) { if (img->data) { delete[] img->data; } delete img; } @@ -692,11 +695,11 @@ static void build_clip_img_from_data(const stbi_uc * data, int nx, int ny, clip_ memcpy(img->data, data, img->size); } -bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, clip_image_u8 * img) { +bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) { int nx, ny, nc; - auto data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3); + auto data = stbi_load(fname, &nx, &ny, &nc, 3); if (!data) { - fprintf(stderr, "%s: failed to decode image bytes\n", __func__); + fprintf(stderr, "%s: failed to load image '%s'\n", __func__, fname); return false; } build_clip_img_from_data(data, nx, ny, img); @@ -704,11 +707,11 @@ bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length return true; } -bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) { +bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) { int nx, ny, nc; - auto data = stbi_load(fname, &nx, &ny, &nc, 3); + auto data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3); if (!data) { - fprintf(stderr, "%s: failed to load image '%s'\n", __func__, fname); + fprintf(stderr, "%s: failed to decode image bytes\n", __func__); return false; } build_clip_img_from_data(data, nx, ny, img); @@ -716,7 +719,6 @@ bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) { return true; } - // normalize: x = (x - mean) / std // TODO: implement bicubic interpolation instead of linear. 
bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32 * res, const bool pad2square) { @@ -1065,16 +1067,16 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i return true; } -int clip_n_mmproj_embd(struct clip_ctx * ctx) { +int clip_n_mmproj_embd(const struct clip_ctx * ctx) { return ctx->vision_model.mm_2_b->ne[0]; } -int clip_n_patches(struct clip_ctx * ctx) { +int clip_n_patches(const struct clip_ctx * ctx) { auto & params = ctx->vision_model.hparams; return (params.image_size / params.patch_size) * (params.image_size / params.patch_size); } -size_t clip_embd_nbytes(struct clip_ctx * ctx) { +size_t clip_embd_nbytes(const struct clip_ctx * ctx) { return clip_n_patches(ctx) * clip_n_mmproj_embd(ctx) * sizeof(float); } diff --git a/llava/clip.h b/llava/clip.h index 2185f67b96ba9..a8022c52453d2 100644 --- a/llava/clip.h +++ b/llava/clip.h @@ -25,9 +25,9 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity); void clip_free(struct clip_ctx * ctx); -size_t clip_embd_nbytes(struct clip_ctx * ctx); -int clip_n_patches(struct clip_ctx * ctx); -int clip_n_mmproj_embd(struct clip_ctx * ctx); +size_t clip_embd_nbytes(const struct clip_ctx * ctx); +int clip_n_patches(const struct clip_ctx * ctx); +int clip_n_mmproj_embd(const struct clip_ctx * ctx); // RGB uint8 image struct clip_image_u8 { @@ -62,7 +62,7 @@ LLAMA_API void clip_image_u8_free(clip_image_u8 * img); LLAMA_API void clip_image_f32_free(clip_image_f32 * img); LLAMA_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img); /** interpret bytes as an image file with length bytes_length, and use the result to populate img */ -LLAMA_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, clip_image_u8 * img); +LLAMA_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img); bool clip_image_preprocess(const struct 
clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32 * res, const bool pad2square); bool clip_image_encode(const struct clip_ctx * ctx, const int n_threads, struct clip_image_f32 * img, float * vec); diff --git a/llava/llava-utils.h b/llava/llava-utils.h index 53beefd261467..38bf5729642ed 100644 --- a/llava/llava-utils.h +++ b/llava/llava-utils.h @@ -4,6 +4,7 @@ #include "common.h" #include "llama.h" +#include "llava.h" #include "base64.hpp" @@ -143,12 +144,12 @@ inline bool prompt_contains_image(const std::string& prompt) { } // replaces the base64 image tag in the prompt with `replacement` -inline bool clip_image_load_from_prompt(const std::string& prompt, clip_image_u8 * img) { +inline llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) { size_t img_base64_str_start, img_base64_str_end; find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end); if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) { fprintf(stderr, "%s: invalid base64 image tag. 
must be %s%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END); - return false; + return NULL; } auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN); @@ -157,16 +158,15 @@ inline bool clip_image_load_from_prompt(const std::string& prompt, clip_image_u8 auto required_bytes = base64::required_encode_size(base64_str.size()); auto img_bytes = std::vector(required_bytes); - auto img_bytes_end = base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin()); - size_t img_bytes_len = img_bytes_end - img_bytes.begin(); + base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin()); - auto img_loaded_ok = clip_image_load_from_bytes(img_bytes.data(), img_bytes_len, img); - if (!img_loaded_ok) { + auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size()); + if (!embed) { fprintf(stderr, "%s: could not load image from base64 string.\n", __func__); - return false; + return NULL; } - return true; + return embed; } inline std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") { diff --git a/llava/llava.cpp b/llava/llava.cpp index 18cbc76aa496b..6bafee5a0abcf 100644 --- a/llava/llava.cpp +++ b/llava/llava.cpp @@ -10,7 +10,7 @@ #include "base64.hpp" -static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_image_embd, int * n_img_pos) { +static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_pos) { clip_image_f32 * img_res = make_clip_image_f32(); if (!clip_image_preprocess(ctx_clip, img, img_res, /*pad2square =*/ true)) { fprintf(stderr, "%s: unable to preprocess image\n", __func__); @@ -19,7 +19,6 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli } *n_img_pos = clip_n_patches(ctx_clip); - *n_image_embd = clip_n_mmproj_embd(ctx_clip); const int64_t t_img_enc_start_us = 
ggml_time_us(); bool encoded = clip_image_encode(ctx_clip, n_threads, img_res, image_embd); @@ -39,7 +38,18 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli return true; } -bool llava_build_img_embed(const llama_context * ctx_llama, clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out) { +bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip) { + // make sure that the correct mmproj was used, i.e., compare apples to apples + int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama)); + auto n_image_embd = clip_n_mmproj_embd(ctx_clip); + if (n_image_embd != n_llama_embd) { + printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_image_embd, n_llama_embd); + return false; + } + return true; +} + +static bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out) { float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)); if (!image_embd) { @@ -49,20 +59,11 @@ bool llava_build_img_embed(const llama_context * ctx_llama, clip_ctx * ctx_clip, } int n_img_pos; - int n_image_embd; - if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_image_embd, &n_img_pos)) { + if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_img_pos)) { fprintf(stderr, "%s: cannot encode image, aborting\n", __func__); free(image_embd); return false; } - // make sure that the correct mmproj was used, i.e., compare apples to apples - int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama)); - if (n_image_embd != n_llama_embd) { - printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). 
Make sure that you use the correct mmproj file.\n", __func__, n_image_embd, n_llama_embd); - free(image_embd); - return false; - } - *image_embd_out = image_embd; *n_img_pos_out = n_img_pos; @@ -71,15 +72,15 @@ bool llava_build_img_embed(const llama_context * ctx_llama, clip_ctx * ctx_clip, -bool llava_eval_image_embd(llama_context * ctx_llama, float * image_embd, int n_image_pos, int n_batch, int * n_past) { +bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed, int n_batch, int * n_past) { int n_embd = llama_n_embd(llama_get_model(ctx_llama)); - for (int i = 0; i < n_image_pos; i += n_batch) { - int n_eval = n_image_pos - i; + for (int i = 0; i < image_embed->n_image_pos; i += n_batch) { + int n_eval = image_embed->n_image_pos - i; if (n_eval > n_batch) { n_eval = n_batch; } - llama_batch batch = {int32_t(n_eval), nullptr, (image_embd+i*n_embd), nullptr, nullptr, nullptr, *n_past, 1, 0, }; + llama_batch batch = {int32_t(n_eval), nullptr, (image_embed->embed+i*n_embd), nullptr, nullptr, nullptr, *n_past, 1, 0, }; if (llama_decode(ctx_llama, batch)) { fprintf(stderr, "%s : failed to eval\n", __func__); return false; @@ -88,3 +89,79 @@ bool llava_eval_image_embd(llama_context * ctx_llama, float * image_embd, int n_ } return true; } + + +LLAMA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length) +{ + clip_image_u8 * img = make_clip_image_u8(); + if (!clip_image_load_from_bytes(image_bytes, image_bytes_length, img)) { + clip_image_u8_free(img); + fprintf(stderr, "%s: can't load image from bytes, is it a valid image?", __func__); + return NULL; + } + + float* image_embed = NULL; + int n_image_pos = 0; + bool image_embed_result = llava_image_embed_make_with_clip_img(ctx_clip, n_threads, img, &image_embed, &n_image_pos); + if (!image_embed_result) { + clip_image_u8_free(img); + fprintf(stderr, "%s: coulnd't embed 
the image\n", __func__); + return NULL; + } + + clip_image_u8_free(img); + auto result = (llava_image_embed*)malloc(sizeof(llava_image_embed)); + result->embed = image_embed; + result->n_image_pos = n_image_pos; + return result; +} + +static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long *sizeOut) +{ + auto file = fopen(path, "rb"); + if (file == NULL) { + fprintf(stderr, "%s: can't read file %s\n", __func__, path); + return false; + } + + fseek(file, 0, SEEK_END); + auto fileSize = ftell(file); + fseek(file, 0, SEEK_SET); + + auto buffer = (unsigned char *)malloc(fileSize); // Allocate memory to hold the file data + if (buffer == NULL) { + fprintf(stderr, "%s: failed to alloc %ld bytes for file %s\n", __func__, fileSize, path); + perror("Memory allocation error"); + fclose(file); + return false; + } + fread(buffer, 1, fileSize, file); // Read the file into the buffer + fclose(file); // Close the file + + *bytesOut = buffer; + *sizeOut = fileSize; + return true; + +} + +LLAMA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path) +{ + unsigned char* image_bytes; + long image_bytes_length; + auto loaded = load_file_to_bytes(image_path, &image_bytes, &image_bytes_length); + if (!loaded) { + fprintf(stderr, "%s: failed to load %s\n", __func__, image_path); + return NULL; + } + + auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, image_bytes, image_bytes_length); + free(image_bytes); + + return embed; +} + + +LLAMA_API void llava_image_embed_free(struct llava_image_embed * embed) { + free(embed->embed); + free(embed); +} diff --git a/llava/llava.h b/llava/llava.h index de3875e039f19..aa9ea1a4f1597 100644 --- a/llava/llava.h +++ b/llava/llava.h @@ -10,13 +10,20 @@ struct clip_ctx; extern "C" { #endif -/** using ctx_clip, build a llava image embedding from the passed-in image `img` (see clip.h for methods to load img). 
- * result is returned as image_embd_out, size n_image_pos_out */ -LLAMA_API bool llava_build_img_embed(const struct llama_context * ctx_llama, struct clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_image_pos_out); +struct llava_image_embed { + float * embed; + int n_image_pos; +}; -/** write the image represented by image_embd (size n_image_pos) into the llama context with batch size n_batch, +LLAMA_API bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip); + +LLAMA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length); +LLAMA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path); +LLAMA_API void llava_image_embed_free(struct llava_image_embed * embed); + +/** write the image represented by embed into the llama context with batch size n_batch, * starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. 
*/ -LLAMA_API bool llava_eval_image_embd(struct llama_context * ctx_llama, float * image_embd, int n_image_pos, int n_batch, int * n_past); +LLAMA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past); #ifdef __cplusplus From 5a9155189945cd9aa6b98a4a340b38dc93c8d219 Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Sun, 15 Oct 2023 17:25:13 +0200 Subject: [PATCH 17/31] update headerdoc --- llava/clip.h | 7 ++++--- llava/llava.h | 7 +++++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/llava/clip.h b/llava/clip.h index a8022c52453d2..8dfa97e12d49f 100644 --- a/llava/clip.h +++ b/llava/clip.h @@ -21,9 +21,10 @@ struct clip_vision_hparams { float eps; }; -struct clip_ctx * clip_model_load(const char * fname, const int verbosity); - -void clip_free(struct clip_ctx * ctx); +/** load mmproj model */ +LLAMA_API struct clip_ctx * clip_model_load(const char * fname, const int verbosity); +/** free mmproj model */ +LLAMA_API void clip_free(struct clip_ctx * ctx); size_t clip_embd_nbytes(const struct clip_ctx * ctx); int clip_n_patches(const struct clip_ctx * ctx); diff --git a/llava/llava.h b/llava/llava.h index aa9ea1a4f1597..637fe4c09bfe4 100644 --- a/llava/llava.h +++ b/llava/llava.h @@ -15,14 +15,17 @@ struct llava_image_embed { int n_image_pos; }; +/** sanity check for clip <-> llava embed size match */ LLAMA_API bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip); +/** build an image embed from image file bytes */ LLAMA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length); +/** build an image embed from a path to an image filename */ LLAMA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path); LLAMA_API void llava_image_embed_free(struct 
llava_image_embed * embed); +/** free an embedding made with llava_image_embed_make_* */ -/** write the image represented by embed into the llama context with batch size n_batch, - * starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. */ +/** write the image represented by embed into the llama context with batch size n_batch, starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. */ LLAMA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past); From 803703478de817b2be045534e7c997eb6cf72ce3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Fri, 3 Nov 2023 01:34:52 +0300 Subject: [PATCH 18/31] build with cmake, not tested (WIP) --- common/CMakeLists.txt | 4 ++-- examples/llava/llava-cli.cpp | 7 +++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index cb8f4ec8afa45..d38818982277e 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -40,8 +40,8 @@ endif() set(TARGET common) -add_library(${TARGET} OBJECT - base64.hpp +add_library(${TARGET} STATIC +base64.hpp common.h common.cpp sampling.h diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index 8ae5c41247f5e..4b4136cc019c5 100644 --- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -52,14 +52,13 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_ const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict; // llava chat format is "USER: \n\nASSISTANT:" - // GG: are we sure that the should be a trailing whitespace at the end of this string? printf("evaluating system prompt\n"); - eval_string(ctx_llava->ctx_llama, "A chat between a curious human and an artificial intelligence assistant. 
The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER: ", params->n_batch, &n_past); + eval_string(ctx_llava->ctx_llama, "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:", params->n_batch, &n_past, true); printf("evaluating image embed\n"); llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past); printf("evaluating prompt\n"); - eval_string(ctx_llava->ctx_llama, prompt, params->n_batch, &n_past); - eval_string(ctx_llava->ctx_llama, "\nASSISTANT:", params->n_batch, &n_past); + eval_string(ctx_llava->ctx_llama, prompt, params->n_batch, &n_past, false); + eval_string(ctx_llava->ctx_llama, "\nASSISTANT:", params->n_batch, &n_past, false); printf("awaiting response\n"); // generate the response From 52143f799b3367c92b83355d7670ccee1d045d07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Sun, 5 Nov 2023 15:22:47 +0300 Subject: [PATCH 19/31] Editorconfig --- common/base64.hpp | 688 +++++++++++++++++------------------ examples/llava/llava-cli.cpp | 4 +- 2 files changed, 345 insertions(+), 347 deletions(-) diff --git a/common/base64.hpp b/common/base64.hpp index 9a19238251faf..563247a6e5f7d 100644 --- a/common/base64.hpp +++ b/common/base64.hpp @@ -36,357 +36,357 @@ For more information, please refer to class base64_error : public std::runtime_error { public: - using std::runtime_error::runtime_error; + using std::runtime_error::runtime_error; }; class base64 { public: - enum class alphabet - { - /** the alphabet is detected automatically */ - auto_, - /** the standard base64 alphabet is used */ - standard, - /** like `standard` except that the characters `+` and `/` are replaced by `-` and `_` respectively*/ - url_filename_safe - }; - - enum class decoding_behavior - { - /** if the input is not padded, the remaining bits are ignored */ - moderate, - /** if a 
padding character is encounter decoding is finished */ - loose - }; - - /** - Encodes all the elements from `in_begin` to `in_end` to `out`. - - @warning The source and destination cannot overlap. The destination must be able to hold at least - `required_encode_size(std::distance(in_begin, in_end))`, otherwise the behavior depends on the output iterator. - - @tparam Input_iterator the source; the returned elements are cast to `std::uint8_t` and should not be greater than - 8 bits - @tparam Output_iterator the destination; the elements written to it are from the type `char` - @param in_begin the beginning of the source - @param in_end the ending of the source - @param out the destination iterator - @param alphabet which alphabet should be used - @returns the iterator to the next element past the last element copied - @throws see `Input_iterator` and `Output_iterator` - */ - template - static Output_iterator encode(Input_iterator in_begin, Input_iterator in_end, Output_iterator out, - alphabet alphabet = alphabet::standard) - { - constexpr auto pad = '='; - const char* alpha = alphabet == alphabet::url_filename_safe - ? 
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" - : "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; - - while (in_begin != in_end) { - std::uint8_t i0 = 0, i1 = 0, i2 = 0; - - // first character - i0 = static_cast(*in_begin); - ++in_begin; - - *out = alpha[i0 >> 2 & 0x3f]; - ++out; - - // part of first character and second - if (in_begin != in_end) { - i1 = static_cast(*in_begin); - ++in_begin; - - *out = alpha[((i0 & 0x3) << 4) | (i1 >> 4 & 0x0f)]; - ++out; - } else { - *out = alpha[(i0 & 0x3) << 4]; - ++out; - - // last padding - *out = pad; - ++out; - - // last padding - *out = pad; - ++out; - - break; - } - - // part of second character and third - if (in_begin != in_end) { - i2 = static_cast(*in_begin); - ++in_begin; - - *out = alpha[((i1 & 0xf) << 2) | (i2 >> 6 & 0x03)]; - ++out; - } else { - *out = alpha[(i1 & 0xf) << 2]; - ++out; - - // last padding - *out = pad; - ++out; - - break; - } - - // rest of third - *out = alpha[i2 & 0x3f]; - ++out; - } - - return out; - } - /** - Encodes a string. - - @param str the string that should be encoded - @param alphabet which alphabet should be used - @returns the encoded base64 string - @throws see base64::encode() - */ - static std::string encode(const std::string& str, alphabet alphabet = alphabet::standard) - { - std::string result; - - result.reserve(required_encode_size(str.length()) + 1); - - encode(str.begin(), str.end(), std::back_inserter(result), alphabet); - - return result; - } - /** - Encodes a char array. 
- - @param buffer the char array - @param size the size of the array - @param alphabet which alphabet should be used - @returns the encoded string - */ - static std::string encode(const char* buffer, std::size_t size, alphabet alphabet = alphabet::standard) - { - std::string result; - - result.reserve(required_encode_size(size) + 1); - - encode(buffer, buffer + size, std::back_inserter(result), alphabet); - - return result; - } - /** - Decodes all the elements from `in_begin` to `in_end` to `out`. `in_begin` may point to the same location as `out`, - in other words: inplace decoding is possible. - - @warning The destination must be able to hold at least `required_decode_size(std::distance(in_begin, in_end))`, - otherwise the behavior depends on the output iterator. - - @tparam Input_iterator the source; the returned elements are cast to `char` - @tparam Output_iterator the destination; the elements written to it are from the type `std::uint8_t` - @param in_begin the beginning of the source - @param in_end the ending of the source - @param out the destination iterator - @param alphabet which alphabet should be used - @param behavior the behavior when an error was detected - @returns the iterator to the next element past the last element copied - @throws base64_error depending on the set behavior - @throws see `Input_iterator` and `Output_iterator` - */ - template - static Output_iterator decode(Input_iterator in_begin, Input_iterator in_end, Output_iterator out, - alphabet alphabet = alphabet::auto_, - decoding_behavior behavior = decoding_behavior::moderate) - { - //constexpr auto pad = '='; - std::uint8_t last = 0; - auto bits = 0; - - while (in_begin != in_end) { - auto c = *in_begin; - ++in_begin; - - if (c == '=') { - break; - } - - auto part = _base64_value(alphabet, c); - - // enough bits for one byte - if (bits + 6 >= 8) { - *out = (last << (8 - bits)) | (part >> (bits - 2)); - ++out; - - bits -= 2; - } else { - bits += 6; - } - - last = part; - } - - // 
check padding - if (behavior != decoding_behavior::loose) { - while (in_begin != in_end) { - auto c = *in_begin; - ++in_begin; - - if (c != '=') { - throw base64_error("invalid base64 character."); - } - } - } - - return out; - } - /** - Decodes a string. - - @param str the base64 encoded string - @param alphabet which alphabet should be used - @param behavior the behavior when an error was detected - @returns the decoded string - @throws see base64::decode() - */ - static std::string decode(const std::string& str, alphabet alphabet = alphabet::auto_, - decoding_behavior behavior = decoding_behavior::moderate) - { - std::string result; - - result.reserve(max_decode_size(str.length())); - - decode(str.begin(), str.end(), std::back_inserter(result), alphabet, behavior); - - return result; - } - /** - Decodes a string. - - @param buffer the base64 encoded buffer - @param size the size of the buffer - @param alphabet which alphabet should be used - @param behavior the behavior when an error was detected - @returns the decoded string - @throws see base64::decode() - */ - static std::string decode(const char* buffer, std::size_t size, alphabet alphabet = alphabet::auto_, - decoding_behavior behavior = decoding_behavior::moderate) - { - std::string result; - - result.reserve(max_decode_size(size)); - - decode(buffer, buffer + size, std::back_inserter(result), alphabet, behavior); - - return result; - } - /** - Decodes a string inplace. - - @param[in,out] str the base64 encoded string - @param alphabet which alphabet should be used - @param behavior the behavior when an error was detected - @throws base64::decode_inplace() - */ - static void decode_inplace(std::string& str, alphabet alphabet = alphabet::auto_, - decoding_behavior behavior = decoding_behavior::moderate) - { - str.resize(decode(str.begin(), str.end(), str.begin(), alphabet, behavior) - str.begin()); - } - /** - Decodes a char array inplace. 
- - @param[in,out] str the string array - @param size the length of the array - @param alphabet which alphabet should be used - @param behavior the behavior when an error was detected - @returns the pointer to the next element past the last element decoded - @throws base64::decode_inplace() - */ - static char* decode_inplace(char* str, std::size_t size, alphabet alphabet = alphabet::auto_, - decoding_behavior behavior = decoding_behavior::moderate) - { - return decode(str, str + size, str, alphabet, behavior); - } - /** - Returns the required decoding size for a given size. The value is calculated with the following formula: - - $$ - \lceil \frac{size}{4} \rceil \cdot 3 - $$ - - @param size the size of the encoded input - @returns the size of the resulting decoded buffer; this the absolute maximum - */ - static std::size_t max_decode_size(std::size_t size) noexcept - { - return (size / 4 + (size % 4 ? 1 : 0)) * 3; - } - /** - Returns the required encoding size for a given size. The value is calculated with the following formula: - - $$ - \lceil \frac{size}{3} \rceil \cdot 4 - $$ - - @param size the size of the decoded input - @returns the size of the resulting encoded buffer - */ - static std::size_t required_encode_size(std::size_t size) noexcept - { - return (size / 3 + (size % 3 ? 1 : 0)) * 4; - } + enum class alphabet + { + /** the alphabet is detected automatically */ + auto_, + /** the standard base64 alphabet is used */ + standard, + /** like `standard` except that the characters `+` and `/` are replaced by `-` and `_` respectively*/ + url_filename_safe + }; + + enum class decoding_behavior + { + /** if the input is not padded, the remaining bits are ignored */ + moderate, + /** if a padding character is encounter decoding is finished */ + loose + }; + + /** + Encodes all the elements from `in_begin` to `in_end` to `out`. + + @warning The source and destination cannot overlap. 
The destination must be able to hold at least + `required_encode_size(std::distance(in_begin, in_end))`, otherwise the behavior depends on the output iterator. + + @tparam Input_iterator the source; the returned elements are cast to `std::uint8_t` and should not be greater than + 8 bits + @tparam Output_iterator the destination; the elements written to it are from the type `char` + @param in_begin the beginning of the source + @param in_end the ending of the source + @param out the destination iterator + @param alphabet which alphabet should be used + @returns the iterator to the next element past the last element copied + @throws see `Input_iterator` and `Output_iterator` + */ + template + static Output_iterator encode(Input_iterator in_begin, Input_iterator in_end, Output_iterator out, + alphabet alphabet = alphabet::standard) + { + constexpr auto pad = '='; + const char* alpha = alphabet == alphabet::url_filename_safe + ? "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" + : "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + + while (in_begin != in_end) { + std::uint8_t i0 = 0, i1 = 0, i2 = 0; + + // first character + i0 = static_cast(*in_begin); + ++in_begin; + + *out = alpha[i0 >> 2 & 0x3f]; + ++out; + + // part of first character and second + if (in_begin != in_end) { + i1 = static_cast(*in_begin); + ++in_begin; + + *out = alpha[((i0 & 0x3) << 4) | (i1 >> 4 & 0x0f)]; + ++out; + } else { + *out = alpha[(i0 & 0x3) << 4]; + ++out; + + // last padding + *out = pad; + ++out; + + // last padding + *out = pad; + ++out; + + break; + } + + // part of second character and third + if (in_begin != in_end) { + i2 = static_cast(*in_begin); + ++in_begin; + + *out = alpha[((i1 & 0xf) << 2) | (i2 >> 6 & 0x03)]; + ++out; + } else { + *out = alpha[(i1 & 0xf) << 2]; + ++out; + + // last padding + *out = pad; + ++out; + + break; + } + + // rest of third + *out = alpha[i2 & 0x3f]; + ++out; + } + + return out; + } + /** + Encodes a string. 
+ + @param str the string that should be encoded + @param alphabet which alphabet should be used + @returns the encoded base64 string + @throws see base64::encode() + */ + static std::string encode(const std::string& str, alphabet alphabet = alphabet::standard) + { + std::string result; + + result.reserve(required_encode_size(str.length()) + 1); + + encode(str.begin(), str.end(), std::back_inserter(result), alphabet); + + return result; + } + /** + Encodes a char array. + + @param buffer the char array + @param size the size of the array + @param alphabet which alphabet should be used + @returns the encoded string + */ + static std::string encode(const char* buffer, std::size_t size, alphabet alphabet = alphabet::standard) + { + std::string result; + + result.reserve(required_encode_size(size) + 1); + + encode(buffer, buffer + size, std::back_inserter(result), alphabet); + + return result; + } + /** + Decodes all the elements from `in_begin` to `in_end` to `out`. `in_begin` may point to the same location as `out`, + in other words: inplace decoding is possible. + + @warning The destination must be able to hold at least `required_decode_size(std::distance(in_begin, in_end))`, + otherwise the behavior depends on the output iterator. 
+ + @tparam Input_iterator the source; the returned elements are cast to `char` + @tparam Output_iterator the destination; the elements written to it are from the type `std::uint8_t` + @param in_begin the beginning of the source + @param in_end the ending of the source + @param out the destination iterator + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @returns the iterator to the next element past the last element copied + @throws base64_error depending on the set behavior + @throws see `Input_iterator` and `Output_iterator` + */ + template + static Output_iterator decode(Input_iterator in_begin, Input_iterator in_end, Output_iterator out, + alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + //constexpr auto pad = '='; + std::uint8_t last = 0; + auto bits = 0; + + while (in_begin != in_end) { + auto c = *in_begin; + ++in_begin; + + if (c == '=') { + break; + } + + auto part = _base64_value(alphabet, c); + + // enough bits for one byte + if (bits + 6 >= 8) { + *out = (last << (8 - bits)) | (part >> (bits - 2)); + ++out; + + bits -= 2; + } else { + bits += 6; + } + + last = part; + } + + // check padding + if (behavior != decoding_behavior::loose) { + while (in_begin != in_end) { + auto c = *in_begin; + ++in_begin; + + if (c != '=') { + throw base64_error("invalid base64 character."); + } + } + } + + return out; + } + /** + Decodes a string. 
+ + @param str the base64 encoded string + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @returns the decoded string + @throws see base64::decode() + */ + static std::string decode(const std::string& str, alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + std::string result; + + result.reserve(max_decode_size(str.length())); + + decode(str.begin(), str.end(), std::back_inserter(result), alphabet, behavior); + + return result; + } + /** + Decodes a string. + + @param buffer the base64 encoded buffer + @param size the size of the buffer + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @returns the decoded string + @throws see base64::decode() + */ + static std::string decode(const char* buffer, std::size_t size, alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + std::string result; + + result.reserve(max_decode_size(size)); + + decode(buffer, buffer + size, std::back_inserter(result), alphabet, behavior); + + return result; + } + /** + Decodes a string inplace. + + @param[in,out] str the base64 encoded string + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @throws base64::decode_inplace() + */ + static void decode_inplace(std::string& str, alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + str.resize(decode(str.begin(), str.end(), str.begin(), alphabet, behavior) - str.begin()); + } + /** + Decodes a char array inplace. 
+ + @param[in,out] str the string array + @param size the length of the array + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @returns the pointer to the next element past the last element decoded + @throws base64::decode_inplace() + */ + static char* decode_inplace(char* str, std::size_t size, alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + return decode(str, str + size, str, alphabet, behavior); + } + /** + Returns the required decoding size for a given size. The value is calculated with the following formula: + + $$ + \lceil \frac{size}{4} \rceil \cdot 3 + $$ + + @param size the size of the encoded input + @returns the size of the resulting decoded buffer; this the absolute maximum + */ + static std::size_t max_decode_size(std::size_t size) noexcept + { + return (size / 4 + (size % 4 ? 1 : 0)) * 3; + } + /** + Returns the required encoding size for a given size. The value is calculated with the following formula: + + $$ + \lceil \frac{size}{3} \rceil \cdot 4 + $$ + + @param size the size of the decoded input + @returns the size of the resulting encoded buffer + */ + static std::size_t required_encode_size(std::size_t size) noexcept + { + return (size / 3 + (size % 3 ? 
1 : 0)) * 4; + } private: - static std::uint8_t _base64_value(alphabet& alphabet, char c) - { - if (c >= 'A' && c <= 'Z') { - return c - 'A'; - } else if (c >= 'a' && c <= 'z') { - return c - 'a' + 26; - } else if (c >= '0' && c <= '9') { - return c - '0' + 52; - } - - // comes down to alphabet - if (alphabet == alphabet::standard) { - if (c == '+') { - return 62; - } else if (c == '/') { - return 63; - } - } else if (alphabet == alphabet::url_filename_safe) { - if (c == '-') { - return 62; - } else if (c == '_') { - return 63; - } - } // auto detect - else { - if (c == '+') { - alphabet = alphabet::standard; - - return 62; - } else if (c == '/') { - alphabet = alphabet::standard; - - return 63; - } else if (c == '-') { - alphabet = alphabet::url_filename_safe; - - return 62; - } else if (c == '_') { - alphabet = alphabet::url_filename_safe; - - return 63; - } - } - - throw base64_error("invalid base64 character."); - } + static std::uint8_t _base64_value(alphabet& alphabet, char c) + { + if (c >= 'A' && c <= 'Z') { + return c - 'A'; + } else if (c >= 'a' && c <= 'z') { + return c - 'a' + 26; + } else if (c >= '0' && c <= '9') { + return c - '0' + 52; + } + + // comes down to alphabet + if (alphabet == alphabet::standard) { + if (c == '+') { + return 62; + } else if (c == '/') { + return 63; + } + } else if (alphabet == alphabet::url_filename_safe) { + if (c == '-') { + return 62; + } else if (c == '_') { + return 63; + } + } // auto detect + else { + if (c == '+') { + alphabet = alphabet::standard; + + return 62; + } else if (c == '/') { + alphabet = alphabet::standard; + + return 63; + } else if (c == '-') { + alphabet = alphabet::url_filename_safe; + + return 62; + } else if (c == '_') { + alphabet = alphabet::url_filename_safe; + + return 63; + } + } + + throw base64_error("invalid base64 character."); + } }; #endif // !PUBLIC_DOMAIN_BASE64_HPP_ diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index 4b4136cc019c5..dd2756969a72b 100644 
--- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -74,19 +74,17 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_ } printf("\n"); - } static struct llava_context * llava_init(gpt_params * params) { - const char * clip_path = params->mmproj.c_str(); auto prompt = params->prompt; if (prompt.empty()) { prompt = "describe the image in detail."; } - + auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1); llama_backend_init(params->numa); From 32bf7bf61f8ebdf04e683977c0a455161563a963 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Sun, 5 Nov 2023 15:33:16 +0300 Subject: [PATCH 20/31] Editorconfig --- CMakeLists.txt | 2 +- examples/llava/clip.cpp | 2 +- examples/llava/llava-utils.h | 2 +- examples/llava/llava.cpp | 10 +++++----- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index fb4f5c01add6f..4362570f7f2bc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -686,7 +686,7 @@ if (BUILD_SHARED_LIBS) # By default, symbols provided by the sublibs that are not used by mainlib (which is all of them in this case) # are not used. This changes that. 
- if (WIN32) + if (WIN32) set_target_properties(llama PROPERTIES LINK_FLAGS "/WHOLEARCHIVE" ) diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index 92e19346f8a1c..3c909c7d3c6ab 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -680,7 +680,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { return new_clip; } -clip_image_u8 * make_clip_image_u8() { +clip_image_u8 * make_clip_image_u8() { auto img = new clip_image_u8(); return img; } diff --git a/examples/llava/llava-utils.h b/examples/llava/llava-utils.h index 5d7da63fd6ed3..aa6f0304cbda8 100644 --- a/examples/llava/llava-utils.h +++ b/examples/llava/llava-utils.h @@ -165,7 +165,7 @@ inline bool prompt_contains_image(const std::string& prompt) { } // replaces the base64 image tag in the prompt with `replacement` -inline llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) { +inline llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) { size_t img_base64_str_start, img_base64_str_end; find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end); if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) { diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index edfd6c0723852..d2ce65df6c171 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ -116,17 +116,17 @@ LLAMA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct cl return result; } -static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long *sizeOut) +static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long *sizeOut) { - auto file = fopen(path, "rb"); + auto file = fopen(path, "rb"); if (file == NULL) { fprintf(stderr, "%s: can't read file %s\n", __func__, path); return false; } - fseek(file, 0, 
SEEK_END); - auto fileSize = ftell(file); - fseek(file, 0, SEEK_SET); + fseek(file, 0, SEEK_END); + auto fileSize = ftell(file); + fseek(file, 0, SEEK_SET); auto buffer = (unsigned char *)malloc(fileSize); // Allocate memory to hold the file data if (buffer == NULL) { From 53dca51fd1b97cf6b234117d63a31589d68d224c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Sun, 5 Nov 2023 17:00:35 +0300 Subject: [PATCH 21/31] Build with make --- Makefile | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 300c1e6c7e127..ab683cf146d86 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # Define the default target now so that it is always the first target BUILD_TARGETS = \ main quantize quantize-stats perplexity embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \ - simple batched batched-bench save-load-state server gguf llama-bench llava baby-llama beam-search \ + simple batched batched-bench save-load-state server gguf llama-bench llava llava-cli baby-llama beam-search \ speculative infill benchmark-matmult parallel finetune export-lora tests/test-c.o # Binaries only useful for tests @@ -617,7 +617,10 @@ convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggm llama-bench: examples/llama-bench/llama-bench.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -llava: examples/llava/llava.cpp examples/llava/llava-utils.h examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +libllava.a: examples/llava/llava.cpp examples/llava/llava.h examples/llava/llava-utils.h examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h common/base64.hpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) + $(CXX) $(CXXFLAGS) -static -fPIC -c $< -o $@ $(LDFLAGS) -Wno-cast-qual + +llava-cli: examples/llava/llava-cli.cpp examples/llava/clip.h examples/llava/clip.cpp examples/llava/llava.h 
examples/llava/llava.cpp examples/llava/llava-utils.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -Wno-cast-qual baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS) From b9277727a692e48bf44dd151ca8533e45099c015 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Sun, 5 Nov 2023 17:10:54 +0300 Subject: [PATCH 22/31] Build with make --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ab683cf146d86..68710ad439220 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # Define the default target now so that it is always the first target BUILD_TARGETS = \ main quantize quantize-stats perplexity embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \ - simple batched batched-bench save-load-state server gguf llama-bench llava llava-cli baby-llama beam-search \ + simple batched batched-bench save-load-state server gguf llama-bench libllava.a llava-cli baby-llama beam-search \ speculative infill benchmark-matmult parallel finetune export-lora tests/test-c.o # Binaries only useful for tests From 01f06e26c37cf4fdf8412ff0a3496cd31ddbabd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Sun, 5 Nov 2023 17:34:48 +0300 Subject: [PATCH 23/31] Fix cyclical depts on Windows --- CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 4362570f7f2bc..350bf72172e4e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -673,7 +673,6 @@ target_include_directories(llama PUBLIC .) 
target_compile_features(llama PUBLIC cxx_std_11) # don't bump target_link_libraries(llama PRIVATE ggml - llava ${LLAMA_EXTRA_LIBS} ) From ad97e0eda8baee4537979e60a6687c892db044ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Sun, 5 Nov 2023 23:36:16 +0300 Subject: [PATCH 24/31] attempt to fix build on Windows --- examples/llava/llava-utils.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/examples/llava/llava-utils.h b/examples/llava/llava-utils.h index aa6f0304cbda8..3303f7a8711f4 100644 --- a/examples/llava/llava-utils.h +++ b/examples/llava/llava-utils.h @@ -1,3 +1,6 @@ +#ifndef LLAVA_UTILS_H +#define LLAVA_UTILS_H + #pragma once // this one and clip lib will be eventually merged to a single lib, let's keep it this way for now @@ -200,3 +203,5 @@ inline std::string remove_image_from_prompt(const std::string& prompt, const cha auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END)); return pre + replacement + post; } + +#endif From 1f8c866408dfa5ebae73142e80a268f479ac5f0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Mon, 6 Nov 2023 03:27:03 +0300 Subject: [PATCH 25/31] attempt to fix build on Windows --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 350bf72172e4e..7e00688d6e1b7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -687,7 +687,7 @@ if (BUILD_SHARED_LIBS) # are not used. This changes that. 
if (WIN32) set_target_properties(llama PROPERTIES - LINK_FLAGS "/WHOLEARCHIVE" + LINK_FLAGS "/WHOLEARCHIVE /FORCE:MULTIPLE" ) elseif (APPLE) set_target_properties(llama PROPERTIES From d6be69faffa24b98daf9dbd2634ed77e2a0a7d4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Mon, 6 Nov 2023 03:36:53 +0300 Subject: [PATCH 26/31] Upd TODOs --- examples/llava/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/llava/README.md b/examples/llava/README.md index b1df8dd165425..323c5fdd02835 100644 --- a/examples/llava/README.md +++ b/examples/llava/README.md @@ -51,7 +51,6 @@ Now both the LLaMA part and the image encoder is in the `llava-v1.5-7b` director ## TODO -- [ ] Support server mode. - [ ] Support non-CPU backend for the image encoding part. - [ ] Support different sampling methods. - [ ] Support more model variants. From 5b8b9ef987f187c59cca3359cc36feddf7bd634a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Mon, 6 Nov 2023 04:08:21 +0300 Subject: [PATCH 27/31] attempt to fix build on Windows+CUDA --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7e00688d6e1b7..56c3cb6f45d19 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -687,7 +687,7 @@ if (BUILD_SHARED_LIBS) # are not used. This changes that. 
if (WIN32) set_target_properties(llama PROPERTIES - LINK_FLAGS "/WHOLEARCHIVE /FORCE:MULTIPLE" + LINK_FLAGS "/WHOLEARCHIVE /FORCE:MULTIPLE /NODEFAULTLIB:library" ) elseif (APPLE) set_target_properties(llama PROPERTIES From b9bacc78b8f3d1cd832335ae19ada5174add1440 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Mon, 6 Nov 2023 04:38:45 +0300 Subject: [PATCH 28/31] Revert changes in cmake --- CMakeLists.txt | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 56c3cb6f45d19..7b4eb18403c0b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -682,22 +682,6 @@ if (BUILD_SHARED_LIBS) if (LLAMA_METAL) set_target_properties(llama PROPERTIES RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal") endif() - - # By default, symbols provided by the sublibs that are not used by mainlib (which is all of them in this case) - # are not used. This changes that. - if (WIN32) - set_target_properties(llama PROPERTIES - LINK_FLAGS "/WHOLEARCHIVE /FORCE:MULTIPLE /NODEFAULTLIB:library" - ) - elseif (APPLE) - set_target_properties(llama PROPERTIES - LINK_FLAGS "-Wl,-all_load" - ) - else () - set_target_properties(llama PROPERTIES - LINK_FLAGS "-Wl,--whole-archive" - ) - endif () endif() From 9f03ac7086e2dcfe76766991e3edaa27d89b3b9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Mon, 6 Nov 2023 13:40:20 +0300 Subject: [PATCH 29/31] Fix according to review comments --- Makefile | 4 +- examples/llava/clip.h | 29 +++-- examples/llava/llava-cli.cpp | 185 ++++++++++++++++++++++++++++--- examples/llava/llava-utils.h | 207 ----------------------------------- examples/llava/llava.cpp | 23 +--- examples/llava/llava.h | 26 ++++- 6 files changed, 218 insertions(+), 256 deletions(-) delete mode 100644 examples/llava/llava-utils.h diff --git a/Makefile b/Makefile index 68710ad439220..f2d4fd0312ad9 100644 --- a/Makefile +++ b/Makefile @@ -617,10 +617,10 @@ convert-llama2c-to-ggml: 
examples/convert-llama2c-to-ggml/convert-llama2c-to-ggm llama-bench: examples/llama-bench/llama-bench.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -libllava.a: examples/llava/llava.cpp examples/llava/llava.h examples/llava/llava-utils.h examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h common/base64.hpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) +libllava.a: examples/llava/llava.cpp examples/llava/llava.h examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h common/base64.hpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) -static -fPIC -c $< -o $@ $(LDFLAGS) -Wno-cast-qual -llava-cli: examples/llava/llava-cli.cpp examples/llava/clip.h examples/llava/clip.cpp examples/llava/llava.h examples/llava/llava.cpp examples/llava/llava-utils.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +llava-cli: examples/llava/llava-cli.cpp examples/llava/clip.h examples/llava/clip.cpp examples/llava/llava.h examples/llava/llava.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -Wno-cast-qual baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS) diff --git a/examples/llava/clip.h b/examples/llava/clip.h index 955b23928d8b2..f11df85de9a73 100644 --- a/examples/llava/clip.h +++ b/examples/llava/clip.h @@ -1,7 +1,22 @@ #ifndef CLIP_H #define CLIP_H -#include "llama.h" +#include <stddef.h> +#include <stdint.h> + +#ifdef LLAMA_SHARED +# if defined(_WIN32) && !defined(__MINGW32__) +# ifdef LLAMA_BUILD +# define CLIP_API __declspec(dllexport) +# else +# define CLIP_API __declspec(dllimport) +# endif +# else +# define CLIP_API __attribute__ ((visibility ("default"))) +# endif +#else +# define CLIP_API +#endif struct clip_ctx; @@ -21,9 +36,9 @@ struct clip_vision_hparams { }; /** load mmproj model */ -LLAMA_API struct clip_ctx * clip_model_load(const char * fname, const int verbosity); +CLIP_API struct clip_ctx * clip_model_load(const char * fname, const
int verbosity); /** free mmproj model */ -LLAMA_API void clip_free(struct clip_ctx * ctx); +CLIP_API void clip_free(struct clip_ctx * ctx); size_t clip_embd_nbytes(const struct clip_ctx * ctx); int clip_n_patches(const struct clip_ctx * ctx); @@ -58,11 +73,11 @@ struct clip_image_f32_batch { struct clip_image_u8 * make_clip_image_u8(); struct clip_image_f32 * make_clip_image_f32(); -LLAMA_API void clip_image_u8_free(clip_image_u8 * img); -LLAMA_API void clip_image_f32_free(clip_image_f32 * img); -LLAMA_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img); +CLIP_API void clip_image_u8_free(clip_image_u8 * img); +CLIP_API void clip_image_f32_free(clip_image_f32 * img); +CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img); /** interpret bytes as an image file with length bytes_length, and use the result to populate img */ -LLAMA_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img); +CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img); bool clip_image_preprocess(const struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32 * res, const bool pad2square); bool clip_image_encode(const struct clip_ctx * ctx, const int n_threads, struct clip_image_f32 * img, float * vec); diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index dd2756969a72b..19374c67ff6c5 100644 --- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -1,11 +1,171 @@ -#include -#include - #include "ggml.h" #include "common.h" #include "clip.h" #include "llava.h" -#include "llava-utils.h" +#include "llama.h" + +#include "base64.hpp" + +#include +#include +#include + +static bool eval_tokens(struct llama_context * ctx_llama, std::vector tokens, int n_batch, int * n_past) { + int N = (int) tokens.size(); + for (int i = 0; i < N; i += n_batch) { + int n_eval = 
(int) tokens.size() - i; + if (n_eval > n_batch) { + n_eval = n_batch; + } + if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) { + fprintf(stderr, "%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past); + return false; + } + *n_past += n_eval; + } + return true; +} + +static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) { + std::vector tokens; + tokens.push_back(id); + return eval_tokens(ctx_llama, tokens, 1, n_past); +} + +static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){ + std::string str2 = str; + std::vector embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos); + eval_tokens(ctx_llama, embd_inp, n_batch, n_past); + return true; +} + +// TODO: use common/sampling.h +static llama_token sample_id(llama_context * ctx_llama, gpt_params & params) { + auto & sparams = params.sparams; + + // out of user input, sample next token + const float temp = sparams.temp; + const int32_t top_k = sparams.top_k <= 0 ? llama_n_vocab(llama_get_model(ctx_llama)) : sparams.top_k; + const float top_p = sparams.top_p; + const float tfs_z = sparams.tfs_z; + const float typical_p = sparams.typical_p; + // const int32_t repeat_last_n = sparams.repeat_last_n < 0 ? 
n_ctx : sparams.repeat_last_n; + // const float repeat_penalty = sparams.repeat_penalty; + // const float alpha_presence = sparams.presence_penalty; + // const float alpha_frequency = sparams.frequency_penalty; + const int mirostat = sparams.mirostat; + const float mirostat_tau = sparams.mirostat_tau; + const float mirostat_eta = sparams.mirostat_eta; + // const bool penalize_nl = sparams.penalize_nl; + + llama_token id = 0; + { + auto logits = llama_get_logits(ctx_llama); + auto n_vocab = llama_n_vocab(llama_get_model(ctx_llama)); + + // Apply params.logit_bias map + for (auto it = sparams.logit_bias.begin(); it != sparams.logit_bias.end(); it++) { + logits[it->first] += it->second; + } + + std::vector candidates; + candidates.reserve(n_vocab); + for (llama_token token_id = 0; token_id < n_vocab; token_id++) { + candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f}); + } + + llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false }; + + if (temp <= 0) { + // Greedy sampling + id = llama_sample_token_greedy(ctx_llama, &candidates_p); + } else { + if (mirostat == 1) { + static float mirostat_mu = 2.0f * mirostat_tau; + const int mirostat_m = 100; + llama_sample_temp(ctx_llama, &candidates_p, temp); + id = llama_sample_token_mirostat(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu); + } else if (mirostat == 2) { + static float mirostat_mu = 2.0f * mirostat_tau; + llama_sample_temp(ctx_llama, &candidates_p, temp); + id = llama_sample_token_mirostat_v2(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu); + } else { + // Temperature sampling + llama_sample_top_k(ctx_llama, &candidates_p, top_k, 1); + llama_sample_tail_free(ctx_llama, &candidates_p, tfs_z, 1); + llama_sample_typical(ctx_llama, &candidates_p, typical_p, 1); + llama_sample_top_p(ctx_llama, &candidates_p, top_p, 1); + llama_sample_temp(ctx_llama, &candidates_p, temp); + id = llama_sample_token(ctx_llama, 
&candidates_p); + } + } + } + + return id; +} + +static const char * sample(struct llama_context * ctx_llama, gpt_params & params, int * n_past) { + int id = sample_id(ctx_llama, params); + static std::string ret; + if (id == llama_token_eos(llama_get_model(ctx_llama))) { + ret = ""; + } else { + ret = llama_token_to_piece(ctx_llama, id); + } + eval_id(ctx_llama, id, n_past); + return ret.c_str(); +} + +static const char* IMG_BASE64_TAG_BEGIN = ""; + +static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) { + begin_out = prompt.find(IMG_BASE64_TAG_BEGIN); + end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out); +} + +static bool prompt_contains_image(const std::string& prompt) { + size_t begin, end; + find_image_tag_in_prompt(prompt, begin, end); + return (begin != std::string::npos); +} + +// replaces the base64 image tag in the prompt with `replacement` +static llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) { + size_t img_base64_str_start, img_base64_str_end; + find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end); + if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) { + fprintf(stderr, "%s: invalid base64 image tag. 
must be %s%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END); + return NULL; + } + + auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN); + auto base64_bytes_count = img_base64_str_end - base64_bytes_start; + auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count ); + + auto required_bytes = base64::required_encode_size(base64_str.size()); + auto img_bytes = std::vector(required_bytes); + base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin()); + + auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size()); + if (!embed) { + fprintf(stderr, "%s: could not load image from base64 string.\n", __func__); + return NULL; + } + + return embed; +} + +static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") { + size_t begin, end; + find_image_tag_in_prompt(prompt, begin, end); + if (begin == std::string::npos || end == std::string::npos) { + return prompt; + } + auto pre = prompt.substr(0, begin); + auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END)); + return pre + replacement + post; +} struct llava_context { struct clip_ctx * ctx_clip = NULL; @@ -13,8 +173,6 @@ struct llava_context { struct llama_model * model = NULL; }; - - static void show_additional_info(int /*argc*/, char ** argv) { printf("\n example usage: %s -m --mmproj --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]); printf(" note: a lower temperature value like 0.1 is recommended for better quality.\n"); @@ -46,20 +204,15 @@ static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_para return embed; } -static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, gpt_params * params, const char * prompt) { +static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, gpt_params * params, const std::string & prompt) { int n_past = 
0; const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict; - // llava chat format is "USER: \n\nASSISTANT:" - printf("evaluating system prompt\n"); + // llava chat format is "\nUSER:\n\nASSISTANT:" eval_string(ctx_llava->ctx_llama, "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:", params->n_batch, &n_past, true); - printf("evaluating image embed\n"); llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past); - printf("evaluating prompt\n"); - eval_string(ctx_llava->ctx_llama, prompt, params->n_batch, &n_past, false); - eval_string(ctx_llava->ctx_llama, "\nASSISTANT:", params->n_batch, &n_past, false); - printf("awaiting response\n"); + eval_string(ctx_llava->ctx_llama, (prompt + "\nASSISTANT:").c_str(), params->n_batch, &n_past, false); // generate the response @@ -117,7 +270,6 @@ static struct llava_context * llava_init(gpt_params * params) { return ctx_llava; } - static void llava_free(struct llava_context * ctx_llava) { if (ctx_llava->ctx_clip) { clip_free(ctx_llava->ctx_clip); @@ -129,7 +281,6 @@ static void llava_free(struct llava_context * ctx_llava) { llama_backend_free(); } - int main(int argc, char ** argv) { ggml_time_init(); @@ -154,7 +305,7 @@ int main(int argc, char ** argv) { auto image_embed = load_image(ctx_llava, ¶ms); // process the prompt - process_prompt(ctx_llava, image_embed, ¶ms, params.prompt.c_str()); + process_prompt(ctx_llava, image_embed, ¶ms, params.prompt); llama_print_timings(ctx_llava->ctx_llama); diff --git a/examples/llava/llava-utils.h b/examples/llava/llava-utils.h deleted file mode 100644 index 3303f7a8711f4..0000000000000 --- a/examples/llava/llava-utils.h +++ /dev/null @@ -1,207 +0,0 @@ -#ifndef LLA_UTILS_H -#define LLAVA_UTILS_H - -#pragma once - -// this one and clip lib will be eventually merged to a single lib, let's keep it this way for now - -#include "common.h" 
-#include "llama.h" -#include "llava.h" - -#include "base64.hpp" - -#include -#include -#include - -// todo: remove this -inline bool eval_image_embd(llama_context * ctx_llama, float * embd, int N, int n_batch, int * n_past) { - int n_embd = llama_n_embd(llama_get_model(ctx_llama)); - - for (int i = 0; i < N; i += n_batch) { - int n_eval = N - i; - if (n_eval > n_batch) { - n_eval = n_batch; - } - llama_batch batch = {int32_t(n_eval), nullptr, (embd+i*n_embd), nullptr, nullptr, nullptr, nullptr, *n_past, 1, 0, }; - if (llama_decode(ctx_llama, batch)) { - fprintf(stderr, "%s : failed to eval\n", __func__); - return false; - } - *n_past += n_eval; - } - return true; -} - -inline bool eval_tokens(struct llama_context * ctx_llama, std::vector tokens, int n_batch, int * n_past) { - int N = (int) tokens.size(); - for (int i = 0; i < N; i += n_batch) { - int n_eval = (int) tokens.size() - i; - if (n_eval > n_batch) { - n_eval = n_batch; - } - if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) { - fprintf(stderr, "%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past); - return false; - } - *n_past += n_eval; - } - return true; -} - -inline bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) { - std::vector tokens; - tokens.push_back(id); - return eval_tokens(ctx_llama, tokens, 1, n_past); -} - -inline bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){ - std::string str2 = str; - std::vector embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos); - eval_tokens(ctx_llama, embd_inp, n_batch, n_past); - return true; -} - -// TODO: use common/sampling.h -inline llama_token sample_id(llama_context * ctx_llama, gpt_params & params) { - auto & sparams = params.sparams; - - // out of user input, sample next token - const float temp = sparams.temp; - const int32_t top_k = sparams.top_k <= 0 ? 
llama_n_vocab(llama_get_model(ctx_llama)) : sparams.top_k; - const float top_p = sparams.top_p; - const float tfs_z = sparams.tfs_z; - const float typical_p = sparams.typical_p; - // const int32_t repeat_last_n = sparams.repeat_last_n < 0 ? n_ctx : sparams.repeat_last_n; - // const float repeat_penalty = sparams.repeat_penalty; - // const float alpha_presence = sparams.presence_penalty; - // const float alpha_frequency = sparams.frequency_penalty; - const int mirostat = sparams.mirostat; - const float mirostat_tau = sparams.mirostat_tau; - const float mirostat_eta = sparams.mirostat_eta; - // const bool penalize_nl = sparams.penalize_nl; - - llama_token id = 0; - { - auto logits = llama_get_logits(ctx_llama); - auto n_vocab = llama_n_vocab(llama_get_model(ctx_llama)); - - // Apply params.logit_bias map - for (auto it = sparams.logit_bias.begin(); it != sparams.logit_bias.end(); it++) { - logits[it->first] += it->second; - } - - std::vector candidates; - candidates.reserve(n_vocab); - for (llama_token token_id = 0; token_id < n_vocab; token_id++) { - candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f}); - } - - llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false }; - - // TODO: Apply penalties - // float nl_logit = logits[llama_token_nl(ctx)]; - // auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx); - // llama_sample_repetition_penalty(ctx, &candidates_p, - // last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, - // last_n_repeat, repeat_penalty); - // llama_sample_frequency_and_presence_penalties(ctx, &candidates_p, - // last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, - // last_n_repeat, alpha_frequency, alpha_presence); - // if (!penalize_nl) { - // logits[llama_token_nl(ctx)] = nl_logit; - // } - - if (temp <= 0) { - // Greedy sampling - id = llama_sample_token_greedy(ctx_llama, &candidates_p); - } else { - if (mirostat == 1) { - static float 
mirostat_mu = 2.0f * mirostat_tau; - const int mirostat_m = 100; - llama_sample_temp(ctx_llama, &candidates_p, temp); - id = llama_sample_token_mirostat(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu); - } else if (mirostat == 2) { - static float mirostat_mu = 2.0f * mirostat_tau; - llama_sample_temp(ctx_llama, &candidates_p, temp); - id = llama_sample_token_mirostat_v2(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu); - } else { - // Temperature sampling - llama_sample_top_k(ctx_llama, &candidates_p, top_k, 1); - llama_sample_tail_free(ctx_llama, &candidates_p, tfs_z, 1); - llama_sample_typical(ctx_llama, &candidates_p, typical_p, 1); - llama_sample_top_p(ctx_llama, &candidates_p, top_p, 1); - llama_sample_temp(ctx_llama, &candidates_p, temp); - id = llama_sample_token(ctx_llama, &candidates_p); - } - } - } - - return id; -} - -inline const char * sample(struct llama_context * ctx_llama, gpt_params & params, int * n_past) { - int id = sample_id(ctx_llama, params); - static std::string ret; - if (id == llama_token_eos(llama_get_model(ctx_llama))) { - ret = ""; - } else { - ret = llama_token_to_piece(ctx_llama, id); - } - eval_id(ctx_llama, id, n_past); - return ret.c_str(); -} - -static const char* IMG_BASE64_TAG_BEGIN = ""; - -inline void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) { - begin_out = prompt.find(IMG_BASE64_TAG_BEGIN); - end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 
0UL : begin_out); -} - -inline bool prompt_contains_image(const std::string& prompt) { - size_t begin, end; - find_image_tag_in_prompt(prompt, begin, end); - return (begin != std::string::npos); -} - -// replaces the base64 image tag in the prompt with `replacement` -inline llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) { - size_t img_base64_str_start, img_base64_str_end; - find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end); - if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) { - fprintf(stderr, "%s: invalid base64 image tag. must be %s%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END); - return NULL; - } - - auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN); - auto base64_bytes_count = img_base64_str_end - base64_bytes_start; - auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count ); - - auto required_bytes = base64::required_encode_size(base64_str.size()); - auto img_bytes = std::vector(required_bytes); - base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin()); - - auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size()); - if (!embed) { - fprintf(stderr, "%s: could not load image from base64 string.\n", __func__); - return NULL; - } - - return embed; -} - -inline std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") { - size_t begin, end; - find_image_tag_in_prompt(prompt, begin, end); - if (begin == std::string::npos || end == std::string::npos) { - return prompt; - } - auto pre = prompt.substr(0, begin); - auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END)); - return pre + replacement + post; -} - -#endif diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index d2ce65df6c171..d10bcf2d22465 100644 --- a/examples/llava/llava.cpp +++ 
b/examples/llava/llava.cpp @@ -1,5 +1,4 @@ #include "clip.h" -#include "llava-utils.h" #include "common.h" #include "llama.h" #include "llava.h" @@ -28,12 +27,11 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli return false; } + const int64_t t_img_enc_end_us = ggml_time_us(); float t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0; - { - printf("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / *n_img_pos); - } + printf("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / *n_img_pos); return true; } @@ -50,7 +48,6 @@ bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * } static bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out) { - float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)); if (!image_embd) { fprintf(stderr, "Unable to allocate memory for image embeddings\n"); @@ -70,8 +67,6 @@ static bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_thre return true; } - - bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed, int n_batch, int * n_past) { int n_embd = llama_n_embd(llama_get_model(ctx_llama)); @@ -90,9 +85,7 @@ bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_ return true; } - -LLAMA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length) -{ +LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length) { clip_image_u8 * img = make_clip_image_u8(); if (!clip_image_load_from_bytes(image_bytes, image_bytes_length, img)) { 
clip_image_u8_free(img); @@ -116,8 +109,7 @@ LLAMA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct cl return result; } -static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long *sizeOut) -{ +static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long *sizeOut) { auto file = fopen(path, "rb"); if (file == NULL) { fprintf(stderr, "%s: can't read file %s\n", __func__, path); @@ -141,11 +133,9 @@ static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long *bytesOut = buffer; *sizeOut = fileSize; return true; - } -LLAMA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path) -{ +LLAVA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path) { unsigned char* image_bytes; long image_bytes_length; auto loaded = load_file_to_bytes(image_path, &image_bytes, &image_bytes_length); @@ -160,8 +150,7 @@ LLAMA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct return embed; } - -LLAMA_API void llava_image_embed_free(struct llava_image_embed * embed) { +LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed) { free(embed->embed); free(embed); } diff --git a/examples/llava/llava.h b/examples/llava/llava.h index 637fe4c09bfe4..e08ce78839dcb 100644 --- a/examples/llava/llava.h +++ b/examples/llava/llava.h @@ -2,7 +2,21 @@ #define LLAVA_H #include "ggml.h" -#include "common.h" + + +#ifdef LLAMA_SHARED +# if defined(_WIN32) && !defined(__MINGW32__) +# ifdef LLAMA_BUILD +# define LLAVA_API __declspec(dllexport) +# else +# define LLAVA_API __declspec(dllimport) +# endif +# else +# define LLAVA_API __attribute__ ((visibility ("default"))) +# endif +#else +# define LLAVA_API +#endif struct clip_ctx; @@ -16,17 +30,17 @@ struct llava_image_embed { }; /** sanity check for clip <-> llava embed size match */ 
-LLAMA_API bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip); +LLAVA_API bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip); /** build an image embed from image file bytes */ -LLAMA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length); +LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length); /** build an image embed from a path to an image filename */ -LLAMA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path); -LLAMA_API void llava_image_embed_free(struct llava_image_embed * embed); +LLAVA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path); +LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed); /** free an embedding made with llava_image_embed_make_* */ /** write the image represented by embed into the llama context with batch size n_batch, starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. 
*/ -LLAMA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past); +LLAVA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past); #ifdef __cplusplus From 22f43fca0ac2237766f825a8ab4aa2d5e19238d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Mon, 6 Nov 2023 19:16:33 +0300 Subject: [PATCH 30/31] Support building as a shared library --- examples/llava/CMakeLists.txt | 40 +++++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/examples/llava/CMakeLists.txt b/examples/llava/CMakeLists.txt index 688e0feccb424..8ea3e5c836c13 100644 --- a/examples/llava/CMakeLists.txt +++ b/examples/llava/CMakeLists.txt @@ -1,22 +1,36 @@ -set(TARGET llava) +add_library(llava OBJECT + llava.cpp + llava.h + clip.cpp + clip.h + ) -add_library(${TARGET} STATIC llava.cpp llava.h clip.cpp clip.h) -target_link_libraries(${TARGET} PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(llava PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT}) -target_include_directories(${TARGET} PUBLIC .) -target_include_directories(${TARGET} PUBLIC ../..) -target_include_directories(${TARGET} PUBLIC ../../common) +target_include_directories(llava PUBLIC .) +target_include_directories(llava PUBLIC ../..) 
+target_include_directories(llava PUBLIC ../../common) + +target_compile_features(llava PRIVATE cxx_std_11) + +add_library(llava_static STATIC $) +if (BUILD_SHARED_LIBS) + set_target_properties(llava PROPERTIES POSITION_INDEPENDENT_CODE ON) + target_compile_definitions(llava PRIVATE LLAMA_SHARED LLAMA_BUILD) + add_library(llava_shared SHARED $) + target_link_libraries(llava_shared PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT}) + install(TARGETS llava_shared LIBRARY) +endif() -target_compile_features(${TARGET} PRIVATE cxx_std_11) if (NOT MSVC) - target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h + target_compile_options(llava PRIVATE -Wno-cast-qual) # stb_image.h endif() if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) + add_dependencies(llava BUILD_INFO) endif() set(TARGET llava-cli) -add_executable(${TARGET} llava-cli.cpp) -install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +add_executable(llava-cli llava-cli.cpp) +install(TARGETS llava-cli RUNTIME) +target_link_libraries(llava-cli PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT}) +target_compile_features(llava PRIVATE cxx_std_11) From 354802979e6c2eecb6cc0ca99daeb51ed1a55ae2 Mon Sep 17 00:00:00 2001 From: Jared Van Bortel Date: Mon, 6 Nov 2023 16:07:21 -0500 Subject: [PATCH 31/31] address review comments --- build-info.h | 9 --------- common/CMakeLists.txt | 2 +- 2 files changed, 1 insertion(+), 10 deletions(-) delete mode 100644 build-info.h diff --git a/build-info.h b/build-info.h deleted file mode 100644 index cfe0edae6c39e..0000000000000 --- a/build-info.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef BUILD_INFO_H -#define BUILD_INFO_H - -#define BUILD_NUMBER 1418 -#define BUILD_COMMIT "9d02956" -#define BUILD_COMPILER "clang version 16.0.0" -#define BUILD_TARGET "x86_64-unknown-linux-gnu" - -#endif // BUILD_INFO_H diff --git a/common/CMakeLists.txt 
b/common/CMakeLists.txt index f73b333419732..4f930bdc59059 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -41,7 +41,7 @@ endif() set(TARGET common) add_library(${TARGET} STATIC -base64.hpp + base64.hpp common.h common.cpp sampling.h