From 75aba012bbfd999a5b2d2414cd42967b4b953b3e Mon Sep 17 00:00:00 2001
From: yatarkan
Date: Wed, 29 Jan 2025 15:49:20 +0400
Subject: [PATCH 1/7] Fix Qwen2VL generation without images

---
 src/cpp/src/visual_language/inputs_embedder.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/cpp/src/visual_language/inputs_embedder.cpp b/src/cpp/src/visual_language/inputs_embedder.cpp
index 66b17e5804..2c07ff8277 100644
--- a/src/cpp/src/visual_language/inputs_embedder.cpp
+++ b/src/cpp/src/visual_language/inputs_embedder.cpp
@@ -1641,10 +1641,6 @@ class InputsEmbedderQwen2VL : public InputsEmbedder::IInputsEmbedder {
         ov::Tensor input_ids = get_encoded_input_ids(formatted_prompt, metrics, chat_template_fallback);
         ov::Tensor text_embeds = m_embedding.infer(input_ids);
 
-        if (images.empty()) {
-            return text_embeds;
-        }
-
         auto start_tokenizer_time = std::chrono::steady_clock::now();
         ov::Tensor encoded_vision_start_token = m_tokenizer.encode(m_vlm_config.vision_start_token, ov::genai::add_special_tokens(false)).input_ids;
         ov::Tensor encoded_image_pad_token = m_tokenizer.encode(m_vlm_config.image_pad_token, ov::genai::add_special_tokens(false)).input_ids;
@@ -1659,6 +1655,10 @@ class InputsEmbedderQwen2VL : public InputsEmbedder::IInputsEmbedder {
         int64_t position_ids_max_element = *std::max_element(m_position_ids.data<int64_t>(), m_position_ids.data<int64_t>() + m_position_ids.get_size());
         m_rope_delta = position_ids_max_element + 1 - static_cast<int64_t>(input_ids.get_shape().at(1));
 
+        if (images.empty()) {
+            return text_embeds;
+        }
+
         return merge_text_and_image_embeddings_qwen2vl(input_ids, text_embeds, image_embeds, images_grid_thw, image_pad_token_id);
     }

From 5a1f8f8b9a4d345cf2f6d4d52db3badb2a87309b Mon Sep 17 00:00:00 2001
From: yatarkan
Date: Wed, 29 Jan 2025 17:34:16 +0400
Subject: [PATCH 2/7] Get rotary_pos_emb dim from model

---
 src/cpp/src/visual_language/inputs_embedder.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/cpp/src/visual_language/inputs_embedder.cpp b/src/cpp/src/visual_language/inputs_embedder.cpp
index 2c07ff8277..f50cc268cb 100644
--- a/src/cpp/src/visual_language/inputs_embedder.cpp
+++ b/src/cpp/src/visual_language/inputs_embedder.cpp
@@ -1853,7 +1853,7 @@ class InputsEmbedderQwen2VL : public InputsEmbedder::IInputsEmbedder {
         }
 
         // Calculate rotary embeddings for max_grid_size
-        const size_t dim = 1280 / 16 / 2; // config.vision_config.embed_dim / self.config.vision_config.num_heads / 2
+        const size_t dim = m_vision_embeddings_merger.get_tensor("rotary_pos_emb").get_shape().at(1);
         const float theta = 10000.0f;
 
         std::vector<float> inv_freq(dim / 2);

From 91bd7742a5f9c9734fb9f2f17c925ca25b661691 Mon Sep 17 00:00:00 2001
From: yatarkan
Date: Wed, 29 Jan 2025 17:57:45 +0400
Subject: [PATCH 3/7] Add vlm models to python test

---
 tests/python_tests/test_vlm_pipeline.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/python_tests/test_vlm_pipeline.py b/tests/python_tests/test_vlm_pipeline.py
index 0f9358b961..749932d02a 100644
--- a/tests/python_tests/test_vlm_pipeline.py
+++ b/tests/python_tests/test_vlm_pipeline.py
@@ -47,6 +47,9 @@ def get_ov_model(model_id, cache):
 @pytest.mark.parametrize("model_id", [
     "katuni4ka/tiny-random-minicpmv-2_6",
     "katuni4ka/tiny-random-phi3-vision",
+    "katuni4ka/tiny-random-llava",
+    "katuni4ka/tiny-random-internvl2",
+    "katuni4ka/tiny-random-qwen2vl",
 ])
 def test_vlm_pipeline(model_id, cache):
     def streamer(word: str) -> bool:
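
A note on PATCH 2/7: `dim` feeds the inverse-frequency table that Qwen2VL's vision encoder uses for 2D rotary position embeddings, so reading it from the model's `rotary_pos_emb` input keeps it in sync with the exported encoder instead of hardcoding 1280 / 16 / 2. A minimal standalone sketch of that computation follows; the function name and return layout are illustrative, not the repository's exact code:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // inv_freq[i] = theta^(-2i/dim), angle(pos, i) = pos * inv_freq[i].
    // One row of dim/2 angles per grid position.
    std::vector<std::vector<float>> rotary_angles(size_t dim, float theta, size_t max_grid_size) {
        std::vector<float> inv_freq(dim / 2);
        for (size_t i = 0; i < dim / 2; ++i) {
            inv_freq[i] = 1.0f / std::pow(theta, 2.0f * i / dim);
        }
        std::vector<std::vector<float>> angles(max_grid_size, std::vector<float>(dim / 2));
        for (size_t pos = 0; pos < max_grid_size; ++pos) {
            for (size_t i = 0; i < dim / 2; ++i) {
                angles[pos][i] = pos * inv_freq[i];
            }
        }
        return angles;
    }
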
From 3298b1f6db327ac21c98e6b919d0a32d097afcb5 Mon Sep 17 00:00:00 2001
From: yatarkan
Date: Wed, 29 Jan 2025 19:15:25 +0400
Subject: [PATCH 4/7] Add debug print to qwen2vl smart resize

---
 src/cpp/src/visual_language/vision_encoder.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/cpp/src/visual_language/vision_encoder.cpp b/src/cpp/src/visual_language/vision_encoder.cpp
index 04ddd63145..e8edd40890 100644
--- a/src/cpp/src/visual_language/vision_encoder.cpp
+++ b/src/cpp/src/visual_language/vision_encoder.cpp
@@ -843,7 +843,7 @@ std::tuple<ov::Tensor, ImageSize> get_pixel_values_phi3_v(const ov::Tensor& imag
 
 ImageSize smart_resize_qwen2vl(size_t height, size_t width, size_t factor, size_t min_pixels, size_t max_pixels) {
     if (height < factor || width < factor) {
-        OPENVINO_THROW("Height or width must be larger than factor");
+        OPENVINO_THROW("Height (" + std::to_string(height) + ") and width (" + std::to_string(width) + ") must be greater than factor (" + std::to_string(factor) + ")");
     }
     if (std::max(height, width) / std::min(height, width) > 200) {
         OPENVINO_THROW("Absolute aspect ratio must be smaller than 200");

From 2fc1908739317de0970ec2d90bee884eb1ceaadc Mon Sep 17 00:00:00 2001
From: yatarkan
Date: Wed, 29 Jan 2025 20:23:10 +0400
Subject: [PATCH 5/7] Fix image layout in tests

---
 tests/python_tests/common.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/python_tests/common.py b/tests/python_tests/common.py
index b0b6a70e93..d9a8de3516 100644
--- a/tests/python_tests/common.py
+++ b/tests/python_tests/common.py
@@ -523,7 +523,7 @@ def get_image_by_link(link):
     image = Image.open(requests.get(link, stream=True).raw)
     if image.mode != 'RGB':
         image = image.convert('RGB')
-    image_data = np.array((np.array(image.getdata()) - 128).astype(np.byte)).reshape(1, 3, image.size[1], image.size[0])
+    image_data = np.array((np.array(image.getdata()) - 128).astype(np.byte)).reshape(1, image.size[1], image.size[0], 3)
    return Tensor(image_data)
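
A note on PATCH 5/7: the layout fix means image tensors enter the pipeline as 1 x H x W x C with interleaved channels (NHWC), not planar 1 x C x H x W. A rough C++ analogue of what the fixed test helper builds, not part of the test suite itself:

    #include <openvino/openvino.hpp>
    #include <cstdint>
    #include <vector>

    // Wraps interleaved RGB bytes (height * width * 3 of them) in the NHWC
    // layout the tests now use: shape {1, height, width, 3}. The -128 shift
    // mirrors the Python helper's `(pixels - 128).astype(np.byte)`.
    ov::Tensor make_nhwc_image(const std::vector<uint8_t>& rgb, size_t height, size_t width) {
        ov::Tensor image(ov::element::i8, ov::Shape{1, height, width, 3});
        int8_t* dst = image.data<int8_t>();
        for (size_t i = 0; i < rgb.size(); ++i) {
            dst[i] = static_cast<int8_t>(static_cast<int>(rgb[i]) - 128);
        }
        return image;
    }
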
From 7a0515fa42ed4cb0418605dedd08c5185b16c226 Mon Sep 17 00:00:00 2001
From: Vladimir Zlobin
Date: Thu, 30 Jan 2025 15:17:24 +0400
Subject: [PATCH 6/7] Fix InputsEmbedderPhi3V

---
 .../src/visual_language/inputs_embedder.cpp | 41 ++++++++-----------
 1 file changed, 17 insertions(+), 24 deletions(-)

diff --git a/src/cpp/src/visual_language/inputs_embedder.cpp b/src/cpp/src/visual_language/inputs_embedder.cpp
index f50cc268cb..88c76f71ab 100644
--- a/src/cpp/src/visual_language/inputs_embedder.cpp
+++ b/src/cpp/src/visual_language/inputs_embedder.cpp
@@ -1407,15 +1407,15 @@ std::vector<ov::Tensor> split_tokenize(const std::string& text, ov::genai::Token
     return tokenized;
 }
 
-ov::Tensor insert_image_placeholders(const std::vector<ov::Tensor>& chunks, size_t tokens_per_image) {
+ov::Tensor insert_image_placeholders(const std::vector<ov::Tensor>& chunks, const std::vector<size_t>& tokens_per_images) {
     size_t merged_length = 0;
     for (const ov::Tensor& chunk : chunks) {
         merged_length += chunk.get_shape().at(1);
     }
-    merged_length += chunks.empty() ? 0 : (chunks.size() - 1) * tokens_per_image;
+    merged_length += std::accumulate(tokens_per_images.begin(), tokens_per_images.end(), 0);
     ov::Tensor merged{ov::element::i64, {1, merged_length}};
     size_t offset = 0;
-    int64_t image_id = -1;
+    int64_t image_id = 0;
     for (const ov::Tensor& chunk : chunks) {
         size_t length = chunk.get_shape().at(1);
         std::copy_n(
@@ -1427,11 +1427,11 @@ ov::Tensor insert_image_placeholders(const std::vector<ov::Tensor>& chunks, size
         if (offset < merged_length) {
             std::fill_n(
                 merged.data<int64_t>() + offset,
-                tokens_per_image,
-                image_id
+                tokens_per_images.at(image_id),
+                -image_id - 1 // It could be just -image_id. -1 is for consistency with the original implementation.
             );
-            offset += tokens_per_image;
-            --image_id;
+            offset += tokens_per_images.at(image_id);
+            ++image_id;
         }
     }
     return merged;
 }
@@ -1460,9 +1460,7 @@ class InputsEmbedderPhi3V : public InputsEmbedder::IInputsEmbedder {
 public:
     ov::InferRequest m_hd_feature_transformer;
     ov::InferRequest m_vision_projection;
-    // Used to insert <|image_i|>\n per image (not a slice).
-    size_t m_image_id = 1;
-    size_t m_tokens_per_image = 0;
+    std::vector<size_t> m_tokens_per_images;
 
     InputsEmbedderPhi3V(
         const VLMConfig& vlm_config,
         const std::filesystem::path& model_dir,
         const std::string& device,
         const ov::AnyMap device_config
     ):
-        IInputsEmbedder(vlm_config, model_dir, device, device_config), m_image_id{0},
+        IInputsEmbedder(vlm_config, model_dir, device, device_config),
         m_hd_feature_transformer{phi3_v::create_hd_feature_transformer()},
         m_vision_projection{utils::singleton_core().compile_model(model_dir / "openvino_vision_projection_model.xml", device, {}).create_infer_request()} {}
 
         for (const ov::Tensor& image : to_single_image_tensors(images)) {
             EncodedImage encoded_image = m_vision_encoder.encode(image);
             images_features_proj.push_back(phi3_v::hd_feature_transform(encoded_image, m_hd_feature_transformer, m_vlm_config.sub_GN, m_vlm_config.glb_GN, m_vision_projection));
-            images_prompt << "<|image_" << m_image_id << "|>\n";
-            ++m_image_id;
+            m_tokens_per_images.push_back(images_features_proj.back().get_shape().at(1));
+            images_prompt << "<|image_" << m_tokens_per_images.size() << "|>\n";
         }
         images_prompt << prompt;
         std::vector<ov::Tensor> new_chat_tokens;
         std::vector<ov::Tensor> prev_chat_tokens;
         if (m_is_chat_conversation) {
             m_history.push_back({{"role", "user"}, {"content", images_prompt.str()}});
             constexpr bool add_generation_prompt = true;
-            std::string new_templated_chat_history;
-            new_templated_chat_history = m_tokenizer.apply_chat_template(m_history, add_generation_prompt);
+            std::string new_templated_chat_history = m_tokenizer.apply_chat_template(m_history, add_generation_prompt);
             auto start_tokenizer_time = std::chrono::steady_clock::now();
             new_chat_tokens = phi3_v::split_tokenize(new_templated_chat_history, m_tokenizer);
             prev_chat_tokens = phi3_v::split_tokenize(m_templated_chat_history, m_tokenizer);
             auto end_tokenizer_time = std::chrono::steady_clock::now();
             metrics.raw_metrics.tokenization_durations.emplace_back(PerfMetrics::get_microsec(end_tokenizer_time - start_tokenizer_time));
         }
-        if (0 == m_tokens_per_image && !images_features_proj.empty()) {
-            m_tokens_per_image = images_features_proj.at(0).get_shape().at(1);
-        }
-        ov::Tensor new_merged_tokens = phi3_v::insert_image_placeholders(new_chat_tokens, m_tokens_per_image);
-        ov::Tensor prev_merged_tokens = phi3_v::insert_image_placeholders(prev_chat_tokens, m_tokens_per_image);
+        ov::Tensor new_merged_tokens = phi3_v::insert_image_placeholders(new_chat_tokens, m_tokens_per_images);
+        ov::Tensor prev_merged_tokens = phi3_v::insert_image_placeholders(prev_chat_tokens, m_tokens_per_images);
         ov::Tensor new_tokens = update_history(new_merged_tokens, prev_merged_tokens);
         std::vector<ov::Tensor> tokens = phi3_v::drop_image_placeholders(new_tokens);
         OPENVINO_ASSERT(tokens.size() == images_features_proj.size() + 1);
@@ -1516,7 +1510,6 @@ class InputsEmbedderPhi3V : public InputsEmbedder::IInputsEmbedder {
         for (size_t im_id = 0; im_id < images_features_proj.size(); ++im_id) {
             size_t text_length = tokens.at(im_id).get_shape().at(1);
             size_t im_length = images_features_proj.at(im_id).get_shape().at(1);
-            OPENVINO_ASSERT(im_length == m_tokens_per_image);
             features_length += text_length + im_length;
         }
         features_length += tokens.back().get_shape().at(1);
@@ -1549,7 +1542,7 @@ class InputsEmbedderPhi3V : public InputsEmbedder::IInputsEmbedder {
         );
 
         if (!m_is_chat_conversation) {
-            m_image_id = 0;
+            m_tokens_per_images.clear();
         }
 
         return inputs_embeds;
     }
 
     virtual void start_chat(const std::string& system_message) override {
         IInputsEmbedder::start_chat(system_message);
-        m_image_id = 0;
+        m_tokens_per_images.clear();
     }
 
     virtual void finish_chat() override {
         IInputsEmbedder::finish_chat();
-        m_image_id = 0;
+        m_tokens_per_images.clear();
     }
 };
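
A note on PATCH 6/7: each image now contributes its own run of placeholder ids, sized by that image's actual embedding length, instead of one shared per-image size. A self-contained illustration with plain vectors standing in for ov::Tensor; the helper mirrors the patched logic but is not the repository's code:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Interleaves text-token chunks with placeholder runs of -(image_id + 1),
    // one run per image, each sized by tokens_per_images[image_id]. Chunks are
    // expected to number one more than the images, as the patch asserts.
    std::vector<int64_t> insert_placeholders(const std::vector<std::vector<int64_t>>& chunks,
                                             const std::vector<size_t>& tokens_per_images) {
        std::vector<int64_t> merged;
        size_t image_id = 0;
        for (const auto& chunk : chunks) {
            merged.insert(merged.end(), chunk.begin(), chunk.end());
            if (image_id < tokens_per_images.size()) {
                merged.insert(merged.end(), tokens_per_images[image_id], -static_cast<int64_t>(image_id) - 1);
                ++image_id;
            }
        }
        return merged;
    }

    int main() {
        // Two text chunks around one image that occupies 3 embedding positions:
        for (int64_t t : insert_placeholders({{101, 102}, {103}}, {3})) {
            std::printf("%lld ", static_cast<long long>(t));
        }
        std::printf("\n"); // prints: 101 102 -1 -1 -1 103
    }
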
From 80a6dfe2bdbdc8494a9c42974ec6b57ddc9d2d7d Mon Sep 17 00:00:00 2001
From: yatarkan
Date: Thu, 30 Jan 2025 15:41:40 +0400
Subject: [PATCH 7/7] Remove failing tiny-random-internvl2 model from tests

---
 tests/python_tests/test_vlm_pipeline.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/python_tests/test_vlm_pipeline.py b/tests/python_tests/test_vlm_pipeline.py
index 749932d02a..3c188b26b2 100644
--- a/tests/python_tests/test_vlm_pipeline.py
+++ b/tests/python_tests/test_vlm_pipeline.py
@@ -48,7 +48,6 @@ def get_ov_model(model_id, cache):
     "katuni4ka/tiny-random-minicpmv-2_6",
     "katuni4ka/tiny-random-phi3-vision",
     "katuni4ka/tiny-random-llava",
-    "katuni4ka/tiny-random-internvl2",
     "katuni4ka/tiny-random-qwen2vl",
 ])
 def test_vlm_pipeline(model_id, cache):
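
Taken together, PATCH 1/7 and PATCH 2/7 make text-only prompts work on Qwen2VL: the early return now happens only after the position ids and rope delta are computed. A hedged usage sketch of the public C++ API this series targets; the header, types, and overloads follow the OpenVINO GenAI samples, the model path is a placeholder, and exact signatures may differ by release:

    #include <iostream>
    #include "openvino/genai/visual_language/pipeline.hpp"

    int main() {
        // Directory with a converted Qwen2-VL model (placeholder path).
        ov::genai::VLMPipeline pipe("./Qwen2-VL-2B-Instruct-ov", "CPU");
        ov::genai::GenerationConfig config;
        config.max_new_tokens = 64;
        // No images passed: before PATCH 1/7 this path returned text embeddings
        // without computing position ids, breaking generation.
        auto result = pipe.generate("Describe what a vision-language model does.",
                                    ov::genai::generation_config(config));
        std::cout << result.texts.at(0) << '\n';
    }
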