[WWB]: Fixed nano-Llava preprocessor selection (#1646)
Partially fixes the WWB flow for nano-Llava. The flow now works for Optimum inference, but supporting HF Transformers still requires additional changes on the Optimum side.
AlexKoff88 authored Jan 29, 2025
1 parent 6c3ecf9 commit ec50b5b
Showing 1 changed file with 13 additions and 11 deletions.
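
For context, the failure this commit addresses can be sketched as below; the checkpoint name "qnguyen3/nanoLLaVA" and the exact failure mode are assumptions for illustration, not taken from the commit itself:

from transformers import AutoProcessor

# Hypothetical repro of the pre-fix behavior: the nano-Llava checkpoint
# (assumed here to be "qnguyen3/nanoLLaVA") keeps its image preprocessor
# with the vision tower rather than the language model, so resolving the
# processor straight from the model id fails or yields an unusable object.
try:
    AutoProcessor.from_pretrained("qnguyen3/nanoLLaVA", trust_remote_code=True)
except Exception as err:
    print(f"AutoProcessor lookup on the model id failed: {err}")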
tools/who_what_benchmark/whowhatbench/wwb.py (24 changes: 13 additions & 11 deletions)
@@ -4,7 +4,7 @@
 import logging
 import os
 
-from transformers import AutoTokenizer, AutoProcessor
+from transformers import AutoTokenizer, AutoProcessor, AutoConfig
 import openvino as ov
 
 import pandas as pd
@@ -220,17 +220,19 @@ def load_tokenizer(args):
 
 
 def load_processor(args):
-    processor = None
-    if args.base_model is not None:
-        processor = AutoProcessor.from_pretrained(
-            args.base_model, trust_remote_code=True
-        )
-    elif args.target_model is not None:
-        processor = AutoProcessor.from_pretrained(
-            args.target_model, trust_remote_code=True
-        )
+    model_id = args.base_model if args.base_model is not None else args.target_model
+    if model_id is None:
+        return None
 
-    return processor
+    config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
+    if "llava-qwen" in config.model_type:
+        preprocessor_id = config.mm_vision_tower
+    else:
+        preprocessor_id = model_id
+
+    return AutoProcessor.from_pretrained(
+        preprocessor_id, trust_remote_code=True
+    )
 
 
 def diff_strings(a: str, b: str, *, use_loguru_colors: bool = False) -> str:
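
Read in isolation, the new selection logic amounts to the minimal sketch below. The concrete checkpoint name and the example mm_vision_tower value are assumptions about the nano-Llava config, not part of the commit:

from transformers import AutoConfig, AutoProcessor

# Minimal sketch of the new preprocessor selection, assuming the
# nano-Llava checkpoint "qnguyen3/nanoLLaVA", whose model_type contains
# "llava-qwen". For such models the image preprocessor is loaded from the
# vision tower named in config.mm_vision_tower instead of the model itself.
model_id = "qnguyen3/nanoLLaVA"
config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)

if "llava-qwen" in config.model_type:
    # For nano-Llava this points at a SigLIP vision encoder
    # (assumed value, e.g. "google/siglip-so400m-patch14-384").
    preprocessor_id = config.mm_vision_tower
else:
    preprocessor_id = model_id

processor = AutoProcessor.from_pretrained(preprocessor_id, trust_remote_code=True)

Note the substring check on model_type rather than an exact match, presumably to cover variant type strings such as "llava-qwen2".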
