Skip to content

Commit

Permalink
[InstructBLIP] qformer_tokenizer is required input (#33222)
Browse files Browse the repository at this point in the history
* [InstructBLIP] qformer_tokenizer is required input

* Bit safer

* Add to instructblipvideo processor

* Fix up

* Use video inputs

* Update tests/models/instructblipvideo/test_processor_instructblipvideo.py
  • Loading branch information
amyeroberts authored Sep 4, 2024
1 parent 5731dc8 commit d2dcff9
Show file tree
Hide file tree
Showing 4 changed files with 696 additions and 16 deletions.
23 changes: 16 additions & 7 deletions src/transformers/models/instructblip/processing_instructblip.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,24 +50,23 @@ class InstructBlipProcessor(ProcessorMixin):
An instance of [`BlipImageProcessor`]. The image processor is a required input.
tokenizer (`AutoTokenizer`):
An instance of ['PreTrainedTokenizer`]. The tokenizer is a required input.
qformer_tokenizer (`AutoTokenizer`, *optional*):
qformer_tokenizer (`AutoTokenizer`):
An instance of ['PreTrainedTokenizer`]. The Q-Former tokenizer is a required input.
num_query_tokens (`int`, *optional*):"
Number of tokens used by the Qformer as queries, should be same as in model's config.
"""

attributes = ["image_processor", "tokenizer"]
attributes = ["image_processor", "tokenizer", "qformer_tokenizer"]
valid_kwargs = ["num_query_tokens"]
image_processor_class = "BlipImageProcessor"
tokenizer_class = "AutoTokenizer"
qformer_tokenizer_class = "AutoTokenizer"

def __init__(self, image_processor, tokenizer, qformer_tokenizer=None, num_query_tokens=None, **kwargs):
# add QFormer tokenizer
self.qformer_tokenizer = qformer_tokenizer
def __init__(self, image_processor, tokenizer, qformer_tokenizer, num_query_tokens=None, **kwargs):
self.image_token = AddedToken("<image>", normalized=False, special=True)
tokenizer.add_tokens([self.image_token], special_tokens=True)
self.num_query_tokens = num_query_tokens
super().__init__(image_processor, tokenizer)
super().__init__(image_processor, tokenizer, qformer_tokenizer)

def __call__(
self,
Expand Down Expand Up @@ -205,7 +204,17 @@ def save_pretrained(self, save_directory, **kwargs):
os.makedirs(save_directory, exist_ok=True)
qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
return super().save_pretrained(save_directory, **kwargs)

# We modify the attributes so that only the tokenizer and image processor are saved in the main folder
qformer_present = "qformer_tokenizer" in self.attributes
if qformer_present:
self.attributes.remove("qformer_tokenizer")

outputs = super().save_pretrained(save_directory, **kwargs)

if qformer_present:
self.attributes += ["qformer_tokenizer"]
return outputs

# overwrite to load the Q-Former tokenizer from a separate folder
@classmethod
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -50,24 +50,23 @@ class InstructBlipVideoProcessor(ProcessorMixin):
An instance of [`InstructBlipVideoImageProcessor`]. The image processor is a required input.
tokenizer (`AutoTokenizer`):
An instance of ['PreTrainedTokenizer`]. The tokenizer is a required input.
qformer_tokenizer (`AutoTokenizer`, *optional*):
qformer_tokenizer (`AutoTokenizer`):
An instance of ['PreTrainedTokenizer`]. The Q-Former tokenizer is a required input.
num_query_tokens (`int`, *optional*):
Number of tokens used by the Qformer as queries, should be same as in model's config.
"""

attributes = ["image_processor", "tokenizer"]
attributes = ["image_processor", "tokenizer", "qformer_tokenizer"]
valid_kwargs = ["num_query_tokens"]
image_processor_class = "InstructBlipVideoImageProcessor"
tokenizer_class = "AutoTokenizer"
qformer_tokenizer_class = "AutoTokenizer"

def __init__(self, image_processor, tokenizer, qformer_tokenizer=None, num_query_tokens=None, **kwargs):
# add QFormer tokenizer
self.qformer_tokenizer = qformer_tokenizer
def __init__(self, image_processor, tokenizer, qformer_tokenizer, num_query_tokens=None, **kwargs):
self.video_token = AddedToken("<video>", normalized=False, special=True)
tokenizer.add_tokens([self.video_token], special_tokens=True)
self.num_query_tokens = num_query_tokens
super().__init__(image_processor, tokenizer)
super().__init__(image_processor, tokenizer, qformer_tokenizer)

def __call__(
self,
Expand Down Expand Up @@ -95,6 +94,9 @@ def __call__(
Please refer to the docstring of the above two methods for more information.
"""
if images is None and text is None:
raise ValueError("You have to specify at least one of images or text.")

encoding = BatchFeature()

if text is not None:
Expand Down Expand Up @@ -204,7 +206,17 @@ def save_pretrained(self, save_directory, **kwargs):
os.makedirs(save_directory, exist_ok=True)
qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
return super().save_pretrained(save_directory, **kwargs)

# We modify the attributes so that only the tokenizer and image processor are saved in the main folder
qformer_present = "qformer_tokenizer" in self.attributes
if qformer_present:
self.attributes.remove("qformer_tokenizer")

outputs = super().save_pretrained(save_directory, **kwargs)

if qformer_present:
self.attributes += ["qformer_tokenizer"]
return outputs

# overwrite to load the Q-Former tokenizer from a separate folder
@classmethod
Expand Down
238 changes: 236 additions & 2 deletions tests/models/instructblip/test_processor_instructblip.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,11 @@
import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available

from ...test_processing_common import ProcessorTesterMixin


if is_vision_available():
from PIL import Image
Expand All @@ -36,7 +38,9 @@


@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
class InstructBlipProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = InstructBlipProcessor

def setUp(self):
self.tmpdirname = tempfile.mkdtemp()

Expand Down Expand Up @@ -189,3 +193,233 @@ def test_model_input_names(self):
list(inputs.keys()),
["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
)

# Override as InstructBlipProcessor has qformer_tokenizer
@require_vision
@require_torch
def test_tokenizer_defaults_preserved_by_kwargs(self):
    """Tokenizer defaults set at construction time must survive a plain __call__."""
    if "image_processor" not in self.processor_class.attributes:
        self.skipTest(f"image_processor attribute not present in {self.processor_class}")

    # Both tokenizers are built with the padding defaults baked in.
    tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length")
    qformer_tokenizer = self.get_component("qformer_tokenizer", max_length=117, padding="max_length")
    image_processor = self.get_component("image_processor")

    processor = self.processor_class(
        tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
    )
    self.skip_processor_without_typed_kwargs(processor)

    text = "lower newer"
    images = self.prepare_image_inputs()
    encoded = processor(text=text, images=images, return_tensors="pt")

    # padding="max_length" with max_length=117 should pad the ids to exactly 117 tokens.
    self.assertEqual(len(encoded["input_ids"][0]), 117)

# Override as InstructBlipProcessor has qformer_tokenizer
@require_torch
@require_vision
def test_image_processor_defaults_preserved_by_image_kwargs(self):
    """Image-processor defaults set at construction time must survive a plain __call__."""
    if "image_processor" not in self.processor_class.attributes:
        self.skipTest(f"image_processor attribute not present in {self.processor_class}")

    # Image processor carries a non-default size; tokenizers carry padding defaults.
    image_processor = self.get_component("image_processor", size=(234, 234))
    tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length")
    qformer_tokenizer = self.get_component("qformer_tokenizer", max_length=117, padding="max_length")

    processor = self.processor_class(
        tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
    )
    self.skip_processor_without_typed_kwargs(processor)

    text = "lower newer"
    images = self.prepare_image_inputs()
    encoded = processor(text=text, images=images)

    # The constructor-time size=(234, 234) should still govern the output resolution.
    self.assertEqual(len(encoded["pixel_values"][0][0]), 234)

# Override as InstructBlipProcessor has qformer_tokenizer
@require_vision
@require_torch
def test_kwargs_overrides_default_tokenizer_kwargs(self):
    """Call-time tokenizer kwargs must take precedence over construction-time defaults."""
    if "image_processor" not in self.processor_class.attributes:
        self.skipTest(f"image_processor attribute not present in {self.processor_class}")

    # Defaults say padding="longest"; the call below overrides to fixed-length padding.
    tokenizer = self.get_component("tokenizer", padding="longest")
    qformer_tokenizer = self.get_component("qformer_tokenizer", padding="longest")
    image_processor = self.get_component("image_processor")

    processor = self.processor_class(
        tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
    )
    self.skip_processor_without_typed_kwargs(processor)

    text = "lower newer"
    images = self.prepare_image_inputs()
    encoded = processor(text=text, images=images, return_tensors="pt", max_length=112, padding="max_length")

    # The call-time max_length=112 wins over the "longest" default.
    self.assertEqual(len(encoded["input_ids"][0]), 112)

# Override as InstructBlipProcessor has qformer_tokenizer
@require_torch
@require_vision
def test_kwargs_overrides_default_image_processor_kwargs(self):
    """Call-time image kwargs must take precedence over construction-time defaults."""
    if "image_processor" not in self.processor_class.attributes:
        self.skipTest(f"image_processor attribute not present in {self.processor_class}")

    # Constructor default is size=(234, 234); the call overrides it to 224.
    image_processor = self.get_component("image_processor", size=(234, 234))
    tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length")
    qformer_tokenizer = self.get_component("qformer_tokenizer", max_length=117, padding="max_length")

    processor = self.processor_class(
        tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
    )
    self.skip_processor_without_typed_kwargs(processor)

    text = "lower newer"
    images = self.prepare_image_inputs()
    encoded = processor(text=text, images=images, size=[224, 224])

    # The call-time size=[224, 224] wins over the constructor default.
    self.assertEqual(len(encoded["pixel_values"][0][0]), 224)

# Override as InstructBlipProcessor has qformer_tokenizer
@require_torch
@require_vision
def test_unstructured_kwargs(self):
    """Flat (unstructured) kwargs must be dispatched to the right sub-component."""
    if "image_processor" not in self.processor_class.attributes:
        self.skipTest(f"image_processor attribute not present in {self.processor_class}")

    tokenizer = self.get_component("tokenizer")
    qformer_tokenizer = self.get_component("qformer_tokenizer")
    image_processor = self.get_component("image_processor")

    processor = self.processor_class(
        tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
    )
    self.skip_processor_without_typed_kwargs(processor)

    text = "lower newer"
    images = self.prepare_image_inputs()
    # Image kwargs (size) and text kwargs (padding/max_length) are mixed in one flat call.
    encoded = processor(
        text=text,
        images=images,
        return_tensors="pt",
        size={"height": 214, "width": 214},
        padding="max_length",
        max_length=76,
    )

    # size routed to the image processor, padding/max_length to the tokenizer.
    self.assertEqual(encoded["pixel_values"].shape[2], 214)
    self.assertEqual(len(encoded["input_ids"][0]), 76)

# Override as InstructBlipProcessor has qformer_tokenizer
@require_torch
@require_vision
def test_unstructured_kwargs_batched(self):
    """Flat kwargs must also be routed correctly for batched text/image input."""
    if "image_processor" not in self.processor_class.attributes:
        self.skipTest(f"image_processor attribute not present in {self.processor_class}")

    tokenizer = self.get_component("tokenizer")
    qformer_tokenizer = self.get_component("qformer_tokenizer")
    image_processor = self.get_component("image_processor")

    processor = self.processor_class(
        tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
    )
    self.skip_processor_without_typed_kwargs(processor)

    # Two text samples paired with two images.
    texts = ["lower newer", "upper older longer string"]
    images = self.prepare_image_inputs() * 2
    encoded = processor(
        text=texts,
        images=images,
        return_tensors="pt",
        size={"height": 214, "width": 214},
        padding="longest",
        max_length=76,
    )

    self.assertEqual(encoded["pixel_values"].shape[2], 214)

    # padding="longest" pads to the longest sample in the batch, not to max_length.
    self.assertEqual(len(encoded["input_ids"][0]), 6)

# Override as InstructBlipProcessor has qformer_tokenizer
@require_torch
@require_vision
def test_doubly_passed_kwargs(self):
    """Passing the same image kwarg both nested and flat must raise a ValueError."""
    if "image_processor" not in self.processor_class.attributes:
        self.skipTest(f"image_processor attribute not present in {self.processor_class}")

    tokenizer = self.get_component("tokenizer")
    qformer_tokenizer = self.get_component("qformer_tokenizer")
    image_processor = self.get_component("image_processor")

    processor = self.processor_class(
        tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
    )
    self.skip_processor_without_typed_kwargs(processor)

    texts = ["lower newer"]
    images = self.prepare_image_inputs()

    # "size" is supplied twice: once inside images_kwargs and once at top level.
    with self.assertRaises(ValueError):
        _ = processor(
            text=texts,
            images=images,
            images_kwargs={"size": {"height": 222, "width": 222}},
            size={"height": 214, "width": 214},
        )

# Override as InstructBlipProcessor has qformer_tokenizer
@require_torch
@require_vision
def test_structured_kwargs_nested(self):
    """Kwargs grouped by modality (common/images/text) must be routed to the right component."""
    if "image_processor" not in self.processor_class.attributes:
        self.skipTest(f"image_processor attribute not present in {self.processor_class}")
    image_processor = self.get_component("image_processor")
    tokenizer = self.get_component("tokenizer")
    qformer_tokenizer = self.get_component("qformer_tokenizer")

    processor = self.processor_class(
        tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
    )
    self.skip_processor_without_typed_kwargs(processor)

    input_str = "lower newer"
    image_input = self.prepare_image_inputs()

    # Define the kwargs for each modality
    all_kwargs = {
        "common_kwargs": {"return_tensors": "pt"},
        "images_kwargs": {"size": {"height": 214, "width": 214}},
        "text_kwargs": {"padding": "max_length", "max_length": 76},
    }

    inputs = processor(text=input_str, images=image_input, **all_kwargs)
    # NOTE(review): removed a stray second call to skip_processor_without_typed_kwargs
    # here — a copy-paste artifact with no effect after the inputs were already computed.

    # images_kwargs drove the resolution, text_kwargs drove the padding length.
    self.assertEqual(inputs["pixel_values"].shape[2], 214)
    self.assertEqual(len(inputs["input_ids"][0]), 76)

# Override as InstructBlipProcessor has qformer_tokenizer
@require_torch
@require_vision
def test_structured_kwargs_nested_from_dict(self):
    """Modality-grouped kwargs expanded from a dict must be routed to the right component."""
    if "image_processor" not in self.processor_class.attributes:
        self.skipTest(f"image_processor attribute not present in {self.processor_class}")

    tokenizer = self.get_component("tokenizer")
    qformer_tokenizer = self.get_component("qformer_tokenizer")
    image_processor = self.get_component("image_processor")

    processor = self.processor_class(
        tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
    )
    self.skip_processor_without_typed_kwargs(processor)

    text = "lower newer"
    images = self.prepare_image_inputs()

    # One nested dict per modality, expanded into the call.
    all_kwargs = {
        "common_kwargs": {"return_tensors": "pt"},
        "images_kwargs": {"size": {"height": 214, "width": 214}},
        "text_kwargs": {"padding": "max_length", "max_length": 76},
    }
    encoded = processor(text=text, images=images, **all_kwargs)

    self.assertEqual(encoded["pixel_values"].shape[2], 214)

    self.assertEqual(len(encoded["input_ids"][0]), 76)
Loading

0 comments on commit d2dcff9

Please sign in to comment.