diff --git a/.github/workflows/self-comment-ci.yml b/.github/workflows/self-comment-ci.yml
index f33a6f91f953..3f2b637e047c 100644
--- a/.github/workflows/self-comment-ci.yml
+++ b/.github/workflows/self-comment-ci.yml
@@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-22.04
name: Get PR number
# For security: only allow team members to run
- if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "qubvel", "molbap", "gante", "LysandreJik", "Cyrilvallez"]'), github.actor) && (startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow')) }}
+ if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "qubvel", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1"]'), github.actor) && (startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow')) }}
outputs:
PR_NUMBER: ${{ steps.set_pr_number.outputs.PR_NUMBER }}
steps:
diff --git a/docker/transformers-pytorch-deepspeed-amd-gpu/Dockerfile b/docker/transformers-pytorch-deepspeed-amd-gpu/Dockerfile
index d31e1cae5534..b67793dc9592 100644
--- a/docker/transformers-pytorch-deepspeed-amd-gpu/Dockerfile
+++ b/docker/transformers-pytorch-deepspeed-amd-gpu/Dockerfile
@@ -1,11 +1,11 @@
-FROM rocm/dev-ubuntu-22.04:5.6
+FROM rocm/dev-ubuntu-22.04:6.3
LABEL maintainer="Hugging Face"
ARG DEBIAN_FRONTEND=noninteractive
-ARG PYTORCH='2.1.1'
-ARG TORCH_VISION='0.16.1'
-ARG TORCH_AUDIO='2.1.1'
-ARG ROCM='5.6'
+ARG PYTORCH='2.5.1'
+ARG TORCH_VISION='0.20.0'
+ARG TORCH_AUDIO='2.5.0'
+ARG ROCM='6.3'
RUN apt update && \
apt install -y --no-install-recommends \
diff --git a/docs/source/en/agents_advanced.md b/docs/source/en/agents_advanced.md
index c4753bf1366b..eb5149d2faa3 100644
--- a/docs/source/en/agents_advanced.md
+++ b/docs/source/en/agents_advanced.md
@@ -162,7 +162,7 @@ agent.run(
improved_prompt could be "A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background"
Now that I have improved the prompt, I can use the image generator tool to generate an image based on this prompt.
->>> Agent is executing the code below:
+=== Agent is executing the code below:
image = image_generator(prompt="A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background")
final_answer(image)
```
diff --git a/docs/source/en/chat_templating.md b/docs/source/en/chat_templating.md
index 03d891a664fe..3581487e130f 100644
--- a/docs/source/en/chat_templating.md
+++ b/docs/source/en/chat_templating.md
@@ -39,7 +39,7 @@ Let's make this concrete with a quick example using the `mistralai/Mistral-7B-In
... ]
>>> tokenizer.apply_chat_template(chat, tokenize=False)
-"[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today? [INST] I'd like to show off how chat templating works! [/INST]"
+" [INST] Hello, how are you? [/INST] I'm doing great. How can I help you today? [INST] I'd like to show off how chat templating works! [/INST]"
```
Notice how the tokenizer has added the control tokens [INST] and [/INST] to indicate the start and end of
diff --git a/docs/source/en/generation_strategies.md b/docs/source/en/generation_strategies.md
index 7274b002650a..99049cceef34 100644
--- a/docs/source/en/generation_strategies.md
+++ b/docs/source/en/generation_strategies.md
@@ -231,7 +231,7 @@ to check if the text is machine-generated (outputs `True` for machine-generated
>>> detector = WatermarkDetector(model_config=model.config, device="cpu", watermarking_config=watermarking_config)
>>> detection_out = detector(out, return_dict=True)
>>> detection_out.prediction
-array([True, True])
+array([ True, True])
```
@@ -269,7 +269,7 @@ dimension you can act upon, in addition to selecting a decoding strategy. Popula
>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
>>> outputs = model.generate(**inputs)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
-['I look forward to seeing you all again!\n\n\n\n\n\n\n\n\n\n\n']
+['I look forward to seeing you all again!\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n']
```
### Contrastive search
@@ -445,7 +445,7 @@ To enable assisted decoding, set the `assistant_model` argument with a model.
>>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
>>> outputs = model.generate(**inputs, assistant_model=assistant_model)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
-['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a']
+['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a glass of wine.']
```
@@ -461,7 +461,7 @@ If you're using a `pipeline` object, all you need to do is to pass the assistant
... model="meta-llama/Llama-3.1-8B",
... assistant_model="meta-llama/Llama-3.2-1B", # This extra line is all that's needed, also works with UAD
... torch_dtype=torch.bfloat16
->>> )
+... )
>>> pipe_output = pipe("Once upon a time, ", max_new_tokens=50, do_sample=False)
>>> pipe_output[0]["generated_text"]
'Once upon a time, 3D printing was a niche technology that was only'
@@ -488,7 +488,7 @@ just like in multinomial sampling. However, in assisted decoding, reducing the t
>>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
>>> outputs = model.generate(**inputs, assistant_model=assistant_model, do_sample=True, temperature=0.5)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
-['Alice and Bob, a couple of friends of mine, who are both in the same office as']
+['Alice and Bob are two people who are very different, but they are both very good at what they do. Alice']
```
We recommend to install `scikit-learn` library to enhance the candidate generation strategy and achieve additional speedup.
@@ -518,7 +518,7 @@ to ensure the new tokens include the correct prompt suffix.
>>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
>>> outputs = model.generate(**inputs, assistant_model=assistant_model, tokenizer=tokenizer, assistant_tokenizer=assistant_tokenizer)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
-['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a']
+['Alice and Bob are playing a game. Alice has a set of $n$ integers $a_1, a']
```
#### Prompt Lookup
@@ -547,7 +547,7 @@ If the model you're using was trained to do early exit, you can pass
>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
>>> outputs = model.generate(**inputs, assistant_early_exit=4, do_sample=False, max_new_tokens=20)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
-['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a']
+['Alice and Bob are playing a game. Alice has a set of $n$ integers $a_1, a']
```
### DoLa Decoding
@@ -571,10 +571,9 @@ See the following examples for DoLa decoding with the 32-layer LLaMA-7B model.
>>> import torch
>>> from accelerate.test_utils.testing import get_backend
->>> tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
->>> model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b", torch_dtype=torch.float16)
>>> device, _, _ = get_backend() # automatically detects the underlying device type (CUDA, CPU, XPU, MPS, etc.)
->>> model.to(device)
+>>> tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
+>>> model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b", torch_dtype=torch.float16).to(device)
>>> set_seed(42)
>>> text = "On what date was the Declaration of Independence officially signed?"
@@ -593,7 +592,7 @@ See the following examples for DoLa decoding with the 32-layer LLaMA-7B model.
# DoLa decoding with contrasting specific layers (layers 28 and 30)
>>> dola_custom_output = model.generate(**inputs, do_sample=False, max_new_tokens=50, dola_layers=[28,30], repetition_penalty=1.2)
>>> tokenizer.batch_decode(dola_custom_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
-['\nIt was officially signed on 2 August 1776, when 56 members of the Second Continental Congress, representing the original 13 American colonies, voted unanimously for the resolution for independence. The 2']
+['\nIn 1891, when he was 54 years old, John Jacob Astor founded his empire. He opened a one-man business and spent the next 27 years working 10-hour days. When']
```
#### Understanding the `dola_layers` argument
diff --git a/docs/source/en/index.md b/docs/source/en/index.md
index 7d6a9c188d40..2233630128ae 100644
--- a/docs/source/en/index.md
+++ b/docs/source/en/index.md
@@ -385,6 +385,7 @@ Flax), PyTorch, and/or TensorFlow.
| [YOLOS](model_doc/yolos) | ✅ | ❌ | ❌ |
| [YOSO](model_doc/yoso) | ✅ | ❌ | ❌ |
| [Zamba](model_doc/zamba) | ✅ | ❌ | ❌ |
+| [Zamba2](model_doc/zamba2) | ✅ | ❌ | ❌ |
| [ZoeDepth](model_doc/zoedepth) | ✅ | ❌ | ❌ |
diff --git a/docs/source/en/kv_cache.md b/docs/source/en/kv_cache.md
index b1d1e0998f06..ed6fb9035e0c 100644
--- a/docs/source/en/kv_cache.md
+++ b/docs/source/en/kv_cache.md
@@ -56,7 +56,7 @@ More concretely, key-value cache acts as a memory bank for these generative mode
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
- >>> model_id = "meta-llama/Llama-2-7b-chat-hf"
+ >>> model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
>>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="cuda:0")
>>> tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -82,7 +82,13 @@ More concretely, key-value cache acts as a memory bank for these generative mode
... cache_position = cache_position[-1:] + 1 # add one more position for the next token
>>> print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])
- "[INST] Hello, what's your name. [/INST] Hello! My name is LLaMA,"
+ ```
+ ```txt
+ <|user|>
+ Hello, what's your name.
+ <|assistant|>
+ My name is Sarah.
+ <|
```
@@ -132,17 +138,13 @@ Cache quantization can be detrimental in terms of latency if the context length
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
->>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
->>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16).to("cuda:0")
+>>> tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
+>>> model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.float16).to("cuda:0")
>>> inputs = tokenizer("I like rock music because", return_tensors="pt").to(model.device)
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="quantized", cache_config={"nbits": 4, "backend": "quanto"})
>>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])
-I like rock music because it's loud and energetic. It's a great way to express myself and rel
-
->>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20)
->>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])
-I like rock music because it's loud and energetic. I like to listen to it when I'm feeling
+I like rock music because it's a great way to express myself. I like the way it makes me feel, the
```
### Offloaded Cache
@@ -231,14 +233,14 @@ For more examples with Static Cache and JIT compilation, take a look at [StaticC
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
->>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
->>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto")
+>>> tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
+>>> model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.float16, device_map="auto")
>>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
>>> # simply pass the cache implementation="static"
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="static")
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
-"Hello, my name is [Your Name], and I am a [Your Profession] with [Number of Years] of"
+"Hello, my name is [Your Name] and I am a [Your Position] at [Your Company]. I am writing"
```
@@ -256,7 +258,7 @@ This will use the [`~OffloadedStaticCache`] implementation instead.
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto")
>>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
->>> # simply pass the cache implementation="static"
+>>> # simply pass the cache implementation="offloaded_static"
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="offloaded_static")
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
"Hello, my name is [Your Name], and I am a [Your Profession] with [Number of Years] of"
@@ -275,14 +277,14 @@ Note that you can use this cache only for models that support sliding window, e.
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, SinkCache
->>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
->>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", torch_dtype=torch.float16).to("cuda:0")
+>>> tokenizer = AutoTokenizer.from_pretrained("teknium/OpenHermes-2.5-Mistral-7B")
+>>> model = AutoModelForCausalLM.from_pretrained("teknium/OpenHermes-2.5-Mistral-7B", torch_dtype=torch.float16).to("cuda:0")
>>> inputs = tokenizer("Yesterday I was on a rock concert and.", return_tensors="pt").to(model.device)
>>> # can be used by passing in cache implementation
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=30, cache_implementation="sliding_window")
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
-"Yesterday I was on a rock concert and. I was so excited to see my favorite band. I was so excited that I was jumping up and down and screaming. I was so excited that I"
+"Yesterday I was on a rock concert and. I was so excited to see my favorite band perform live. I was so happy that I could hardly contain myself. I was jumping up and down and"
```
### Sink Cache
@@ -295,8 +297,8 @@ Unlike other cache classes, this one can't be used directly by indicating a `cac
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, SinkCache
->>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
->>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16).to("cuda:0")
+>>> tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
+>>> model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.float16).to("cuda:0")
>>> inputs = tokenizer("This is a long story about unicorns, fairies and magic.", return_tensors="pt").to(model.device)
>>> # get our cache, specify number of sink tokens and window size
@@ -304,7 +306,7 @@ Unlike other cache classes, this one can't be used directly by indicating a `cac
>>> past_key_values = SinkCache(window_length=256, num_sink_tokens=4)
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=30, past_key_values=past_key_values)
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
-"This is a long story about unicorns, fairies and magic. It is a fantasy world where unicorns and fairies live together in harmony. The story follows a young girl named Lily"
+"This is a long story about unicorns, fairies and magic. It is a story about a young girl named Lily who discovers that she has the power to control the elements. She learns that she can"
```
### Encoder-Decoder Cache
@@ -332,15 +334,15 @@ In case you are using Sink Cache, you have to crop your inputs to that maximum l
>>> import torch
>>> from transformers import AutoTokenizer,AutoModelForCausalLM
>>> from transformers.cache_utils import (
->>> DynamicCache,
->>> SinkCache,
->>> StaticCache,
->>> SlidingWindowCache,
->>> QuantoQuantizedCache,
->>> QuantizedCacheConfig,
->>> )
-
->>> model_id = "meta-llama/Llama-2-7b-chat-hf"
+... DynamicCache,
+... SinkCache,
+... StaticCache,
+... SlidingWindowCache,
+... QuantoQuantizedCache,
+... QuantizedCacheConfig,
+... )
+
+>>> model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
>>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map='auto')
>>> tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -363,7 +365,7 @@ In case you are using Sink Cache, you have to crop your inputs to that maximum l
... messages.append({"role": "assistant", "content": completion})
print(messages)
-[{'role': 'user', 'content': "Hello, what's your name?"}, {'role': 'assistant', 'content': " Hello! My name is LLaMA, I'm a large language model trained by a team of researcher at Meta AI. 😊"}, {'role': 'user', 'content': 'Btw, yesterday I was on a rock concert.'}, {'role': 'assistant', 'content': ' Oh, cool! That sounds like a lot of fun! 🎉 Did you enjoy the concert? What was the band like? 🤔'}]
+[{'role': 'user', 'content': "Hello, what's your name?"}, {'role': 'assistant', 'content': "Hello, I'm AI."}, {'role': 'user', 'content': 'Btw, yesterday I was on a rock concert.'}, {'role': 'assistant', 'content': "I'm sorry to hear that you were on a rock concert yesterday. It sounds like a fun experience, but I'm not capable of experiencing music or concerts. However, I can provide you with some information about rock music and its history. Rock music emerged in the 1950s and 1960s in the United States and Britain, and it quickly gained popularity around the world. Some of the most famous rock bands of all time include The Beatles, The Rolling Stones, Led Zeppelin, and Pink Floyd. Rock music has a distinct sound and style, with elements of blues, country, and folk music. It often features guitar solos, heavy bass lines, and drums. Rock music has had a significant impact on popular culture, influencing genres such as punk rock, heavy metal, and alternative rock."}]
```
@@ -376,7 +378,7 @@ Sometimes you would want to first fill-in cache object with key/values for certa
>>> import torch
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache, StaticCache
->>> model_id = "meta-llama/Llama-2-7b-chat-hf"
+>>> model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
>>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="cuda")
>>> tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -400,7 +402,7 @@ Sometimes you would want to first fill-in cache object with key/values for certa
... responses.append(response)
>>> print(responses)
-[' You are a helpful assistant. Help me to write a blogpost about travelling.\n\nTitle: The Ultimate Guide to Travelling: Tips, Tricks, and', ' You are a helpful assistant. What is the capital of France?\n\nYes, the capital of France is Paris.']
+[' You are a helpful assistant. Help me to write a blogpost about travelling. I am excited to share my experiences with you. I have been traveling for the past', ' You are a helpful assistant. What is the capital of France? \n\nAnswer: Paris is the capital of France.']
```
@@ -414,8 +416,8 @@ this legacy format, you can seamlessly convert it to a `DynamicCache` and back.
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
->>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
->>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto")
+>>> tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
+>>> model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.float16, device_map="auto")
>>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
>>> # `return_dict_in_generate=True` is required to return the cache. `return_legacy_cache` forces the returned cache
diff --git a/docs/source/en/model_doc/glm.md b/docs/source/en/model_doc/glm.md
index be0b367b62ec..1268b2e7cf9c 100644
--- a/docs/source/en/model_doc/glm.md
+++ b/docs/source/en/model_doc/glm.md
@@ -56,7 +56,7 @@ In the following, we demonstrate how to use `glm-4-9b-chat` for the inference. N
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> device = "cuda" # the device to load the model onto
->>> model = AutoModelForCausalLM.from_pretrained("THUDM/glm-4-9b-chat", device_map="auto")
+>>> model = AutoModelForCausalLM.from_pretrained("THUDM/glm-4-9b-chat", device_map="auto", trust_remote_code=True)
>>> tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4-9b-chat")
>>> prompt = "Give me a short introduction to large language model."
diff --git a/docs/source/en/model_doc/grounding-dino.md b/docs/source/en/model_doc/grounding-dino.md
index d024ff6ba736..1b9104eb963e 100644
--- a/docs/source/en/model_doc/grounding-dino.md
+++ b/docs/source/en/model_doc/grounding-dino.md
@@ -64,18 +64,19 @@ Here's how to use the model for zero-shot object detection:
>>> results = processor.post_process_grounded_object_detection(
... outputs,
-... threshold=0.4,
+... inputs.input_ids,
+... box_threshold=0.4,
... text_threshold=0.3,
-... target_sizes=[(image.height, image.width)]
+... target_sizes=[image.size[::-1]]
... )
->>> # Retrieve the first image result
+
+# Retrieve the first image result
>>> result = results[0]
->>> for box, score, text_label in zip(result["boxes"], result["scores"], result["text_labels"]):
+>>> for box, score, labels in zip(result["boxes"], result["scores"], result["labels"]):
... box = [round(x, 2) for x in box.tolist()]
-... print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}")
-Detected a cat with confidence 0.479 at location [344.7, 23.11, 637.18, 374.28]
-Detected a cat with confidence 0.438 at location [12.27, 51.91, 316.86, 472.44]
-Detected a remote control with confidence 0.478 at location [38.57, 70.0, 176.78, 118.18]
+... print(f"Detected {labels} with confidence {round(score.item(), 3)} at location {box}")
+Detected a cat with confidence 0.468 at location [344.78, 22.9, 637.3, 373.62]
+Detected a cat with confidence 0.426 at location [11.74, 51.55, 316.51, 473.22]
```
## Grounded SAM
diff --git a/docs/source/en/model_doc/llava_onevision.md b/docs/source/en/model_doc/llava_onevision.md
index b6b0a2bfa1d1..41a23e3da81b 100644
--- a/docs/source/en/model_doc/llava_onevision.md
+++ b/docs/source/en/model_doc/llava_onevision.md
@@ -81,7 +81,7 @@ text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=
# Note that the template simply formats your prompt, you still have to tokenize it and obtain pixel values for your images
print(text_prompt)
->>> "<|im_start|>user\nWhat is shown in this image?<|im_end|>\n<|im_start|>assistant\nPage showing the list of options.<|im_end|>"
+'<|im_start|>user\nWhat is shown in this image?<|im_end|>\n<|im_start|>assistant\nPage showing the list of options.<|im_end|>'
```
This model was contributed by [RaushanTurganbay](https://huggingface.co/RaushanTurganbay).
diff --git a/docs/source/en/model_doc/moshi.md b/docs/source/en/model_doc/moshi.md
index 64216f570e3e..2e2c5655de45 100644
--- a/docs/source/en/model_doc/moshi.md
+++ b/docs/source/en/model_doc/moshi.md
@@ -110,9 +110,14 @@ To follow the example of the following image, `"Hello, I'm Moshi"` could be tran
>>> from datasets import load_dataset, Audio
>>> import torch, math
>>> from transformers import MoshiForConditionalGeneration, AutoFeatureExtractor, AutoTokenizer
->>> librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+>>> librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+>>> feature_extractor = AutoFeatureExtractor.from_pretrained("kyutai/moshiko-pytorch-bf16")
+>>> tokenizer = AutoTokenizer.from_pretrained("kyutai/moshiko-pytorch-bf16")
+>>> device = "cuda"
+>>> dtype = torch.bfloat16
+
>>> # prepare user input audio
>>> librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate))
>>> audio_sample = librispeech_dummy[-1]["audio"]["array"]
diff --git a/docs/source/en/model_doc/phi3.md b/docs/source/en/model_doc/phi3.md
index 76d94008137e..fe68a6ae76b2 100644
--- a/docs/source/en/model_doc/phi3.md
+++ b/docs/source/en/model_doc/phi3.md
@@ -57,10 +57,7 @@ Phi-3 has been integrated in the development version (4.40.0.dev) of `transforme
>>> outputs = model.generate(inputs, max_new_tokens=32)
>>> text = tokenizer.batch_decode(outputs)[0]
>>> print(text)
-<|user|>
-Can you provide ways to eat combinations of bananas and dragonfruits?<|end|>
-<|assistant|>
-Certainly! Bananas and dragonfruits can be combined in various delicious ways. Here are some ideas for eating combinations of bananas and
+<|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious ways. Here are some creative ideas for incorporating both fruits
```
## Phi3Config
diff --git a/docs/source/en/model_doc/seamless_m4t.md b/docs/source/en/model_doc/seamless_m4t.md
index e820e6c92563..486e58691f6d 100644
--- a/docs/source/en/model_doc/seamless_m4t.md
+++ b/docs/source/en/model_doc/seamless_m4t.md
@@ -52,7 +52,7 @@ Here is how to use the processor to process text and audio:
```python
>>> # let's load an audio sample from an Arabic speech corpus
>>> from datasets import load_dataset
->>> dataset = load_dataset("arabic_speech_corpus", split="test", streaming=True)
+>>> dataset = load_dataset("arabic_speech_corpus", split="test", streaming=True, trust_remote_code=True)
>>> audio_sample = next(iter(dataset))["audio"]
>>> # now, process it
diff --git a/docs/source/en/model_doc/seamless_m4t_v2.md b/docs/source/en/model_doc/seamless_m4t_v2.md
index aea34acc180b..c6a2ec4b51c2 100644
--- a/docs/source/en/model_doc/seamless_m4t_v2.md
+++ b/docs/source/en/model_doc/seamless_m4t_v2.md
@@ -52,7 +52,7 @@ Here is how to use the processor to process text and audio:
```python
>>> # let's load an audio sample from an Arabic speech corpus
>>> from datasets import load_dataset
->>> dataset = load_dataset("arabic_speech_corpus", split="test", streaming=True)
+>>> dataset = load_dataset("arabic_speech_corpus", split="test", streaming=True, trust_remote_code=True)
>>> audio_sample = next(iter(dataset))["audio"]
>>> # now, process it
diff --git a/docs/source/en/model_doc/siglip.md b/docs/source/en/model_doc/siglip.md
index 0c0977d10b58..56e168ab4734 100644
--- a/docs/source/en/model_doc/siglip.md
+++ b/docs/source/en/model_doc/siglip.md
@@ -86,7 +86,7 @@ If you want to do the pre- and postprocessing yourself, here's how to do that:
>>> candidate_labels = ["2 cats", "2 dogs"]
# follows the pipeline prompt template to get same results
>>> texts = [f'This is a photo of {label}.' for label in candidate_labels]
->>> # important: we pass `padding=max_length` since the model was trained with this
+# important: we pass `padding=max_length` since the model was trained with this
>>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt")
>>> with torch.no_grad():
@@ -95,7 +95,7 @@ If you want to do the pre- and postprocessing yourself, here's how to do that:
>>> logits_per_image = outputs.logits_per_image
>>> probs = torch.sigmoid(logits_per_image) # these are the probabilities
>>> print(f"{probs[0][0]:.1%} that image 0 is '{candidate_labels[0]}'")
-31.9% that image 0 is 'a photo of 2 cats'
+19.8% that image 0 is '2 cats'
```
## Resources
@@ -142,8 +142,7 @@ To load and run a model using Flash Attention 2, refer to the snippet below:
# follows the pipeline prompt template to get same results
>>> texts = [f'This is a photo of {label}.' for label in candidate_labels]
# important: we pass `padding=max_length` since the model was trained with this
->>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt")
->>> inputs.to(device)
+>>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt").to(device)
>>> with torch.no_grad():
... with torch.autocast(device):
@@ -152,7 +151,7 @@ To load and run a model using Flash Attention 2, refer to the snippet below:
>>> logits_per_image = outputs.logits_per_image
>>> probs = torch.sigmoid(logits_per_image) # these are the probabilities
>>> print(f"{probs[0][0]:.1%} that image 0 is '{candidate_labels[0]}'")
-51.3% that image 0 is 'This is a photo of 2 cats.'
+19.8% that image 0 is '2 cats'
```
diff --git a/docs/source/en/model_doc/zamba2.md b/docs/source/en/model_doc/zamba2.md
new file mode 100644
index 000000000000..b331e10eaf84
--- /dev/null
+++ b/docs/source/en/model_doc/zamba2.md
@@ -0,0 +1,91 @@
+
+# Zamba2
+
+Zamba2 is a large language model (LLM) trained by Zyphra, and made available under an Apache 2.0 license. Please see the [Zyphra Hugging Face](https://huggingface.co/collections/zyphra/) repository for model weights.
+
+This model was contributed by [pglo](https://huggingface.co/pglo).
+
+
+## Model details
+
+Zamba2-1.2B, Zamba2-2.7B and Zamba2-7B are hybrid models combining state-space models (specifically [Mamba](https://github.com/state-spaces/mamba)) and transformer blocks, and were trained using next-token prediction. Zamba2 uses shared transformer layers after every 6 Mamba blocks. It uses the [Mistral v0.1 tokenizer](https://huggingface.co/mistralai/Mistral-7B-v0.1). We came to this architecture after a series of ablations at small scales. Zamba2-1.2B, Zamba2-2.7B and Zamba2-7B were pre-trained on 2T and 3T tokens, respectively.
+
+
+
+## Quick start
+
+
+### Prerequisites
+
+Zamba2 requires you to use `transformers` version 4.48.0 or higher:
+```bash
+pip install "transformers>=4.48.0"
+```
+
+## Inference
+
+```python
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-7B")
+model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-7B", device_map="cuda", torch_dtype=torch.bfloat16)
+
+input_text = "What factors contributed to the fall of the Roman Empire?"
+input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
+
+outputs = model.generate(**input_ids, max_new_tokens=100)
+print(tokenizer.decode(outputs[0]))
+```
+
+
+## Model card
+
+The model cards can be found at:
+* [Zamba2-1.2B](https://huggingface.co/Zyphra/Zamba2-1.2B)
+* [Zamba2-2.7B](https://huggingface.co/Zyphra/Zamba2-2.7B)
+* [Zamba2-7B](https://huggingface.co/Zyphra/Zamba2-7B)
+
+
+## Issues
+For issues with model output, or community discussion, please use the Hugging Face community [forum](https://huggingface.co/Zyphra/Zamba2-7B/discussions).
+
+
+## License
+
+The model weights are open-sourced via an Apache 2.0 license.
+
+
+## Zamba2Config
+
+[[autodoc]] Zamba2Config
+
+
+## Zamba2Model
+
+[[autodoc]] Zamba2Model
+ - forward
+
+
+## Zamba2ForCausalLM
+
+[[autodoc]] Zamba2ForCausalLM
+ - forward
+
+
+## Zamba2ForSequenceClassification
+
+[[autodoc]] transformers.Zamba2ForSequenceClassification
+ - forward
diff --git a/docs/source/en/model_doc/zoedepth.md b/docs/source/en/model_doc/zoedepth.md
index 74e25f3c3f6e..ecd068511e96 100644
--- a/docs/source/en/model_doc/zoedepth.md
+++ b/docs/source/en/model_doc/zoedepth.md
@@ -70,7 +70,7 @@ Alternatively, one can also perform inference using the classes:
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
-... outputs = model(pixel_values)
+...     outputs = model(**inputs)
>>> # interpolate to original size and visualize the prediction
>>> ## ZoeDepth dynamically pads the input image. Thus we pass the original image size as argument
diff --git a/docs/source/en/perf_infer_gpu_one.md b/docs/source/en/perf_infer_gpu_one.md
index 0003784c585e..8087008f8772 100644
--- a/docs/source/en/perf_infer_gpu_one.md
+++ b/docs/source/en/perf_infer_gpu_one.md
@@ -111,6 +111,7 @@ FlashAttention-2 is currently supported for the following architectures:
* [UniSpeech](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/unispeech#transformers.UniSpeechModel)
* [unispeech_sat](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/unispeech-sat#transformers.UniSpeechSatModel)
* [helium](https://huggingface.co/docs/transformers/main/en/model_doc/helium#transformers.HeliumModel)
+* [Zamba2](https://huggingface.co/docs/transformers/model_doc/zamba2)
You can request to add FlashAttention-2 support for another model by opening a GitHub Issue or Pull Request.
@@ -328,6 +329,7 @@ For now, Transformers supports SDPA inference and training for the following arc
* [XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLModel)
* [YOLOS](https://huggingface.co/docs/transformers/model_doc/yolos#transformers.YolosModel)
* [helium](https://huggingface.co/docs/transformers/main/en/model_doc/helium#transformers.HeliumModel)
+* [Zamba2](https://huggingface.co/docs/transformers/model_doc/zamba2)
diff --git a/docs/source/en/task_summary.md b/docs/source/en/task_summary.md
index a5e2192f8759..e06081a93d04 100644
--- a/docs/source/en/task_summary.md
+++ b/docs/source/en/task_summary.md
@@ -305,10 +305,7 @@ There are two types of language modeling:
... for pred in preds
... ]
>>> preds
- [{'score': 0.2236,
- 'token': 1761,
- 'token_str': ' platform',
- 'sequence': 'Hugging Face is a community-based open-source platform for machine learning.'}]
+ [{'score': 0.224, 'token': 3944, 'token_str': ' tool', 'sequence': 'Hugging Face is a community-based open-source tool for machine learning.'}]
```
## Multimodal
diff --git a/docs/source/en/tasks/prompting.md b/docs/source/en/tasks/prompting.md
index 4e30fb1e0ee3..146ec328df0c 100644
--- a/docs/source/en/tasks/prompting.md
+++ b/docs/source/en/tasks/prompting.md
@@ -80,7 +80,7 @@ Run inference with decoder-only models with the `text-generation` pipeline:
>>> prompt = "Hello, I'm a language model"
>>> generator(prompt, max_length = 30)
-[{'generated_text': "Hello, I'm a language model programmer so you can use some of my stuff. But you also need some sort of a C program to run."}]
+[{'generated_text': "Hello, I'm a language model. Not a programming language at all: it's pretty simple.\n\nWhen I write a function, I mean"}]
```
To run inference with an encoder-decoder, use the `text2text-generation` pipeline:
@@ -258,7 +258,7 @@ also be a suitable location for instructions. Typically, it's better to place th
>>> for seq in sequences:
... print(f"{seq['generated_text']}")
-Permaculture is an ecological design mimicking natural ecosystems to meet basic needs and prepare for climate change. It is based on traditional knowledge and scientific understanding.
+"Permaculture is an ecological design method that mimics natural ecosystems' diversity, functionality, and resilience using modern technology and indigenous knowledge. It aims to help"
```
#### Question answering
@@ -284,7 +284,7 @@ the leading word or phrase (`"Answer:"`) to nudge the model to start generating
>>> for seq in sequences:
... print(f"Result: {seq['generated_text']}")
-Result: Modern tools often used to make gazpacho include
+"Result: Modern tools are used, such as immersion blenders"
```
#### Reasoning
@@ -309,7 +309,7 @@ Let's try if we can make a model reason about a simple arithmetics task with a b
>>> for seq in sequences:
... print(f"Result: {seq['generated_text']}")
Result:
-There are a total of 5 groups, so there are 5 x 4=20 students in the class.
+There are a total of 50 students in the class (5 groups x 4 students per group = 20 groups, and
```
Correct! Let's increase the complexity a little and see if we can still get away with a basic prompt:
diff --git a/examples/pytorch/object-detection/run_object_detection.py b/examples/pytorch/object-detection/run_object_detection.py
index 8d722f4d5d5d..095b41a6a491 100644
--- a/examples/pytorch/object-detection/run_object_detection.py
+++ b/examples/pytorch/object-detection/run_object_detection.py
@@ -271,6 +271,10 @@ class DataTrainingArguments:
)
},
)
+ use_fast: Optional[bool] = field(
+ default=True,
+        metadata={"help": "Use a fast torchvision-based image processor if it is supported for a given model."},
+ )
@dataclass
@@ -427,6 +431,7 @@ def main():
size={"max_height": data_args.image_square_size, "max_width": data_args.image_square_size},
do_pad=True,
pad_size={"height": data_args.image_square_size, "width": data_args.image_square_size},
+ use_fast=data_args.use_fast,
**common_pretrained_args,
)
diff --git a/examples/pytorch/object-detection/run_object_detection_no_trainer.py b/examples/pytorch/object-detection/run_object_detection_no_trainer.py
index dbfcb3fd97fa..b7ca051949e1 100644
--- a/examples/pytorch/object-detection/run_object_detection_no_trainer.py
+++ b/examples/pytorch/object-detection/run_object_detection_no_trainer.py
@@ -256,6 +256,12 @@ def parse_args():
default=1333,
help="Image longest size will be resized to this value, then image will be padded to square.",
)
+ parser.add_argument(
+ "--use_fast",
+ type=bool,
+ default=True,
+        help="Use a fast torchvision-based image processor if it is supported for a given model.",
+ )
parser.add_argument(
"--cache_dir",
type=str,
@@ -482,6 +488,7 @@ def main():
size={"max_height": args.image_square_size, "max_width": args.image_square_size},
do_pad=True,
pad_size={"height": args.image_square_size, "width": args.image_square_size},
+ use_fast=args.use_fast,
**common_pretrained_args,
)
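Note on the new `--use_fast` flag in both object-detection example scripts: it is simply forwarded to the image-processor constructor. A minimal sketch of the effect (the checkpoint name is a hypothetical stand-in, not taken from the scripts):

```python
from transformers import AutoImageProcessor

# With use_fast=True, the torchvision-based "fast" image processor class is returned
# when one exists for the architecture; otherwise the slow processor is used.
image_processor = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50", use_fast=True)
print(type(image_processor).__name__)  # e.g. DetrImageProcessorFast if torchvision is installed
```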
diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py
index 3493634db516..02a55c28ac66 100755
--- a/src/transformers/__init__.py
+++ b/src/transformers/__init__.py
@@ -889,6 +889,7 @@
"models.yolos": ["YolosConfig"],
"models.yoso": ["YosoConfig"],
"models.zamba": ["ZambaConfig"],
+ "models.zamba2": ["Zamba2Config"],
"models.zoedepth": ["ZoeDepthConfig"],
"onnx": [],
"pipelines": [
@@ -3989,6 +3990,14 @@
"ZambaPreTrainedModel",
]
)
+ _import_structure["models.zamba2"].extend(
+ [
+ "Zamba2ForCausalLM",
+ "Zamba2ForSequenceClassification",
+ "Zamba2Model",
+ "Zamba2PreTrainedModel",
+ ]
+ )
_import_structure["models.zoedepth"].extend(
[
"ZoeDepthForDepthEstimation",
@@ -6004,6 +6013,7 @@
from .models.yolos import YolosConfig
from .models.yoso import YosoConfig
from .models.zamba import ZambaConfig
+ from .models.zamba2 import Zamba2Config
from .models.zoedepth import ZoeDepthConfig
# Pipelines
@@ -8542,6 +8552,12 @@
ZambaModel,
ZambaPreTrainedModel,
)
+ from .models.zamba2 import (
+ Zamba2ForCausalLM,
+ Zamba2ForSequenceClassification,
+ Zamba2Model,
+ Zamba2PreTrainedModel,
+ )
from .models.zoedepth import (
ZoeDepthForDepthEstimation,
ZoeDepthPreTrainedModel,
diff --git a/src/transformers/audio_utils.py b/src/transformers/audio_utils.py
index b4f11287f309..0ea8fe9bc4a8 100644
--- a/src/transformers/audio_utils.py
+++ b/src/transformers/audio_utils.py
@@ -146,7 +146,7 @@ def chroma_filter_bank(
sampling_rate: int,
tuning: float = 0.0,
power: Optional[float] = 2.0,
- weighting_parameters: Optional[Tuple[float]] = (5.0, 2),
+ weighting_parameters: Optional[Tuple[float, float]] = (5.0, 2.0),
start_at_c_chroma: Optional[bool] = True,
):
"""
@@ -165,7 +165,7 @@ def chroma_filter_bank(
Tuning deviation from A440 in fractions of a chroma bin.
power (`float`, *optional*, defaults to 2.0):
If 12.0, normalizes each column with their L2 norm. If 1.0, normalizes each column with their L1 norm.
- weighting_parameters (`Tuple[float]`, *optional*, defaults to `(5., 2.)`):
+ weighting_parameters (`Tuple[float, float]`, *optional*, defaults to `(5., 2.)`):
If specified, apply a Gaussian weighting parameterized by the first element of the tuple being the center and
the second element being the Gaussian half-width.
start_at_c_chroma (`float`, *optional*, defaults to `True`):
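For the corrected `weighting_parameters` annotation above, a quick sketch of a call (the bin counts are arbitrary assumptions, and the first two positional arguments are assumed to be the number of frequency bins and chroma bins):

```python
from transformers.audio_utils import chroma_filter_bank

# weighting_parameters is a (center, half-width) pair for the Gaussian weighting
# described in the docstring; the other values here are illustrative only.
filters = chroma_filter_bank(
    2048,                             # number of frequency bins (assumed value)
    12,                               # number of chroma bins
    sampling_rate=44100,
    weighting_parameters=(5.0, 2.0),  # Gaussian center and half-width
)
print(filters.shape)  # filter bank mapping frequency bins to chroma bins
```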
diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py
index f196cedd3d23..43cf2fe42951 100644
--- a/src/transformers/models/__init__.py
+++ b/src/transformers/models/__init__.py
@@ -303,5 +303,6 @@
yolos,
yoso,
zamba,
+ zamba2,
zoedepth,
)
diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py
index f4590c81c7d5..95c5bc4d008d 100644
--- a/src/transformers/models/auto/configuration_auto.py
+++ b/src/transformers/models/auto/configuration_auto.py
@@ -335,6 +335,7 @@
("yolos", "YolosConfig"),
("yoso", "YosoConfig"),
("zamba", "ZambaConfig"),
+ ("zamba2", "Zamba2Config"),
("zoedepth", "ZoeDepthConfig"),
]
)
@@ -680,6 +681,7 @@
("yolos", "YOLOS"),
("yoso", "YOSO"),
("zamba", "Zamba"),
+ ("zamba2", "Zamba2"),
("zoedepth", "ZoeDepth"),
]
)
diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py
index a3029bf650a9..8030f5dbbdaa 100644
--- a/src/transformers/models/auto/modeling_auto.py
+++ b/src/transformers/models/auto/modeling_auto.py
@@ -303,6 +303,7 @@
("yolos", "YolosModel"),
("yoso", "YosoModel"),
("zamba", "ZambaModel"),
+ ("zamba2", "Zamba2Model"),
]
)
@@ -577,6 +578,7 @@
("xlnet", "XLNetLMHeadModel"),
("xmod", "XmodForCausalLM"),
("zamba", "ZambaForCausalLM"),
+ ("zamba2", "Zamba2ForCausalLM"),
]
)
@@ -1055,6 +1057,7 @@
("xmod", "XmodForSequenceClassification"),
("yoso", "YosoForSequenceClassification"),
("zamba", "ZambaForSequenceClassification"),
+ ("zamba2", "Zamba2ForSequenceClassification"),
]
)
diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py
index ad273627efe8..5ee4f612285f 100644
--- a/src/transformers/models/auto/tokenization_auto.py
+++ b/src/transformers/models/auto/tokenization_auto.py
@@ -583,6 +583,13 @@
"LlamaTokenizerFast" if is_tokenizers_available() else None,
),
),
+ (
+ "zamba2",
+ (
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
+ ),
+ ),
]
)
diff --git a/src/transformers/models/dpt/image_processing_dpt.py b/src/transformers/models/dpt/image_processing_dpt.py
index c49dfcfef890..3c2162409c57 100644
--- a/src/transformers/models/dpt/image_processing_dpt.py
+++ b/src/transformers/models/dpt/image_processing_dpt.py
@@ -139,6 +139,11 @@ class DPTImageProcessor(BaseImageProcessor):
size_divisor (`int`, *optional*):
If `do_pad` is `True`, pads the image dimensions to be divisible by this value. This was introduced in the
DINOv2 paper, which uses the model in combination with DPT.
+ do_reduce_labels (`bool`, *optional*, defaults to `False`):
+ Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
+ used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
+ background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
+ `preprocess` method.
"""
model_input_names = ["pixel_values"]
@@ -157,6 +162,7 @@ def __init__(
image_std: Optional[Union[float, List[float]]] = None,
do_pad: bool = False,
size_divisor: int = None,
+ do_reduce_labels: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
@@ -174,6 +180,7 @@ def __init__(
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_pad = do_pad
self.size_divisor = size_divisor
+ self.do_reduce_labels = do_reduce_labels
def resize(
self,
@@ -275,10 +282,160 @@ def _get_pad(size, size_divisor):
return pad(image, ((pad_size_left, pad_size_right), (pad_size_top, pad_size_bottom)), data_format=data_format)
+ # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.reduce_label
+ def reduce_label(self, label: ImageInput) -> np.ndarray:
+ label = to_numpy_array(label)
+ # Avoid using underflow conversion
+ label[label == 0] = 255
+ label = label - 1
+ label[label == 254] = 255
+ return label
+
+ def _preprocess(
+ self,
+ image: ImageInput,
+ do_reduce_labels: bool = None,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ keep_aspect_ratio: bool = None,
+ ensure_multiple_of: int = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_pad: bool = None,
+ size_divisor: int = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ):
+ if do_reduce_labels:
+ image = self.reduce_label(image)
+
+ if do_resize:
+ image = self.resize(
+ image=image,
+ size=size,
+ resample=resample,
+ keep_aspect_ratio=keep_aspect_ratio,
+ ensure_multiple_of=ensure_multiple_of,
+ input_data_format=input_data_format,
+ )
+
+ if do_rescale:
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+
+ if do_normalize:
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+
+ if do_pad:
+ image = self.pad_image(image=image, size_divisor=size_divisor, input_data_format=input_data_format)
+
+ return image
+
+ def _preprocess_image(
+ self,
+ image: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ keep_aspect_ratio: bool = None,
+ ensure_multiple_of: int = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_pad: bool = None,
+ size_divisor: int = None,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """Preprocesses a single image."""
+ # All transformations expect numpy arrays.
+ image = to_numpy_array(image)
+ if do_rescale and is_scaled_image(image):
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(image)
+
+ image = self._preprocess(
+ image,
+ do_reduce_labels=False,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ keep_aspect_ratio=keep_aspect_ratio,
+ ensure_multiple_of=ensure_multiple_of,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_pad=do_pad,
+ size_divisor=size_divisor,
+ input_data_format=input_data_format,
+ )
+ if data_format is not None:
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+ return image
+
+ def _preprocess_segmentation_map(
+ self,
+ segmentation_map: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ keep_aspect_ratio: bool = None,
+ ensure_multiple_of: int = None,
+ do_reduce_labels: bool = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ):
+ """Preprocesses a single segmentation map."""
+ # All transformations expect numpy arrays.
+ segmentation_map = to_numpy_array(segmentation_map)
+ # Add an axis to the segmentation maps for transformations.
+ if segmentation_map.ndim == 2:
+ segmentation_map = segmentation_map[None, ...]
+ added_dimension = True
+ input_data_format = ChannelDimension.FIRST
+ else:
+ added_dimension = False
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
+ segmentation_map = self._preprocess(
+ image=segmentation_map,
+ do_reduce_labels=do_reduce_labels,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ keep_aspect_ratio=keep_aspect_ratio,
+ ensure_multiple_of=ensure_multiple_of,
+ do_normalize=False,
+ do_rescale=False,
+ input_data_format=input_data_format,
+ )
+ # Remove extra axis if added
+ if added_dimension:
+ segmentation_map = np.squeeze(segmentation_map, axis=0)
+ segmentation_map = segmentation_map.astype(np.int64)
+ return segmentation_map
+
+ # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.__call__
+ def __call__(self, images, segmentation_maps=None, **kwargs):
+ # Overrides the `__call__` method of the `Preprocessor` class such that the images and segmentation maps can both
+ # be passed in as positional arguments.
+ return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
+
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
+ segmentation_maps: Optional[ImageInput] = None,
do_resize: bool = None,
size: int = None,
keep_aspect_ratio: bool = None,
@@ -291,6 +448,7 @@ def preprocess(
image_std: Optional[Union[float, List[float]]] = None,
do_pad: bool = None,
size_divisor: int = None,
+ do_reduce_labels: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
@@ -302,6 +460,8 @@ def preprocess(
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ segmentation_maps (`ImageInput`, *optional*):
+ Segmentation map to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
@@ -326,6 +486,10 @@ def preprocess(
Image mean.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
+ do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
+ Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
+ is used for background, and background itself is not included in all classes of a dataset (e.g.
+ ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
@@ -357,9 +521,13 @@ def preprocess(
image_std = image_std if image_std is not None else self.image_std
do_pad = do_pad if do_pad is not None else self.do_pad
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
+ do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
images = make_list_of_images(images)
+ if segmentation_maps is not None:
+ segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)
+
if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
@@ -377,55 +545,47 @@ def preprocess(
size=size,
resample=resample,
)
- # All transformations expect numpy arrays.
- images = [to_numpy_array(image) for image in images]
- if do_rescale and is_scaled_image(images[0]):
- logger.warning_once(
- "It looks like you are trying to rescale already rescaled images. If the input"
- " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ images = [
+ self._preprocess_image(
+ image=img,
+ do_resize=do_resize,
+ do_rescale=do_rescale,
+ do_normalize=do_normalize,
+ do_pad=do_pad,
+ size=size,
+ resample=resample,
+ keep_aspect_ratio=keep_aspect_ratio,
+ ensure_multiple_of=ensure_multiple_of,
+ rescale_factor=rescale_factor,
+ image_mean=image_mean,
+ image_std=image_std,
+ size_divisor=size_divisor,
+ data_format=data_format,
+ input_data_format=input_data_format,
)
+ for img in images
+ ]
- if input_data_format is None:
- # We assume that all images have the same channel dimension format.
- input_data_format = infer_channel_dimension_format(images[0])
+ data = {"pixel_values": images}
- if do_resize:
- images = [
- self.resize(
- image=image,
+ if segmentation_maps is not None:
+ segmentation_maps = [
+ self._preprocess_segmentation_map(
+ segmentation_map=segmentation_map,
+ do_reduce_labels=do_reduce_labels,
+ do_resize=do_resize,
size=size,
resample=resample,
keep_aspect_ratio=keep_aspect_ratio,
ensure_multiple_of=ensure_multiple_of,
input_data_format=input_data_format,
)
- for image in images
+ for segmentation_map in segmentation_maps
]
- if do_rescale:
- images = [
- self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
- for image in images
- ]
+ data["labels"] = segmentation_maps
- if do_normalize:
- images = [
- self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
- for image in images
- ]
-
- if do_pad:
- images = [
- self.pad_image(image=image, size_divisor=size_divisor, input_data_format=input_data_format)
- for image in images
- ]
-
- images = [
- to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
- ]
-
- data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
# Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->DPT
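A short usage sketch of the segmentation-map path added to `DPTImageProcessor` (dummy arrays, not an example from the repository): with `do_reduce_labels=True`, ADE20k-style labels are shifted down by one, background becomes 255, and the processed maps are returned under the `labels` key.

```python
import numpy as np
from transformers import DPTImageProcessor

image_processor = DPTImageProcessor(do_reduce_labels=True)

image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)          # dummy RGB image
segmentation_map = np.random.randint(0, 151, (480, 640), dtype=np.uint8)  # dummy ADE20k-style labels

encoding = image_processor(image, segmentation_maps=segmentation_map, return_tensors="pt")
print(encoding["pixel_values"].shape)  # resized and normalized image batch
print(encoding["labels"].shape)        # label maps reduced by 1, background mapped to 255
```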
diff --git a/src/transformers/models/llava_next/processing_llava_next.py b/src/transformers/models/llava_next/processing_llava_next.py
index d8034ca9fa56..4de5fe63efce 100644
--- a/src/transformers/models/llava_next/processing_llava_next.py
+++ b/src/transformers/models/llava_next/processing_llava_next.py
@@ -200,11 +200,11 @@ def _get_unpadded_features(self, height, width, patches_height, patches_width, s
original_aspect_ratio = width / height
current_aspect_ratio = current_width / current_height
if original_aspect_ratio > current_aspect_ratio:
- new_height = (height * current_width) // width
+ new_height = int(round(height * (current_width / width), 7))
padding = (current_height - new_height) // 2
current_height -= padding * 2
else:
- new_width = (width * current_height) // height
+ new_width = int(round(width * (current_height / height), 7))
padding = (current_width - new_width) // 2
current_width -= padding * 2
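The same rounded expression is also used in the llava_next_video and llava_onevision processors below. A minimal sketch of the failure mode it guards against (the number is a stand-in, not a value from any model): `int()` truncates, so a result that should be exactly 600 but lands a hair below it due to floating-point error would lose a full row of features, while rounding to 7 decimals first recovers the intended value.

```python
# Stand-in for height * (current_width / width) when the exact result should be 600
value = 599.9999999999999

print(int(value))            # 599 -> off-by-one feature count
print(int(round(value, 7)))  # 600 -> rounding to 7 decimals before truncating fixes it
```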
diff --git a/src/transformers/models/llava_next_video/processing_llava_next_video.py b/src/transformers/models/llava_next_video/processing_llava_next_video.py
index 349a2253a233..8a1294611b0d 100644
--- a/src/transformers/models/llava_next_video/processing_llava_next_video.py
+++ b/src/transformers/models/llava_next_video/processing_llava_next_video.py
@@ -253,11 +253,11 @@ def _get_unpadded_features(self, height, width, patches_height, patches_width, s
original_aspect_ratio = width / height
current_aspect_ratio = current_width / current_height
if original_aspect_ratio > current_aspect_ratio:
- new_height = (height * current_width) // width
+ new_height = int(round(height * (current_width / width), 7))
padding = (current_height - new_height) // 2
current_height -= padding * 2
else:
- new_width = (width * current_height) // height
+ new_width = int(round(width * (current_height / height), 7))
padding = (current_width - new_width) // 2
current_width -= padding * 2
diff --git a/src/transformers/models/llava_onevision/processing_llava_onevision.py b/src/transformers/models/llava_onevision/processing_llava_onevision.py
index eded6084e91e..f4ca90f28c21 100644
--- a/src/transformers/models/llava_onevision/processing_llava_onevision.py
+++ b/src/transformers/models/llava_onevision/processing_llava_onevision.py
@@ -225,6 +225,7 @@ def _get_number_of_features(self, orig_height: int, orig_width: int, height: int
num_image_tokens = unpadded_features + newline_features + base_features
return num_image_tokens
+ # Adapted from transformers.models.llava_next.processing_llava_next.LlavaNextProcessor._get_unpadded_features
def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width):
"""
Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA
@@ -237,11 +238,11 @@ def _get_unpadded_features(self, height, width, patches_height, patches_width, s
original_aspect_ratio = width / height
current_aspect_ratio = current_width / current_height
if original_aspect_ratio > current_aspect_ratio:
- new_height = int(height * (current_width / width))
+ new_height = int(round(height * (current_width / width), 7))
padding = (current_height - new_height) // 2
current_height -= padding * 2
else:
- new_width = int(width * (current_height / height))
+ new_width = int(round(width * (current_height / height), 7))
padding = (current_width - new_width) // 2
current_width -= padding * 2
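Note (reviewer sketch, not part of the patch): the three `_get_unpadded_features` hunks above switch to `int(round(..., 7))`, presumably so the processor's token count matches the float arithmetic used elsewhere. A minimal illustration of the floating-point edge case this guards against, with toy numbers chosen only for demonstration:

```python
# Toy illustration: mathematically height * current_width / width == 29 here,
# but plain float truncation can land one below the true value.
height, width, current_width = 100, 100, 29

print(int(height * (current_width / width)))            # 28 on IEEE-754 doubles (old llava_onevision path)
print(int(round(height * (current_width / width), 7)))  # 29 (new behaviour in all three processors)
print((height * current_width) // width)                # 29 (old llava_next integer path)
```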
diff --git a/src/transformers/models/wav2vec2_bert/convert_wav2vec2_seamless_checkpoint.py b/src/transformers/models/wav2vec2_bert/convert_wav2vec2_seamless_checkpoint.py
index 6405f4547011..adead75bf5de 100644
--- a/src/transformers/models/wav2vec2_bert/convert_wav2vec2_seamless_checkpoint.py
+++ b/src/transformers/models/wav2vec2_bert/convert_wav2vec2_seamless_checkpoint.py
@@ -183,7 +183,7 @@ def convert_wav2vec2_bert_checkpoint(
with torch.no_grad():
outputs = hf_wav2vec(**inputs)
- torch.testing.assert_close(original_output, outputs.last_hidden_state, atol=5e-3, rtol=5e-3)
+ torch.testing.assert_close(original_output, outputs.last_hidden_state, rtol=5e-3, atol=5e-3)
if __name__ == "__main__":
diff --git a/src/transformers/models/zamba/modeling_zamba.py b/src/transformers/models/zamba/modeling_zamba.py
index a25cfbc42862..54c88afb6fea 100644
--- a/src/transformers/models/zamba/modeling_zamba.py
+++ b/src/transformers/models/zamba/modeling_zamba.py
@@ -272,7 +272,6 @@ def forward(
layer_idx: int,
attention_mask: Optional[torch.Tensor],
past_key_value: Optional[ZambaHybridDynamicCache] = None,
- cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
@@ -621,11 +620,9 @@ def forward(
original_hidden_states: torch.Tensor,
layer_idx: int,
attention_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[ZambaHybridDynamicCache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
- cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
@@ -638,7 +635,6 @@ def forward(
layer_idx (`int`): layer_idx in the forward pass. Used to distinguish Zamba's tied transformer layers.
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
- position_ids (`torch.LongTensor`, *optional*): token positions of shape `(batch, seq_len)`. Used for positional encodings.
past_key_value (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
@@ -655,11 +651,9 @@ def forward(
hidden_states=hidden_states,
layer_idx=layer_idx,
attention_mask=attention_mask,
- position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
- cache_position=cache_position,
**kwargs,
)
# feed-forward (MLP)
@@ -688,12 +682,12 @@ def forward(
layer_idx: int = None,
attention_mask: Optional[torch.Tensor] = None,
causal_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[ZambaHybridDynamicCache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
transformer_hidden_states: Optional[torch.Tensor] = None,
+ **kwargs,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
@@ -756,7 +750,6 @@ def forward(
layer_idx: int = None,
attention_mask: Optional[torch.Tensor] = None,
causal_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[ZambaHybridDynamicCache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
@@ -786,7 +779,6 @@ def forward(
original_hidden_states=original_hidden_states,
layer_idx=layer_idx,
attention_mask=causal_mask,
- position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
@@ -804,7 +796,6 @@ def forward(
hidden_states,
transformer_hidden_states=transformer_hidden_states,
attention_mask=attention_mask,
- position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
@@ -1108,7 +1099,6 @@ def forward(
layer_idx,
attention_mask,
causal_mask,
- position_ids,
past_key_values,
output_attentions,
use_cache,
@@ -1121,7 +1111,6 @@ def forward(
layer_idx=layer_idx,
attention_mask=attention_mask,
causal_mask=causal_mask,
- position_ids=position_ids,
past_key_value=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
diff --git a/src/transformers/models/zamba2/__init__.py b/src/transformers/models/zamba2/__init__.py
new file mode 100644
index 000000000000..00db458c72eb
--- /dev/null
+++ b/src/transformers/models/zamba2/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_zamba2 import *
+ from .modeling_zamba2 import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
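Sanity-check sketch for the lazy-module wiring above (not part of the patch): importing the package stays cheap, and exported names are resolved on first access.

```python
# Names listed in each submodule's __all__ are resolved lazily through _LazyModule.
from transformers.models.zamba2 import Zamba2Config  # triggers the configuration_zamba2 import

print(Zamba2Config.model_type)  # "zamba2"
```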
diff --git a/src/transformers/models/zamba2/configuration_zamba2.py b/src/transformers/models/zamba2/configuration_zamba2.py
new file mode 100644
index 000000000000..975e9687358e
--- /dev/null
+++ b/src/transformers/models/zamba2/configuration_zamba2.py
@@ -0,0 +1,236 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/zamba2/modular_zamba2.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_zamba2.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2024 Zyphra Technologies and the HuggingFace Inc. team. All rights reserved.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ...configuration_utils import PretrainedConfig
+
+
+class Zamba2Config(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Zamba2Model`]. It is used to instantiate a
+ Zamba2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the Zamba2 model.
+
+ [Zyphra/Zamba2-2.7B](https://huggingface.co/Zyphra/Zamba2-2.7B)
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+ Args:
+ vocab_size (`int`, *optional*, defaults to 32000):
+ Vocabulary size of the Zamba2 model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`Zamba2Model`]
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
+ The maximum sequence length that this model might ever be used with.
+ hidden_size (`int`, *optional*, defaults to 2560):
+ Dimension of the hidden representations.
+ num_hidden_layers (`int`, *optional*, defaults to 54):
+ Number of hidden layers in the model.
+ layers_block_type (`list`, *optional*):
+ List of layer types, which can be either "mamba" or "hybrid".
+ mamba_d_state (`int`, *optional*, defaults to 64): shape of the state space latents.
+ mamba_d_conv (`int`, *optional*, defaults to 4): Size of the convolution kernel.
+ mamba_expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size.
+ mamba_ngroups (`int`, *optional*, defaults to 1):
+ Number of groups for the evolution matrices of mamba 2.
+ time_step_min (`float`, *optional*, defaults to 0.001):
+ Minimum `time_step` used to bound `dt_proj.bias`.
+ time_step_max (`float`, *optional*, defaults to 0.1):
+ Maximum `time_step` used to bound `dt_proj.bias`.
+ time_step_floor (`float`, *optional*, defaults to 0.0001):
+ Minimum clamping value of the `dt_proj.bias` layer initialization.
+ time_step_limit (`tuple`, *optional*):
+ Accepted range of time step values.
+ n_mamba_heads (`int`, *optional*, defaults to 8):
+ Number of heads for the evolution matrices of mamba 2.
+ use_conv_bias (`bool`, *optional*, defaults to `True`):
+ Whether or not to use bias in the convolution layer of the mixer block.
+ chunk_size (`int`, *optional*, defaults to 256):
+ Size of the chunks that will comprise the sequence.
+ add_bias_linear (`bool`, *optional*, defaults to `False`):
+ Flag indicating whether or not to use bias in various layers
+ intermediate_size (`int`, *optional*, defaults to 4 * hidden_size):
+ Dimension of the MLP representations.
+ hidden_act (`str`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the MLP.
+ num_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ num_key_value_heads (`int`, *optional*):
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+ `num_key_value_heads=None`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+ by meanpooling all the original heads within that group. For more details checkout [this
+ paper](https://arxiv.org/pdf/2305.13245.pdf).
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ num_mem_blocks (`int`, *optional*, defaults to 1):
+ Number of unshared transformer blocks.
+ use_shared_attention_adapter (`bool`, *optional*, defaults to `False`):
+ If True, unshared adapters (formally the same as LoRA but used in the base model) will be added to the q, k, v projectors in the shared attention layers.
+ adapter_rank (`int`, *optional*, defaults to 128):
+ Rank of the adapter in the shared MLP and shared attention layers.
+ use_mem_rope (`bool`, *optional*, defaults to `False`):
+ If True, includes RoPE in the shared attention layers.
+ rope_theta (`float`, *optional*, defaults to `10000.0`):
+ The base period of the RoPE embeddings.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the rms normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
+ Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
+ integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
+ logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
+            sequence may use a lot of memory, so setting `num_logits_to_keep=1` will reduce memory footprint
+ significantly.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ The id of the padding token.
+ bos_token_id (`int`, *optional*, defaults to 1):
+ The id of the "beginning-of-sequence" token.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ The id of the "end-of-sequence" token.
+ use_long_context (`bool`, *optional*, defaults to `False`):
+ Activates the context-extended version of Zamba by modifying RoPE.
+ ```python
+ >>> from transformers import Zamba2Model, Zamba2Config
+ >>> # Initializing a Zamba2-2.7B style configuration
+ >>> configuration = Zamba2Config()
+ >>> # Initializing a model from the Zamba2-2.7B style configuration
+ >>> model = Zamba2Model(configuration)
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ """
+
+ model_type = "zamba2"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=32000,
+ max_position_embeddings=4096,
+ hidden_size=2560,
+ num_hidden_layers=54,
+ layers_block_type=None,
+ mamba_d_state=64,
+ mamba_d_conv=4,
+ mamba_expand=2,
+ mamba_ngroups=1,
+ time_step_min=0.001,
+ time_step_max=0.1,
+ time_step_floor=1e-4,
+ time_step_limit=None,
+ n_mamba_heads=8,
+ use_conv_bias=True,
+ chunk_size=256,
+ add_bias_linear=False,
+ intermediate_size=None,
+ hidden_act="gelu",
+ num_attention_heads=32,
+ num_key_value_heads=None,
+ attention_dropout=0.0,
+ num_mem_blocks=1,
+ use_shared_attention_adapter=False,
+ adapter_rank=128,
+ use_mem_rope=False,
+ rope_theta=10000,
+ initializer_range=0.02,
+ rms_norm_eps=1e-5,
+ use_cache=True,
+ num_logits_to_keep=1,
+ pad_token_id=0,
+ bos_token_id=1,
+ eos_token_id=2,
+ use_long_context=False,
+ **kwargs,
+ ):
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ **kwargs,
+ )
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ if intermediate_size is None:
+ self.intermediate_size = 4 * hidden_size
+ else:
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.num_mem_blocks = num_mem_blocks
+ self.attention_hidden_size = 2 * hidden_size
+ self.attention_head_dim = 2 * self.hidden_size // self.num_attention_heads
+ self.attention_dropout = attention_dropout
+ self.use_mem_rope = use_mem_rope
+ self.use_long_context = use_long_context
+ if use_mem_rope and use_long_context:
+ a = 8
+ rope_theta = rope_theta * a ** (self.attention_head_dim / (self.attention_head_dim - 2))
+ self.rope_theta = rope_theta
+ self.mamba_d_state = mamba_d_state
+ self.mamba_d_conv = mamba_d_conv
+ self.mamba_expand = mamba_expand
+ self.add_bias_linear = add_bias_linear
+ self.mamba_ngroups = mamba_ngroups
+ self.n_mamba_heads = n_mamba_heads
+ self.mamba_headdim = int(mamba_expand * hidden_size) // n_mamba_heads
+ self.use_conv_bias = use_conv_bias
+ self.chunk_size = chunk_size
+ self.time_step_limit = time_step_limit
+ self.use_shared_attention_adapter = use_shared_attention_adapter
+ self.adapter_rank = adapter_rank
+ self.time_step_min = time_step_min
+ self.time_step_max = time_step_max
+ self.time_step_floor = time_step_floor
+ if use_long_context:
+ self.max_position_embeddings = 16384
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+ self.num_key_value_heads = num_key_value_heads
+ self.num_attention_heads = num_attention_heads
+ self.kv_channels = self.hidden_size // self.num_attention_heads
+ self.num_query_groups = self.num_attention_heads
+        # Below, "mamba" stands for mamba layer, "hybrid" stands for hybrid layer (composed of a shared transformer block followed by a mamba layer)
+ if layers_block_type is None:
+ self.layers_block_type = (
+ ["mamba"]
+ + (["mamba"] * 5 + ["hybrid"]) * 7
+ + ["mamba"] * 4
+ + ["hybrid"]
+ + ["mamba"] * 3
+ + ["hybrid"]
+ + ["mamba"] * 2
+ )
+ else:
+ self.layers_block_type = layers_block_type
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.use_cache = use_cache
+ self.num_logits_to_keep = num_logits_to_keep
+ self.hybrid_layer_ids = [index for index, type in enumerate(self.layers_block_type) if type == "hybrid"]
+
+
+__all__ = ["Zamba2Config"]
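A minimal sketch of the derived attributes computed in `__init__` above, using the documented defaults (reviewer note, not part of the patch; values follow directly from the code):

```python
from transformers.models.zamba2.configuration_zamba2 import Zamba2Config

config = Zamba2Config()
assert config.intermediate_size == 4 * config.hidden_size               # 10240, since intermediate_size=None
assert config.attention_hidden_size == 2 * config.hidden_size           # 5120: attention sees a concatenated input
assert config.attention_head_dim == 2 * config.hidden_size // config.num_attention_heads  # 160
assert len(config.layers_block_type) == 54                              # default mamba/hybrid pattern
print(config.hybrid_layer_ids)  # [6, 12, 18, 24, 30, 36, 42, 47, 51]
```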
diff --git a/src/transformers/models/zamba2/modeling_zamba2.py b/src/transformers/models/zamba2/modeling_zamba2.py
new file mode 100644
index 000000000000..04ff98649414
--- /dev/null
+++ b/src/transformers/models/zamba2/modeling_zamba2.py
@@ -0,0 +1,1909 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/zamba2/modular_zamba2.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_zamba2.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2024 Zyphra Technologies and the HuggingFace Inc. team. All rights reserved.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import math
+import re
+from itertools import cycle
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache
+from ...generation import GenerationMixin
+from ...modeling_attn_mask_utils import AttentionMaskConverter
+from ...modeling_flash_attention_utils import FlashAttentionKwargs
+from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
+from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
+from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+from ...processing_utils import Unpack
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from ...utils.deprecation import deprecate_kwarg
+from ...utils.import_utils import is_causal_conv1d_available, is_mamba_ssm_available
+from .configuration_zamba2 import Zamba2Config
+
+
+if is_mamba_ssm_available():
+ from mamba_ssm.ops.triton.selective_state_update import selective_state_update
+ from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined
+else:
+ selective_state_update, mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined = None, None, None
+
+if is_causal_conv1d_available():
+ from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
+else:
+ causal_conv1d_update, causal_conv1d_fn = None, None
+
+
+logger = logging.get_logger(__name__)
+
+
+_CONFIG_FOR_DOC = "Zyphra/Zamba2-2.7B"
+
+
+class Zamba2RMSNormGated(torch.nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states, gate=None):
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+
+ if gate is not None:
+ hidden_states = hidden_states * nn.functional.silu(gate.to(torch.float32))
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+
+ return self.weight * hidden_states.to(input_dtype)
+
+
+class Zamba2RMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ Zamba2RMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+ return self.weight * hidden_states.to(input_dtype)
+
+ def extra_repr(self):
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
+
+
+class Zamba2HybridDynamicCache(DynamicCache):
+ """
+ A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
+ (which has a constant shape regardless of seq_len).
+
+ This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
+    and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape of each tensor is as follows:
+ For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
+ while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
+ For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
+    while `conv_states` represents the convolution state and has a shape of `(batch_size, intermediate_size + 2 * n_groups * d_state, d_conv)`,
+    and `ssm_states` represents the ssm state and has a shape of `(batch_size, n_mamba_heads, mamba_headdim, d_state)`.
+ """
+
+ def __init__(
+ self, config: Zamba2Config, batch_size: int, dtype: torch.dtype = torch.float16, device: Optional[str] = None
+ ):
+ self.dtype = dtype
+ self.layers_block_type = config.layers_block_type
+ self.has_previous_state = False
+ self.intermediate_size = int(config.mamba_expand * config.hidden_size)
+ self.ssm_state_size = config.mamba_d_state
+ self.conv_kernel_size = config.mamba_d_conv
+ self.n_mamba_heads = config.n_mamba_heads
+ self.transformer_layers = []
+ self._modules = {}
+ self._parameters = {}
+ self._buffers = {}
+ self.conv_states = {}
+ self.ssm_states = {}
+ for i in range(config.num_hidden_layers):
+ self.conv_states[i] = torch.zeros(
+ batch_size,
+ self.intermediate_size + 2 * config.mamba_ngroups * config.mamba_d_state,
+ self.conv_kernel_size,
+ device=device,
+ dtype=dtype,
+ )
+ self.ssm_states[i] = torch.zeros(
+ batch_size, self.n_mamba_heads, config.mamba_headdim, self.ssm_state_size, device=device, dtype=dtype
+ )
+ if self.layers_block_type[i] == "hybrid":
+ self.transformer_layers.append(i)
+ self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
+ self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
+
+ def update(
+ self,
+ key_states: torch.Tensor,
+ value_states: torch.Tensor,
+ layer_idx: int,
+ cache_kwargs: Optional[Dict[str, Any]] = None,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ # Update the cache
+ if self.key_cache[layer_idx].shape[-1] == 0:
+ self.key_cache[layer_idx] = key_states
+ self.value_cache[layer_idx] = value_states
+ else:
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)
+
+ return self.key_cache[layer_idx], self.value_cache[layer_idx]
+
+ def reorder_cache(self, beam_idx: torch.LongTensor):
+ """Reorders the cache for beam search, given the selected beam indices."""
+ for layer_idx in range(len(self.key_cache)):
+ device = self.key_cache[layer_idx].device
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
+ device = self.value_cache[layer_idx].device
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
+
+ device = self.conv_states[layer_idx].device
+ self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
+ device = self.ssm_states[layer_idx].device
+ self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))
+
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
+        # take any layer that contains a cache and is not an empty tensor
+ layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
+ if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0:
+ return 0
+ return self.key_cache[layer_idx].shape[-2]
+
+ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
+ raise NotImplementedError("Zamba2HybridDynamicCache does not have a legacy cache equivalent.")
+
+ @classmethod
+ def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None) -> "DynamicCache":
+ raise NotImplementedError("Zamba2HybridDynamicCache does not have a legacy cache equivalent.")
+
+ def update_conv_state(
+ self, layer_idx: int, new_conv_state: torch.Tensor, cache_position: torch.LongTensor
+ ) -> torch.Tensor:
+ conv_state = self.conv_states[layer_idx]
+ cache_position = cache_position.clamp(0, self.conv_kernel_size - 1)
+
+ conv_state = conv_state.roll(shifts=-1, dims=-1)
+ conv_state[:, :, cache_position] = new_conv_state.to(conv_state.device)
+ self.conv_states[layer_idx].zero_()
+ self.conv_states[layer_idx] += conv_state
+ return self.conv_states[layer_idx]
+
+    def reset(self):
+        # conv_states / ssm_states are per-layer dicts of tensors, so zero them layer by layer
+        for layer_idx in range(len(self.conv_states)):
+            self.conv_states[layer_idx].zero_()
+            self.ssm_states[layer_idx].zero_()
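A small sketch of the resulting cache layout with toy sizes (reviewer note, not part of the patch; `layers_block_type` is passed explicitly so it matches `num_hidden_layers`):

```python
import torch

from transformers.models.zamba2.configuration_zamba2 import Zamba2Config
from transformers.models.zamba2.modeling_zamba2 import Zamba2HybridDynamicCache

config = Zamba2Config(hidden_size=64, num_hidden_layers=2, layers_block_type=["mamba", "hybrid"])
cache = Zamba2HybridDynamicCache(config, batch_size=1, dtype=torch.float32)

print(cache.conv_states[0].shape)   # torch.Size([1, 256, 4]): intermediate_size + 2 * ngroups * d_state, conv kernel
print(cache.ssm_states[0].shape)    # torch.Size([1, 8, 16, 64]): n_mamba_heads, mamba_headdim, ssm_state_size
print(cache.transformer_layers)     # [1]: only the "hybrid" layer keeps an attention KV cache
```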
+
+
+class Zamba2RotaryEmbedding(nn.Module):
+ def __init__(
+ self,
+ config: Zamba2Config,
+ device=None,
+ ):
+ super().__init__()
+ # BC: "rope_type" was originally "type"
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+ else:
+ self.rope_type = "default"
+ self.max_seq_len_cached = config.max_position_embeddings
+ self.original_max_seq_len = config.max_position_embeddings
+
+ self.config = config
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+ # we cannot use the config here to parameterize because of a factor 2 for the head_dim
+ inv_freq, self.attention_scaling = self.rope_init_fn(
+ device=device, base=config.rope_theta, dim=config.attention_head_dim
+ )
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+ self.original_inv_freq = self.inv_freq
+
+ def _dynamic_frequency_update(self, position_ids, device):
+ """
+ dynamic RoPE layers should recompute `inv_freq` in the following situations:
+ 1 - growing beyond the cached sequence length (allow scaling)
+ 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
+ """
+ seq_len = torch.max(position_ids) + 1
+ if seq_len > self.max_seq_len_cached: # growth
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
+ self.max_seq_len_cached = seq_len
+
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
+ # This .to() is needed if the model has been moved to a device after being initialized (because
+ # the buffer is automatically moved, but not the original copy)
+ self.original_inv_freq = self.original_inv_freq.to(device)
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
+ self.max_seq_len_cached = self.original_max_seq_len
+
+ @torch.no_grad()
+ def forward(self, x, position_ids):
+ if "dynamic" in self.rope_type:
+ self._dynamic_frequency_update(position_ids, device=x.device)
+
+ # Core RoPE block
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+ position_ids_expanded = position_ids[:, None, :].float()
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
+ device_type = x.device.type
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+ with torch.autocast(device_type=device_type, enabled=False):
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos()
+ sin = emb.sin()
+
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
+ cos = cos * self.attention_scaling
+ sin = sin * self.attention_scaling
+
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
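A one-line shape check for the helper above (sketch only, not part of the patch):

```python
import torch

from transformers.models.zamba2.modeling_zamba2 import repeat_kv

kv = torch.randn(2, 4, 5, 16)   # (batch, num_key_value_heads, seq_len, head_dim)
print(repeat_kv(kv, 3).shape)   # torch.Size([2, 12, 5, 16])
```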
+
+
+def eager_attention_forward(
+ module: nn.Module,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ attention_mask: Optional[torch.Tensor],
+ scaling: float,
+ dropout: float = 0.0,
+ **kwargs,
+):
+ key_states = repeat_kv(key, module.num_key_value_groups)
+ value_states = repeat_kv(value, module.num_key_value_groups)
+
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
+ if attention_mask is not None:
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+ attn_weights = attn_weights + causal_mask
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+ attn_output = attn_output.transpose(1, 2).contiguous()
+
+ return attn_output, attn_weights
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`, *optional*):
+ Deprecated and unused.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos.unsqueeze(unsqueeze_dim)
+ sin = sin.unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
+
+
+class Zamba2Attention(nn.Module):
+ """
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
+ and "Generating Long Sequences with Sparse Transformers".
+
+ Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
+ The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
+ The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
+ (see fig. 2 in https://arxiv.org/pdf/2405.16712).
+ Additionally, replaced
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
+
+    Finally, this attention layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this
+    layer is tied, un-tied adapter modules (formally the same as LoRA but used in the base model) are added to the q, k, v projectors to increase
+ expressivity with a small memory overhead (see Fig. 2 of https://arxiv.org/pdf/2411.15242).
+ """
+
+ def __init__(
+ self,
+ config: Zamba2Config,
+ layer_idx: Optional[int] = None,
+ num_fwd_mem_blocks: int = None,
+ block_id: int = None,
+ ):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+
+ self.attention_hidden_size = config.attention_hidden_size
+ self.head_dim = config.attention_head_dim
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+ self.max_position_embeddings = config.max_position_embeddings
+ self.scaling = (self.head_dim / 2) ** -0.5
+ self.is_causal = True
+ self.attention_dropout = config.attention_dropout
+
+ self.q_proj = nn.Linear(config.attention_hidden_size, config.num_attention_heads * self.head_dim, bias=False)
+ self.k_proj = nn.Linear(config.attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
+ self.v_proj = nn.Linear(config.attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
+ self.num_fwd_mem_blocks = num_fwd_mem_blocks
+ self.layer_block_map = config.hybrid_layer_ids
+ self.block_id = block_id
+
+ if config.use_shared_attention_adapter:
+ self.linear_q_adapter_list = nn.ModuleList([])
+ self.linear_k_adapter_list = nn.ModuleList([])
+ self.linear_v_adapter_list = nn.ModuleList([])
+
+ for i in range(self.num_fwd_mem_blocks):
+ if i % config.num_mem_blocks == block_id:
+ linear_q_adapter = nn.Sequential(
+ nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False),
+ nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False),
+ )
+ linear_k_adapter = nn.Sequential(
+ nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False),
+ nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False),
+ )
+ linear_v_adapter = nn.Sequential(
+ nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False),
+ nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False),
+ )
+ else:
+ linear_q_adapter = nn.Identity()
+ linear_k_adapter = nn.Identity()
+ linear_v_adapter = nn.Identity()
+ self.linear_q_adapter_list.append(linear_q_adapter)
+ self.linear_k_adapter_list.append(linear_k_adapter)
+ self.linear_v_adapter_list.append(linear_v_adapter)
+
+ self.layer_dic = {value: index for index, value in enumerate(self.layer_block_map)}
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ layer_idx: int,
+ attention_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Zamba2HybridDynamicCache] = None,
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ input_shape = hidden_states.shape[:-1]
+ hidden_shape = (*input_shape, -1, self.head_dim)
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+ if self.config.use_shared_attention_adapter:
+ adapter_layer_idx = self.layer_dic[layer_idx]
+ query_states = query_states + self.linear_q_adapter_list[adapter_layer_idx](hidden_states)
+ key_states = key_states + self.linear_k_adapter_list[adapter_layer_idx](hidden_states)
+ value_states = value_states + self.linear_v_adapter_list[adapter_layer_idx](hidden_states)
+
+ query_states = query_states.view(hidden_shape).transpose(1, 2)
+ key_states = key_states.view(hidden_shape).transpose(1, 2)
+ value_states = value_states.view(hidden_shape).transpose(1, 2)
+
+ if self.config.use_mem_rope:
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ key_states, value_states = past_key_value.update(key_states, value_states, layer_idx)
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
+ logger.warning_once(
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ else:
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ attn_output, attn_weights = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ scaling=self.scaling,
+ **kwargs,
+ )
+
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+ attn_output = self.o_proj(attn_output)
+ return attn_output, attn_weights
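Worked out with the default configuration values, the modified scaling described in the class docstring looks as follows (reviewer sketch, not part of the patch):

```python
# With hidden_size=2560 and num_attention_heads=32 the per-head dimension is doubled,
# because the attention input concatenates original_hidden_states with the mamba output,
# so the softmax temperature divides by sqrt(head_dim / 2) rather than sqrt(head_dim).
head_dim = 2 * 2560 // 32          # 160
scaling = (head_dim / 2) ** -0.5   # 1 / sqrt(80) ~= 0.1118
```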
+
+
+# Helper methods for segment sum computation
+
+
+def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int):
+ """
+ Padding x tensor with `pad_size` on the seq_len dim (dim=1)
+
+    Assumes that we only have tensors with either 4 or 3 dimensions
+ """
+ pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0)
+
+ return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0)
+
+
+def reshape_into_chunks(input_tensor, pad_size, chunk_size):
+ """
+ Padding input_tensor with `pad_size` on the seq_len dim (dim=1) and
+ simultaneously splitting it into chunk sequences.
+
+    Assumes that we only have tensors with either 4 or 3 dimensions
+ """
+ # [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...]
+ input_tensor = pad_tensor_by_size(input_tensor, pad_size)
+
+ if len(input_tensor.shape) == 3:
+ # [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads]
+ return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2])
+ else:
+ # [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] -> [bsz, -1, chunk_size, num_heads, head_dim or state_size]
+ return input_tensor.reshape(
+ input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3]
+ )
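A small shape sketch for the helper above, with toy values (reviewer note, not part of the patch):

```python
import torch

from transformers.models.zamba2.modeling_zamba2 import reshape_into_chunks

x = torch.randn(2, 10, 4)                                       # (batch, seq_len, num_heads)
print(reshape_into_chunks(x, pad_size=6, chunk_size=8).shape)   # torch.Size([2, 2, 8, 4])
```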
+
+
+def segment_sum(input_tensor):
+ """
+ More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions.
+ """
+ chunk_size = input_tensor.size(-1)
+ # 1. expand input tensor to have an additional dimension and repeat along that dimension
+ # [..., chunk_size] -> [..., chunk_size, chunk_size]
+ input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size)
+    # 2. create a strictly lower triangular mask (diagonal excluded) to zero out elements on and above the diagonal
+ mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1)
+ input_tensor = input_tensor.masked_fill(~mask, 0)
+ # 3. compute actual cumsum
+ tensor_segsum = torch.cumsum(input_tensor, dim=-2)
+
+ # 4. apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time)
+ mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0)
+ tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf)
+ return tensor_segsum
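A numeric sketch of what `segment_sum` produces for a single chunk (reviewer note, not part of the patch): entry `[i, j]` is `x[j+1] + ... + x[i]`, zero on the diagonal and `-inf` above it, so taking `exp` of it later yields the per-chunk decay products.

```python
import torch

from transformers.models.zamba2.modeling_zamba2 import segment_sum

x = torch.tensor([1.0, 2.0, 3.0])
print(segment_sum(x))
# tensor([[0., -inf, -inf],
#         [2., 0., -inf],
#         [5., 3., 0.]])
```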
+
+
+is_fast_path_available = all((selective_state_update, causal_conv1d_fn, causal_conv1d_update))
+
+
+class Zamba2MambaMixer(nn.Module):
+ """
+ Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
+ A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
+ ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
+ and is why Mamba is called **selective** state spaces)
+ """
+
+ def __init__(self, config: Zamba2Config, layer_idx: int = None):
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.ssm_state_size = config.mamba_d_state
+ self.conv_kernel_size = config.mamba_d_conv
+ self.intermediate_size = int(config.mamba_expand * self.hidden_size)
+ self.layer_idx = layer_idx
+ self.use_conv_bias = config.use_conv_bias
+ self.activation = "silu"
+ self.act = nn.SiLU()
+
+ self.n_groups = config.mamba_ngroups
+ self.head_dim = config.mamba_headdim
+ self.num_heads = self.config.n_mamba_heads
+ self.chunk_size = config.chunk_size
+
+ self.time_step_limit = config.time_step_limit
+ self.time_step_min = config.time_step_min
+ self.time_step_max = config.time_step_max
+
+ self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
+ self.conv1d = nn.Conv1d(
+ in_channels=self.conv_dim,
+ out_channels=self.conv_dim,
+ bias=True,
+ kernel_size=config.mamba_d_conv,
+ groups=self.conv_dim,
+ padding=config.mamba_d_conv - 1,
+ )
+
+ # projection of the input hidden states
+ projection_size = self.intermediate_size + self.conv_dim + self.num_heads
+ self.in_proj = nn.Linear(
+ self.hidden_size,
+ projection_size,
+ bias=config.add_bias_linear,
+ )
+        # selective projection used to make dt, B and C input dependent
+
+ # time step projection (discretization)
+ # instantiate once and copy inv_dt in init_weights of PretrainedModel
+ self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
+
+ # S4D real initialization. These are not discretized!
+ # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
+ A = torch.arange(1, self.num_heads + 1)
+ self.A_log = nn.Parameter(torch.log(A))
+ self.A_log._no_weight_decay = True
+ self.norm = Zamba2RMSNormGated(self.intermediate_size, eps=1e-5)
+ self.D = nn.Parameter(torch.ones(self.num_heads))
+ self.D._no_weight_decay = True
+
+ self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.add_bias_linear)
+
+ if not is_fast_path_available:
+ logger.warning_once(
+                "The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
+ " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
+ " https://github.com/Dao-AILab/causal-conv1d"
+ )
+
+ def cuda_kernels_forward(
+ self,
+ hidden_states: torch.Tensor,
+ cache_params: Optional[Zamba2HybridDynamicCache] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ ):
+ # set up dimensions for reshapes later
+
+ batch_size, seq_len, _ = hidden_states.shape
+ groups_time_state_size = self.n_groups * self.ssm_state_size
+ d_to_remove = 2 * self.intermediate_size + 2 * self.n_groups * self.ssm_state_size + self.num_heads
+
+ # getting projected states from cache if it exists
+ if cache_params is not None and cache_params.has_previous_state:
+ in_projected_states = self.in_proj(hidden_states.squeeze(1)) # (B 2D)
+ d_mlp = (in_projected_states.shape[-1] - d_to_remove) // 2
+ split_projection_dim = [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads]
+ _, _, gate, hidden_states_B_C, dt = torch.split(in_projected_states, split_projection_dim, dim=-1)
+
+ hidden_states_B_C = causal_conv1d_update(
+ hidden_states_B_C,
+ cache_params.conv_states[self.layer_idx],
+ self.conv1d.weight.squeeze(1),
+ self.conv1d.bias,
+ self.activation,
+ )
+
+ hidden_states, B, C = torch.split(
+ hidden_states_B_C,
+ [self.intermediate_size, groups_time_state_size, groups_time_state_size],
+ dim=-1,
+ )
+ A = -torch.exp(self.A_log.float()) # (nheads,)
+
+ A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
+ dt = dt[:, :, None].expand(-1, -1, self.head_dim)
+ dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
+ D = self.D[:, None, ...].expand(-1, self.head_dim)
+ B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
+ C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
+ hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
+ hidden_states = selective_state_update(
+ cache_params.ssm_states[self.layer_idx],
+ hidden_states_reshaped,
+ dt,
+ A,
+ B,
+ C,
+ D,
+ z=None,
+ dt_bias=dt_bias,
+ dt_softplus=True,
+ )
+ hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
+ hidden_states = self.norm(hidden_states, gate)
+ out = self.out_proj(hidden_states)[:, None, ...]
+ # if no cache is found, calling the kernel
+ else:
+ if attention_mask is not None and not torch.all(attention_mask == 1):
+ # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
+ dtype = hidden_states.dtype
+ hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
+ # 1. Gated MLP's linear projection
+ projected_states = self.in_proj(hidden_states)
+ A = -torch.exp(self.A_log.float()) # (num_heads) or (intermediate_size, state_size)
+ dt_limit_kwargs = {} if self.time_step_limit is None else {"dt_limit": self.time_step_limit}
+ if attention_mask is not None:
+ input_not_masked = torch.all(attention_mask == 1)
+ else:
+ input_not_masked = True
+
+ if self.training and cache_params is None and input_not_masked:
+ out, ssm_state = mamba_split_conv1d_scan_combined(
+ projected_states,
+ self.conv1d.weight.squeeze(1),
+ self.conv1d.bias,
+ self.dt_bias,
+ A,
+ D=self.D,
+ chunk_size=self.chunk_size,
+ seq_idx=None,
+ activation=self.activation,
+ rmsnorm_weight=self.norm.weight,
+ rmsnorm_eps=self.norm.variance_epsilon,
+ outproj_weight=self.out_proj.weight,
+ outproj_bias=self.out_proj.bias,
+ headdim=self.head_dim,
+ ngroups=self.n_groups,
+ norm_before_gate=False,
+ return_final_states=True,
+ **dt_limit_kwargs,
+ )
+
+ else:
+ gate, hidden_states_B_C, time_step = torch.split(
+ projected_states,
+ [self.intermediate_size, self.conv_dim, self.num_heads],
+ dim=-1,
+ )
+
+ # 1D Convolution
+ if cache_params is not None:
+ hidden_states_B_C_t = hidden_states_B_C.transpose(1, 2)
+ conv_state = nn.functional.pad(
+ hidden_states_B_C_t, (self.conv_kernel_size - hidden_states_B_C_t.shape[-1], 0)
+ )
+ cache_params.conv_states[self.layer_idx].copy_(conv_state)
+ if causal_conv1d_fn is None or self.activation not in ["silu", "swish"]:
+ hidden_states_B_C = self.act(
+ self.conv1d(hidden_states_B_C.transpose(1, 2)).transpose(1, 2)[:, :seq_len]
+ ) # (B, L, self.d_inner + 2 * ngroups * d_state)
+ else:
+ hidden_states_B_C = causal_conv1d_fn(
+ x=hidden_states_B_C.transpose(1, 2),
+ weight=self.conv1d.weight.squeeze(1),
+ bias=self.conv1d.bias,
+ activation=self.activation,
+ ).transpose(1, 2)[:, :seq_len]
+ hidden_states, B, C = torch.split(
+ hidden_states_B_C,
+ [self.intermediate_size, groups_time_state_size, groups_time_state_size],
+ dim=-1,
+ )
+ if attention_mask is not None and not torch.all(attention_mask == 1):
+ # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
+ dtype = hidden_states.dtype
+ hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
+ scan_output, ssm_state = mamba_chunk_scan_combined(
+ hidden_states.view(batch_size, seq_len, -1, self.head_dim),
+ time_step,
+ A,
+ B.view(batch_size, seq_len, self.n_groups, -1),
+ C.view(batch_size, seq_len, self.n_groups, -1),
+ chunk_size=self.chunk_size,
+ D=self.D,
+ z=None,
+ seq_idx=None,
+ return_final_states=True,
+ dt_bias=self.dt_bias,
+ dt_softplus=True,
+ **dt_limit_kwargs,
+ )
+ if ssm_state is not None and cache_params is not None:
+ cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
+ scan_output = scan_output.view(batch_size, seq_len, -1)
+ # Multiply "gate" branch and apply extra normalization layer
+ scan_output = self.norm(scan_output, gate)
+ out = self.out_proj(scan_output)
+ return out
+
+ # fmt: off
+ def torch_forward(self, input_states, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
+ batch_size, seq_len, _ = input_states.shape
+ dtype = input_states.dtype
+ # Gated MLP's linear projection
+ if cache_params is not None and cache_params.has_previous_state:
+ projected_states = self.in_proj(input_states.squeeze(1))
+ else:
+ if attention_mask is not None and not torch.all(attention_mask==1):
+ # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
+ input_states = (input_states * attention_mask[:, :, None]).to(dtype)
+ projected_states = self.in_proj(input_states)
+ d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size- self.num_heads) // 2
+ _, _, gate, hidden_states, dt = projected_states.split(
+ [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
+ )
+
+ # Convolution sequence transformation
+ if cache_params is not None:
+ ssm_state = cache_params.ssm_states[self.layer_idx].clone()
+ ssm_state = ssm_state.to(hidden_states.device)
+ if cache_params.has_previous_state:
+ gate = gate.unsqueeze(1)
+ conv_state = cache_params.conv_states[self.layer_idx] # [batch, intermediate_size, conv_kernel_size]
+ conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
+ # handle batched generation - states are copied through
+ conv_state[:, :, -1] = hidden_states[:, 0, :] if hidden_states.ndim == 3 else hidden_states
+ cache_params.conv_states[self.layer_idx].copy_(conv_state)
+ hidden_states = torch.sum(conv_state.to(projected_states.device) * self.conv1d.weight[:, 0, :], dim=-1)
+ if self.use_conv_bias:
+ hidden_states += self.conv1d.bias
+ hidden_states = self.act(hidden_states).to(dtype)[:, None, ...] # [batch, 1, intermediate_size] : decoding
+ else:
+ hidden_states = hidden_states.transpose(1,2)
+ conv_state = nn.functional.pad(
+ hidden_states,
+ (self.conv_kernel_size - hidden_states.shape[-1], 0)
+ )
+ cache_params.conv_states[self.layer_idx].copy_(conv_state)
+ hidden_states = self.act(self.conv1d(hidden_states).transpose(1,2))[:, :seq_len, :] # [batch, intermediate_size, seq_len]
+ if attention_mask is not None and not torch.all(attention_mask==1):
+ dtype = hidden_states.dtype
+ # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
+ hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
+ else:
+ ssm_state = torch.zeros(
+ (batch_size, self.num_heads, self.head_dim, self.ssm_state_size),
+ device=hidden_states.device, dtype=dtype
+ )
+ hidden_states = self.act(self.conv1d(hidden_states.transpose(1, 2))[..., :seq_len].transpose(1, 2))
+ hidden_states, B, C = torch.split(hidden_states, [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size], dim=-1)
+ A = -torch.exp(self.A_log.float()) # [num_heads]
+ if cache_params is not None and cache_params.has_previous_state:
+ # Note: there is no need to pad parameter matrices here, as there is just one new token
+ # for batched generation
+ dt = dt[:, None, ...] if dt.ndim == 2 else dt[:, 0, :][:, None, ...]
+ dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
+ # [num_heads] -> [num_heads, head_dim]
+ dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
+
+ dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
+ dt = torch.clamp(dt, self.time_step_min) #, self.time_step_max)
+ A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
+ # [bsz, num_heads, head_dim, state_size]
+ dA = torch.exp(dt[..., None] * A)
+
+ # Discretize B
+ # [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
+ # -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
+ B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
+ B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
+ B = B.reshape(batch_size, -1, B.shape[-1])
+ # [bsz, num_heads, head_dim, state_size]
+ dB = dt[..., None] * B[..., None, :]
+
+ # Discretize x into dB
+ # [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
+ hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
+ dBx = dB * hidden_states[..., None]
+
+ # State calculation
+ cache_params.ssm_states[self.layer_idx].copy_(
+ cache_params.ssm_states[self.layer_idx] * dA + dBx
+ )
+
+ # Subsequent output
+ # [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
+ C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
+ C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
+ C = C.reshape(batch_size, -1, C.shape[-1])
+ # [bsz, num_heads, head_dim]
+
+ ssm_states = cache_params.ssm_states[self.layer_idx].to(C.dtype) # Shape: [b, h, d, n]
+ # Reshape ssm_states to merge the first two dimensions
+ ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size) # Shape: [b*h, d, n]
+ C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1) # Shape: [b*h, n, 1]
+ y = torch.bmm(ssm_states_reshaped, C_reshaped)
+ y = y.view(batch_size, self.num_heads, self.head_dim)
+
+ # D skip connection
+ # [num_heads] -> [num_heads, head_dim]
+ D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
+ y = (y + hidden_states * D).to(y.dtype)
+
+ # [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
+ y = y.reshape(batch_size, -1)[:, None, ...]
+ else:
+ # begin ssd naive implementation without einsums
+ dt = nn.functional.softplus(dt + self.dt_bias)
+ dt = torch.clamp(dt, self.time_step_min)
+ hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
+ B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
+ C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
+ B = B.repeat(1, 1, self.num_heads // self.n_groups, 1)
+ C = C.repeat(1, 1, self.num_heads // self.n_groups, 1)
+ pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
+
+ D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
+
+ # Discretize x and A
+ hidden_states = hidden_states * dt[..., None]
+ A = A.to(hidden_states.dtype) * dt
+
+ # Rearrange into blocks/chunks
+ hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
+
+
+ # [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
+ A = A.permute(0, 3, 1, 2)
+ A_cumsum = torch.cumsum(A, dim=-1)
+
+ # 1. Compute the output for each intra-chunk (diagonal blocks)
+ # This is the analog of a causal mask
+ L = torch.exp(segment_sum(A))
+
+ # First, contraction of C and B to get G (attention-weights like)
+ G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, : ,:] # shape: (b, c, l, s, h, n)
+ G = G_intermediate.sum(dim=-1) # shape: (b, c, l, s, h)
+
+ # Step 2: Compute M, equivalent to applying attention mask to weights
+ M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
+ M = M_intermediate.sum(dim=-1)
+
+ # Step 3: Compute Y_diag (apply to values)
+ Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(3)
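+            # Y_diag is the intra-chunk (diagonal-block) contribution: an attention-like product of the
+            # C/B contractions, masked and decayed by L, applied to the chunked inputs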
+
+            # 2. Compute the state for each intra-chunk
+            # (right term of low-rank factorization of off-diagonal blocks; B terms)
+            decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum))
+ B_decay_contraction = B * decay_states.permute(0, 2, 3, 1)[..., None]
+ # permute back B * decay states
+ states = (B_decay_contraction.permute(0, 1, 3, 2, 4)[..., None] * hidden_states.permute(0, 1, 3, 2, 4)[..., None, :]).sum(dim=3).permute(0, 1, 2, 4, 3)
+ if cache_params is not None and cache_params.has_previous_state:
+ previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...]
+ else:
+ previous_states = torch.zeros_like(states[:, :1])
+ states = torch.cat([previous_states, states], dim=1)
+ decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
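+            # inter-chunk recurrence: decay_chunk propagates each chunk's final state (including the cached
+            # state prepended above) across chunk boundaries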
+
+ states_permuted = states.permute(0, 2, 1, 3, 4)
+ result = (decay_chunk[..., None, None] * states_permuted[:, :, None, ...]).sum(dim=2)
+ new_states = result.permute(0, 2, 1, 3, 4)
+ states, ssm_state = new_states[:, :-1], new_states[:, -1]
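+            # new_states[:, :-1] holds the state at the start of every chunk; the last entry is the final
+            # SSM state of the sequence and is written to the cache further below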
+
+ # Compute state -> output conversion per chunk
+ # (left term of low-rank factorization of off-diagonal blocks; C terms)
+ state_decay_out = torch.exp(A_cumsum)
+ # compute Yoff
+ C_times_states = (C[..., None, :] * states[:, :, None, ...])
+ state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
+ Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None])
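+            # Y_off is the inter-chunk (off-diagonal) contribution: C applied to the propagated chunk
+            # states, rescaled by the within-chunk decay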
+            # Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks)
+            y = Y_diag + Y_off
+ # [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim]
+ y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
+
+ y = y + D_residual
+ # Cutting off padded chunks
+ if pad_size > 0:
+ y = y[:, :seq_len, :, :]
+ y = y.reshape(batch_size, seq_len, -1)
+ if ssm_state is not None and cache_params is not None:
+ cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
+
+ scan_output = self.norm(y, gate)
+
+ # end ssd naive
+
+ # 4. Final linear projection
+ contextualized_states = self.out_proj(scan_output.to(dtype)) # [batch, seq_len, hidden_size]
+ return contextualized_states
+ # fmt: on
+
+ def forward(
+ self,
+ hidden_states,
+ cache_params: Optional[Zamba2HybridDynamicCache] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ ):
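+        # dispatch: use the fused CUDA/Triton kernels only when they are installed and the weights live on
+        # a CUDA device; otherwise fall back to the equivalent pure-PyTorch implementation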
+ if is_fast_path_available and "cuda" in self.in_proj.weight.device.type:
+ return self.cuda_kernels_forward(hidden_states, cache_params, attention_mask)
+
+ return self.torch_forward(hidden_states, cache_params, attention_mask)
+
+
+class Zamba2MLP(nn.Module):
+ def __init__(self, config: Zamba2Config, num_fwd_mem_blocks=None, block_id: int = None):
+ """
+        This MLP layer contributes to tied transformer blocks, aimed at increasing compute without increasing model size. Because this layer
+        is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the up and gate projectors to increase expressivity with a small memory overhead.
+ """
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.intermediate_size = config.intermediate_size
+ self.num_fwd_mem_blocks = num_fwd_mem_blocks
+ self.block_id = block_id
+
+ self.gate_up_proj = nn.Linear(self.hidden_size, 2 * self.intermediate_size, bias=config.add_bias_linear)
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.add_bias_linear)
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ self.gate_up_proj_adapter_list = nn.ModuleList([])
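+        # illustrative example: with 6 hybrid layers (num_fwd_mem_blocks == 6) and config.num_mem_blocks == 2,
+        # the shared block with block_id == 0 gets trainable adapters at indices 0, 2, 4 and nn.Identity()
+        # elsewhere, so each shared block only adapts the hybrid layers it is actually applied to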
+ for i in range(self.num_fwd_mem_blocks):
+ if i % config.num_mem_blocks == block_id:
+ gate_up_proj_adapter = nn.Sequential(
+ nn.Linear(self.config.hidden_size, self.config.adapter_rank, bias=False),
+ nn.Linear(self.config.adapter_rank, 2 * self.intermediate_size, bias=False),
+ )
+ else:
+ gate_up_proj_adapter = nn.Identity()
+ self.gate_up_proj_adapter_list.append(gate_up_proj_adapter)
+
+ layer_block_map = config.hybrid_layer_ids
+ self.layer_dic = {value: index for index, value in enumerate(layer_block_map)}
+
+ def forward(self, hidden_state, layer_idx=None):
+ gate_up_state = self.gate_up_proj(hidden_state)
+ layer_idx = self.layer_dic[layer_idx]
+ gate_up_state = gate_up_state + self.gate_up_proj_adapter_list[layer_idx](hidden_state)
+
+ gate_up_state = torch.chunk(gate_up_state, 2, dim=-1)
+ hidden_state = self.act_fn(gate_up_state[0]) * gate_up_state[1]
+ output = self.down_proj(hidden_state)
+ return output
+
+
+class Zamba2AttentionDecoderLayer(nn.Module):
+ def __init__(self, config: Zamba2Config, block_id: int = None, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.block_id = block_id
+ num_gs = len(config.hybrid_layer_ids)
+ self.self_attn = Zamba2Attention(config, layer_idx=-1, num_fwd_mem_blocks=num_gs, block_id=block_id)
+ self.feed_forward = Zamba2MLP(config, num_fwd_mem_blocks=num_gs, block_id=block_id)
+ self.input_layernorm = Zamba2RMSNorm(config.attention_hidden_size, eps=config.rms_norm_eps)
+ self.pre_ff_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ original_hidden_states: torch.Tensor,
+ layer_idx: int,
+ attention_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Zamba2HybridDynamicCache] = None,
+ output_attentions: Optional[bool] = False,
+ position_embeddings: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)`
+ original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`.
+ This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The
+ concatenated tensor is then used as input of the pre-attention RMSNorm
+ (see fig. 2 in https://arxiv.org/pdf/2405.16712).
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, sequence_length)` where padding elements are indicated by 0.
+ past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
+ with `head_dim` being the embedding dimension of each attention head.
+ """
+ hidden_states = torch.concatenate([hidden_states, original_hidden_states], dim=-1)
+ hidden_states = self.input_layernorm(hidden_states)
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ layer_idx=layer_idx,
+ attention_mask=attention_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ position_embeddings=position_embeddings,
+ **kwargs,
+ )
+
+ hidden_states = self.pre_ff_layernorm(hidden_states)
+ hidden_states = self.feed_forward(hidden_states, layer_idx)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ return outputs
+
+
+class Zamba2MambaDecoderLayer(nn.Module):
+ def __init__(self, config: Zamba2Config, layer_idx: int):
+ super().__init__()
+ self.mamba = Zamba2MambaMixer(config=config, layer_idx=layer_idx)
+ self.input_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.layer_idx = layer_idx
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ original_hidden_states: Optional[torch.Tensor] = None,
+ layer_idx: int = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ causal_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Zamba2HybridDynamicCache] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ transformer_hidden_states: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, sequence_length)` where padding elements are indicated by 0.
+ past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence.
+ """
+
+ residual = hidden_states
+
+ # `transformer_hidden_states` is the output from shared transformer + linear layer (see fig. 2 in https://arxiv.org/pdf/2405.16712).
+ # `transformer_hidden_states` is then added to the input to the mamba layer below (as described in eq. (6) of https://arxiv.org/pdf/2405.16712).
+ hidden_states = (
+ hidden_states + transformer_hidden_states if transformer_hidden_states is not None else hidden_states
+ )
+ hidden_states = self.input_layernorm(hidden_states)
+
+ hidden_states = self.mamba(
+ hidden_states=hidden_states,
+ cache_params=past_key_value,
+ attention_mask=attention_mask,
+ )
+
+ self_attn_weights = None
+
+ # residual connection after mamba
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (past_key_value,)
+
+ return outputs
+
+
+class Zamba2HybridLayer(nn.Module):
+ def __init__(
+ self, shared_transformer: Zamba2AttentionDecoderLayer, linear: nn.Linear, mamba: Zamba2MambaDecoderLayer
+ ):
+ super().__init__()
+ self.linear = linear
+ self.mamba_decoder = mamba
+ self.shared_transformer = shared_transformer
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ original_hidden_states: Optional[torch.Tensor] = None,
+ layer_idx: int = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ causal_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Zamba2HybridDynamicCache] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ position_embeddings: Optional[torch.LongTensor] = None,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with
+ hidden activations to form the input of the shared transformer layer.
+ layer_idx (`int`): layer number.
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, sequence_length)` where padding elements are indicated by 0.
+ past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
+ with `head_dim` being the embedding dimension of each attention head.
+ """
+
+ layer_outputs = self.shared_transformer(
+ hidden_states,
+ original_hidden_states=original_hidden_states,
+ layer_idx=layer_idx,
+ attention_mask=causal_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ position_embeddings=position_embeddings,
+ )
+
+ transformer_hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ self_attn_weights = layer_outputs[1]
+
+ transformer_hidden_states = self.linear(transformer_hidden_states)
+
+ layer_outputs = self.mamba_decoder(
+ hidden_states,
+ transformer_hidden_states=transformer_hidden_states,
+ attention_mask=attention_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ position_embeddings=position_embeddings,
+ )
+
+ if output_attentions:
+ layer_outputs = (layer_outputs[0], self_attn_weights) + layer_outputs[2:]
+
+ return layer_outputs
+
+
+class Zamba2PreTrainedModel(PreTrainedModel):
+ config_class = Zamba2Config
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["Zamba2AttentionDecoderLayer", "Zamba2MambaDecoderLayer"]
+ _skip_keys_device_placement = "past_key_values"
+ _supports_flash_attn_2 = True
+ _supports_flex_attn = True
+ _supports_sdpa = False
+ _supports_cache_class = True # Note: only supports Zamba2HybridDynamicCache
+ _is_stateful = True
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, Zamba2MambaMixer):
+ module.A_log._no_weight_decay = True
+ module.D._no_weight_decay = True
+
+ dt = torch.exp(
+ torch.rand(self.config.n_mamba_heads)
+ * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
+ + math.log(self.config.time_step_min)
+ ).clamp(min=self.config.time_step_floor)
+            # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
+ inv_dt = dt + torch.log(-torch.expm1(-dt))
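+            # softplus(inv_dt) == dt, so after copying inv_dt into dt_bias the mixer's
+            # softplus(dt + dt_bias) starts out close to the sampled time steps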
+
+ with torch.no_grad():
+ module.dt_bias.copy_(inv_dt)
+ module.dt_bias._no_reinit = True
+
+
+ZAMBA2_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`Zamba2Config`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+ZAMBA2_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`Zamba2HybridDynamicCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ A Zamba2HybridDynamicCache object containing pre-computed hidden-states (keys and values in the
+ self-attention blocks and convolution and ssm states in the mamba blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ Key and value cache tensors have shape `(batch_size, num_heads, seq_len, head_dim)`.
+ Convolution and ssm states tensors have shape `(batch_size, d_inner, d_conv)` and
+ `(batch_size, d_inner, d_state)` respectively.
+ See the `Zamba2HybridDynamicCache` class for more details.
+
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
+ the complete sequence length.
+"""
+
+
+@add_start_docstrings(
+ "The bare Zamba2 Model outputting raw hidden-states without any specific head on top.",
+ ZAMBA2_START_DOCSTRING,
+)
+class Zamba2Model(Zamba2PreTrainedModel):
+ """
+ Model consisting of *config.num_hidden_layers* layers.
+
+ Args:
+ config: Zamba2Config
+ """
+
+ def __init__(self, config: Zamba2Config):
+ super().__init__(config)
+ self.config = config
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ blocks = [Zamba2AttentionDecoderLayer(config, block_id=k) for k in range(config.num_mem_blocks)]
+ mamba_layers = []
+ linear_layers = []
+ self.layers_block_type = config.layers_block_type
+ for i in range(config.num_hidden_layers):
+ if config.layers_block_type[i] == "mamba":
+ mamba_layers.append(Zamba2MambaDecoderLayer(config, layer_idx=i))
+ elif config.layers_block_type[i] == "hybrid":
+ linear_layers.append(nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=False))
+ mamba_layers.append(Zamba2MambaDecoderLayer(config, layer_idx=i))
+ mamba_layers = iter(mamba_layers)
+ linear_layers = iter(linear_layers)
+ blocks = cycle(blocks)
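+        # the same `num_mem_blocks` shared transformer blocks are reused cyclically across all hybrid layers
+        # (weight tying); each hybrid layer additionally gets its own linear projection and mamba layer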
+ layers = self.get_layers(blocks, linear_layers, mamba_layers)
+ self.layers = nn.ModuleList(layers)
+
+ self._attn_implementation = config._attn_implementation
+ self.final_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ if config.use_mem_rope:
+ if config.use_long_context:
+ logger.warning_once(
+ "`use_long_context` set to `True`: using rescaled `rope_theta` and extended `max_position_embeddings`."
+ )
+ self.rotary_emb = Zamba2RotaryEmbedding(config)
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(ZAMBA2_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Zamba2HybridDynamicCache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError(
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
+ )
+
+ if self.gradient_checkpointing and self.training and use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
+ )
+ use_cache = False
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ hidden_states = inputs_embeds
+
+ original_hidden_states = torch.clone(inputs_embeds)
+ # original_hidden_states: word embedding output that will be concatenated with hidden activations to form the input of the shared transformer layer
+
+ if use_cache and past_key_values is None:
+ batch_size = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]
+ past_key_values = Zamba2HybridDynamicCache(self.config, batch_size, dtype=self.dtype, device=self.device)
+
+ if cache_position is None:
+ past_seen_tokens = (
+ past_key_values.get_seq_length(layer_idx=self.first_transformer_layer_id)
+ if past_key_values is not None
+ else 0
+ )
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
+
+ # create position embeddings to be shared across the decoder layers
+ if self.config.use_mem_rope:
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
+ else:
+ position_embeddings = None
+
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+
+ for layer_idx, layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer.__call__,
+ hidden_states,
+ original_hidden_states,
+ layer_idx,
+ attention_mask,
+ causal_mask,
+ past_key_values,
+ output_attentions,
+ use_cache,
+ position_embeddings,
+ )
+ else:
+ layer_outputs = layer(
+ hidden_states,
+ original_hidden_states=original_hidden_states,
+ layer_idx=layer_idx,
+ attention_mask=attention_mask,
+ causal_mask=causal_mask,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ position_embeddings=position_embeddings,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ if layer_outputs[1] is not None:
+ # append attentions only of attention layers. Mamba layers return `None` as the attention weights
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.final_layernorm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if past_key_values and not past_key_values.has_previous_state:
+ past_key_values.has_previous_state = True
+
+ output = BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=past_key_values if use_cache else None,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+ return output if return_dict else output.to_tuple()
+
+ def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
+ if self.config._attn_implementation == "flash_attention_2":
+ if attention_mask is not None and 0.0 in attention_mask:
+ return attention_mask
+ return None
+
+ dtype, device = input_tensor.dtype, input_tensor.device
+ min_dtype = torch.finfo(dtype).min
+ sequence_length = input_tensor.shape[1]
+ target_length = cache_position[-1] + 1
+
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
+ if sequence_length != 1:
+ causal_mask = torch.triu(causal_mask, diagonal=1)
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
+ if attention_mask.dim() == 2:
+ mask_length = attention_mask.shape[-1]
+ padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
+ causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
+
+ if (
+ self.config._attn_implementation == "sdpa"
+ and attention_mask is not None
+ and attention_mask.device.type == "cuda"
+ ):
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+            # Details: https://github.com/pytorch/pytorch/issues/110213
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+ return causal_mask
+
+ def get_layers(self, blocks, linear_layers, mamba_layers):
+ layers = []
+ self._tied_weights_keys = []
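+        # because the shared transformer blocks are reused, their parameters (and those of their adapters)
+        # appear under several layer indices in the state dict; the regex patterns collected below mark
+        # them as tied weights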
+ self.first_transformer_layer_id = 0
+ for layer_id, layer_type in enumerate(self.layers_block_type):
+ if layer_type == "hybrid":
+ if self.first_transformer_layer_id == 0:
+ self.first_transformer_layer_id = layer_id
+ block = next(blocks)
+ if self.config.num_mem_blocks * len(self.config.hybrid_layer_ids) > 1:
+ prefix_pattern = rf"^layers\.{layer_id}\.shared_transformer\."
+ main_keys_pattern = re.compile(
+ prefix_pattern
+ + r"(?:"
+ + r"self_attn\.(?:q_proj|k_proj|v_proj|o_proj)\.weight|"
+ + r"feed_forward\.(?:gate_up_proj|down_proj)\.weight|"
+ + r"(?:input_layernorm|pre_ff_layernorm)\.weight"
+ + r")$"
+ )
+ self._tied_weights_keys.append(main_keys_pattern)
+
+ adapter_id = 0
+ for _layer_type in self.layers_block_type:
+ if _layer_type == "hybrid" and adapter_id % self.config.num_mem_blocks == block.block_id:
+ adapter_pattern = re.compile(
+ r"^shared_transformer\.feed_forward\.gate_up_proj_adapter_list\."
+ + str(adapter_id)
+ + r"\.(?:0|1)\.weight$"
+ )
+ self._tied_weights_keys.append(adapter_pattern)
+ adapter_id += 1
+ if self.config.use_shared_attention_adapter:
+ adapter_id = 0
+ for _layer_type in self.layers_block_type:
+ if _layer_type == "hybrid" and adapter_id % self.config.num_mem_blocks == block.block_id:
+ attn_adapter_pattern = re.compile(
+ r"^shared_transformer\.self_attn\."
+ + r"(?:linear_q_adapter_list|linear_k_adapter_list|linear_v_adapter_list)\."
+ + str(adapter_id)
+ + r"\.(?:0|1)\.weight$"
+ )
+ self._tied_weights_keys.append(attn_adapter_pattern)
+ adapter_id += 1
+ layers.append(Zamba2HybridLayer(block, next(linear_layers), next(mamba_layers)))
+ else:
+ layers.append(next(mamba_layers))
+ return layers
+
+
+# Adapted from transformers.models.jamba.modeling_jamba.JambaForCausalLM with Jamba->Zamba2, JAMBA->ZAMBA2
+class Zamba2ForCausalLM(Zamba2PreTrainedModel, GenerationMixin):
+ def __init__(self, config: Zamba2Config):
+ super().__init__(config)
+ self.model = Zamba2Model(config)
+ self._tied_weights_keys = ["lm_head.weight", *self.model._tied_weights_keys]
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+ @add_start_docstrings_to_model_forward(ZAMBA2_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Zamba2HybridDynamicCache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ logits_to_keep: Union[int, torch.Tensor] = 0,
+ **loss_kwargs,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ logits_to_keep (`int` or `torch.Tensor`, *optional*):
+ If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+ If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+ This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, Zamba2ForCausalLM
+
+ >>> model = Zamba2ForCausalLM.from_pretrained("Zyphra/Zamba2-7B-v1")
+ >>> tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-7B-v1")
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ cache_position=cache_position,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ cache_position=None,
+ position_ids=None,
+ use_cache=True,
+ **kwargs,
+ ):
+        # Overwritten -- has a unique cache type, `Zamba2HybridDynamicCache`
+
+ empty_past_kv = past_key_values is None
+
+ # Omit tokens covered by past_key_values
+ if not empty_past_kv:
+ # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
+ # Exception 1: when passing input_embeds, input_ids may be missing entries
+ # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
+ if inputs_embeds is not None: # Exception 1
+ input_ids = input_ids[:, -cache_position.shape[0] :]
+ elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
+ input_ids = input_ids[:, cache_position]
+ else:
+ past_key_values = Zamba2HybridDynamicCache(
+ self.config, input_ids.shape[0], dtype=self.dtype, device=self.device
+ )
+
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if not empty_past_kv:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and empty_past_kv:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids.contiguous()} # `contiguous()` needed for compilation use cases
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ "attention_mask": attention_mask,
+ "logits_to_keep": self.config.num_logits_to_keep,
+ "cache_position": cache_position,
+ }
+ )
+ return model_inputs
+
+
+@add_start_docstrings(
+ """
+ The Zamba2 Model with a sequence classification head on top (linear layer).
+
+ [`Zamba2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT-2) do.
+
+ Since it does classification on the last token, it requires to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ ZAMBA2_START_DOCSTRING,
+)
+class Zamba2ForSequenceClassification(Zamba2PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = Zamba2Model(config)
+ self._tied_weights_keys = self.model._tied_weights_keys
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(ZAMBA2_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+__all__ = ["Zamba2ForCausalLM", "Zamba2ForSequenceClassification", "Zamba2Model", "Zamba2PreTrainedModel"]
diff --git a/src/transformers/models/zamba2/modular_zamba2.py b/src/transformers/models/zamba2/modular_zamba2.py
new file mode 100644
index 000000000000..dd62d48ac41d
--- /dev/null
+++ b/src/transformers/models/zamba2/modular_zamba2.py
@@ -0,0 +1,1156 @@
+# coding=utf-8
+# Copyright 2024 Zyphra Technologies and the HuggingFace Inc. team. All rights reserved.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import math
+import re
+from itertools import cycle
+from typing import Callable, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_flash_attention_utils import FlashAttentionKwargs
+from ...modeling_outputs import BaseModelOutputWithPast
+from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+from ...processing_utils import Unpack
+from ...utils import (
+ logging,
+)
+from ...utils.import_utils import (
+ is_causal_conv1d_available,
+ is_mamba_ssm_available,
+)
+from ..llama.modeling_llama import LlamaRotaryEmbedding, apply_rotary_pos_emb
+from ..mamba2.modeling_mamba2 import MambaRMSNormGated, pad_tensor_by_size, reshape_into_chunks, segment_sum
+from ..zamba.modeling_zamba import (
+ ZambaAttention,
+ ZambaAttentionDecoderLayer,
+ ZambaForCausalLM,
+ ZambaForSequenceClassification,
+ ZambaHybridDynamicCache,
+ ZambaHybridLayer,
+ ZambaMambaDecoderLayer,
+ ZambaModel,
+ ZambaRMSNorm,
+ eager_attention_forward,
+)
+from .configuration_zamba2 import Zamba2Config
+
+
+if is_mamba_ssm_available():
+ from mamba_ssm.ops.triton.selective_state_update import selective_state_update
+ from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined
+else:
+ selective_state_update, mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined = None, None, None
+
+if is_causal_conv1d_available():
+ from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
+else:
+ causal_conv1d_update, causal_conv1d_fn = None, None
+
+is_fast_path_available = all((selective_state_update, causal_conv1d_fn, causal_conv1d_update))
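+# the fused kernel path requires both the `mamba-ssm` and `causal-conv1d` packages; when either is missing,
+# Zamba2MambaMixer falls back to its pure-PyTorch `torch_forward` implementation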
+
+
+_CONFIG_FOR_DOC = "Zyphra/Zamba2-2.7B"
+
+logger = logging.get_logger(__name__)
+
+
+class Zamba2RMSNormGated(MambaRMSNormGated):
+ pass
+
+
+class Zamba2RMSNorm(ZambaRMSNorm):
+ pass
+
+
+class Zamba2HybridDynamicCache(ZambaHybridDynamicCache):
+ """
+ A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
+ (which has a constant shape regardless of seq_len).
+
+ This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
+    and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor depends
+    on the layer type. For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
+    while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
+ For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
+ while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
+ and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
+ """
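+    # illustrative usage (mirrors how Zamba2Model builds this cache internally when `use_cache=True`):
+    #   cache = Zamba2HybridDynamicCache(config, batch_size, dtype=model.dtype, device=model.device)
+    #   outputs = model(input_ids, past_key_values=cache, use_cache=True)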
+
+ def __init__(
+ self, config: Zamba2Config, batch_size: int, dtype: torch.dtype = torch.float16, device: Optional[str] = None
+ ):
+ self.dtype = dtype
+ self.layers_block_type = config.layers_block_type
+ self.has_previous_state = False
+ self.intermediate_size = int(config.mamba_expand * config.hidden_size)
+ self.ssm_state_size = config.mamba_d_state
+ self.conv_kernel_size = config.mamba_d_conv
+ self.n_mamba_heads = config.n_mamba_heads
+ self.transformer_layers = []
+ self._modules = {}
+ self._parameters = {}
+ self._buffers = {}
+ self.conv_states = {}
+ self.ssm_states = {}
+ for i in range(config.num_hidden_layers):
+ self.conv_states[i] = torch.zeros(
+ batch_size,
+ self.intermediate_size + 2 * config.mamba_ngroups * config.mamba_d_state,
+ self.conv_kernel_size,
+ device=device,
+ dtype=dtype,
+ )
+ self.ssm_states[i] = torch.zeros(
+ batch_size, self.n_mamba_heads, config.mamba_headdim, self.ssm_state_size, device=device, dtype=dtype
+ )
+ if self.layers_block_type[i] == "hybrid":
+ self.transformer_layers.append(i)
+ self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
+ self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
+
+ def update_conv_state(
+ self, layer_idx: int, new_conv_state: torch.Tensor, cache_position: torch.LongTensor
+ ) -> torch.Tensor:
+ conv_state = self.conv_states[layer_idx]
+ cache_position = cache_position.clamp(0, self.conv_kernel_size - 1)
+
+ conv_state = conv_state.roll(shifts=-1, dims=-1)
+ conv_state[:, :, cache_position] = new_conv_state.to(conv_state.device)
+ self.conv_states[layer_idx].zero_()
+ self.conv_states[layer_idx] += conv_state
+ return self.conv_states[layer_idx]
+
+    def reset(self):
+        # `conv_states` and `ssm_states` are dicts of per-layer tensors here, so zero each entry in place
+        for layer_idx in self.conv_states:
+            self.conv_states[layer_idx].zero_()
+            self.ssm_states[layer_idx].zero_()
+
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
+        # fall back to a transformer layer, since only transformer layers hold a non-empty attention cache
+ layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
+ if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0:
+ return 0
+ return self.key_cache[layer_idx].shape[-2]
+
+
+class Zamba2RotaryEmbedding(LlamaRotaryEmbedding):
+ def __init__(
+ self,
+ config: Zamba2Config,
+ device=None,
+ ):
+ super().__init__(config, device)
+        # we cannot parameterize this from the config alone because of the extra factor of 2 in the head_dim
+ inv_freq, self.attention_scaling = self.rope_init_fn(
+ device=device, base=config.rope_theta, dim=config.attention_head_dim
+ )
+
+
+class Zamba2Attention(ZambaAttention):
+ """
+ Multi-headed attention from 'Attention Is All You Need' paper.
+
+ Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
+ The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
+ The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
+ (see fig. 2 in https://arxiv.org/pdf/2405.16712).
+ Additionally, replaced
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
+    Finally, this attention layer contributes to tied transformer blocks, aimed at increasing compute without increasing model size. Because this
+    layer is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the q, k, v projectors to increase
+    expressivity with a small memory overhead (see Fig. 2 of https://arxiv.org/pdf/2411.15242).
+ """
+
+ def __init__(
+ self,
+ config: Zamba2Config,
+ layer_idx: Optional[int] = None,
+ num_fwd_mem_blocks: int = None,
+ block_id: int = None,
+ ):
+ super().__init__(config, layer_idx)
+ self.num_fwd_mem_blocks = num_fwd_mem_blocks
+ self.layer_block_map = config.hybrid_layer_ids
+ self.block_id = block_id
+
+ if config.use_shared_attention_adapter:
+ self.linear_q_adapter_list = nn.ModuleList([])
+ self.linear_k_adapter_list = nn.ModuleList([])
+ self.linear_v_adapter_list = nn.ModuleList([])
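+            # only the hybrid layers served by this shared block (i % num_mem_blocks == block_id) get
+            # trainable q/k/v adapters; the remaining slots are nn.Identity() placeholders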
+
+ for i in range(self.num_fwd_mem_blocks):
+ if i % config.num_mem_blocks == block_id:
+ linear_q_adapter = nn.Sequential(
+ nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False),
+ nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False),
+ )
+ linear_k_adapter = nn.Sequential(
+ nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False),
+ nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False),
+ )
+ linear_v_adapter = nn.Sequential(
+ nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False),
+ nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False),
+ )
+ else:
+ linear_q_adapter = nn.Identity()
+ linear_k_adapter = nn.Identity()
+ linear_v_adapter = nn.Identity()
+ self.linear_q_adapter_list.append(linear_q_adapter)
+ self.linear_k_adapter_list.append(linear_k_adapter)
+ self.linear_v_adapter_list.append(linear_v_adapter)
+
+ self.layer_dic = {value: index for index, value in enumerate(self.layer_block_map)}
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ layer_idx: int,
+ attention_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Zamba2HybridDynamicCache] = None,
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ input_shape = hidden_states.shape[:-1]
+ hidden_shape = (*input_shape, -1, self.head_dim)
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+ if self.config.use_shared_attention_adapter:
+ adapter_layer_idx = self.layer_dic[layer_idx]
+ query_states = query_states + self.linear_q_adapter_list[adapter_layer_idx](hidden_states)
+ key_states = key_states + self.linear_k_adapter_list[adapter_layer_idx](hidden_states)
+ value_states = value_states + self.linear_v_adapter_list[adapter_layer_idx](hidden_states)
+
+ query_states = query_states.view(hidden_shape).transpose(1, 2)
+ key_states = key_states.view(hidden_shape).transpose(1, 2)
+ value_states = value_states.view(hidden_shape).transpose(1, 2)
+
+ if self.config.use_mem_rope:
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ key_states, value_states = past_key_value.update(key_states, value_states, layer_idx)
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
+ logger.warning_once(
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ else:
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ attn_output, attn_weights = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ scaling=self.scaling,
+ **kwargs,
+ )
+
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+ attn_output = self.o_proj(attn_output)
+ return attn_output, attn_weights
+
+
+class Zamba2MambaMixer(nn.Module):
+ """
+ Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
+ A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
+ ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
+ and is why Mamba is called **selective** state spaces)
+ """
+
+ def __init__(self, config: Zamba2Config, layer_idx: int = None):
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.ssm_state_size = config.mamba_d_state
+ self.conv_kernel_size = config.mamba_d_conv
+ self.intermediate_size = int(config.mamba_expand * self.hidden_size)
+ self.layer_idx = layer_idx
+ self.use_conv_bias = config.use_conv_bias
+ self.activation = "silu"
+ self.act = nn.SiLU()
+
+ self.n_groups = config.mamba_ngroups
+ self.head_dim = config.mamba_headdim
+ self.num_heads = self.config.n_mamba_heads
+ self.chunk_size = config.chunk_size
+
+ self.time_step_limit = config.time_step_limit
+ self.time_step_min = config.time_step_min
+ self.time_step_max = config.time_step_max
+
+ self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
+ self.conv1d = nn.Conv1d(
+ in_channels=self.conv_dim,
+ out_channels=self.conv_dim,
+ bias=True,
+ kernel_size=config.mamba_d_conv,
+ groups=self.conv_dim,
+ padding=config.mamba_d_conv - 1,
+ )
+
+ # projection of the input hidden states
+ projection_size = self.intermediate_size + self.conv_dim + self.num_heads
+ self.in_proj = nn.Linear(
+ self.hidden_size,
+ projection_size,
+ bias=config.add_bias_linear,
+ )
+ # selective projection used to make dt, B and C input dependant
+
+ # time step projection (discretization)
+ # instantiate once and copy inv_dt in init_weights of PretrainedModel
+ self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
+
+ # S4D real initialization. These are not discretized!
+ # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
+ A = torch.arange(1, self.num_heads + 1)
+ self.A_log = nn.Parameter(torch.log(A))
+ self.A_log._no_weight_decay = True
+ self.norm = Zamba2RMSNormGated(self.intermediate_size, eps=1e-5)
+ self.D = nn.Parameter(torch.ones(self.num_heads))
+ self.D._no_weight_decay = True
+
+ self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.add_bias_linear)
+
+ if not is_fast_path_available:
+ logger.warning_once(
+                "The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
+                " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
+                " https://github.com/Dao-AILab/causal-conv1d"
+ )
+
+ def cuda_kernels_forward(
+ self,
+ hidden_states: torch.Tensor,
+ cache_params: Optional[Zamba2HybridDynamicCache] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ ):
+ # set up dimensions for reshapes later
+
+ batch_size, seq_len, _ = hidden_states.shape
+ groups_time_state_size = self.n_groups * self.ssm_state_size
+ d_to_remove = 2 * self.intermediate_size + 2 * self.n_groups * self.ssm_state_size + self.num_heads
+
+ # getting projected states from cache if it exists
+ if cache_params is not None and cache_params.has_previous_state:
+ in_projected_states = self.in_proj(hidden_states.squeeze(1)) # (B 2D)
+ d_mlp = (in_projected_states.shape[-1] - d_to_remove) // 2
+ split_projection_dim = [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads]
+ _, _, gate, hidden_states_B_C, dt = torch.split(in_projected_states, split_projection_dim, dim=-1)
+
+ hidden_states_B_C = causal_conv1d_update(
+ hidden_states_B_C,
+ cache_params.conv_states[self.layer_idx],
+ self.conv1d.weight.squeeze(1),
+ self.conv1d.bias,
+ self.activation,
+ )
+
+ hidden_states, B, C = torch.split(
+ hidden_states_B_C,
+ [self.intermediate_size, groups_time_state_size, groups_time_state_size],
+ dim=-1,
+ )
+ A = -torch.exp(self.A_log.float()) # (nheads,)
+
+ A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
+ dt = dt[:, :, None].expand(-1, -1, self.head_dim)
+ dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
+ D = self.D[:, None, ...].expand(-1, self.head_dim)
+ B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
+ C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
+ hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
+ hidden_states = selective_state_update(
+ cache_params.ssm_states[self.layer_idx],
+ hidden_states_reshaped,
+ dt,
+ A,
+ B,
+ C,
+ D,
+ z=None,
+ dt_bias=dt_bias,
+ dt_softplus=True,
+ )
+ hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
+ hidden_states = self.norm(hidden_states, gate)
+ out = self.out_proj(hidden_states)[:, None, ...]
+ # if no cache is found, call the kernel on the full sequence
+ else:
+ if attention_mask is not None and not torch.all(attention_mask == 1):
+ # zero out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
+ dtype = hidden_states.dtype
+ hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
+ # 1. Gated MLP's linear projection
+ projected_states = self.in_proj(hidden_states)
+ A = -torch.exp(self.A_log.float()) # (num_heads) or (intermediate_size, state_size)
+ dt_limit_kwargs = {} if self.time_step_limit is None else {"dt_limit": self.time_step_limit}
+ if attention_mask is not None:
+ input_not_masked = torch.all(attention_mask == 1)
+ else:
+ input_not_masked = True
+
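+ # fused path (training, no cache, no padding): a single kernel runs the conv1d, the chunked SSM scan, the gated RMSNorm and the output projection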
+ if self.training and cache_params is None and input_not_masked:
+ out, ssm_state = mamba_split_conv1d_scan_combined(
+ projected_states,
+ self.conv1d.weight.squeeze(1),
+ self.conv1d.bias,
+ self.dt_bias,
+ A,
+ D=self.D,
+ chunk_size=self.chunk_size,
+ seq_idx=None,
+ activation=self.activation,
+ rmsnorm_weight=self.norm.weight,
+ rmsnorm_eps=self.norm.variance_epsilon,
+ outproj_weight=self.out_proj.weight,
+ outproj_bias=self.out_proj.bias,
+ headdim=self.head_dim,
+ ngroups=self.n_groups,
+ norm_before_gate=False,
+ return_final_states=True,
+ **dt_limit_kwargs,
+ )
+
+ else:
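+ # non-fused path: split the projection, then run the convolution and the chunked scan separately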
+ gate, hidden_states_B_C, time_step = torch.split(
+ projected_states,
+ [self.intermediate_size, self.conv_dim, self.num_heads],
+ dim=-1,
+ )
+
+ # 1D Convolution
+ if cache_params is not None:
+ hidden_states_B_C_t = hidden_states_B_C.transpose(1, 2)
+ conv_state = nn.functional.pad(
+ hidden_states_B_C_t, (self.conv_kernel_size - hidden_states_B_C_t.shape[-1], 0)
+ )
+ cache_params.conv_states[self.layer_idx].copy_(conv_state)
+ if causal_conv1d_fn is None or self.activation not in ["silu", "swish"]:
+ hidden_states_B_C = self.act(
+ self.conv1d(hidden_states_B_C.transpose(1, 2)).transpose(1, 2)[:, :seq_len]
+ ) # (B, L, self.d_inner + 2 * ngroups * d_state)
+ else:
+ hidden_states_B_C = causal_conv1d_fn(
+ x=hidden_states_B_C.transpose(1, 2),
+ weight=self.conv1d.weight.squeeze(1),
+ bias=self.conv1d.bias,
+ activation=self.activation,
+ ).transpose(1, 2)[:, :seq_len]
+ hidden_states, B, C = torch.split(
+ hidden_states_B_C,
+ [self.intermediate_size, groups_time_state_size, groups_time_state_size],
+ dim=-1,
+ )
+ if attention_mask is not None and not torch.all(attention_mask == 1):
+ # zero out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
+ dtype = hidden_states.dtype
+ hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
+ scan_output, ssm_state = mamba_chunk_scan_combined(
+ hidden_states.view(batch_size, seq_len, -1, self.head_dim),
+ time_step,
+ A,
+ B.view(batch_size, seq_len, self.n_groups, -1),
+ C.view(batch_size, seq_len, self.n_groups, -1),
+ chunk_size=self.chunk_size,
+ D=self.D,
+ z=None,
+ seq_idx=None,
+ return_final_states=True,
+ dt_bias=self.dt_bias,
+ dt_softplus=True,
+ **dt_limit_kwargs,
+ )
+ if ssm_state is not None and cache_params is not None:
+ cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
+ scan_output = scan_output.view(batch_size, seq_len, -1)
+ # Multiply "gate" branch and apply extra normalization layer
+ scan_output = self.norm(scan_output, gate)
+ out = self.out_proj(scan_output)
+ return out
+
+ # fmt: off
+ def torch_forward(self, input_states, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
+ batch_size, seq_len, _ = input_states.shape
+ dtype = input_states.dtype
+ # Gated MLP's linear projection
+ if cache_params is not None and cache_params.has_previous_state:
+ projected_states = self.in_proj(input_states.squeeze(1))
+ else:
+ if attention_mask is not None and not torch.all(attention_mask==1):
+ # zero out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
+ input_states = (input_states * attention_mask[:, :, None]).to(dtype)
+ projected_states = self.in_proj(input_states)
+ d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size - self.num_heads) // 2
+ _, _, gate, hidden_states, dt = projected_states.split(
+ [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
+ )
+
+ # Convolution sequence transformation
+ if cache_params is not None:
+ ssm_state = cache_params.ssm_states[self.layer_idx].clone()
+ ssm_state = ssm_state.to(hidden_states.device)
+ if cache_params.has_previous_state:
+ gate = gate.unsqueeze(1)
+ conv_state = cache_params.conv_states[self.layer_idx] # [batch, intermediate_size, conv_kernel_size]
+ conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
+ # handle batched generation - states are copied through
+ conv_state[:, :, -1] = hidden_states[:, 0, :] if hidden_states.ndim == 3 else hidden_states
+ cache_params.conv_states[self.layer_idx].copy_(conv_state)
+ hidden_states = torch.sum(conv_state.to(projected_states.device) * self.conv1d.weight[:, 0, :], dim=-1)
+ if self.use_conv_bias:
+ hidden_states += self.conv1d.bias
+ hidden_states = self.act(hidden_states).to(dtype)[:, None, ...] # [batch, 1, intermediate_size] : decoding
+ else:
+ hidden_states = hidden_states.transpose(1, 2)
+ conv_state = nn.functional.pad(
+ hidden_states,
+ (self.conv_kernel_size - hidden_states.shape[-1], 0)
+ )
+ cache_params.conv_states[self.layer_idx].copy_(conv_state)
+ hidden_states = self.act(self.conv1d(hidden_states).transpose(1, 2))[:, :seq_len, :] # [batch, seq_len, conv_dim]
+ if attention_mask is not None and not torch.all(attention_mask==1):
+ dtype = hidden_states.dtype
+ # zero out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
+ hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
+ else:
+ ssm_state = torch.zeros(
+ (batch_size, self.num_heads, self.head_dim, self.ssm_state_size),
+ device=hidden_states.device, dtype=dtype
+ )
+ hidden_states = self.act(self.conv1d(hidden_states.transpose(1, 2))[..., :seq_len].transpose(1, 2))
+ hidden_states, B, C = torch.split(hidden_states, [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size], dim=-1)
+ A = -torch.exp(self.A_log.float()) # [num_heads]
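+ # single-token decoding with a cache: discretize A and B, update the recurrent state in place, then read it out with C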
+ if cache_params is not None and cache_params.has_previous_state:
+ # Note: there is no need to pad parameter matrices here, as there is just one new token
+ # for batched generation
+ dt = dt[:, None, ...] if dt.ndim == 2 else dt[:, 0, :][:, None, ...]
+ dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
+ # [num_heads] -> [num_heads, head_dim]
+ dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
+
+ dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
+ dt = torch.clamp(dt, self.time_step_min) # the upper clamp to self.time_step_max is currently not applied
+ A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
+ # [bsz, num_heads, head_dim, state_size]
+ dA = torch.exp(dt[..., None] * A)
+
+ # Discretize B
+ # [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
+ # -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
+ B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
+ B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
+ B = B.reshape(batch_size, -1, B.shape[-1])
+ # [bsz, num_heads, head_dim, state_size]
+ dB = dt[..., None] * B[..., None, :]
+
+ # Discretize x into dB
+ # [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
+ hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
+ dBx = dB * hidden_states[..., None]
+
+ # State calculation
+ cache_params.ssm_states[self.layer_idx].copy_(
+ cache_params.ssm_states[self.layer_idx] * dA + dBx
+ )
+
+ # Subsequent output
+ # [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
+ C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
+ C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
+ C = C.reshape(batch_size, -1, C.shape[-1])
+ # [bsz, num_heads, head_dim]
+
+ ssm_states = cache_params.ssm_states[self.layer_idx].to(C.dtype) # Shape: [b, h, d, n]
+ # Reshape ssm_states to merge the first two dimensions
+ ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size) # Shape: [b*h, d, n]
+ C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1) # Shape: [b*h, n, 1]
+ y = torch.bmm(ssm_states_reshaped, C_reshaped)
+ y = y.view(batch_size, self.num_heads, self.head_dim)
+
+ # D skip connection
+ # [num_heads] -> [num_heads, head_dim]
+ D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
+ y = (y + hidden_states * D).to(y.dtype)
+
+ # [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
+ y = y.reshape(batch_size, -1)[:, None, ...]
+ else:
+ # begin ssd naive implementation without einsums
+ dt = nn.functional.softplus(dt + self.dt_bias)
+ dt = torch.clamp(dt, self.time_step_min)
+ hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
+ B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
+ C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
+ B = B.repeat(1, 1, self.num_heads // self.n_groups, 1)
+ C = C.repeat(1, 1, self.num_heads // self.n_groups, 1)
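+ # pad the sequence length so it splits evenly into chunks of size chunk_size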
+ pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
+
+ D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
+
+ # Discretize x and A
+ hidden_states = hidden_states * dt[..., None]
+ A = A.to(hidden_states.dtype) * dt
+
+ # Rearrange into blocks/chunks
+ hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
+
+ # [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
+ A = A.permute(0, 3, 1, 2)
+ A_cumsum = torch.cumsum(A, dim=-1)
+
+ # 1. Compute the output for each intra-chunk (diagonal blocks)
+ # This is the analog of a causal mask
+ L = torch.exp(segment_sum(A))
+
+ # First, contraction of C and B to get G (attention-weights like)
+ G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, : ,:] # shape: (b, c, l, s, h, n)
+ G = G_intermediate.sum(dim=-1) # shape: (b, c, l, s, h)
+
+ # Step 2: Compute M, equivalent to applying attention mask to weights
+ M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
+ M = M_intermediate.sum(dim=-1)
+
+ # Step 3: Compute Y_diag (apply to values)
+ Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(3)
+
+ # (right term of low-rank factorization of off-diagonal blocks; B terms)
+
+ decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum))
+ B_decay_contraction = B * decay_states.permute(0, 2, 3, 1)[..., None]
+ # permute back B * decay states
+ states = (B_decay_contraction.permute(0, 1, 3, 2, 4)[..., None] * hidden_states.permute(0, 1, 3, 2, 4)[..., None, :]).sum(dim=3).permute(0, 1, 2, 4, 3)
+ if cache_params is not None and cache_params.has_previous_state:
+ previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...]
+ else:
+ previous_states = torch.zeros_like(states[:, :1])
+ states = torch.cat([previous_states, states], dim=1)
+ decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
+
+ states_permuted = states.permute(0, 2, 1, 3, 4)
+ result = (decay_chunk[..., None, None] * states_permuted[:, :, None, ...]).sum(dim=2)
+ new_states = result.permute(0, 2, 1, 3, 4)
+ states, ssm_state = new_states[:, :-1], new_states[:, -1]
+
+ # Compute state -> output conversion per chunk
+ # (left term of low-rank factorization of off-diagonal blocks; C terms)
+ state_decay_out = torch.exp(A_cumsum)
+ # compute Y_off
+ C_times_states = (C[..., None, :] * states[:, :, None, ...])
+ state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
+ Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None])
+ # Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks)
+
+ y = Y_diag + Y_off
+ # [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim]
+ y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
+
+ y = y + D_residual
+ # Cutting off padded chunks
+ if pad_size > 0:
+ y = y[:, :seq_len, :, :]
+ y = y.reshape(batch_size, seq_len, -1)
+ if ssm_state is not None and cache_params is not None:
+ cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
+
+ scan_output = self.norm(y, gate)
+
+ # end ssd naive
+
+ # 4. Final linear projection
+ contextualized_states = self.out_proj(scan_output.to(dtype)) # [batch, seq_len, hidden_size]
+ return contextualized_states
+ # fmt: on
+
+ def forward(
+ self,
+ hidden_states,
+ cache_params: Optional[Zamba2HybridDynamicCache] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ ):
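+ # use the fused CUDA kernels when they are installed and the weights live on a CUDA device; otherwise fall back to the pure PyTorch implementation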
+ if is_fast_path_available and "cuda" in self.in_proj.weight.device.type:
+ return self.cuda_kernels_forward(hidden_states, cache_params, attention_mask)
+
+ return self.torch_forward(hidden_states, cache_params, attention_mask)
+
+
+class Zamba2MLP(nn.Module):
+ def __init__(self, config: Zamba2Config, num_fwd_mem_blocks=None, block_id: int = None):
+ """
+ This MLP layer contributes to tied transformer blocks, aimed at increasing compute without increasing model size. Because this layer
+ is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the up and gate projectors to increase expressivity with a small memory overhead.
+ """
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.intermediate_size = config.intermediate_size
+ self.num_fwd_mem_blocks = num_fwd_mem_blocks
+ self.block_id = block_id
+
+ self.gate_up_proj = nn.Linear(self.hidden_size, 2 * self.intermediate_size, bias=config.add_bias_linear)
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.add_bias_linear)
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ self.gate_up_proj_adapter_list = nn.ModuleList([])
+ for i in range(self.num_fwd_mem_blocks):
+ if i % config.num_mem_blocks == block_id:
+ gate_up_proj_adapter = nn.Sequential(
+ nn.Linear(self.config.hidden_size, self.config.adapter_rank, bias=False),
+ nn.Linear(self.config.adapter_rank, 2 * self.intermediate_size, bias=False),
+ )
+ else:
+ gate_up_proj_adapter = nn.Identity()
+ self.gate_up_proj_adapter_list.append(gate_up_proj_adapter)
+
+ layer_block_map = config.hybrid_layer_ids
+ self.layer_dic = {value: index for index, value in enumerate(layer_block_map)}
+
+ def forward(self, hidden_state, layer_idx=None):
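+ # shared gate/up projection plus this layer's un-tied low-rank (LoRA-style) adapter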
+ gate_up_state = self.gate_up_proj(hidden_state)
+ layer_idx = self.layer_dic[layer_idx]
+ gate_up_state = gate_up_state + self.gate_up_proj_adapter_list[layer_idx](hidden_state)
+
+ gate_up_state = torch.chunk(gate_up_state, 2, dim=-1)
+ hidden_state = self.act_fn(gate_up_state[0]) * gate_up_state[1]
+ output = self.down_proj(hidden_state)
+ return output
+
+
+class Zamba2AttentionDecoderLayer(ZambaAttentionDecoderLayer):
+ def __init__(self, config: Zamba2Config, block_id: int = None, layer_idx: Optional[int] = None):
+ self.block_id = block_id
+ num_gs = len(config.hybrid_layer_ids)
+ super().__init__(config, layer_idx)
+ self.self_attn = Zamba2Attention(config, layer_idx=-1, num_fwd_mem_blocks=num_gs, block_id=block_id)
+ self.feed_forward = Zamba2MLP(config, num_fwd_mem_blocks=num_gs, block_id=block_id)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ original_hidden_states: torch.Tensor,
+ layer_idx: int,
+ attention_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Zamba2HybridDynamicCache] = None,
+ output_attentions: Optional[bool] = False,
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)`
+ original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`.
+ This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The
+ concatenated tensor is then used as input of the pre-attention RMSNorm
+ (see fig. 2 in https://arxiv.org/pdf/2405.16712).
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, sequence_length)` where padding elements are indicated by 0.
+ past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
+ with `head_dim` being the embedding dimension of each attention head.
+ """
+ hidden_states = torch.concatenate([hidden_states, original_hidden_states], dim=-1)
+ hidden_states = self.input_layernorm(hidden_states)
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ layer_idx=layer_idx,
+ attention_mask=attention_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ position_embeddings=position_embeddings,
+ **kwargs,
+ )
+
+ hidden_states = self.pre_ff_layernorm(hidden_states)
+ hidden_states = self.feed_forward(hidden_states, layer_idx)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ return outputs
+
+
+class Zamba2MambaDecoderLayer(ZambaMambaDecoderLayer):
+ def __init__(self, config: Zamba2Config, layer_idx: int):
+ super().__init__(config, layer_idx)
+ self.mamba = Zamba2MambaMixer(config=config, layer_idx=layer_idx)
+ self.input_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+
+class Zamba2HybridLayer(ZambaHybridLayer):
+ def __init__(
+ self, shared_transformer: Zamba2AttentionDecoderLayer, linear: nn.Linear, mamba: Zamba2MambaDecoderLayer
+ ):
+ super().__init__(shared_transformer, linear, mamba)
+ del self.shared_transf
+ self.shared_transformer = shared_transformer
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ original_hidden_states: Optional[torch.Tensor] = None,
+ layer_idx: int = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ causal_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Zamba2HybridDynamicCache] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with
+ hidden activations to form the input of the shared transformer layer.
+ layer_idx (`int`): layer number.
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, sequence_length)` where padding elements are indicated by 0.
+ past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
+ with `head_dim` being the embedding dimension of each attention head.
+ """
+
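+ # run the shared transformer block on [hidden_states, original_hidden_states], project its output with the layer-specific linear, and feed it to the mamba decoder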
+ layer_outputs = self.shared_transformer(
+ hidden_states,
+ original_hidden_states=original_hidden_states,
+ layer_idx=layer_idx,
+ attention_mask=causal_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ position_embeddings=position_embeddings,
+ )
+
+ transformer_hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ self_attn_weights = layer_outputs[1]
+
+ transformer_hidden_states = self.linear(transformer_hidden_states)
+
+ layer_outputs = self.mamba_decoder(
+ hidden_states,
+ transformer_hidden_states=transformer_hidden_states,
+ attention_mask=attention_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ position_embeddings=position_embeddings,
+ )
+
+ if output_attentions:
+ layer_outputs = (layer_outputs[0], self_attn_weights) + layer_outputs[2:]
+
+ return layer_outputs
+
+
+class Zamba2PreTrainedModel(PreTrainedModel):
+ config_class = Zamba2Config
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["Zamba2AttentionDecoderLayer", "Zamba2MambaDecoderLayer"]
+ _skip_keys_device_placement = "past_key_values"
+ _supports_flash_attn_2 = True
+ _supports_flex_attn = True
+ _supports_sdpa = False
+ _supports_cache_class = True # Note: only supports Zamba2HybridDynamicCache
+ _is_stateful = True
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, Zamba2MambaMixer):
+ module.A_log._no_weight_decay = True
+ module.D._no_weight_decay = True
+
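+ # sample dt log-uniformly in [time_step_min, time_step_max] and store its inverse softplus in dt_bias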
+ dt = torch.exp(
+ torch.rand(self.config.n_mamba_heads)
+ * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
+ + math.log(self.config.time_step_min)
+ ).clamp(min=self.config.time_step_floor)
+ # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
+ inv_dt = dt + torch.log(-torch.expm1(-dt))
+
+ with torch.no_grad():
+ module.dt_bias.copy_(inv_dt)
+ module.dt_bias._no_reinit = True
+
+
+class Zamba2Model(ZambaModel, Zamba2PreTrainedModel):
+ """
+ Model consisting of *config.num_hidden_layers* layers.
+
+ Args:
+ config: Zamba2Config
+ """
+
+ def __init__(self, config: Zamba2Config):
+ Zamba2PreTrainedModel.__init__(self, config)
+ self.config = config
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
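+ # only num_mem_blocks distinct transformer blocks are instantiated; they are cycled over and shared by every hybrid layer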
+ blocks = [Zamba2AttentionDecoderLayer(config, block_id=k) for k in range(config.num_mem_blocks)]
+ mamba_layers = []
+ linear_layers = []
+ self.layers_block_type = config.layers_block_type
+ for i in range(config.num_hidden_layers):
+ if config.layers_block_type[i] == "mamba":
+ mamba_layers.append(Zamba2MambaDecoderLayer(config, layer_idx=i))
+ elif config.layers_block_type[i] == "hybrid":
+ linear_layers.append(nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=False))
+ mamba_layers.append(Zamba2MambaDecoderLayer(config, layer_idx=i))
+ mamba_layers = iter(mamba_layers)
+ linear_layers = iter(linear_layers)
+ blocks = cycle(blocks)
+ layers = self.get_layers(blocks, linear_layers, mamba_layers)
+ self.layers = nn.ModuleList(layers)
+
+ self._attn_implementation = config._attn_implementation
+ self.final_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ if config.use_mem_rope:
+ if config.use_long_context:
+ logger.warning_once(
+ "`use_long_context` set to `True`: using rescaled `rope_theta` and extended `max_position_embeddings`."
+ )
+ self.rotary_emb = Zamba2RotaryEmbedding(config)
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_layers(self, blocks, linear_layers, mamba_layers):
+ layers = []
+ self._tied_weights_keys = []
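+ # the patterns appended below mark the shared transformer block weights (and their adapters) as tied across hybrid layers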
+ self.first_transformer_layer_id = 0
+ for layer_id, layer_type in enumerate(self.layers_block_type):
+ if layer_type == "hybrid":
+ if self.first_transformer_layer_id == 0:
+ self.first_transformer_layer_id = layer_id
+ block = next(blocks)
+ if self.config.num_mem_blocks * len(self.config.hybrid_layer_ids) > 1:
+ prefix_pattern = rf"^layers\.{layer_id}\.shared_transformer\."
+ main_keys_pattern = re.compile(
+ prefix_pattern
+ + r"(?:"
+ + r"self_attn\.(?:q_proj|k_proj|v_proj|o_proj)\.weight|"
+ + r"feed_forward\.(?:gate_up_proj|down_proj)\.weight|"
+ + r"(?:input_layernorm|pre_ff_layernorm)\.weight"
+ + r")$"
+ )
+ self._tied_weights_keys.append(main_keys_pattern)
+
+ adapter_id = 0
+ for _layer_type in self.layers_block_type:
+ if _layer_type == "hybrid" and adapter_id % self.config.num_mem_blocks == block.block_id:
+ adapter_pattern = re.compile(
+ r"^shared_transformer\.feed_forward\.gate_up_proj_adapter_list\."
+ + str(adapter_id)
+ + r"\.(?:0|1)\.weight$"
+ )
+ self._tied_weights_keys.append(adapter_pattern)
+ adapter_id += 1
+ if self.config.use_shared_attention_adapter:
+ adapter_id = 0
+ for _layer_type in self.layers_block_type:
+ if _layer_type == "hybrid" and adapter_id % self.config.num_mem_blocks == block.block_id:
+ attn_adapter_pattern = re.compile(
+ r"^shared_transformer\.self_attn\."
+ + r"(?:linear_q_adapter_list|linear_k_adapter_list|linear_v_adapter_list)\."
+ + str(adapter_id)
+ + r"\.(?:0|1)\.weight$"
+ )
+ self._tied_weights_keys.append(attn_adapter_pattern)
+ adapter_id += 1
+ layers.append(Zamba2HybridLayer(block, next(linear_layers), next(mamba_layers)))
+ else:
+ layers.append(next(mamba_layers))
+ return layers
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Zamba2HybridDynamicCache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError(
+ "You must specify exactly one of input_ids or inputs_embeds"
+ )
+
+ if self.gradient_checkpointing and self.training and use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
+ )
+ use_cache = False
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ hidden_states = inputs_embeds
+
+ original_hidden_states = torch.clone(inputs_embeds)
+ # original_hidden_states: word embedding output that will be concatenated with hidden activations to form the input of the shared transformer layer
+
+ if use_cache and past_key_values is None:
+ batch_size = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]
+ past_key_values = Zamba2HybridDynamicCache(self.config, batch_size, dtype=self.dtype, device=self.device)
+
+ if cache_position is None:
+ past_seen_tokens = (
+ past_key_values.get_seq_length(layer_idx=self.first_transformer_layer_id)
+ if past_key_values is not None
+ else 0
+ )
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
+
+ # create position embeddings to be shared across the decoder layers
+ if self.config.use_mem_rope:
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
+ else:
+ position_embeddings = None
+
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+
+ for layer_idx, layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer.__call__,
+ hidden_states,
+ original_hidden_states,
+ layer_idx,
+ attention_mask,
+ causal_mask,
+ past_key_values,
+ output_attentions,
+ use_cache,
+ position_embeddings,
+ )
+ else:
+ layer_outputs = layer(
+ hidden_states,
+ original_hidden_states=original_hidden_states,
+ layer_idx=layer_idx,
+ attention_mask=attention_mask,
+ causal_mask=causal_mask,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ position_embeddings=position_embeddings,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ if layer_outputs[1] is not None:
+ # append attentions only for attention layers. Mamba layers return `None` as the attention weights
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.final_layernorm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if past_key_values and not past_key_values.has_previous_state:
+ past_key_values.has_previous_state = True
+
+ output = BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=past_key_values if use_cache else None,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+ return output if return_dict else output.to_tuple()
+
+
+class Zamba2ForCausalLM(ZambaForCausalLM):
+ pass
+
+
+class Zamba2ForSequenceClassification(ZambaForSequenceClassification):
+ pass
+
+
+__all__ = [
+ "Zamba2ForCausalLM",
+ "Zamba2ForSequenceClassification",
+ "Zamba2Model",
+ "Zamba2PreTrainedModel",
+]
diff --git a/src/transformers/pipelines/base.py b/src/transformers/pipelines/base.py
index f7480da357bd..971c0211b577 100644
--- a/src/transformers/pipelines/base.py
+++ b/src/transformers/pipelines/base.py
@@ -1148,6 +1148,9 @@ def device_placement(self):
elif self.device.type == "musa":
with torch.musa.device(self.device):
yield
+ elif self.device.type == "xpu":
+ with torch.xpu.device(self.device):
+ yield
else:
yield
diff --git a/src/transformers/pipelines/image_classification.py b/src/transformers/pipelines/image_classification.py
index 0085e5eb73f8..a9b318df5527 100644
--- a/src/transformers/pipelines/image_classification.py
+++ b/src/transformers/pipelines/image_classification.py
@@ -189,9 +189,9 @@ def _forward(self, model_inputs):
def postprocess(self, model_outputs, function_to_apply=None, top_k=5):
if function_to_apply is None:
- if self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels == 1:
+ if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
function_to_apply = ClassificationFunction.SIGMOID
- elif self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels > 1:
+ elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
function_to_apply = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
function_to_apply = self.model.config.function_to_apply
diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py
index 8e687724faf0..6d1965e29d79 100644
--- a/src/transformers/testing_utils.py
+++ b/src/transformers/testing_utils.py
@@ -1435,6 +1435,7 @@ def set_model_tester_for_less_flaky_test(test_case):
# TODO (if possible): Avoid exceptional cases
exceptional_classes = [
"ZambaModelTester",
+ "Zamba2ModelTester",
"RwkvModelTester",
"AriaVisionText2TextModelTester",
"GPTNeoModelTester",
diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py
index f49d65941c7b..f379119289c6 100644
--- a/src/transformers/utils/dummy_pt_objects.py
+++ b/src/transformers/utils/dummy_pt_objects.py
@@ -10576,6 +10576,34 @@ def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
+class Zamba2ForCausalLM(metaclass=DummyObject):
+ _backends = ["torch"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch"])
+
+
+class Zamba2ForSequenceClassification(metaclass=DummyObject):
+ _backends = ["torch"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch"])
+
+
+class Zamba2Model(metaclass=DummyObject):
+ _backends = ["torch"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch"])
+
+
+class Zamba2PreTrainedModel(metaclass=DummyObject):
+ _backends = ["torch"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch"])
+
+
class ZoeDepthForDepthEstimation(metaclass=DummyObject):
_backends = ["torch"]
diff --git a/tests/agents/test_agent_types.py b/tests/agents/test_agent_types.py
index 5b54c83d0a5f..0fb56417d5c3 100644
--- a/tests/agents/test_agent_types.py
+++ b/tests/agents/test_agent_types.py
@@ -47,7 +47,7 @@ def test_from_tensor(self):
path = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
- self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
+ torch.testing.assert_close(tensor, agent_type.to_raw(), rtol=1e-4, atol=1e-4)
del agent_type
@@ -56,7 +56,7 @@ def test_from_tensor(self):
# Ensure that the file contains the same value as the original tensor
new_tensor, _ = sf.read(path)
- self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))
+ torch.testing.assert_close(tensor, torch.tensor(new_tensor), rtol=1e-4, atol=1e-4)
def test_from_string(self):
tensor = torch.rand(12, dtype=torch.float64) - 0.5
@@ -65,7 +65,7 @@ def test_from_string(self):
agent_type = AgentAudio(path)
- self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
+ torch.testing.assert_close(tensor, agent_type.to_raw(), rtol=1e-4, atol=1e-4)
self.assertEqual(agent_type.to_string(), path)
@@ -78,7 +78,7 @@ def test_from_tensor(self):
path = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
- self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))
+ torch.testing.assert_close(tensor, agent_type._tensor, rtol=1e-4, atol=1e-4)
self.assertIsInstance(agent_type.to_raw(), Image.Image)
diff --git a/tests/bettertransformer/test_integration.py b/tests/bettertransformer/test_integration.py
index 8e2208bec611..8f67852bfd05 100644
--- a/tests/bettertransformer/test_integration.py
+++ b/tests/bettertransformer/test_integration.py
@@ -66,7 +66,7 @@ def test_transform_and_reverse(self):
)
output_from_pretrained = model_reloaded.generate(**inp)
- self.assertTrue(torch.allclose(output, output_from_pretrained))
+ torch.testing.assert_close(output, output_from_pretrained)
def test_error_save_pretrained(self):
r"""
diff --git a/tests/deepspeed/test_deepspeed.py b/tests/deepspeed/test_deepspeed.py
index 95a803601613..89a8bd1d1bb6 100644
--- a/tests/deepspeed/test_deepspeed.py
+++ b/tests/deepspeed/test_deepspeed.py
@@ -360,14 +360,14 @@ def bad_deepspeed_create_sinusoidal_positions(num_pos: int, dim: int) -> torch.T
model.config.max_position_embeddings, model.config.rotary_dim
)
self.assertFalse(torch.allclose(good_deepspeed_sin_cos, bad_deepspeed_sin_cos))
- self.assertTrue(torch.allclose(good_torch_sin_cos, good_deepspeed_sin_cos.cpu()))
+ torch.testing.assert_close(good_torch_sin_cos, good_deepspeed_sin_cos.cpu())
# Finally, we can see that the incorrect pattern is okay on vanilla torch, demostrating that this issue is
# exclusive to DeepSpeed
bad_torch_sin_cos = bad_deepspeed_create_sinusoidal_positions(
model.config.max_position_embeddings, model.config.rotary_dim
)
- self.assertTrue(torch.allclose(bad_torch_sin_cos, good_torch_sin_cos))
+ torch.testing.assert_close(bad_torch_sin_cos, good_torch_sin_cos)
class TrainerIntegrationDeepSpeedWithCustomConfig(TestCasePlus):
diff --git a/tests/generation/test_logits_process.py b/tests/generation/test_logits_process.py
index aeebb5c4c53d..a922a71c22c6 100644
--- a/tests/generation/test_logits_process.py
+++ b/tests/generation/test_logits_process.py
@@ -166,8 +166,8 @@ def test_temperature_dist_warper(self):
processed_scores = temp_dist_warper_smoother(input_ids, scores)
# uniform distribution stays uniform
- self.assertTrue(torch.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
- self.assertTrue(torch.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
+ torch.testing.assert_close(probs[0, :], warped_prob_sharp[0, :], rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(probs[0, :], warped_prob_smooth[0, :], rtol=1e-3, atol=1e-3)
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
@@ -288,7 +288,7 @@ def test_top_p_dist_warper(self):
EXPECTED_FILTERED_DIST = torch.tensor(
[[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float
)
- self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
+ torch.testing.assert_close(filtered_dist, EXPECTED_FILTERED_DIST, rtol=1e-3, atol=1e-3)
# processor should not change logits in-place
self.assertFalse(torch.all(top_p_warp(input_ids, dist) == dist))
@@ -335,7 +335,7 @@ def test_min_p_dist_warper(self):
device=torch_device,
dtype=torch.float,
)
- self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
+ torch.testing.assert_close(filtered_dist, EXPECTED_FILTERED_DIST, rtol=1e-3, atol=1e-3)
# processor should not change logits in-place
self.assertFalse(torch.all(min_p_warp(input_ids, dist) == dist))
@@ -372,7 +372,7 @@ def test_typical_dist_warper(self):
EXPECTED_FILTERED_DIST = torch.tensor(
[[0.97, 0.0, 0.0, 0.0], [0.0, 0.2, 0.2, 0.2]], device=torch_device, dtype=torch.float
)
- self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
+ torch.testing.assert_close(filtered_dist, EXPECTED_FILTERED_DIST, rtol=1e-3, atol=1e-3)
# processor should not change logits in-place
self.assertFalse(torch.all(typical_warp(input_ids, dist) == dist))
@@ -422,7 +422,7 @@ def test_epsilon_dist_warper(self):
EXPECTED_FILTERED_DIST = torch.tensor(
[[0.87, 0, 0, 0], [0.4, 0.299, 0.101, 0.2]], device=torch_device, dtype=torch.float
)
- self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
+ torch.testing.assert_close(filtered_dist, EXPECTED_FILTERED_DIST, rtol=1e-3, atol=1e-3)
# processor should not change logits in-place
self.assertFalse(torch.all(epsilon_warp(input_ids, dist) == dist))
@@ -462,7 +462,7 @@ def test_eta_dist_warper(self):
EXPECTED_FILTERED_DIST = torch.tensor(
[[0.0, 0.1, 0.8, 0.1], [0.0, 0.0, 0.9, 0.0]], device=torch_device, dtype=torch.float
)
- self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
+ torch.testing.assert_close(filtered_dist, EXPECTED_FILTERED_DIST, rtol=1e-3, atol=1e-3)
# processor should not change logits in-place
self.assertFalse(torch.all(eta_warp(input_ids, dist) == dist))
@@ -599,7 +599,7 @@ def test_no_bad_words_dist_processor(self):
# check edge case
no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=[[4]], eos_token_id=eos_token_id)
filtered_scores = no_bad_words_dist_proc(input_ids, scores)
- self.assertTrue(torch.allclose(scores, filtered_scores, atol=1e-3))
+ torch.testing.assert_close(scores, filtered_scores, rtol=1e-3, atol=1e-3)
def test_bias_dist_processor(self):
vocab_size = 5
@@ -674,7 +674,7 @@ def test_processor_list(self):
scores_comp = processor(input_ids, scores_comp)
# scores should be equal
- self.assertTrue(torch.allclose(scores, scores_comp, atol=1e-3))
+ torch.testing.assert_close(scores, scores_comp, rtol=1e-3, atol=1e-3)
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py
index b47566354b44..438378dd4377 100644
--- a/tests/generation/test_utils.py
+++ b/tests/generation/test_utils.py
@@ -1531,7 +1531,7 @@ def _prepare_model_kwargs(input_ids, attention_mask, signature):
next_logits_with_padding = model(**model_kwargs).logits[:, -1, :]
# They should result in very similar logits
- torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, atol=1e-5, rtol=1e-5)
+ torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=1e-5, atol=1e-5)
@pytest.mark.generate
def test_past_key_values_format(self):
@@ -2279,6 +2279,7 @@ def _check_outputs(self, output, config, use_cache=False, num_return_sequences=1
"mamba",
"xlnet",
"zamba",
+ "zamba2",
)
has_standard_cache = not any(
model_name in config.__class__.__name__.lower() for model_name in models_without_standard_cache
@@ -2708,7 +2709,7 @@ def test_transition_scores_group_beam_search_encoder_decoder(self):
transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices)
transition_scores_sum = transition_scores.sum(-1)
- self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
+ torch.testing.assert_close(transition_scores_sum, outputs.sequences_scores, rtol=1e-3, atol=1e-3)
def test_beam_search_low_memory(self):
tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
diff --git a/tests/models/albert/test_modeling_albert.py b/tests/models/albert/test_modeling_albert.py
index 0a123c02ab77..8f3fe3f817a1 100644
--- a/tests/models/albert/test_modeling_albert.py
+++ b/tests/models/albert/test_modeling_albert.py
@@ -350,7 +350,7 @@ def test_inference_no_head_absolute_embedding(self):
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
)
- self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_export(self):
diff --git a/tests/models/align/test_modeling_align.py b/tests/models/align/test_modeling_align.py
index 3c7e679686f6..3a3a33edf609 100644
--- a/tests/models/align/test_modeling_align.py
+++ b/tests/models/align/test_modeling_align.py
@@ -651,4 +651,4 @@ def test_inference(self):
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[9.7093, 3.4679]], device=torch_device)
- self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
diff --git a/tests/models/altclip/test_modeling_altclip.py b/tests/models/altclip/test_modeling_altclip.py
index 658e2e38d9ad..c70269b1d196 100755
--- a/tests/models/altclip/test_modeling_altclip.py
+++ b/tests/models/altclip/test_modeling_altclip.py
@@ -612,7 +612,7 @@ def test_inference(self):
probs = outputs.logits_per_image.softmax(dim=1)
expected_probs = torch.tensor([[9.9942e-01, 5.7805e-04]], device=torch_device)
- self.assertTrue(torch.allclose(probs, expected_probs, atol=5e-3))
+ torch.testing.assert_close(probs, expected_probs, rtol=5e-3, atol=5e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
@@ -651,6 +651,6 @@ def test_inference_interpolate_pos_encoding(self):
[[-0.3589, -0.5939, 0.3534], [0.4346, 0.1647, 0.7071], [1.1404, -0.4716, 0.1664]]
).to(torch_device)
- self.assertTrue(
- torch.allclose(outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)
+ torch.testing.assert_close(
+ outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4
)
diff --git a/tests/models/aria/test_modeling_aria.py b/tests/models/aria/test_modeling_aria.py
index fcdb8f80d10f..a59a6ba07e9b 100644
--- a/tests/models/aria/test_modeling_aria.py
+++ b/tests/models/aria/test_modeling_aria.py
@@ -239,7 +239,7 @@ def test_inputs_embeds_matches_input_ids(self):
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
diff --git a/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py b/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py
index ff33de487df3..7455d5ff8561 100644
--- a/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py
+++ b/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py
@@ -174,7 +174,7 @@ def test_integration(self):
feature_extractor = ASTFeatureExtractor()
input_values = feature_extractor(input_speech, return_tensors="pt").input_values
self.assertEqual(input_values.shape, (1, 1024, 128))
- self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
+ torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)
def test_feat_extract_from_and_save_pretrained(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
diff --git a/tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py b/tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py
index fb90fec6afff..169ba3e8aeb4 100644
--- a/tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py
+++ b/tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py
@@ -266,4 +266,4 @@ def test_inference_audio_classification(self):
expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/autoformer/test_modeling_autoformer.py b/tests/models/autoformer/test_modeling_autoformer.py
index 489e872e6551..fddd3d94d4b7 100644
--- a/tests/models/autoformer/test_modeling_autoformer.py
+++ b/tests/models/autoformer/test_modeling_autoformer.py
@@ -445,7 +445,7 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
)
- self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_inference_head(self):
model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
@@ -463,7 +463,7 @@ def test_inference_head(self):
expected_slice = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
)
- self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_seq_to_seq_generation(self):
model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
@@ -481,4 +481,4 @@ def test_seq_to_seq_generation(self):
expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
mean_prediction = outputs.sequences.mean(dim=1)
- self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
+ torch.testing.assert_close(mean_prediction[0, -3:], expected_slice, rtol=1e-1, atol=1e-1)
diff --git a/tests/models/bamba/test_modeling_bamba.py b/tests/models/bamba/test_modeling_bamba.py
index 16be88f94949..68da2fdf028a 100644
--- a/tests/models/bamba/test_modeling_bamba.py
+++ b/tests/models/bamba/test_modeling_bamba.py
@@ -312,11 +312,11 @@ def test_initialization(self):
for name, param in model.named_parameters():
if param.requires_grad:
if "A_log" in name:
- A = torch.arange(1, config.mamba_n_heads + 1, dtype=torch.float32)[None, :]
- self.assertTrue(torch.allclose(param.data, torch.log(A), atol=1e-5, rtol=1e-5))
+ A = torch.arange(1, config.mamba_n_heads + 1, dtype=torch.float32)
+ torch.testing.assert_close(param.data, torch.log(A), rtol=1e-5, atol=1e-5)
elif "D" in name:
D = torch.ones(config.mamba_n_heads, dtype=torch.float32)
- self.assertTrue(torch.allclose(param.data, D, atol=1e-5, rtol=1e-5))
+ torch.testing.assert_close(param.data, D, rtol=1e-5, atol=1e-5)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
@@ -482,7 +482,7 @@ def _prepare_model_kwargs(input_ids, attention_mask, signature):
next_logits_with_padding = model(**model_kwargs).logits[:, -1, :]
# They should result in very similar logits
- torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, atol=1e-5, rtol=1e-1)
+ torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=1e-5, atol=1e-5)
@slow
diff --git a/tests/models/bark/test_modeling_bark.py b/tests/models/bark/test_modeling_bark.py
index 9bb8ef33d759..06638550951a 100644
--- a/tests/models/bark/test_modeling_bark.py
+++ b/tests/models/bark/test_modeling_bark.py
@@ -599,7 +599,7 @@ def test_inputs_embeds_matches_input_ids(self):
with torch.no_grad():
out_embeds = model(**inputs)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
@require_torch_fp16
def test_generate_fp16(self):
@@ -688,7 +688,7 @@ def test_inputs_embeds_matches_input_ids(self):
with torch.no_grad():
out_embeds = model(**inputs)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
@require_torch_fp16
def test_generate_fp16(self):
@@ -1252,8 +1252,8 @@ def test_generate_batching(self):
self.assertEqual(tuple(audio_lengths), (output1.shape[1], output2.shape[1]))
# then assert almost equal
- self.assertTrue(torch.allclose(outputs[0, : audio_lengths[0]], output1.squeeze(), atol=2e-3))
- self.assertTrue(torch.allclose(outputs[1, : audio_lengths[1]], output2.squeeze(), atol=2e-3))
+ torch.testing.assert_close(outputs[0, : audio_lengths[0]], output1.squeeze(), rtol=2e-3, atol=2e-3)
+ torch.testing.assert_close(outputs[1, : audio_lengths[1]], output2.squeeze(), rtol=2e-3, atol=2e-3)
# now test single input with return_output_lengths = True
outputs, _ = self.model.generate(**s1, **args, return_output_lengths=True)
diff --git a/tests/models/bart/test_modeling_bart.py b/tests/models/bart/test_modeling_bart.py
index e4d0df141be2..a795bfcabfb4 100644
--- a/tests/models/bart/test_modeling_bart.py
+++ b/tests/models/bart/test_modeling_bart.py
@@ -887,7 +887,7 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], device=torch_device
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-3, atol=1e-3)
@slow
def test_base_mask_filling(self):
diff --git a/tests/models/beit/test_modeling_beit.py b/tests/models/beit/test_modeling_beit.py
index e54273f78399..6ed9182ad365 100644
--- a/tests/models/beit/test_modeling_beit.py
+++ b/tests/models/beit/test_modeling_beit.py
@@ -634,7 +634,7 @@ def test_inference_masked_image_modeling_head(self):
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
).to(torch_device)
- self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
+ torch.testing.assert_close(logits[bool_masked_pos][:3, :3], expected_slice, rtol=1e-2, atol=1e-2)
@slow
def test_inference_image_classification_head_imagenet_1k(self):
@@ -655,7 +655,7 @@ def test_inference_image_classification_head_imagenet_1k(self):
expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
- self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
expected_class_idx = 281
self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
@@ -681,7 +681,7 @@ def test_inference_image_classification_head_imagenet_22k(self):
expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
- self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
expected_class_idx = 2396
self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
@@ -727,7 +727,7 @@ def test_inference_semantic_segmentation(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, :3, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_post_processing_semantic_segmentation(self):
diff --git a/tests/models/bert/test_modeling_bert.py b/tests/models/bert/test_modeling_bert.py
index 255660277425..db28e077b4ea 100644
--- a/tests/models/bert/test_modeling_bert.py
+++ b/tests/models/bert/test_modeling_bert.py
@@ -682,7 +682,7 @@ def test_inference_no_head_absolute_embedding(self):
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[[0.4249, 0.1008, 0.7531], [0.3771, 0.1188, 0.7467], [0.4152, 0.1098, 0.7108]]])
- self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_no_head_relative_embedding_key(self):
@@ -697,7 +697,7 @@ def test_inference_no_head_relative_embedding_key(self):
[[[0.0756, 0.3142, -0.5128], [0.3761, 0.3462, -0.5477], [0.2052, 0.3760, -0.1240]]]
)
- self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_no_head_relative_embedding_key_query(self):
@@ -712,7 +712,7 @@ def test_inference_no_head_relative_embedding_key_query(self):
[[[0.6496, 0.3784, 0.8203], [0.8148, 0.5656, 0.2636], [-0.0681, 0.5597, 0.7045]]]
)
- self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
def test_sdpa_ignored_mask(self):
pkv = []
diff --git a/tests/models/bert_generation/test_modeling_bert_generation.py b/tests/models/bert_generation/test_modeling_bert_generation.py
index ecd7a459e0ea..06fa6b6b12e9 100644
--- a/tests/models/bert_generation/test_modeling_bert_generation.py
+++ b/tests/models/bert_generation/test_modeling_bert_generation.py
@@ -319,7 +319,7 @@ def test_inference_no_head_absolute_embedding(self):
expected_slice = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
@@ -335,4 +335,4 @@ def test_inference_no_head_absolute_embedding(self):
expected_slice = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/big_bird/test_modeling_big_bird.py b/tests/models/big_bird/test_modeling_big_bird.py
index bda5cb62186a..8ca17eb14f18 100644
--- a/tests/models/big_bird/test_modeling_big_bird.py
+++ b/tests/models/big_bird/test_modeling_big_bird.py
@@ -674,12 +674,12 @@ def test_inference_block_sparse_pretraining(self):
device=torch_device,
)
- self.assertTrue(
- torch.allclose(prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, atol=1e-4)
+ torch.testing.assert_close(
+ prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, rtol=1e-4, atol=1e-4
)
expected_seq_relationship_logits = torch.tensor([[46.9465, 47.9517]], device=torch_device)
- self.assertTrue(torch.allclose(seq_relationship_logits, expected_seq_relationship_logits, atol=1e-4))
+ torch.testing.assert_close(seq_relationship_logits, expected_seq_relationship_logits, rtol=1e-4, atol=1e-4)
def test_inference_full_pretraining(self):
model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base", attention_type="original_full")
@@ -703,12 +703,12 @@ def test_inference_full_pretraining(self):
],
device=torch_device,
)
- self.assertTrue(
- torch.allclose(prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, atol=1e-4)
+ torch.testing.assert_close(
+ prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, rtol=1e-4, atol=1e-4
)
expected_seq_relationship_logits = torch.tensor([[41.4503, 41.2406]], device=torch_device)
- self.assertTrue(torch.allclose(seq_relationship_logits, expected_seq_relationship_logits, atol=1e-4))
+ torch.testing.assert_close(seq_relationship_logits, expected_seq_relationship_logits, rtol=1e-4, atol=1e-4)
def test_block_sparse_attention_probs(self):
"""
@@ -773,7 +773,7 @@ def test_block_sparse_attention_probs(self):
cl = torch.einsum("bhqk,bhkd->bhqd", attention_probs, value_layer)
cl = cl.view(context_layer.size())
- self.assertTrue(torch.allclose(context_layer, cl, atol=0.001))
+ torch.testing.assert_close(context_layer, cl, rtol=0.001, atol=0.001)
def test_block_sparse_context_layer(self):
model = BigBirdModel.from_pretrained(
@@ -822,7 +822,7 @@ def test_block_sparse_context_layer(self):
context_layer = context_layer[0]
self.assertEqual(context_layer.shape, torch.Size((1, 128, 768)))
- self.assertTrue(torch.allclose(context_layer[0, 64:78, 300:310], targeted_cl, atol=0.0001))
+ torch.testing.assert_close(context_layer[0, 64:78, 300:310], targeted_cl, rtol=0.0001, atol=0.0001)
def test_tokenizer_inference(self):
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
@@ -871,7 +871,7 @@ def test_tokenizer_inference(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(prediction[0, 52:64, 320:324], expected_prediction, atol=1e-4))
+ torch.testing.assert_close(prediction[0, 52:64, 320:324], expected_prediction, rtol=1e-4, atol=1e-4)
def test_inference_question_answering(self):
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-base-trivia-itc")
@@ -923,8 +923,8 @@ def test_inference_question_answering(self):
)
# fmt: on
- self.assertTrue(torch.allclose(start_logits[:, 64:96], target_start_logits, atol=1e-4))
- self.assertTrue(torch.allclose(end_logits[:, 64:96], target_end_logits, atol=1e-4))
+ torch.testing.assert_close(start_logits[:, 64:96], target_start_logits, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(end_logits[:, 64:96], target_end_logits, rtol=1e-4, atol=1e-4)
input_ids = inputs["input_ids"].tolist()
answer = [
@@ -966,4 +966,4 @@ def test_auto_padding(self):
# fmt: on
self.assertEqual(output.shape, torch.Size((1, 241, 768)))
- self.assertTrue(torch.allclose(output[0, 64:78, 300:310], target, atol=0.0001))
+ torch.testing.assert_close(output[0, 64:78, 300:310], target, rtol=0.0001, atol=0.0001)
diff --git a/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py b/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py
index 745283825f0c..5c8ae48e1b03 100644
--- a/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py
+++ b/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py
@@ -418,12 +418,12 @@ def _check_batched_forward(self, attn_type, tolerance=1e-3):
with torch.no_grad():
logits_single_first = model(input_ids=input_ids[:1, :-chunk_length], labels=labels[:1]).logits
- self.assertTrue(torch.allclose(logits_batched[0, -3:], logits_single_first[0, -3:], atol=tolerance))
+ torch.testing.assert_close(logits_batched[0, -3:], logits_single_first[0, -3:], rtol=tolerance, atol=tolerance)
with torch.no_grad():
logits_single_second = model(input_ids=input_ids[1:], labels=labels[1:, :-4]).logits
- self.assertTrue(torch.allclose(logits_batched[1, :3], logits_single_second[0, :3], atol=tolerance))
+ torch.testing.assert_close(logits_batched[1, :3], logits_single_second[0, :3], rtol=tolerance, atol=tolerance)
def test_auto_padding(self):
ids = [[7, 6, 9] * 65]
@@ -445,7 +445,7 @@ def test_auto_padding(self):
"logits"
]
- self.assertTrue(torch.allclose(output1, output2, atol=1e-5))
+ torch.testing.assert_close(output1, output2, rtol=1e-5, atol=1e-5)
def test_for_change_to_full_attn(self):
self.model_tester.seq_length = 9
@@ -462,7 +462,7 @@ def test_for_change_to_full_attn(self):
model.load_state_dict(state_dict)
outputs2 = model(**input_dict)["logits"]
- self.assertTrue(torch.allclose(outputs1, outputs2, atol=1e-5))
+ torch.testing.assert_close(outputs1, outputs2, rtol=1e-5, atol=1e-5)
@unittest.skip(
reason="This architecure has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245"
@@ -523,8 +523,8 @@ def test_inference_block_sparse(self):
)
# fmt: on
- self.assertTrue(
- torch.allclose(prediction_logits[0, 4:8, 128:156], expected_prediction_logits_slice, atol=1e-4)
+ torch.testing.assert_close(
+ prediction_logits[0, 4:8, 128:156], expected_prediction_logits_slice, rtol=1e-4, atol=1e-4
)
def test_inference_full_attn(self):
@@ -544,8 +544,8 @@ def test_inference_full_attn(self):
device=torch_device,
)
# fmt: on
- self.assertTrue(
- torch.allclose(prediction_logits[0, 4:8, 128:156], expected_prediction_logits_slice, atol=1e-4)
+ torch.testing.assert_close(
+ prediction_logits[0, 4:8, 128:156], expected_prediction_logits_slice, rtol=1e-4, atol=1e-4
)
def test_seq_to_seq_generation(self):
diff --git a/tests/models/biogpt/test_modeling_biogpt.py b/tests/models/biogpt/test_modeling_biogpt.py
index 4f1d5d6a42f8..1082f901584e 100644
--- a/tests/models/biogpt/test_modeling_biogpt.py
+++ b/tests/models/biogpt/test_modeling_biogpt.py
@@ -432,7 +432,7 @@ def test_inference_lm_head_model(self):
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_biogpt_generation(self):
diff --git a/tests/models/bit/test_modeling_bit.py b/tests/models/bit/test_modeling_bit.py
index 504e410bb466..8e366f506abf 100644
--- a/tests/models/bit/test_modeling_bit.py
+++ b/tests/models/bit/test_modeling_bit.py
@@ -296,7 +296,7 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
diff --git a/tests/models/blip/test_modeling_blip.py b/tests/models/blip/test_modeling_blip.py
index 7e1dbbe6bb9c..c4029e6377be 100644
--- a/tests/models/blip/test_modeling_blip.py
+++ b/tests/models/blip/test_modeling_blip.py
@@ -1431,5 +1431,5 @@ def test_inference_itm(self):
expected_scores = torch.Tensor([[0.0029, 0.9971]])
- self.assertTrue(torch.allclose(torch.nn.Softmax()(out_itm[0].cpu()), expected_scores, rtol=1e-3, atol=1e-3))
- self.assertTrue(torch.allclose(out[0].cpu(), torch.Tensor([[0.5162]]), rtol=1e-3, atol=1e-3))
+ torch.testing.assert_close(torch.nn.Softmax()(out_itm[0].cpu()), expected_scores, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(out[0].cpu(), torch.Tensor([[0.5162]]), rtol=1e-3, atol=1e-3)
diff --git a/tests/models/blip_2/test_modeling_blip_2.py b/tests/models/blip_2/test_modeling_blip_2.py
index 5556f14a0b93..628eaba73852 100644
--- a/tests/models/blip_2/test_modeling_blip_2.py
+++ b/tests/models/blip_2/test_modeling_blip_2.py
@@ -901,7 +901,7 @@ def _prepare_model_kwargs(input_ids, attention_mask, signature):
next_logits_with_padding = model(**model_kwargs, pixel_values=pixel_values).logits[:, -1, :]
# They should result in very similar logits
- self.assertTrue(torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=1e-5))
+ torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=1e-5, atol=1e-5)
@unittest.skip("BLIP2 cannot generate only from input ids, and requires pixel values in all cases to be present")
@parameterized.expand([("greedy", 1), ("beam search", 2)])
@@ -2215,8 +2215,8 @@ def test_inference_itm(self):
# verify
expected_scores = torch.Tensor([[0.0238, 0.9762]])
- self.assertTrue(torch.allclose(torch.nn.Softmax()(out_itm[0].cpu()), expected_scores, rtol=1e-3, atol=1e-3))
- self.assertTrue(torch.allclose(out[0].cpu(), torch.Tensor([[0.4406]]), rtol=1e-3, atol=1e-3))
+ torch.testing.assert_close(torch.nn.Softmax()(out_itm[0].cpu()), expected_scores, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(out[0].cpu(), torch.Tensor([[0.4406]]), rtol=1e-3, atol=1e-3)
@require_torch_accelerator
@require_torch_fp16
@@ -2235,10 +2235,8 @@ def test_inference_itm_fp16(self):
# verify
expected_scores = torch.Tensor([[0.0239, 0.9761]])
- self.assertTrue(
- torch.allclose(torch.nn.Softmax()(out_itm[0].cpu().float()), expected_scores, rtol=1e-3, atol=1e-3)
- )
- self.assertTrue(torch.allclose(out[0].cpu().float(), torch.Tensor([[0.4406]]), rtol=1e-3, atol=1e-3))
+ torch.testing.assert_close(torch.nn.Softmax()(out_itm[0].cpu().float()), expected_scores, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(out[0].cpu().float(), torch.Tensor([[0.4406]]), rtol=1e-3, atol=1e-3)
@require_torch_accelerator
@require_torch_fp16
diff --git a/tests/models/bridgetower/test_modeling_bridgetower.py b/tests/models/bridgetower/test_modeling_bridgetower.py
index cceeee4912dc..66d0d82b6d75 100644
--- a/tests/models/bridgetower/test_modeling_bridgetower.py
+++ b/tests/models/bridgetower/test_modeling_bridgetower.py
@@ -689,4 +689,4 @@ def test_inference_interpolate_pos_encoding(self):
[[-0.6518, 0.4978, -0.4544], [-2.6672, -0.0843, -0.4210], [-2.4510, -0.1002, -0.3458]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.image_features[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.image_features[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/bros/test_modeling_bros.py b/tests/models/bros/test_modeling_bros.py
index b16a37a226cd..1dbf2a92fb49 100644
--- a/tests/models/bros/test_modeling_bros.py
+++ b/tests/models/bros/test_modeling_bros.py
@@ -452,4 +452,4 @@ def test_inference_no_head(self):
).to(torch_device)
torch.set_printoptions(sci_mode=False)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/camembert/test_modeling_camembert.py b/tests/models/camembert/test_modeling_camembert.py
index f779c3a80909..bf41c8a9efc6 100644
--- a/tests/models/camembert/test_modeling_camembert.py
+++ b/tests/models/camembert/test_modeling_camembert.py
@@ -60,7 +60,7 @@ def test_output_embeds_base_model(self):
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
@require_torch_sdpa
@@ -81,4 +81,4 @@ def test_output_embeds_base_model_sdpa(self):
with torch.no_grad():
output = model(input_ids)["last_hidden_state"].detach()
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/canine/test_modeling_canine.py b/tests/models/canine/test_modeling_canine.py
index efc70dff499c..31d02a2c0414 100644
--- a/tests/models/canine/test_modeling_canine.py
+++ b/tests/models/canine/test_modeling_canine.py
@@ -562,7 +562,7 @@ def test_inference_no_head(self):
]
)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-2))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-2, atol=1e-2)
# verify pooled output
expected_shape = torch.Size((1, 768))
@@ -570,4 +570,4 @@ def test_inference_no_head(self):
expected_slice = torch.tensor([-0.884311497, -0.529064834, 0.723164916])
- self.assertTrue(torch.allclose(outputs.pooler_output[0, :3], expected_slice, atol=1e-2))
+ torch.testing.assert_close(outputs.pooler_output[0, :3], expected_slice, rtol=1e-2, atol=1e-2)
diff --git a/tests/models/chameleon/test_modeling_chameleon.py b/tests/models/chameleon/test_modeling_chameleon.py
index 06fa2cf3eb55..f0d9107119fe 100644
--- a/tests/models/chameleon/test_modeling_chameleon.py
+++ b/tests/models/chameleon/test_modeling_chameleon.py
@@ -320,7 +320,7 @@ def test_model_rope_scaling(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
diff --git a/tests/models/chinese_clip/test_modeling_chinese_clip.py b/tests/models/chinese_clip/test_modeling_chinese_clip.py
index 647b3ac7b73a..d63c152431cf 100644
--- a/tests/models/chinese_clip/test_modeling_chinese_clip.py
+++ b/tests/models/chinese_clip/test_modeling_chinese_clip.py
@@ -739,7 +739,7 @@ def test_inference(self):
probs = outputs.logits_per_image.softmax(dim=1)
expected_probs = torch.tensor([[1.2686e-03, 5.4499e-02, 6.7968e-04, 9.4355e-01]], device=torch_device)
- self.assertTrue(torch.allclose(probs, expected_probs, atol=5e-3))
+ torch.testing.assert_close(probs, expected_probs, rtol=5e-3, atol=5e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
@@ -775,6 +775,6 @@ def test_inference_interpolate_pos_encoding(self):
[[-0.3990, 0.2983, -0.1239], [-0.1452, -0.2759, 0.0403], [-0.3149, -0.4763, 0.8555]]
).to(torch_device)
- self.assertTrue(
- torch.allclose(outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)
+ torch.testing.assert_close(
+ outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4
)
diff --git a/tests/models/clap/test_feature_extraction_clap.py b/tests/models/clap/test_feature_extraction_clap.py
index 0d6c00b79dde..d0e2bb44a344 100644
--- a/tests/models/clap/test_feature_extraction_clap.py
+++ b/tests/models/clap/test_feature_extraction_clap.py
@@ -285,8 +285,8 @@ def test_integration_fusion_short_input(self):
input_features = feature_extractor(input_speech, return_tensors="pt", padding=padding).input_features
self.assertEqual(input_features.shape, (1, 4, 1001, 64))
- self.assertTrue(torch.allclose(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], atol=1e-4))
- self.assertTrue(torch.allclose(input_features[0, 0, idx_in_mel[1]], EXPECTED_VALUES[1], atol=1e-4))
+ torch.testing.assert_close(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(input_features[0, 0, idx_in_mel[1]], EXPECTED_VALUES[1], rtol=1e-4, atol=1e-4)
self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 1]))
self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 2]))
@@ -408,8 +408,8 @@ def test_integration_rand_trunc_short_input(self):
input_speech, return_tensors="pt", truncation="rand_trunc", padding=padding
).input_features
self.assertEqual(input_features.shape, (1, 1, 1001, 64))
- self.assertTrue(torch.allclose(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], atol=1e-4))
- self.assertTrue(torch.allclose(input_features[0, 0, idx_in_mel[1]], EXPECTED_VALUES[1], atol=1e-4))
+ torch.testing.assert_close(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(input_features[0, 0, idx_in_mel[1]], EXPECTED_VALUES[1], rtol=1e-4, atol=1e-4)
def test_integration_fusion_long_input(self):
# fmt: off
@@ -475,7 +475,7 @@ def test_integration_fusion_long_input(self):
set_seed(987654321)
input_features = feature_extractor(input_speech, return_tensors="pt", padding=padding).input_features
self.assertEqual(input_features.shape, (1, 4, 1001, 64))
- self.assertTrue(torch.allclose(input_features[0, block_idx, MEL_BIN], EXPECTED_VALUES, atol=1e-3))
+ torch.testing.assert_close(input_features[0, block_idx, MEL_BIN], EXPECTED_VALUES, rtol=1e-3, atol=1e-3)
def test_integration_rand_trunc_long_input(self):
# fmt: off
@@ -544,4 +544,4 @@ def test_integration_rand_trunc_long_input(self):
input_speech, return_tensors="pt", truncation="rand_trunc", padding=padding
).input_features
self.assertEqual(input_features.shape, (1, 1, 1001, 64))
- self.assertTrue(torch.allclose(input_features[0, 0, MEL_BIN], EXPECTED_VALUES, atol=1e-4))
+ torch.testing.assert_close(input_features[0, 0, MEL_BIN], EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/clip/test_modeling_clip.py b/tests/models/clip/test_modeling_clip.py
index fa5de84e0620..75ee9a189ad4 100644
--- a/tests/models/clip/test_modeling_clip.py
+++ b/tests/models/clip/test_modeling_clip.py
@@ -1235,7 +1235,7 @@ def test_inference(self):
expected_logits = torch.tensor([[24.5701, 19.3049]], device=torch_device)
- self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
@@ -1270,6 +1270,6 @@ def test_inference_interpolate_pos_encoding(self):
[[-0.1538, 0.0322, -0.3235], [0.2893, 0.1135, -0.5708], [0.0461, 0.1540, -0.6018]]
).to(torch_device)
- self.assertTrue(
- torch.allclose(outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)
+ torch.testing.assert_close(
+ outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4
)
diff --git a/tests/models/clipseg/test_modeling_clipseg.py b/tests/models/clipseg/test_modeling_clipseg.py
index b2b047bb502c..4b712f199004 100644
--- a/tests/models/clipseg/test_modeling_clipseg.py
+++ b/tests/models/clipseg/test_modeling_clipseg.py
@@ -814,13 +814,13 @@ def test_inference_image_segmentation(self):
[[-7.4613, -7.4785, -7.3628], [-7.3268, -7.0899, -7.1333], [-6.9838, -6.7900, -6.8913]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_masks_slice, atol=1e-3))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_masks_slice, rtol=1e-3, atol=1e-3)
# verify conditional and pooled output
expected_conditional = torch.tensor([0.5601, -0.0314, 0.1980]).to(torch_device)
expected_pooled_output = torch.tensor([0.5036, -0.2681, -0.2644]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.conditional_embeddings[0, :3], expected_conditional, atol=1e-3))
- self.assertTrue(torch.allclose(outputs.pooled_output[0, :3], expected_pooled_output, atol=1e-3))
+ torch.testing.assert_close(outputs.conditional_embeddings[0, :3], expected_conditional, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(outputs.pooled_output[0, :3], expected_pooled_output, rtol=1e-3, atol=1e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
@@ -855,6 +855,6 @@ def test_inference_interpolate_pos_encoding(self):
[[-0.1538, 0.0322, -0.3235], [0.2893, 0.1135, -0.5708], [0.0461, 0.1540, -0.6018]]
).to(torch_device)
- self.assertTrue(
- torch.allclose(outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)
+ torch.testing.assert_close(
+ outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4
)
diff --git a/tests/models/clvp/test_feature_extraction_clvp.py b/tests/models/clvp/test_feature_extraction_clvp.py
index b57cb65ebb21..a536260f7ac2 100644
--- a/tests/models/clvp/test_feature_extraction_clvp.py
+++ b/tests/models/clvp/test_feature_extraction_clvp.py
@@ -238,4 +238,4 @@ def test_integration(self):
feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev")
input_features = feature_extractor(input_speech, sampling_rate=sr[0], return_tensors="pt").input_features
self.assertEqual(input_features.shape, (1, 80, 517))
- self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
+ torch.testing.assert_close(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/clvp/test_modeling_clvp.py b/tests/models/clvp/test_modeling_clvp.py
index a212b4781d0a..84a0101f6f28 100644
--- a/tests/models/clvp/test_modeling_clvp.py
+++ b/tests/models/clvp/test_modeling_clvp.py
@@ -591,14 +591,14 @@ def test_conditional_encoder(self):
[[-0.8582, 0.5228, 1.9944], [-0.0465, -1.1017, -0.0093], [-0.0466, -0.6030, -0.1280]]
)
- self.assertTrue(torch.allclose(conditioning_encoder_outputs[0, :3, :3], EXPECTED_OUTPUTS, atol=1e-4))
+ torch.testing.assert_close(conditioning_encoder_outputs[0, :3, :3], EXPECTED_OUTPUTS, rtol=1e-4, atol=1e-4)
def test_decoder_model_generate(self):
autoregressive_model_output = self.model.speech_decoder_model.generate(input_ids=self.text_tokens).cpu()
EXPECTED_OUTPUTS = torch.tensor([[147, 2, 54, 2, 43, 2, 169, 122, 29, 64, 2, 136, 37, 33, 9, 8193]])
- self.assertTrue(torch.allclose(autoregressive_model_output, EXPECTED_OUTPUTS))
+ torch.testing.assert_close(autoregressive_model_output, EXPECTED_OUTPUTS)
def test_text_and_speech_encoder_models(self):
# check for text embeds
@@ -608,7 +608,7 @@ def test_text_and_speech_encoder_models(self):
EXPECTED_TEXT_EMBEDS = torch.tensor([1.4798, -2.0005, 2.3902, -0.5042, 1.6401, -2.4135, -1.4800, 3.0118, -2.4422, 1.3266, 2.2339, 1.4761, -4.8983, -1.3592, 6.0251, 6.7364, 2.2576, 3.7229, -10.0436, 4.6676])
# fmt: on
- self.assertTrue(torch.allclose(text_embeds[0, :20], EXPECTED_TEXT_EMBEDS, atol=1e-4))
+ torch.testing.assert_close(text_embeds[0, :20], EXPECTED_TEXT_EMBEDS, rtol=1e-4, atol=1e-4)
# check for speech embeds
speech_embeds = self.model.speech_encoder_model(input_ids=self.text_tokens, return_dict=True)[0].cpu()
@@ -617,7 +617,7 @@ def test_text_and_speech_encoder_models(self):
EXPECTED_SPEECH_EMBEDS = torch.tensor([3.1202, -3.1183, -1.4264, -6.1339, 1.8885, -0.1983, 0.9461, -1.7414, 0.3320, -3.8400, -1.5715, 1.5096, -1.7576, 0.2387, 4.9758, 5.8450, -6.2534, 2.8587, -5.5816, 4.7821])
# fmt: on
- self.assertTrue(torch.allclose(speech_embeds[0, :20], EXPECTED_SPEECH_EMBEDS, atol=1e-4))
+ torch.testing.assert_close(speech_embeds[0, :20], EXPECTED_SPEECH_EMBEDS, rtol=1e-4, atol=1e-4)
def test_full_model_integration(self):
full_model_output = self.model.generate(
@@ -632,5 +632,5 @@ def test_full_model_integration(self):
EXPECTED_SPEECH_IDS = torch.tensor([[1953, 1080, 612], [1953, 612, 493], [1953, 612, 716]])
EXPECTED_SIMILARITY_SCORES = torch.tensor([[14.7660, 14.4569, 13.6472, 13.5683]])
- self.assertTrue(torch.allclose(full_model_output.speech_ids.cpu()[-3:, -3:], EXPECTED_SPEECH_IDS))
- self.assertTrue(torch.allclose(full_model_output.logits_per_text.cpu(), EXPECTED_SIMILARITY_SCORES))
+ torch.testing.assert_close(full_model_output.speech_ids.cpu()[-3:, -3:], EXPECTED_SPEECH_IDS)
+ torch.testing.assert_close(full_model_output.logits_per_text.cpu(), EXPECTED_SIMILARITY_SCORES)
diff --git a/tests/models/cohere/test_modeling_cohere.py b/tests/models/cohere/test_modeling_cohere.py
index 89e3b6898948..47cd68e3f728 100644
--- a/tests/models/cohere/test_modeling_cohere.py
+++ b/tests/models/cohere/test_modeling_cohere.py
@@ -363,4 +363,4 @@ def test_batched_small_model_logits(self):
output = model(**inputs)
logits = output.logits
- self.assertTrue(torch.allclose(EXPECTED_LOGITS, logits[:, :3, :3], rtol=1e-3, atol=1e-3))
+ torch.testing.assert_close(EXPECTED_LOGITS, logits[:, :3, :3], rtol=1e-3, atol=1e-3)
diff --git a/tests/models/colpali/test_modeling_colpali.py b/tests/models/colpali/test_modeling_colpali.py
index 646726ac700e..6f3ce6b96b41 100644
--- a/tests/models/colpali/test_modeling_colpali.py
+++ b/tests/models/colpali/test_modeling_colpali.py
@@ -238,7 +238,7 @@ def test_inputs_embeds_matches_input_ids(self):
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
@slow
@require_vision
diff --git a/tests/models/conditional_detr/test_image_processing_conditional_detr.py b/tests/models/conditional_detr/test_image_processing_conditional_detr.py
index 4e46161a7bd0..4f15b5e77bf8 100644
--- a/tests/models/conditional_detr/test_image_processing_conditional_detr.py
+++ b/tests/models/conditional_detr/test_image_processing_conditional_detr.py
@@ -179,31 +179,31 @@ def test_call_pytorch_with_coco_detection_annotations(self):
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
- self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
- self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
+ torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
- self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
+ torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
- self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
+ torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
- self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
+ torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
- self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
+ torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1066])
- self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
+ torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
def test_call_pytorch_with_coco_panoptic_annotations(self):
@@ -225,34 +225,34 @@ def test_call_pytorch_with_coco_panoptic_annotations(self):
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
- self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
- self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
+ torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
- self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
+ torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
- self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
+ torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
- self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
+ torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify masks
expected_masks_sum = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
- self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
+ torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1066])
- self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
+ torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_detection_annotations with Detr->ConditionalDetr, facebook/detr-resnet-50 ->microsoft/conditional-detr-resnet-50
@@ -319,8 +319,8 @@ def test_batched_coco_detection_annotations(self):
[0.5790, 0.4115, 0.3430, 0.7161],
]
)
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3, atol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066]))
@@ -371,8 +371,8 @@ def test_batched_coco_detection_annotations(self):
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1, atol=1)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1, atol=1)
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_panoptic_annotations with Detr->ConditionalDetr
def test_batched_coco_panoptic_annotations(self):
@@ -442,8 +442,8 @@ def test_batched_coco_panoptic_annotations(self):
[0.2997, 0.2994, 0.5994, 0.5987],
]
)
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3, atol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066]))
@@ -495,8 +495,8 @@ def test_batched_coco_panoptic_annotations(self):
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1, atol=1)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1, atol=1)
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_max_width_max_height_resizing_and_pad_strategy with Detr->ConditionalDetr
def test_max_width_max_height_resizing_and_pad_strategy(self):
diff --git a/tests/models/conditional_detr/test_modeling_conditional_detr.py b/tests/models/conditional_detr/test_modeling_conditional_detr.py
index 2e2973679e91..5a4357c40c08 100644
--- a/tests/models/conditional_detr/test_modeling_conditional_detr.py
+++ b/tests/models/conditional_detr/test_modeling_conditional_detr.py
@@ -572,7 +572,7 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[0.4222, 0.7471, 0.8760], [0.6395, -0.2729, 0.7127], [-0.3090, 0.7642, 0.9529]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_object_detection_head(self):
model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50").to(
@@ -594,14 +594,14 @@ def test_inference_object_detection_head(self):
expected_slice_logits = torch.tensor(
[[-10.4372, -5.7558, -8.6764], [-10.5410, -5.8704, -8.0590], [-10.6827, -6.3469, -8.3923]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice_logits, rtol=1e-4, atol=1e-4)
expected_shape_boxes = torch.Size((1, model.config.num_queries, 4))
self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
expected_slice_boxes = torch.tensor(
[[0.7733, 0.6576, 0.4496], [0.5171, 0.1184, 0.9094], [0.8846, 0.5647, 0.2486]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
# verify postprocessing
results = image_processor.post_process_object_detection(
@@ -612,6 +612,6 @@ def test_inference_object_detection_head(self):
expected_slice_boxes = torch.tensor([38.3089, 72.1022, 177.6293, 118.4512]).to(torch_device)
self.assertEqual(len(results["scores"]), 5)
- self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
+ torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-4, atol=1e-4)
self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
- self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
+ torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes)
diff --git a/tests/models/convbert/test_modeling_convbert.py b/tests/models/convbert/test_modeling_convbert.py
index 84b50f572908..51daf2509c66 100644
--- a/tests/models/convbert/test_modeling_convbert.py
+++ b/tests/models/convbert/test_modeling_convbert.py
@@ -481,4 +481,4 @@ def test_inference_no_head(self):
[[[-0.0864, -0.4898, -0.3677], [0.1434, -0.2952, -0.7640], [-0.0112, -0.4432, -0.5432]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/convnext/test_modeling_convnext.py b/tests/models/convnext/test_modeling_convnext.py
index 7d7ba5c9b80e..1965a76fad11 100644
--- a/tests/models/convnext/test_modeling_convnext.py
+++ b/tests/models/convnext/test_modeling_convnext.py
@@ -288,7 +288,7 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
diff --git a/tests/models/convnextv2/test_modeling_convnextv2.py b/tests/models/convnextv2/test_modeling_convnextv2.py
index e5bb8e3d190a..18e7be96fbca 100644
--- a/tests/models/convnextv2/test_modeling_convnextv2.py
+++ b/tests/models/convnextv2/test_modeling_convnextv2.py
@@ -336,4 +336,4 @@ def test_inference_image_classification_head(self):
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/cpmant/test_modeling_cpmant.py b/tests/models/cpmant/test_modeling_cpmant.py
index 404280428ef9..e796d850a8d0 100644
--- a/tests/models/cpmant/test_modeling_cpmant.py
+++ b/tests/models/cpmant/test_modeling_cpmant.py
@@ -185,7 +185,7 @@ def test_inference_masked_lm(self):
expected_slice = torch.tensor(
[[[6.1708, 5.9244, 1.0835], [6.5207, 6.2893, -11.3324], [-1.0107, -0.0576, -5.9577]]],
)
- self.assertTrue(torch.allclose(hidden_states[:, :3, :3], expected_slice, atol=1e-2))
+ torch.testing.assert_close(hidden_states[:, :3, :3], expected_slice, rtol=1e-2, atol=1e-2)
@require_torch
@@ -202,7 +202,7 @@ def test_inference_casual(self):
expected_slice = torch.tensor(
[[[-6.4267, -6.4083, -6.3958], [-5.8802, -5.9447, -5.7811], [-5.3896, -5.4820, -5.4295]]],
)
- self.assertTrue(torch.allclose(hidden_states[:, :3, :3], expected_slice, atol=1e-2))
+ torch.testing.assert_close(hidden_states[:, :3, :3], expected_slice, rtol=1e-2, atol=1e-2)
@tooslow
def test_simple_generation(self):
diff --git a/tests/models/cvt/test_modeling_cvt.py b/tests/models/cvt/test_modeling_cvt.py
index b07b8892957b..fe02a166562d 100644
--- a/tests/models/cvt/test_modeling_cvt.py
+++ b/tests/models/cvt/test_modeling_cvt.py
@@ -267,4 +267,4 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/dac/test_feature_extraction_dac.py b/tests/models/dac/test_feature_extraction_dac.py
index 598a7c725ecc..1bc5439046aa 100644
--- a/tests/models/dac/test_feature_extraction_dac.py
+++ b/tests/models/dac/test_feature_extraction_dac.py
@@ -165,9 +165,9 @@ def test_integration(self):
feature_extractor = DacFeatureExtractor()
input_values = feature_extractor(input_audio, return_tensors="pt")["input_values"]
self.assertEqual(input_values.shape, (1, 1, 93696))
- self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
+ torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)
audio_input_end = torch.tensor(input_audio[0][-30:], dtype=torch.float32)
- self.assertTrue(torch.allclose(input_values[0, 0, -46:-16], audio_input_end, atol=1e-4))
+ torch.testing.assert_close(input_values[0, 0, -46:-16], audio_input_end, rtol=1e-4, atol=1e-4)
# Ignore copy
@unittest.skip("The DAC model doesn't support stereo logic")
diff --git a/tests/models/dac/test_modeling_dac.py b/tests/models/dac/test_modeling_dac.py
index 62e22417966e..729e40463ef8 100644
--- a/tests/models/dac/test_modeling_dac.py
+++ b/tests/models/dac/test_modeling_dac.py
@@ -438,14 +438,14 @@ def test_integration_16khz(self):
encoder_outputs_mean = torch.tensor([v.float().mean().cpu().item() for v in encoder_outputs.to_tuple()])
# make sure audio encoded codes are correct
- self.assertTrue(torch.allclose(encoder_outputs_mean, expected_encoder_sums, atol=1e-3))
+ torch.testing.assert_close(encoder_outputs_mean, expected_encoder_sums, rtol=1e-3, atol=1e-3)
_, quantized_representation, _, _ = encoder_outputs.to_tuple()
input_values_dec = model.decode(quantized_representation)[0]
input_values_enc_dec = model(inputs["input_values"])[1]
# make sure forward and decode give the same result
- self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3))
+ torch.testing.assert_close(input_values_dec, input_values_enc_dec, rtol=1e-3, atol=1e-3)
arr = inputs["input_values"][0].cpu().numpy()
arr_enc_dec = input_values_enc_dec[0].cpu().numpy()
@@ -515,10 +515,10 @@ def test_integration_24khz(self):
input_values_from_codes = model.decode(audio_codes=encoder_outputs.audio_codes)[0]
# make sure decode from audio codes and quantized values give more or less the same results
- self.assertTrue(torch.allclose(input_values_from_codes, input_values_dec, atol=1e-5))
+ torch.testing.assert_close(input_values_from_codes, input_values_dec, rtol=1e-5, atol=1e-5)
# make sure forward and decode give the same result
- self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3))
+ torch.testing.assert_close(input_values_dec, input_values_enc_dec, rtol=1e-3, atol=1e-3)
arr = inputs["input_values"][0].cpu().numpy()
arr_enc_dec = input_values_enc_dec[0].cpu().numpy()
@@ -565,14 +565,14 @@ def test_integration_44khz(self):
encoder_outputs_mean = torch.tensor([v.float().mean().cpu().item() for v in encoder_outputs.to_tuple()])
# make sure audio encoded codes are correct
- self.assertTrue(torch.allclose(encoder_outputs_mean, expected_encoder_sums, atol=1e-3))
+ torch.testing.assert_close(encoder_outputs_mean, expected_encoder_sums, rtol=1e-3, atol=1e-3)
_, quantized_representation, _, _ = encoder_outputs.to_tuple()
input_values_dec = model.decode(quantized_representation)[0]
input_values_enc_dec = model(inputs["input_values"])[1]
# make sure forward and decode give the same result
- self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3))
+ torch.testing.assert_close(input_values_dec, input_values_enc_dec, rtol=1e-3, atol=1e-3)
arr = inputs["input_values"][0].cpu().numpy()
arr_enc_dec = input_values_enc_dec[0].cpu().numpy()
@@ -622,14 +622,14 @@ def test_integration_batch_16khz(self):
encoder_outputs_mean = torch.tensor([v.float().mean().item() for v in encoder_outputs.to_tuple()])
# make sure audio encoded codes are correct
- self.assertTrue(torch.allclose(encoder_outputs_mean, expected_encoder_sums, atol=1e-3))
+ torch.testing.assert_close(encoder_outputs_mean, expected_encoder_sums, rtol=1e-3, atol=1e-3)
_, quantized_representation, _, _ = encoder_outputs.to_tuple()
input_values_dec = model.decode(quantized_representation)[0]
input_values_enc_dec = model(inputs["input_values"])[1]
# make sure forward and decode give the same result
- self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3))
+ torch.testing.assert_close(input_values_dec, input_values_enc_dec, rtol=1e-3, atol=1e-3)
arr = inputs["input_values"].cpu().numpy()
arr_enc_dec = input_values_enc_dec.cpu().numpy()
@@ -679,14 +679,14 @@ def test_integration_batch_24khz(self):
encoder_outputs_mean = torch.tensor([v.float().mean().cpu().item() for v in encoder_outputs.to_tuple()])
# make sure audio encoded codes are correct
- self.assertTrue(torch.allclose(encoder_outputs_mean, expected_encoder_sums, atol=1e-3))
+ torch.testing.assert_close(encoder_outputs_mean, expected_encoder_sums, rtol=1e-3, atol=1e-3)
_, quantized_representation, _, _ = encoder_outputs.to_tuple()
input_values_dec = model.decode(quantized_representation)[0]
input_values_enc_dec = model(inputs["input_values"])[1]
# make sure forward and decode give the same result
- self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3))
+ torch.testing.assert_close(input_values_dec, input_values_enc_dec, rtol=1e-3, atol=1e-3)
arr = inputs["input_values"].cpu().numpy()
arr_enc_dec = input_values_enc_dec.cpu().numpy()
@@ -736,14 +736,14 @@ def test_integration_batch_44khz(self):
encoder_outputs_mean = torch.tensor([v.float().mean().cpu().item() for v in encoder_outputs.to_tuple()])
# make sure audio encoded codes are correct
- self.assertTrue(torch.allclose(encoder_outputs_mean, expected_encoder_sums, atol=1e-3))
+ torch.testing.assert_close(encoder_outputs_mean, expected_encoder_sums, rtol=1e-3, atol=1e-3)
_, quantized_representation, _, _ = encoder_outputs.to_tuple()
input_values_dec = model.decode(quantized_representation)[0]
input_values_enc_dec = model(inputs["input_values"])[1]
# make sure forward and decode give the same result
- self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3))
+ torch.testing.assert_close(input_values_dec, input_values_enc_dec, rtol=1e-3, atol=1e-3)
arr = inputs["input_values"].cpu().numpy()
arr_enc_dec = input_values_enc_dec.cpu().numpy()
diff --git a/tests/models/data2vec/test_modeling_data2vec_text.py b/tests/models/data2vec/test_modeling_data2vec_text.py
index 4e44d8c62068..45482febd167 100644
--- a/tests/models/data2vec/test_modeling_data2vec_text.py
+++ b/tests/models/data2vec/test_modeling_data2vec_text.py
@@ -527,7 +527,7 @@ def test_inference_masked_lm(self):
# compare the actual values for a slice.
expected_slice = torch.tensor([[[0.2328, 0.0000, 1.1710], [2.2525, 0.0000, 1.9937], [2.1280, 0.0000, 1.8691]]])
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_no_head(self):
@@ -541,4 +541,4 @@ def test_inference_no_head(self):
[[[0.1998, -0.0379, 0.0024], [-0.0971, -0.2214, -0.1798], [-0.0789, -0.2400, -0.1898]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/data2vec/test_modeling_data2vec_vision.py b/tests/models/data2vec/test_modeling_data2vec_vision.py
index 02276d905fa4..0a9d1fd1812c 100644
--- a/tests/models/data2vec/test_modeling_data2vec_vision.py
+++ b/tests/models/data2vec/test_modeling_data2vec_vision.py
@@ -548,7 +548,7 @@ def test_inference_image_classification_head_imagenet_1k(self):
expected_slice = torch.tensor([0.3277, -0.1395, 0.0911]).to(torch_device)
- self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
expected_top2 = [model.config.label2id[i] for i in ["remote control, remote", "tabby, tabby cat"]]
self.assertEqual(logits[0].topk(2).indices.cpu().tolist(), expected_top2)
diff --git a/tests/models/dbrx/test_modeling_dbrx.py b/tests/models/dbrx/test_modeling_dbrx.py
index dee93109da24..556887bda1a9 100644
--- a/tests/models/dbrx/test_modeling_dbrx.py
+++ b/tests/models/dbrx/test_modeling_dbrx.py
@@ -394,4 +394,4 @@ def test_tiny_model_logits(self):
]
]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/deberta/test_modeling_deberta.py b/tests/models/deberta/test_modeling_deberta.py
index 48d8cb67e34f..286dc940e080 100644
--- a/tests/models/deberta/test_modeling_deberta.py
+++ b/tests/models/deberta/test_modeling_deberta.py
@@ -310,4 +310,4 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
)
- self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
+ torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/deberta_v2/test_modeling_deberta_v2.py b/tests/models/deberta_v2/test_modeling_deberta_v2.py
index ea26043248dd..02fd11ce4d5c 100644
--- a/tests/models/deberta_v2/test_modeling_deberta_v2.py
+++ b/tests/models/deberta_v2/test_modeling_deberta_v2.py
@@ -328,4 +328,4 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
)
- self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
+ torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/decision_transformer/test_modeling_decision_transformer.py b/tests/models/decision_transformer/test_modeling_decision_transformer.py
index 0c95e6291c50..b1a4d0938f71 100644
--- a/tests/models/decision_transformer/test_modeling_decision_transformer.py
+++ b/tests/models/decision_transformer/test_modeling_decision_transformer.py
@@ -231,7 +231,7 @@ def test_autoregressive_prediction(self):
)
self.assertEqual(action_pred.shape, actions.shape)
- self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
+ torch.testing.assert_close(action_pred[0, -1], expected_outputs[step], rtol=1e-4, atol=1e-4)
state, reward, _, _ = ( # env.step(action)
torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
1.0,
diff --git a/tests/models/deformable_detr/test_image_processing_deformable_detr.py b/tests/models/deformable_detr/test_image_processing_deformable_detr.py
index 4a65f1b8d178..5a8825cc6c15 100644
--- a/tests/models/deformable_detr/test_image_processing_deformable_detr.py
+++ b/tests/models/deformable_detr/test_image_processing_deformable_detr.py
@@ -186,31 +186,31 @@ def test_call_pytorch_with_coco_detection_annotations(self):
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
- self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
- self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
+ torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
- self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
+ torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
- self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
+ torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
- self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
+ torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
- self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
+ torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1066])
- self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
+ torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
def test_call_pytorch_with_coco_panoptic_annotations(self):
@@ -233,35 +233,35 @@ def test_call_pytorch_with_coco_panoptic_annotations(self):
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
- self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
- self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
+ torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
- self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
+ torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
- self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
+ torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
- self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
+ torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify masks
expected_masks_sum = 822873
relative_error = torch.abs(encoding["labels"][0]["masks"].sum() - expected_masks_sum) / expected_masks_sum
self.assertTrue(relative_error < 1e-3)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
- self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
+ torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1066])
- self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
+ torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_detection_annotations with Detr->DeformableDetr
@@ -328,8 +328,8 @@ def test_batched_coco_detection_annotations(self):
[0.5790, 0.4115, 0.3430, 0.7161],
]
)
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3, atol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066]))
@@ -380,8 +380,8 @@ def test_batched_coco_detection_annotations(self):
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1, atol=1)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1, atol=1)
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_panoptic_annotations with Detr->DeformableDetr
def test_batched_coco_panoptic_annotations(self):
@@ -451,8 +451,8 @@ def test_batched_coco_panoptic_annotations(self):
[0.2997, 0.2994, 0.5994, 0.5987],
]
)
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3, atol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066]))
@@ -504,8 +504,8 @@ def test_batched_coco_panoptic_annotations(self):
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_max_width_max_height_resizing_and_pad_strategy with Detr->DeformableDetr
def test_max_width_max_height_resizing_and_pad_strategy(self):
@@ -637,7 +637,7 @@ def test_fast_processor_equivalence_cpu_gpu_coco_detection_annotations(self):
)
)
# verify area
- self.assertTrue(torch.allclose(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu")))
+ torch.testing.assert_close(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu"))
# verify boxes
self.assertEqual(encoding_cpu["labels"][0]["boxes"].shape, encoding_gpu["labels"][0]["boxes"].shape)
self.assertTrue(
@@ -646,12 +646,12 @@ def test_fast_processor_equivalence_cpu_gpu_coco_detection_annotations(self):
)
)
# verify image_id
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu")
)
# verify is_crowd
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu")
)
# verify class_labels
self.assertTrue(
@@ -660,11 +660,11 @@ def test_fast_processor_equivalence_cpu_gpu_coco_detection_annotations(self):
)
)
# verify orig_size
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu")
)
# verify size
- self.assertTrue(torch.allclose(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu")))
+ torch.testing.assert_close(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu"))
@slow
@require_torch_gpu
@@ -701,7 +701,7 @@ def test_fast_processor_equivalence_cpu_gpu_coco_panoptic_annotations(self):
)
)
# verify area
- self.assertTrue(torch.allclose(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu")))
+ torch.testing.assert_close(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu"))
# verify boxes
self.assertEqual(encoding_cpu["labels"][0]["boxes"].shape, encoding_gpu["labels"][0]["boxes"].shape)
self.assertTrue(
@@ -710,12 +710,12 @@ def test_fast_processor_equivalence_cpu_gpu_coco_panoptic_annotations(self):
)
)
# verify image_id
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu")
)
# verify is_crowd
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu")
)
# verify class_labels
self.assertTrue(
@@ -729,8 +729,8 @@ def test_fast_processor_equivalence_cpu_gpu_coco_panoptic_annotations(self):
relative_error = torch.abs(masks_sum_cpu - masks_sum_gpu) / masks_sum_cpu
self.assertTrue(relative_error < 1e-3)
# verify orig_size
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu")
)
# verify size
- self.assertTrue(torch.allclose(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu")))
+ torch.testing.assert_close(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu"))
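For orientation, a minimal sketch of the assertion change this patch applies across the test suite; the tensors and tolerances below are illustrative and are not taken from any test file touched here:

    import torch

    expected = torch.tensor([0.2796, 0.3138, 0.3481])
    actual = expected + 1e-5  # small numerical drift, e.g. from a different kernel

    # Old pattern: torch.allclose returns a bare bool, so a failing
    # self.assertTrue(...) reports nothing about where or how far off the values are.
    assert torch.allclose(actual, expected, atol=1e-4)

    # New pattern: torch.testing.assert_close raises an AssertionError that reports
    # the largest absolute and relative differences. Note that rtol and atol must be
    # passed together (or both omitted to fall back to dtype-based defaults).
    torch.testing.assert_close(actual, expected, rtol=1e-4, atol=1e-4)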
diff --git a/tests/models/deformable_detr/test_modeling_deformable_detr.py b/tests/models/deformable_detr/test_modeling_deformable_detr.py
index b77ffb6e7778..42f692864802 100644
--- a/tests/models/deformable_detr/test_modeling_deformable_detr.py
+++ b/tests/models/deformable_detr/test_modeling_deformable_detr.py
@@ -697,11 +697,11 @@ def test_inference_object_detection_head(self):
[[0.8693, 0.2289, 0.2492], [0.3150, 0.5489, 0.5845], [0.5563, 0.7580, 0.8518]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=1e-4, atol=1e-4)
expected_shape_boxes = torch.Size((1, model.config.num_queries, 4))
self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=1e-4, atol=1e-4)
# verify postprocessing
results = image_processor.post_process_object_detection(
@@ -712,9 +712,9 @@ def test_inference_object_detection_head(self):
expected_slice_boxes = torch.tensor([16.5028, 52.8390, 318.2544, 470.7841]).to(torch_device)
self.assertEqual(len(results["scores"]), 5)
- self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
+ torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-4, atol=1e-4)
self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
- self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
+ torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes)
def test_inference_object_detection_head_with_box_refine_two_stage(self):
model = DeformableDetrForObjectDetection.from_pretrained(
@@ -740,11 +740,11 @@ def test_inference_object_detection_head_with_box_refine_two_stage(self):
[[0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.5490, 0.2763, 0.0564]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=1e-4, atol=1e-4)
expected_shape_boxes = torch.Size((1, model.config.num_queries, 4))
self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=1e-4, atol=1e-4)
@require_torch_accelerator
def test_inference_object_detection_head_equivalence_cpu_gpu(self):
diff --git a/tests/models/deit/test_modeling_deit.py b/tests/models/deit/test_modeling_deit.py
index 1b4ca6e206a9..1637b22e95ef 100644
--- a/tests/models/deit/test_modeling_deit.py
+++ b/tests/models/deit/test_modeling_deit.py
@@ -421,7 +421,7 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_interpolate_pos_encoding(self):
diff --git a/tests/models/depth_anything/test_modeling_depth_anything.py b/tests/models/depth_anything/test_modeling_depth_anything.py
index 6e7b423e9ec3..91f958921740 100644
--- a/tests/models/depth_anything/test_modeling_depth_anything.py
+++ b/tests/models/depth_anything/test_modeling_depth_anything.py
@@ -267,7 +267,7 @@ def test_inference(self):
[[8.8223, 8.6483, 8.6216], [8.3332, 8.6047, 8.7545], [8.6547, 8.6885, 8.7472]],
).to(torch_device)
- self.assertTrue(torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-6))
+ torch.testing.assert_close(predicted_depth[0, :3, :3], expected_slice, rtol=1e-6, atol=1e-6)
# -- `metric` depth model --
image_processor = DPTImageProcessor.from_pretrained("depth-anything/depth-anything-V2-metric-indoor-small-hf")
@@ -290,7 +290,7 @@ def test_inference(self):
[[1.3349, 1.2947, 1.2802], [1.2794, 1.2338, 1.2901], [1.2630, 1.2219, 1.2478]],
).to(torch_device)
- self.assertTrue(torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_export(self):
for strict in [True, False]:
diff --git a/tests/models/detr/test_image_processing_detr.py b/tests/models/detr/test_image_processing_detr.py
index a0b469f2de92..2dc84fe5e019 100644
--- a/tests/models/detr/test_image_processing_detr.py
+++ b/tests/models/detr/test_image_processing_detr.py
@@ -248,31 +248,31 @@ def test_call_pytorch_with_coco_detection_annotations(self):
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
- self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
- self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
+ torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
- self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
+ torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
- self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
+ torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
- self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
+ torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
- self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
+ torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1066])
- self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
+ torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
def test_call_pytorch_with_coco_panoptic_annotations(self):
@@ -295,35 +295,35 @@ def test_call_pytorch_with_coco_panoptic_annotations(self):
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
- self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
- self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
+ torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
- self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
+ torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
- self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
+ torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
- self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
+ torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify masks
expected_masks_sum = 822873
relative_error = torch.abs(encoding["labels"][0]["masks"].sum() - expected_masks_sum) / expected_masks_sum
self.assertTrue(relative_error < 1e-3)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
- self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
+ torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1066])
- self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
+ torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
def test_batched_coco_detection_annotations(self):
@@ -389,8 +389,8 @@ def test_batched_coco_detection_annotations(self):
[0.5790, 0.4115, 0.3430, 0.7161],
]
)
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066]))
@@ -441,8 +441,8 @@ def test_batched_coco_detection_annotations(self):
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
def test_batched_coco_panoptic_annotations(self):
# prepare image, target and masks_path
@@ -511,8 +511,8 @@ def test_batched_coco_panoptic_annotations(self):
[0.2997, 0.2994, 0.5994, 0.5987],
]
)
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066]))
@@ -564,8 +564,8 @@ def test_batched_coco_panoptic_annotations(self):
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
def test_max_width_max_height_resizing_and_pad_strategy(self):
for image_processing_class in self.image_processor_list:
@@ -694,7 +694,7 @@ def test_fast_processor_equivalence_cpu_gpu_coco_detection_annotations(self):
)
)
# verify area
- self.assertTrue(torch.allclose(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu")))
+ torch.testing.assert_close(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu"))
# verify boxes
self.assertEqual(encoding_cpu["labels"][0]["boxes"].shape, encoding_gpu["labels"][0]["boxes"].shape)
self.assertTrue(
@@ -703,12 +703,12 @@ def test_fast_processor_equivalence_cpu_gpu_coco_detection_annotations(self):
)
)
# verify image_id
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu")
)
# verify is_crowd
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu")
)
# verify class_labels
self.assertTrue(
@@ -717,11 +717,11 @@ def test_fast_processor_equivalence_cpu_gpu_coco_detection_annotations(self):
)
)
# verify orig_size
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu")
)
# verify size
- self.assertTrue(torch.allclose(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu")))
+ torch.testing.assert_close(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu"))
@slow
@require_torch_gpu
@@ -756,7 +756,7 @@ def test_fast_processor_equivalence_cpu_gpu_coco_panoptic_annotations(self):
)
)
# verify area
- self.assertTrue(torch.allclose(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu")))
+ torch.testing.assert_close(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu"))
# verify boxes
self.assertEqual(encoding_cpu["labels"][0]["boxes"].shape, encoding_gpu["labels"][0]["boxes"].shape)
self.assertTrue(
@@ -765,12 +765,12 @@ def test_fast_processor_equivalence_cpu_gpu_coco_panoptic_annotations(self):
)
)
# verify image_id
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu")
)
# verify is_crowd
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu")
)
# verify class_labels
self.assertTrue(
@@ -784,8 +784,8 @@ def test_fast_processor_equivalence_cpu_gpu_coco_panoptic_annotations(self):
relative_error = torch.abs(masks_sum_cpu - masks_sum_gpu) / masks_sum_cpu
self.assertTrue(relative_error < 1e-3)
# verify orig_size
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu")
)
# verify size
- self.assertTrue(torch.allclose(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu")))
+ torch.testing.assert_close(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu"))
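Many of the converted checks above compare integer metadata tensors (image_id, iscrowd, class_labels, orig_size, size) with no explicit tolerances. A small sketch of that tolerance-free form, with illustrative values:

    import torch

    expected_size = torch.tensor([800, 1066])
    actual_size = torch.tensor([800, 1066])

    # With rtol/atol omitted, assert_close picks dtype-based defaults; for integer
    # dtypes both defaults are zero, so this is an exact element-wise comparison
    # that also verifies shape and dtype.
    torch.testing.assert_close(actual_size, expected_size)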
diff --git a/tests/models/detr/test_modeling_detr.py b/tests/models/detr/test_modeling_detr.py
index d1e36e32824d..1451eaeb80d8 100644
--- a/tests/models/detr/test_modeling_detr.py
+++ b/tests/models/detr/test_modeling_detr.py
@@ -588,7 +588,7 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[0.0616, -0.5146, -0.4032], [-0.7629, -0.4934, -1.7153], [-0.4768, -0.6403, -0.7826]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_object_detection_head(self):
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50").to(torch_device)
@@ -608,14 +608,14 @@ def test_inference_object_detection_head(self):
expected_slice_logits = torch.tensor(
[[-19.1194, -0.0893, -11.0154], [-17.3640, -1.8035, -14.0219], [-20.0461, -0.5837, -11.1060]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice_logits, rtol=1e-4, atol=1e-4)
expected_shape_boxes = torch.Size((1, model.config.num_queries, 4))
self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
expected_slice_boxes = torch.tensor(
[[0.4433, 0.5302, 0.8853], [0.5494, 0.2517, 0.0529], [0.4998, 0.5360, 0.9956]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
# verify postprocessing
results = image_processor.post_process_object_detection(
@@ -626,9 +626,9 @@ def test_inference_object_detection_head(self):
expected_slice_boxes = torch.tensor([40.1633, 70.8115, 175.5471, 117.9841]).to(torch_device)
self.assertEqual(len(results["scores"]), 5)
- self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
+ torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-4, atol=1e-4)
self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
- self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
+ torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes)
def test_inference_panoptic_segmentation_head(self):
model = DetrForSegmentation.from_pretrained("facebook/detr-resnet-50-panoptic").to(torch_device)
@@ -648,21 +648,21 @@ def test_inference_panoptic_segmentation_head(self):
expected_slice_logits = torch.tensor(
[[-18.1565, -1.7568, -13.5029], [-16.8888, -1.4138, -14.1028], [-17.5709, -2.5080, -11.8654]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice_logits, rtol=1e-4, atol=1e-4)
expected_shape_boxes = torch.Size((1, model.config.num_queries, 4))
self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
expected_slice_boxes = torch.tensor(
[[0.5344, 0.1789, 0.9285], [0.4420, 0.0572, 0.0875], [0.6630, 0.6887, 0.1017]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
expected_shape_masks = torch.Size((1, model.config.num_queries, 200, 267))
self.assertEqual(outputs.pred_masks.shape, expected_shape_masks)
expected_slice_masks = torch.tensor(
[[-7.7558, -10.8788, -11.9797], [-11.8881, -16.4329, -17.7451], [-14.7316, -19.7383, -20.3004]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.pred_masks[0, 0, :3, :3], expected_slice_masks, atol=1e-3))
+ torch.testing.assert_close(outputs.pred_masks[0, 0, :3, :3], expected_slice_masks, rtol=1e-3, atol=1e-3)
# verify postprocessing
results = image_processor.post_process_panoptic_segmentation(
@@ -681,7 +681,7 @@ def test_inference_panoptic_segmentation_head(self):
number_of_unique_segments, expected_number_of_segments + 1
) # we add 1 for the background class
self.assertTrue(results["segmentation"].shape, expected_shape)
- self.assertTrue(torch.allclose(results["segmentation"][:3, :3], expected_slice_segmentation, atol=1e-4))
+ torch.testing.assert_close(results["segmentation"][:3, :3], expected_slice_segmentation, rtol=1e-4, atol=1e-4)
self.assertTrue(len(results["segments_info"]), expected_number_of_segments)
self.assertDictEqual(results["segments_info"][0], expected_first_segment)
@@ -713,4 +713,4 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[0.0616, -0.5146, -0.4032], [-0.7629, -0.4934, -1.7153], [-0.4768, -0.6403, -0.7826]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/diffllama/test_modeling_diffllama.py b/tests/models/diffllama/test_modeling_diffllama.py
index 64dfb5b64955..da1fe24bda4f 100644
--- a/tests/models/diffllama/test_modeling_diffllama.py
+++ b/tests/models/diffllama/test_modeling_diffllama.py
@@ -420,7 +420,7 @@ def test_model_rope_scaling_from_config(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
diff --git a/tests/models/dinat/test_modeling_dinat.py b/tests/models/dinat/test_modeling_dinat.py
index 7cfb5846e071..27a3eafddc36 100644
--- a/tests/models/dinat/test_modeling_dinat.py
+++ b/tests/models/dinat/test_modeling_dinat.py
@@ -365,7 +365,7 @@ def test_inference_image_classification_head(self):
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-0.1545, -0.7667, 0.4642]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
diff --git a/tests/models/dinov2/test_modeling_dinov2.py b/tests/models/dinov2/test_modeling_dinov2.py
index 5caa3baec1a2..9d849373349c 100644
--- a/tests/models/dinov2/test_modeling_dinov2.py
+++ b/tests/models/dinov2/test_modeling_dinov2.py
@@ -330,7 +330,7 @@ def test_inference_no_head(self):
[[-2.1747, -0.4729, 1.0936], [-3.2780, -0.8269, -0.9210], [-2.9129, 1.1284, -0.7306]],
device=torch_device,
)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
diff --git a/tests/models/dinov2_with_registers/test_modeling_dinov2_with_registers.py b/tests/models/dinov2_with_registers/test_modeling_dinov2_with_registers.py
index 6aa62138e620..185492d6d405 100644
--- a/tests/models/dinov2_with_registers/test_modeling_dinov2_with_registers.py
+++ b/tests/models/dinov2_with_registers/test_modeling_dinov2_with_registers.py
@@ -355,7 +355,7 @@ def test_inference_no_head(self):
[[-0.4636, -1.4582, -0.0274], [-1.4738, -0.8858, 0.3002], [0.0714, -0.2407, -1.5940]],
device=torch_device,
)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
diff --git a/tests/models/distilbert/test_modeling_distilbert.py b/tests/models/distilbert/test_modeling_distilbert.py
index d4c51cea1257..367785b5e265 100644
--- a/tests/models/distilbert/test_modeling_distilbert.py
+++ b/tests/models/distilbert/test_modeling_distilbert.py
@@ -340,7 +340,7 @@ def test_flash_attn_2_inference_equivalence(self):
logits = model(dummy_input, output_hidden_states=True).hidden_states[-1]
logits_fa = model_fa(dummy_input, output_hidden_states=True).hidden_states[-1]
- self.assertTrue(torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2))
+ torch.testing.assert_close(logits_fa, logits, rtol=4e-2, atol=4e-2)
output_fa = model_fa(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True)
logits_fa = output_fa.hidden_states[-1]
@@ -348,7 +348,7 @@ def test_flash_attn_2_inference_equivalence(self):
output = model(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True)
logits = output.hidden_states[-1]
- self.assertTrue(torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2))
+ torch.testing.assert_close(logits_fa[1:], logits[1:], rtol=4e-2, atol=4e-2)
# Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test.
@require_flash_attn
@@ -395,7 +395,7 @@ def test_flash_attn_2_inference_equivalence_right_padding(self):
logits = model(dummy_input, output_hidden_states=True).hidden_states[-1]
logits_fa = model_fa(dummy_input, output_hidden_states=True).hidden_states[-1]
- self.assertTrue(torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2))
+ torch.testing.assert_close(logits_fa, logits, rtol=4e-2, atol=4e-2)
output_fa = model_fa(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True)
logits_fa = output_fa.hidden_states[-1]
@@ -403,7 +403,7 @@ def test_flash_attn_2_inference_equivalence_right_padding(self):
output = model(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True)
logits = output.hidden_states[-1]
- self.assertTrue(torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2))
+ torch.testing.assert_close(logits_fa[:-1], logits[:-1], rtol=4e-2, atol=4e-2)
@require_torch
@@ -421,7 +421,7 @@ def test_inference_no_head_absolute_embedding(self):
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
)
- self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_export(self):
diff --git a/tests/models/dit/test_modeling_dit.py b/tests/models/dit/test_modeling_dit.py
index 1804afdf8289..d0f9f79b3253 100644
--- a/tests/models/dit/test_modeling_dit.py
+++ b/tests/models/dit/test_modeling_dit.py
@@ -58,4 +58,4 @@ def test_for_image_classification(self):
device=torch_device,
dtype=torch.float,
)
- self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/dpr/test_modeling_dpr.py b/tests/models/dpr/test_modeling_dpr.py
index 7a41820f2d8e..3f05bc242fdb 100644
--- a/tests/models/dpr/test_modeling_dpr.py
+++ b/tests/models/dpr/test_modeling_dpr.py
@@ -272,7 +272,7 @@ def test_inference_no_head(self):
dtype=torch.float,
device=torch_device,
)
- self.assertTrue(torch.allclose(output[:, :10], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :10], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_reader_inference(self):
@@ -303,5 +303,5 @@ def test_reader_inference(self):
dtype=torch.float,
device=torch_device,
)
- self.assertTrue(torch.allclose(outputs.start_logits[:, :10], expected_start_logits, atol=1e-4))
- self.assertTrue(torch.allclose(outputs.end_logits[:, :10], expected_end_logits, atol=1e-4))
+ torch.testing.assert_close(outputs.start_logits[:, :10], expected_start_logits, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(outputs.end_logits[:, :10], expected_end_logits, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/dpt/test_image_processing_dpt.py b/tests/models/dpt/test_image_processing_dpt.py
index f68e9bb6130a..713c722a4c2b 100644
--- a/tests/models/dpt/test_image_processing_dpt.py
+++ b/tests/models/dpt/test_image_processing_dpt.py
@@ -17,14 +17,20 @@
import unittest
import numpy as np
+from datasets import load_dataset
-from transformers.file_utils import is_vision_available
+from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
+if is_torch_available():
+ import torch
+
if is_vision_available():
+ from PIL import Image
+
from transformers import DPTImageProcessor
@@ -42,6 +48,7 @@ def __init__(
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
+ do_reduce_labels=False,
):
super().__init__()
size = size if size is not None else {"height": 18, "width": 18}
@@ -56,6 +63,7 @@ def __init__(
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
+ self.do_reduce_labels = do_reduce_labels
def prepare_image_processor_dict(self):
return {
@@ -64,6 +72,7 @@ def prepare_image_processor_dict(self):
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
+ "do_reduce_labels": self.do_reduce_labels,
}
def expected_output_image_shape(self, images):
@@ -81,6 +90,28 @@ def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=F
)
+# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_single_inputs
+def prepare_semantic_single_inputs():
+ dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test", trust_remote_code=True)
+
+ image = Image.open(dataset[0]["file"])
+ map = Image.open(dataset[1]["file"])
+
+ return image, map
+
+
+# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_batch_inputs
+def prepare_semantic_batch_inputs():
+ ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test", trust_remote_code=True)
+
+ image1 = Image.open(ds[0]["file"])
+ map1 = Image.open(ds[1]["file"])
+ image2 = Image.open(ds[2]["file"])
+ map2 = Image.open(ds[3]["file"])
+
+ return [image1, image2], [map1, map2]
+
+
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
@@ -105,6 +136,7 @@ def test_image_processor_properties(self):
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "do_pad"))
self.assertTrue(hasattr(image_processing, "size_divisor"))
+ self.assertTrue(hasattr(image_processing, "do_reduce_labels"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
@@ -138,3 +170,126 @@ def test_keep_aspect_ratio(self):
pixel_values = image_processor(image, return_tensors="pt").pixel_values
self.assertEqual(list(pixel_values.shape), [1, 3, 512, 672])
+
+ # Copied from transformers.tests.models.beit.test_image_processing_beit.BeitImageProcessingTest.test_call_segmentation_maps
+ def test_call_segmentation_maps(self):
+ # Initialize image_processor
+ image_processor = self.image_processing_class(**self.image_processor_dict)
+ # create random PyTorch tensors
+ image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
+ maps = []
+ for image in image_inputs:
+ self.assertIsInstance(image, torch.Tensor)
+ maps.append(torch.zeros(image.shape[-2:]).long())
+
+ # Test not batched input
+ encoding = image_processor(image_inputs[0], maps[0], return_tensors="pt")
+ self.assertEqual(
+ encoding["pixel_values"].shape,
+ (
+ 1,
+ self.image_processor_tester.num_channels,
+ self.image_processor_tester.size["height"],
+ self.image_processor_tester.size["width"],
+ ),
+ )
+ self.assertEqual(
+ encoding["labels"].shape,
+ (
+ 1,
+ self.image_processor_tester.size["height"],
+ self.image_processor_tester.size["width"],
+ ),
+ )
+ self.assertEqual(encoding["labels"].dtype, torch.long)
+ self.assertTrue(encoding["labels"].min().item() >= 0)
+ self.assertTrue(encoding["labels"].max().item() <= 255)
+
+ # Test batched
+ encoding = image_processor(image_inputs, maps, return_tensors="pt")
+ self.assertEqual(
+ encoding["pixel_values"].shape,
+ (
+ self.image_processor_tester.batch_size,
+ self.image_processor_tester.num_channels,
+ self.image_processor_tester.size["height"],
+ self.image_processor_tester.size["width"],
+ ),
+ )
+ self.assertEqual(
+ encoding["labels"].shape,
+ (
+ self.image_processor_tester.batch_size,
+ self.image_processor_tester.size["height"],
+ self.image_processor_tester.size["width"],
+ ),
+ )
+ self.assertEqual(encoding["labels"].dtype, torch.long)
+ self.assertTrue(encoding["labels"].min().item() >= 0)
+ self.assertTrue(encoding["labels"].max().item() <= 255)
+
+ # Test not batched input (PIL images)
+ image, segmentation_map = prepare_semantic_single_inputs()
+
+ encoding = image_processor(image, segmentation_map, return_tensors="pt")
+ self.assertEqual(
+ encoding["pixel_values"].shape,
+ (
+ 1,
+ self.image_processor_tester.num_channels,
+ self.image_processor_tester.size["height"],
+ self.image_processor_tester.size["width"],
+ ),
+ )
+ self.assertEqual(
+ encoding["labels"].shape,
+ (
+ 1,
+ self.image_processor_tester.size["height"],
+ self.image_processor_tester.size["width"],
+ ),
+ )
+ self.assertEqual(encoding["labels"].dtype, torch.long)
+ self.assertTrue(encoding["labels"].min().item() >= 0)
+ self.assertTrue(encoding["labels"].max().item() <= 255)
+
+ # Test batched input (PIL images)
+ images, segmentation_maps = prepare_semantic_batch_inputs()
+
+ encoding = image_processor(images, segmentation_maps, return_tensors="pt")
+ self.assertEqual(
+ encoding["pixel_values"].shape,
+ (
+ 2,
+ self.image_processor_tester.num_channels,
+ self.image_processor_tester.size["height"],
+ self.image_processor_tester.size["width"],
+ ),
+ )
+ self.assertEqual(
+ encoding["labels"].shape,
+ (
+ 2,
+ self.image_processor_tester.size["height"],
+ self.image_processor_tester.size["width"],
+ ),
+ )
+ self.assertEqual(encoding["labels"].dtype, torch.long)
+ self.assertTrue(encoding["labels"].min().item() >= 0)
+ self.assertTrue(encoding["labels"].max().item() <= 255)
+
+ # Copied from transformers.tests.models.beit.test_image_processing_beit.BeitImageProcessingTest.test_reduce_labels
+ def test_reduce_labels(self):
+ # Initialize image_processor
+ image_processor = self.image_processing_class(**self.image_processor_dict)
+
+ # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
+ image, map = prepare_semantic_single_inputs()
+ encoding = image_processor(image, map, return_tensors="pt")
+ self.assertTrue(encoding["labels"].min().item() >= 0)
+ self.assertTrue(encoding["labels"].max().item() <= 150)
+
+ image_processor.do_reduce_labels = True
+ encoding = image_processor(image, map, return_tensors="pt")
+ self.assertTrue(encoding["labels"].min().item() >= 0)
+ self.assertTrue(encoding["labels"].max().item() <= 255)
diff --git a/tests/models/dpt/test_modeling_dpt.py b/tests/models/dpt/test_modeling_dpt.py
index 7f841fbb2efc..c00b810cfe31 100644
--- a/tests/models/dpt/test_modeling_dpt.py
+++ b/tests/models/dpt/test_modeling_dpt.py
@@ -342,7 +342,7 @@ def test_inference_depth_estimation(self):
[[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_semantic_segmentation(self):
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade")
@@ -363,7 +363,7 @@ def test_inference_semantic_segmentation(self):
[[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_post_processing_semantic_segmentation(self):
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade")
@@ -410,7 +410,7 @@ def test_post_processing_depth_estimation(self):
predicted_depth.unsqueeze(0).unsqueeze(1), size=(500, 500), mode="bicubic", align_corners=False
).squeeze()
self.assertTrue(output_enlarged.shape == expected_shape)
- self.assertTrue(torch.allclose(predicted_depth_l, output_enlarged, rtol=1e-3))
+ torch.testing.assert_close(predicted_depth_l, output_enlarged, rtol=1e-3, atol=1e-3)
def test_export(self):
for strict in [True, False]:
@@ -431,4 +431,4 @@ def test_export(self):
eager_outputs = model(**inputs)
exported_outputs = exported_program.module().forward(inputs["pixel_values"])
self.assertEqual(eager_outputs.logits.shape, exported_outputs.logits.shape)
- self.assertTrue(torch.allclose(eager_outputs.logits, exported_outputs.logits, atol=1e-4))
+ torch.testing.assert_close(eager_outputs.logits, exported_outputs.logits, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/dpt/test_modeling_dpt_auto_backbone.py b/tests/models/dpt/test_modeling_dpt_auto_backbone.py
index ff6dd04528db..6b30ed323d4c 100644
--- a/tests/models/dpt/test_modeling_dpt_auto_backbone.py
+++ b/tests/models/dpt/test_modeling_dpt_auto_backbone.py
@@ -277,7 +277,7 @@ def test_inference_depth_estimation_dinov2(self):
[[6.0336, 7.1502, 7.4130], [6.8977, 7.2383, 7.2268], [7.9180, 8.0525, 8.0134]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_depth_estimation_beit(self):
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-beit-base-384")
@@ -299,7 +299,7 @@ def test_inference_depth_estimation_beit(self):
[[2669.7061, 2663.7144, 2674.9399], [2633.9326, 2650.9092, 2665.4270], [2621.8271, 2632.0129, 2637.2290]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_depth_estimation_swinv2(self):
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256")
@@ -321,4 +321,4 @@ def test_inference_depth_estimation_swinv2(self):
[[1032.7719, 1025.1886, 1030.2661], [1023.7619, 1021.0075, 1024.9121], [1022.5667, 1018.8522, 1021.4145]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/dpt/test_modeling_dpt_hybrid.py b/tests/models/dpt/test_modeling_dpt_hybrid.py
index dbdb5aa9e976..1229f3e40faf 100644
--- a/tests/models/dpt/test_modeling_dpt_hybrid.py
+++ b/tests/models/dpt/test_modeling_dpt_hybrid.py
@@ -335,4 +335,4 @@ def test_inference_depth_estimation(self):
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/efficientnet/test_modeling_efficientnet.py b/tests/models/efficientnet/test_modeling_efficientnet.py
index 4162e1891409..796c5a149a20 100644
--- a/tests/models/efficientnet/test_modeling_efficientnet.py
+++ b/tests/models/efficientnet/test_modeling_efficientnet.py
@@ -259,4 +259,4 @@ def test_inference_image_classification_head(self):
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-0.2962, 0.4487, 0.4499]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/electra/test_modeling_electra.py b/tests/models/electra/test_modeling_electra.py
index f6cab7107790..e2aa0d41f219 100644
--- a/tests/models/electra/test_modeling_electra.py
+++ b/tests/models/electra/test_modeling_electra.py
@@ -485,4 +485,4 @@ def test_inference_no_head_absolute_embedding(self):
[[[0.4471, 0.6821, -0.3265], [0.4627, 0.5255, -0.3668], [0.4532, 0.3313, -0.4344]]]
)
- self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/emu3/test_modeling_emu3.py b/tests/models/emu3/test_modeling_emu3.py
index 79a51742ee67..c0e84f5c5e44 100644
--- a/tests/models/emu3/test_modeling_emu3.py
+++ b/tests/models/emu3/test_modeling_emu3.py
@@ -167,7 +167,7 @@ def test_model_rope_scaling(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
@@ -368,7 +368,7 @@ def test_inputs_embeds_matches_input_ids(self):
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
@unittest.skip(
"Emu3 has a VQ module that uses `weight.data` directly in forward which prevent offloding on that module"
diff --git a/tests/models/encodec/test_feature_extraction_encodec.py b/tests/models/encodec/test_feature_extraction_encodec.py
index 112f1022c00e..d809a90f120f 100644
--- a/tests/models/encodec/test_feature_extraction_encodec.py
+++ b/tests/models/encodec/test_feature_extraction_encodec.py
@@ -159,7 +159,7 @@ def test_integration(self):
feature_extractor = EncodecFeatureExtractor()
input_values = feature_extractor(input_audio, return_tensors="pt").input_values
self.assertEqual(input_values.shape, (1, 1, 93680))
- self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
+ torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-6, atol=1e-6)
def test_integration_stereo(self):
# fmt: off
@@ -178,8 +178,8 @@ def test_integration_stereo(self):
feature_extractor = EncodecFeatureExtractor(feature_size=2)
input_values = feature_extractor(input_audio, return_tensors="pt").input_values
self.assertEqual(input_values.shape, (1, 2, 93680))
- self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
- self.assertTrue(torch.allclose(input_values[0, 1, :30], EXPECTED_INPUT_VALUES * 0.5, atol=1e-6))
+ torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-6, atol=1e-6)
+ torch.testing.assert_close(input_values[0, 1, :30], EXPECTED_INPUT_VALUES * 0.5, rtol=1e-6, atol=1e-6)
def test_truncation_and_padding(self):
input_audio = self._load_datasamples(2)
diff --git a/tests/models/encodec/test_modeling_encodec.py b/tests/models/encodec/test_modeling_encodec.py
index 47931cbe45c0..2d5eca4b83ae 100644
--- a/tests/models/encodec/test_modeling_encodec.py
+++ b/tests/models/encodec/test_modeling_encodec.py
@@ -324,7 +324,7 @@ def test_feed_forward_chunking(self):
inputs = self._prepare_for_class(inputs_dict, model_class)
inputs["input_values"] = inputs["input_values"].repeat(1, 1, 10)
- hidden_states_no_chunk = model(**inputs)[0]
+ hidden_states_no_chunk = model(**inputs)[1]
torch.manual_seed(0)
config.chunk_length_s = 1
@@ -335,8 +335,8 @@ def test_feed_forward_chunking(self):
model.to(torch_device)
model.eval()
- hidden_states_with_chunk = model(**inputs)[0]
- self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))
+ hidden_states_with_chunk = model(**inputs)[1]
+ torch.testing.assert_close(hidden_states_no_chunk, hidden_states_with_chunk, rtol=1e-1, atol=1e-2)
@unittest.skip(
reason="The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic"
@@ -507,7 +507,7 @@ def test_integration_24kHz(self):
)[-1]
# make sure forward and decode gives same result
- self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3))
+ torch.testing.assert_close(input_values_dec, input_values_enc_dec, rtol=1e-3, atol=1e-3)
# make sure shape matches
self.assertTrue(inputs["input_values"].shape == input_values_enc_dec.shape)
@@ -563,7 +563,7 @@ def test_integration_48kHz(self):
)[-1]
# make sure forward and decode gives same result
- self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3))
+ torch.testing.assert_close(input_values_dec, input_values_enc_dec, rtol=1e-3, atol=1e-3)
# make sure shape matches
self.assertTrue(inputs["input_values"].shape == input_values_enc_dec.shape)
@@ -622,7 +622,7 @@ def test_batch_48kHz(self):
input_values_enc_dec = model(input_values, bandwidth=float(bandwidth))[-1]
# make sure forward and decode gives same result
- self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec, atol=1e-3))
+ torch.testing.assert_close(input_values_dec, input_values_enc_dec, rtol=1e-3, atol=1e-3)
# make sure shape matches
self.assertTrue(input_values.shape == input_values_enc_dec.shape)
diff --git a/tests/models/esm/test_modeling_esm.py b/tests/models/esm/test_modeling_esm.py
index 56a7e4d0c67f..1bffcca22211 100644
--- a/tests/models/esm/test_modeling_esm.py
+++ b/tests/models/esm/test_modeling_esm.py
@@ -317,7 +317,7 @@ def test_inference_masked_lm(self):
expected_slice = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_no_head(self):
with torch.no_grad():
@@ -330,7 +330,7 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_bitsandbytes
def test_inference_bitsandbytes(self):
diff --git a/tests/models/esm/test_modeling_esmfold.py b/tests/models/esm/test_modeling_esmfold.py
index 7c461bdc466e..ada6b773b589 100644
--- a/tests/models/esm/test_modeling_esmfold.py
+++ b/tests/models/esm/test_modeling_esmfold.py
@@ -282,4 +282,4 @@ def test_inference_protein_folding(self):
input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
position_outputs = model(input_ids)["positions"]
expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
- self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
+ torch.testing.assert_close(position_outputs[0, 0, 0, 0], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/falcon/test_modeling_falcon.py b/tests/models/falcon/test_modeling_falcon.py
index 3ad46a92bc09..b92b4782998c 100644
--- a/tests/models/falcon/test_modeling_falcon.py
+++ b/tests/models/falcon/test_modeling_falcon.py
@@ -446,7 +446,7 @@ def test_model_rope_scaling_from_config(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
@@ -628,4 +628,4 @@ def test_falcon_alibi_sdpa_matches_eager(self):
falcon_output_eager = falcon(input_ids, output_attentions=True)[0]
falcon_output_sdpa = falcon(input_ids)[0]
- self.assertTrue(torch.allclose(falcon_output_eager, falcon_output_sdpa, atol=1e-3))
+ torch.testing.assert_close(falcon_output_eager, falcon_output_sdpa, rtol=1e-3, atol=1e-3)
diff --git a/tests/models/falcon_mamba/test_modeling_falcon_mamba.py b/tests/models/falcon_mamba/test_modeling_falcon_mamba.py
index eb1205db9cc1..75835da1a468 100644
--- a/tests/models/falcon_mamba/test_modeling_falcon_mamba.py
+++ b/tests/models/falcon_mamba/test_modeling_falcon_mamba.py
@@ -354,11 +354,12 @@ def test_initialization(self):
self.assertTrue(param.data.min().item() >= inv_dt[0])
elif "A_log" in name:
A = torch.arange(1, config.state_size + 1, dtype=torch.float32)[None, :]
- self.assertTrue(torch.allclose(param.data, torch.log(A), atol=1e-5, rtol=1e-5))
+ A = A.expand(config.intermediate_size, -1).contiguous()
+ torch.testing.assert_close(param.data, torch.log(A), rtol=1e-5, atol=1e-5)
elif "D" in name:
if param.requires_grad:
# check if it's a ones like
- self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5))
+ torch.testing.assert_close(param.data, torch.ones_like(param.data), rtol=1e-5, atol=1e-5)
@slow
# Ignore copy
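The falcon_mamba hunk above also adds an explicit expand of A before comparing it to the parameter. A sketch of why that is needed, with stand-in sizes for config.intermediate_size and config.state_size:

    import torch

    intermediate_size, state_size = 4, 3
    A = torch.arange(1, state_size + 1, dtype=torch.float32)[None, :]  # shape (1, state_size)
    param = torch.log(A).expand(intermediate_size, -1)                 # shape (intermediate_size, state_size)

    # torch.allclose broadcasts, so comparing against the (1, state_size) tensor passed:
    assert torch.allclose(param, torch.log(A), atol=1e-5, rtol=1e-5)

    # torch.testing.assert_close requires matching shapes, hence the expand performed
    # in the updated test before the comparison:
    A_expanded = A.expand(intermediate_size, -1).contiguous()
    torch.testing.assert_close(param, torch.log(A_expanded), rtol=1e-5, atol=1e-5)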
diff --git a/tests/models/fastspeech2_conformer/test_modeling_fastspeech2_conformer.py b/tests/models/fastspeech2_conformer/test_modeling_fastspeech2_conformer.py
index 5191105bc2a1..cc413b94a63e 100644
--- a/tests/models/fastspeech2_conformer/test_modeling_fastspeech2_conformer.py
+++ b/tests/models/fastspeech2_conformer/test_modeling_fastspeech2_conformer.py
@@ -390,7 +390,7 @@ def test_inference_integration(self):
)
# fmt: on
- self.assertTrue(torch.allclose(spectrogram[0, :10, :10], expected_mel_spectrogram, atol=1e-4))
+ torch.testing.assert_close(spectrogram[0, :10, :10], expected_mel_spectrogram, rtol=1e-4, atol=1e-4)
self.assertEqual(spectrogram.shape, (1, 205, model.config.num_mel_bins))
def test_training_integration(self):
@@ -447,8 +447,8 @@ def test_training_integration(self):
expected_loss = torch.tensor(74.4595, device=torch_device)
- self.assertTrue(torch.allclose(spectrogram[0, :10, :10], expected_mel_spectrogram, atol=1e-3))
- self.assertTrue(torch.allclose(loss, expected_loss, atol=1e-4))
+ torch.testing.assert_close(spectrogram[0, :10, :10], expected_mel_spectrogram, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(loss, expected_loss, rtol=1e-4, atol=1e-4)
self.assertEqual(spectrogram.shape, (1, 224, model.config.num_mel_bins))
@@ -803,5 +803,5 @@ def test_inference_integration(self):
)
# fmt: on
- self.assertTrue(torch.allclose(waveform[0, :100], expected_waveform, atol=1e-4))
+ torch.testing.assert_close(waveform[0, :100], expected_waveform, rtol=1e-4, atol=1e-4)
self.assertEqual(waveform.shape, (1, 52480))
diff --git a/tests/models/flaubert/test_modeling_flaubert.py b/tests/models/flaubert/test_modeling_flaubert.py
index 396d02da956b..2ba0b509e47e 100644
--- a/tests/models/flaubert/test_modeling_flaubert.py
+++ b/tests/models/flaubert/test_modeling_flaubert.py
@@ -514,4 +514,4 @@ def test_inference_no_head_absolute_embedding(self):
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/flava/test_modeling_flava.py b/tests/models/flava/test_modeling_flava.py
index 1c35fd705ccd..e4949c54ca21 100644
--- a/tests/models/flava/test_modeling_flava.py
+++ b/tests/models/flava/test_modeling_flava.py
@@ -1346,7 +1346,7 @@ def test_inference(self):
)
expected_logits = torch.tensor([[16.1291, 8.4033], [16.1291, 8.4033]], device=torch_device)
- self.assertTrue(torch.allclose(outputs.contrastive_logits_per_image, expected_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.contrastive_logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
self.assertAlmostEqual(outputs.loss_info.mmm_text.item(), 2.0727925, places=4)
self.assertAlmostEqual(outputs.loss_info.mmm_image.item(), 7.0282096, places=4)
self.assertAlmostEqual(outputs.loss.item(), 11.3792324, places=4)
@@ -1397,7 +1397,7 @@ def test_inference_with_itm_labels(self):
)
expected_logits = torch.tensor([[16.1291, 8.4033], [16.1291, 8.4033]], device=torch_device)
- self.assertTrue(torch.allclose(outputs.contrastive_logits_per_image, expected_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.contrastive_logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
self.assertAlmostEqual(outputs.loss_info.mmm_text.item(), 2.0727925, places=4)
self.assertAlmostEqual(outputs.loss_info.mmm_image.item(), 6.8965902, places=4)
self.assertAlmostEqual(outputs.loss.item(), 9.6084213, places=4)
diff --git a/tests/models/fnet/test_modeling_fnet.py b/tests/models/fnet/test_modeling_fnet.py
index db323500bd1d..26eec0f29908 100644
--- a/tests/models/fnet/test_modeling_fnet.py
+++ b/tests/models/fnet/test_modeling_fnet.py
@@ -550,7 +550,7 @@ def test_inference_for_masked_lm(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
@require_tokenizers
@@ -592,7 +592,7 @@ def test_inference_for_next_sentence_prediction(self):
expected_slice = torch.tensor([[-0.2234, -0.0226]], device=torch_device)
- self.assertTrue(torch.allclose(output, expected_slice, atol=1e-4))
+ torch.testing.assert_close(output, expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_model(self):
@@ -610,4 +610,4 @@ def test_inference_model(self):
[[[4.1541, -0.1051, -0.1667], [-0.9144, 0.2939, -0.0086], [-0.8472, -0.7281, 0.0256]]], device=torch_device
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/focalnet/test_modeling_focalnet.py b/tests/models/focalnet/test_modeling_focalnet.py
index 48a33fd96e52..2d3d8b6f3ac5 100644
--- a/tests/models/focalnet/test_modeling_focalnet.py
+++ b/tests/models/focalnet/test_modeling_focalnet.py
@@ -426,7 +426,7 @@ def test_inference_image_classification_head(self):
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
self.assertTrue(outputs.logits.argmax(dim=-1).item(), 281)
diff --git a/tests/models/fsmt/test_modeling_fsmt.py b/tests/models/fsmt/test_modeling_fsmt.py
index af95e0dca895..0d7f4d0cab72 100644
--- a/tests/models/fsmt/test_modeling_fsmt.py
+++ b/tests/models/fsmt/test_modeling_fsmt.py
@@ -513,7 +513,7 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[-1.5753, -1.5753, 2.8975], [-0.9540, -0.9540, 1.0299], [-3.3131, -3.3131, 0.5219]]
).to(torch_device)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def translation_setup(self, pair):
text = {
@@ -608,6 +608,6 @@ def test_positional_emb_weights_against_marian(self):
)
no_cache_pad_zero = emb1(input_ids)[0]
# XXX: only the 1st line matches the 3rd
- self.assertTrue(
- torch.allclose(torch.tensor(desired_weights, device=torch_device), no_cache_pad_zero[:3, :5], atol=1e-3)
+ torch.testing.assert_close(
+ torch.tensor(desired_weights, device=torch_device), no_cache_pad_zero[:3, :5], rtol=1e-3, atol=1e-3
)
diff --git a/tests/models/funnel/test_modeling_funnel.py b/tests/models/funnel/test_modeling_funnel.py
index e46e5dc58de6..6bd082686100 100644
--- a/tests/models/funnel/test_modeling_funnel.py
+++ b/tests/models/funnel/test_modeling_funnel.py
@@ -501,16 +501,16 @@ def test_inference_tiny_model(self):
expected_output_sum = torch.tensor(2344.8352)
expected_output_mean = torch.tensor(0.8052)
- self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4))
- self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4))
+ torch.testing.assert_close(output.sum(), expected_output_sum, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(output.mean(), expected_output_mean, rtol=1e-4, atol=1e-4)
attention_mask = torch.tensor([[1] * 7, [1] * 4 + [0] * 3] * 6 + [[0, 1, 1, 0, 0, 1, 1]])
output = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[0].abs()
expected_output_sum = torch.tensor(2343.8425)
expected_output_mean = torch.tensor(0.8049)
- self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4))
- self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4))
+ torch.testing.assert_close(output.sum(), expected_output_sum, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(output.mean(), expected_output_mean, rtol=1e-4, atol=1e-4)
@slow
def test_inference_model(self):
@@ -521,5 +521,5 @@ def test_inference_model(self):
expected_output_sum = torch.tensor(235.7246)
expected_output_mean = torch.tensor(0.0256)
- self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4))
- self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4))
+ torch.testing.assert_close(output.sum(), expected_output_sum, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(output.mean(), expected_output_mean, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/git/test_modeling_git.py b/tests/models/git/test_modeling_git.py
index ccfb41459caf..ff9c086bdb2b 100644
--- a/tests/models/git/test_modeling_git.py
+++ b/tests/models/git/test_modeling_git.py
@@ -555,7 +555,7 @@ def test_forward_pass(self):
[[-0.9514, -0.9512, -0.9507], [-0.5454, -0.5453, -0.5453], [-0.8862, -0.8857, -0.8848]],
device=torch_device,
)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_image_captioning(self):
processor = GitProcessor.from_pretrained("microsoft/git-base")
@@ -576,7 +576,7 @@ def test_inference_image_captioning(self):
self.assertEqual(generated_caption, "two cats laying on a pink blanket")
self.assertTrue(outputs.scores[-1].shape, expected_shape)
expected_slice = torch.tensor([[-0.8805, -0.8803, -0.8799]], device=torch_device)
- self.assertTrue(torch.allclose(outputs.scores[-1][0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.scores[-1][0, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_visual_question_answering(self):
processor = GitProcessor.from_pretrained("microsoft/git-base-textvqa")
@@ -653,4 +653,4 @@ def test_inference_interpolate_pos_encoding(self):
[[-1.0296, 2.5960, 0.8703], [1.7027, 1.3302, -0.4543], [-1.4932, -0.1084, 0.0502]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/glpn/test_modeling_glpn.py b/tests/models/glpn/test_modeling_glpn.py
index 81e95ab244f9..f6bd7b146c37 100644
--- a/tests/models/glpn/test_modeling_glpn.py
+++ b/tests/models/glpn/test_modeling_glpn.py
@@ -342,4 +342,4 @@ def test_inference_depth_estimation(self):
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py b/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py
index 281594492500..1ac2db408123 100644
--- a/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py
+++ b/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py
@@ -589,4 +589,4 @@ def test_mqa_reduces_to_mha(self, seed, is_train_mode=True):
attention_mqa_result = attention_mqa(hidden_states)[0]
# CHECK THAT ALL OUTPUTS ARE THE SAME
- self.assertTrue(torch.allclose(attention_mha_result, attention_mqa_result, atol=1e-5))
+ torch.testing.assert_close(attention_mha_result, attention_mqa_result, rtol=1e-5, atol=1e-5)
diff --git a/tests/models/gpt_neox/test_modeling_gpt_neox.py b/tests/models/gpt_neox/test_modeling_gpt_neox.py
index 6d5e081d50b1..97403cb8e5cb 100644
--- a/tests/models/gpt_neox/test_modeling_gpt_neox.py
+++ b/tests/models/gpt_neox/test_modeling_gpt_neox.py
@@ -359,7 +359,7 @@ def test_model_rope_scaling_from_config(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
@@ -472,4 +472,4 @@ def pythia_integration_test(self):
# alternative: tokenizer('<|im_start|>system\nA chat between')
input_ids = torch.as_tensor(input_ids)[None].to(torch_device)
outputs = model(input_ids)["logits"][:, -1][0, :30]
- self.assertTrue(torch.allclose(EXPECTED_LOGITS, outputs, atol=1e-5))
+ torch.testing.assert_close(EXPECTED_LOGITS, outputs, rtol=1e-5, atol=1e-5)
diff --git a/tests/models/granite/test_modeling_granite.py b/tests/models/granite/test_modeling_granite.py
index 686544825c35..a7ef3024de6b 100644
--- a/tests/models/granite/test_modeling_granite.py
+++ b/tests/models/granite/test_modeling_granite.py
@@ -343,7 +343,7 @@ def test_model_rope_scaling_from_config(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
@@ -444,7 +444,7 @@ def test_model_3b_logits_bf16(self):
# fmt: off
EXPECTED_MEAN = torch.tensor([[-1.9798, -3.1626, -2.8062, -2.3777, -2.7091, -2.2338, -2.5924, -2.3974]])
- self.assertTrue(torch.allclose(EXPECTED_MEAN.to(torch_device), out.logits.mean(-1), atol=1e-2, rtol=1e-2))
+ torch.testing.assert_close(EXPECTED_MEAN.to(torch_device), out.logits.mean(-1), rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:15]
EXPECTED_SLICE = torch.tensor([[4.8750, -2.1875, -2.1875, -2.1875, -2.1875, -2.8438, -2.1875, -2.1875,
@@ -474,4 +474,4 @@ def test_model_3b_logits(self):
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-2.0984, -3.1294, -2.8153, -2.3568, -2.7337, -2.2624, -2.6016, -2.4022]])
- self.assertTrue(torch.allclose(EXPECTED_MEAN.to(torch_device), out.logits.float().mean(-1), atol=1e-2, rtol=1e-2))
+ torch.testing.assert_close(EXPECTED_MEAN.to(torch_device), out.logits.float().mean(-1), rtol=1e-2, atol=1e-2)
diff --git a/tests/models/granitemoe/test_modeling_granitemoe.py b/tests/models/granitemoe/test_modeling_granitemoe.py
index 31307865a77d..9e7b7c944214 100644
--- a/tests/models/granitemoe/test_modeling_granitemoe.py
+++ b/tests/models/granitemoe/test_modeling_granitemoe.py
@@ -342,7 +342,7 @@ def test_model_rope_scaling_from_config(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
@@ -441,9 +441,7 @@ def test_model_3b_logits(self):
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-2.2122, -1.6632, -2.9269, -2.3344, -2.0143, -3.0146, -2.6839, -2.5610]])
- self.assertTrue(
- torch.allclose(EXPECTED_MEAN.to(torch_device), out.logits.float().mean(-1), atol=1e-2, rtol=1e-2)
- )
+ torch.testing.assert_close(EXPECTED_MEAN.to(torch_device), out.logits.float().mean(-1), rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:15]
EXPECTED_SLICE = torch.tensor([[4.8785, -2.2890, -2.2892, -2.2885, -2.2890, -3.5007, -2.2897, -2.2892,
diff --git a/tests/models/grounding_dino/test_image_processing_grounding_dino.py b/tests/models/grounding_dino/test_image_processing_grounding_dino.py
index 5cc1e6c232c2..4ac70a4e0fb8 100644
--- a/tests/models/grounding_dino/test_image_processing_grounding_dino.py
+++ b/tests/models/grounding_dino/test_image_processing_grounding_dino.py
@@ -193,10 +193,10 @@ def test_post_process_object_detection(self):
self.assertEqual(results[0]["scores"].shape, (self.image_processor_tester.num_queries,))
expected_scores = torch.tensor([0.7050, 0.7222, 0.7222, 0.6829, 0.7220])
- self.assertTrue(torch.allclose(results[0]["scores"], expected_scores, atol=1e-4))
+ torch.testing.assert_close(results[0]["scores"], expected_scores, rtol=1e-4, atol=1e-4)
expected_box_slice = torch.tensor([0.6908, 0.4354, 1.0737, 1.3947])
- self.assertTrue(torch.allclose(results[0]["boxes"][0], expected_box_slice, atol=1e-4))
+ torch.testing.assert_close(results[0]["boxes"][0], expected_box_slice, rtol=1e-4, atol=1e-4)
@slow
# Copied from tests.models.deformable_detr.test_image_processing_deformable_detr.DeformableDetrImageProcessingTest.test_call_pytorch_with_coco_detection_annotations with DeformableDetr->GroundingDino
@@ -218,31 +218,31 @@ def test_call_pytorch_with_coco_detection_annotations(self):
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
- self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
- self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
+ torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
- self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
+ torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
- self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
+ torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
- self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
+ torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
- self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
+ torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1066])
- self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
+ torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_detection_annotations with Detr->GroundingDino
@@ -309,8 +309,8 @@ def test_batched_coco_detection_annotations(self):
[0.5790, 0.4115, 0.3430, 0.7161],
]
)
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066]))
@@ -361,8 +361,8 @@ def test_batched_coco_detection_annotations(self):
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
@slow
# Copied from tests.models.deformable_detr.test_image_processing_deformable_detr.DeformableDetrImageProcessingTest.test_call_pytorch_with_coco_panoptic_annotations with DeformableDetr->GroundingDino
@@ -386,35 +386,35 @@ def test_call_pytorch_with_coco_panoptic_annotations(self):
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
- self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
- self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
+ torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
- self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
+ torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
- self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
+ torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
- self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
+ torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify masks
expected_masks_sum = 822873
relative_error = torch.abs(encoding["labels"][0]["masks"].sum() - expected_masks_sum) / expected_masks_sum
self.assertTrue(relative_error < 1e-3)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
- self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
+ torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1066])
- self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
+ torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_panoptic_annotations with Detr->GroundingDino
@@ -485,8 +485,8 @@ def test_batched_coco_panoptic_annotations(self):
[0.2997, 0.2994, 0.5994, 0.5987],
]
)
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066]))
@@ -538,8 +538,8 @@ def test_batched_coco_panoptic_annotations(self):
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_max_width_max_height_resizing_and_pad_strategy with Detr->GroundingDino
def test_max_width_max_height_resizing_and_pad_strategy(self):
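Several of the comparisons above (`image_id`, `iscrowd`, `class_labels`, `orig_size`, `size`) pass no tolerances at all. In that case `torch.testing.assert_close` picks defaults from the dtype: integer tensors are compared exactly (rtol = atol = 0), and dtype and device are checked as well. A small sketch of that behaviour, with made-up values:

import torch

size = torch.tensor([800, 1066])      # int64, like the "size" labels above
expected = torch.tensor([800, 1066])

# With no tolerances given, integer tensors must match exactly.
torch.testing.assert_close(size, expected)

# dtype is checked by default, so a float/int comparison needs check_dtype=False,
# after which both tensors are promoted to a common dtype before being compared.
torch.testing.assert_close(size.float(), expected, check_dtype=False)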
diff --git a/tests/models/grounding_dino/test_modeling_grounding_dino.py b/tests/models/grounding_dino/test_modeling_grounding_dino.py
index 30a8d44c8e90..b102c357e518 100644
--- a/tests/models/grounding_dino/test_modeling_grounding_dino.py
+++ b/tests/models/grounding_dino/test_modeling_grounding_dino.py
@@ -645,11 +645,11 @@ def test_inference_object_detection_head(self):
[[-4.8913, -0.1900, -0.2161], [-4.9653, -0.3719, -0.3950], [-5.9599, -3.3765, -3.3104]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=1e-3, atol=1e-3)
expected_shape_boxes = torch.Size((1, model.config.num_queries, 4))
self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=1e-4, atol=1e-4)
# verify postprocessing
results = processor.image_processor.post_process_object_detection(
@@ -659,8 +659,8 @@ def test_inference_object_detection_head(self):
expected_slice_boxes = torch.tensor([344.8143, 23.1796, 637.4004, 373.8295]).to(torch_device)
self.assertEqual(len(results["scores"]), 2)
- self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-3))
- self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes, atol=1e-2))
+ torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-2, atol=1e-2)
# verify grounded postprocessing
expected_labels = ["a cat", "a cat"]
@@ -672,8 +672,8 @@ def test_inference_object_detection_head(self):
target_sizes=[(image.height, image.width)],
)[0]
- self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-3))
- self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes, atol=1e-2))
+ torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-2, atol=1e-2)
self.assertListEqual(results["text_labels"], expected_labels)
@require_torch_accelerator
@@ -697,12 +697,12 @@ def test_inference_object_detection_head_equivalence_cpu_gpu(self):
# 3. assert equivalence
for key in cpu_outputs.keys():
- self.assertTrue(torch.allclose(cpu_outputs[key], gpu_outputs[key].cpu(), atol=1e-3))
+ torch.testing.assert_close(cpu_outputs[key], gpu_outputs[key].cpu(), rtol=1e-3, atol=1e-3)
expected_logits = torch.tensor(
[[-4.8915, -0.1900, -0.2161], [-4.9658, -0.3716, -0.3948], [-5.9596, -3.3763, -3.3103]]
)
- self.assertTrue(torch.allclose(cpu_outputs.logits[0, :3, :3], expected_logits, atol=1e-3))
+ torch.testing.assert_close(cpu_outputs.logits[0, :3, :3], expected_logits, rtol=1e-3, atol=1e-3)
# assert postprocessing
results_cpu = processor.image_processor.post_process_object_detection(
@@ -713,8 +713,8 @@ def test_inference_object_detection_head_equivalence_cpu_gpu(self):
gpu_outputs, threshold=0.35, target_sizes=[(image.height, image.width)]
)[0]
- self.assertTrue(torch.allclose(results_cpu["scores"], result_gpu["scores"].cpu(), atol=1e-3))
- self.assertTrue(torch.allclose(results_cpu["boxes"], result_gpu["boxes"].cpu(), atol=1e-3))
+ torch.testing.assert_close(results_cpu["scores"], result_gpu["scores"].cpu(), rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(results_cpu["boxes"], result_gpu["boxes"].cpu(), rtol=1e-3, atol=1e-3)
def test_cross_attention_mask(self):
model = GroundingDinoForObjectDetection.from_pretrained("IDEA-Research/grounding-dino-tiny").to(torch_device)
@@ -738,6 +738,6 @@ def test_cross_attention_mask(self):
outputs2 = model(**encoding2)
outputs_batched = model(**encoding_batched)
- self.assertTrue(torch.allclose(outputs1.logits, outputs_batched.logits[:1], atol=1e-3))
+ torch.testing.assert_close(outputs1.logits, outputs_batched.logits[:1], rtol=1e-3, atol=1e-3)
# For some reason 12 elements are > 1e-3, but the rest are fine
- self.assertTrue(torch.allclose(outputs2.logits, outputs_batched.logits[1:], atol=1.8e-3))
+ torch.testing.assert_close(outputs2.logits, outputs_batched.logits[1:], rtol=1.8e-3, atol=1.8e-3)
diff --git a/tests/models/grounding_dino/test_processor_grounding_dino.py b/tests/models/grounding_dino/test_processor_grounding_dino.py
index 8f9ced4b0c48..d527853b1eca 100644
--- a/tests/models/grounding_dino/test_processor_grounding_dino.py
+++ b/tests/models/grounding_dino/test_processor_grounding_dino.py
@@ -136,10 +136,10 @@ def test_post_process_grounded_object_detection(self):
self.assertEqual(post_processed[0]["scores"].shape, (self.num_queries,))
expected_scores = torch.tensor([0.7050, 0.7222, 0.7222, 0.6829, 0.7220])
- self.assertTrue(torch.allclose(post_processed[0]["scores"], expected_scores, atol=1e-4))
+ torch.testing.assert_close(post_processed[0]["scores"], expected_scores, rtol=1e-4, atol=1e-4)
expected_box_slice = torch.tensor([0.6908, 0.4354, 1.0737, 1.3947])
- self.assertTrue(torch.allclose(post_processed[0]["boxes"][0], expected_box_slice, atol=1e-4))
+ torch.testing.assert_close(post_processed[0]["boxes"][0], expected_box_slice, rtol=1e-4, atol=1e-4)
# Copied from tests.models.clip.test_processor_clip.CLIPProcessorTest.test_save_load_pretrained_default with CLIP->GroundingDino,GroundingDinoTokenizer->BertTokenizer
def test_save_load_pretrained_default(self):
diff --git a/tests/models/groupvit/test_modeling_groupvit.py b/tests/models/groupvit/test_modeling_groupvit.py
index a4b4f3543aad..da1db5a1fc82 100644
--- a/tests/models/groupvit/test_modeling_groupvit.py
+++ b/tests/models/groupvit/test_modeling_groupvit.py
@@ -765,4 +765,4 @@ def test_inference(self):
expected_logits = torch.tensor([[13.3523, 6.3629]])
- self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
diff --git a/tests/models/hiera/test_modeling_hiera.py b/tests/models/hiera/test_modeling_hiera.py
index b118d6db5af6..923bdd115636 100644
--- a/tests/models/hiera/test_modeling_hiera.py
+++ b/tests/models/hiera/test_modeling_hiera.py
@@ -545,7 +545,7 @@ def test_inference_image_classification_head(self):
]
).to(torch_device)
- self.assertTrue(torch.allclose(inputs.pixel_values[0, :3, :3, :3], expected_pixel_values, atol=1e-4))
+ torch.testing.assert_close(inputs.pixel_values[0, :3, :3, :3], expected_pixel_values, rtol=1e-4, atol=1e-4)
# forward pass
with torch.no_grad():
@@ -557,7 +557,7 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([[0.8028, 0.2409, -0.2254, -0.3712, -0.2848]]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :5], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :5], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_interpolate_pos_encoding(self):
model = HieraModel.from_pretrained("facebook/hiera-tiny-224-hf").to(torch_device)
@@ -581,7 +581,7 @@ def test_inference_interpolate_pos_encoding(self):
[[1.7853, 0.0690, 0.3177], [2.6853, -0.2334, 0.0889], [1.5445, -0.1515, -0.0300]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_for_pretraining(self):
@@ -619,7 +619,7 @@ def test_inference_for_pretraining(self):
]
)
- self.assertTrue(torch.allclose(outputs.logits[0, :5, :5], expected_slice.to(torch_device), atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :5, :5], expected_slice.to(torch_device), rtol=1e-4, atol=1e-4)
@require_torch
diff --git a/tests/models/hubert/test_modeling_hubert.py b/tests/models/hubert/test_modeling_hubert.py
index 191d2f8c88c3..9f77379befe2 100644
--- a/tests/models/hubert/test_modeling_hubert.py
+++ b/tests/models/hubert/test_modeling_hubert.py
@@ -812,7 +812,7 @@ def test_inference_keyword_spotting(self):
expected_logits = torch.tensor([7.6692, 17.7795, 11.1562, 11.8232], dtype=torch.float16, device=torch_device)
self.assertListEqual(predicted_ids.tolist(), expected_labels)
- self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=3e-2))
+ torch.testing.assert_close(predicted_logits, expected_logits, rtol=3e-2, atol=3e-2)
def test_inference_intent_classification(self):
model = HubertForSequenceClassification.from_pretrained(
@@ -849,9 +849,9 @@ def test_inference_intent_classification(self):
self.assertListEqual(predicted_ids_location.tolist(), expected_labels_location)
# TODO: lower the tolerance after merging the padding fix https://github.com/pytorch/fairseq/pull/3572
- self.assertTrue(torch.allclose(predicted_logits_action, expected_logits_action, atol=3e-1))
- self.assertTrue(torch.allclose(predicted_logits_object, expected_logits_object, atol=3e-1))
- self.assertTrue(torch.allclose(predicted_logits_location, expected_logits_location, atol=3e-1))
+ torch.testing.assert_close(predicted_logits_action, expected_logits_action, rtol=3e-1, atol=3e-1)
+ torch.testing.assert_close(predicted_logits_object, expected_logits_object, rtol=3e-1, atol=3e-1)
+ torch.testing.assert_close(predicted_logits_location, expected_logits_location, rtol=3e-1, atol=3e-1)
def test_inference_speaker_identification(self):
model = HubertForSequenceClassification.from_pretrained(
@@ -877,7 +877,7 @@ def test_inference_speaker_identification(self):
self.assertListEqual(predicted_ids.tolist(), expected_labels)
# TODO: lower the tolerance after merging the padding fix https://github.com/pytorch/fairseq/pull/3572
- self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=10))
+ torch.testing.assert_close(predicted_logits, expected_logits, rtol=10, atol=10)
def test_inference_emotion_recognition(self):
model = HubertForSequenceClassification.from_pretrained(
@@ -899,7 +899,7 @@ def test_inference_emotion_recognition(self):
self.assertListEqual(predicted_ids.tolist(), expected_labels)
# TODO: lower the tolerance after merging the padding fix https://github.com/pytorch/fairseq/pull/3572
- self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-1))
+ torch.testing.assert_close(predicted_logits, expected_logits, rtol=1e-1, atol=1e-1)
def test_inference_distilhubert(self):
model = HubertModel.from_pretrained("ntu-spml/distilhubert").to(torch_device)
@@ -940,8 +940,8 @@ def test_inference_distilhubert(self):
)
expected_output_sum = -3776.0730
- self.assertTrue(torch.allclose(outputs[:, :4, :4], expected_outputs_first, atol=5e-3))
- self.assertTrue(torch.allclose(outputs[:, -4:, -4:], expected_outputs_last, atol=5e-3))
+ torch.testing.assert_close(outputs[:, :4, :4], expected_outputs_first, rtol=5e-3, atol=5e-3)
+ torch.testing.assert_close(outputs[:, -4:, -4:], expected_outputs_last, rtol=5e-3, atol=5e-3)
self.assertTrue(abs(outputs.sum() - expected_output_sum) < 0.1)
def test_inference_hubert_25hz(self):
@@ -977,6 +977,6 @@ def test_inference_hubert_25hz(self):
)
expected_output_sum = 1681.7603
- self.assertTrue(torch.allclose(outputs[:, :4, :4], expected_outputs_first, atol=5e-3))
- self.assertTrue(torch.allclose(outputs[:, -4:, -4:], expected_outputs_last, atol=5e-3))
+ torch.testing.assert_close(outputs[:, :4, :4], expected_outputs_first, rtol=5e-3, atol=5e-3)
+ torch.testing.assert_close(outputs[:, -4:, -4:], expected_outputs_last, rtol=5e-3, atol=5e-3)
self.assertTrue(abs(outputs.sum() - expected_output_sum) < 0.1)
diff --git a/tests/models/idefics/test_modeling_idefics.py b/tests/models/idefics/test_modeling_idefics.py
index a8f1304b6fc7..50b286ca51ab 100644
--- a/tests/models/idefics/test_modeling_idefics.py
+++ b/tests/models/idefics/test_modeling_idefics.py
@@ -666,7 +666,7 @@ def _prepare_model_kwargs(input_ids, attention_mask, image_attention_mask, signa
next_logits_with_padding = model(**model_kwargs, **inputs_dict).logits[:, -1, :]
# They should result in very similar logits
- self.assertTrue(torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=1e-5))
+ torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=1e-5, atol=1e-5)
@pytest.mark.generate
def test_generate_continue_from_past_key_values(self):
diff --git a/tests/models/idefics2/test_modeling_idefics2.py b/tests/models/idefics2/test_modeling_idefics2.py
index 762e96bd0cc0..8cafc606bd2c 100644
--- a/tests/models/idefics2/test_modeling_idefics2.py
+++ b/tests/models/idefics2/test_modeling_idefics2.py
@@ -562,7 +562,7 @@ def test_inputs_embeds_matches_input_ids_with_generate(self):
out_ids = model.generate(input_ids=input_ids, **inputs, max_new_tokens=2)
out_embeds = model.generate(input_ids=input_ids, inputs_embeds=inputs_embeds, **inputs, max_new_tokens=2)
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
@require_torch
diff --git a/tests/models/ijepa/test_modeling_ijepa.py b/tests/models/ijepa/test_modeling_ijepa.py
index 723ddcf79888..147e576036ce 100644
--- a/tests/models/ijepa/test_modeling_ijepa.py
+++ b/tests/models/ijepa/test_modeling_ijepa.py
@@ -288,7 +288,7 @@ def test_inference_no_head(self):
[[-0.0621, -0.0054, -2.7513], [-0.1952, 0.0909, -3.9536], [0.0942, -0.0331, -1.2833]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
@require_accelerate
@@ -338,4 +338,4 @@ def test_inference_interpolate_pos_encoding(self):
[[-0.0621, -0.0054, -2.7513], [-0.1952, 0.0909, -3.9536], [0.0942, -0.0331, -1.2833]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/imagegpt/test_modeling_imagegpt.py b/tests/models/imagegpt/test_modeling_imagegpt.py
index cdbe815431f3..a8bcb8d1802a 100644
--- a/tests/models/imagegpt/test_modeling_imagegpt.py
+++ b/tests/models/imagegpt/test_modeling_imagegpt.py
@@ -354,4 +354,4 @@ def test_inference_causal_lm_head(self):
[[2.3445, 2.6889, 2.7313], [1.0530, 1.2416, 0.5699], [0.2205, 0.7749, 0.3953]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/informer/test_modeling_informer.py b/tests/models/informer/test_modeling_informer.py
index 10cb2b71824e..d6a5220aac27 100644
--- a/tests/models/informer/test_modeling_informer.py
+++ b/tests/models/informer/test_modeling_informer.py
@@ -504,7 +504,7 @@ def test_inference_no_head(self):
[[0.4699, 0.7295, 0.8967], [0.4858, 0.3810, 0.9641], [-0.0233, 0.3608, 1.0303]],
device=torch_device,
)
- self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_inference_head(self):
model = InformerForPrediction.from_pretrained("huggingface/informer-tourism-monthly").to(torch_device)
@@ -527,7 +527,7 @@ def test_inference_head(self):
expected_slice = torch.tensor(
[[0.4170, 0.9067, 0.8153], [0.3004, 0.7574, 0.7066], [0.6803, -0.6323, 1.2802]], device=torch_device
)
- self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_seq_to_seq_generation(self):
model = InformerForPrediction.from_pretrained("huggingface/informer-tourism-monthly").to(torch_device)
@@ -547,4 +547,4 @@ def test_seq_to_seq_generation(self):
expected_slice = torch.tensor([3400.8005, 4289.2637, 7101.9209], device=torch_device)
mean_prediction = outputs.sequences.mean(dim=1)
- self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
+ torch.testing.assert_close(mean_prediction[0, -3:], expected_slice, rtol=1e-1, atol=1e-1)
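One detail worth noting for conversions like the one above: unlike `torch.allclose`, `torch.testing.assert_close` requires `rtol` and `atol` to be specified together; passing only one of them raises a ValueError, and omitting both falls back to dtype-based defaults (1.3e-6 / 1e-5 for float32). A minimal sketch, with illustrative values:

import torch

pred = torch.tensor([3400.5, 4290.0, 7100.0])
ref = torch.tensor([3400.8005, 4289.2637, 7101.9209])

# Both tolerances are passed together; rtol alone would raise a ValueError.
torch.testing.assert_close(pred, ref, rtol=1e-1, atol=1e-1)

# Omitting both uses the float32 defaults (rtol=1.3e-6, atol=1e-5), which would
# be far too strict for values that only agree to about one part in ten.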
diff --git a/tests/models/instructblip/test_modeling_instructblip.py b/tests/models/instructblip/test_modeling_instructblip.py
index 52cb0c7c8ac8..d472274fabb2 100644
--- a/tests/models/instructblip/test_modeling_instructblip.py
+++ b/tests/models/instructblip/test_modeling_instructblip.py
@@ -748,7 +748,7 @@ def _prepare_model_kwargs(input_ids, attention_mask, signature):
).logits[:, -1, :]
# They should result in very similar logits
- self.assertTrue(torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=1e-5))
+ torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=1e-5, atol=1e-5)
@unittest.skip(
"InstructBLIP cannot generate only from input ids, and requires pixel values in all cases to be present"
diff --git a/tests/models/instructblipvideo/test_modeling_instructblipvideo.py b/tests/models/instructblipvideo/test_modeling_instructblipvideo.py
index 6b59a9878aa4..ef95aab8bf93 100644
--- a/tests/models/instructblipvideo/test_modeling_instructblipvideo.py
+++ b/tests/models/instructblipvideo/test_modeling_instructblipvideo.py
@@ -768,7 +768,7 @@ def _prepare_model_kwargs(input_ids, attention_mask, signature):
).logits[:, -1, :]
# They should result in very similar logits
- self.assertTrue(torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=1e-5))
+ torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=1e-5, atol=1e-5)
@unittest.skip(
"InstructBLIPVideo cannot generate only from input ids, and requires pixel values in all cases to be present"
diff --git a/tests/models/jamba/test_modeling_jamba.py b/tests/models/jamba/test_modeling_jamba.py
index 2f284763e08a..263b35345ba0 100644
--- a/tests/models/jamba/test_modeling_jamba.py
+++ b/tests/models/jamba/test_modeling_jamba.py
@@ -415,10 +415,11 @@ def test_initialization(self):
if param.requires_grad:
if "A_log" in name:
A = torch.arange(1, config.mamba_d_state + 1, dtype=torch.float32)[None, :]
- self.assertTrue(torch.allclose(param.data, torch.log(A), atol=1e-5, rtol=1e-5))
+ A = A.expand(config.mamba_expand * config.hidden_size, -1).contiguous()
+ torch.testing.assert_close(param.data, torch.log(A), rtol=1e-5, atol=1e-5)
elif "D" in name:
# check if it's a ones like
- self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5))
+ torch.testing.assert_close(param.data, torch.ones_like(param.data), rtol=1e-5, atol=1e-5)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
diff --git a/tests/models/jetmoe/test_modeling_jetmoe.py b/tests/models/jetmoe/test_modeling_jetmoe.py
index ba7dc5377c86..757783950b90 100644
--- a/tests/models/jetmoe/test_modeling_jetmoe.py
+++ b/tests/models/jetmoe/test_modeling_jetmoe.py
@@ -390,10 +390,10 @@ def test_model_8b_logits(self):
out = model(input_ids).logits.float().cpu()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[0.2507, -2.7073, -1.3445, -1.9363, -1.7216, -1.7370, -1.9054, -1.9792]])
- torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([-3.3689, 5.9006, 5.7450, -1.7012, -4.7072, -4.7071, -4.7071, -4.7071, -4.7072, -4.7072, -4.7072, -4.7071, 3.8321, 9.1746, -4.7071, -4.7072, -4.7071, -4.7072, -4.7071, -4.7072, -4.7071, -4.7071, -4.7071, -4.7071, -4.7071, -4.7071, -4.7071, -4.7071, -4.7071, -4.7071]) # fmt: skip
- torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-4, rtol=1e-4)
+ torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
del model
backend_empty_cache(torch_device)
diff --git a/tests/models/kosmos2/test_modeling_kosmos2.py b/tests/models/kosmos2/test_modeling_kosmos2.py
index ca4e05ff054b..bb318ba13221 100644
--- a/tests/models/kosmos2/test_modeling_kosmos2.py
+++ b/tests/models/kosmos2/test_modeling_kosmos2.py
@@ -515,7 +515,7 @@ def _prepare_model_kwargs(input_ids, attention_mask, pad_size, signature):
next_logits_with_padding = model(**model_kwargs, pixel_values=pixel_values).logits[:, -1, :]
# They should result in very similar logits
- self.assertTrue(torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=1e-3))
+ torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=1e-3, atol=1e-3)
@slow
def test_model_from_pretrained(self):
@@ -892,6 +892,6 @@ def test_inference_interpolate_pos_encoding(self):
[[0.9148, -1.4148, 3.8040], [3.3443, 1.9478, 0.2080], [1.6604, 2.8184, -0.3618]]
).to(torch_device)
- self.assertTrue(
- torch.allclose(outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, atol=1e-1)
+ torch.testing.assert_close(
+ outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-2, atol=1e-2
)
diff --git a/tests/models/layoutlm/test_modeling_layoutlm.py b/tests/models/layoutlm/test_modeling_layoutlm.py
index 38dd86eb8b82..ab2a9cd51359 100644
--- a/tests/models/layoutlm/test_modeling_layoutlm.py
+++ b/tests/models/layoutlm/test_modeling_layoutlm.py
@@ -329,12 +329,12 @@ def test_forward_pass_no_head(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-3, atol=1e-3)
# test the pooled output on [1, :3]
expected_slice = torch.tensor([-0.6580, -0.0214, 0.8552], device=torch_device)
- self.assertTrue(torch.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))
+ torch.testing.assert_close(outputs.pooler_output[1, :3], expected_slice, rtol=1e-3, atol=1e-3)
@slow
def test_forward_pass_sequence_classification(self):
diff --git a/tests/models/layoutlmv2/test_modeling_layoutlmv2.py b/tests/models/layoutlmv2/test_modeling_layoutlmv2.py
index 94cc4e95432c..a8b5083ebd51 100644
--- a/tests/models/layoutlmv2/test_modeling_layoutlmv2.py
+++ b/tests/models/layoutlmv2/test_modeling_layoutlmv2.py
@@ -559,7 +559,7 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[-0.1087, 0.0727, -0.3075], [0.0799, -0.0427, -0.0751], [-0.0367, 0.0480, -0.1358]], device=torch_device
)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-3, atol=1e-3)
# verify the pooled output
expected_shape = torch.Size((2, model.config.hidden_size))
diff --git a/tests/models/layoutlmv3/test_modeling_layoutlmv3.py b/tests/models/layoutlmv3/test_modeling_layoutlmv3.py
index d62a7273bd58..af0301cf6b83 100644
--- a/tests/models/layoutlmv3/test_modeling_layoutlmv3.py
+++ b/tests/models/layoutlmv3/test_modeling_layoutlmv3.py
@@ -417,4 +417,4 @@ def test_inference_no_head(self):
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/led/test_modeling_led.py b/tests/models/led/test_modeling_led.py
index 8e31758a4395..3d21fa0a69d5 100644
--- a/tests/models/led/test_modeling_led.py
+++ b/tests/models/led/test_modeling_led.py
@@ -540,7 +540,7 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], device=torch_device
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_inference_head(self):
model = LEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").to(torch_device)
@@ -557,7 +557,7 @@ def test_inference_head(self):
expected_slice = torch.tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], device=torch_device
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_seq_to_seq_generation(self):
# this test requires 16GB of RAM
diff --git a/tests/models/levit/test_modeling_levit.py b/tests/models/levit/test_modeling_levit.py
index 6199d9cdfcfd..b35967a84eeb 100644
--- a/tests/models/levit/test_modeling_levit.py
+++ b/tests/models/levit/test_modeling_levit.py
@@ -409,4 +409,4 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/lilt/test_modeling_lilt.py b/tests/models/lilt/test_modeling_lilt.py
index dc3aaaa4ee5e..9bfbb1c520c8 100644
--- a/tests/models/lilt/test_modeling_lilt.py
+++ b/tests/models/lilt/test_modeling_lilt.py
@@ -326,4 +326,4 @@ def test_inference_no_head(self):
)
self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :, :3], expected_slice, rtol=1e-3, atol=1e-3)
diff --git a/tests/models/llama/test_modeling_llama.py b/tests/models/llama/test_modeling_llama.py
index 8d492ce673da..c2abf19b2241 100644
--- a/tests/models/llama/test_modeling_llama.py
+++ b/tests/models/llama/test_modeling_llama.py
@@ -413,7 +413,7 @@ def test_model_rope_scaling_from_config(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
diff --git a/tests/models/llava/test_modeling_llava.py b/tests/models/llava/test_modeling_llava.py
index 0b8ebb9a17a3..23663ee649a7 100644
--- a/tests/models/llava/test_modeling_llava.py
+++ b/tests/models/llava/test_modeling_llava.py
@@ -242,7 +242,7 @@ def test_inputs_embeds_matches_input_ids(self):
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
def test_mismatching_num_image_tokens(self):
"""
diff --git a/tests/models/llava_next/test_modeling_llava_next.py b/tests/models/llava_next/test_modeling_llava_next.py
index c797a2b0c4af..ce86a5695810 100644
--- a/tests/models/llava_next/test_modeling_llava_next.py
+++ b/tests/models/llava_next/test_modeling_llava_next.py
@@ -288,7 +288,7 @@ def test_inputs_embeds_matches_input_ids(self):
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
def test_mismatching_num_image_tokens(self):
"""
diff --git a/tests/models/llava_next_video/test_modeling_llava_next_video.py b/tests/models/llava_next_video/test_modeling_llava_next_video.py
index 576329fcfae8..f7cf66b248f3 100644
--- a/tests/models/llava_next_video/test_modeling_llava_next_video.py
+++ b/tests/models/llava_next_video/test_modeling_llava_next_video.py
@@ -305,7 +305,7 @@ def test_inputs_embeds_matches_input_ids(self):
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
def test_mismatching_num_image_tokens(self):
"""
diff --git a/tests/models/llava_onevision/test_modeling_llava_onevision.py b/tests/models/llava_onevision/test_modeling_llava_onevision.py
index 45cb433d867f..2674aaabd8cc 100644
--- a/tests/models/llava_onevision/test_modeling_llava_onevision.py
+++ b/tests/models/llava_onevision/test_modeling_llava_onevision.py
@@ -291,7 +291,7 @@ def test_inputs_embeds_matches_input_ids(self):
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
@parameterized.expand(
[
diff --git a/tests/models/longformer/test_modeling_longformer.py b/tests/models/longformer/test_modeling_longformer.py
index 23765fe8ce27..304a0e47446a 100644
--- a/tests/models/longformer/test_modeling_longformer.py
+++ b/tests/models/longformer/test_modeling_longformer.py
@@ -466,7 +466,9 @@ def test_diagonalize(self):
self.assertTrue(padded_hidden_states.shape[-1] == chunked_hidden_states.shape[-1] + window_overlap_size - 1)
# first row => [0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000]
- self.assertTrue(torch.allclose(padded_hidden_states[0, 0, 0, :4], chunked_hidden_states[0, 0, 0], atol=1e-3))
+ torch.testing.assert_close(
+ padded_hidden_states[0, 0, 0, :4], chunked_hidden_states[0, 0, 0], rtol=1e-3, atol=1e-3
+ )
self.assertTrue(
torch.allclose(
padded_hidden_states[0, 0, 0, 4:],
@@ -475,7 +477,9 @@ def test_diagonalize(self):
)
)
# last row => [0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629]
- self.assertTrue(torch.allclose(padded_hidden_states[0, 0, -1, 3:], chunked_hidden_states[0, 0, -1], atol=1e-3))
+ torch.testing.assert_close(
+ padded_hidden_states[0, 0, -1, 3:], chunked_hidden_states[0, 0, -1], rtol=1e-3, atol=1e-3
+ )
self.assertTrue(
torch.allclose(
padded_hidden_states[0, 0, -1, :3],
@@ -493,8 +497,10 @@ def test_pad_and_transpose_last_two_dims(self):
self.assertEqual(padded_hidden_states.shape, (1, 8, 5))
expected_added_dim = torch.zeros((5,), device=torch_device, dtype=torch.float32)
- self.assertTrue(torch.allclose(expected_added_dim, padded_hidden_states[0, -1, :], atol=1e-6))
- self.assertTrue(torch.allclose(hidden_states[0, -1, :], padded_hidden_states.view(1, -1)[0, 24:32], atol=1e-6))
+ torch.testing.assert_close(expected_added_dim, padded_hidden_states[0, -1, :], rtol=1e-6, atol=1e-6)
+ torch.testing.assert_close(
+ hidden_states[0, -1, :], padded_hidden_states.view(1, -1)[0, 24:32], rtol=1e-6, atol=1e-6
+ )
def test_chunk(self):
hidden_states = self._get_hidden_states()
@@ -513,8 +519,10 @@ def test_chunk(self):
[0.4983, -1.8348, -0.7584, 2.0514], device=torch_device, dtype=torch.float32
)
- self.assertTrue(torch.allclose(chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, atol=1e-3))
- self.assertTrue(torch.allclose(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, atol=1e-3))
+ torch.testing.assert_close(
+ chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, rtol=1e-3, atol=1e-3
+ )
+ torch.testing.assert_close(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, rtol=1e-3, atol=1e-3)
self.assertEqual(chunked_hidden_states.shape, (1, 3, 4, 4))
def test_mask_invalid_locations(self):
@@ -728,8 +736,8 @@ def test_inference_no_head(self):
output_without_mask = model(input_ids)[0]
expected_output_slice = torch.tensor([0.0549, 0.1087, -0.1119, -0.0368, 0.0250], device=torch_device)
- self.assertTrue(torch.allclose(output[0, 0, -5:], expected_output_slice, atol=1e-4))
- self.assertTrue(torch.allclose(output_without_mask[0, 0, -5:], expected_output_slice, atol=1e-4))
+ torch.testing.assert_close(output[0, 0, -5:], expected_output_slice, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(output_without_mask[0, 0, -5:], expected_output_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_no_head_long(self):
@@ -749,8 +757,8 @@ def test_inference_no_head_long(self):
expected_output_sum = torch.tensor(74585.8594, device=torch_device)
expected_output_mean = torch.tensor(0.0243, device=torch_device)
- self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4))
- self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4))
+ torch.testing.assert_close(output.sum(), expected_output_sum, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(output.mean(), expected_output_mean, rtol=1e-4, atol=1e-4)
@slow
def test_inference_masked_lm_long(self):
@@ -769,6 +777,6 @@ def test_inference_masked_lm_long(self):
expected_prediction_scores_sum = torch.tensor(-6.1048e08, device=torch_device)
expected_prediction_scores_mean = torch.tensor(-3.0348, device=torch_device)
- self.assertTrue(torch.allclose(loss, expected_loss, atol=1e-4))
- self.assertTrue(torch.allclose(prediction_scores.sum(), expected_prediction_scores_sum, atol=1e-4))
- self.assertTrue(torch.allclose(prediction_scores.mean(), expected_prediction_scores_mean, atol=1e-4))
+ torch.testing.assert_close(loss, expected_loss, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(prediction_scores.sum(), expected_prediction_scores_sum, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(prediction_scores.mean(), expected_prediction_scores_mean, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/longt5/test_modeling_longt5.py b/tests/models/longt5/test_modeling_longt5.py
index a9d3e7479e95..c2c2563b5506 100644
--- a/tests/models/longt5/test_modeling_longt5.py
+++ b/tests/models/longt5/test_modeling_longt5.py
@@ -1362,8 +1362,10 @@ def test_inference_hidden_states(self):
# check if encoder_outputs match
expected_output_slice = torch.tensor([0.0629, -0.1294, -0.0089, 0.0772, 0.0663], device=torch_device)
- self.assertTrue(torch.allclose(output.encoder_hidden_states[-1][0, 0, :5], expected_output_slice, atol=1e-4))
+ torch.testing.assert_close(
+ output.encoder_hidden_states[-1][0, 0, :5], expected_output_slice, rtol=1e-4, atol=1e-4
+ )
# check if logits match
expected_output_slice = torch.tensor([5.5231, 6.1058, 3.1766, 8.2391, -5.9453], device=torch_device)
- self.assertTrue(torch.allclose(output.logits[0, 0, :5], expected_output_slice, atol=1e-4))
+ torch.testing.assert_close(output.logits[0, 0, :5], expected_output_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/luke/test_modeling_luke.py b/tests/models/luke/test_modeling_luke.py
index 8fd68cd3240e..1a0b63d33e0c 100644
--- a/tests/models/luke/test_modeling_luke.py
+++ b/tests/models/luke/test_modeling_luke.py
@@ -909,14 +909,14 @@ def test_inference_base_model(self):
expected_slice = torch.tensor(
[[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
# Verify entity hidden states
expected_shape = torch.Size((1, 1, 768))
self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_large_model(self):
@@ -944,11 +944,11 @@ def test_inference_large_model(self):
expected_slice = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
# Verify entity hidden states
expected_shape = torch.Size((1, 1, 1024))
self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/lxmert/test_modeling_lxmert.py b/tests/models/lxmert/test_modeling_lxmert.py
index 1ff8c002618b..50be9cce9b2a 100644
--- a/tests/models/lxmert/test_modeling_lxmert.py
+++ b/tests/models/lxmert/test_modeling_lxmert.py
@@ -803,4 +803,4 @@ def test_inference_no_head_absolute_embedding(self):
[[[0.2417, -0.9807, 0.1480], [1.2541, -0.8320, 0.5112], [1.4070, -1.1052, 0.6990]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/m2m_100/test_modeling_m2m_100.py b/tests/models/m2m_100/test_modeling_m2m_100.py
index 4fe0902c615b..b4ddb483d8c7 100644
--- a/tests/models/m2m_100/test_modeling_m2m_100.py
+++ b/tests/models/m2m_100/test_modeling_m2m_100.py
@@ -374,7 +374,7 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_inference_head(self):
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
@@ -391,7 +391,7 @@ def test_inference_head(self):
expected_slice = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_seq_to_seq_generation(self):
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
diff --git a/tests/models/mamba/test_modeling_mamba.py b/tests/models/mamba/test_modeling_mamba.py
index 455022140f7c..1ac0a25b6805 100644
--- a/tests/models/mamba/test_modeling_mamba.py
+++ b/tests/models/mamba/test_modeling_mamba.py
@@ -344,11 +344,12 @@ def test_initialization(self):
self.assertTrue(param.data.min().item() >= inv_dt[0])
elif "A_log" in name:
A = torch.arange(1, config.state_size + 1, dtype=torch.float32)[None, :]
- self.assertTrue(torch.allclose(param.data, torch.log(A), atol=1e-5, rtol=1e-5))
+ A = A.expand(config.intermediate_size, -1).contiguous()
+ torch.testing.assert_close(param.data, torch.log(A), rtol=1e-5, atol=1e-5)
elif "D" in name:
if param.requires_grad:
# check if it's a ones like
- self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5))
+ torch.testing.assert_close(param.data, torch.ones_like(param.data), rtol=1e-5, atol=1e-5)
@slow
def test_model_from_pretrained(self):
diff --git a/tests/models/mamba2/test_modeling_mamba2.py b/tests/models/mamba2/test_modeling_mamba2.py
index 17cbdc1e8d51..c5b787f64389 100644
--- a/tests/models/mamba2/test_modeling_mamba2.py
+++ b/tests/models/mamba2/test_modeling_mamba2.py
@@ -250,7 +250,7 @@ def test_initialization(self):
if "D" in name:
if param.requires_grad:
# check if it's a ones like
- self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5))
+ torch.testing.assert_close(param.data, torch.ones_like(param.data), rtol=1e-5, atol=1e-5)
@unittest.skip(reason="Mamba 2 weights are not tied")
def test_tied_weights_keys(self):
@@ -439,4 +439,4 @@ def test_mamba2_mixer_train_vs_eval_equivalence(self):
mixer.eval()
out_eval = mixer(hidden_states)
- self.assertTrue(torch.allclose(out_train, out_eval, atol=1e-3))
+ torch.testing.assert_close(out_train, out_eval, rtol=1e-3, atol=1e-3)
diff --git a/tests/models/markuplm/test_modeling_markuplm.py b/tests/models/markuplm/test_modeling_markuplm.py
index 8dfcee2484bd..7e2ce0b7c18f 100644
--- a/tests/models/markuplm/test_modeling_markuplm.py
+++ b/tests/models/markuplm/test_modeling_markuplm.py
@@ -388,4 +388,4 @@ def test_forward_pass_no_head(self):
[[0.0675, -0.0052, 0.5001], [-0.2281, 0.0802, 0.2192], [-0.0583, -0.3311, 0.1185]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/mask2former/test_image_processing_mask2former.py b/tests/models/mask2former/test_image_processing_mask2former.py
index b298336a81ce..aaca13dbc367 100644
--- a/tests/models/mask2former/test_image_processing_mask2former.py
+++ b/tests/models/mask2former/test_image_processing_mask2former.py
@@ -339,8 +339,8 @@ def get_instance_segmentation_and_mapping(annotation):
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
- self.assertTrue(torch.allclose(inputs["class_labels"][0], torch.tensor([30, 55])))
- self.assertTrue(torch.allclose(inputs["class_labels"][1], torch.tensor([4, 4, 23, 55])))
+ torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([30, 55]))
+ torch.testing.assert_close(inputs["class_labels"][1], torch.tensor([4, 4, 23, 55]))
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
@@ -381,8 +381,8 @@ def test_integration_semantic_segmentation(self):
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
- self.assertTrue(torch.allclose(inputs["class_labels"][0], torch.tensor([2, 4, 60])))
- self.assertTrue(torch.allclose(inputs["class_labels"][1], torch.tensor([0, 3, 7, 8, 15, 28, 30, 143])))
+ torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([2, 4, 60]))
+ torch.testing.assert_close(inputs["class_labels"][1], torch.tensor([0, 3, 7, 8, 15, 28, 30, 143]))
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
@@ -441,9 +441,9 @@ def create_panoptic_map(annotation, segments_info):
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip
- self.assertTrue(torch.allclose(inputs["class_labels"][0], torch.tensor(expected_class_labels)))
+ torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels)
expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip
- self.assertTrue(torch.allclose(inputs["class_labels"][1], expected_class_labels))
+ torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels)
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
diff --git a/tests/models/mask2former/test_modeling_mask2former.py b/tests/models/mask2former/test_modeling_mask2former.py
index a3caefe14ab5..af7704b1efaf 100644
--- a/tests/models/mask2former/test_modeling_mask2former.py
+++ b/tests/models/mask2former/test_modeling_mask2former.py
@@ -436,7 +436,7 @@ def test_inference_universal_segmentation_head(self):
[-6.6105, -6.3427, -6.4675],
]
expected_slice = torch.tensor(expected_slice).to(torch_device)
- self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(masks_queries_logits[0, 0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
# class_queries_logits
class_queries_logits = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
@@ -447,7 +447,9 @@ def test_inference_universal_segmentation_head(self):
[0.3045, -7.7293, -3.0275],
]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(
+ outputs.class_queries_logits[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE
+ )
@require_torch_accelerator
@require_torch_fp16
@@ -500,10 +502,10 @@ def test_export(self):
eager_outputs = model(**inputs)
exported_outputs = exported_program.module().forward(inputs["pixel_values"], inputs["pixel_mask"])
self.assertEqual(eager_outputs.masks_queries_logits.shape, exported_outputs.masks_queries_logits.shape)
- self.assertTrue(
- torch.allclose(eager_outputs.masks_queries_logits, exported_outputs.masks_queries_logits, atol=TOLERANCE)
+ torch.testing.assert_close(
+ eager_outputs.masks_queries_logits, exported_outputs.masks_queries_logits, rtol=TOLERANCE, atol=TOLERANCE
)
self.assertEqual(eager_outputs.class_queries_logits.shape, exported_outputs.class_queries_logits.shape)
- self.assertTrue(
- torch.allclose(eager_outputs.class_queries_logits, exported_outputs.class_queries_logits, atol=TOLERANCE)
+ torch.testing.assert_close(
+ eager_outputs.class_queries_logits, exported_outputs.class_queries_logits, rtol=TOLERANCE, atol=TOLERANCE
)
diff --git a/tests/models/maskformer/test_image_processing_maskformer.py b/tests/models/maskformer/test_image_processing_maskformer.py
index 8b3c7db762a5..d042c702a601 100644
--- a/tests/models/maskformer/test_image_processing_maskformer.py
+++ b/tests/models/maskformer/test_image_processing_maskformer.py
@@ -293,8 +293,8 @@ def get_instance_segmentation_and_mapping(annotation):
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
- self.assertTrue(torch.allclose(inputs["class_labels"][0], torch.tensor([30, 55])))
- self.assertTrue(torch.allclose(inputs["class_labels"][1], torch.tensor([4, 4, 23, 55])))
+ torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([30, 55]))
+ torch.testing.assert_close(inputs["class_labels"][1], torch.tensor([4, 4, 23, 55]))
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
@@ -335,8 +335,8 @@ def test_integration_semantic_segmentation(self):
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
- self.assertTrue(torch.allclose(inputs["class_labels"][0], torch.tensor([2, 4, 60])))
- self.assertTrue(torch.allclose(inputs["class_labels"][1], torch.tensor([0, 3, 7, 8, 15, 28, 30, 143])))
+ torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([2, 4, 60]))
+ torch.testing.assert_close(inputs["class_labels"][1], torch.tensor([0, 3, 7, 8, 15, 28, 30, 143]))
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
@@ -395,9 +395,9 @@ def create_panoptic_map(annotation, segments_info):
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip
- self.assertTrue(torch.allclose(inputs["class_labels"][0], torch.tensor(expected_class_labels)))
+ torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels)
expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip
- self.assertTrue(torch.allclose(inputs["class_labels"][1], expected_class_labels))
+ torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels)
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
diff --git a/tests/models/maskformer/test_modeling_maskformer.py b/tests/models/maskformer/test_modeling_maskformer.py
index 025261841b30..9298fe2d1c3c 100644
--- a/tests/models/maskformer/test_modeling_maskformer.py
+++ b/tests/models/maskformer/test_modeling_maskformer.py
@@ -567,7 +567,7 @@ def test_inference_instance_segmentation_head(self):
[-1.5795398, -1.9269832, -2.093942],
]
expected_slice = torch.tensor(expected_slice).to(torch_device)
- self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(masks_queries_logits[0, 0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
# class_queries_logits
class_queries_logits = outputs.class_queries_logits
self.assertEqual(
@@ -580,7 +580,9 @@ def test_inference_instance_segmentation_head(self):
[1.0766e-04, -7.7630e00, -5.1263e00],
]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(
+ outputs.class_queries_logits[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE
+ )
def test_inference_instance_segmentation_head_resnet_backbone(self):
model = (
@@ -607,7 +609,7 @@ def test_inference_instance_segmentation_head_resnet_backbone(self):
)
expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
expected_slice = torch.tensor(expected_slice).to(torch_device)
- self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(masks_queries_logits[0, 0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
# class_queries_logits
class_queries_logits = outputs.class_queries_logits
self.assertEqual(
@@ -616,7 +618,9 @@ def test_inference_instance_segmentation_head_resnet_backbone(self):
expected_slice = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(
+ outputs.class_queries_logits[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE
+ )
@require_torch_accelerator
@require_torch_fp16
diff --git a/tests/models/mgp_str/test_modeling_mgp_str.py b/tests/models/mgp_str/test_modeling_mgp_str.py
index 559a9e596840..465444f6927e 100644
--- a/tests/models/mgp_str/test_modeling_mgp_str.py
+++ b/tests/models/mgp_str/test_modeling_mgp_str.py
@@ -259,4 +259,4 @@ def test_inference(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(outputs.logits[0][:, 1:4, 1:4], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0][:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/mimi/test_modeling_mimi.py b/tests/models/mimi/test_modeling_mimi.py
index 83660cde683d..4542fe3bbace 100644
--- a/tests/models/mimi/test_modeling_mimi.py
+++ b/tests/models/mimi/test_modeling_mimi.py
@@ -846,7 +846,7 @@ def test_integration(self):
)[1]
# make sure forward and decode gives same result
- self.assertTrue(torch.allclose(input_values_dec, input_values_enc_dec))
+ torch.testing.assert_close(input_values_dec, input_values_enc_dec)
# make sure shape matches
self.assertTrue(inputs["input_values"].shape == input_values_enc_dec.shape)
diff --git a/tests/models/mistral/test_modeling_mistral.py b/tests/models/mistral/test_modeling_mistral.py
index 70de4d9cf1ed..c4003da46230 100644
--- a/tests/models/mistral/test_modeling_mistral.py
+++ b/tests/models/mistral/test_modeling_mistral.py
@@ -450,7 +450,7 @@ def test_model_7b_logits(self):
out = model(input_ids).logits.float().cpu()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-2.5548, -2.5737, -3.0600, -2.5906, -2.8478, -2.8118, -2.9325, -2.7694]])
- torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# Key 9 for MI300, Key 8 for A100/A10, and Key 7 for T4.
#
diff --git a/tests/models/mllama/test_modeling_mllama.py b/tests/models/mllama/test_modeling_mllama.py
index cfd64aee5368..e1ded5e934bb 100644
--- a/tests/models/mllama/test_modeling_mllama.py
+++ b/tests/models/mllama/test_modeling_mllama.py
@@ -320,7 +320,7 @@ def test_inputs_embeds_matches_input_ids(self):
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
def _check_attentions_for_generate(
self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
diff --git a/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py b/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py
index d272347991a5..ab588cf20f26 100644
--- a/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py
+++ b/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py
@@ -248,4 +248,4 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py b/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py
index 2f8fb55554f1..7e96dea4fefe 100644
--- a/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py
+++ b/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py
@@ -303,7 +303,7 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_semantic_segmentation(self):
@@ -333,4 +333,4 @@ def test_inference_semantic_segmentation(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, :3, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/mobilevit/test_modeling_mobilevit.py b/tests/models/mobilevit/test_modeling_mobilevit.py
index 9eb5878500d5..a14a5fb445d5 100644
--- a/tests/models/mobilevit/test_modeling_mobilevit.py
+++ b/tests/models/mobilevit/test_modeling_mobilevit.py
@@ -306,7 +306,7 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_semantic_segmentation(self):
@@ -336,7 +336,7 @@ def test_inference_semantic_segmentation(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, :3, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_post_processing_semantic_segmentation(self):
diff --git a/tests/models/mobilevitv2/test_modeling_mobilevitv2.py b/tests/models/mobilevitv2/test_modeling_mobilevitv2.py
index e2b565e4b9ce..136bb5131214 100644
--- a/tests/models/mobilevitv2/test_modeling_mobilevitv2.py
+++ b/tests/models/mobilevitv2/test_modeling_mobilevitv2.py
@@ -319,7 +319,7 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_semantic_segmentation(self):
@@ -349,7 +349,7 @@ def test_inference_semantic_segmentation(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, :3, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_post_processing_semantic_segmentation(self):
diff --git a/tests/models/modernbert/test_modeling_modernbert.py b/tests/models/modernbert/test_modeling_modernbert.py
index 9f286cf3985f..c7c34bf06150 100644
--- a/tests/models/modernbert/test_modeling_modernbert.py
+++ b/tests/models/modernbert/test_modeling_modernbert.py
@@ -383,7 +383,7 @@ def test_inference_masked_lm(self):
expected_slice = torch.tensor(
[[[3.8387, -0.2017, 12.2839], [3.6300, 0.6869, 14.7123], [-5.1137, -3.8122, 11.9874]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_no_head(self):
@@ -405,7 +405,7 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[[0.3151, -0.6417, -0.7027], [-0.7834, -1.5810, 0.4576], [1.0614, -0.7268, -0.0871]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_token_classification(self):
@@ -428,7 +428,7 @@ def test_inference_token_classification(self):
expected = torch.tensor(
[[[2.0159, 4.6569], [-0.9430, 3.1595], [-3.8770, 3.2653], [1.5752, 4.5167], [-1.6939, 1.2524]]]
)
- self.assertTrue(torch.allclose(output, expected, atol=1e-4))
+ torch.testing.assert_close(output, expected, rtol=1e-4, atol=1e-4)
@slow
def test_inference_sequence_classification(self):
@@ -451,7 +451,7 @@ def test_inference_sequence_classification(self):
self.assertEqual(output.shape, expected_shape)
expected = torch.tensor([[1.6466, 4.5662]])
- self.assertTrue(torch.allclose(output, expected, atol=1e-4))
+ torch.testing.assert_close(output, expected, rtol=1e-4, atol=1e-4)
@slow
def test_export(self):
diff --git a/tests/models/moonshine/test_modeling_moonshine.py b/tests/models/moonshine/test_modeling_moonshine.py
index 13469e231311..465ecec2083c 100644
--- a/tests/models/moonshine/test_modeling_moonshine.py
+++ b/tests/models/moonshine/test_modeling_moonshine.py
@@ -489,7 +489,7 @@ def test_tiny_logits_single(self):
-8.0796, -7.3300, -7.3672, -6.8765, -7.6876, -7.2682, -6.9866, -6.7457, -7.6855, -7.3050,
])
# fmt: on
- self.assertTrue(torch.allclose(outputs.logits[0][0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0][0, :30].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_base_logits_single(self):
@@ -507,7 +507,7 @@ def test_base_logits_single(self):
-7.9310, -8.1024, -7.8699, -7.8231, -8.0752, -7.9764, -7.8127, -8.0536, -7.9492, -7.9290,
])
# fmt: on
- self.assertTrue(torch.allclose(outputs.logits[0][0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0][0, :30].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_tiny_logits_batch(self):
@@ -525,7 +525,7 @@ def test_tiny_logits_batch(self):
[-10.8078, 4.0030, -0.0633, -5.0505, -5.3906, -5.4590, -5.2420, -5.4746, -5.2665, -5.3158]
])
# fmt: on
- self.assertTrue(torch.allclose(outputs.logits[0][:, :10].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0][:, :10].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_base_logits_batch(self):
@@ -545,7 +545,7 @@ def test_base_logits_batch(self):
])
# fmt: on
- self.assertTrue(torch.allclose(outputs.logits[0][:, :10].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0][:, :10].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_tiny_generation_single(self):
diff --git a/tests/models/moshi/test_modeling_moshi.py b/tests/models/moshi/test_modeling_moshi.py
index 7d4b855c10d8..adaf0fcc34ac 100644
--- a/tests/models/moshi/test_modeling_moshi.py
+++ b/tests/models/moshi/test_modeling_moshi.py
@@ -244,7 +244,7 @@ def test_resize_tokens_embeddings(self):
else:
old_embeddings_mean = torch.mean(model_embed.weight.data[:-10, :], axis=0)
new_embeddings_mean = torch.mean(model_embed.weight.data[-10:, :], axis=0)
- torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, atol=1e-3, rtol=1e-1)
+ torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, rtol=1e-3, atol=1e-3)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
if not is_deepspeed_zero3_enabled():
@@ -344,7 +344,7 @@ def test_resize_tokens_embeddings(self):
else:
old_embeddings_mean = torch.mean(model_embed.weight.data[:-10, :], axis=0)
new_embeddings_mean = torch.mean(model_embed.weight.data[-10:, :], axis=0)
- torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, atol=1e-3, rtol=1e-1)
+ torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, rtol=1e-3, atol=1e-3)
@unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
def test_cpu_offload(self):
@@ -733,7 +733,7 @@ def test_left_padding_compatibility(self):
next_logits_with_padding = model(**model_kwargs).logits[:, -1, :]
# They should result in very similar logits
- self.assertTrue(torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=1e-5))
+ torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=1e-5, atol=1e-5)
@require_torch_sdpa
@slow
@@ -810,8 +810,8 @@ def test_eager_matches_sdpa_generate(self):
depth_decoder_do_sample=False,
)
- self.assertTrue(torch.allclose(res_eager.sequences, res_sdpa.sequences))
- self.assertTrue(torch.allclose(res_eager.audio_sequences, res_sdpa.audio_sequences))
+ torch.testing.assert_close(res_eager.sequences, res_sdpa.sequences)
+ torch.testing.assert_close(res_eager.audio_sequences, res_sdpa.audio_sequences)
@pytest.mark.generate
def test_generate_without_input_ids(self):
diff --git a/tests/models/mpnet/test_modeling_mpnet.py b/tests/models/mpnet/test_modeling_mpnet.py
index 9f97f3c11b58..e71ec7566d85 100644
--- a/tests/models/mpnet/test_modeling_mpnet.py
+++ b/tests/models/mpnet/test_modeling_mpnet.py
@@ -264,4 +264,4 @@ def test_inference_no_head(self):
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
)
# compare the actual values for a slice.
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/mpt/test_modeling_mpt.py b/tests/models/mpt/test_modeling_mpt.py
index 55919cbbf959..5507c127bb94 100644
--- a/tests/models/mpt/test_modeling_mpt.py
+++ b/tests/models/mpt/test_modeling_mpt.py
@@ -513,4 +513,4 @@ def test_model_logits(self):
expected_slice = torch.Tensor([-0.2520, -0.2178, -0.1953]).to(torch_device, torch.bfloat16)
predicted_slice = outputs.hidden_states[-1][0, 0, :3]
- self.assertTrue(torch.allclose(expected_slice, predicted_slice, atol=1e-3, rtol=1e-3))
+ torch.testing.assert_close(expected_slice, predicted_slice, rtol=1e-3, atol=1e-3)
diff --git a/tests/models/mra/test_modeling_mra.py b/tests/models/mra/test_modeling_mra.py
index 7e785b5f5884..cb5713bc2b59 100644
--- a/tests/models/mra/test_modeling_mra.py
+++ b/tests/models/mra/test_modeling_mra.py
@@ -401,7 +401,7 @@ def test_inference_no_head(self):
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_masked_lm(self):
@@ -420,7 +420,7 @@ def test_inference_masked_lm(self):
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_masked_lm_long_input(self):
@@ -439,4 +439,4 @@ def test_inference_masked_lm_long_input(self):
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/musicgen/test_modeling_musicgen.py b/tests/models/musicgen/test_modeling_musicgen.py
index 3ea60d550e06..c1aba2838273 100644
--- a/tests/models/musicgen/test_modeling_musicgen.py
+++ b/tests/models/musicgen/test_modeling_musicgen.py
@@ -1821,7 +1821,7 @@ def test_logits_text_prompt(self):
# fmt: on
self.assertTrue(logits.shape == (*decoder_input_ids.shape, model.decoder.config.vocab_size))
- self.assertTrue(torch.allclose(logits[0, 0, :16].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(logits[0, 0, :16].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_logits_text_audio_prompt(self):
@@ -1859,7 +1859,7 @@ def test_logits_text_audio_prompt(self):
# fmt: on
self.assertTrue(logits.shape == (8, 50, 2048))
- self.assertTrue(torch.allclose(logits[0, -1, :16].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(logits[0, -1, :16].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_generate_unconditional_greedy(self):
@@ -1881,7 +1881,7 @@ def test_generate_unconditional_greedy(self):
# fmt: on
self.assertTrue(output_values.shape == (1, 1, 3200))
- self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@slow
def test_generate_unconditional_sampling(self):
@@ -1904,7 +1904,7 @@ def test_generate_unconditional_sampling(self):
# fmt: on
self.assertTrue(output_values.shape == (2, 1, 4480))
- self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@slow
def test_generate_text_prompt_greedy(self):
@@ -1931,7 +1931,7 @@ def test_generate_text_prompt_greedy(self):
# fmt: on
self.assertTrue(output_values.shape == (2, 1, 4480))
- self.assertTrue(torch.allclose(output_values[0, 0, :10].cpu(), EXPECTED_VALUES, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, :10].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@slow
def test_generate_text_prompt_greedy_with_classifier_free_guidance(self):
@@ -1958,7 +1958,7 @@ def test_generate_text_prompt_greedy_with_classifier_free_guidance(self):
# fmt: on
self.assertTrue(output_values.shape == (2, 1, 4480))
- self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@slow
def test_generate_text_prompt_sampling(self):
@@ -1986,7 +1986,7 @@ def test_generate_text_prompt_sampling(self):
# fmt: on
self.assertTrue(output_values.shape == (2, 1, 4480))
- self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@slow
def test_generate_text_audio_prompt(self):
@@ -2013,7 +2013,7 @@ def test_generate_text_audio_prompt(self):
self.assertTrue(
output_values.shape == (2, 1, 36480)
) # input values take shape 32000 and we generate from there
- self.assertTrue(torch.allclose(output_values[0, 0, -16:].cpu(), EXPECTED_VALUES, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, -16:].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@require_torch
@@ -2053,8 +2053,8 @@ def test_generate_unconditional_greedy(self):
# (bsz, channels, seq_len)
self.assertTrue(output_values.shape == (1, 2, 5760))
- self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT, atol=1e-4))
- self.assertTrue(torch.allclose(output_values[0, 1, :16].cpu(), EXPECTED_VALUES_RIGHT, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(output_values[0, 1, :16].cpu(), EXPECTED_VALUES_RIGHT, rtol=1e-4, atol=1e-4)
@slow
def test_generate_text_audio_prompt(self):
@@ -2088,5 +2088,5 @@ def test_generate_text_audio_prompt(self):
# (bsz, channels, seq_len)
self.assertTrue(output_values.shape == (2, 2, 37760))
# input values take shape 32000 and we generate from there - we check the last (generated) values
- self.assertTrue(torch.allclose(output_values[0, 0, -16:].cpu(), EXPECTED_VALUES_LEFT, atol=1e-4))
- self.assertTrue(torch.allclose(output_values[0, 1, -16:].cpu(), EXPECTED_VALUES_RIGHT, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, -16:].cpu(), EXPECTED_VALUES_LEFT, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(output_values[0, 1, -16:].cpu(), EXPECTED_VALUES_RIGHT, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/musicgen_melody/test_modeling_musicgen_melody.py b/tests/models/musicgen_melody/test_modeling_musicgen_melody.py
index 98b554be65fb..0066c08720d6 100644
--- a/tests/models/musicgen_melody/test_modeling_musicgen_melody.py
+++ b/tests/models/musicgen_melody/test_modeling_musicgen_melody.py
@@ -1799,7 +1799,7 @@ def test_logits_text_prompt(self):
)
self.assertTrue(logits.shape == logits_shape)
- self.assertTrue(torch.allclose(logits[0, -1, :16].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(logits[0, -1, :16].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_logits_text_audio_prompt(self):
@@ -1841,7 +1841,7 @@ def test_logits_text_audio_prompt(self):
# fmt: on
self.assertTrue(logits.shape == (8, 240, 2048))
- self.assertTrue(torch.allclose(logits[1:3, -1, 32:40].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(logits[1:3, -1, 32:40].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_generate_unconditional_greedy(self):
@@ -1863,7 +1863,7 @@ def test_generate_unconditional_greedy(self):
# fmt: on
self.assertTrue(output_values.shape == (1, 1, 4480))
- self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@slow
def test_generate_unconditional_sampling(self):
@@ -1888,7 +1888,7 @@ def test_generate_unconditional_sampling(self):
# fmt: on
self.assertTrue(output_values.shape == (2, 1, 4480))
- self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@slow
def test_generate_text_prompt_greedy(self):
@@ -1915,7 +1915,7 @@ def test_generate_text_prompt_greedy(self):
# fmt: on
self.assertTrue(output_values.shape == (2, 1, 4480))
- self.assertTrue(torch.allclose(output_values[0, 0, :10].cpu(), EXPECTED_VALUES, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, :10].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@slow
def test_generate_text_prompt_greedy_with_classifier_free_guidance(self):
@@ -1943,7 +1943,7 @@ def test_generate_text_prompt_greedy_with_classifier_free_guidance(self):
# fmt: on
self.assertTrue(output_values.shape == (2, 1, 4480))
- self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@slow
def test_generate_text_prompt_sampling(self):
@@ -1977,7 +1977,7 @@ def test_generate_text_prompt_sampling(self):
# fmt: on
self.assertTrue(output_values.shape == (2, 1, 4480))
- self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@slow
def test_generate_text_audio_prompt(self):
@@ -2002,7 +2002,7 @@ def test_generate_text_audio_prompt(self):
# fmt: on
self.assertTrue(output_values.shape == (2, 1, 4480))
- self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4))
+ torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@require_torch
@@ -2039,8 +2039,8 @@ def test_generate_unconditional_greedy(self):
# (bsz, channels, seq_len)
self.assertTrue(output_values.shape == (1, 2, 5760))
- self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT, atol=6e-4))
- self.assertTrue(torch.allclose(output_values[0, 1, :16].cpu(), EXPECTED_VALUES_LEFT, atol=6e-4))
+ torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT, rtol=6e-4, atol=6e-4)
+ torch.testing.assert_close(output_values[0, 1, :16].cpu(), EXPECTED_VALUES_LEFT, rtol=6e-4, atol=6e-4)
@slow
def test_generate_text_audio_prompt(self):
@@ -2071,5 +2071,9 @@ def test_generate_text_audio_prompt(self):
# (bsz, channels, seq_len)
self.assertTrue(output_values.shape == (2, 2, 5760))
- self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT_FIRST_SAMPLE, atol=1e-4))
- self.assertTrue(torch.allclose(output_values[1, 1, :16].cpu(), EXPECTED_VALUES_RIGHT_SECOND_SAMPLE, atol=1e-4))
+ torch.testing.assert_close(
+ output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT_FIRST_SAMPLE, rtol=1e-4, atol=1e-4
+ )
+ torch.testing.assert_close(
+ output_values[1, 1, :16].cpu(), EXPECTED_VALUES_RIGHT_SECOND_SAMPLE, rtol=1e-4, atol=1e-4
+ )
diff --git a/tests/models/mvp/test_modeling_mvp.py b/tests/models/mvp/test_modeling_mvp.py
index f07574bdb54e..92aa2d27d731 100644
--- a/tests/models/mvp/test_modeling_mvp.py
+++ b/tests/models/mvp/test_modeling_mvp.py
@@ -571,7 +571,7 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[0.3461, 0.3624, 0.2689], [0.3461, 0.3624, 0.2689], [-0.1562, 1.1637, -0.3784]], device=torch_device
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-3, atol=1e-3)
@slow
def test_summarization_inference(self):
diff --git a/tests/models/nllb_moe/test_modeling_nllb_moe.py b/tests/models/nllb_moe/test_modeling_nllb_moe.py
index f341fb078ef9..851bb25edfb5 100644
--- a/tests/models/nllb_moe/test_modeling_nllb_moe.py
+++ b/tests/models/nllb_moe/test_modeling_nllb_moe.py
@@ -519,7 +519,7 @@ def test_top_2_routing(self):
hidden_states = masked_hidden_states.sum(dim=0).reshape(self.batch_size, self.sequence_length, hidden_dim)
EXPECTED_MEAN_FAIRSEQ_HIDDEN_STATES = torch.Tensor([[ 7.0340e-04, 2.7997e-03, -1.3351e-02, -7.6705e-03, -3.5089e-03,3.9773e-03, 7.4593e-03, 1.2566e-02, 3.5860e-03, -2.7448e-02,-1.3731e-02, -1.0534e-02, -1.3606e-02, -1.5048e-02, -2.8914e-03,-5.0371e-03, -1.3963e-03, 6.0076e-03, -1.1380e-02, -1.4620e-02, 5.2401e-03, 8.4660e-04, -1.5319e-03, -1.6735e-02, 1.1302e-02, 3.6119e-03, 4.6084e-03, -1.3458e-02, 7.7792e-05, 1.4312e-02, 4.9107e-03, -5.0936e-03], [-4.4538e-03, 3.1026e-03, 1.4121e-04, -4.8121e-03, -5.6279e-03, 7.2493e-03, 3.9769e-03, 1.1114e-02, -1.5666e-03, -2.3477e-02, 8.7268e-03, 1.3446e-02, -2.8845e-05, -1.7287e-02, 8.7619e-03, -4.5316e-03, -1.2164e-02, 5.7461e-03, -4.5861e-03, -9.3907e-03, 2.9808e-02, 8.9206e-04, -7.6232e-04, -1.4173e-02, 3.0208e-03, 1.5310e-02, 9.7717e-03, 3.1014e-03, 7.8042e-03, 8.0197e-03, 3.4784e-03, -7.1728e-03]]) # fmt: skip
- self.assertTrue(torch.allclose(hidden_states.mean(1), EXPECTED_MEAN_FAIRSEQ_HIDDEN_STATES, 1e-4))
+ torch.testing.assert_close(hidden_states.mean(1), EXPECTED_MEAN_FAIRSEQ_HIDDEN_STATES, rtol=1e-4, atol=1e-4)
def test_batch_prioritized_routing(self):
set_seed(0)
diff --git a/tests/models/nougat/test_image_processing_nougat.py b/tests/models/nougat/test_image_processing_nougat.py
index 9d0b291ae37c..f923a2f159e0 100644
--- a/tests/models/nougat/test_image_processing_nougat.py
+++ b/tests/models/nougat/test_image_processing_nougat.py
@@ -142,7 +142,7 @@ def test_expected_output(self):
dummy_image = self.image_processor_tester.prepare_dummy_image()
image_processor = self.image_processor
inputs = image_processor(dummy_image, return_tensors="pt")
- self.assertTrue(torch.allclose(inputs["pixel_values"].mean(), torch.tensor(0.4906), atol=1e-3, rtol=1e-3))
+ torch.testing.assert_close(inputs["pixel_values"].mean(), torch.tensor(0.4906), rtol=1e-3, atol=1e-3)
def test_crop_margin_all_white(self):
image = np.uint8(np.ones((100, 100, 3)) * 255)
diff --git a/tests/models/nystromformer/test_modeling_nystromformer.py b/tests/models/nystromformer/test_modeling_nystromformer.py
index 3d812ebf04a3..73da6cec18f2 100644
--- a/tests/models/nystromformer/test_modeling_nystromformer.py
+++ b/tests/models/nystromformer/test_modeling_nystromformer.py
@@ -304,7 +304,7 @@ def test_inference_no_head(self):
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_masked_lm_end_to_end(self):
diff --git a/tests/models/olmo/test_modeling_olmo.py b/tests/models/olmo/test_modeling_olmo.py
index a85e9db34586..22e85bc339d8 100644
--- a/tests/models/olmo/test_modeling_olmo.py
+++ b/tests/models/olmo/test_modeling_olmo.py
@@ -339,7 +339,7 @@ def test_model_rope_scaling(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
@@ -356,10 +356,10 @@ def test_model_1b_logits(self):
out = model(torch.tensor(input_ids)).logits.float()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[2.2869, 0.3315, 0.9876, 1.4146, 1.8804, 2.0430, 1.7055, 1.2065]])
- torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([2.5551, -1.1230, 11.0510, 12.4977, 7.9651, 7.2342, 6.1885, 7.8340, 9.9847, 12.6695, 12.2345, 10.7970, 8.4749, 14.2483, 12.9588, 13.9233, 11.0496, 5.5749, 7.4466, 7.7914, 6.8440, 5.8951, 4.8180, 4.1935, 4.5216, 4.7256, 3.9553, 12.2870, 12.4990, 8.1591]) # fmt: skip
- torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2)
@slow
def test_model_7b_logits(self):
@@ -368,10 +368,10 @@ def test_model_7b_logits(self):
out = model(torch.tensor(input_ids)).logits.float()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[0.0271, 0.0249, -0.0578, -0.0870, 0.0167, 0.0710, 0.1002, 0.0677]])
- torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([-1.7433, -1.6685, 7.4941, 6.1506, 0.1364, -0.1127, 1.3224, 4.5458, 4.2068, 5.8296, 7.4723, 2.7925, 3.1245, 10.8872, 10.0758, 10.6717, 7.0945, 1.2398, 3.6766, 4.2365, 2.5655, 2.2222, 1.7418, 0.5223, 0.7753, 1.0938, 0.6723, 6.2522, 6.2264, 1.8105]) # fmt: skip
- torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2)
@slow
def test_model_7b_twin_2t_logits(self):
@@ -380,10 +380,10 @@ def test_model_7b_twin_2t_logits(self):
out = model(torch.tensor(input_ids)).logits.float()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-0.3636, -0.3825, -0.4800, -0.3696, -0.8388, -0.9737, -0.9849, -0.8356]])
- torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([-2.0833, -1.9234, 8.7312, 7.8049, 1.0372, 0.8941, 3.1548, 1.8502, 5.5511, 5.5793, 8.1166, 4.5906, 1.8691, 11.6377, 8.9858, 11.6447, 7.4549, 1.4725, 2.8399, 2.7568, 1.4011, 1.6958, 0.5572, 0.5231, 0.3068, 0.5364, 0.6769, 7.9636, 8.2379, 1.7950]) # fmt: skip
- torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2)
@slow
def test_model_7b_greedy_generation(self):
diff --git a/tests/models/olmo2/test_modeling_olmo2.py b/tests/models/olmo2/test_modeling_olmo2.py
index fe6dcfdb540a..ce2bd0519314 100644
--- a/tests/models/olmo2/test_modeling_olmo2.py
+++ b/tests/models/olmo2/test_modeling_olmo2.py
@@ -338,7 +338,7 @@ def test_model_rope_scaling(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
@@ -357,10 +357,10 @@ def test_model_7b_logits(self):
EXPECTED_MEAN = torch.tensor(
[[-13.0244, -13.9564, -11.8270, -11.3047, -12.3794, -12.4215, -15.6030, -12.7962]]
)
- torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([-5.3909, -13.9841, -13.6123, -14.5780, -13.9455, -13.2265, -13.4734, -11.9079, -9.2879, -12.6139, -11.4819, -5.9607, -11.9657, -6.3618, -11.1065, -7.3075, -6.5674, -6.7154, -7.3409, -7.9662, -8.0863, -8.1682, -8.7341, -8.7665, -8.8742, -9.7813, -8.0620, -12.5937, -7.6440, -11.3966]) # fmt: skip
- torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2)
@slow
def test_model_7b_greedy_generation(self):
diff --git a/tests/models/olmoe/test_modeling_olmoe.py b/tests/models/olmoe/test_modeling_olmoe.py
index 9efadb06eb41..c95b68625d0a 100644
--- a/tests/models/olmoe/test_modeling_olmoe.py
+++ b/tests/models/olmoe/test_modeling_olmoe.py
@@ -352,7 +352,7 @@ def test_model_rope_scaling(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
@@ -369,10 +369,10 @@ def test_model_7b_logits(self):
out = model(torch.tensor(input_ids)).logits.float()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-1.3814, -3.4450, -2.2990, -1.9542, -2.4387, -2.7941, -2.9312, -2.8309]])
- torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([-2.3874, -2.4076, -2.4995, 4.2278, 1.4004, -0.0252, 0.4189, -2.7560, 0.3531, 1.6678, -0.7941, -1.1818, -0.2920, 0.7131, -1.4173, 1.6723, 0.5406, 0.1345, -0.1800, 0.2304, 1.2791, 0.7489, 0.6341, -0.0151, -1.3693, -1.2532, -2.3921, 0.7376, 1.6876, 0.5483]) # fmt: skip
- torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2)
@slow
def test_model_7b_greedy_generation(self):
diff --git a/tests/models/omdet_turbo/test_modeling_omdet_turbo.py b/tests/models/omdet_turbo/test_modeling_omdet_turbo.py
index d057b35006d3..e3996ade0396 100644
--- a/tests/models/omdet_turbo/test_modeling_omdet_turbo.py
+++ b/tests/models/omdet_turbo/test_modeling_omdet_turbo.py
@@ -701,8 +701,8 @@ def test_inference_object_detection_head(self):
[[[0.2550, 0.5501, 0.4738, 0.8745], [0.7695, 0.4121, 0.4603, 0.7244], [0.7691, 0.4117, 0.4603, 0.7214]]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.decoder_class_logits[:3, :3], expected_class_logits, atol=1e-1))
- self.assertTrue(torch.allclose(outputs.decoder_coord_logits[:3, :3], expected_coord_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.decoder_class_logits[:3, :3], expected_class_logits, rtol=1e-1, atol=1e-1)
+ torch.testing.assert_close(outputs.decoder_coord_logits[:3, :3], expected_coord_logits, rtol=1e-3, atol=1e-3)
# verify grounded postprocessing
results = processor.post_process_grounded_object_detection(
@@ -712,8 +712,8 @@ def test_inference_object_detection_head(self):
expected_slice_boxes = torch.tensor([39.8870, 70.3522, 176.7424, 118.0354]).to(torch_device)
self.assertEqual(len(results["scores"]), 4)
- self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-2))
- self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes, atol=1e-2))
+ torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-2, atol=1e-2)
+ torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-2, atol=1e-2)
expected_text_labels = ["remote", "cat", "remote", "cat"]
self.assertListEqual(results["text_labels"], expected_text_labels)
@@ -745,8 +745,8 @@ def test_inference_object_detection_head_fp16(self):
[[[0.2550, 0.5501, 0.4738, 0.8745], [0.7695, 0.4121, 0.4603, 0.7244], [0.7691, 0.4117, 0.4603, 0.7214]]]
).to(torch_device, dtype=torch.float16)
- self.assertTrue(torch.allclose(outputs.decoder_class_logits[:3, :3], expected_class_logits, atol=1e-1))
- self.assertTrue(torch.allclose(outputs.decoder_coord_logits[:3, :3], expected_coord_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.decoder_class_logits[:3, :3], expected_class_logits, rtol=1e-1, atol=1e-1)
+ torch.testing.assert_close(outputs.decoder_coord_logits[:3, :3], expected_coord_logits, rtol=1e-3, atol=1e-3)
# verify grounded postprocessing
results = processor.post_process_grounded_object_detection(
@@ -758,8 +758,8 @@ def test_inference_object_detection_head_fp16(self):
)
self.assertEqual(len(results["scores"]), 4)
- self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-2))
- self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes, atol=1e-1))
+ torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-2, atol=1e-2)
+ torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-1, atol=1e-1)
expected_text_labels = ["remote", "cat", "remote", "cat"]
self.assertListEqual(results["text_labels"], expected_text_labels)
@@ -787,8 +787,8 @@ def test_inference_object_detection_head_no_task(self):
[[[0.2550, 0.5501, 0.4738, 0.8745], [0.7695, 0.4121, 0.4603, 0.7244], [0.7691, 0.4117, 0.4603, 0.7214]]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.decoder_class_logits[:3, :3], expected_class_logits, atol=1e-1))
- self.assertTrue(torch.allclose(outputs.decoder_coord_logits[:3, :3], expected_coord_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.decoder_class_logits[:3, :3], expected_class_logits, rtol=1e-1, atol=1e-1)
+ torch.testing.assert_close(outputs.decoder_coord_logits[:3, :3], expected_coord_logits, rtol=1e-3, atol=1e-3)
# verify grounded postprocessing
results = processor.post_process_grounded_object_detection(
@@ -798,8 +798,8 @@ def test_inference_object_detection_head_no_task(self):
expected_slice_boxes = torch.tensor([39.8870, 70.3522, 176.7424, 118.0354]).to(torch_device)
self.assertEqual(len(results["scores"]), 4)
- self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-2))
- self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes, atol=1e-2))
+ torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-2, atol=1e-2)
+ torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-2, atol=1e-2)
expected_text_labels = ["remote", "cat", "remote", "cat"]
self.assertListEqual(results["text_labels"], expected_text_labels)
@@ -831,8 +831,12 @@ def test_inference_object_detection_head_batched(self):
[[[0.2550, 0.5501, 0.4738]], [[0.2535, 0.6006, 0.0353]], [[0.3742, 0.3337, 0.0666]]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.decoder_class_logits[:, :1, :3], expected_class_logits, atol=1e-1))
- self.assertTrue(torch.allclose(outputs.decoder_coord_logits[:, :1, :3], expected_coord_logits, atol=1e-3))
+ torch.testing.assert_close(
+ outputs.decoder_class_logits[:, :1, :3], expected_class_logits, rtol=1e-1, atol=1e-1
+ )
+ torch.testing.assert_close(
+ outputs.decoder_coord_logits[:, :1, :3], expected_coord_logits, rtol=1e-3, atol=1e-3
+ )
# verify grounded postprocessing
results = processor.post_process_grounded_object_detection(
@@ -851,11 +855,11 @@ def test_inference_object_detection_head_batched(self):
).to(torch_device)
self.assertListEqual([len(result["scores"]) for result in results], [4, 4, 6])
- self.assertTrue(
- torch.allclose(torch.stack([result["scores"][0] for result in results]), expected_scores, atol=1e-2)
+ torch.testing.assert_close(
+ torch.stack([result["scores"][0] for result in results]), expected_scores, rtol=1e-2, atol=1e-2
)
- self.assertTrue(
- torch.allclose(torch.stack([result["boxes"][0, :] for result in results]), expected_slice_boxes, atol=1e-2)
+ torch.testing.assert_close(
+ torch.stack([result["boxes"][0, :] for result in results]), expected_slice_boxes, rtol=1e-2, atol=1e-2
)
expected_text_labels = [
@@ -889,8 +893,12 @@ def test_inference_object_detection_head_equivalence_cpu_gpu(self):
[[[0.2550, 0.5501, 0.4738, 0.8745], [0.7695, 0.4121, 0.4603, 0.7244], [0.7691, 0.4117, 0.4603, 0.7214]]]
)
- self.assertTrue(torch.allclose(cpu_outputs.decoder_class_logits[:3, :3], expected_class_logits, atol=1e-1))
- self.assertTrue(torch.allclose(cpu_outputs.decoder_coord_logits[:3, :3], expected_coord_logits, atol=1e-3))
+ torch.testing.assert_close(
+ cpu_outputs.decoder_class_logits[:3, :3], expected_class_logits, rtol=1e-1, atol=1e-1
+ )
+ torch.testing.assert_close(
+ cpu_outputs.decoder_coord_logits[:3, :3], expected_coord_logits, rtol=1e-3, atol=1e-3
+ )
# verify grounded postprocessing
results_cpu = processor.post_process_grounded_object_detection(
@@ -900,5 +908,5 @@ def test_inference_object_detection_head_equivalence_cpu_gpu(self):
gpu_outputs, text_labels=[text_labels], target_sizes=[image.size[::-1]]
)[0]
- self.assertTrue(torch.allclose(results_cpu["scores"], result_gpu["scores"].cpu(), atol=1e-2))
- self.assertTrue(torch.allclose(results_cpu["boxes"][0, :], result_gpu["boxes"][0, :].cpu(), atol=1e-2))
+ torch.testing.assert_close(results_cpu["scores"], result_gpu["scores"].cpu(), rtol=1e-2, atol=1e-2)
+ torch.testing.assert_close(results_cpu["boxes"][0, :], result_gpu["boxes"][0, :].cpu(), rtol=1e-2, atol=1e-2)
diff --git a/tests/models/omdet_turbo/test_processor_omdet_turbo.py b/tests/models/omdet_turbo/test_processor_omdet_turbo.py
index 341c7a1d9a13..66dbc4eb0176 100644
--- a/tests/models/omdet_turbo/test_processor_omdet_turbo.py
+++ b/tests/models/omdet_turbo/test_processor_omdet_turbo.py
@@ -106,10 +106,10 @@ def test_post_process_grounded_object_detection(self):
self.assertEqual(post_processed[0]["boxes"].shape, (self.num_queries, 4))
self.assertEqual(post_processed[0]["scores"].shape, (self.num_queries,))
expected_scores = torch.tensor([0.7310, 0.6579, 0.6513, 0.6444, 0.6252])
- self.assertTrue(torch.allclose(post_processed[0]["scores"], expected_scores, atol=1e-4))
+ torch.testing.assert_close(post_processed[0]["scores"], expected_scores, rtol=1e-4, atol=1e-4)
expected_box_slice = torch.tensor([14.9657, 141.2052, 30.0000, 312.9670])
- self.assertTrue(torch.allclose(post_processed[0]["boxes"][0], expected_box_slice, atol=1e-4))
+ torch.testing.assert_close(post_processed[0]["boxes"][0], expected_box_slice, rtol=1e-4, atol=1e-4)
def test_save_load_pretrained_additional_features(self):
processor = OmDetTurboProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
diff --git a/tests/models/oneformer/test_modeling_oneformer.py b/tests/models/oneformer/test_modeling_oneformer.py
index d75a76cd4f16..8f1df74ea627 100644
--- a/tests/models/oneformer/test_modeling_oneformer.py
+++ b/tests/models/oneformer/test_modeling_oneformer.py
@@ -576,7 +576,7 @@ def test_inference_universal_segmentation_head(self):
)
expected_slice = [[[3.1848, 4.2141, 4.1993], [2.9000, 3.5721, 3.6603], [2.5358, 3.0883, 3.6168]]]
expected_slice = torch.tensor(expected_slice).to(torch_device)
- self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(masks_queries_logits[0, 0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
# class_queries_logits
class_queries_logits = outputs.class_queries_logits
self.assertEqual(
@@ -586,7 +586,7 @@ def test_inference_universal_segmentation_head(self):
expected_slice = torch.tensor(
[[3.0668, -1.1833, -5.1103], [3.3440, -3.3620, -5.1101], [2.6017, -4.3613, -4.1444]]
).to(torch_device)
- self.assertTrue(torch.allclose(class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(class_queries_logits[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
@require_torch_accelerator
@require_torch_fp16
diff --git a/tests/models/oneformer/test_processor_oneformer.py b/tests/models/oneformer/test_processor_oneformer.py
index dae50040ec04..2a9d9f859cc9 100644
--- a/tests/models/oneformer/test_processor_oneformer.py
+++ b/tests/models/oneformer/test_processor_oneformer.py
@@ -504,9 +504,9 @@ def create_panoptic_map(annotation, segments_info):
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
expected_class_labels = torch.tensor([4, 17, 32, 42, 12, 3, 5, 0, 43, 96, 104, 31, 125, 138, 87, 149]) # noqa: E231 # fmt: skip
- self.assertTrue(torch.allclose(inputs["class_labels"][0], expected_class_labels))
+ torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels)
expected_class_labels = torch.tensor([19, 67, 82, 17, 12, 42, 3, 14, 5, 0, 115, 43, 8, 138, 125, 143]) # noqa: E231 # fmt: skip
- self.assertTrue(torch.allclose(inputs["class_labels"][1], expected_class_labels))
+ torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels)
# verify the task inputs
self.assertEqual(len(inputs["task_inputs"]), 2)
@@ -592,9 +592,9 @@ def create_panoptic_map(annotation, segments_info):
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
expected_class_labels = torch.tensor([32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 43, 43, 43, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip
- self.assertTrue(torch.allclose(inputs["class_labels"][0], expected_class_labels))
+ torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels)
expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 12, 12, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip
- self.assertTrue(torch.allclose(inputs["class_labels"][1], expected_class_labels))
+ torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels)
# verify the task inputs
self.assertEqual(len(inputs["task_inputs"]), 2)
@@ -680,9 +680,9 @@ def create_panoptic_map(annotation, segments_info):
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip
- self.assertTrue(torch.allclose(inputs["class_labels"][0], expected_class_labels))
+ torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels)
expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip
- self.assertTrue(torch.allclose(inputs["class_labels"][1], expected_class_labels))
+ torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels)
# verify the task inputs
self.assertEqual(len(inputs["task_inputs"]), 2)
diff --git a/tests/models/owlv2/test_modeling_owlv2.py b/tests/models/owlv2/test_modeling_owlv2.py
index cab47c2b5eac..e6aff1c7021c 100644
--- a/tests/models/owlv2/test_modeling_owlv2.py
+++ b/tests/models/owlv2/test_modeling_owlv2.py
@@ -826,7 +826,7 @@ def test_inference(self):
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[-6.2229, -8.2601]], device=torch_device)
- self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
@@ -858,7 +858,7 @@ def test_inference_interpolate_pos_encoding(self):
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[-6.2520, -8.2970]], device=torch_device)
- self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
expected_shape = torch.Size((1, 4097, 768))
self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape)
@@ -874,7 +874,7 @@ def test_inference_interpolate_pos_encoding(self):
expected_slice_boxes = torch.tensor(
[[0.2407, 0.0553, 0.4636], [0.1082, 0.0494, 0.1861], [0.2459, 0.0527, 0.4398]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
model = Owlv2ForObjectDetection.from_pretrained(model_name).to(torch_device)
query_image = prepare_img()
@@ -920,7 +920,7 @@ def test_inference_interpolate_pos_encoding(self):
]
)
- self.assertTrue(torch.allclose(model.box_bias[:3, :4], expected_default_box_bias, atol=1e-4))
+ torch.testing.assert_close(model.box_bias[:3, :4], expected_default_box_bias, rtol=1e-4, atol=1e-4)
# Interpolate with any resolution size.
processor.image_processor.size = {"height": 1264, "width": 1024}
@@ -945,7 +945,7 @@ def test_inference_interpolate_pos_encoding(self):
expected_slice_boxes = torch.tensor(
[[0.2438, 0.0945, 0.4675], [0.1361, 0.0431, 0.2406], [0.2465, 0.0428, 0.4429]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
query_image = prepare_img()
inputs = processor(
@@ -992,13 +992,11 @@ def test_inference_object_detection(self):
expected_slice_logits = torch.tensor(
[[-21.413497, -21.612638], [-19.008193, -19.548841], [-20.958896, -21.382694]]
).to(torch_device)
- resulted_slice_logits = outputs.logits[0, :3, :3]
- max_diff = torch.max(torch.abs(resulted_slice_logits - expected_slice_logits)).item()
- self.assertLess(max_diff, 3e-4)
-
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice_logits, rtol=1e-4, atol=1e-4)
expected_slice_boxes = torch.tensor(
[[0.241309, 0.051896, 0.453267], [0.139474, 0.045701, 0.250660], [0.233022, 0.050479, 0.427671]],
).to(torch_device)
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
resulted_slice_boxes = outputs.pred_boxes[0, :3, :3]
max_diff = torch.max(torch.abs(resulted_slice_boxes - expected_slice_boxes)).item()
self.assertLess(max_diff, 3e-4)
@@ -1044,7 +1042,7 @@ def test_inference_one_shot_object_detection(self):
expected_slice_boxes = torch.tensor(
[[0.2413, 0.0519, 0.4533], [0.1395, 0.0457, 0.2507], [0.2330, 0.0505, 0.4277]],
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.target_pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.target_pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
@slow
@require_torch_accelerator
diff --git a/tests/models/owlvit/test_modeling_owlvit.py b/tests/models/owlvit/test_modeling_owlvit.py
index d207135a58e8..81034df4cbc9 100644
--- a/tests/models/owlvit/test_modeling_owlvit.py
+++ b/tests/models/owlvit/test_modeling_owlvit.py
@@ -819,7 +819,7 @@ def test_inference(self):
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[3.4613, 0.9403]], device=torch_device)
- self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
@@ -851,7 +851,7 @@ def test_inference_interpolate_pos_encoding(self):
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[3.6278, 0.8861]], device=torch_device)
- self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
expected_shape = torch.Size((1, 626, 768))
self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape)
@@ -868,7 +868,7 @@ def test_inference_interpolate_pos_encoding(self):
expected_slice_boxes = torch.tensor(
[[0.0680, 0.0422, 0.1347], [0.2071, 0.0450, 0.4146], [0.2000, 0.0418, 0.3476]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
model = OwlViTForObjectDetection.from_pretrained(model_name).to(torch_device)
query_image = prepare_img()
@@ -913,7 +913,7 @@ def test_inference_interpolate_pos_encoding(self):
[-1.9452, -3.1332, -3.1332, -3.1332],
]
)
- self.assertTrue(torch.allclose(model.box_bias[:3, :4], expected_default_box_bias, atol=1e-4))
+ torch.testing.assert_close(model.box_bias[:3, :4], expected_default_box_bias, rtol=1e-4, atol=1e-4)
# Interpolate with any resolution size.
processor.image_processor.size = {"height": 1264, "width": 1024}
@@ -938,7 +938,7 @@ def test_inference_interpolate_pos_encoding(self):
expected_slice_boxes = torch.tensor(
[[0.0499, 0.0301, 0.0983], [0.2244, 0.0365, 0.4663], [0.1387, 0.0314, 0.1859]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
query_image = prepare_img()
inputs = processor(
@@ -985,7 +985,7 @@ def test_inference_object_detection(self):
expected_slice_boxes = torch.tensor(
[[0.0691, 0.0445, 0.1373], [0.1592, 0.0456, 0.3192], [0.1632, 0.0423, 0.2478]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
# test post-processing
post_processed_output = processor.post_process_grounded_object_detection(outputs)
@@ -1028,7 +1028,7 @@ def test_inference_one_shot_object_detection(self):
expected_slice_boxes = torch.tensor(
[[0.0691, 0.0445, 0.1373], [0.1592, 0.0456, 0.3192], [0.1632, 0.0423, 0.2478]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.target_pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.target_pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
@slow
@require_torch_accelerator
diff --git a/tests/models/paligemma/test_modeling_paligemma.py b/tests/models/paligemma/test_modeling_paligemma.py
index cab278a1dc8e..84cf1b154c29 100644
--- a/tests/models/paligemma/test_modeling_paligemma.py
+++ b/tests/models/paligemma/test_modeling_paligemma.py
@@ -232,7 +232,7 @@ def test_inputs_embeds_matches_input_ids(self):
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
# Copied from tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_mismatching_num_image_tokens
def test_mismatching_num_image_tokens(self):
diff --git a/tests/models/patchtsmixer/test_modeling_patchtsmixer.py b/tests/models/patchtsmixer/test_modeling_patchtsmixer.py
index aae75b8586a3..7c8e0be72be1 100644
--- a/tests/models/patchtsmixer/test_modeling_patchtsmixer.py
+++ b/tests/models/patchtsmixer/test_modeling_patchtsmixer.py
@@ -483,7 +483,7 @@ def test_pretrain_head(self):
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[[[-0.9106]],[[1.5326]],[[-0.8245]],[[0.7439]],[[-0.7830]],[[2.6256]],[[-0.6485]],]],device=torch_device) # fmt: skip
- self.assertTrue(torch.allclose(output[0, :7, :1, :1], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[0, :7, :1, :1], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_forecasting_head(self):
model = PatchTSMixerForPrediction.from_pretrained("ibm/patchtsmixer-etth1-forecasting").to(torch_device)
@@ -504,7 +504,7 @@ def test_forecasting_head(self):
[[0.2471, 0.5036, 0.3596, 0.5401, -0.0985, 0.3423, -0.8439]],
device=torch_device,
)
- self.assertTrue(torch.allclose(output[0, :1, :7], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[0, :1, :7], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_prediction_generation(self):
model = PatchTSMixerForPrediction.from_pretrained("ibm/patchtsmixer-etth1-generate").to(torch_device)
@@ -526,7 +526,7 @@ def test_prediction_generation(self):
mean_prediction = outputs.sequences.mean(dim=1)
- self.assertTrue(torch.allclose(mean_prediction[0, -1:], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(mean_prediction[0, -1:], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
@require_torch
diff --git a/tests/models/patchtst/test_modeling_patchtst.py b/tests/models/patchtst/test_modeling_patchtst.py
index 3d0774f37268..0f6f019dc3ef 100644
--- a/tests/models/patchtst/test_modeling_patchtst.py
+++ b/tests/models/patchtst/test_modeling_patchtst.py
@@ -329,7 +329,7 @@ def test_pretrain_head(self):
[[[-0.0173]], [[-1.0379]], [[-0.1030]], [[0.3642]], [[0.1601]], [[-1.3136]], [[0.8780]]],
device=torch_device,
)
- self.assertTrue(torch.allclose(output[0, :7, :1, :1], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[0, :7, :1, :1], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
# Publishing of pretrained weights are under internal review. Pretrained model is not yet downloadable.
def test_prediction_head(self):
@@ -349,7 +349,7 @@ def test_prediction_head(self):
[[0.5142, 0.6928, 0.6118, 0.5724, -0.3735, -0.1336, -0.7124]],
device=torch_device,
)
- self.assertTrue(torch.allclose(output[0, :1, :7], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[0, :1, :7], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_prediction_generation(self):
model = PatchTSTForPrediction.from_pretrained("namctin/patchtst_etth1_forecast").to(torch_device)
@@ -367,7 +367,7 @@ def test_prediction_generation(self):
device=torch_device,
)
mean_prediction = outputs.sequences.mean(dim=1)
- self.assertTrue(torch.allclose(mean_prediction[0, -1:], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(mean_prediction[0, -1:], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_regression_generation(self):
model = PatchTSTForRegression.from_pretrained("ibm/patchtst-etth1-regression-distribution").to(torch_device)
@@ -385,4 +385,4 @@ def test_regression_generation(self):
device=torch_device,
)
mean_prediction = outputs.sequences.mean(dim=1)
- self.assertTrue(torch.allclose(mean_prediction[-5:], expected_slice, rtol=TOLERANCE))
+ torch.testing.assert_close(mean_prediction[-5:], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
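For reference, the assert_close semantics these hunks rely on (an illustrative sketch, not part of the patch): torch.testing.assert_close treats rtol and atol as a pair — passing only one of them raises a ValueError, while omitting both falls back to dtype-based defaults — and an element passes when |actual - expected| <= atol + rtol * |expected|. On failure it raises an AssertionError that reports the greatest absolute and relative differences, which is why it replaces self.assertTrue(torch.allclose(...)) throughout.

import torch

expected = torch.tensor([1.0, 2.0])
actual = expected + 1e-5

# Passes: 1e-5 <= atol + rtol * |expected| for every element.
torch.testing.assert_close(actual, expected, rtol=1e-4, atol=1e-4)

# Rejected with a ValueError: rtol and atol must be given together
# (or both omitted, in which case dtype-based defaults apply).
# torch.testing.assert_close(actual, expected, rtol=1e-4)

# Fails with an AssertionError listing the greatest absolute/relative difference.
# torch.testing.assert_close(actual, expected, rtol=1e-7, atol=1e-7)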
diff --git a/tests/models/pegasus_x/test_modeling_pegasus_x.py b/tests/models/pegasus_x/test_modeling_pegasus_x.py
index c6b4b2c86486..2463b7ab2613 100644
--- a/tests/models/pegasus_x/test_modeling_pegasus_x.py
+++ b/tests/models/pegasus_x/test_modeling_pegasus_x.py
@@ -595,7 +595,7 @@ def test_inference_no_head(self):
[[0.0702, -0.1552, 0.1192], [0.0836, -0.1848, 0.1304], [0.0673, -0.1686, 0.1045]], device=torch_device
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_inference_head(self):
model = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base").to(torch_device)
@@ -612,7 +612,7 @@ def test_inference_head(self):
expected_slice = torch.tensor(
[[0.0, 9.5705185, 1.5897303], [0.0, 9.833374, 1.5828674], [0.0, 10.429961, 1.5643371]], device=torch_device
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_seq_to_seq_generation(self):
hf = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base-arxiv").to(torch_device)
diff --git a/tests/models/perceiver/test_modeling_perceiver.py b/tests/models/perceiver/test_modeling_perceiver.py
index b0ec8ac45c35..e6bcb930ec61 100644
--- a/tests/models/perceiver/test_modeling_perceiver.py
+++ b/tests/models/perceiver/test_modeling_perceiver.py
@@ -683,7 +683,7 @@ def test_feed_forward_chunking(self):
torch.allclose(hidden_states_no_chunk[modality], hidden_states_with_chunk[modality], atol=1e-3)
)
else:
- self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))
+ torch.testing.assert_close(hidden_states_no_chunk, hidden_states_with_chunk, rtol=1e-3, atol=1e-3)
def test_save_load(self):
for model_class in self.all_model_classes:
@@ -909,7 +909,7 @@ def test_inference_masked_lm(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
expected_greedy_predictions = [38, 115, 111, 121, 121, 111, 116, 109, 52]
masked_tokens_predictions = logits[0, 52:61].argmax(dim=-1).tolist()
@@ -938,7 +938,7 @@ def test_inference_image_classification(self):
expected_slice = torch.tensor([-1.1652, -0.1992, -0.7520], device=torch_device)
atol = 1e-3 if IS_ROCM_SYSTEM else 1e-4
- self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=atol))
+ torch.testing.assert_close(logits[0, :3], expected_slice, rtol=atol, atol=atol)
@slow
def test_inference_image_classification_fourier(self):
@@ -962,7 +962,7 @@ def test_inference_image_classification_fourier(self):
expected_slice = torch.tensor([-1.1295, -0.2832, 0.3226], device=torch_device)
- self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_image_classification_conv(self):
@@ -986,7 +986,7 @@ def test_inference_image_classification_conv(self):
expected_slice = torch.tensor([-1.1186, 0.0554, 0.0897], device=torch_device)
- self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_optical_flow(self):
@@ -1030,7 +1030,7 @@ def test_inference_optical_flow(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, :3, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_interpolate_pos_encoding(self):
diff --git a/tests/models/persimmon/test_modeling_persimmon.py b/tests/models/persimmon/test_modeling_persimmon.py
index e783cea95a63..c8725a5badce 100644
--- a/tests/models/persimmon/test_modeling_persimmon.py
+++ b/tests/models/persimmon/test_modeling_persimmon.py
@@ -410,7 +410,7 @@ def test_model_rope_scaling_from_config(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
@@ -483,14 +483,14 @@ def test_model_8b_chat_logits(self):
[[-11.4726, -11.1495, -11.2694, -11.2223, -10.9452, -11.0663, -11.0031, -11.1028]]
)
# change dtype to `torch.float32` before calling `mean` to avoid `nan` values
- torch.testing.assert_close(out.cpu().to(torch.float32).mean(-1), EXPECTED_MEAN, atol=1e-4, rtol=1e-4)
+ torch.testing.assert_close(out.cpu().to(torch.float32).mean(-1), EXPECTED_MEAN, rtol=1e-4, atol=1e-4)
# fmt: off
EXPECTED_SLICE = torch.tensor(
[-16.9062, -16.9062, -16.9062, -16.9062, -16.8906, -16.9062, -16.9531, -16.9062, -16.9062, -16.9062, -16.9531, -16.9062, -16.9531, -16.9062, -16.9062, -16.9062, -16.9062, -16.9062, -16.9531, -16.9062, -16.9062, -16.9062, -16.9062, -16.9062, -16.9062, -16.9531, -16.9062, -16.9531, -16.9062, -16.9062],
dtype=torch.float16
)
# fmt: on
- torch.testing.assert_close(out.cpu()[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
+ torch.testing.assert_close(out.cpu()[0, 0, :30], EXPECTED_SLICE, rtol=1e-5, atol=1e-5)
backend_empty_cache(torch_device)
del model
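The RoPE-scaling hunks in this file (and in the phi, phi3 and phimoe tests below) migrate only the positive assertions; the assertFalse(torch.allclose(...)) branches are left on torch.allclose, which is consistent with torch.testing offering no inverse of assert_close. A minimal standalone sketch of that split (names are illustrative):

import torch

a = torch.tensor([1.0, 2.0])
b = a.clone()
c = a + 1.0

# "Must match" checks migrate to assert_close for its informative failure message.
torch.testing.assert_close(a, b, rtol=1e-5, atol=1e-5)

# "Must differ" checks stay on torch.allclose: there is no assert_not_close,
# so the boolean result is still needed for assertFalse / assert not.
assert not torch.allclose(a, c, atol=1e-5)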
diff --git a/tests/models/phi/test_modeling_phi.py b/tests/models/phi/test_modeling_phi.py
index c7b59d278e4f..c54d4ebee5c3 100644
--- a/tests/models/phi/test_modeling_phi.py
+++ b/tests/models/phi/test_modeling_phi.py
@@ -389,7 +389,7 @@ def test_model_rope_scaling_from_config(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
@@ -463,7 +463,7 @@ def test_model_phi_1_logits(self):
EXPECTED_OUTPUT = torch.tensor([[2.2671, 6.7684, -2.0107, -1.2440, -1.5335, -2.3828, 6.9186, 6.4245, 3.1548, 0.9998, 0.0760, 4.4653, 4.9857, 4.2956, 1.2308, -1.4178, 0.1361, 0.5191, -0.5699, -2.2201, -3.0750, -3.9600, -4.5936, -3.7394, -2.7777, 6.1874, -0.4148, -1.5684, -0.5967, 0.2395], [1.7004, 4.0383, 0.0546, 0.4530, -0.3619, -0.9021, 1.8355, 1.3587, 1.2406, 2.5775, -0.8834, 5.1910, 4.2565, 4.1406, 3.0752, -0.9099, 1.1595, 0.0264, 0.3243, -1.1803, -1.3945, -2.1406, -3.9939, -1.4438, -2.9546, 3.9204, 1.0851, -1.0598, -1.7819, -0.4827]]).to(torch_device) # fmt: skip
- self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-4, rtol=1e-4))
+ torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4)
def test_model_phi_1_5_logits(self):
input_ids = {
@@ -479,7 +479,7 @@ def test_model_phi_1_5_logits(self):
EXPECTED_OUTPUT = torch.tensor([[12.2922, 13.3507, 8.6963, 9.1355, 9.3502, 9.2667, 14.2027, 13.1363, 13.5446, 11.1337, 9.9279, 16.7195, 13.0768, 14.9141, 11.9965, 8.0233, 10.3129, 10.6118, 10.0204, 9.3827, 8.8344, 8.2806, 8.0153, 8.0540, 7.0964, 16.5743, 11.1256, 9.6987, 11.4770, 10.5440], [12.3323, 14.6050, 8.9986, 8.1580, 9.5654, 6.6728, 12.5966, 12.6662, 12.2784, 11.7522, 8.2039, 16.3102, 11.2203, 13.6088, 12.0125, 9.1021, 9.8216, 10.0987, 9.0926, 8.4260, 8.8009, 7.6547, 6.8075, 7.7881, 7.4501, 15.7451, 10.5053, 8.3129, 10.0027, 9.2612]]).to(torch_device) # fmt: skip
- self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-4, rtol=1e-4))
+ torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4)
def test_model_phi_2_logits(self):
input_ids = {
@@ -495,7 +495,7 @@ def test_model_phi_2_logits(self):
EXPECTED_OUTPUT = torch.tensor([[6.4830, 6.1644, 3.4055, 2.2848, 5.4654, 2.8360, 5.5975, 5.5391, 7.3101, 4.2498, 2.5913, 10.3885, 6.4359, 8.7982, 5.6534, 0.5150, 2.7498, 3.1930, 2.4334, 1.7781, 1.5613, 1.3067, 0.8291, 0.5633, 0.6522, 9.8191, 5.5771, 2.7987, 4.2845, 3.7030], [6.0642, 7.8242, 3.4634, 1.9259, 4.3169, 2.0913, 6.0446, 3.6804, 6.6736, 4.0727, 2.1791, 11.4139, 5.6795, 7.5652, 6.2039, 2.7174, 4.3266, 3.6930, 2.8058, 2.6721, 2.3047, 2.0848, 2.0972, 2.0441, 1.3160, 9.2085, 4.5557, 3.0296, 2.6045, 2.4059]]).to(torch_device) # fmt: skip
- self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-3, rtol=1e-3))
+ torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-3, atol=1e-3)
def test_phi_2_generation(self):
model = PhiForCausalLM.from_pretrained("microsoft/phi-2")
diff --git a/tests/models/phi3/test_modeling_phi3.py b/tests/models/phi3/test_modeling_phi3.py
index 6ec663c6636f..1b2717858784 100644
--- a/tests/models/phi3/test_modeling_phi3.py
+++ b/tests/models/phi3/test_modeling_phi3.py
@@ -491,7 +491,7 @@ def test_model_rope_scaling_short_long_factor(self, scaling_type):
# KV cache is re-computed after reaching the (`config.original_max_position_embeddings`+1)th token position
self.assertFalse(torch.allclose(keys_with_short_factor, keys_with_long_factor, atol=1e-2, rtol=1e-2))
# Last token generated using long factor
- self.assertTrue(torch.allclose(last_token_logits, regenerated_last_token_logits, atol=1e-2, rtol=1e-2))
+ torch.testing.assert_close(last_token_logits, regenerated_last_token_logits, rtol=1e-2, atol=1e-2)
@slow
@@ -511,7 +511,7 @@ def test_model_phi3_mini_4k_instruct_logits(self):
EXPECTED_OUTPUT = torch.tensor([[ 0.9979, -1.9449, -2.5613, -2.2110, -0.9323, -2.2726, -3.2468, -2.0122,-1.0021, -1.2764, -1.0876, -1.2358, 3.9385, 6.2152, -0.3695, -2.3285,-1.2907, -1.8238, -1.9941, -2.2098, -0.6923, -1.6793, -1.1660, -2.0469,-0.7369, -1.4101, -1.4091, -3.1694, -1.8383, -1.1952],[ 3.0525, 1.9178, 3.7016, 0.9263, 0.3397, 1.9584, 2.1347, 0.3482, 1.3773, 0.2153, 0.2798, 0.8360, 9.0936, 11.4944, -0.3575, -0.9442,-0.1246, 1.3869, 0.9846, 1.7243, 0.9150, 1.0823, 0.4313, 1.5742, 0.2566, -0.1401, -1.3019, 0.4967, 0.6941, 0.7214]]).to(torch_device) # fmt: skip
- self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-4, rtol=1e-4))
+ torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4)
def test_phi3_mini_4k_instruct_generation(self):
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
@@ -572,7 +572,7 @@ def test_model_phi3_mini_128k_instruct_logits(self):
EXPECTED_OUTPUT = torch.tensor([[ 1.8478, -0.5709, -1.6792, -1.2133, -0.7809, -0.8817, -2.0969, -1.1191,-0.7731, -1.0483, -0.5961, -1.3067, 3.1325, 6.9442, -0.4803, -0.9154,-1.3085, -1.0822, -1.1433, -0.7660, -0.8531, -0.9150, -0.6179, -1.6153,-0.2239, -1.3207, -1.1187, -2.4795, -1.4733, -0.4931],[ 3.5839, 2.4722, 3.7130, 1.2032, 0.7356, 2.7777, 2.5256, 0.9157, 1.6431, 0.3533, 0.5100, 1.3512, 8.9873, 10.9815, 0.3530, 0.1473, 0.2051, 1.8553, 1.5988, 2.2268, 1.1897, 1.2829, 0.7894, 1.8895, 0.7666, 0.4122, -0.9316, 0.9936, 1.2722, 0.8263]]).to(torch_device) # fmt: skip
- self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-4, rtol=1e-4))
+ torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4)
def test_phi3_mini_128k_instruct_generation(self):
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-128k-instruct")
diff --git a/tests/models/phimoe/test_modeling_phimoe.py b/tests/models/phimoe/test_modeling_phimoe.py
index 881967076e7e..9ce4ae009104 100644
--- a/tests/models/phimoe/test_modeling_phimoe.py
+++ b/tests/models/phimoe/test_modeling_phimoe.py
@@ -491,7 +491,7 @@ def test_model_rope_scaling_short_long_factor(self, scaling_type):
# KV cache is re-computed after reaching the (`config.original_max_position_embeddings`+1)th token position
self.assertFalse(torch.allclose(keys_with_short_factor, keys_with_long_factor, atol=1e-3, rtol=1e-3))
# Last token generated using long factor
- self.assertTrue(torch.allclose(last_token_logits, regenerated_last_token_logits, atol=1e-2, rtol=1e-2))
+ torch.testing.assert_close(last_token_logits, regenerated_last_token_logits, rtol=1e-2, atol=1e-2)
@slow
@@ -518,7 +518,7 @@ def test_model_phimoe_instruct_logits(self):
-4.9375, 0.7148, -0.0972, 1.7656, -0.0801, 0.2217, 0.1875, -0.4629,
1.5781, 0.3535, 0.0874, 0.6836, -0.0518, -1.2969]]).to(torch_device) # fmt: skip
- self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-4, rtol=1e-4))
+ torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4)
def test_phimoe_instruct_generation(self):
model = PhimoeForCausalLM.from_pretrained("microsoft/Phi-3.5-MoE-instruct")
diff --git a/tests/models/pix2struct/test_image_processing_pix2struct.py b/tests/models/pix2struct/test_image_processing_pix2struct.py
index 6b12b3827dab..2650b3503b59 100644
--- a/tests/models/pix2struct/test_image_processing_pix2struct.py
+++ b/tests/models/pix2struct/test_image_processing_pix2struct.py
@@ -106,7 +106,7 @@ def test_expected_patches(self):
max_patch = 2048
inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
- self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
+ torch.testing.assert_close(inputs.flattened_patches.mean(), torch.tensor(0.0606), rtol=1e-3, atol=1e-3)
def test_call_pil(self):
# Initialize image_processor
diff --git a/tests/models/pixtral/test_image_processing_pixtral.py b/tests/models/pixtral/test_image_processing_pixtral.py
index 1377b676917f..19bfde038f2a 100644
--- a/tests/models/pixtral/test_image_processing_pixtral.py
+++ b/tests/models/pixtral/test_image_processing_pixtral.py
@@ -281,7 +281,9 @@ def test_slow_fast_equivalence(self):
encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
- self.assertTrue(torch.allclose(encoding_slow.pixel_values[0][0], encoding_fast.pixel_values[0][0], atol=1e-2))
+ torch.testing.assert_close(
+ encoding_slow.pixel_values[0][0], encoding_fast.pixel_values[0][0], rtol=1e-2, atol=1e-2
+ )
@slow
@require_torch_gpu
@@ -300,7 +302,9 @@ def test_can_compile_fast_image_processor(self):
image_processor = torch.compile(image_processor, mode="reduce-overhead")
output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt")
- self.assertTrue(torch.allclose(output_eager.pixel_values[0][0], output_compiled.pixel_values[0][0], atol=1e-4))
+ torch.testing.assert_close(
+ output_eager.pixel_values[0][0], output_compiled.pixel_values[0][0], rtol=1e-4, atol=1e-4
+ )
@unittest.skip(reason="PixtralImageProcessor doesn't treat 4 channel PIL and numpy consistently yet") # FIXME Amy
def test_call_numpy_4_channels(self):
diff --git a/tests/models/poolformer/test_modeling_poolformer.py b/tests/models/poolformer/test_modeling_poolformer.py
index d9a522cde6f4..775df97cde30 100644
--- a/tests/models/poolformer/test_modeling_poolformer.py
+++ b/tests/models/poolformer/test_modeling_poolformer.py
@@ -236,4 +236,4 @@ def test_inference_image_classification_head(self):
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-0.6113, 0.1685, -0.0492]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/pop2piano/test_feature_extraction_pop2piano.py b/tests/models/pop2piano/test_feature_extraction_pop2piano.py
index 6b4b1b987a2f..2a1a767c2c63 100644
--- a/tests/models/pop2piano/test_feature_extraction_pop2piano.py
+++ b/tests/models/pop2piano/test_feature_extraction_pop2piano.py
@@ -148,7 +148,7 @@ def test_integration(self):
EXPECTED_INPUT_FEATURES = torch.tensor(
[[-7.1493, -6.8701, -4.3214], [-5.9473, -5.7548, -3.8438], [-6.1324, -5.9018, -4.3778]]
)
- self.assertTrue(torch.allclose(input_features[0, :3, :3], EXPECTED_INPUT_FEATURES, atol=1e-4))
+ torch.testing.assert_close(input_features[0, :3, :3], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
def test_attention_mask(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
diff --git a/tests/models/pop2piano/test_modeling_pop2piano.py b/tests/models/pop2piano/test_modeling_pop2piano.py
index 39ff67f08ce5..47cf47f6adaa 100644
--- a/tests/models/pop2piano/test_modeling_pop2piano.py
+++ b/tests/models/pop2piano/test_modeling_pop2piano.py
@@ -691,7 +691,7 @@ def test_mel_conditioner_integration(self):
[[1.0475305318832397, 0.29052114486694336, -0.47778210043907166], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]
)
- self.assertTrue(torch.allclose(outputs[0, :3, :3], EXPECTED_OUTPUTS, atol=1e-4))
+ torch.testing.assert_close(outputs[0, :3, :3], EXPECTED_OUTPUTS, rtol=1e-4, atol=1e-4)
@slow
@require_essentia
diff --git a/tests/models/pop2piano/test_tokenization_pop2piano.py b/tests/models/pop2piano/test_tokenization_pop2piano.py
index 29e8eacf26fd..6ee6721d81c7 100644
--- a/tests/models/pop2piano/test_tokenization_pop2piano.py
+++ b/tests/models/pop2piano/test_tokenization_pop2piano.py
@@ -87,8 +87,8 @@ def test_call(self):
)
expected_output_attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
- self.assertTrue(torch.allclose(output["token_ids"], expected_output_token_ids, atol=1e-4))
- self.assertTrue(torch.allclose(output["attention_mask"], expected_output_attention_mask, atol=1e-4))
+ torch.testing.assert_close(output["token_ids"], expected_output_token_ids, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(output["attention_mask"], expected_output_attention_mask, rtol=1e-4, atol=1e-4)
def test_batch_decode(self):
# test batch decode with model, feature-extractor outputs(beatsteps, extrapolated_beatstep)
@@ -174,7 +174,7 @@ def test_batch_decode_outputs(self):
)
predicted_start_timings = torch.tensor(predicted_start_timings)
- self.assertTrue(torch.allclose(expected_start_timings, predicted_start_timings, atol=1e-4))
+ torch.testing.assert_close(expected_start_timings, predicted_start_timings, rtol=1e-4, atol=1e-4)
# Checking note end timings
expected_end_timings = torch.tensor(
@@ -187,7 +187,7 @@ def test_batch_decode_outputs(self):
)
predicted_end_timings = torch.tensor(predicted_end_timings)
- self.assertTrue(torch.allclose(expected_end_timings, predicted_end_timings, atol=1e-4))
+ torch.testing.assert_close(expected_end_timings, predicted_end_timings, rtol=1e-4, atol=1e-4)
def test_get_vocab(self):
vocab_dict = self.tokenizer.get_vocab()
diff --git a/tests/models/prophetnet/test_modeling_prophetnet.py b/tests/models/prophetnet/test_modeling_prophetnet.py
index 91da243dbbbe..1f86a7662c8f 100644
--- a/tests/models/prophetnet/test_modeling_prophetnet.py
+++ b/tests/models/prophetnet/test_modeling_prophetnet.py
@@ -1227,7 +1227,7 @@ def test_pretrained_checkpoint_hidden_states(self):
expected_slice = torch.tensor(
[[[-7.7729, -8.0343, -8.26001], [-7.74213, -7.8629, -8.6000], [-7.7328, -7.8269, -8.5264]]]
).to(torch_device)
- # self.assertTrue(torch.allclose(output_predited_logits[:, :3, :3], expected_slice, atol=1e-4))
+ # torch.testing.assert_close(output_predited_logits[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
assert torch.allclose(output_predited_logits[:, :3, :3], expected_slice, atol=1e-4)
# encoder outputs
@@ -1237,7 +1237,7 @@ def test_pretrained_checkpoint_hidden_states(self):
).to(torch_device)
expected_shape_encoder = torch.Size((1, 28, 1024))
self.assertEqual(encoder_outputs.shape, expected_shape_encoder)
- # self.assertTrue(torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4))
+ # torch.testing.assert_close(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, rtol=1e-4, atol=1e-4)
assert torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4)
# decoder outputs
@@ -1245,7 +1245,7 @@ def test_pretrained_checkpoint_hidden_states(self):
predicting_streams = decoder_outputs[1].view(1, model.config.ngram, 12, -1)
predicting_streams_logits = model.lm_head(predicting_streams)
next_first_stream_logits = predicting_streams_logits[:, 0]
- # self.assertTrue(torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4))
+ # torch.testing.assert_close(next_first_stream_logits[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
assert torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4)
@slow
diff --git a/tests/models/pvt/test_modeling_pvt.py b/tests/models/pvt/test_modeling_pvt.py
index e5f5fd0c1432..3bc5e3892d42 100644
--- a/tests/models/pvt/test_modeling_pvt.py
+++ b/tests/models/pvt/test_modeling_pvt.py
@@ -277,7 +277,7 @@ def test_inference_image_classification(self):
expected_slice = torch.tensor([-1.4192, -1.9158, -0.9702]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_model(self):
@@ -300,7 +300,7 @@ def test_inference_model(self):
[[-0.3086, 1.0402, 1.1816], [-0.2880, 0.5781, 0.6124], [0.1480, 0.6129, -0.0590]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
@require_accelerate
diff --git a/tests/models/pvt_v2/test_modeling_pvt_v2.py b/tests/models/pvt_v2/test_modeling_pvt_v2.py
index 334e890e7a89..1c69385745f0 100644
--- a/tests/models/pvt_v2/test_modeling_pvt_v2.py
+++ b/tests/models/pvt_v2/test_modeling_pvt_v2.py
@@ -336,7 +336,7 @@ def test_inference_image_classification(self):
expected_slice = torch.tensor([-1.4192, -1.9158, -0.9702]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_model(self):
@@ -359,7 +359,7 @@ def test_inference_model(self):
[[-0.3086, 1.0402, 1.1816], [-0.2880, 0.5781, 0.6124], [0.1480, 0.6129, -0.0590]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
@require_accelerate
diff --git a/tests/models/qwen2/test_modeling_qwen2.py b/tests/models/qwen2/test_modeling_qwen2.py
index ecfa9189d12e..e426aee98c24 100644
--- a/tests/models/qwen2/test_modeling_qwen2.py
+++ b/tests/models/qwen2/test_modeling_qwen2.py
@@ -446,11 +446,11 @@ def test_model_450m_logits(self):
out = model(input_ids).logits.float().cpu()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-1.9537, -1.6193, -1.4123, -1.4673, -1.8511, -1.9309, -1.9826, -2.1776]])
- torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([3.2025, 7.1265, 4.6058, 3.6423, 1.6357, 3.9265, 5.1883, 5.8760, 2.7942, 4.4823, 3.2571, 2.1063, 3.4275, 4.2028, 1.9767, 5.2115, 6.6756, 6.3999, 6.0483, 5.7378, 5.6660, 5.2298, 5.4103, 5.1248, 5.4376, 2.4570, 2.6107, 5.4039, 2.8077, 4.7777]) # fmt: skip
print(out[0, 0, :30])
- torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-4, rtol=1e-4)
+ torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
del model
backend_empty_cache(torch_device)
diff --git a/tests/models/qwen2_moe/test_modeling_qwen2_moe.py b/tests/models/qwen2_moe/test_modeling_qwen2_moe.py
index 21d11047ff1b..126450eacc5b 100644
--- a/tests/models/qwen2_moe/test_modeling_qwen2_moe.py
+++ b/tests/models/qwen2_moe/test_modeling_qwen2_moe.py
@@ -508,11 +508,11 @@ def test_model_a2_7b_logits(self):
out = model(input_ids).logits.float().cpu()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-4.2125, -3.6416, -4.9136, -4.3005, -4.9938, -3.4393, -3.5195, -4.1621]])
- torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
+ torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([2.3013, -0.6595, -0.1389, -1.4095, -1.7381, -1.7609, -2.0449, -2.4289, -3.0271, -2.1351, -0.6568, -4.6012, -1.9102, -0.7475, -3.1377, 4.6904, 7.1936, 7.0991, 6.4414, 6.1720, 6.2617, 5.8751, 5.6997, 5.6011, 5.5828, -3.9505, -0.5384, -0.3392, 1.2445, 2.0714]) # fmt: skip
print(out[0, 0, :30])
- torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-4, rtol=1e-4)
+ torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
del model
backend_empty_cache(torch_device)
diff --git a/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py b/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py
index 317e0e28ad14..bfa4dca85e32 100644
--- a/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py
+++ b/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py
@@ -16,6 +16,7 @@
import unittest
import numpy as np
+import requests
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize
@@ -296,3 +297,26 @@ def test_custom_patch_size(self):
encoded_video = prcocess_out.pixel_values_videos
expected_output_video_shape = (171500, 1176)
self.assertEqual(tuple(encoded_video.shape), expected_output_video_shape)
+
+ @require_vision
+ @require_torch
+ def test_slow_fast_equivalence(self):
+ dummy_image = Image.open(
+ requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
+ )
+
+ if not self.test_slow_image_processor or not self.test_fast_image_processor:
+ self.skipTest(reason="Skipping slow/fast equivalence test")
+
+ if self.image_processing_class is None or self.fast_image_processing_class is None:
+ self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
+
+ image_processor_slow = self.image_processing_class(**self.image_processor_dict)
+ image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
+
+ encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
+ encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
+
+ torch.testing.assert_close(
+ encoding_slow.pixel_values, encoding_fast.pixel_values, rtol=100, atol=1e-2
+ ) # @yoni bit weird that we have such diffs
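A note on the loose tolerances in the test added above (illustrative, not part of the patch): assert_close allows a per-element difference of atol + rtol * |expected|, so rtol=100 on values of order one admits very large absolute gaps, and only near-zero elements are effectively still held to the atol=1e-2 bound.

import torch

expected = torch.tensor([0.0, 1.5])                 # e.g. normalized pixel values
actual = expected + torch.tensor([0.005, 50.0])

# Allowed differences are atol + rtol * |expected| = [0.01, 150.01], so both elements pass.
torch.testing.assert_close(actual, expected, rtol=100, atol=1e-2)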
diff --git a/tests/models/reformer/test_modeling_reformer.py b/tests/models/reformer/test_modeling_reformer.py
index 25b28477a145..fde19b74543f 100644
--- a/tests/models/reformer/test_modeling_reformer.py
+++ b/tests/models/reformer/test_modeling_reformer.py
@@ -1095,7 +1095,7 @@ def test_lsh_layer_forward(self):
dtype=torch.float,
device=torch_device,
)
- self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
+ torch.testing.assert_close(output_slice, expected_output_slice, rtol=1e-3, atol=1e-3)
def test_lsh_layer_forward_complex(self):
config = self._get_basic_config_and_input()
@@ -1118,7 +1118,7 @@ def test_lsh_layer_forward_complex(self):
dtype=torch.float,
device=torch_device,
)
- self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
+ torch.testing.assert_close(output_slice, expected_output_slice, rtol=1e-3, atol=1e-3)
def test_local_layer_forward(self):
config = self._get_basic_config_and_input()
@@ -1136,7 +1136,7 @@ def test_local_layer_forward(self):
dtype=torch.float,
device=torch_device,
)
- self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
+ torch.testing.assert_close(output_slice, expected_output_slice, rtol=1e-3, atol=1e-3)
def test_local_layer_forward_complex(self):
config = self._get_basic_config_and_input()
@@ -1158,7 +1158,7 @@ def test_local_layer_forward_complex(self):
dtype=torch.float,
device=torch_device,
)
- self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
+ torch.testing.assert_close(output_slice, expected_output_slice, rtol=1e-3, atol=1e-3)
def test_lsh_model_forward(self):
config = self._get_basic_config_and_input()
@@ -1175,7 +1175,7 @@ def test_lsh_model_forward(self):
dtype=torch.float,
device=torch_device,
)
- self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
+ torch.testing.assert_close(output_slice, expected_output_slice, rtol=1e-3, atol=1e-3)
def test_local_model_forward(self):
config = self._get_basic_config_and_input()
@@ -1191,7 +1191,7 @@ def test_local_model_forward(self):
dtype=torch.float,
device=torch_device,
)
- self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
+ torch.testing.assert_close(output_slice, expected_output_slice, rtol=1e-3, atol=1e-3)
def test_lm_model_forward(self):
config = self._get_basic_config_and_input()
@@ -1210,7 +1210,7 @@ def test_lm_model_forward(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
+ torch.testing.assert_close(output_slice, expected_output_slice, rtol=1e-3, atol=1e-3)
def test_local_lm_model_grad(self):
config = self._get_basic_config_and_input()
@@ -1224,7 +1224,9 @@ def test_local_lm_model_grad(self):
input_ids, _ = self._get_input_ids_and_mask()
loss = model(input_ids=input_ids, labels=input_ids)[0]
- self.assertTrue(torch.allclose(loss, torch.tensor(5.8019, dtype=torch.float, device=torch_device), atol=1e-3))
+ torch.testing.assert_close(
+ loss, torch.tensor(5.8019, dtype=torch.float, device=torch_device), rtol=1e-3, atol=1e-3
+ )
loss.backward()
# check last grads to cover all probable errors
@@ -1246,9 +1248,9 @@ def test_local_lm_model_grad(self):
dtype=torch.float,
device=torch_device,
)
- self.assertTrue(torch.allclose(grad_slice_word, expected_grad_slice_word, atol=1e-3))
- self.assertTrue(torch.allclose(grad_slice_position_factor_1, expected_grad_slice_pos_fac_1, atol=1e-3))
- self.assertTrue(torch.allclose(grad_slice_position_factor_2, expected_grad_slice_pos_fac_2, atol=1e-3))
+ torch.testing.assert_close(grad_slice_word, expected_grad_slice_word, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(grad_slice_position_factor_1, expected_grad_slice_pos_fac_1, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(grad_slice_position_factor_2, expected_grad_slice_pos_fac_2, rtol=1e-3, atol=1e-3)
def test_lsh_lm_model_grad(self):
config = self._get_basic_config_and_input()
@@ -1264,7 +1266,9 @@ def test_lsh_lm_model_grad(self):
input_ids, _ = self._get_input_ids_and_mask()
loss = model(input_ids=input_ids, labels=input_ids)[0]
- self.assertTrue(torch.allclose(loss, torch.tensor(5.7854, dtype=torch.float, device=torch_device), atol=1e-3))
+ torch.testing.assert_close(
+ loss, torch.tensor(5.7854, dtype=torch.float, device=torch_device), rtol=1e-3, atol=1e-3
+ )
loss.backward()
# check last grads to cover all probable errors
grad_slice_word = model.reformer.embeddings.word_embeddings.weight.grad[0, :5]
@@ -1285,9 +1289,9 @@ def test_lsh_lm_model_grad(self):
dtype=torch.float,
device=torch_device,
)
- self.assertTrue(torch.allclose(grad_slice_word, expected_grad_slice_word, atol=1e-3))
- self.assertTrue(torch.allclose(grad_slice_position_factor_1, expected_grad_slice_pos_fac_1, atol=1e-3))
- self.assertTrue(torch.allclose(grad_slice_position_factor_2, expected_grad_slice_pos_fac_2, atol=1e-3))
+ torch.testing.assert_close(grad_slice_word, expected_grad_slice_word, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(grad_slice_position_factor_1, expected_grad_slice_pos_fac_1, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(grad_slice_position_factor_2, expected_grad_slice_pos_fac_2, rtol=1e-3, atol=1e-3)
@slow
def test_pretrained_generate_crime_and_punish(self):
diff --git a/tests/models/regnet/test_modeling_regnet.py b/tests/models/regnet/test_modeling_regnet.py
index 8613eb7f3df4..371e699d233c 100644
--- a/tests/models/regnet/test_modeling_regnet.py
+++ b/tests/models/regnet/test_modeling_regnet.py
@@ -250,4 +250,4 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/rembert/test_modeling_rembert.py b/tests/models/rembert/test_modeling_rembert.py
index 664888fcc0f3..f0e9e7a050b1 100644
--- a/tests/models/rembert/test_modeling_rembert.py
+++ b/tests/models/rembert/test_modeling_rembert.py
@@ -507,4 +507,6 @@ def test_inference_model(self):
# [-0.15887849032878876, -0.054529931396245956, 0.5356100797653198]
# ]]
- self.assertTrue(torch.allclose(output["last_hidden_state"][:, :, :3], expected_implementation, atol=1e-4))
+ torch.testing.assert_close(
+ output["last_hidden_state"][:, :, :3], expected_implementation, rtol=1e-4, atol=1e-4
+ )
diff --git a/tests/models/resnet/test_modeling_resnet.py b/tests/models/resnet/test_modeling_resnet.py
index a89e85bf320c..c940521a8d18 100644
--- a/tests/models/resnet/test_modeling_resnet.py
+++ b/tests/models/resnet/test_modeling_resnet.py
@@ -303,7 +303,7 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([-11.1069, -9.7877, -8.3777]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
diff --git a/tests/models/roberta/test_modeling_roberta.py b/tests/models/roberta/test_modeling_roberta.py
index 1c128513b17d..11171ee93453 100644
--- a/tests/models/roberta/test_modeling_roberta.py
+++ b/tests/models/roberta/test_modeling_roberta.py
@@ -541,7 +541,7 @@ def test_inference_masked_lm(self):
# roberta.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_no_head(self):
@@ -559,7 +559,7 @@ def test_inference_no_head(self):
# roberta.eval()
# expected_slice = roberta.extract_features(input_ids)[:, :3, :3].detach()
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_classification_head(self):
@@ -576,7 +576,7 @@ def test_inference_classification_head(self):
# roberta.eval()
# expected_tensor = roberta.predict("mnli", input_ids, return_logits=True).detach()
- self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
+ torch.testing.assert_close(output, expected_tensor, rtol=1e-4, atol=1e-4)
@slow
def test_export(self):
diff --git a/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py b/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py
index e64aaddbeb46..1333e2d5989f 100644
--- a/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py
+++ b/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py
@@ -544,7 +544,7 @@ def test_inference_masked_lm(self):
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
@slow
def test_inference_no_head(self):
@@ -558,4 +558,4 @@ def test_inference_no_head(self):
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/roformer/test_modeling_roformer.py b/tests/models/roformer/test_modeling_roformer.py
index 1c22243b3707..bdae0aea13dc 100644
--- a/tests/models/roformer/test_modeling_roformer.py
+++ b/tests/models/roformer/test_modeling_roformer.py
@@ -523,7 +523,7 @@ def test_inference_masked_lm(self):
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
diff --git a/tests/models/rt_detr/test_image_processing_rt_detr.py b/tests/models/rt_detr/test_image_processing_rt_detr.py
index 2be3ea3e7651..97718d97406f 100644
--- a/tests/models/rt_detr/test_image_processing_rt_detr.py
+++ b/tests/models/rt_detr/test_image_processing_rt_detr.py
@@ -171,31 +171,31 @@ def test_call_pytorch_with_coco_detection_annotations(self):
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.5490, 0.5647, 0.5725])
- self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([2827.9883, 5403.4761, 235036.7344, 402070.2188, 71068.8281, 79601.2812])
- self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
+ torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
- self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
+ torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
- self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
+ torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
- self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
+ torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
- self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
+ torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([640, 640])
- self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
+ torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
def test_image_processor_outputs(self):
@@ -211,7 +211,7 @@ def test_image_processor_outputs(self):
# verify pixel values: output values
expected_slice = torch.tensor([0.5490196347236633, 0.5647059082984924, 0.572549045085907])
- self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-5))
+ torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-5, atol=1e-5)
def test_multiple_images_processor_outputs(self):
images_urls = [
@@ -255,7 +255,7 @@ def test_multiple_images_processor_outputs(self):
[0.19607844948768616, 0.21176472306251526, 0.3607843220233917],
]
)
- self.assertTrue(torch.allclose(encoding["pixel_values"][:, 1, 0, :3], expected_slices, atol=1e-5))
+ torch.testing.assert_close(encoding["pixel_values"][:, 1, 0, :3], expected_slices, rtol=1e-5, atol=1e-5)
@slow
def test_batched_coco_detection_annotations(self):
@@ -321,8 +321,8 @@ def test_batched_coco_detection_annotations(self):
[0.7715, 0.4115, 0.4570, 0.7161],
]
)
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3)
# Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height
# format and not in the range [0, 1]
@@ -369,8 +369,8 @@ def test_batched_coco_detection_annotations(self):
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
@slow
@require_torch_gpu
@@ -400,7 +400,7 @@ def test_fast_processor_equivalence_cpu_gpu_coco_detection_annotations(self):
)
)
# verify area
- self.assertTrue(torch.allclose(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu")))
+ torch.testing.assert_close(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu"))
# verify boxes
self.assertEqual(encoding_cpu["labels"][0]["boxes"].shape, encoding_gpu["labels"][0]["boxes"].shape)
self.assertTrue(
@@ -409,12 +409,12 @@ def test_fast_processor_equivalence_cpu_gpu_coco_detection_annotations(self):
)
)
# verify image_id
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu")
)
# verify is_crowd
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu")
)
# verify class_labels
self.assertTrue(
@@ -423,8 +423,8 @@ def test_fast_processor_equivalence_cpu_gpu_coco_detection_annotations(self):
)
)
# verify orig_size
- self.assertTrue(
- torch.allclose(encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu"))
+ torch.testing.assert_close(
+ encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu")
)
# verify size
- self.assertTrue(torch.allclose(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu")))
+ torch.testing.assert_close(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu"))
diff --git a/tests/models/rt_detr/test_modeling_rt_detr.py b/tests/models/rt_detr/test_modeling_rt_detr.py
index 368e2dd140f3..c3ccc89efc26 100644
--- a/tests/models/rt_detr/test_modeling_rt_detr.py
+++ b/tests/models/rt_detr/test_modeling_rt_detr.py
@@ -745,11 +745,11 @@ def test_inference_object_detection_head(self):
]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=1e-4, atol=1e-4)
expected_shape_boxes = torch.Size((1, 300, 4))
self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=1e-4, atol=1e-4)
# verify postprocessing
results = image_processor.post_process_object_detection(
@@ -769,6 +769,6 @@ def test_inference_object_detection_head(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(results["scores"][:4], expected_scores, atol=1e-4))
+ torch.testing.assert_close(results["scores"][:4], expected_scores, rtol=1e-4, atol=1e-4)
self.assertSequenceEqual(results["labels"][:4].tolist(), expected_labels)
- self.assertTrue(torch.allclose(results["boxes"][:4], expected_slice_boxes, atol=1e-4))
+ torch.testing.assert_close(results["boxes"][:4], expected_slice_boxes, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/rwkv/test_modeling_rwkv.py b/tests/models/rwkv/test_modeling_rwkv.py
index 0bc5c2de0701..6517c96509c4 100644
--- a/tests/models/rwkv/test_modeling_rwkv.py
+++ b/tests/models/rwkv/test_modeling_rwkv.py
@@ -297,7 +297,7 @@ def test_initialization(self):
elif "time_first" in name:
if param.requires_grad:
# check if it's a ones like
- self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5))
+ torch.testing.assert_close(param.data, torch.ones_like(param.data), rtol=1e-5, atol=1e-5)
elif any(x in name for x in ["time_mix_key", "time_mix_receptance"]):
if param.requires_grad:
self.assertInterval(
diff --git a/tests/models/sam/test_modeling_sam.py b/tests/models/sam/test_modeling_sam.py
index 351016716a0c..c44046bd8161 100644
--- a/tests/models/sam/test_modeling_sam.py
+++ b/tests/models/sam/test_modeling_sam.py
@@ -539,8 +539,10 @@ def test_inference_mask_generation_no_point(self):
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
masks = outputs.pred_masks[0, 0, 0, 0, :3]
- self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.4515), atol=2e-4))
- self.assertTrue(torch.allclose(masks, torch.tensor([-4.1800, -3.4948, -3.4481]).to(torch_device), atol=2e-4))
+ torch.testing.assert_close(scores[-1], torch.tensor(0.4515), rtol=2e-4, atol=2e-4)
+ torch.testing.assert_close(
+ masks, torch.tensor([-4.1800, -3.4948, -3.4481]).to(torch_device), rtol=2e-4, atol=2e-4
+ )
def test_inference_mask_generation_one_point_one_bb(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
@@ -561,9 +563,9 @@ def test_inference_mask_generation_one_point_one_bb(self):
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
masks = outputs.pred_masks[0, 0, 0, 0, :3]
- self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9566), atol=2e-4))
- self.assertTrue(
- torch.allclose(masks, torch.tensor([-12.7729, -12.3665, -12.6061]).to(torch_device), atol=2e-4)
+ torch.testing.assert_close(scores[-1], torch.tensor(0.9566), rtol=2e-4, atol=2e-4)
+ torch.testing.assert_close(
+ masks, torch.tensor([-12.7729, -12.3665, -12.6061]).to(torch_device), rtol=2e-4, atol=2e-4
)
def test_inference_mask_generation_batched_points_batched_images(self):
@@ -605,8 +607,8 @@ def test_inference_mask_generation_batched_points_batched_images(self):
]
)
EXPECTED_MASKS = torch.tensor([-2.8550, -2.7988, -2.9625])
- self.assertTrue(torch.allclose(scores, EXPECTED_SCORES, atol=1e-3))
- self.assertTrue(torch.allclose(masks, EXPECTED_MASKS, atol=1e-3))
+ torch.testing.assert_close(scores, EXPECTED_SCORES, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(masks, EXPECTED_MASKS, rtol=1e-3, atol=1e-3)
def test_inference_mask_generation_one_point_one_bb_zero(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
@@ -632,7 +634,7 @@ def test_inference_mask_generation_one_point_one_bb_zero(self):
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
- self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.7894), atol=1e-4))
+ torch.testing.assert_close(scores[-1], torch.tensor(0.7894), rtol=1e-4, atol=1e-4)
def test_inference_mask_generation_one_point(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
@@ -653,7 +655,7 @@ def test_inference_mask_generation_one_point(self):
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
- self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9675), atol=1e-4))
+ torch.testing.assert_close(scores[-1], torch.tensor(0.9675), rtol=1e-4, atol=1e-4)
# With no label
input_points = [[[400, 650]]]
@@ -663,7 +665,7 @@ def test_inference_mask_generation_one_point(self):
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
- self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9675), atol=1e-4))
+ torch.testing.assert_close(scores[-1], torch.tensor(0.9675), rtol=1e-4, atol=1e-4)
def test_inference_mask_generation_two_points(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
@@ -684,7 +686,7 @@ def test_inference_mask_generation_two_points(self):
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
- self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9762), atol=1e-4))
+ torch.testing.assert_close(scores[-1], torch.tensor(0.9762), rtol=1e-4, atol=1e-4)
# no labels
inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt").to(torch_device)
@@ -693,7 +695,7 @@ def test_inference_mask_generation_two_points(self):
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
- self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9762), atol=1e-4))
+ torch.testing.assert_close(scores[-1], torch.tensor(0.9762), rtol=1e-4, atol=1e-4)
def test_inference_mask_generation_two_points_batched(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
@@ -714,8 +716,8 @@ def test_inference_mask_generation_two_points_batched(self):
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
- self.assertTrue(torch.allclose(scores[0][-1], torch.tensor(0.9762), atol=1e-4))
- self.assertTrue(torch.allclose(scores[1][-1], torch.tensor(0.9637), atol=1e-4))
+ torch.testing.assert_close(scores[0][-1], torch.tensor(0.9762), rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(scores[1][-1], torch.tensor(0.9637), rtol=1e-4, atol=1e-4)
def test_inference_mask_generation_one_box(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
@@ -733,7 +735,7 @@ def test_inference_mask_generation_one_box(self):
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
- self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.7937), atol=1e-4))
+ torch.testing.assert_close(scores[-1], torch.tensor(0.7937), rtol=1e-4, atol=1e-4)
def test_inference_mask_generation_batched_image_one_point(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
@@ -762,7 +764,7 @@ def test_inference_mask_generation_batched_image_one_point(self):
with torch.no_grad():
outputs = model(**inputs)
scores_single = outputs.iou_scores.squeeze()
- self.assertTrue(torch.allclose(scores_batched[1, :], scores_single, atol=1e-4))
+ torch.testing.assert_close(scores_batched[1, :], scores_single, rtol=1e-4, atol=1e-4)
def test_inference_mask_generation_two_points_point_batch(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
@@ -812,7 +814,7 @@ def test_inference_mask_generation_three_boxes_point_batch(self):
iou_scores = outputs.iou_scores.cpu()
self.assertTrue(iou_scores.shape == (1, 3, 3))
- torch.testing.assert_close(iou_scores, EXPECTED_IOU, atol=1e-4, rtol=1e-4)
+ torch.testing.assert_close(iou_scores, EXPECTED_IOU, rtol=1e-4, atol=1e-4)
def test_dummy_pipeline_generation(self):
generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=torch_device)
diff --git a/tests/models/seamless_m4t/test_feature_extraction_seamless_m4t.py b/tests/models/seamless_m4t/test_feature_extraction_seamless_m4t.py
index 7c13f97b64d7..f5e59e49fcd1 100644
--- a/tests/models/seamless_m4t/test_feature_extraction_seamless_m4t.py
+++ b/tests/models/seamless_m4t/test_feature_extraction_seamless_m4t.py
@@ -283,13 +283,13 @@ def test_call_torch(self):
# Test not batched input
encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="pt").input_features
encoded_sequences_2 = feature_extractor(pt_speech_inputs[0], return_tensors="pt").input_features
- self.assertTrue(torch.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
+ torch.testing.assert_close(encoded_sequences_1, encoded_sequences_2, rtol=1e-3, atol=1e-3)
# Test batched
encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="pt").input_features
encoded_sequences_2 = feature_extractor(pt_speech_inputs, return_tensors="pt").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
- self.assertTrue(torch.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
+ torch.testing.assert_close(enc_seq_1, enc_seq_2, rtol=1e-3, atol=1e-3)
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
@@ -297,7 +297,7 @@ def test_call_torch(self):
encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="pt").input_features
encoded_sequences_2 = feature_extractor(pt_speech_inputs, return_tensors="pt").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
- self.assertTrue(torch.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
+ torch.testing.assert_close(enc_seq_1, enc_seq_2, rtol=1e-3, atol=1e-3)
@require_torch
# Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad
@@ -339,7 +339,7 @@ def test_integration(self):
feature_extractor(input_speech, return_tensors="pt").input_features[0, 5, :30]
self.assertEqual(input_features.shape, (1, 279, 160))
- self.assertTrue(torch.allclose(input_features[0, 5, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
+ torch.testing.assert_close(input_features[0, 5, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
diff --git a/tests/models/segformer/test_modeling_segformer.py b/tests/models/segformer/test_modeling_segformer.py
index 9b5e04a5d02b..5f6493a36cbc 100644
--- a/tests/models/segformer/test_modeling_segformer.py
+++ b/tests/models/segformer/test_modeling_segformer.py
@@ -373,7 +373,7 @@ def test_inference_image_segmentation_ade(self):
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_image_segmentation_city(self):
@@ -402,7 +402,7 @@ def test_inference_image_segmentation_city(self):
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
+ torch.testing.assert_close(outputs.logits[0, :3, :3, :3], expected_slice, rtol=1e-1, atol=1e-1)
@slow
def test_post_processing_semantic_segmentation(self):
diff --git a/tests/models/seggpt/test_image_processing_seggpt.py b/tests/models/seggpt/test_image_processing_seggpt.py
index 74e78f008201..5c58f4846ba9 100644
--- a/tests/models/seggpt/test_image_processing_seggpt.py
+++ b/tests/models/seggpt/test_image_processing_seggpt.py
@@ -231,11 +231,11 @@ def test_pixel_values(self):
]
)
- self.assertTrue(torch.allclose(inputs.pixel_values[0, :, :3, :3], expected_pixel_values, atol=1e-4))
- self.assertTrue(
- torch.allclose(inputs.prompt_pixel_values[0, :, :3, :3], expected_prompt_pixel_values, atol=1e-4)
+ torch.testing.assert_close(inputs.pixel_values[0, :, :3, :3], expected_pixel_values, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(
+ inputs.prompt_pixel_values[0, :, :3, :3], expected_prompt_pixel_values, rtol=1e-4, atol=1e-4
)
- self.assertTrue(torch.allclose(inputs.prompt_masks[0, :, :3, :3], expected_prompt_masks, atol=1e-4))
+ torch.testing.assert_close(inputs.prompt_masks[0, :, :3, :3], expected_prompt_masks, rtol=1e-4, atol=1e-4)
def test_prompt_mask_equivalence(self):
image_processor = self.image_processing_class(**self.image_processor_dict)
diff --git a/tests/models/seggpt/test_modeling_seggpt.py b/tests/models/seggpt/test_modeling_seggpt.py
index 50d141aaeff0..c8b7362b6048 100644
--- a/tests/models/seggpt/test_modeling_seggpt.py
+++ b/tests/models/seggpt/test_modeling_seggpt.py
@@ -313,7 +313,7 @@ def test_seggpt_loss(self):
loss_value = loss(prompt_masks, pred_masks, label, bool_masked_pos)
expected_loss_value = torch.tensor(0.3340)
- self.assertTrue(torch.allclose(loss_value, expected_loss_value, atol=1e-4))
+ torch.testing.assert_close(loss_value, expected_loss_value, rtol=1e-4, atol=1e-4)
@slow
def test_model_from_pretrained(self):
@@ -386,7 +386,7 @@ def test_one_shot_inference(self):
]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.pred_masks[0, :, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.pred_masks[0, :, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
result = image_processor.post_process_semantic_segmentation(outputs, [input_image.size[::-1]])[0]
@@ -428,7 +428,7 @@ def test_few_shot_inference(self):
).to(torch_device)
self.assertEqual(outputs.pred_masks.shape, expected_shape)
- self.assertTrue(torch.allclose(outputs.pred_masks[0, :, 448:451, :3], expected_slice, atol=4e-4))
+ torch.testing.assert_close(outputs.pred_masks[0, :, 448:451, :3], expected_slice, rtol=4e-4, atol=4e-4)
@slow
def test_one_shot_with_label(self):
@@ -461,4 +461,4 @@ def test_one_shot_with_label(self):
outputs = model(**inputs, labels=labels, bool_masked_pos=bool_masked_pos)
expected_loss = torch.tensor(0.0074).to(torch_device)
- self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
+ torch.testing.assert_close(outputs.loss, expected_loss, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/sew/test_modeling_sew.py b/tests/models/sew/test_modeling_sew.py
index 852f87c8f58a..9893fcf0b3b9 100644
--- a/tests/models/sew/test_modeling_sew.py
+++ b/tests/models/sew/test_modeling_sew.py
@@ -553,8 +553,8 @@ def test_inference_pretrained_batched(self):
)
expected_output_sum = 62146.7422
- self.assertTrue(torch.allclose(outputs[:, :4, :4], expected_outputs_first, atol=5e-3))
- self.assertTrue(torch.allclose(outputs[:, -4:, -4:], expected_outputs_last, atol=5e-3))
+ torch.testing.assert_close(outputs[:, :4, :4], expected_outputs_first, rtol=5e-3, atol=5e-3)
+ torch.testing.assert_close(outputs[:, -4:, -4:], expected_outputs_last, rtol=5e-3, atol=5e-3)
self.assertTrue(abs(outputs.sum() - expected_output_sum) < 5)
def test_inference_ctc_batched(self):
diff --git a/tests/models/sew_d/test_modeling_sew_d.py b/tests/models/sew_d/test_modeling_sew_d.py
index 34374eb1e0e6..43bd31d92a0b 100644
--- a/tests/models/sew_d/test_modeling_sew_d.py
+++ b/tests/models/sew_d/test_modeling_sew_d.py
@@ -567,8 +567,8 @@ def test_inference_pretrained_batched(self):
)
expected_output_sum = 54201.0469
- self.assertTrue(torch.allclose(outputs[:, :4, :4], expected_outputs_first, atol=1e-3))
- self.assertTrue(torch.allclose(outputs[:, -4:, -4:], expected_outputs_last, atol=1e-3))
+ torch.testing.assert_close(outputs[:, :4, :4], expected_outputs_first, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(outputs[:, -4:, -4:], expected_outputs_last, rtol=1e-3, atol=1e-3)
self.assertTrue(abs(outputs.sum() - expected_output_sum) < 1)
def test_inference_ctc_batched(self):
diff --git a/tests/models/siglip/test_modeling_siglip.py b/tests/models/siglip/test_modeling_siglip.py
index 61ac78f10299..3dec33018476 100644
--- a/tests/models/siglip/test_modeling_siglip.py
+++ b/tests/models/siglip/test_modeling_siglip.py
@@ -1014,12 +1014,12 @@ def test_inference(self):
expected_logits = torch.tensor([[-0.7567, -10.3354]], device=torch_device)
- self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
# verify the probs
probs = torch.sigmoid(logits_per_image) # these are the probabilities
expected_probs = torch.tensor([[3.1937e-01, 3.2463e-05]], device=torch_device)
- self.assertTrue(torch.allclose(probs, expected_probs, atol=1e-3))
+ torch.testing.assert_close(probs, expected_probs, rtol=1e-3, atol=1e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
diff --git a/tests/models/speecht5/test_feature_extraction_speecht5.py b/tests/models/speecht5/test_feature_extraction_speecht5.py
index 70d60f92238a..9b3f53947695 100644
--- a/tests/models/speecht5/test_feature_extraction_speecht5.py
+++ b/tests/models/speecht5/test_feature_extraction_speecht5.py
@@ -402,7 +402,7 @@ def test_integration(self):
feature_extractor = SpeechT5FeatureExtractor()
input_values = feature_extractor(input_speech, return_tensors="pt").input_values
self.assertEqual(input_values.shape, (1, 93680))
- self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
+ torch.testing.assert_close(input_values[0, :30], EXPECTED_INPUT_VALUES, rtol=1e-6, atol=1e-6)
def test_integration_target(self):
# fmt: off
@@ -418,4 +418,4 @@ def test_integration_target(self):
feature_extractor = SpeechT5FeatureExtractor()
input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
self.assertEqual(input_values.shape, (1, 366, 80))
- self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
+ torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/squeezebert/test_modeling_squeezebert.py b/tests/models/squeezebert/test_modeling_squeezebert.py
index e5323fe3e4bc..15b849722b78 100644
--- a/tests/models/squeezebert/test_modeling_squeezebert.py
+++ b/tests/models/squeezebert/test_modeling_squeezebert.py
@@ -294,4 +294,4 @@ def test_inference_classification_head(self):
expected_shape = torch.Size((1, 3))
self.assertEqual(output.shape, expected_shape)
expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
- self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
+ torch.testing.assert_close(output, expected_tensor, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/stablelm/test_modeling_stablelm.py b/tests/models/stablelm/test_modeling_stablelm.py
index c8aa55399035..7c237b0bcfc1 100644
--- a/tests/models/stablelm/test_modeling_stablelm.py
+++ b/tests/models/stablelm/test_modeling_stablelm.py
@@ -395,7 +395,7 @@ def test_model_rope_scaling_from_config(self, scaling_type):
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
- self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
+ torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
else:
self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
@@ -465,11 +465,11 @@ def test_model_stablelm_3b_4e1t_logits(self):
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[2.7146, 2.4245, 1.5616, 1.4424, 2.6790]]).to(torch_device)
- self.assertTrue(torch.allclose(output.mean(dim=-1), EXPECTED_MEAN, atol=1e-4, rtol=1e-4))
+ torch.testing.assert_close(output.mean(dim=-1), EXPECTED_MEAN, rtol=1e-4, atol=1e-4)
# Expected logits sliced from [0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([7.1030, -1.4195, 9.9206, 7.7008, 4.9891, 4.2169, 5.5426, 3.7878, 6.7593, 5.7360, 8.4691, 5.5448, 5.0544, 10.4129, 8.5573, 13.0405, 7.3265, 3.5868, 6.1106, 5.9406, 5.6376, 5.7490, 5.4850, 4.8124, 5.1991, 4.6419, 4.5719, 9.9588, 6.7222, 4.5070]).to(torch_device) # fmt: skip
- self.assertTrue(torch.allclose(output[0, 0, :30], EXPECTED_SLICE, atol=1e-4, rtol=1e-4))
+ torch.testing.assert_close(output[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
@slow
def test_model_stablelm_3b_4e1t_generation(self):
@@ -498,11 +498,11 @@ def test_model_tiny_random_stablelm_2_logits(self):
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-2.7196, -3.6099, -2.6877, -3.1973, -3.9344]]).to(torch_device)
- self.assertTrue(torch.allclose(output.mean(dim=-1), EXPECTED_MEAN, atol=1e-4, rtol=1e-4))
+ torch.testing.assert_close(output.mean(dim=-1), EXPECTED_MEAN, rtol=1e-4, atol=1e-4)
# Expected logits sliced from [0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([2.8364, 5.3811, 5.1659, 7.5485, 4.3219, 6.3315, 1.3967, 6.9147, 3.9679, 6.4786, 5.9176, 3.3067, 5.2917, 0.1485, 3.9630, 7.9947,10.6727, 9.6757, 8.8772, 8.3527, 7.8445, 6.6025, 5.5786, 7.0985,6.1369, 3.4259, 1.9397, 4.6157, 4.8105, 3.1768]).to(torch_device) # fmt: skip
- self.assertTrue(torch.allclose(output[0, 0, :30], EXPECTED_SLICE, atol=1e-4, rtol=1e-4))
+ torch.testing.assert_close(output[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
@slow
def test_model_tiny_random_stablelm_2_generation(self):
diff --git a/tests/models/superpoint/test_modeling_superpoint.py b/tests/models/superpoint/test_modeling_superpoint.py
index e811d3f6b417..11f4fe11fc76 100644
--- a/tests/models/superpoint/test_modeling_superpoint.py
+++ b/tests/models/superpoint/test_modeling_superpoint.py
@@ -297,7 +297,7 @@ def test_inference(self):
atol=1e-4,
)
)
- self.assertTrue(torch.allclose(predicted_scores_image0_values, expected_scores_image0_values, atol=1e-4))
+ torch.testing.assert_close(predicted_scores_image0_values, expected_scores_image0_values, rtol=1e-4, atol=1e-4)
self.assertTrue(
torch.allclose(
predicted_descriptors_image0_value,
diff --git a/tests/models/swiftformer/test_modeling_swiftformer.py b/tests/models/swiftformer/test_modeling_swiftformer.py
index 3b8b3eb5ed65..234c8aa15fe9 100644
--- a/tests/models/swiftformer/test_modeling_swiftformer.py
+++ b/tests/models/swiftformer/test_modeling_swiftformer.py
@@ -288,4 +288,4 @@ def test_inference_image_classification_head(self):
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/swin/test_modeling_swin.py b/tests/models/swin/test_modeling_swin.py
index 963de232d68e..92c06de971b0 100644
--- a/tests/models/swin/test_modeling_swin.py
+++ b/tests/models/swin/test_modeling_swin.py
@@ -488,7 +488,7 @@ def test_inference_image_classification_head(self):
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-0.0948, -0.6454, -0.0921]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_interpolate_pos_encoding(self):
diff --git a/tests/models/swin2sr/test_modeling_swin2sr.py b/tests/models/swin2sr/test_modeling_swin2sr.py
index a1358c9de0bf..91d04915d135 100644
--- a/tests/models/swin2sr/test_modeling_swin2sr.py
+++ b/tests/models/swin2sr/test_modeling_swin2sr.py
@@ -332,7 +332,7 @@ def test_inference_image_super_resolution_head(self):
expected_slice = torch.tensor(
[[0.5458, 0.5546, 0.5638], [0.5526, 0.5565, 0.5651], [0.5396, 0.5426, 0.5621]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.reconstruction[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_fp16(self):
processor = Swin2SRImageProcessor()
@@ -353,4 +353,4 @@ def test_inference_fp16(self):
expected_slice = torch.tensor(
[[0.5454, 0.5542, 0.5640], [0.5518, 0.5562, 0.5649], [0.5391, 0.5425, 0.5620]], dtype=model.dtype
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.reconstruction[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/swinv2/test_modeling_swinv2.py b/tests/models/swinv2/test_modeling_swinv2.py
index 5ef9a4b92e13..4bf309cc6abb 100644
--- a/tests/models/swinv2/test_modeling_swinv2.py
+++ b/tests/models/swinv2/test_modeling_swinv2.py
@@ -485,7 +485,7 @@ def test_inference_image_classification_head(self):
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_fp16(self):
@@ -505,7 +505,7 @@ def test_inference_fp16(self):
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-0.3938, -0.4290, 0.0020], dtype=model.dtype).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_interpolate_pos_encoding(self):
diff --git a/tests/models/t5/test_modeling_t5.py b/tests/models/t5/test_modeling_t5.py
index 52fec78d1e89..854a73f16b37 100644
--- a/tests/models/t5/test_modeling_t5.py
+++ b/tests/models/t5/test_modeling_t5.py
@@ -1703,7 +1703,7 @@ def test_compile_static_cache_encoder(self):
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
logits_compiled = model(**inputs)
- self.assertTrue(torch.allclose(logits[0][:, -3:, -3], logits_compiled[0][:, -3:, -3], atol=1e-5))
+ torch.testing.assert_close(logits[0][:, -3:, -3], logits_compiled[0][:, -3:, -3], rtol=1e-5, atol=1e-5)
@require_torch
diff --git a/tests/models/table_transformer/test_modeling_table_transformer.py b/tests/models/table_transformer/test_modeling_table_transformer.py
index 99d80b39e92b..50165cbe1a84 100644
--- a/tests/models/table_transformer/test_modeling_table_transformer.py
+++ b/tests/models/table_transformer/test_modeling_table_transformer.py
@@ -595,9 +595,9 @@ def test_table_detection(self):
[[-6.7329, -16.9590, 6.7447], [-8.0038, -22.3071, 6.9288], [-7.2445, -20.9855, 7.3465]],
device=torch_device,
)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=1e-4, atol=1e-4)
expected_boxes = torch.tensor(
[[0.4868, 0.1764, 0.6729], [0.6674, 0.4621, 0.3864], [0.4720, 0.1757, 0.6362]], device=torch_device
)
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-3))
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=1e-3, atol=1e-3)
diff --git a/tests/models/tapas/test_modeling_tapas.py b/tests/models/tapas/test_modeling_tapas.py
index 05618f4a4efd..980ff28b9ee9 100644
--- a/tests/models/tapas/test_modeling_tapas.py
+++ b/tests/models/tapas/test_modeling_tapas.py
@@ -605,12 +605,12 @@ def test_inference_no_head(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[:, :3, :3], expected_slice, atol=0.0005))
+ torch.testing.assert_close(outputs.last_hidden_state[:, :3, :3], expected_slice, rtol=0.0005, atol=0.0005)
# test the pooled output
expected_slice = torch.tensor([[0.987518311, -0.970520139, -0.994303405]], device=torch_device)
- self.assertTrue(torch.allclose(outputs.pooler_output[:, :3], expected_slice, atol=0.0005))
+ torch.testing.assert_close(outputs.pooler_output[:, :3], expected_slice, rtol=0.0005, atol=0.0005)
@unittest.skip(reason="Model not available yet")
def test_inference_masked_lm(self):
@@ -666,7 +666,7 @@ def test_inference_question_answering_head_conversational(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(logits, expected_tensor, atol=0.015))
+ torch.testing.assert_close(logits, expected_tensor, rtol=0.015, atol=0.015)
@slow
def test_inference_question_answering_head_conversational_absolute_embeddings(self):
@@ -716,7 +716,7 @@ def test_inference_question_answering_head_conversational_absolute_embeddings(se
device=torch_device,
)
- self.assertTrue(torch.allclose(logits, expected_tensor, atol=0.01))
+ torch.testing.assert_close(logits, expected_tensor, rtol=0.01, atol=0.01)
@slow
def test_inference_question_answering_head_weak_supervision(self):
@@ -744,7 +744,7 @@ def test_inference_question_answering_head_weak_supervision(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(logits[:, -6:], expected_slice, atol=0.4))
+ torch.testing.assert_close(logits[:, -6:], expected_slice, rtol=0.4, atol=0.4)
# test the aggregation logits
logits_aggregation = outputs.logits_aggregation
@@ -755,7 +755,7 @@ def test_inference_question_answering_head_weak_supervision(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(logits_aggregation, expected_tensor, atol=0.001))
+ torch.testing.assert_close(logits_aggregation, expected_tensor, rtol=0.001, atol=0.001)
# test the predicted answer coordinates and aggregation indices
EXPECTED_PREDICTED_ANSWER_COORDINATES = [[(0, 0)], [(1, 2)]]
@@ -813,7 +813,7 @@ def test_training_question_answering_head_weak_supervision(self):
# test the loss
loss = outputs.loss
expected_loss = torch.tensor(3.3527612686157227e-08, device=torch_device)
- self.assertTrue(torch.allclose(loss, expected_loss, atol=1e-6))
+ torch.testing.assert_close(loss, expected_loss, rtol=1e-6, atol=1e-6)
# test the logits on the first example
logits = outputs.logits
@@ -834,7 +834,7 @@ def test_training_question_answering_head_weak_supervision(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(logits[0, -9:], expected_slice, atol=1e-6))
+ torch.testing.assert_close(logits[0, -9:], expected_slice, rtol=1e-6, atol=1e-6)
# test the aggregation logits on the second example
logits_aggregation = outputs.logits_aggregation
@@ -842,7 +842,7 @@ def test_training_question_answering_head_weak_supervision(self):
self.assertEqual(logits_aggregation.shape, expected_shape)
expected_slice = torch.tensor([-4.0538, 40.0304, -5.3554, 23.3965], device=torch_device)
- self.assertTrue(torch.allclose(logits_aggregation[1, -4:], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits_aggregation[1, -4:], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_question_answering_head_strong_supervision(self):
@@ -890,7 +890,7 @@ def test_inference_question_answering_head_strong_supervision(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(logits, expected_tensor, atol=0.02))
+ torch.testing.assert_close(logits, expected_tensor, rtol=0.02, atol=0.02)
# test the aggregation logits
logits_aggregation = outputs.logits_aggregation
@@ -900,7 +900,7 @@ def test_inference_question_answering_head_strong_supervision(self):
[[16.5659733, -3.06624889, -2.34152961, -0.970244825]], device=torch_device
) # PyTorch model outputs [[16.5679, -3.0668, -2.3442, -0.9674]]
- self.assertTrue(torch.allclose(logits_aggregation, expected_tensor, atol=0.003))
+ torch.testing.assert_close(logits_aggregation, expected_tensor, rtol=0.003, atol=0.003)
@slow
def test_inference_classification_head(self):
@@ -922,7 +922,7 @@ def test_inference_classification_head(self):
[[0.795137286, 9.5572]], device=torch_device
) # Note that the PyTorch model outputs [[0.8057, 9.5281]]
- self.assertTrue(torch.allclose(outputs.logits, expected_tensor, atol=0.05))
+ torch.testing.assert_close(outputs.logits, expected_tensor, rtol=0.05, atol=0.05)
@require_torch
diff --git a/tests/models/textnet/test_modeling_textnet.py b/tests/models/textnet/test_modeling_textnet.py
index cf5e48506e52..0f02cfcaaf21 100644
--- a/tests/models/textnet/test_modeling_textnet.py
+++ b/tests/models/textnet/test_modeling_textnet.py
@@ -333,7 +333,9 @@ def test_inference_no_head(self):
[0.9210, 0.6099, 0.0000, 0.0000, 0.0000, 0.0000, 3.2207, 2.6602, 1.8925, 0.0000],
device=torch_device,
)
- self.assertTrue(torch.allclose(output.feature_maps[-1][0][10][12][:10], expected_slice_backbone, atol=1e-3))
+ torch.testing.assert_close(
+ output.feature_maps[-1][0][10][12][:10], expected_slice_backbone, rtol=1e-3, atol=1e-3
+ )
@require_torch
diff --git a/tests/models/time_series_transformer/test_modeling_time_series_transformer.py b/tests/models/time_series_transformer/test_modeling_time_series_transformer.py
index 5cd76b91612e..5f049bd92466 100644
--- a/tests/models/time_series_transformer/test_modeling_time_series_transformer.py
+++ b/tests/models/time_series_transformer/test_modeling_time_series_transformer.py
@@ -512,7 +512,7 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[0.8196, -1.5131, 1.4620], [1.1268, -1.3238, 1.5997], [1.5098, -1.0715, 1.7359]], device=torch_device
)
- self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_inference_head(self):
model = TimeSeriesTransformerForPrediction.from_pretrained(
@@ -534,7 +534,7 @@ def test_inference_head(self):
expected_slice = torch.tensor(
[[-1.2957, -1.0280, -0.6045], [-0.7017, -0.8193, -0.3717], [-1.0449, -0.8149, 0.1405]], device=torch_device
)
- self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
+ torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_seq_to_seq_generation(self):
model = TimeSeriesTransformerForPrediction.from_pretrained(
@@ -555,4 +555,4 @@ def test_seq_to_seq_generation(self):
expected_slice = torch.tensor([2825.2749, 3584.9207, 6763.9951], device=torch_device)
mean_prediction = outputs.sequences.mean(dim=1)
- self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
+ torch.testing.assert_close(mean_prediction[0, -3:], expected_slice, rtol=1e-1, atol=1e-1)
diff --git a/tests/models/timesformer/test_modeling_timesformer.py b/tests/models/timesformer/test_modeling_timesformer.py
index 3eaed42efb4e..ec8b34e5e27a 100644
--- a/tests/models/timesformer/test_modeling_timesformer.py
+++ b/tests/models/timesformer/test_modeling_timesformer.py
@@ -352,4 +352,4 @@ def test_inference_for_video_classification(self):
expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/tvp/test_modeling_tvp.py b/tests/models/tvp/test_modeling_tvp.py
index 2912b8778424..6ab0dffde4eb 100644
--- a/tests/models/tvp/test_modeling_tvp.py
+++ b/tests/models/tvp/test_modeling_tvp.py
@@ -277,7 +277,7 @@ def test_inference_no_head(self):
expected_slice = torch.tensor(
[[-0.4902, -0.4121, -1.7872], [-0.2184, 2.1211, -0.9371], [0.1180, 0.5003, -0.1727]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_with_head(self):
model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device)
@@ -296,7 +296,7 @@ def test_inference_with_head(self):
expected_shape = torch.Size((1, 2))
assert outputs.logits.shape == expected_shape
expected_slice = torch.tensor([[0.5061, 0.4988]]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits, expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits, expected_slice, rtol=1e-4, atol=1e-4)
def test_interpolate_inference_no_head(self):
model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device)
diff --git a/tests/models/unispeech/test_modeling_unispeech.py b/tests/models/unispeech/test_modeling_unispeech.py
index 4290ac21ab64..2b18aa658873 100644
--- a/tests/models/unispeech/test_modeling_unispeech.py
+++ b/tests/models/unispeech/test_modeling_unispeech.py
@@ -597,4 +597,4 @@ def test_inference_pretraining(self):
)
# fmt: on
- self.assertTrue(torch.allclose(cosine_sim[:, :5], expected_cosine_sim_slice, atol=1e-3))
+ torch.testing.assert_close(cosine_sim[:, :5], expected_cosine_sim_slice, rtol=1e-3, atol=1e-3)
diff --git a/tests/models/unispeech_sat/test_modeling_unispeech_sat.py b/tests/models/unispeech_sat/test_modeling_unispeech_sat.py
index 1aa2da20d5ec..ce8cd4180bfe 100644
--- a/tests/models/unispeech_sat/test_modeling_unispeech_sat.py
+++ b/tests/models/unispeech_sat/test_modeling_unispeech_sat.py
@@ -845,7 +845,9 @@ def test_inference_encoder_base(self):
)
# fmt: on
- self.assertTrue(torch.allclose(outputs.last_hidden_state[:, :2, -2:], expected_hidden_states_slice, atol=1e-3))
+ torch.testing.assert_close(
+ outputs.last_hidden_state[:, :2, -2:], expected_hidden_states_slice, rtol=1e-3, atol=1e-3
+ )
def test_inference_encoder_large(self):
model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-large")
@@ -871,7 +873,9 @@ def test_inference_encoder_large(self):
)
# fmt: on
- self.assertTrue(torch.allclose(outputs.last_hidden_state[:, :2, -2:], expected_hidden_states_slice, atol=1e-3))
+ torch.testing.assert_close(
+ outputs.last_hidden_state[:, :2, -2:], expected_hidden_states_slice, rtol=1e-3, atol=1e-3
+ )
def test_inference_diarization(self):
model = UniSpeechSatForAudioFrameClassification.from_pretrained("microsoft/unispeech-sat-base-plus-sd").to(
@@ -900,7 +904,7 @@ def test_inference_diarization(self):
)
self.assertEqual(labels[0, :, 0].sum(), 270)
self.assertEqual(labels[0, :, 1].sum(), 647)
- self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2))
+ torch.testing.assert_close(outputs.logits[:, :4], expected_logits, rtol=1e-2, atol=1e-2)
def test_inference_speaker_verification(self):
model = UniSpeechSatForXVector.from_pretrained("microsoft/unispeech-sat-base-plus-sv").to(torch_device)
diff --git a/tests/models/univnet/test_feature_extraction_univnet.py b/tests/models/univnet/test_feature_extraction_univnet.py
index 2917d206dfde..85a26bee874c 100644
--- a/tests/models/univnet/test_feature_extraction_univnet.py
+++ b/tests/models/univnet/test_feature_extraction_univnet.py
@@ -360,6 +360,6 @@ def test_integration(self):
EXPECTED_MEAN = torch.tensor(-6.18862009)
EXPECTED_STDDEV = torch.tensor(2.80845642)
- torch.testing.assert_close(input_features_mean, EXPECTED_MEAN, atol=5e-5, rtol=5e-6)
+ torch.testing.assert_close(input_features_mean, EXPECTED_MEAN, rtol=5e-5, atol=5e-5)
torch.testing.assert_close(input_features_stddev, EXPECTED_STDDEV)
- torch.testing.assert_close(input_features[0, :30, 0], EXPECTED_INPUT_FEATURES, atol=1e-4, rtol=1e-5)
+ torch.testing.assert_close(input_features[0, :30, 0], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/univnet/test_modeling_univnet.py b/tests/models/univnet/test_modeling_univnet.py
index 6310224e07f2..9a7ade715527 100644
--- a/tests/models/univnet/test_modeling_univnet.py
+++ b/tests/models/univnet/test_modeling_univnet.py
@@ -276,9 +276,9 @@ def test_model_inference_batched(self):
EXPECTED_STDDEV = torch.tensor(0.35230172)
EXPECTED_SLICE = torch.tensor([-0.3408, -0.6045, -0.5052, 0.1160, -0.1556, -0.0405, -0.3024, -0.5290, -0.5019])
- torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, atol=1e-4, rtol=1e-5)
- torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, atol=1e-4, rtol=1e-5)
- torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, atol=5e-4, rtol=1e-5)
+ torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, rtol=5e-4, atol=5e-4)
def test_model_inference_unbatched(self):
# Load sample checkpoint from Tortoise TTS
@@ -300,9 +300,9 @@ def test_model_inference_unbatched(self):
EXPECTED_STDDEV = torch.tensor(0.33986747)
EXPECTED_SLICE = torch.tensor([-0.3276, -0.5504, -0.3484, 0.3574, -0.0373, -0.1826, -0.4880, -0.6431, -0.5162])
- torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, atol=1e-4, rtol=1e-5)
- torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, atol=1e-4, rtol=1e-5)
- torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, atol=1e-3, rtol=1e-5)
+ torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, rtol=1e-3, atol=1e-3)
def test_integration(self):
feature_extractor = UnivNetFeatureExtractor.from_pretrained("dg845/univnet-dev")
@@ -331,6 +331,6 @@ def test_integration(self):
EXPECTED_SLICE = torch.tensor([-4.3934e-04, -1.8203e-04, -3.3033e-04, -3.8716e-04, -1.6125e-04, 3.5389e-06, -3.3149e-04, -3.7613e-04, -2.3331e-04])
# fmt: on
- torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, atol=5e-6, rtol=1e-5)
- torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, atol=1e-4, rtol=1e-5)
- torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, atol=5e-6, rtol=1e-5)
+ torch.testing.assert_close(waveform_mean, EXPECTED_MEAN, rtol=5e-6, atol=5e-6)
+ torch.testing.assert_close(waveform_stddev, EXPECTED_STDDEV, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(waveform_slice, EXPECTED_SLICE, rtol=5e-6, atol=5e-6)
diff --git a/tests/models/upernet/test_modeling_upernet.py b/tests/models/upernet/test_modeling_upernet.py
index 43146a477931..94ddae0ee760 100644
--- a/tests/models/upernet/test_modeling_upernet.py
+++ b/tests/models/upernet/test_modeling_upernet.py
@@ -310,7 +310,7 @@ def test_inference_swin_backbone(self):
expected_slice = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_convnext_backbone(self):
processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
@@ -328,4 +328,4 @@ def test_inference_convnext_backbone(self):
expected_slice = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/video_llava/test_modeling_video_llava.py b/tests/models/video_llava/test_modeling_video_llava.py
index fa7800bdc47e..cead90bd869b 100644
--- a/tests/models/video_llava/test_modeling_video_llava.py
+++ b/tests/models/video_llava/test_modeling_video_llava.py
@@ -389,7 +389,7 @@ def test_inputs_embeds_matches_input_ids(self):
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
def test_mismatching_num_image_tokens(self):
"""
diff --git a/tests/models/videomae/test_modeling_videomae.py b/tests/models/videomae/test_modeling_videomae.py
index 212eae147122..1e470e2d7845 100644
--- a/tests/models/videomae/test_modeling_videomae.py
+++ b/tests/models/videomae/test_modeling_videomae.py
@@ -385,7 +385,7 @@ def test_inference_for_video_classification(self):
expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_for_pretraining(self):
@@ -409,11 +409,11 @@ def test_inference_for_pretraining(self):
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
)
self.assertEqual(outputs.logits.shape, expected_shape)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify the loss (`config.norm_pix_loss` = `True`)
expected_loss = torch.tensor([0.5142], device=torch_device)
- self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
+ torch.testing.assert_close(outputs.loss, expected_loss, rtol=1e-4, atol=1e-4)
# verify the loss (`config.norm_pix_loss` = `False`)
model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
@@ -424,4 +424,4 @@ def test_inference_for_pretraining(self):
outputs = model(**inputs)
expected_loss = torch.tensor(torch.tensor([0.6469]), device=torch_device)
- self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
+ torch.testing.assert_close(outputs.loss, expected_loss, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/vilt/test_modeling_vilt.py b/tests/models/vilt/test_modeling_vilt.py
index b7c2c604522e..7977d6298fc3 100644
--- a/tests/models/vilt/test_modeling_vilt.py
+++ b/tests/models/vilt/test_modeling_vilt.py
@@ -587,7 +587,7 @@ def test_inference_masked_lm(self):
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify masked token prediction equals "cats"
predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
@@ -612,7 +612,7 @@ def test_inference_visual_question_answering(self):
expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# compute loss
vqa_labels = [[2, 3, 155, 800]]
@@ -673,4 +673,4 @@ def test_inference_natural_language_visual_reasoning(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/vipllava/test_modeling_vipllava.py b/tests/models/vipllava/test_modeling_vipllava.py
index 3cbac0ddefa8..44d2550bb492 100644
--- a/tests/models/vipllava/test_modeling_vipllava.py
+++ b/tests/models/vipllava/test_modeling_vipllava.py
@@ -226,7 +226,7 @@ def test_inputs_embeds_matches_input_ids(self):
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
# Copied from tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_mismatching_num_image_tokens
def test_mismatching_num_image_tokens(self):
diff --git a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py
index 2b517034bffb..a680e504cd63 100644
--- a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py
+++ b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py
@@ -1142,7 +1142,7 @@ def test_inference_handwritten(self):
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
).to(torch_device)
- self.assertTrue(torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, 0, :10], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_printed(self):
@@ -1176,7 +1176,7 @@ def test_inference_printed(self):
device=torch_device,
)
- self.assertTrue(torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, 0, :10], expected_slice, rtol=1e-4, atol=1e-4)
@require_vision
@@ -1272,7 +1272,7 @@ def test_inference_docvqa(self):
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([24.3873, -6.4491, 32.5394]).to(torch_device)
- self.assertTrue(torch.allclose(logits[0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# step 2: generation
task_prompt = "{user_input}"
@@ -1336,7 +1336,7 @@ def test_inference_cordv2(self):
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-27.4344, -3.2686, -19.3524], device=torch_device)
- self.assertTrue(torch.allclose(logits[0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# step 2: generation
task_prompt = ""
@@ -1398,7 +1398,7 @@ def test_inference_rvlcdip(self):
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-17.6490, -4.8381, -15.7577], device=torch_device)
- self.assertTrue(torch.allclose(logits[0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# step 2: generation
task_prompt = ""
@@ -1475,7 +1475,7 @@ def test_forward_pass(self):
[1.6253, -4.2179, 5.8532, -2.7911, -5.0609, -4.7397, -4.2890, -5.1073, -4.8908, -4.9729]
).to(torch_device)
- self.assertTrue(torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-4))
+ torch.testing.assert_close(logits[0, 0, :10], expected_slice, rtol=1e-4, atol=1e-4)
def test_generation(self):
processor = self.default_processor
diff --git a/tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py b/tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py
index d935c0d27d1c..ab4adeb5d466 100644
--- a/tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py
+++ b/tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py
@@ -515,4 +515,4 @@ def test_inference(self):
expected_logits = torch.tensor([[1.2284727, 0.3104122]])
- self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
diff --git a/tests/models/visual_bert/test_modeling_visual_bert.py b/tests/models/visual_bert/test_modeling_visual_bert.py
index d24ea14b6510..5517f3e22ead 100644
--- a/tests/models/visual_bert/test_modeling_visual_bert.py
+++ b/tests/models/visual_bert/test_modeling_visual_bert.py
@@ -605,14 +605,14 @@ def test_inference_vqa_coco_pre(self):
[[[-5.1858, -5.1903, -4.9142], [-6.2214, -5.9238, -5.8381], [-6.3027, -5.9939, -5.9297]]]
)
- self.assertTrue(torch.allclose(output.prediction_logits[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output.prediction_logits[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
expected_shape_2 = torch.Size((1, 2))
self.assertEqual(output.seq_relationship_logits.shape, expected_shape_2)
expected_slice_2 = torch.tensor([[0.7393, 0.1754]])
- self.assertTrue(torch.allclose(output.seq_relationship_logits, expected_slice_2, atol=1e-4))
+ torch.testing.assert_close(output.seq_relationship_logits, expected_slice_2, rtol=1e-4, atol=1e-4)
@slow
def test_inference_vqa(self):
@@ -644,7 +644,7 @@ def test_inference_vqa(self):
[[-8.9898, 3.0803, -1.8016, 2.4542, -8.3420, -2.0224, -3.3124, -4.4139, -3.1491, -3.8997]]
)
- self.assertTrue(torch.allclose(output.logits[:, :10], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output.logits[:, :10], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_nlvr(self):
@@ -674,7 +674,7 @@ def test_inference_nlvr(self):
expected_slice = torch.tensor([[-1.1436, 0.8900]])
- self.assertTrue(torch.allclose(output.logits, expected_slice, atol=1e-4))
+ torch.testing.assert_close(output.logits, expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_vcr(self):
@@ -705,4 +705,4 @@ def test_inference_vcr(self):
expected_slice = torch.tensor([[-7.7697, -7.7697, -7.7697, -7.7697]])
- self.assertTrue(torch.allclose(output.logits, expected_slice, atol=1e-4))
+ torch.testing.assert_close(output.logits, expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/vit/test_modeling_vit.py b/tests/models/vit/test_modeling_vit.py
index cace1d377034..aeb38f73f296 100644
--- a/tests/models/vit/test_modeling_vit.py
+++ b/tests/models/vit/test_modeling_vit.py
@@ -285,7 +285,7 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_interpolate_pos_encoding(self):
@@ -312,7 +312,7 @@ def test_inference_interpolate_pos_encoding(self):
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
@require_accelerate
diff --git a/tests/models/vit_mae/test_modeling_vit_mae.py b/tests/models/vit_mae/test_modeling_vit_mae.py
index 5cff9616e004..fb312a17e48a 100644
--- a/tests/models/vit_mae/test_modeling_vit_mae.py
+++ b/tests/models/vit_mae/test_modeling_vit_mae.py
@@ -331,7 +331,7 @@ def test_inference_for_pretraining(self):
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice.to(torch_device), rtol=1e-4, atol=1e-4)
@slow
def test_inference_interpolate_pos_encoding(self):
diff --git a/tests/models/vit_msn/test_modeling_vit_msn.py b/tests/models/vit_msn/test_modeling_vit_msn.py
index 3d4262d44970..bfee2d81de27 100644
--- a/tests/models/vit_msn/test_modeling_vit_msn.py
+++ b/tests/models/vit_msn/test_modeling_vit_msn.py
@@ -230,4 +230,4 @@ def test_inference_image_classification_head(self):
expected_slice = torch.tensor([0.5588, 0.6853, -0.5929]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/vitmatte/test_modeling_vitmatte.py b/tests/models/vitmatte/test_modeling_vitmatte.py
index 4f96eb8b11ff..d52cc38f7dba 100644
--- a/tests/models/vitmatte/test_modeling_vitmatte.py
+++ b/tests/models/vitmatte/test_modeling_vitmatte.py
@@ -292,4 +292,4 @@ def test_inference(self):
expected_slice = torch.tensor(
[[0.9977, 0.9987, 0.9990], [0.9980, 0.9998, 0.9998], [0.9983, 0.9998, 0.9998]], device=torch_device
)
- self.assertTrue(torch.allclose(alphas[0, 0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(alphas[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/vitpose/test_modeling_vitpose.py b/tests/models/vitpose/test_modeling_vitpose.py
index 73129956a3dc..f7acf4ca1d57 100644
--- a/tests/models/vitpose/test_modeling_vitpose.py
+++ b/tests/models/vitpose/test_modeling_vitpose.py
@@ -275,9 +275,9 @@ def test_inference_pose_estimation(self):
expected_scores = torch.tensor([8.7529e-01, 8.4315e-01, 9.2678e-01])
self.assertEqual(len(pose_results), 2)
- self.assertTrue(torch.allclose(pose_results[1]["bbox"].cpu(), expected_bbox, atol=1e-4))
- self.assertTrue(torch.allclose(pose_results[1]["keypoints"][:3].cpu(), expected_keypoints, atol=1e-2))
- self.assertTrue(torch.allclose(pose_results[1]["scores"][:3].cpu(), expected_scores, atol=1e-4))
+ torch.testing.assert_close(pose_results[1]["bbox"].cpu(), expected_bbox, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(pose_results[1]["keypoints"][:3].cpu(), expected_keypoints, rtol=1e-2, atol=1e-2)
+ torch.testing.assert_close(pose_results[1]["scores"][:3].cpu(), expected_scores, rtol=1e-4, atol=1e-4)
@slow
def test_batched_inference(self):
@@ -323,6 +323,6 @@ def test_batched_inference(self):
self.assertEqual(len(pose_results), 2)
self.assertEqual(len(pose_results[0]), 2)
- self.assertTrue(torch.allclose(pose_results[0][1]["bbox"].cpu(), expected_bbox, atol=1e-4))
- self.assertTrue(torch.allclose(pose_results[0][1]["keypoints"][:3].cpu(), expected_keypoints, atol=1e-2))
- self.assertTrue(torch.allclose(pose_results[0][1]["scores"][:3].cpu(), expected_scores, atol=1e-4))
+ torch.testing.assert_close(pose_results[0][1]["bbox"].cpu(), expected_bbox, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(pose_results[0][1]["keypoints"][:3].cpu(), expected_keypoints, rtol=1e-2, atol=1e-2)
+ torch.testing.assert_close(pose_results[0][1]["scores"][:3].cpu(), expected_scores, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/vits/test_modeling_vits.py b/tests/models/vits/test_modeling_vits.py
index 9733fb4bce1e..42524ebf8b1d 100644
--- a/tests/models/vits/test_modeling_vits.py
+++ b/tests/models/vits/test_modeling_vits.py
@@ -434,7 +434,7 @@ def test_forward(self):
]
)
# fmt: on
- self.assertTrue(torch.allclose(outputs.waveform[0, 10000:10030].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(outputs.waveform[0, 10000:10030].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@require_torch_fp16
def test_forward_fp16(self):
@@ -465,4 +465,4 @@ def test_forward_fp16(self):
]
).to(torch.float16)
# fmt: on
- self.assertTrue(torch.allclose(outputs.waveform[0, 10000:10030].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(outputs.waveform[0, 10000:10030].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/vivit/test_modeling_vivit.py b/tests/models/vivit/test_modeling_vivit.py
index 8e6b0825948d..5cab10700b73 100644
--- a/tests/models/vivit/test_modeling_vivit.py
+++ b/tests/models/vivit/test_modeling_vivit.py
@@ -357,7 +357,7 @@ def test_inference_for_video_classification(self):
# taken from original model
expected_slice = torch.tensor([-0.9498, 2.7971, -1.4049, 0.1024, -1.8353]).to(torch_device)
- self.assertTrue(torch.allclose(outputs.logits[0, :5], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :5], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_interpolate_pos_encoding(self):
diff --git a/tests/models/wav2vec2/test_modeling_wav2vec2.py b/tests/models/wav2vec2/test_modeling_wav2vec2.py
index 9e82002f611f..10ca9a22e43c 100644
--- a/tests/models/wav2vec2/test_modeling_wav2vec2.py
+++ b/tests/models/wav2vec2/test_modeling_wav2vec2.py
@@ -1166,7 +1166,7 @@ def get_logits(model, input_features):
logits_2 = get_logits(model_2, input_features)
- self.assertTrue(torch.allclose(logits, logits_2, atol=1e-3))
+ torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)
# test that loading adapter weights with mismatched vocab sizes can be loaded
def test_load_target_lang_with_mismatched_size(self):
@@ -1203,7 +1203,7 @@ def get_logits(model, input_features):
logits_2 = get_logits(model_2, input_features)
- self.assertTrue(torch.allclose(logits, logits_2, atol=1e-3))
+ torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)
def test_load_attn_adapter(self):
processor = Wav2Vec2Processor.from_pretrained(
@@ -1250,7 +1250,7 @@ def get_logits(model, input_features):
model.load_adapter("ita", use_safetensors=True)
logits_2 = get_logits(model, input_features)
- self.assertTrue(torch.allclose(logits, logits_2, atol=1e-3))
+ torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)
with tempfile.TemporaryDirectory() as tempdir:
model.save_pretrained(tempdir)
@@ -1271,7 +1271,7 @@ def get_logits(model, input_features):
logits_2 = get_logits(model, input_features)
- self.assertTrue(torch.allclose(logits, logits_2, atol=1e-3))
+ torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)
model = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter")
logits = get_logits(model, input_features)
@@ -1282,7 +1282,7 @@ def get_logits(model, input_features):
logits_2 = get_logits(model, input_features)
- self.assertTrue(torch.allclose(logits, logits_2, atol=1e-3))
+ torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)
@slow
def test_model_from_pretrained(self):
@@ -1595,7 +1595,7 @@ def test_inference_integration(self):
], device=torch_device)
# fmt: on
- self.assertTrue(torch.allclose(cosine_sim_masked, expected_cosine_sim_masked, atol=1e-3))
+ torch.testing.assert_close(cosine_sim_masked, expected_cosine_sim_masked, rtol=1e-3, atol=1e-3)
def test_inference_pretrained(self):
model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base")
@@ -1734,7 +1734,7 @@ def test_inference_keyword_spotting(self):
expected_logits = torch.tensor([6.1186, 11.8961, 10.2931, 6.0898], device=torch_device)
self.assertListEqual(predicted_ids.tolist(), expected_labels)
- self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-2))
+ torch.testing.assert_close(predicted_logits, expected_logits, rtol=1e-2, atol=1e-2)
def test_inference_intent_classification(self):
model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ic").to(torch_device)
@@ -1762,9 +1762,9 @@ def test_inference_intent_classification(self):
self.assertListEqual(predicted_ids_object.tolist(), expected_labels_object)
self.assertListEqual(predicted_ids_location.tolist(), expected_labels_location)
- self.assertTrue(torch.allclose(predicted_logits_action, expected_logits_action, atol=1e-2))
- self.assertTrue(torch.allclose(predicted_logits_object, expected_logits_object, atol=1e-2))
- self.assertTrue(torch.allclose(predicted_logits_location, expected_logits_location, atol=1e-2))
+ torch.testing.assert_close(predicted_logits_action, expected_logits_action, rtol=1e-2, atol=1e-2)
+ torch.testing.assert_close(predicted_logits_object, expected_logits_object, rtol=1e-2, atol=1e-2)
+ torch.testing.assert_close(predicted_logits_location, expected_logits_location, rtol=1e-2, atol=1e-2)
def test_inference_speaker_identification(self):
model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-sid").to(torch_device)
@@ -1785,7 +1785,7 @@ def test_inference_speaker_identification(self):
expected_logits = torch.tensor([37.5627, 71.6362, 64.2419, 31.7778], device=torch_device)
self.assertListEqual(predicted_ids.tolist(), expected_labels)
- self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-2))
+ torch.testing.assert_close(predicted_logits, expected_logits, rtol=1e-2, atol=1e-2)
def test_inference_emotion_recognition(self):
model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-er").to(torch_device)
@@ -1804,7 +1804,7 @@ def test_inference_emotion_recognition(self):
expected_logits = torch.tensor([2.1722, 3.0779, 8.0287, 6.6797], device=torch_device)
self.assertListEqual(predicted_ids.tolist(), expected_labels)
- self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-2))
+ torch.testing.assert_close(predicted_logits, expected_logits, rtol=1e-2, atol=1e-2)
def test_phoneme_recognition(self):
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft").to(torch_device)
@@ -1936,7 +1936,7 @@ def test_inference_diarization(self):
)
self.assertEqual(labels[0, :, 0].sum(), 555)
self.assertEqual(labels[0, :, 1].sum(), 299)
- self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2))
+ torch.testing.assert_close(outputs.logits[:, :4], expected_logits, rtol=1e-2, atol=1e-2)
def test_inference_speaker_verification(self):
model = Wav2Vec2ForXVector.from_pretrained("anton-l/wav2vec2-base-superb-sv").to(torch_device)
diff --git a/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py b/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py
index f0320583bf96..eaea550ee976 100644
--- a/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py
+++ b/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py
@@ -506,5 +506,5 @@ def test_word_time_stamp_integration(self):
expected_end_tensor = torch.tensor([0.7800, 1.1000, 1.6600, 1.9200, 2.0400, 2.8000, 3.3000, 3.8800, 4.2800])
# fmt: on
- self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
- self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
+ torch.testing.assert_close(start_times, expected_start_tensor, rtol=0.01, atol=0.01)
+ torch.testing.assert_close(end_times, expected_end_tensor, rtol=0.01, atol=0.01)
diff --git a/tests/models/wavlm/test_modeling_wavlm.py b/tests/models/wavlm/test_modeling_wavlm.py
index b20792d83545..ed02c6aa1419 100644
--- a/tests/models/wavlm/test_modeling_wavlm.py
+++ b/tests/models/wavlm/test_modeling_wavlm.py
@@ -525,7 +525,7 @@ def test_inference_base(self):
EXPECTED_HIDDEN_STATES_SLICE = torch.tensor(
[[[0.0577, 0.1161], [0.0579, 0.1165]], [[0.0199, 0.1237], [0.0059, 0.0605]]]
)
- self.assertTrue(torch.allclose(hidden_states_slice, EXPECTED_HIDDEN_STATES_SLICE, atol=5e-2))
+ torch.testing.assert_close(hidden_states_slice, EXPECTED_HIDDEN_STATES_SLICE, rtol=5e-2, atol=5e-2)
def test_inference_large(self):
model = WavLMModel.from_pretrained("microsoft/wavlm-large").to(torch_device)
@@ -549,7 +549,7 @@ def test_inference_large(self):
[[[0.2122, 0.0500], [0.2118, 0.0563]], [[0.1353, 0.1818], [0.2453, 0.0595]]]
)
- self.assertTrue(torch.allclose(hidden_states_slice, EXPECTED_HIDDEN_STATES_SLICE, rtol=5e-2))
+        torch.testing.assert_close(hidden_states_slice, EXPECTED_HIDDEN_STATES_SLICE, rtol=5e-2, atol=5e-2)
def test_inference_diarization(self):
model = WavLMForAudioFrameClassification.from_pretrained("microsoft/wavlm-base-plus-sd").to(torch_device)
@@ -576,7 +576,7 @@ def test_inference_diarization(self):
)
self.assertEqual(labels[0, :, 0].sum(), 258)
self.assertEqual(labels[0, :, 1].sum(), 647)
- self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2))
+ torch.testing.assert_close(outputs.logits[:, :4], expected_logits, rtol=1e-2, atol=1e-2)
def test_inference_speaker_verification(self):
model = WavLMForXVector.from_pretrained("microsoft/wavlm-base-plus-sv").to(torch_device)
diff --git a/tests/models/whisper/test_feature_extraction_whisper.py b/tests/models/whisper/test_feature_extraction_whisper.py
index 4b2353bce002..ec2e29a41e0c 100644
--- a/tests/models/whisper/test_feature_extraction_whisper.py
+++ b/tests/models/whisper/test_feature_extraction_whisper.py
@@ -240,7 +240,7 @@ def test_torch_integration(self):
input_features = feature_extractor(input_speech, return_tensors="pt").input_features
self.assertEqual(input_features.shape, (1, 80, 3000))
- self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
+ torch.testing.assert_close(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
@unittest.mock.patch("transformers.models.whisper.feature_extraction_whisper.is_torch_available", lambda: False)
def test_numpy_integration(self):
@@ -302,4 +302,4 @@ def test_torch_integration_batch(self):
feature_extractor = WhisperFeatureExtractor()
input_features = feature_extractor(input_speech, return_tensors="pt").input_features
self.assertEqual(input_features.shape, (3, 80, 3000))
- self.assertTrue(torch.allclose(input_features[:, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
+ torch.testing.assert_close(input_features[:, 0, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py
index 2eff406a3b56..80c4025c259c 100644
--- a/tests/models/whisper/test_modeling_whisper.py
+++ b/tests/models/whisper/test_modeling_whisper.py
@@ -499,7 +499,7 @@ def test_encoder_sinusoidal_embed_positions(self):
for model_class in self.all_model_classes:
model = model_class(config)
embeds = model.get_encoder().embed_positions.weight
- self.assertTrue(torch.allclose(embeds, sinusoids(*embeds.shape)))
+ torch.testing.assert_close(embeds, sinusoids(*embeds.shape))
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
@@ -924,7 +924,7 @@ def test_flash_attn_2_inference_equivalence(self):
logits_fa = outputs_fa.decoder_hidden_states[-1]
# whisper FA2 needs very high tolerance
- self.assertTrue(torch.allclose(logits_fa, logits, atol=4e-1))
+ torch.testing.assert_close(logits_fa, logits, rtol=4e-1, atol=4e-1)
# check with inference + dropout
model.train()
@@ -969,7 +969,7 @@ def test_flash_attn_2_inference_equivalence_right_padding(self):
logits_fa = outputs_fa.decoder_hidden_states[-1]
# whisper FA2 needs very high tolerance
- self.assertTrue(torch.allclose(logits_fa, logits, atol=4e-1))
+ torch.testing.assert_close(logits_fa, logits, rtol=4e-1, atol=4e-1)
other_inputs = {
"decoder_input_ids": decoder_input_ids,
@@ -984,7 +984,7 @@ def test_flash_attn_2_inference_equivalence_right_padding(self):
logits_fa = outputs_fa.decoder_hidden_states[-1]
# whisper FA2 needs very high tolerance
- self.assertTrue(torch.allclose(logits_fa[:, -2:], logits[:, -2:], atol=4e-1))
+ torch.testing.assert_close(logits_fa[:, -2:], logits[:, -2:], rtol=4e-1, atol=4e-1)
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
@@ -1663,7 +1663,7 @@ def test_tiny_logits_librispeech(self):
]
)
# fmt: on
- self.assertTrue(torch.allclose(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
# fmt: off
EXPECTED_GENERATION = torch.tensor(
@@ -1677,7 +1677,7 @@ def test_tiny_logits_librispeech(self):
# fmt: on
head_logits = logits[0] @ model.decoder.embed_tokens.weight.T
- self.assertTrue(torch.allclose(head_logits[0, 0, :30].cpu(), EXPECTED_GENERATION, atol=1e-4))
+ torch.testing.assert_close(head_logits[0, 0, :30].cpu(), EXPECTED_GENERATION, rtol=1e-4, atol=1e-4)
@slow
def test_small_en_logits_librispeech(self):
@@ -1712,7 +1712,7 @@ def test_small_en_logits_librispeech(self):
]
)
# fmt: on
- self.assertTrue(torch.allclose(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_large_logits_librispeech(self):
@@ -1756,7 +1756,7 @@ def test_large_logits_librispeech(self):
)
# fmt: on
- self.assertTrue(torch.allclose(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_tiny_en_generation(self):
@@ -1868,7 +1868,7 @@ def test_large_batched_generation(self):
)
# fmt: on
- self.assertTrue(torch.allclose(generated_ids.cpu(), EXPECTED_LOGITS))
+ torch.testing.assert_close(generated_ids.cpu(), EXPECTED_LOGITS)
# fmt: off
EXPECTED_TRANSCRIPT = [
@@ -1942,7 +1942,7 @@ def test_tiny_en_batched_generation(self):
)
# fmt: on
- self.assertTrue(torch.allclose(generated_ids, EXPECTED_LOGITS))
+ torch.testing.assert_close(generated_ids, EXPECTED_LOGITS)
# fmt: off
EXPECTED_TRANSCRIPT = [
@@ -1975,7 +1975,7 @@ def test_tiny_timestamp_generation(self):
])
# fmt: on
- self.assertTrue(torch.allclose(generated_ids, EXPECTED_OUTPUT))
+ torch.testing.assert_close(generated_ids, EXPECTED_OUTPUT)
EXPECTED_TRANSCRIPT = [
{
@@ -2216,7 +2216,7 @@ def test_large_timestamp_generation(self):
50365, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 11, 293, 321, 366, 5404, 281, 2928, 702, 14943, 13, 50629, 50682, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50870, 50911, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256, 450, 10539, 949, 505, 11, 51245, 51287, 1034, 4680, 10117, 490, 3936, 293, 1080, 3542, 5160, 881, 26336, 281, 264, 1575, 13, 51494, 51523, 634, 575, 12525, 22618, 1968, 6144, 35617, 1456, 397, 266, 311, 589, 307, 534, 10281, 934, 439, 11, 51799, 51815, 50365, 293, 393, 4411, 50430
])
# fmt: on
- self.assertTrue(torch.allclose(generated_ids, EXPECTED_OUTPUT))
+ torch.testing.assert_close(generated_ids, EXPECTED_OUTPUT)
EXPECTED_TRANSCRIPT = [
{
@@ -2292,7 +2292,7 @@ def test_tiny_token_timestamp_generation(self):
])
# fmt: on
- self.assertTrue(torch.allclose(generate_outputs["token_timestamps"].to("cpu"), EXPECTED_OUTPUT))
+ torch.testing.assert_close(generate_outputs["token_timestamps"].to("cpu"), EXPECTED_OUTPUT)
@slow
def test_small_token_timestamp_generation(self):
@@ -2322,7 +2322,7 @@ def test_small_token_timestamp_generation(self):
])
# fmt: on
- self.assertTrue(torch.allclose(generate_outputs["token_timestamps"].to("cpu"), EXPECTED_OUTPUT))
+ torch.testing.assert_close(generate_outputs["token_timestamps"].to("cpu"), EXPECTED_OUTPUT)
@slow
def test_tiny_token_timestamp_batch_generation(self):
@@ -2403,7 +2403,7 @@ def test_tiny_token_timestamp_generation_longform(self):
# fmt: on
for segment, exp_segment in zip(generate_outputs["segments"][0], EXPECTED_OUTPUT):
- self.assertTrue(torch.allclose(segment["token_timestamps"], exp_segment))
+ torch.testing.assert_close(segment["token_timestamps"], exp_segment)
@slow
def test_tiny_specaugment_librispeech(self):
@@ -2438,7 +2438,7 @@ def test_tiny_specaugment_librispeech(self):
]
)
# fmt: on
- self.assertTrue(torch.allclose(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4))
+ torch.testing.assert_close(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_generate_with_prompt_ids(self):
diff --git a/tests/models/x_clip/test_modeling_x_clip.py b/tests/models/x_clip/test_modeling_x_clip.py
index 04dd2d9d2968..80ee63fb15c1 100644
--- a/tests/models/x_clip/test_modeling_x_clip.py
+++ b/tests/models/x_clip/test_modeling_x_clip.py
@@ -737,7 +737,7 @@ def test_inference(self):
expected_logits = torch.tensor([[14.0181, 20.2771, 14.4776]], device=torch_device)
- self.assertTrue(torch.allclose(outputs.logits_per_video, expected_logits, atol=1e-3))
+ torch.testing.assert_close(outputs.logits_per_video, expected_logits, rtol=1e-3, atol=1e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
@@ -771,6 +771,6 @@ def test_inference_interpolate_pos_encoding(self):
[[0.0126, 0.2109, 0.0609], [0.0448, 0.5862, -0.1688], [-0.0881, 0.8525, -0.3044]]
).to(torch_device)
- self.assertTrue(
- torch.allclose(outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)
+ torch.testing.assert_close(
+ outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4
)
diff --git a/tests/models/xlm_roberta/test_modeling_xlm_roberta.py b/tests/models/xlm_roberta/test_modeling_xlm_roberta.py
index f8ec1f5b7671..d9aac9578169 100644
--- a/tests/models/xlm_roberta/test_modeling_xlm_roberta.py
+++ b/tests/models/xlm_roberta/test_modeling_xlm_roberta.py
@@ -53,7 +53,7 @@ def test_xlm_roberta_base(self):
output = model(input_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
- self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
+ torch.testing.assert_close(output[:, :, -1], expected_output_values_last_dim, rtol=1e-3, atol=1e-3)
@require_torch_sdpa
def test_xlm_roberta_base_sdpa(self):
@@ -70,7 +70,7 @@ def test_xlm_roberta_base_sdpa(self):
output = model(input_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
- self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
+ torch.testing.assert_close(output[:, :, -1], expected_output_values_last_dim, rtol=1e-3, atol=1e-3)
@slow
def test_xlm_roberta_large(self):
@@ -89,4 +89,4 @@ def test_xlm_roberta_large(self):
output = model(input_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
- self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
+ torch.testing.assert_close(output[:, :, -1], expected_output_values_last_dim, rtol=1e-3, atol=1e-3)
diff --git a/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py b/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py
index 5d9abb238e79..9fac147c6176 100644
--- a/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py
+++ b/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py
@@ -542,7 +542,7 @@ def test_xlm_roberta_xl(self):
output = model(input_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
- self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
+ torch.testing.assert_close(output[:, :, -1], expected_output_values_last_dim, rtol=1e-3, atol=1e-3)
@unittest.skip(reason="Model is too large to be tested on the CI")
def test_xlm_roberta_xxl(self):
@@ -561,4 +561,4 @@ def test_xlm_roberta_xxl(self):
output = model(input_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
- self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
+ torch.testing.assert_close(output[:, :, -1], expected_output_values_last_dim, rtol=1e-3, atol=1e-3)
diff --git a/tests/models/xmod/test_modeling_xmod.py b/tests/models/xmod/test_modeling_xmod.py
index ae9a35c5d637..80a3ac13b06c 100644
--- a/tests/models/xmod/test_modeling_xmod.py
+++ b/tests/models/xmod/test_modeling_xmod.py
@@ -553,7 +553,7 @@ def test_xmod_base(self):
output = model(input_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
- self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
+ torch.testing.assert_close(output[:, :, -1], expected_output_values_last_dim, rtol=1e-3, atol=1e-3)
# language de_DE
model.set_default_language("de_DE")
@@ -569,7 +569,7 @@ def test_xmod_base(self):
output = model(input_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
- self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
+ torch.testing.assert_close(output[:, :, -1], expected_output_values_last_dim, rtol=1e-3, atol=1e-3)
@slow
def test_xmod_large_prenorm(self):
@@ -589,7 +589,7 @@ def test_xmod_large_prenorm(self):
output = model(input_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
- self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
+ torch.testing.assert_close(output[:, :, -1], expected_output_values_last_dim, rtol=1e-3, atol=1e-3)
# language de_DE
model.set_default_language("de_DE")
@@ -605,7 +605,7 @@ def test_xmod_large_prenorm(self):
output = model(input_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
- self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
+ torch.testing.assert_close(output[:, :, -1], expected_output_values_last_dim, rtol=1e-3, atol=1e-3)
@slow
def test_multilingual_batch(self):
@@ -631,7 +631,7 @@ def test_multilingual_batch(self):
output = model(input_ids, lang_ids=lang_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
- self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
+ torch.testing.assert_close(output[:, :, -1], expected_output_values_last_dim, rtol=1e-3, atol=1e-3)
@slow
def test_end_to_end_mask_fill(self):
diff --git a/tests/models/yolos/test_image_processing_yolos.py b/tests/models/yolos/test_image_processing_yolos.py
index 55a4be5c0992..e3b4dc409c41 100644
--- a/tests/models/yolos/test_image_processing_yolos.py
+++ b/tests/models/yolos/test_image_processing_yolos.py
@@ -185,8 +185,8 @@ def test_equivalence_padding(self):
encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
encoded_images = image_processing_2(image_inputs, return_tensors="pt")
- self.assertTrue(
- torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
+ torch.testing.assert_close(
+ encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], rtol=1e-4, atol=1e-4
)
@parameterized.expand(
@@ -234,31 +234,31 @@ def test_call_pytorch_with_coco_detection_annotations(self):
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
- self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([5832.7256, 11144.6689, 484763.2500, 829269.8125, 146579.4531, 164177.6250])
- self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
+ torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
- self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
+ torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
- self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
+ torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
- self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
+ torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
- self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
+ torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1056])
- self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
+ torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
def test_call_pytorch_with_coco_panoptic_annotations(self):
@@ -280,34 +280,34 @@ def test_call_pytorch_with_coco_panoptic_annotations(self):
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
- self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([146591.5000, 163974.2500, 480092.2500, 11187.0000, 5824.5000, 7562.5000])
- self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
+ torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
- self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
+ torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
- self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
+ torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
- self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
+ torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify masks
expected_masks_sum = 815161
self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
- self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
+ torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1056])
- self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
+ torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
# Output size is slight different from DETR as yolos takes mod of 16
@slow
@@ -373,8 +373,8 @@ def test_batched_coco_detection_annotations(self):
[0.5845, 0.4115, 0.3462, 0.7161],
]
)
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3, atol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1056]))
@@ -425,8 +425,8 @@ def test_batched_coco_detection_annotations(self):
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1, atol=1)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1, atol=1)
# Output size is slight different from DETR as yolos takes mod of 16
def test_batched_coco_panoptic_annotations(self):
@@ -495,8 +495,8 @@ def test_batched_coco_panoptic_annotations(self):
[0.3026, 0.2994, 0.6051, 0.5987],
]
)
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3, atol=1e-3)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3, atol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1056]))
@@ -548,8 +548,8 @@ def test_batched_coco_panoptic_annotations(self):
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
- self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1))
- self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1))
+ torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
+ torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_max_width_max_height_resizing_and_pad_strategy with Detr->Yolos
def test_max_width_max_height_resizing_and_pad_strategy(self):
diff --git a/tests/models/yolos/test_modeling_yolos.py b/tests/models/yolos/test_modeling_yolos.py
index 5c929aeb4099..e5857c8a338d 100644
--- a/tests/models/yolos/test_modeling_yolos.py
+++ b/tests/models/yolos/test_modeling_yolos.py
@@ -361,8 +361,8 @@ def test_inference_object_detection_head(self):
expected_slice_boxes = torch.tensor(
[[0.2536, 0.5449, 0.4643], [0.2037, 0.7735, 0.3672], [0.7692, 0.4056, 0.4549]], device=torch_device
)
- self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
- self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
+ torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice_logits, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=1e-4, atol=1e-4)
# verify postprocessing
results = image_processor.post_process_object_detection(
@@ -373,6 +373,6 @@ def test_inference_object_detection_head(self):
expected_slice_boxes = torch.tensor([331.8438, 80.5440, 369.9546, 188.0579]).to(torch_device)
self.assertEqual(len(results["scores"]), 5)
- self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
+ torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-4, atol=1e-4)
self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
- self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
+ torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes)
diff --git a/tests/models/yoso/test_modeling_yoso.py b/tests/models/yoso/test_modeling_yoso.py
index 4cfb7e22a5d2..2f13e91fc558 100644
--- a/tests/models/yoso/test_modeling_yoso.py
+++ b/tests/models/yoso/test_modeling_yoso.py
@@ -375,7 +375,7 @@ def test_inference_no_head(self):
[[[-0.0611, 0.1242, 0.0840], [0.0280, -0.0048, 0.1125], [0.0106, 0.0226, 0.0751]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_masked_lm(self):
@@ -394,7 +394,7 @@ def test_inference_masked_lm(self):
[[[-2.1313, -3.7285, -2.2407], [-2.7047, -3.3314, -2.6408], [0.0629, -2.5166, -0.3356]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_masked_lm_long_input(self):
@@ -413,4 +413,4 @@ def test_inference_masked_lm_long_input(self):
[[[-2.3914, -4.3742, -5.0956], [-4.0988, -4.2384, -7.0406], [-3.1427, -3.7192, -6.6800]]]
)
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
diff --git a/tests/models/zamba/test_modeling_zamba.py b/tests/models/zamba/test_modeling_zamba.py
index fc2d94c75078..662a4d060b74 100644
--- a/tests/models/zamba/test_modeling_zamba.py
+++ b/tests/models/zamba/test_modeling_zamba.py
@@ -341,10 +341,14 @@ def test_initialization(self):
if param.requires_grad:
if "A_log" in name:
A = torch.arange(1, config.mamba_d_state + 1, dtype=torch.float32)[None, :]
- self.assertTrue(torch.allclose(param.data, torch.log(A), atol=1e-5, rtol=1e-5))
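+                    # expand A to the per-head shape of the A_log parameter before comparing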
+ intermediate_dim = config.mamba_expand * config.hidden_size
+ A = A.expand(intermediate_dim, -1).reshape(
+ config.n_mamba_heads, intermediate_dim // config.n_mamba_heads, -1
+ )
+ torch.testing.assert_close(param.data, torch.log(A), rtol=1e-5, atol=1e-5)
elif "D" in name:
# check if it's a ones like
- self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5))
+ torch.testing.assert_close(param.data, torch.ones_like(param.data), rtol=1e-5, atol=1e-5)
elif "x_proj" in name or "dt_proj_weight" in name:
self.assertIn(
((param.data.mean() * 1e2).round() / 1e2).item(),
@@ -498,7 +502,7 @@ def _prepare_model_kwargs(input_ids, attention_mask, signature):
next_logits_with_padding = model(**model_kwargs).logits[:, -1, :]
# They should result in very similar logits
- self.assertTrue(torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=3e-3))
+ torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=3e-3, atol=3e-3)
@require_flash_attn
@require_torch_gpu
diff --git a/tests/models/zamba2/__init__.py b/tests/models/zamba2/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/tests/models/zamba2/test_modeling_zamba2.py b/tests/models/zamba2/test_modeling_zamba2.py
new file mode 100644
index 000000000000..2bd6732514c6
--- /dev/null
+++ b/tests/models/zamba2/test_modeling_zamba2.py
@@ -0,0 +1,666 @@
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Testing suite for the PyTorch Zamba model."""
+
+import math
+import tempfile
+import unittest
+
+import pytest
+from parameterized import parameterized
+
+from transformers import AutoTokenizer, Zamba2Config, is_torch_available
+from transformers.testing_utils import (
+ require_bitsandbytes,
+ require_flash_attn,
+ require_torch,
+ require_torch_gpu,
+ slow,
+ torch_device,
+)
+
+from ...generation.test_utils import GenerationTesterMixin
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin
+
+
+if is_torch_available():
+ import torch
+
+ from transformers import (
+ Zamba2ForCausalLM,
+ Zamba2ForSequenceClassification,
+ Zamba2Model,
+ )
+ from transformers.models.zamba2.modeling_zamba2 import (
+ Zamba2HybridDynamicCache,
+ )
+
+
+class Zamba2ModelTester:
+ def __init__(
+ self,
+ parent,
+ batch_size=14,
+ seq_length=7,
+ is_training=True,
+ use_input_mask=True,
+ use_labels=True,
+ vocab_size=99,
+ hidden_size=16,
+ mamba_d_state=2,
+ chunk_size=8,
+ mamba_dt_rank="auto",
+ num_hidden_layers=2,
+ num_attention_heads=2,
+ n_mamba_heads=8,
+ mamba_ngroups=8,
+ intermediate_size=4,
+ hidden_act="gelu",
+ hidden_mamba_act="silu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=16,
+ type_sequence_label_size=2,
+ initializer_range=0.02,
+ num_labels=3,
+ num_choices=4,
+ scope=None,
+ layers_block_type=["mamba", "hybrid"],
+ num_mem_blocks=1,
+ use_mem_rope=True,
+ ):
+ self.parent = parent
+ self.batch_size = batch_size
+ self.seq_length = seq_length
+ self.is_training = is_training
+ self.use_input_mask = use_input_mask
+ self.use_labels = use_labels
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.mamba_dt_rank = mamba_dt_rank
+ self.mamba_d_state = mamba_d_state
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.n_mamba_heads = n_mamba_heads
+ self.mamba_ngroups = mamba_ngroups
+ self.chunk_size = chunk_size
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_mamba_act = hidden_mamba_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.type_sequence_label_size = type_sequence_label_size
+ self.initializer_range = initializer_range
+ self.num_labels = num_labels
+ self.num_choices = num_choices
+ self.scope = scope
+ self.layers_block_type = layers_block_type
+ self.num_mem_blocks = num_mem_blocks
+ self.use_mem_rope = use_mem_rope
+
+ def prepare_config_and_inputs(self):
+ input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
+
+ input_mask = None
+ if self.use_input_mask:
+ input_mask = random_attention_mask([self.batch_size, self.seq_length])
+
+ sequence_labels = None
+ token_labels = None
+ choice_labels = None
+ if self.use_labels:
+ sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
+ token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
+ choice_labels = ids_tensor([self.batch_size], self.num_choices)
+
+ config = self.get_config()
+
+ return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
+
+ def get_config(self):
+ return Zamba2Config(
+ vocab_size=self.vocab_size,
+ hidden_size=self.hidden_size,
+ mamba_dt_rank=self.mamba_dt_rank,
+ mamba_d_state=self.mamba_d_state,
+ num_hidden_layers=self.num_hidden_layers,
+ num_attention_heads=self.num_attention_heads,
+ n_mamba_heads=self.n_mamba_heads,
+ intermediate_size=self.intermediate_size,
+ chunk_size=self.chunk_size,
+ hidden_act=self.hidden_act,
+ mamba_ngroups=self.mamba_ngroups,
+ hidden_mamba_act=self.hidden_mamba_act,
+ hidden_dropout_prob=self.hidden_dropout_prob,
+ attention_probs_dropout_prob=self.attention_probs_dropout_prob,
+ max_position_embeddings=self.max_position_embeddings,
+ type_vocab_size=self.type_vocab_size,
+ is_decoder=True,
+ initializer_range=self.initializer_range,
+ use_mamba_kernels=False,
+ layers_block_type=self.layers_block_type,
+ num_mem_blocks=self.num_mem_blocks,
+ use_mem_rope=self.use_mem_rope,
+ )
+
+ def prepare_config_and_inputs_for_decoder(self):
+ (
+ config,
+ input_ids,
+ input_mask,
+ sequence_labels,
+ token_labels,
+ choice_labels,
+ ) = self.prepare_config_and_inputs()
+
+ config.is_decoder = True
+
+ return (
+ config,
+ input_ids,
+ input_mask,
+ sequence_labels,
+ token_labels,
+ choice_labels,
+ )
+
+ def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
+ model = Zamba2Model(config=config)
+ model.to(torch_device)
+ model.eval()
+ result = model(input_ids, attention_mask=input_mask)
+ result = model(input_ids)
+ self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
+
+ def create_and_check_for_causal_lm(
+ self,
+ config,
+ input_ids,
+ input_mask,
+ sequence_labels,
+ token_labels,
+ choice_labels,
+ ):
+ model = Zamba2ForCausalLM(config=config)
+ model.to(torch_device)
+ model.eval()
+ result = model(input_ids, attention_mask=input_mask, labels=token_labels)
+ result = model(input_ids, attention_mask=input_mask)
+ result = model(input_ids, labels=token_labels)
+ result = model(input_ids)
+ self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
+
+ def create_and_check_decoder_model_past_large_inputs(
+ self,
+ config,
+ input_ids,
+ input_mask,
+ sequence_labels,
+ token_labels,
+ choice_labels,
+ ):
+ config.is_decoder = True
+ config.add_cross_attention = False
+ model = Zamba2ForCausalLM(config=config)
+ model.to(torch_device)
+ model.eval()
+
+ # first forward pass
+ # Attention: Zamba2 needs the cache to be initialized to return a cache!
+ past_key_values = Zamba2HybridDynamicCache(config, input_ids.shape[0], model.dtype, device=model.device)
+ outputs = model(
+ input_ids,
+ attention_mask=input_mask,
+ past_key_values=past_key_values,
+ use_cache=True,
+ )
+ past_key_values = outputs.past_key_values
+
+        # create hypothetical next tokens and extend next_input_ids
+ next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
+ next_mask = ids_tensor((self.batch_size, 1), vocab_size=2)
+
+        # append the new tokens to input_ids and the attention mask
+ next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
+ next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
+
+ output_from_no_past = model(
+ next_input_ids,
+ attention_mask=next_attention_mask,
+ output_hidden_states=True,
+ )["hidden_states"][0]
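+        # second pass: feed only the new token and reuse the cache; cache_position points at the appended position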
+ output_from_past = model(
+ next_tokens,
+ attention_mask=next_attention_mask,
+ past_key_values=past_key_values,
+ output_hidden_states=True,
+ cache_position=torch.arange(
+ input_ids.shape[1], input_ids.shape[1] + next_tokens.shape[1], device=model.device
+ ),
+ )["hidden_states"][0]
+
+ # select random slice
+ random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
+ output_from_no_past_slice = output_from_no_past[:, -1:, random_slice_idx].detach()
+ output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
+
+ self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
+
+ # test that outputs are equal for slice
+ self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
+
+ def create_and_check_for_sequence_classification(
+ self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
+ ):
+ config.num_labels = self.num_labels
+ model = Zamba2ForSequenceClassification(config)
+ model.to(torch_device)
+ model.eval()
+ result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
+ self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
+
+ def prepare_config_and_inputs_for_common(self):
+ config_and_inputs = self.prepare_config_and_inputs()
+ (
+ config,
+ input_ids,
+ input_mask,
+ sequence_labels,
+ token_labels,
+ choice_labels,
+ ) = config_and_inputs
+ inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
+ return config, inputs_dict
+
+
+@require_torch
+class Zamba2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
+ test_torchscript = False
+ all_model_classes = (
+ (
+ Zamba2Model,
+ Zamba2ForCausalLM,
+ Zamba2ForSequenceClassification,
+ )
+ if is_torch_available()
+ else ()
+ )
+ all_generative_model_classes = (Zamba2ForCausalLM,) if is_torch_available() else ()
+ pipeline_model_mapping = (
+ {
+ "feature-extraction": Zamba2Model,
+ "text-classification": Zamba2ForSequenceClassification,
+ "text-generation": Zamba2ForCausalLM,
+ "zero-shot": Zamba2ForSequenceClassification,
+ }
+ if is_torch_available()
+ else {}
+ )
+ test_headmasking = False
+ test_pruning = False
+
+ def setUp(self):
+ self.model_tester = Zamba2ModelTester(self)
+ self.config_tester = ConfigTester(self, config_class=Zamba2Config, hidden_size=37)
+
+ @unittest.skip("position_ids cannot be used to pad due to Mamba2 layers")
+ def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
+ pass
+
+ @unittest.skip("Zamba2 has a hybrid cache")
+ def test_past_key_values_format(self):
+ r"""
+ Zamba2's cache shape depends on whether a given layer is mamba or attention.
+        For mamba layers, the KV cache is empty and has shape [batch_size, 0].
+ The shape checks of this test assume instead that every layer has an attention cache, so we skip it.
+ """
+ pass
+
+ @unittest.skip(reason="A large mamba2 would be necessary (and costly) for that")
+ def test_multi_gpu_data_parallel_forward(self):
+ pass
+
+ def test_config(self):
+ self.config_tester.run_common_tests()
+
+ def test_model(self):
+ config_and_inputs = self.model_tester.prepare_config_and_inputs()
+ self.model_tester.create_and_check_model(*config_and_inputs)
+
+ def test_for_causal_lm(self):
+ config_and_inputs = self.model_tester.prepare_config_and_inputs()
+ self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
+
+ def test_for_sequence_classification(self):
+ config_and_inputs = self.model_tester.prepare_config_and_inputs()
+ self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
+
+ def test_decoder_model_past_with_large_inputs(self):
+ config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
+ self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
+
+ def test_initialization(self):
+ r"""
+ Overriding the test_initialization test as the A_log and D params of the Mamba block are initialized differently
+ """
+ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
+
+ configs_no_init = _config_zero_init(config)
+ for model_class in self.all_model_classes:
+ model = model_class(config=configs_no_init)
+ for name, param in model.named_parameters():
+ if param.requires_grad:
+ if "A_log" in name:
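+                        # A_log is expected to be initialized to log([1, ..., n_mamba_heads])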
+ A = torch.arange(1, config.n_mamba_heads + 1, dtype=torch.float32)[None, :]
+ self.assertTrue(torch.allclose(param.data, torch.log(A), atol=1e-5, rtol=1e-5))
+ elif "D" in name:
+ # check if it's a ones like
+ self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5))
+ elif "dt_bias" in name:
+ dt = torch.exp(
+ torch.tensor([0, 1]) * (math.log(config.time_step_max) - math.log(config.time_step_min))
+ + math.log(config.time_step_min)
+ ).clamp(min=config.time_step_floor)
+ inv_dt = dt + torch.log(-torch.expm1(-dt))
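+                        # inv_dt is the inverse softplus of dt, so dt_bias values should fall within [inv_dt[0], inv_dt[1]]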
+ if param.requires_grad:
+ self.assertTrue(param.data.max().item() <= inv_dt[1])
+ self.assertTrue(param.data.min().item() >= inv_dt[0])
+ else:
+ self.assertIn(
+ ((param.data.mean() * 1e9).round() / 1e9).item(),
+ [0.0, 1.0],
+ msg=f"Parameter {name} of model {model_class} seems not properly initialized",
+ )
+
+ @unittest.skip(reason="Cumbersome and redundant for Zamba2")
+ def test_mismatched_shapes_have_properly_initialized_weights(self):
+ r"""
+ Overriding the test_mismatched_shapes_have_properly_initialized_weights test because A_log and D params of the
+ Mamba block are initialized differently and we tested that in test_initialization
+ """
+ pass
+
+ def test_attention_outputs(self):
+ r"""
+ Overriding the test_attention_outputs test as the Zamba2 model outputs attention only for its attention layers
+ """
+ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
+ config.return_dict = True
+
+ seq_len = getattr(self.model_tester, "seq_length", None)
+ encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
+ encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
+
+ for model_class in self.all_model_classes:
+ inputs_dict["output_attentions"] = True
+ inputs_dict["output_hidden_states"] = False
+ config.return_dict = True
+ model = model_class(config)
+ model.to(torch_device)
+ model.eval()
+
+ with torch.no_grad():
+ outputs = model(**self._prepare_for_class(inputs_dict, model_class))
+ attentions = outputs.attentions
+
+ # check that output_attentions also work using config
+ del inputs_dict["output_attentions"]
+ config.output_attentions = True
+ model = model_class(config)
+ model.to(torch_device)
+ model.eval()
+ with torch.no_grad():
+ outputs = model(**self._prepare_for_class(inputs_dict, model_class))
+ attentions = outputs.attentions
+
+ self.assertListEqual(
+ list(attentions[0].shape[-3:]),
+ [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
+ )
+ out_len = len(outputs)
+
+ # Check attention is always last and order is fine
+ inputs_dict["output_attentions"] = True
+ inputs_dict["output_hidden_states"] = True
+ model = model_class(config)
+ model.to(torch_device)
+ model.eval()
+ with torch.no_grad():
+ outputs = model(**self._prepare_for_class(inputs_dict, model_class))
+
+ added_hidden_states = 1
+ self.assertEqual(out_len + added_hidden_states, len(outputs))
+
+ self_attentions = outputs.attentions
+
+ self.assertListEqual(
+ list(self_attentions[0].shape[-3:]),
+ [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
+ )
+
+ def _get_input_ids_and_config(self):
+ config_and_inputs = self.model_tester.prepare_config_and_inputs()
+ (
+ config,
+ input_ids,
+ input_mask,
+ sequence_labels,
+ token_labels,
+ choice_labels,
+ ) = config_and_inputs
+ return config, input_ids, input_mask
+
+ def test_left_padding_compatibility(self):
+ r"""
+        Overriding the test_left_padding_compatibility test as the mamba layers accentuate the numerical differences
+        caused by left padding (see the note and linked issue below). Using a more permissive tolerance value.
+ """
+ import inspect
+ # NOTE: left-padding results in small numerical differences. This is expected.
+ # See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535
+
+ # First, filter out models that don't support left padding - generative and decoder-only.
+ # Zamba2 is a decoder-only architecture
+ decoder_only_classes = self.all_generative_model_classes
+
+ # Then, test left-padding
+ def _prepare_model_kwargs(input_ids, attention_mask, signature):
+ model_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask}
+ if "position_ids" in signature:
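+                # derive position_ids from the attention mask; padded positions get a dummy value of 1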
+ position_ids = torch.cumsum(attention_mask, dim=-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ model_kwargs["position_ids"] = position_ids
+ if "cache_position" in signature:
+ cache_position = torch.arange(input_ids.shape[-1], device=torch_device)
+ model_kwargs["cache_position"] = cache_position
+ return model_kwargs
+
+ for model_class in decoder_only_classes:
+ config, input_ids, attention_mask = self._get_input_ids_and_config()
+ model = model_class(config).to(torch_device).eval()
+ signature = inspect.signature(model.forward).parameters.keys()
+
+ # Without padding
+ model_kwargs = _prepare_model_kwargs(input_ids, attention_mask, signature)
+ next_logits_wo_padding = model(**model_kwargs).logits[:, -1, :]
+
+ # With left-padding (length 32)
+ pad_size = (input_ids.shape[0], 32)
+ padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * config.pad_token_id
+ padded_input_ids = torch.cat((padding, input_ids), dim=1)
+ padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1)
+ model_kwargs = _prepare_model_kwargs(padded_input_ids, padded_attention_mask, signature)
+ next_logits_with_padding = model(**model_kwargs).logits[:, -1, :]
+
+ # They should result in very similar logits
+ self.assertTrue(torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=3e-3))
+
+ @require_flash_attn
+ @require_torch_gpu
+ @require_bitsandbytes
+ @pytest.mark.flash_attn_test
+ @slow
+ def test_flash_attn_2_fp32_ln(self):
+ r"""
+ Overriding the test_flash_attn_2_fp32_ln test as the Zamba2 model, like Mixtral, doesn't support
+        right padding + use_cache with FA2
+ """
+ for model_class in self.all_generative_model_classes:
+ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
+ model = model_class(config)
+
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ model.save_pretrained(tmpdirname)
+
+ dummy_input = inputs_dict[model.main_input_name]
+ dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input))
+ # NOTE: Zamba2 does not support right padding + use_cache with FA2.
+ dummy_attention_mask[:, -1] = 1
+
+ model = model_class.from_pretrained(
+ tmpdirname,
+ torch_dtype=torch.float16,
+ attn_implementation="flash_attention_2",
+ low_cpu_mem_usage=True,
+ load_in_4bit=True,
+ )
+
+ for _, param in model.named_parameters():
+                    # upcast only layer norms: with load_in_4bit, the params left in fp16/bf16 are the norm layers
+ if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16):
+ param.data = param.data.to(torch.float32)
+
+ _ = model(dummy_input)
+ # with attention mask
+ _ = model(dummy_input, attention_mask=dummy_attention_mask)
+
+ @require_flash_attn
+ @require_torch_gpu
+ @pytest.mark.flash_attn_test
+ @slow
+ def test_flash_attn_2_inference_equivalence_right_padding(self):
+ r"""
+        Overriding the test_flash_attn_2_inference_equivalence_right_padding test as the Zamba2 model, like Mixtral,
+        doesn't support right padding + use_cache with FA2
+ """
+ self.skipTest(reason="Zamba2 flash attention does not support right padding")
+
+ @unittest.skip(reason="Zamba2 has its own special cache type")
+ @parameterized.expand([(1, False), (1, True), (4, False)])
+ def test_new_cache_format(self, num_beams, do_sample):
+ pass
+
+
+@require_torch
+class Zamba2ModelIntegrationTest(unittest.TestCase):
+ model = None
+ tokenizer = None
+
+ @classmethod
+ @slow
+ def setUpClass(cls):
+ model_id = "Zyphra/Zamba2-1.2B"
+ cls.model = Zamba2ForCausalLM.from_pretrained(
+ model_id, torch_dtype=torch.float32, low_cpu_mem_usage=True, revision="PR"
+ )
+ cls.tokenizer = AutoTokenizer.from_pretrained(model_id, revision="PR")
+
+ @parameterized.expand([(torch_device,), ("cpu",)])
+ @slow
+ def test_simple_generate(self, torch_device):
+ self.model.to(torch_device)
+
+ input_ids = self.tokenizer("Hey how are you doing on this lovely evening?", return_tensors="pt")[
+ "input_ids"
+ ].to(torch_device)
+ out = self.model.generate(input_ids, do_sample=False, max_new_tokens=10)
+ output_sentence = self.tokenizer.decode(out[0, :])
+ self.assertEqual(
+ output_sentence,
+ " Hey how are you doing on this lovely evening?\n\nI'm doing well, thanks for",
+ )
+
+ with torch.no_grad():
+ logits = self.model(input_ids=input_ids).logits.to(dtype=torch.float32)
+
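+        # Regression reference: first 40 logits of the last token, compared within rtol/atol = 1e-3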
+ EXPECTED_LOGITS_NO_GRAD = torch.tensor(
+ [
+ -5.9587, 10.5152, 7.0382, -2.8728, -4.8143, -4.8142, -4.8142, -4.8144,
+ -4.8143, -4.8143, -4.8142, -4.8142, 6.0185, 18.0037, -4.8142, -4.8144,
+ -4.8143, -4.8142, -4.8143, -4.8143, -4.8143, -4.8143, -4.8142, -4.8143,
+ -4.8144, -4.8143, -4.8143, -4.8141, -4.8142, -4.8142, -4.8142, -4.8144,
+ -4.8143, -4.8143, -4.8143, -4.8142, -4.8144, -4.8144, -4.8142, -4.8142
+ ]
+ , dtype=torch.float32) # fmt: skip
+ torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD, rtol=1e-3, atol=1e-3)
+
+ @parameterized.expand([(torch_device,), ("cpu",)])
+ @slow
+ def test_simple_batched_generate_with_padding(self, torch_device):
+ self.model.to(torch_device)
+
+ inputs = self.tokenizer(
+ ["Hey how are you doing on this lovely evening?", "When did the Roman empire "],
+ padding=True,
+ return_tensors="pt",
+ ).to(torch_device)
+ out = self.model.generate(**inputs, do_sample=False, max_new_tokens=10)
+ output_sentences = self.tokenizer.batch_decode(out)
+ self.assertEqual(
+ output_sentences[0],
+ " Hey how are you doing on this lovely evening?\n\nI'm doing well, thanks for",
+ )
+
+ self.assertEqual(
+ output_sentences[1],
+ "[PAD][PAD][PAD][PAD] When did the Roman empire 1st fall?\nThe Roman Empire fell in",
+ )
+
+ with torch.no_grad():
+ logits = self.model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"]).logits.to(
+ dtype=torch.float32
+ )
+
+ EXPECTED_LOGITS_NO_GRAD_0 = torch.tensor(
+ [
+ -5.9611, 10.5208, 7.0411, -2.8743, -4.8167, -4.8167, -4.8167, -4.8168,
+ -4.8167, -4.8167, -4.8167, -4.8166, 6.0218, 18.0062, -4.8167, -4.8168,
+ -4.8167, -4.8167, -4.8167, -4.8168, -4.8168, -4.8168, -4.8167, -4.8167,
+ -4.8168, -4.8167, -4.8167, -4.8165, -4.8167, -4.8167, -4.8167, -4.8169,
+ -4.8168, -4.8168, -4.8168, -4.8166, -4.8169, -4.8168, -4.8167, -4.8167
+ ]
+ , dtype=torch.float32) # fmt: skip
+
+ EXPECTED_LOGITS_NO_GRAD_1 = torch.tensor(
+ [
+ 0.1966, 6.3449, 3.8350, -5.7291, -6.5106, -6.5104, -6.5103, -6.5104,
+ -6.5103, -6.5104, -6.5106, -6.5105, 7.8700, 13.5434, -6.5104, -6.5096,
+ -6.5106, -6.5102, -6.5106, -6.5106, -6.5105, -6.5106, -6.5104, -6.5106,
+ -6.5105, -6.5106, -6.5106, -6.5113, -6.5102, -6.5105, -6.5108, -6.5105,
+ -6.5104, -6.5106, -6.5106, -6.5104, -6.5106, -6.5107, -6.5103, -6.5105 ]
+ , dtype=torch.float32) # fmt: skip
+
+ torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD_0, rtol=1e-3, atol=1e-3)
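+        # The left-padded (second) sequence is allowed a slightly larger absolute tolerance on CPU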
+ torch.testing.assert_close(
+ logits[1, -1, :40].cpu(),
+ EXPECTED_LOGITS_NO_GRAD_1,
+ rtol=1e-3,
+ atol=6e-3 if torch_device == "cpu" else 1e-3,
+ )
diff --git a/tests/models/zoedepth/test_modeling_zoedepth.py b/tests/models/zoedepth/test_modeling_zoedepth.py
index a9c1ffb149d8..aef49c4752c2 100644
--- a/tests/models/zoedepth/test_modeling_zoedepth.py
+++ b/tests/models/zoedepth/test_modeling_zoedepth.py
@@ -253,7 +253,7 @@ def test_inference_depth_estimation(self):
[[1.0020, 1.0219, 1.0389], [1.0349, 1.0816, 1.1000], [1.0576, 1.1094, 1.1249]],
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_depth_estimation_multiple_heads(self):
image_processor = ZoeDepthImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti")
@@ -275,7 +275,7 @@ def test_inference_depth_estimation_multiple_heads(self):
[[1.1571, 1.1438, 1.1783], [1.2163, 1.2036, 1.2320], [1.2688, 1.2461, 1.2734]],
).to(torch_device)
- self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
+ torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def check_target_size(
self,
@@ -301,7 +301,7 @@ def check_target_size(
out_l.unsqueeze(0).unsqueeze(1), size=img.size[::-1], mode="bicubic", align_corners=False
)
self.assertTrue((np.array(out_l.shape)[::-1] == np.array(img.size) * 2).all())
- self.assertTrue(torch.allclose(out, out_l_reduced, rtol=2e-2))
+ torch.testing.assert_close(out, out_l_reduced, rtol=2e-2)
def check_post_processing_test(self, image_processor, images, model, pad_input=True, flip_aug=True):
inputs = image_processor(images=images, return_tensors="pt", do_pad=pad_input).to(torch_device)
@@ -323,7 +323,7 @@ def check_post_processing_test(self, image_processor, images, model, pad_input=T
for img, out, expected_slice in zip(images, outputs, expected_slices):
out = out["predicted_depth"]
self.assertTrue(img.size == out.shape[::-1])
- self.assertTrue(torch.allclose(expected_slice, out[:3, :3], rtol=1e-3))
+ torch.testing.assert_close(expected_slice, out[:3, :3], rtol=1e-3)
self.check_target_size(image_processor, pad_input, images, outputs, raw_outputs, raw_outputs_flipped)
diff --git a/tests/peft_integration/test_peft_integration.py b/tests/peft_integration/test_peft_integration.py
index 6d6330d3d4f6..61b60901ca1a 100644
--- a/tests/peft_integration/test_peft_integration.py
+++ b/tests/peft_integration/test_peft_integration.py
@@ -166,7 +166,7 @@ def test_peft_enable_disable_adapters(self):
peft_logits_enabled = peft_model(dummy_input).logits
- self.assertTrue(torch.allclose(peft_logits, peft_logits_enabled, atol=1e-12, rtol=1e-12))
+ torch.testing.assert_close(peft_logits, peft_logits_enabled, rtol=1e-12, atol=1e-12)
self.assertFalse(torch.allclose(peft_logits_enabled, peft_logits_disabled, atol=1e-12, rtol=1e-12))
def test_peft_add_adapter(self):
diff --git a/tests/quantization/bnb/test_4bit.py b/tests/quantization/bnb/test_4bit.py
index 76094d0fe862..f7e3c8382980 100644
--- a/tests/quantization/bnb/test_4bit.py
+++ b/tests/quantization/bnb/test_4bit.py
@@ -684,7 +684,7 @@ def test_serialization(self, quant_type="nf4", double_quant=True, safe_serializa
encoded_input = tokenizer(self.input_text, return_tensors="pt").to(torch_device)
out_0 = model_0(**encoded_input)
out_1 = model_1(**encoded_input)
- self.assertTrue(torch.allclose(out_0["logits"], out_1["logits"], atol=0.05))
+ torch.testing.assert_close(out_0["logits"], out_1["logits"], rtol=0.05, atol=0.05)
# comparing generate() outputs
encoded_input = tokenizer(self.input_text, return_tensors="pt").to(torch_device)
diff --git a/tests/quantization/ggml/test_ggml.py b/tests/quantization/ggml/test_ggml.py
index 12648a8aac32..e00186618ae6 100644
--- a/tests/quantization/ggml/test_ggml.py
+++ b/tests/quantization/ggml/test_ggml.py
@@ -15,6 +15,8 @@
import tempfile
import unittest
+from parameterized import parameterized
+
from transformers import AddedToken, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
require_gguf,
@@ -23,20 +25,205 @@
slow,
torch_device,
)
-from transformers.utils import is_torch_available
+from transformers.utils import is_gguf_available, is_torch_available
if is_torch_available():
import torch
+if is_gguf_available():
+ from gguf import GGMLQuantizationType as QuantType
+
+
+@require_gguf
+@require_torch_gpu
+@slow
+class GgufQuantizationTests(unittest.TestCase):
+ """
+ Test cases for weights dequantization with GGUF models.
+    Note: The quantization names should stay aligned with `GGMLQuantizationType` in gguf-py:
+    https://github.com/ggerganov/llama.cpp/blob/4b0c638b9a68f577cb2066b638c9f622d91ee661/gguf-py/gguf/constants.py#L1545-L1576
+    So quantization types like Q4_K_M or Q4_K_S shouldn't be added to these tests.
+ """
+
+ example_text = "Hello"
+
+ def run_gguf_model(self, gguf_model_id: str, gguf_filename: str, expected_text: str):
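+        # Shared helper: load tokenizer and model from the given GGUF file, generate 10 new tokens
+        # from `example_text`, and compare the decoded output against `expected_text`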
+ tokenizer = AutoTokenizer.from_pretrained(gguf_model_id, gguf_file=gguf_filename)
+ model = AutoModelForCausalLM.from_pretrained(gguf_model_id, gguf_file=gguf_filename).to(torch_device)
+
+ text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
+ out = model.generate(**text, max_new_tokens=10)
+ self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), expected_text)
+
+ @parameterized.expand(
+ [
+ # standard quants
+ ("Q4_0", "Hello, World!\n\nStep 3: Add"),
+ ("Q5_0", "Hello, World!\n\n5. Use a library"),
+ ("Q8_0", "Hello, World!\n\n5. Use a library"),
+ ],
+ )
+ def test_standard_quants(self, quant_type: str, expected_text: str):
+ gguf_model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
+ filename_format = "tinyllama-1.1b-chat-v1.0.{quant_type}.gguf"
+ gguf_filename = filename_format.format(quant_type=quant_type)
+ self.run_gguf_model(gguf_model_id, gguf_filename, expected_text)
+
+ # k-quants
+ @parameterized.expand(
+ [
+ ("Q2_K", "Hello, I'm a 22 year old female"),
+ ("Q3_K", "Hello\n\nI am trying to create a simple program that"),
+ ("Q4_K", "Hello\n\nI am trying to create a simple program that"),
+ ("Q5_K", "Helloveda is a 1999 Indian"),
+ ("Q6_K", "Hello\n\nI am trying to create a simple program that"),
+ ],
+ )
+ def test_k_quants(self, quant_type: str, expected_text: str):
+ gguf_model_id = "legraphista/Qwen2.5-0.5B-Instruct-IMat-GGUF"
+ filename_format = "Qwen2.5-0.5B-Instruct.{quant_type}.gguf"
+ gguf_filename = filename_format.format(quant_type=quant_type)
+ self.run_gguf_model(gguf_model_id, gguf_filename, expected_text)
+
+ @parameterized.expand(
+ [
+ # i-matrix
+ ("IQ1_S", "Hello, I'm a friend of mine, I"),
+ ("IQ1_M", "Hello, I am interested in purching a copy of"),
+ ("IQ2_XXS", "Hello, I'm a software engineer. I'"),
+ ("IQ2_XS", "Hello World!\n\n```\n<|user|"),
+ ("IQ2_S", "Hello World!\n\n```\n<|user|"),
+ ("IQ3_XXS", "Hello, I am interested in your product. Can you"),
+ ("IQ4_XS", "Hello, world!\n\n5. Using a loop"),
+ ("IQ3_S", "Hello, World!\n\n5. Python:\n"),
+ ("IQ4_NL", "Hello, world!\n\n5. Using a loop"),
+ ],
+ )
+ def test_imatrix_quants(self, quant_type: str, expected_text: str):
+ gguf_model_id = "duyntnet/TinyLlama-1.1B-Chat-v1.0-imatrix-GGUF"
+ filename_format = "TinyLlama-1.1B-Chat-v1.0-{quant_type}.gguf"
+ gguf_filename = filename_format.format(quant_type=quant_type)
+ self.run_gguf_model(gguf_model_id, gguf_filename, expected_text)
+
@require_gguf
@require_torch_gpu
@slow
class GgufIntegrationTests(unittest.TestCase):
+ """
+ Test cases for basic interoperability with GGUF models:
+ - Tokenization
+ - Model dtype casting and serialization
+ """
+
+ example_text = "Hello"
original_model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
- model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
- imatrix_model_id = "duyntnet/TinyLlama-1.1B-Chat-v1.0-imatrix-GGUF"
+ gguf_model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
+ gguf_filename = "tinyllama-1.1b-chat-v1.0.{quant_type}.gguf"
+
+ def test_tokenization_xnli(self):
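+        # Check that the tokenizer reconstructed from GGUF metadata matches the original tokenizer on
+        # Go source code (code_x_glue) and on 100 multilingual XNLI premises, with and without added special tokens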
+ import tqdm
+ from datasets import load_dataset
+
+ q8_0_gguf_model_id = self.gguf_filename.format(quant_type=QuantType.Q8_0.name)
+ gguf_tokenizer = AutoTokenizer.from_pretrained(self.gguf_model_id, gguf_file=q8_0_gguf_model_id)
+ original_tokenizer = AutoTokenizer.from_pretrained(self.original_model_id)
+
+ dataset = load_dataset("google/code_x_glue_ct_code_to_text", "go")
+ for item in tqdm.tqdm(dataset["validation"]):
+ string = item["code"]
+ encoded1 = gguf_tokenizer.encode(string)
+ encoded2 = original_tokenizer.encode(string)
+
+ self.assertEqual(encoded1, encoded2)
+
+ decoded1 = gguf_tokenizer.decode(encoded1, skip_special_tokens=True)
+ decoded2 = original_tokenizer.decode(encoded2, skip_special_tokens=True)
+
+ self.assertEqual(decoded1, decoded2)
+
+ dataset = load_dataset("facebook/xnli", "all_languages")
+
+ for i, item in enumerate(tqdm.tqdm(dataset["train"].select(range(100)))):
+ for string in item["premise"].values():
+ encoded1 = gguf_tokenizer.encode(string)
+ encoded2 = original_tokenizer.encode(string)
+
+ self.assertEqual(encoded1, encoded2)
+
+ decoded1 = gguf_tokenizer.decode(encoded1, skip_special_tokens=True)
+ decoded2 = original_tokenizer.decode(encoded2, skip_special_tokens=True)
+
+ self.assertEqual(decoded1, decoded2)
+
+ # With special tokens
+ gguf_tokenizer = AutoTokenizer.from_pretrained(self.gguf_model_id, gguf_file=q8_0_gguf_model_id)
+ original_tokenizer = AutoTokenizer.from_pretrained(self.original_model_id)
+
+ gguf_tokenizer.add_special_tokens(
+ {"additional_special_tokens": [AddedToken("", rstrip=False, lstrip=False)]}
+ )
+ original_tokenizer.add_special_tokens(
+ {"additional_special_tokens": [AddedToken("", rstrip=False, lstrip=False)]}
+ )
+
+ text = "Hello . Hello"
+
+ encoded1 = gguf_tokenizer.encode(text)
+ encoded2 = original_tokenizer.encode(text)
+
+ self.assertEqual(encoded1, encoded2)
+
+ decoded1 = gguf_tokenizer.decode(encoded1, skip_special_tokens=True)
+ decoded2 = original_tokenizer.decode(encoded2, skip_special_tokens=True)
+
+ self.assertEqual(decoded1, decoded2)
+
+ def test_q2_k_serialization(self):
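+        # Round-trip check: generate with the GGUF-loaded (dequantized) model, save_pretrained/reload,
+        # and expect the regenerated text to be identical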
+ q2_k_gguf_model_id = self.gguf_filename.format(quant_type=QuantType.Q2_K.name)
+ EXPECTED_TEXT = "Hello, World!\n\n[10:0"
+
+ tokenizer = AutoTokenizer.from_pretrained(self.gguf_model_id, gguf_file=q2_k_gguf_model_id)
+ model = AutoModelForCausalLM.from_pretrained(self.gguf_model_id, gguf_file=q2_k_gguf_model_id).to(torch_device)
+
+ orig_text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
+ orig_out = model.generate(**orig_text, max_new_tokens=10)
+ self.assertEqual(tokenizer.decode(orig_out[0], skip_special_tokens=True), EXPECTED_TEXT)
+
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ model.save_pretrained(tmpdirname)
+ tokenizer.save_pretrained(tmpdirname)
+
+ model = AutoModelForCausalLM.from_pretrained(tmpdirname).to(torch_device)
+ tokenizer = AutoTokenizer.from_pretrained(tmpdirname)
+
+ text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
+ out = model.generate(**text, max_new_tokens=10)
+
+ self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
+
+ def test_q6_k_fp16(self):
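+        # Loading a GGUF file with torch_dtype=float16 should yield fp16 dequantized weights (checked via lm_head)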
+ q6_k_gguf_model_id = self.gguf_filename.format(quant_type=QuantType.Q6_K.name)
+
+ tokenizer = AutoTokenizer.from_pretrained(self.gguf_model_id, gguf_file=q6_k_gguf_model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+ self.gguf_model_id, gguf_file=q6_k_gguf_model_id, torch_dtype=torch.float16
+ ).to(torch_device)
+
+ self.assertTrue(model.lm_head.weight.dtype == torch.float16)
+
+ text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
+ out = model.generate(**text, max_new_tokens=10)
+
+ EXPECTED_TEXT = "Hello, World!\n\nStep 3: Add"
+ self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
+
+
+@require_gguf
+@require_torch_gpu
+@slow
+class GgufModelTests(unittest.TestCase):
mistral_model_id = "TheBloke/Mistral-7B-Instruct-v0.2-GGUF"
qwen2_model_id = "Qwen/Qwen1.5-0.5B-Chat-GGUF"
qwen2moe_model_id = "gdax/Qwen1.5-MoE-A2.7B_gguf"
@@ -68,34 +255,13 @@ class GgufIntegrationTests(unittest.TestCase):
original_gemma2_model_id = "google/gemma-2-2b-it"
gemma2_model_id = "bartowski/gemma-2-2b-it-GGUF"
- # standard quants
- q4_0_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q4_0.gguf"
- q5_0_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q5_0.gguf"
- q8_0_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q8_0.gguf"
- # k-quants
- q2_k_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q2_K.gguf"
- q3_k_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q3_K_L.gguf"
- q4_k_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
- q5_k_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q5_K_M.gguf"
- q6_k_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q6_K.gguf"
- q4_k_m_stablelm_model_id = "stablelm-3b-4e1t.q4_k_m.gguf"
- # imatrix
- iq1_m_gguf_model_id = "TinyLlama-1.1B-Chat-v1.0-IQ1_M.gguf"
- iq1_s_gguf_model_id = "TinyLlama-1.1B-Chat-v1.0-IQ1_S.gguf"
- iq2_s_gguf_model_id = "TinyLlama-1.1B-Chat-v1.0-IQ2_S.gguf"
- iq2_xs_gguf_model_id = "TinyLlama-1.1B-Chat-v1.0-IQ2_XS.gguf"
- iq2_xxs_gguf_model_id = "TinyLlama-1.1B-Chat-v1.0-IQ2_XXS.gguf"
- iq3_s_gguf_model_id = "TinyLlama-1.1B-Chat-v1.0-IQ3_S.gguf"
- iq3_xxs_gguf_model_id = "TinyLlama-1.1B-Chat-v1.0-IQ3_XXS.gguf"
- iq4_xs_gguf_model_id = "TinyLlama-1.1B-Chat-v1.0-IQ4_XS.gguf"
- iq4_nl_gguf_model_id = "TinyLlama-1.1B-Chat-v1.0-IQ4_NL.gguf"
-
q4_0_phi3_model_id = "Phi-3-mini-4k-instruct-q4.gguf"
q4_0_mistral_model_id = "mistral-7b-instruct-v0.2.Q4_0.gguf"
q4_0_qwen2_model_id = "qwen1_5-0_5b-chat-q4_0.gguf"
q8_qwen2moe_model_id = "Qwen1.5-MoE-A2.7B_Q8_0.gguf"
q4_llama3_model_id = "Meta-Llama-3-8B-Q4_K_M.gguf"
fp16_bloom_model_id = "bloom-560m.fp16.gguf"
+ q4_k_m_stablelm_model_id = "stablelm-3b-4e1t.q4_k_m.gguf"
fp16_stablelm2_model_id = "stablelm-2-1_6b.fp16.gguf"
q8_bloom_model_id = "bloom-560m.q8_0.gguf"
f16_tinyllama_model_id = "TinyLlama-1.1B-Chat-v1.0.FP16.gguf"
@@ -120,237 +286,6 @@ class GgufIntegrationTests(unittest.TestCase):
example_text = "Hello"
- def test_q2_k(self):
- tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q2_k_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q2_k_gguf_model_id).to(torch_device)
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, World!\n\n[10:0"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_q2_k_serialization(self):
- tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q2_k_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q2_k_gguf_model_id).to(torch_device)
-
- with tempfile.TemporaryDirectory() as tmpdirname:
- model.save_pretrained(tmpdirname)
- tokenizer.save_pretrained(tmpdirname)
-
- model = AutoModelForCausalLM.from_pretrained(tmpdirname).to(torch_device)
- tokenizer = AutoTokenizer.from_pretrained(tmpdirname)
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, World!\n\n[10:0"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_q3_k(self):
- tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q3_k_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q3_k_gguf_model_id).to(torch_device)
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, World!\n\n```\n<|user"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_q5_0(self):
- tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q5_0_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q5_0_gguf_model_id).to(torch_device)
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, World!\n\n5. Use a library"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_q5_k(self):
- tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q5_k_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q5_k_gguf_model_id).to(torch_device)
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, World!\n\nStep 3: Add"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_q4_0(self):
- tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q4_0_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q4_0_gguf_model_id).to(torch_device)
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, World!\n\nStep 3: Add"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_q4_k_m(self):
- tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q4_k_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q4_k_gguf_model_id).to(torch_device)
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, World!\n\n5. Python:\n"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_q6_k(self):
- tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q6_k_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q6_k_gguf_model_id).to(torch_device)
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, World!\n\nStep 3: Add"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_q6_k_fp16(self):
- tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q6_k_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(
- self.model_id, gguf_file=self.q6_k_gguf_model_id, torch_dtype=torch.float16
- ).to(torch_device)
-
- self.assertTrue(model.lm_head.weight.dtype == torch.float16)
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, World!\n\nStep 3: Add"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_q8_0(self):
- tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q8_0_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q8_0_gguf_model_id).to(torch_device)
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, World!\n\n5. Use a library"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_iq1_s(self):
- tokenizer = AutoTokenizer.from_pretrained(self.imatrix_model_id, gguf_file=self.iq1_s_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.imatrix_model_id, gguf_file=self.iq1_s_gguf_model_id).to(
- torch_device
- )
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, I'm a friend of mine, I"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_iq1_m(self):
- tokenizer = AutoTokenizer.from_pretrained(self.imatrix_model_id, gguf_file=self.iq1_m_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.imatrix_model_id, gguf_file=self.iq1_m_gguf_model_id).to(
- torch_device
- )
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, I am interested in purching a copy of"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_iq2_s(self):
- tokenizer = AutoTokenizer.from_pretrained(self.imatrix_model_id, gguf_file=self.iq2_s_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.imatrix_model_id, gguf_file=self.iq2_s_gguf_model_id).to(
- torch_device
- )
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello World!\n\n```\n<|user|"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_iq2_xs(self):
- tokenizer = AutoTokenizer.from_pretrained(self.imatrix_model_id, gguf_file=self.iq2_xs_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.imatrix_model_id, gguf_file=self.iq2_xs_gguf_model_id).to(
- torch_device
- )
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello World!\n\n```\n<|user|"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_iq2_xxs(self):
- tokenizer = AutoTokenizer.from_pretrained(self.imatrix_model_id, gguf_file=self.iq2_xxs_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.imatrix_model_id, gguf_file=self.iq2_xxs_gguf_model_id).to(
- torch_device
- )
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, I'm a software engineer. I'"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_iq3_s(self):
- tokenizer = AutoTokenizer.from_pretrained(self.imatrix_model_id, gguf_file=self.iq3_s_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.imatrix_model_id, gguf_file=self.iq3_s_gguf_model_id).to(
- torch_device
- )
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, World!\n\n5. Python:\n"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_iq3_xxs(self):
- tokenizer = AutoTokenizer.from_pretrained(self.imatrix_model_id, gguf_file=self.iq3_xxs_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.imatrix_model_id, gguf_file=self.iq3_xxs_gguf_model_id).to(
- torch_device
- )
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, I am interested in your product. Can you"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_iq4_xs(self):
- tokenizer = AutoTokenizer.from_pretrained(self.imatrix_model_id, gguf_file=self.iq4_xs_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.imatrix_model_id, gguf_file=self.iq4_xs_gguf_model_id).to(
- torch_device
- )
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, world!\n\n5. Using a loop"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_iq4_nl(self):
- tokenizer = AutoTokenizer.from_pretrained(self.imatrix_model_id, gguf_file=self.iq4_nl_gguf_model_id)
- model = AutoModelForCausalLM.from_pretrained(self.imatrix_model_id, gguf_file=self.iq4_nl_gguf_model_id).to(
- torch_device
- )
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, world!\n\n5. Using a loop"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
- def test_f16(self):
- tokenizer = AutoTokenizer.from_pretrained(self.tinyllama_model_id, gguf_file=self.f16_tinyllama_model_id)
- model = AutoModelForCausalLM.from_pretrained(
- self.tinyllama_model_id, gguf_file=self.f16_tinyllama_model_id
- ).to(torch_device)
-
- text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
- out = model.generate(**text, max_new_tokens=10)
-
- EXPECTED_TEXT = "Hello, World!\n\n5. Node.js"
- self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
-
def test_mistral_q4_0(self):
tokenizer = AutoTokenizer.from_pretrained(self.mistral_model_id, gguf_file=self.q4_0_mistral_model_id)
model = AutoModelForCausalLM.from_pretrained(
@@ -784,7 +719,7 @@ def test_mamba_weights_conversion_fp16(self):
if "mixer.A_log" in layer_name:
# we should increase tolerance after exponential reversing
# and performing np.log(-weights) operation as numbers are slightly different
- torch.testing.assert_close(original_params, converted_state_dict[layer_name], atol=1e-3, rtol=1e-3)
+ torch.testing.assert_close(original_params, converted_state_dict[layer_name], rtol=1e-3, atol=1e-3)
else:
torch.testing.assert_close(original_params, converted_state_dict[layer_name])
else:
@@ -904,60 +839,3 @@ def test_gemma2_weights_conversion_fp32(self):
torch.testing.assert_close(original_params, converted_state_dict[layer_name])
else:
raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
-
- def test_tokenization_xnli(self):
- import tqdm
- from datasets import load_dataset
-
- gguf_tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q8_0_gguf_model_id)
- original_tokenizer = AutoTokenizer.from_pretrained(self.original_model_id)
-
- dataset = load_dataset("google/code_x_glue_ct_code_to_text", "go")
- for item in tqdm.tqdm(dataset["validation"]):
- string = item["code"]
- encoded1 = gguf_tokenizer.encode(string)
- encoded2 = original_tokenizer.encode(string)
-
- self.assertEqual(encoded1, encoded2)
-
- decoded1 = gguf_tokenizer.decode(encoded1, skip_special_tokens=True)
- decoded2 = original_tokenizer.decode(encoded2, skip_special_tokens=True)
-
- self.assertEqual(decoded1, decoded2)
-
- dataset = load_dataset("facebook/xnli", "all_languages")
-
- for i, item in enumerate(tqdm.tqdm(dataset["train"].select(range(100)))):
- for string in item["premise"].values():
- encoded1 = gguf_tokenizer.encode(string)
- encoded2 = original_tokenizer.encode(string)
-
- self.assertEqual(encoded1, encoded2)
-
- decoded1 = gguf_tokenizer.decode(encoded1, skip_special_tokens=True)
- decoded2 = original_tokenizer.decode(encoded2, skip_special_tokens=True)
-
- self.assertEqual(decoded1, decoded2)
-
- # With special tokens
- gguf_tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q8_0_gguf_model_id)
- original_tokenizer = AutoTokenizer.from_pretrained(self.original_model_id)
-
- gguf_tokenizer.add_special_tokens(
- {"additional_special_tokens": [AddedToken("", rstrip=False, lstrip=False)]}
- )
- original_tokenizer.add_special_tokens(
- {"additional_special_tokens": [AddedToken("", rstrip=False, lstrip=False)]}
- )
-
- text = "Hello . Hello"
-
- encoded1 = gguf_tokenizer.encode(text)
- encoded2 = original_tokenizer.encode(text)
-
- self.assertEqual(encoded1, encoded2)
-
- decoded1 = gguf_tokenizer.decode(encoded1, skip_special_tokens=True)
- decoded2 = original_tokenizer.decode(encoded2, skip_special_tokens=True)
-
- self.assertEqual(decoded1, decoded2)
diff --git a/tests/test_image_processing_common.py b/tests/test_image_processing_common.py
index 1f2d1d0fe7e1..b722624564bf 100644
--- a/tests/test_image_processing_common.py
+++ b/tests/test_image_processing_common.py
@@ -181,10 +181,7 @@ def test_slow_fast_equivalence(self):
encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
- self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))
- self.assertLessEqual(
- torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 1e-3
- )
+ torch.testing.assert_close(encoding_slow.pixel_values, encoding_fast.pixel_values, rtol=1e-1, atol=1e-2)
@require_vision
@require_torch
@@ -493,7 +490,7 @@ def test_can_compile_fast_image_processor(self):
image_processor = torch.compile(image_processor, mode="reduce-overhead")
output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt")
- self.assertTrue(torch.allclose(output_eager.pixel_values, output_compiled.pixel_values, atol=1e-4))
+ torch.testing.assert_close(output_eager.pixel_values, output_compiled.pixel_values, rtol=1e-4, atol=1e-4)
class AnnotationFormatTestMixin:
@@ -549,7 +546,7 @@ def _compare(a, b) -> None:
for idx in range(len(a)):
_compare(a[idx], b[idx])
elif isinstance(a, torch.Tensor):
- self.assertTrue(torch.allclose(a, b, atol=1e-3))
+ torch.testing.assert_close(a, b, rtol=1e-3, atol=1e-3)
elif isinstance(a, str):
self.assertEqual(a, b)
diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py
index 8a14ba666900..98709ba3b84a 100755
--- a/tests/test_modeling_common.py
+++ b/tests/test_modeling_common.py
@@ -1771,7 +1771,7 @@ def test_feed_forward_chunking(self):
model.eval()
hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]
- self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))
+ torch.testing.assert_close(hidden_states_no_chunk, hidden_states_with_chunk, rtol=1e-3, atol=1e-3)
def test_resize_position_vector_embeddings(self):
if not self.test_resize_position_embeddings:
@@ -1898,7 +1898,7 @@ def test_resize_tokens_embeddings(self):
else:
old_embeddings_mean = torch.mean(model_embed.weight.data[:-10, :], axis=0)
new_embeddings_mean = torch.mean(model_embed.weight.data[-10:, :], axis=0)
- torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, atol=1e-3, rtol=1e-1)
+ torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, rtol=1e-3, atol=1e-3)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
if not is_deepspeed_zero3_enabled():
@@ -2006,7 +2006,7 @@ def test_resize_tokens_embeddings(self):
else:
old_embeddings_mean = torch.mean(model_embed.weight.data[:-10, :], axis=0)
new_embeddings_mean = torch.mean(model_embed.weight.data[-10:, :], axis=0)
- torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, atol=1e-3, rtol=1e-1)
+ torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, rtol=1e-3, atol=1e-3)
@require_deepspeed
@require_torch_accelerator
@@ -2081,7 +2081,7 @@ def test_resize_embeddings_untied(self):
else:
old_embeddings_mean = torch.mean(output_embeds.weight.data[:-10, :], axis=0)
new_embeddings_mean = torch.mean(output_embeds.weight.data[-10:, :], axis=0)
- torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, atol=1e-3, rtol=1e-1)
+ torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, rtol=1e-3, atol=1e-3)
# check if the old bias mean close to added bias mean.
if output_embeds.bias is not None:
if is_deepspeed_zero3_enabled():
@@ -2092,7 +2092,7 @@ def test_resize_embeddings_untied(self):
old_bias_mean = torch.mean(output_embeds.bias.data[:-10], axis=0)
new_bias_mean = torch.mean(output_embeds.bias.data[-10:], axis=0)
- torch.testing.assert_close(old_bias_mean, new_bias_mean, atol=1e-5, rtol=1e-2)
+ torch.testing.assert_close(old_bias_mean, new_bias_mean, rtol=1e-5, atol=1e-5)
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
@@ -3049,7 +3049,7 @@ def test_inputs_embeds_matches_input_ids(self):
out_embeds = model(
inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **inputs
)[0]
- self.assertTrue(torch.allclose(out_embeds, out_ids))
+ torch.testing.assert_close(out_embeds, out_ids)
@require_non_xpu
@require_torch_multi_gpu
@@ -3170,10 +3170,10 @@ def cast_to_device(dictionary, device):
for value, parallel_value in zip(output, parallel_output):
if isinstance(value, torch.Tensor):
- self.assertTrue(torch.allclose(value, parallel_value.to("cpu"), atol=1e-7))
+ torch.testing.assert_close(value, parallel_value.to("cpu"), rtol=1e-7, atol=1e-7)
elif isinstance(value, (Tuple, List)):
for value_, parallel_value_ in zip(value, parallel_value):
- self.assertTrue(torch.allclose(value_, parallel_value_.to("cpu"), atol=1e-7))
+ torch.testing.assert_close(value_, parallel_value_.to("cpu"), rtol=1e-7, atol=1e-7)
def check_device_map_is_respected(self, model, device_map):
for param_name, param in model.named_parameters():
@@ -3229,9 +3229,12 @@ def test_disk_offload_bin(self):
new_output = new_model(**inputs_dict_class)
if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple):
- self.assertTrue(torch.allclose(a, b, atol=1e-5) for a, b in zip(base_output[0], new_output[0]))
+ [
+ torch.testing.assert_close(a, b, rtol=1e-5, atol=1e-5)
+ for a, b in zip(base_output[0], new_output[0])
+ ]
else:
- self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
+ torch.testing.assert_close(base_output[0], new_output[0], rtol=1e-5, atol=1e-5)
@require_accelerate
@mark.accelerate_tests
@@ -3264,9 +3267,12 @@ def test_disk_offload_safetensors(self):
new_output = new_model(**inputs_dict_class)
if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple):
- self.assertTrue(torch.allclose(a, b, atol=1e-5) for a, b in zip(base_output[0], new_output[0]))
+ [
+ torch.testing.assert_close(a, b, rtol=1e-5, atol=1e-5)
+ for a, b in zip(base_output[0], new_output[0])
+ ]
else:
- self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
+ torch.testing.assert_close(base_output[0], new_output[0], rtol=1e-5, atol=1e-5)
@require_accelerate
@mark.accelerate_tests
@@ -3303,9 +3309,12 @@ def test_cpu_offload(self):
new_output = new_model(**inputs_dict_class)
if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple):
- self.assertTrue(torch.allclose(a, b, atol=1e-5) for a, b in zip(base_output[0], new_output[0]))
+ [
+ torch.testing.assert_close(a, b, rtol=1e-5, atol=1e-5)
+ for a, b in zip(base_output[0], new_output[0])
+ ]
else:
- self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
+ torch.testing.assert_close(base_output[0], new_output[0], rtol=1e-5, atol=1e-5)
@require_accelerate
@mark.accelerate_tests
@@ -3341,9 +3350,12 @@ def test_model_parallelism(self):
new_output = new_model(**inputs_dict_class)
if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple):
- self.assertTrue(torch.allclose(a, b, atol=1e-5) for a, b in zip(base_output[0], new_output[0]))
+ [
+ torch.testing.assert_close(a, b, rtol=1e-5, atol=1e-5)
+ for a, b in zip(base_output[0], new_output[0])
+ ]
else:
- self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
+ torch.testing.assert_close(base_output[0], new_output[0], rtol=1e-5, atol=1e-5)
def test_problem_types(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -4555,10 +4567,10 @@ def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
logits_padded = res_padded.logits[inputs_dict["attention_mask"].bool()]
logits_padfree = res_padfree.logits[0]
- torch.testing.assert_close(logits_padded.argmax(-1), logits_padfree.argmax(-1), atol=0, rtol=0)
+ torch.testing.assert_close(logits_padded.argmax(-1), logits_padfree.argmax(-1), rtol=0, atol=0)
# acceptable numerical instability
tol = torch.finfo(torch.float16).eps
- torch.testing.assert_close(logits_padded, logits_padfree, atol=tol, rtol=tol)
+ torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol)
@is_pt_tf_cross_test
def test_tf_from_pt_safetensors(self):
@@ -4780,7 +4792,7 @@ def test_forward_with_logits_to_keep(self):
self.assertEqual(tuple(last_token_logits.shape), (batch_size, 1, vocab_size))
# Assert the last tokens are actually the same (except for the natural fluctuation due to order of FP ops)
- self.assertTrue(torch.allclose(all_logits[:, -1:, :], last_token_logits, atol=1e-5))
+ torch.testing.assert_close(all_logits[:, -1:, :], last_token_logits, rtol=1e-5, atol=1e-5)
@require_torch_gpu
def test_flex_attention_with_grads(self):
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index 7df721b3f3cf..3de94511fb8e 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -609,8 +609,8 @@ def check_best_model_has_been_loaded(
state_dict = safetensors.torch.load_file(os.path.join(checkpoint, SAFE_WEIGHTS_NAME))
best_model.load_state_dict(state_dict)
best_model.to(trainer.args.device)
- self.assertTrue(torch.allclose(best_model.a, trainer.model.a))
- self.assertTrue(torch.allclose(best_model.b, trainer.model.b))
+ torch.testing.assert_close(best_model.a, trainer.model.a)
+ torch.testing.assert_close(best_model.b, trainer.model.b)
metrics = trainer.evaluate()
self.assertEqual(metrics[metric], best_value)
@@ -698,8 +698,8 @@ def setUp(self):
def check_trained_model(self, model, alternate_seed=False):
# Checks a training seeded with learning_rate = 0.1
(a, b) = self.alternate_trained_model if alternate_seed else self.default_trained_model
- self.assertTrue(torch.allclose(model.a, a))
- self.assertTrue(torch.allclose(model.b, b))
+ torch.testing.assert_close(model.a, a)
+ torch.testing.assert_close(model.b, b)
def test_reproducible_training(self):
# Checks that training worked, model trained and seed made a reproducible training.
@@ -1567,8 +1567,7 @@ def test_neftune(self):
# Check that we get identical embeddings just in case
emb1 = trainer.model.get_input_embeddings()(dummy_input)
emb2 = trainer.model.get_input_embeddings()(dummy_input)
-
- self.assertTrue(torch.allclose(emb1, emb2), "Neftune noise is still applied!")
+ torch.testing.assert_close(emb1, emb2)
def test_logging_inf_nan_filter(self):
config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
diff --git a/tests/trainer/test_trainer_utils.py b/tests/trainer/test_trainer_utils.py
index a730ff07ccb2..4a525d9faa81 100644
--- a/tests/trainer/test_trainer_utils.py
+++ b/tests/trainer/test_trainer_utils.py
@@ -162,7 +162,7 @@ def test_label_smoothing(self):
label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels)
log_probs = -nn.functional.log_softmax(random_logits, dim=-1)
expected_loss = (1 - epsilon) * loss + epsilon * log_probs.mean()
- self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss))
+ torch.testing.assert_close(label_smoothed_loss, expected_loss)
# With a few -100 labels
random_labels[0, 1] = -100
@@ -178,7 +178,7 @@ def test_label_smoothing(self):
log_probs[2, 1] = 0.0
log_probs[2, 3] = 0.0
expected_loss = (1 - epsilon) * loss + epsilon * log_probs.sum() / (num_labels * 17)
- self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss))
+ torch.testing.assert_close(label_smoothed_loss, expected_loss)
def test_group_by_length(self):
# Get some inputs of random lengths
diff --git a/tests/utils/test_activations.py b/tests/utils/test_activations.py
index bc2034187210..e19b575d1523 100644
--- a/tests/utils/test_activations.py
+++ b/tests/utils/test_activations.py
@@ -29,7 +29,7 @@ class TestActivations(unittest.TestCase):
def test_gelu_versions(self):
x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
torch_builtin = get_activation("gelu")
- self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
+ torch.testing.assert_close(gelu_python(x), torch_builtin(x))
self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
def test_gelu_10(self):
@@ -43,7 +43,7 @@ def test_gelu_10(self):
clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
- self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
+ torch.testing.assert_close(y_gelu * clipped_mask, y_gelu_10 * clipped_mask)
def test_get_activation(self):
get_activation("gelu")
diff --git a/tests/utils/test_modeling_utils.py b/tests/utils/test_modeling_utils.py
index 84b5ebbb24ce..dfc311005096 100644
--- a/tests/utils/test_modeling_utils.py
+++ b/tests/utils/test_modeling_utils.py
@@ -742,14 +742,14 @@ def test_checkpoint_sharding_local_bin(self):
# Finally, check the model can be reloaded
new_model = BertModel.from_pretrained(tmp_dir)
for p1, p2 in zip(model.parameters(), new_model.parameters()):
- self.assertTrue(torch.allclose(p1, p2))
+ torch.testing.assert_close(p1, p2)
def test_checkpoint_sharding_from_hub(self):
model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded")
# the model above is the same as the model below, just a sharded version.
ref_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
for p1, p2 in zip(model.parameters(), ref_model.parameters()):
- self.assertTrue(torch.allclose(p1, p2))
+ torch.testing.assert_close(p1, p2)
def test_checkpoint_variant_local_bin(self):
model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
@@ -769,7 +769,7 @@ def test_checkpoint_variant_local_bin(self):
new_model = BertModel.from_pretrained(tmp_dir, variant="v2")
for p1, p2 in zip(model.parameters(), new_model.parameters()):
- self.assertTrue(torch.allclose(p1, p2))
+ torch.testing.assert_close(p1, p2)
def test_checkpoint_variant_local_sharded_bin(self):
model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
@@ -793,7 +793,7 @@ def test_checkpoint_variant_local_sharded_bin(self):
new_model = BertModel.from_pretrained(tmp_dir, variant="v2")
for p1, p2 in zip(model.parameters(), new_model.parameters()):
- self.assertTrue(torch.allclose(p1, p2))
+ torch.testing.assert_close(p1, p2)
@require_safetensors
def test_checkpoint_variant_local_safe(self):
@@ -814,7 +814,7 @@ def test_checkpoint_variant_local_safe(self):
new_model = BertModel.from_pretrained(tmp_dir, variant="v2")
for p1, p2 in zip(model.parameters(), new_model.parameters()):
- self.assertTrue(torch.allclose(p1, p2))
+ torch.testing.assert_close(p1, p2)
@require_safetensors
def test_checkpoint_variant_local_sharded_safe(self):
@@ -839,7 +839,7 @@ def test_checkpoint_variant_local_sharded_safe(self):
new_model = BertModel.from_pretrained(tmp_dir, variant="v2")
for p1, p2 in zip(model.parameters(), new_model.parameters()):
- self.assertTrue(torch.allclose(p1, p2))
+ torch.testing.assert_close(p1, p2)
def test_checkpoint_loading_only_safetensors_available(self):
# Test that the loading behaviour is as expected when only safetensor checkpoints are available
@@ -872,7 +872,7 @@ def test_checkpoint_loading_only_safetensors_available(self):
new_model = BertModel.from_pretrained(tmp_dir)
for p1, p2 in zip(model.parameters(), new_model.parameters()):
- self.assertTrue(torch.allclose(p1, p2))
+ torch.testing.assert_close(p1, p2)
def test_checkpoint_loading_only_pytorch_bin_available(self):
# Test that the loading behaviour is as expected when only pytorch checkpoints are available
@@ -905,7 +905,7 @@ def test_checkpoint_loading_only_pytorch_bin_available(self):
new_model = BertModel.from_pretrained(tmp_dir)
for p1, p2 in zip(model.parameters(), new_model.parameters()):
- self.assertTrue(torch.allclose(p1, p2))
+ torch.testing.assert_close(p1, p2)
def test_checkpoint_variant_hub(self):
with tempfile.TemporaryDirectory() as tmp_dir:
@@ -1068,7 +1068,7 @@ def test_from_pretrained_disk_offload_task_model(self):
)
outputs2 = new_model_with_offload(inputs)
- self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu()))
+ torch.testing.assert_close(outputs1.logits.cpu(), outputs2.logits.cpu())
# With state dict temp offload
new_model_with_offload = AutoModelForCausalLM.from_pretrained(
@@ -1078,7 +1078,7 @@ def test_from_pretrained_disk_offload_task_model(self):
offload_state_dict=True,
)
outputs2 = new_model_with_offload(inputs)
- self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu()))
+ torch.testing.assert_close(outputs1.logits.cpu(), outputs2.logits.cpu())
@require_accelerate
@mark.accelerate_tests
@@ -1108,7 +1108,7 @@ def test_from_pretrained_disk_offload_derived_to_base_model(self):
tmp_dir, device_map=device_map, offload_folder=offload_folder
)
outputs2 = base_model_with_offload(inputs)
- self.assertTrue(torch.allclose(outputs1[0].cpu(), outputs2[0].cpu()))
+ torch.testing.assert_close(outputs1[0].cpu(), outputs2[0].cpu())
# With state dict temp offload
new_model_with_offload = AutoModel.from_pretrained(
@@ -1118,7 +1118,7 @@ def test_from_pretrained_disk_offload_derived_to_base_model(self):
offload_state_dict=True,
)
outputs2 = new_model_with_offload(inputs)
- self.assertTrue(torch.allclose(outputs1[0].cpu(), outputs2[0].cpu()))
+ torch.testing.assert_close(outputs1[0].cpu(), outputs2[0].cpu())
@slow
@require_torch
@@ -1169,7 +1169,7 @@ def test_save_model_with_device_map_cpu(self):
saved_model = AutoModelForCausalLM.from_pretrained(tmp_dir, device_map="cpu")
saved_model_output = saved_model(inputs)[0]
- self.assertTrue(torch.allclose(output, saved_model_output))
+ torch.testing.assert_close(output, saved_model_output)
@require_accelerate
@mark.accelerate_tests
@@ -1205,8 +1205,8 @@ def test_save_offloaded_model(self):
saved_model = AutoModelForCausalLM.from_pretrained(tmp_dir, device_map=device_map)
postsaved_output = saved_model(inputs)[0]
- self.assertTrue(torch.allclose(output, presaved_output, atol=1e-4))
- self.assertTrue(torch.allclose(presaved_output, postsaved_output))
+ torch.testing.assert_close(output, presaved_output, rtol=1e-4, atol=1e-4)
+ torch.testing.assert_close(presaved_output, postsaved_output)
@require_safetensors
def test_use_safetensors(self):
@@ -1278,7 +1278,7 @@ def test_safetensors_save_and_load(self):
# Check models are equal
for p1, p2 in zip(model.parameters(), new_model.parameters()):
- self.assertTrue(torch.allclose(p1, p2))
+ torch.testing.assert_close(p1, p2)
@require_safetensors
def test_safetensors_load_from_hub(self):
@@ -1287,7 +1287,7 @@ def test_safetensors_load_from_hub(self):
# Check models are equal
for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()):
- self.assertTrue(torch.allclose(p1, p2))
+ torch.testing.assert_close(p1, p2)
@require_safetensors
def test_safetensors_save_and_load_sharded(self):
@@ -1305,7 +1305,7 @@ def test_safetensors_save_and_load_sharded(self):
# Check models are equal
for p1, p2 in zip(model.parameters(), new_model.parameters()):
- self.assertTrue(torch.allclose(p1, p2))
+ torch.testing.assert_close(p1, p2)
@require_safetensors
def test_safetensors_load_from_hub_sharded(self):
@@ -1314,7 +1314,7 @@ def test_safetensors_load_from_hub_sharded(self):
# Check models are equal
for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()):
- self.assertTrue(torch.allclose(p1, p2))
+ torch.testing.assert_close(p1, p2)
def test_base_model_to_head_model_load(self):
base_model = BaseModel(PretrainedConfig())
@@ -1324,7 +1324,7 @@ def test_base_model_to_head_model_load(self):
# Can load a base model in a model with head
model = ModelWithHead.from_pretrained(tmp_dir)
for p1, p2 in zip(model.base.parameters(), base_model.parameters()):
- self.assertTrue(torch.allclose(p1, p2))
+ torch.testing.assert_close(p1, p2)
# It doesn't work if the state dict has a mix of keys of the head and base without prefix though.
base_state_dict = base_model.state_dict()
@@ -1615,7 +1615,7 @@ def test_model_from_pretrained_from_mlx(self):
with torch.no_grad():
outputs = model(input_ids)
outputs_from_saved = new_model(input_ids)
- self.assertTrue(torch.allclose(outputs_from_saved["logits"], outputs["logits"]))
+ torch.testing.assert_close(outputs_from_saved["logits"], outputs["logits"])
def test_warning_for_beta_gamma_parameters(self):
class TestGammaBetaNorm(torch.nn.Module):