Skip Falcon 7B GGML Test (#35783)
skip test
MekkCyber authored Jan 20, 2025
1 parent 6894728 commit b80e334
Showing 1 changed file with 1 addition and 0 deletions.
tests/quantization/ggml/test_ggml.py: 1 addition, 0 deletions
@@ -636,6 +636,7 @@ def test_falcon7b_q2_k(self):
         EXPECTED_TEXT = 'Hello,\nI am trying to use the "get_post_meta"'
         self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
 
+    @unittest.skip("The test causes a torch.OutOfMemoryError on the CI but it passes with enough memory")
     def test_falcon7b_weights_conversion_fp16(self):
         quantized_model = AutoModelForCausalLM.from_pretrained(
             self.falcon7b_model_id_fp16,
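For context, the added decorator is plain Python unittest. Below is a minimal, self-contained sketch of the same pattern; the class, test names, helper, and 16 GiB threshold are illustrative assumptions and not part of the commit. It shows the unconditional skip used here and, as an alternative, a conditional skip keyed to available GPU memory.

import unittest

import torch


def enough_gpu_memory(min_gib: float = 16.0) -> bool:
    # Assumed helper (not in the commit): does CUDA device 0 have at least `min_gib` GiB of total memory?
    if not torch.cuda.is_available():
        return False
    total_bytes = torch.cuda.get_device_properties(0).total_memory
    return total_bytes >= min_gib * 1024**3


class MemoryHeavyTests(unittest.TestCase):
    @unittest.skip("The test causes a torch.OutOfMemoryError on the CI but it passes with enough memory")
    def test_always_skipped(self):
        # Never executed; unittest reports the test as skipped with the reason above.
        self.fail("never reached")

    @unittest.skipIf(not enough_gpu_memory(), "needs a GPU with at least 16 GiB of memory")
    def test_runs_only_with_enough_memory(self):
        # The skip condition is evaluated once, when the module is imported.
        self.assertTrue(torch.cuda.is_available())


if __name__ == "__main__":
    unittest.main()

The unconditional @unittest.skip used in the commit is the simpler choice when the failure is environment-wide on the CI; a skipIf-style guard would re-enable the test automatically on machines that do have enough memory.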
