From b8c3607a8adedf4ed6bc151c1561411aa568f59b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roberto=20Tom=C3=A1s=20Collins?=
Date: Fri, 6 Dec 2024 18:39:08 -0500
Subject: [PATCH 1/3] add 128k yarn context for Qwen

---
 convert_hf_to_gguf.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 9f1419e29eb4e..d7a94ac1d9164 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -1989,6 +1989,15 @@ def set_vocab(self):
         except FileNotFoundError:
             self._set_vocab_gpt2()
 
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
+            if self.hparams["rope_scaling"].get("type") == "yarn":
+                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
+                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
+                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
+                self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1)
+
 
 @Model.register("Qwen2MoeForCausalLM")
 class Qwen2MoeModel(Model):

From b1fdc8c460eb5289eed41a3879e17141a92ce4af Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roberto=20Tom=C3=A1s=20Collins?=
Date: Fri, 6 Dec 2024 21:06:33 -0500
Subject: [PATCH 2/3] added property for model tensors

---
 gguf-py/gguf/constants.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index 703199fcb3f68..f0b893cf7e7dd 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -761,6 +761,7 @@ class MODEL_TENSOR(IntEnum):
         MODEL_TENSOR.TOKEN_EMBD,
         MODEL_TENSOR.OUTPUT_NORM,
         MODEL_TENSOR.OUTPUT,
+        MODEL_TENSOR.ROPE_FREQS,
         MODEL_TENSOR.ATTN_NORM,
         MODEL_TENSOR.ATTN_Q,
         MODEL_TENSOR.ATTN_K,

From 575f266167333a5ce892b7ebd9ecf8679284a11e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roberto=20Tom=C3=A1s=20Collins?=
Date: Sat, 7 Dec 2024 15:12:55 -0500
Subject: [PATCH 3/3] removing useless line

---
 convert_hf_to_gguf.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index d7a94ac1d9164..10b586d6785ab 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -1996,7 +1996,6 @@ def set_gguf_parameters(self):
                 self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
                 self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
                 self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
-                self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1)
 
 
 @Model.register("Qwen2MoeForCausalLM")
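
For context on PATCH 1/3: the new set_gguf_parameters() override keys off the "rope_scaling" block of the model's Hugging Face config.json (exposed as self.hparams). The standalone Python sketch below is not part of the patches; it only illustrates the shape of that block for a YaRN-extended Qwen checkpoint and replays the same guard logic. The numeric values are illustrative assumptions; only the key names come from the diff above.

# Standalone sketch. The example values are assumptions, not taken from the patches.
hparams = {
    "rope_scaling": {
        "type": "yarn",
        "factor": 4.0,                              # assumed: 32k * 4 = 128k target context
        "original_max_position_embeddings": 32768,  # assumed pre-YaRN context length
    },
}

rope_scaling = hparams.get("rope_scaling")
if rope_scaling is not None and "factor" in rope_scaling:
    if rope_scaling.get("type") == "yarn":
        # These are the three values the patch forwards to the GGUF writer as
        # rope-scaling metadata: scaling type, factor, original context length.
        print("scaling type :", rope_scaling["type"])
        print("factor       :", rope_scaling["factor"])
        print("orig ctx len :", rope_scaling["original_max_position_embeddings"])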