From dd8086cbfc4bbfe8c3a39a7cc86d17907c9b1d74 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Tue, 6 May 2025 08:56:18 +0200
Subject: [PATCH 1/2] set yarn metadata if present

---
 convert_hf_to_gguf.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index a47d7df6fd3..b288ae26e31 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -2761,6 +2761,11 @@ def set_gguf_parameters(self):
         if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None:
             self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size)
             logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}")
+        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
+            if self.hparams["rope_scaling"].get("type") == "yarn":
+                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
+                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
+                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
 
     _experts: list[dict[str, Tensor]] | None = None

From de1583b105c0cb9b027a582283a377f02c92bf9b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?=
Date: Tue, 6 May 2025 11:06:06 +0200
Subject: [PATCH 2/2] add comment about enabling YaRN

Co-authored-by: Xuan-Son Nguyen
---
 convert_hf_to_gguf.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index b288ae26e31..de6d55cb082 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -2761,6 +2761,8 @@ def set_gguf_parameters(self):
         if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None:
             self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size)
             logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}")
+        # YaRN is not enabled by default
+        # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts
         if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
             if self.hparams["rope_scaling"].get("type") == "yarn":
                 self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)