From 430d5ea74d91774e0366ede292e5c7f0702f4f43 Mon Sep 17 00:00:00 2001
From: Alexander Matveev
Date: Wed, 24 Jul 2024 13:44:01 +0000
Subject: [PATCH 1/2] fix awq_marlin and gptq_marlin flags

---
 vllm/model_executor/layers/quantization/awq_marlin.py  | 3 ++-
 vllm/model_executor/layers/quantization/gptq_marlin.py | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/vllm/model_executor/layers/quantization/awq_marlin.py b/vllm/model_executor/layers/quantization/awq_marlin.py
index 092f87b623e7..8bd7a0a12135 100644
--- a/vllm/model_executor/layers/quantization/awq_marlin.py
+++ b/vllm/model_executor/layers/quantization/awq_marlin.py
@@ -69,7 +69,8 @@ def from_config(cls, config: Dict[str, Any]) -> "AWQMarlinConfig":
     def override_quantization_method(cls, hf_quant_cfg,
                                      user_quant) -> Optional[str]:
         can_convert = cls.is_awq_marlin_compatible(hf_quant_cfg)
-        is_valid_user_quant = (user_quant is None or user_quant == "marlin")
+        is_valid_user_quant = (user_quant is None or user_quant == "marlin"
+                               or user_quant == "awq_marlin")
 
         if can_convert and is_valid_user_quant:
             msg = ("The model is convertible to {} during runtime."
diff --git a/vllm/model_executor/layers/quantization/gptq_marlin.py b/vllm/model_executor/layers/quantization/gptq_marlin.py
index 5b4d614ae2e7..bdcc9c3b4f0c 100644
--- a/vllm/model_executor/layers/quantization/gptq_marlin.py
+++ b/vllm/model_executor/layers/quantization/gptq_marlin.py
@@ -79,7 +79,8 @@ def override_quantization_method(cls, hf_quant_cfg,
                                      user_quant) -> Optional[str]:
         can_convert = cls.is_gptq_marlin_compatible(hf_quant_cfg)
 
-        is_valid_user_quant = (user_quant is None or user_quant == "marlin")
+        is_valid_user_quant = (user_quant is None or user_quant == "marlin"
+                               or user_quant == "gptq_marlin")
 
         if can_convert and is_valid_user_quant:
             msg = ("The model is convertible to {} during runtime."

From 4cb2a1780d97e9ad28d2b714813b0c57512805f5 Mon Sep 17 00:00:00 2001
From: Alexander Matveev
Date: Wed, 24 Jul 2024 18:47:04 +0000
Subject: [PATCH 2/2] ping

---
 vllm/model_executor/layers/quantization/awq_marlin.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vllm/model_executor/layers/quantization/awq_marlin.py b/vllm/model_executor/layers/quantization/awq_marlin.py
index 8bd7a0a12135..5ffbb8e854e8 100644
--- a/vllm/model_executor/layers/quantization/awq_marlin.py
+++ b/vllm/model_executor/layers/quantization/awq_marlin.py
@@ -25,7 +25,7 @@ class AWQMarlinConfig(QuantizationConfig):
     def __init__(self, weight_bits: int, group_size: int, has_zp: bool,
                  lm_head_quantized: bool) -> None:
         self.weight_bits = weight_bits
-        self.pack_factor = 32 // self.weight_bits  # packed into int32
+        self.pack_factor = 32 // self.weight_bits  # packed into 32bits
         self.group_size = group_size
         self.has_zp = has_zp
         self.lm_head_quantized = lm_head_quantized
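
Note on what patch 1 changes: before this fix, only user_quant being None
(auto-detect) or the legacy "marlin" alias counted as a valid user override,
so explicitly passing the kernel's own name ("awq_marlin" / "gptq_marlin")
failed the is_valid_user_quant check even when the checkpoint was convertible.
A minimal standalone sketch of the corrected predicate for the AWQ path (the
GPTQ path is symmetric; the helper name below is invented for illustration,
as the patch assigns the expression inline to is_valid_user_quant):

    def is_valid_user_quant(user_quant):
        # None means "auto-detect"; "marlin" is the legacy alias kept for
        # backwards compatibility; "awq_marlin" is the explicit kernel name
        # newly accepted by this patch.
        return user_quant is None or user_quant in ("marlin", "awq_marlin")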
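With the fix applied, requesting the Marlin kernel by its explicit name should
no longer be rejected; for example, via the Python API (the model name is
illustrative, any Marlin-compatible AWQ checkpoint applies):

    from vllm import LLM

    # "awq_marlin" here is the user_quant value checked by
    # override_quantization_method in the patched code path.
    llm = LLM(model="TheBloke/Llama-2-7B-AWQ", quantization="awq_marlin")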