Skip to content

Commit 9c13cb1

Browse files
committed
Update vllm_utils.py
1 parent 33302aa commit 9c13cb1

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

unsloth_zoo/vllm_utils.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1125,13 +1125,13 @@ def convert_vllm_to_huggingface(quant_state_dict, config, dtype = torch.float16,
11251125
kwargs['block_size'] = quantization_config['weight_block_size']
11261126
try:
11271127
from transformers.integrations.finegrained_fp8 import FP8Linear # This has patched forward pass for LoRA and training support. Patched in unsloth/kernels/fp8.py
1128-
except ImportError:
1128+
except:
11291129
raise ImportError("Unsloth: FP8 models need importing FP8Linear from `transformers.integrations.finegrained_fp8` but we don't see it.")
11301130
elif quant_method == 'fbgemm_fp8':
11311131
kwargs['input_scale_ub'] = torch.tensor([quantization_config['activation_scale_ub']], device = get_target_device())
11321132
try:
11331133
from transformers.integrations.fbgemm_fp8 import FbgemmFp8Linear # This has patched forward pass for LoRA and training support
1134-
except ImportError:
1134+
except:
11351135
raise ImportError("Unsloth: FP8 models need importing FbgemmFP8Linear from `transformers.integrations.fbgemm_fp8` but we don't see it.")
11361136
# Get bnb_config flags
11371137
elif bnb_config is not None:

0 commit comments

Comments (0)