Skip to content

Commit f718fc4

Browse files
committed
Bug fixes
1 parent 1c48211 commit f718fc4

File tree

5 files changed

+43
-34
lines changed

5 files changed

+43
-34
lines changed

unsloth/__init__.py

Lines changed: 18 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -57,25 +57,14 @@
5757
# Log Unsloth is being used
5858
os.environ["UNSLOTH_IS_PRESENT"] = "1"
5959

60-
# Try importing PyTorch and check version
61-
try:
62-
import torch
63-
except ModuleNotFoundError:
64-
raise ImportError(
65-
"Unsloth: Pytorch is not installed. Go to https://pytorch.org/.\n"\
66-
"We have some installation instructions on our Github page."
67-
)
68-
except Exception as exception:
69-
raise exception
70-
pass
71-
7260
import importlib.util
7361
from pathlib import Path
7462
from importlib.metadata import version as importlib_version
63+
from importlib.metadata import PackageNotFoundError
7564
# Check for unsloth_zoo
7665
try:
7766
unsloth_zoo_version = importlib_version("unsloth_zoo")
78-
if Version(unsloth_zoo_version) < Version("2025.10.12"):
67+
if Version(unsloth_zoo_version) < Version("2025.10.13"):
7968
print(
8069
"Unsloth: Please update Unsloth and Unsloth-Zoo to the latest version!\n"\
8170
"Do this via `pip install --upgrade --force-reinstall --no-cache-dir --no-deps unsloth unsloth_zoo`"
@@ -89,10 +78,22 @@
8978
# except:
9079
# raise ImportError("Unsloth: Please update unsloth_zoo via `pip install --upgrade --no-cache-dir --no-deps unsloth_zoo`")
9180
import unsloth_zoo
92-
except NotImplementedError as e:
93-
raise NotImplementedError(str(e))
94-
except Exception as e:
95-
raise ImportError(f"Unsloth: Please install unsloth_zoo via `pip install unsloth_zoo` Also error = {str(e)}")
81+
except PackageNotFoundError:
82+
raise ImportError(f"Unsloth: Please install unsloth_zoo via `pip install unsloth_zoo` then retry!")
83+
except:
84+
raise
85+
pass
86+
87+
# Try importing PyTorch and check version
88+
try:
89+
import torch
90+
except ModuleNotFoundError:
91+
raise ImportError(
92+
"Unsloth: Pytorch is not installed. Go to https://pytorch.org/.\n"\
93+
"We have some installation instructions on our Github page."
94+
)
95+
except Exception as exception:
96+
raise exception
9697
pass
9798

9899
from unsloth_zoo.device_type import (

unsloth/import_fixes.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@ def ignore_logger_messages():
137137
pass
138138

139139
def patch_ipykernel_hf_xet():
140-
# HF-XET == 1.1.10 and ipykernel == 7.0.0 causes issues
140+
# HF-XET == 1.1.10 and ipykernel == 7.0.0 / 7.0.1 causes issues
141141
# See https://github.com/huggingface/xet-core/issues/526
142142
# 2025-10-13T20:37:33.028737Z ERROR Python exception updating progress:, error: PyErr { type: <class 'LookupError'>, value: LookupError(<ContextVar name='shell_parent' at 0x7535b4cebd80>), traceback: Some(<traceback object at 0x753408489f40>) }, caller: "src/progress_update.rs:313"
143143
# at /home/runner/work/xet-core/xet-core/error_printer/src/lib.rs:28
@@ -150,12 +150,11 @@ def patch_ipykernel_hf_xet():
150150
Version(importlib_version("hf_xet")) == Version("1.1.10")
151151
) and (
152152
(ipykernel_version == Version("7.0.0")) or \
153-
(ipykernel_version == Version("7.0.1")) or \ # 7.0.1 seems to also break with LookupError: <ContextVar name='shell_parent' at 0x7a9775143ec0>
154-
(ipykernel_version >= Version("7.0.2"))
153+
(ipykernel_version == Version("7.0.1")) # 7.0.1 seems to also break with LookupError: <ContextVar name='shell_parent' at 0x7a9775143ec0>
155154
):
156155
print(
157-
"#### Unsloth: `hf_xet==1.1.10` and `ipykernel>=7.0.0` breaks progress bars. Using ASCII progress bars.\n"\
158-
"#### Unsloth: To re-enable progress bars, please downgrade to `ipykernel<7.0.0` or wait for a fix to\n"\
156+
"#### Unsloth: `hf_xet==1.1.10` and `ipykernel==7.0.0` or `ipykernel==7.0.1` breaks progress bars. Using ASCII progress bars.\n"\
157+
"#### Unsloth: To re-enable progress bars, please upgrade to `ipykernel>=7.1.0` or wait for a fix to\n"\
159158
"https://github.com/huggingface/xet-core/issues/526"
160159
)
161160
# from huggingface_hub.utils import disable_progress_bars
@@ -168,7 +167,6 @@ def patch_ipykernel_hf_xet():
168167
_tauto.trange = _tstd.trange
169168
_tnb.tqdm = _tstd.tqdm
170169
_tnb.trange = _tstd.trange
171-
pass
172170
pass
173171

174172
def patch_trackio():

unsloth/models/_utils.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
__version__ = "2025.10.11"
15+
__version__ = "2025.10.12"
1616

1717
__all__ = [
1818
"SUPPORTS_BFLOAT16",
@@ -975,12 +975,12 @@ def try_vllm_check():
975975
from huggingface_hub import snapshot_download
976976
from unsloth_zoo.rl_environments import execute_with_time_limit
977977
if has_internet():
978-
@execute_with_time_limit(120)
979978
def stats_check():
980979
with tempfile.TemporaryDirectory(ignore_cleanup_errors = True) as f:
981980
snapshot_download(f"unslothai/{statistics}", force_download = True, cache_dir = f, local_dir = f)
981+
time_limited_stats_check = execute_with_time_limit(120)(stats_check)
982982
try:
983-
stats_check()
983+
time_limited_stats_check()
984984
except TimeoutError:
985985
raise TimeoutError(
986986
"Unsloth: HuggingFace seems to be down after trying for 120 seconds :(\n"\
@@ -993,6 +993,9 @@ def stats_check():
993993
"model = FastLanguageModel.from_pretrained('unsloth/gpt-oss-20b')\n"\
994994
"```"
995995
)
996+
except:
997+
# Try no time limit check
998+
stats_check()
996999
pass
9971000
pass
9981001
pass

unsloth/models/loader.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -133,6 +133,7 @@ def from_pretrained(
133133
revision = None,
134134
use_exact_model_name = False,
135135
offload_embedding = False,
136+
float32_mixed_precision = None, # Forces float32 mixed precision
136137

137138
fast_inference = False, # uses vLLM
138139
gpu_memory_utilization = 0.5,
@@ -172,7 +173,7 @@ def from_pretrained(
172173
fullgraph = True, # No graph breaks
173174
use_exact_model_name = use_exact_model_name,
174175
offload_embedding = offload_embedding,
175-
176+
float32_mixed_precision = float32_mixed_precision,
176177
# Pass vLLM/inference parameters
177178
fast_inference = fast_inference,
178179
gpu_memory_utilization = gpu_memory_utilization,
@@ -449,7 +450,7 @@ def from_pretrained(
449450
fullgraph = True, # No graph breaks
450451
use_exact_model_name = use_exact_model_name,
451452
offload_embedding = offload_embedding,
452-
453+
float32_mixed_precision = float32_mixed_precision,
453454
# Pass vLLM/inference parameters
454455
fast_inference = fast_inference,
455456
gpu_memory_utilization = gpu_memory_utilization,
@@ -594,7 +595,7 @@ def from_pretrained(
594595
whisper_task = None,
595596
unsloth_force_compile = False,
596597
offload_embedding = False,
597-
598+
float32_mixed_precision = None, # Forces float32 mixed precision
598599
# Add the missing vLLM/inference parameters
599600
fast_inference = False, # uses vLLM
600601
gpu_memory_utilization = 0.5,
@@ -1008,7 +1009,7 @@ def from_pretrained(
10081009
whisper_task = whisper_task,
10091010
auto_config = model_config,
10101011
offload_embedding = offload_embedding,
1011-
1012+
float32_mixed_precision = float32_mixed_precision,
10121013
# Pass vLLM/inference parameters
10131014
fast_inference = fast_inference,
10141015
gpu_memory_utilization = gpu_memory_utilization,

unsloth/models/vision.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -316,6 +316,7 @@ def from_pretrained(
316316
whisper_task = None,
317317
auto_config = None,
318318
offload_embedding = False,
319+
float32_mixed_precision = None, # Forces float32 mixed precision
319320
# vLLM parameters
320321
fast_inference = False,
321322
gpu_memory_utilization = 0.5,
@@ -780,6 +781,7 @@ def from_pretrained(
780781
trust_remote_code = trust_remote_code,
781782
model_type = model_type_arch,
782783
tokenizer = tokenizer,
784+
float32_mixed_precision = float32_mixed_precision,
783785
)
784786
# Clear deleted GPU items
785787
for _ in range(3):
@@ -940,13 +942,17 @@ def post_patch_model(
940942
trust_remote_code = False,
941943
model_type = None,
942944
tokenizer = None,
945+
float32_mixed_precision = None,
943946
):
944947
full_finetuning = os.environ.get("UNSLOTH_ENABLE_FULL_FINETUNING", "0") == "1"
945948

946-
float32_mixed_precision = True
947-
if _get_dtype(dtype_from_config(model.config)) == torch.bfloat16 and full_finetuning:
948-
# Use bfloat16 precision for full finetuning
949-
float32_mixed_precision = False
949+
if type(float32_mixed_precision) is bool:
950+
# Respect whatever it was set before
951+
else:
952+
float32_mixed_precision = True
953+
if _get_dtype(dtype_from_config(model.config)) == torch.bfloat16 and full_finetuning:
954+
# Use bfloat16 precision for full finetuning
955+
float32_mixed_precision = False
950956

951957
model = prepare_model_for_training(
952958
model,

0 commit comments

Comments (0)