
Commit 3e2ebc3

respect kwargs spacing
1 parent: 056d558


46 files changed: +0, -1796 lines

tests/saving/gpt-oss-merge/test_merged_model.py

Lines changed: 0 additions & 2 deletions

@@ -20,8 +20,6 @@ def safe_remove_directory(path):
         return False


-pass
-
 print("🔥 Loading the 16-bit merged model from disk...")
 merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
     model_name = "./gpt-oss-finetuned-merged",
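
For orientation, the loader call this hunk sits next to looks roughly as follows. This is a hedged sketch: only model_name appears in the diff; max_seq_length and load_in_4bit are illustrative assumptions. Note the kwarg = value spacing the commit title refers to.

from unsloth import FastLanguageModel

print("🔥 Loading the 16-bit merged model from disk...")
merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
    model_name = "./gpt-oss-finetuned-merged",  # path shown in the hunk above
    max_seq_length = 2048,                      # illustrative value, not in the diff
    load_in_4bit = False,                       # the checkpoint is merged 16-bit weights
)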

tests/saving/gpt-oss-merge/train_and_merge.py

Lines changed: 0 additions & 2 deletions

@@ -21,8 +21,6 @@ def safe_remove_directory(path):
         return False


-pass
-
 # This tokenizer will be used by the mapping function
 tokenizer = None

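
The merge step itself falls outside this hunk; in unsloth it is usually done with save_pretrained_merged, sketched here under that assumption so the directory lines up with what test_merged_model.py loads:

# Hedged sketch of the merge-and-save step, assuming unsloth's usual helper.
model.save_pretrained_merged(
    "./gpt-oss-finetuned-merged",
    tokenizer,
    save_method = "merged_16bit",  # fold LoRA weights into a full 16-bit checkpoint
)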

tests/saving/language_models/test_save_merged_grpo_model.py

Lines changed: 0 additions & 2 deletions

@@ -495,8 +495,6 @@ def formatting_prompts_func(examples):
         "text": texts,
     }

-pass
-
 limo_train = limo_train.map(
     formatting_prompts_func,
     batched = True,
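
The formatting_prompts_func referenced by this hunk follows the standard batched datasets.map pattern: take a batch of rows, emit a "text" column. A rough sketch under that assumption (the "messages" column name and the chat-template call are illustrative, not from the diff):

def formatting_prompts_func(examples):
    # Batched mapping: build one training string per example in the batch.
    texts = [
        tokenizer.apply_chat_template(convo, tokenize = False)
        for convo in examples["messages"]  # hypothetical column name
    ]
    return {
        "text": texts,
    }

limo_train = limo_train.map(
    formatting_prompts_func,
    batched = True,
)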

tests/saving/text_to_speech_models/test_csm.py

Lines changed: 0 additions & 3 deletions

@@ -84,9 +84,6 @@ def find_lora_base_model(model_to_inspect):
     return current


-pass
-
-
 config_model = find_lora_base_model(model) if isinstance(model, PeftModel) else model

 assert (
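
The same find_lora_base_model helper recurs in the lasa, orpheus, and whisper tests below. The diff only shows its final return current; a hypothetical reconstruction of what such an unwrapping loop typically does with peft's wrapper attributes:

from peft import PeftModel

def find_lora_base_model(model_to_inspect):
    # Peel wrapper layers until the underlying transformer is reached.
    current = model_to_inspect
    while True:
        if isinstance(current, PeftModel):
            current = current.base_model            # PeftModel -> LoraModel
        elif current.__class__.__name__ == "LoraModel":
            current = current.model                 # LoraModel -> base transformer
        else:
            return current

The call sites then run assertions against the unwrapped model's config, as the assert ( context line above suggests.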

tests/saving/text_to_speech_models/test_lasa.py

Lines changed: 0 additions & 3 deletions

@@ -91,9 +91,6 @@ def find_lora_base_model(model_to_inspect):
     return current


-pass
-
-
 config_model = find_lora_base_model(model) if isinstance(model, PeftModel) else model

 assert (

tests/saving/text_to_speech_models/test_orpheus.py

Lines changed: 0 additions & 3 deletions

@@ -86,9 +86,6 @@ def find_lora_base_model(model_to_inspect):
     return current


-pass
-
-
 config_model = find_lora_base_model(model) if isinstance(model, PeftModel) else model

 assert (

tests/saving/text_to_speech_models/test_whisper.py

Lines changed: 0 additions & 3 deletions

@@ -84,9 +84,6 @@ def find_lora_base_model(model_to_inspect):
     return current


-pass
-
-
 config_model = find_lora_base_model(model) if isinstance(model, PeftModel) else model

 assert (

tests/saving/vision_models/test_save_merge_qwen2.5vl32B_model_ocr_benchmark.py

Lines changed: 0 additions & 2 deletions

@@ -189,8 +189,6 @@ def find_lora_base_model(model_to_inspect):
     return current


-pass
-
 base = find_lora_base_model(model)

 print((base.__class__.__name__))

tests/saving/vision_models/test_save_merge_vision_model_ocr_benchmark.py

Lines changed: 0 additions & 2 deletions

@@ -189,8 +189,6 @@ def find_lora_base_model(model_to_inspect):
     return current


-pass
-
 base = find_lora_base_model(model)

 print((base.__class__.__name__))

unsloth/__init__.py

Lines changed: 0 additions & 8 deletions

@@ -41,7 +41,6 @@
         f"Please restructure your imports with 'import unsloth' at the top of your file.",
         stacklevel = 2,
     )
-    pass

 # Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so
 # enabling it will require much more work, so we have to prioritize. Please understand!

@@ -68,7 +67,6 @@
     )
 except Exception as exception:
     raise exception
-pass

 import importlib.util
 from pathlib import Path

@@ -97,7 +95,6 @@
     raise ImportError(
         f"Unsloth: Please install unsloth_zoo via `pip install unsloth_zoo` Also error = {str(e)}"
     )
-pass

 from unsloth_zoo.device_type import (
     is_hip,

@@ -147,14 +144,12 @@ def is_bf16_supported():
         return SUPPORTS_BFLOAT16

     torch.cuda.is_bf16_supported = is_bf16_supported
-    pass
 elif DEVICE_TYPE == "hip":
     SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported()
 elif DEVICE_TYPE == "xpu":
     # torch.xpu.is_bf16_supported() does not have including_emulation
     # set SUPPORTS_BFLOAT16 as torch.xpu.is_bf16_supported()
     SUPPORTS_BFLOAT16 = torch.xpu.is_bf16_supported()
-pass

 # For Gradio HF Spaces?
 # if "SPACE_AUTHOR_NAME" not in os.environ and "SPACE_REPO_NAME" not in os.environ:

@@ -206,7 +201,6 @@ def is_bf16_supported():
         )[::-1][0]
         latest_cuda = possible_cudas[latest_cuda]
         os.system(f"ldconfig /usr/local/{latest_cuda}")
-    pass

     importlib.reload(bnb)
     importlib.reload(triton)

@@ -230,15 +224,13 @@ def is_bf16_supported():
             "Also try `sudo ldconfig /usr/local/cuda-xx.x` - find the latest cuda version.\n"
             "Unsloth will still run for now, but maybe it might crash - let's hope it works!"
         )
-    pass
 elif DEVICE_TYPE == "hip":
     # NO-OP for rocm device
     pass
 elif DEVICE_TYPE == "xpu":
     import bitsandbytes as bnb

     # TODO: check triton for intel installed properly.
-    pass

 from .models import *
 from .models import __version__
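
The cuda branch in the -147,14 hunk caches bf16 support and then monkey-patches torch.cuda.is_bf16_supported with the cached answer. A minimal standalone sketch of that pattern, with the DEVICE_TYPE dispatch omitted and the probe simplified:

import torch

if torch.cuda.is_available():
    # Probe the device once at import time...
    SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported()

    def is_bf16_supported():
        # ...then answer from the cached result on every later call.
        return SUPPORTS_BFLOAT16

    torch.cuda.is_bf16_supported = is_bf16_supported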
