Skip to content
Merged

Nightly #3169

Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit. Hold shift + click to select a range.
c39f56f
Fix mamba
danielhanchen Aug 9, 2025
4bd35c5
Update loader.py
danielhanchen Aug 9, 2025
1f0a4c3
Update vision.py
danielhanchen Aug 9, 2025
3cb9719
Update loader.py
danielhanchen Aug 9, 2025
a4081af
Merge branch 'main' into nightly
danielhanchen Aug 13, 2025
1432eac
Filter vLLM standby logs (#3131)
Datta0 Aug 13, 2025
fd1124a
Update loader.py
danielhanchen Aug 13, 2025
b78189b
Add scaler
danielhanchen Aug 13, 2025
cd2e284
Update llama.py
danielhanchen Aug 13, 2025
5e976a5
Update _utils.py
danielhanchen Aug 13, 2025
f451adf
Versioning
danielhanchen Aug 13, 2025
dafc7b8
Merge branch 'main' into nightly
danielhanchen Aug 13, 2025
bf5c402
Merge branch 'main' into nightly
danielhanchen Aug 13, 2025
3b82c42
GPT OSS fix
danielhanchen Aug 14, 2025
61366ef
GPT OSS fix
danielhanchen Aug 14, 2025
de043d9
Update loader.py
danielhanchen Aug 14, 2025
c1ef6f1
Update vision.py
danielhanchen Aug 14, 2025
f18cd26
Update vision.py
danielhanchen Aug 14, 2025
0215224
Update loader.py
danielhanchen Aug 14, 2025
5ed4a46
Update vision.py
danielhanchen Aug 15, 2025
e2ebb99
Merge branch 'main' into nightly
danielhanchen Aug 15, 2025
a222558
Update vision.py
danielhanchen Aug 15, 2025
cdcfe7d
Merge branch 'main' into nightly
danielhanchen Aug 15, 2025
6cffb1c
Update llama.py
danielhanchen Aug 15, 2025
15d33a5
Update llama.py
danielhanchen Aug 15, 2025
95a4daf
Update llama.py
danielhanchen Aug 15, 2025
4104bba
Versioning
danielhanchen Aug 15, 2025
8cc1999
Update mapper.py
danielhanchen Aug 15, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ triton = [
]

huggingface = [
"unsloth_zoo>=2025.8.4",
"unsloth_zoo>=2025.8.5",
"packaging",
"tyro",
"transformers>=4.51.3,!=4.47.0,!=4.52.0,!=4.52.1,!=4.52.2,!=4.52.3,!=4.53.0",
Expand Down Expand Up @@ -384,7 +384,7 @@ colab-ampere-torch220 = [
"flash-attn>=2.6.3",
]
colab-new = [
"unsloth_zoo>=2025.8.4",
"unsloth_zoo>=2025.8.5",
"packaging",
"tyro",
"transformers>=4.51.3,!=4.47.0,!=4.52.0,!=4.52.1,!=4.52.2,!=4.52.3,!=4.53.0",
Expand Down
2 changes: 1 addition & 1 deletion unsloth/models/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

__version__ = "2025.8.5"
__version__ = "2025.8.6"

__all__ = [
"SUPPORTS_BFLOAT16",
Expand Down
3 changes: 2 additions & 1 deletion unsloth/models/llama.py
Original file line number Diff line number Diff line change
Expand Up @@ -701,8 +701,9 @@ def LlamaModel_fast_forward(
# Fix out of bounds tokenization
if hasattr(self, "max_seq_length"):
if seq_length > self.max_seq_length:
shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
logger.warning_once(
f"Unsloth: Input IDs of length {seq_length} > the model's max sequence length of {self.max_seq_length}.\n"\
f"Unsloth: Input IDs of shape {shape} with length {seq_length} > the model's max sequence length of {self.max_seq_length}.\n"\
"We shall truncate it ourselves. It's imperative that you correct this issue first."
)
if input_ids is not None:
Expand Down
10 changes: 10 additions & 0 deletions unsloth/models/mapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -941,6 +941,16 @@
"Qwen/Qwen3-4B-Thinking-2507",
"unsloth/Qwen3-4B-Thinking-2507-bnb-4bit",
),
"unsloth/gemma-3-270m-it-unsloth-bnb-4bit" : (
"unsloth/gemma-3-270m-it",
"google/gemma-3-270m-it",
"unsloth/gemma-3-270m-it-bnb-4bit",
),
"unsloth/gemma-3-270m-unsloth-bnb-4bit" : (
"unsloth/gemma-3-270m",
"google/gemma-3-270m",
"unsloth/gemma-3-270m-bnb-4bit",
),
}

INT_TO_FLOAT_MAPPER = {}
Expand Down