from unsloth import FastLanguageModel, is_bfloat16_supported
from unsloth.chat_templates import get_chat_template
from trl import SFTTrainer, SFTConfig
from transformers import DataCollatorForSeq2Seq, TrainingArguments
from datasets import load_dataset
import torch
import sys
from pathlib import Path

# Make the repository root importable so the shared test utilities resolve.
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))

from tests.utils.cleanup_utils import safe_remove_directory

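# Flatten each conversation in the "messages" column into a single "text" string
# via the tokenizer's chat template. This reads the module-level `tokenizer`
# defined below; Python resolves the name at call time, so mapping the dataset
# after the tokenizer is created (as done in Phase 1) works as intended.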
def formatting_prompts_func(examples):
    convos = examples["messages"]
    texts = [tokenizer.apply_chat_template(convo, tokenize=False, add_generation_prompt=False) for convo in convos]
    return {"text": texts}

print(f"\n{'='*80}")
print("🔍 PHASE 1: Loading Base Model and Initial Training")
print(f"{'='*80}")

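# Prefer bfloat16 with FlashAttention-2 on GPUs that support bf16 (Ampere or
# newer); otherwise fall back to float16 with PyTorch's SDPA attention.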
if torch.cuda.is_bf16_supported():
    compute_dtype = torch.bfloat16
    attn_implementation = 'flash_attention_2'
else:
    compute_dtype = torch.float16
    attn_implementation = 'sdpa'

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/Llama-3.1-8B-Instruct",
    max_seq_length=2048,
    dtype=compute_dtype,
    load_in_4bit=True,
    load_in_8bit=False,
    full_finetuning=False,
    attn_implementation=attn_implementation
)

tokenizer = get_chat_template(
    tokenizer,
    chat_template="llama-3.1",
)

# Load a small dataset slice for quick training
dataset_train = load_dataset("allenai/openassistant-guanaco-reformatted", split="train[:100]")
dataset_train = dataset_train.map(formatting_prompts_func, batched=True)

print("✅ Base model loaded successfully!")

print(f"\n{'='*80}")
print("🔍 PHASE 2: First Fine-tuning")
print(f"{'='*80}")

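# Attach rank-16 LoRA adapters to all attention and MLP projection layers; the
# 4-bit quantized base weights stay frozen and only the adapters are trained.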
model = FastLanguageModel.get_peft_model(
    model,
    r=16,
    target_modules=['k_proj', 'q_proj', 'v_proj', 'o_proj', "gate_proj", "down_proj", "up_proj"],
    lora_alpha=16,
    lora_dropout=0,
    bias="none",
    use_gradient_checkpointing="unsloth",
    random_state=3407,
    use_rslora=False,
    loftq_config=None,
)

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset_train,
    dataset_text_field="text",
    max_seq_length=2048,
    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer),
    dataset_num_proc=2,
    packing=False,
    args=TrainingArguments(
        per_device_train_batch_size=2,
        gradient_accumulation_steps=4,
        warmup_ratio=0.1,
        max_steps=10,  # Very short training run for the test
        learning_rate=2e-4,
        fp16=not is_bfloat16_supported(),
        bf16=is_bfloat16_supported(),
        logging_steps=5,
        optim="adamw_8bit",
        lr_scheduler_type="linear",
        seed=3407,
        output_dir="outputs",
        report_to="none",
    ),
)

trainer_stats = trainer.train()
print("✅ First fine-tuning completed!")

print(f"\n{'='*80}")
print("🔍 PHASE 3: Save with Forced 4bit Merge")
print(f"{'='*80}")

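# `forced_merged_4bit` merges the LoRA adapters into the base model and writes a
# 4-bit checkpoint; a plain 16-bit merge is not possible here because the base was
# loaded in 4-bit (Phase 5 asserts exactly that). Phase 4 reloads this artifact.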
model.save_pretrained_merged(
    save_directory='./test_4bit_model',
    tokenizer=tokenizer,
    save_method="forced_merged_4bit"
)

print("✅ Model saved with forced 4bit merge!")

print(f"\n{'='*80}")
print("🔍 PHASE 4: Loading 4bit Model and Second Fine-tuning")
print(f"{'='*80}")

# Free the first model and tokenizer before loading the merged checkpoint
del model
del tokenizer
torch.cuda.empty_cache()

# Load the 4bit merged model
model_4bit, tokenizer_4bit = FastLanguageModel.from_pretrained(
    model_name="./test_4bit_model",
    max_seq_length=2048,
    load_in_4bit=True,
    load_in_8bit=False,
)

tokenizer_4bit = get_chat_template(
    tokenizer_4bit,
    chat_template="llama-3.1",
)

print("✅ 4bit model loaded successfully!")

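# Optional sanity check, kept commented out so the test stays fast. A minimal
# sketch (not part of the test's assertions) of running inference on the
# reloaded 4-bit model before new LoRA adapters are attached:
#
#   FastLanguageModel.for_inference(model_4bit)
#   prompt = [{"role": "user", "content": "Say hello in one short sentence."}]
#   input_ids = tokenizer_4bit.apply_chat_template(
#       prompt, add_generation_prompt=True, return_tensors="pt"
#   ).to(model_4bit.device)
#   output = model_4bit.generate(input_ids, max_new_tokens=32)
#   print(tokenizer_4bit.decode(output[0], skip_special_tokens=True))
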
# Add LoRA adapters to the 4bit model
model_4bit = FastLanguageModel.get_peft_model(
    model_4bit,
    r=16,
    target_modules=['k_proj', 'q_proj', 'v_proj', 'o_proj', "gate_proj", "down_proj", "up_proj"],
    lora_alpha=16,
    lora_dropout=0,
    bias="none",
    use_gradient_checkpointing="unsloth",
    random_state=3407,
    use_rslora=False,
    loftq_config=None,
)

# Second fine-tuning
trainer_4bit = SFTTrainer(
    model=model_4bit,
    tokenizer=tokenizer_4bit,
    train_dataset=dataset_train,
    dataset_text_field="text",
    max_seq_length=2048,
    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer_4bit),
    dataset_num_proc=2,
    packing=False,
    args=TrainingArguments(
        per_device_train_batch_size=2,
        gradient_accumulation_steps=4,
        warmup_ratio=0.1,
        max_steps=10,  # Very short training run for the test
        learning_rate=2e-4,
        fp16=not is_bfloat16_supported(),
        bf16=is_bfloat16_supported(),
        logging_steps=5,
        optim="adamw_8bit",
        lr_scheduler_type="linear",
        seed=3407,
        output_dir="outputs_4bit",
        report_to="none",
    ),
)

trainer_4bit.train()
print("✅ Second fine-tuning on 4bit model completed!")

print(f"\n{'='*80}")
print("🔍 PHASE 5: Testing TypeError on Regular Merge (Should Fail)")
print(f"{'='*80}")

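# A default merge (no save_method) targets 16-bit weights, which is not possible
# when the base model itself was loaded in 4-bit, so this call is expected to
# raise TypeError; Phase 6 uses the supported `forced_merged_4bit` path instead.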
try:
    model_4bit.save_pretrained_merged(
        save_directory='./test_should_fail',
        tokenizer=tokenizer_4bit
        # No save_method specified, so this defaults to a regular 16bit merge
    )
    assert False, "Expected TypeError but merge succeeded!"
except TypeError as e:
    expected_error = "Base model should be a 16bits or mxfp4 base model for a 16bit model merge. Use `save_method=forced_merged_4bit` instead"
    assert expected_error in str(e), f"Unexpected error message: {str(e)}"
    print("✅ Correct TypeError raised for 4bit base model regular merge attempt!")
    print(f"Error message: {str(e)}")

print(f"\n{'='*80}")
print("🔍 PHASE 6: Successful Save with Forced 4bit Method")
print(f"{'='*80}")

try:
    model_4bit.save_pretrained_merged(
        save_directory='./test_4bit_second',
        tokenizer=tokenizer_4bit,
        save_method="forced_merged_4bit"
    )
    print("✅ Successfully saved 4bit model with forced 4bit method!")
except Exception as e:
    assert False, f"Phase 6 failed unexpectedly: {e}"

print(f"\n{'='*80}")
print("🔍 CLEANUP")
print(f"{'='*80}")

# Remove every directory the test created
safe_remove_directory("./outputs")
safe_remove_directory("./outputs_4bit")
safe_remove_directory("./unsloth_compiled_cache")
safe_remove_directory("./test_4bit_model")
safe_remove_directory("./test_4bit_second")
safe_remove_directory("./test_should_fail")

print("✅ All tests passed successfully!")