
Commit 37b1e06

Nit
1 parent 46b803e commit 37b1e06

2 files changed: +2 -12 lines changed

2 files changed

+2
-12
lines changed

src/peft/tuners/lora/model.py

Lines changed: 2 additions & 9 deletions
@@ -261,16 +261,9 @@ def _create_and_replace(
                 # adding an additional adapter: it is not automatically trainable
                 new_module.requires_grad_(False)
 
-            self._replace_module(
-                parent=parent,
-                child_name=target_name,
-                new_module=new_module,
-                child=target,
-                is_tied=is_tied,
-                adapter_name=adapter_name,
-            )
+            self._replace_module(parent=parent, child_name=target_name, new_module=new_module, child=target)
 
-    def _replace_module(self, parent, child_name, new_module, child, is_tied, adapter_name):
+    def _replace_module(self, parent, child_name, new_module, child):
         # override in LoraModel to handle quantized weights properly
 
         setattr(parent, child_name, new_module)
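
For context, a minimal sketch of the narrowed helper after this commit. It only illustrates the reduced signature (is_tied and adapter_name are gone); the standalone function and the toy usage below are illustrative assumptions, not the actual PEFT call site, which performs the module lookup inside _create_and_replace and additionally handles quantized weights.

    # Illustrative sketch only, not the real LoraModel._replace_module.
    import torch.nn as nn

    def _replace_module(parent: nn.Module, child_name: str, new_module: nn.Module, child: nn.Module) -> None:
        # After this change the helper no longer receives is_tied or adapter_name;
        # it simply swaps the child module on its parent.
        setattr(parent, child_name, new_module)

    # Hypothetical usage: swap a Linear submodule on a toy parent module.
    parent = nn.Sequential(nn.Linear(4, 4))
    target = parent[0]  # the child that would normally come from the tuner's key lookup
    _replace_module(parent, "0", nn.Linear(4, 4), target)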

tests/test_initialization.py

Lines changed: 0 additions & 3 deletions
@@ -4837,9 +4837,6 @@ def prepare_inputs_for_generation(self):
             def get_input_embeddings(self):
                 return self.model.embed_tokens
 
-            def get_output_embeddings(self):
-                return self.lm_head
-
         return CausalLM().eval().to(self.torch_device)
 
     @pytest.mark.parametrize("layer", ["lm_head", "embed_tokens", ["lm_head", "embed_tokens"]])
