Commit 09fdf27

Implement huggingface checkpoint loading
1 parent a1b067e commit 09fdf27

1 file changed: examples/pre-training/ernie/pretrain.py (+195 -14 lines)

@@ -35,6 +35,10 @@
     PdArgumentParser,
     get_last_checkpoint,
 )
+from paddleformers.trainer.unified_checkpoint import unified_checkpoint
+from paddleformers.transformers.model_utils import unwrap_model
+
+from safetensors import safe_open
 
 try:
     from paddleformers.utils.downloader import get_static_model_on_pdc
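The newly imported safe_open is what the loader added below relies on: model.safetensors.index.json maps every weight name to the shard file that holds it, and safe_open then reads just that tensor from the shard. A minimal standalone sketch of this lookup, where the checkpoint directory and the weight name are hypothetical placeholders rather than anything taken from this commit:

import json
import os

import paddle
from safetensors import safe_open

# Hypothetical checkpoint directory and weight name, for illustration only.
ckpt_dir = "/path/to/hf_checkpoint"
weight_key = "model.embed_tokens.weight"

# The HF sharded-checkpoint index maps each weight name to its shard file.
with open(os.path.join(ckpt_dir, "model.safetensors.index.json")) as f:
    weight_map = json.load(f)["weight_map"]

shard_file = weight_map[weight_key]
# safe_open reads a single tensor from the shard without loading the whole file.
with safe_open(os.path.join(ckpt_dir, shard_file), framework="numpy") as f:
    tensor = paddle.to_tensor(f.get_tensor(weight_key))
print(tensor.shape)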
@@ -202,6 +206,191 @@ def _collate_data(data, stack_fn=Stack()):
     return train_dataset, valid_dataset, test_dataset, _collate_data
 
 
+def load_huggingface_checkpoint(model, args):
+    fused_rms_norm_replace = [
+        ("self_attn.fused_rms_norm_linear.rms_norm_weight", "input_layernorm.weight"),
+        ("self_attn.fused_rms_norm_linear.linear_weight", "self_attn.qkv_proj.weight"),
+    ]
+    shared_layers_prefix = "shared_layers.embed_weight_share."
+    unnamed_layers = ["ernie.norm.weight", "lm_head.weight"]
+
+    logger.info(f"Loading huggingface checkpoint from {args.model_name_or_path}")
+    with open(
+        os.path.join(args.model_name_or_path, "model.safetensors.index.json")
+    ) as f:
+        weight_map = json.load(f)["weight_map"]
+
+    ep_degree = fleet.get_hybrid_communicate_group().get_expert_parallel_world_size()
+    ep_rank = fleet.get_hybrid_communicate_group().get_expert_parallel_rank()
+    expert_offset = (model.config.moe_num_experts // ep_degree) * ep_rank
+
+    def param_to_weight(name):
+        # for PP=1, we only need to substitute the fused_rms_norm and expert_id
+        for src, dst in fused_rms_norm_replace:
+            name = name.replace(src, dst)
+        if m := re.search(r"mlp\.experts\.(\d+)", name):
+            expert_id = expert_offset + int(m.group(1))
+            s, e = m.span()
+            name = name[:s] + f"mlp.experts.{expert_id}" + name[e:]
+        if isinstance(model, ErnieMoEForCausalLM):
+            return name
+
+        # for PP>1, we also need to handle special layers and adjust layer_idx
+        if name.startswith(shared_layers_prefix):
+            return "ernie." + name[len(shared_layers_prefix) :]
+        layer_idx, stem = name.split(".", maxsplit=1)
+        if stem == "weight":
+            return unnamed_layers.pop(0)
+        if stem.startswith("mtp"):
+            return f"ernie.{stem}"
+        return f"ernie.layers.{int(layer_idx) - 1}.{stem}"
+
+    def try_torch_format(weight_key):
+        if weight_key.startswith("ernie."):
+            weight_key = "model." + weight_key[6:]
+
+        key_decompose = [weight_key]
+        if ".up_gate_proj." in weight_key:
+            key_decompose = [
+                weight_key.replace(".up_gate_proj.", ".gate_proj."),
+                weight_key.replace(".up_gate_proj.", ".up_proj."),
+            ]
+        elif ".qkv_proj." in weight_key:
+            key_decompose = [
+                weight_key.replace(".qkv_proj.", ".q_proj."),
+                weight_key.replace(".qkv_proj.", ".k_proj."),
+                weight_key.replace(".qkv_proj.", ".v_proj."),
+            ]
+
+        tensor_decompose = []
+        for key in key_decompose:
+            if not (weight_file := weight_map.get(key)):
+                return None
+            with safe_open(
+                os.path.join(args.model_name_or_path, weight_file),
+                framework="numpy",
+            ) as f:
+                tensor = paddle.to_tensor(f.get_tensor(key))
+            if "_proj." in key or ".gate." in key:
+                tensor = tensor.T.contiguous()
+            tensor_decompose.append(tensor)
+
+        if len(tensor_decompose) == 1:
+            return tensor_decompose[0]
+        else:
+            return paddle.concat(tensor_decompose, axis=-1)
+
+    def auto_fix_shape(param, weight):
+        assert len(param.shape) == len(weight.shape), "rank not match"
+        if (
+            len(param.shape) == 2
+            and param.shape[0] == weight.shape[1]
+            and param.shape[1] == weight.shape[0]
+        ):
+            return weight.T.contiguous()
+        assert all(
+            p_dim <= w_dim for p_dim, w_dim in zip(param.shape, weight.shape)
+        ), "weight too small"
+        indices = tuple(slice(0, dim) for dim in param.shape)
+        return weight[indices].contiguous()
+
+    for name, param in model.named_parameters():
+        weight_key = param_to_weight(name)
+        if weight_file := weight_map.get(weight_key):
+            with safe_open(
+                os.path.join(args.model_name_or_path, weight_file),
+                framework="numpy",
+            ) as f:
+                weight = paddle.to_tensor(f.get_tensor(weight_key))
+        elif (weight := try_torch_format(weight_key)) is None:
+            logger.warning(
+                f"param `{name}`'s weight `{weight_key}` not found. "
+                "Skip initializing."
+            )
+            continue
+        if param.shape != weight.shape:
+            logger.warning(
+                f"param `{name}`'s shape doesn't match weight `{weight_key}`: "
+                f"{param.shape} and {weight.shape}. Auto fixing."
+            )
+            weight = auto_fix_shape(param, weight)
+        param.copy_(weight)
+
+
+def get_expected_state_dict(model, **kwargs):
+    fused_rms_norm_replace = [
+        ("self_attn.fused_rms_norm_linear.rms_norm_weight", "input_layernorm.weight"),
+        ("self_attn.fused_rms_norm_linear.linear_weight", "self_attn.qkv_proj.weight"),
+    ]
+    shared_layers_prefix = "shared_layers.embed_weight_share."
+    unnamed_layers = ["ernie.norm.weight", "lm_head.weight"]
+
+    model = unwrap_model(model)
+    hcg = fleet.get_hybrid_communicate_group()
+    ep_degree = hcg.get_expert_parallel_world_size()
+    ep_rank = hcg.get_expert_parallel_rank()
+    expert_offset = (model.config.moe_num_experts // ep_degree) * ep_rank
+
+    if model.config.head_dim is None:
+        head_dim = model.config.hidden_size // model.config.num_attention_heads
+    else:
+        head_dim = model.config.head_dim
+    q_dim = head_dim * model.config.num_attention_heads
+    kv_dim = head_dim * model.config.num_key_value_heads
+
+    def copy_attr(out, param):
+        if hasattr(param, "is_distributed"):
+            out.is_distributed = param.is_distributed
+        if hasattr(param, "no_sync"):
+            out.no_sync = param.no_sync
+        return out
+
+    def param_to_weight(name):
+        # for PP=1, we only need to substitute the fused_rms_norm and expert_id
+        for src, dst in fused_rms_norm_replace:
+            name = name.replace(src, dst)
+        if m := re.search(r"\.experts\.(\d+)\.", name):
+            expert_id = expert_offset + int(m.group(1))
+            s, e = m.span()
+            name = name[:s] + f".experts.{expert_id}." + name[e:]
+        if isinstance(model, ErnieMoEForCausalLM):
+            return name
+
+        # for PP>1, we also need to handle special layers and adjust layer_idx
+        if name.startswith(shared_layers_prefix):
+            return "ernie." + name[len(shared_layers_prefix) :]
+        layer_idx, stem = name.split(".", maxsplit=1)
+        if stem == "weight":
+            return unnamed_layers.pop(0)
+        if stem.startswith("mtp"):
+            return f"ernie.{stem}"
+        return f"ernie.layers.{int(layer_idx) - 1}.{stem}"
+
+    state_dict = {}
+    for name, param in model.state_dict().items():
+        name = param_to_weight(name)
+        if name.startswith("ernie."):
+            name = "model." + name[6:]
+
+        if "_proj." in name or ".gate." in name:
+            param = copy_attr(param.T, param)
+
+        if ".up_gate_proj." in name:
+            gate, up = param.split(2)
+            gate, up = copy_attr(gate, param), copy_attr(up, param)
+            state_dict[name.replace(".up_gate_proj.", ".gate_proj.")] = gate
+            state_dict[name.replace(".up_gate_proj.", ".up_proj.")] = up
+        elif ".qkv_proj." in name:
+            assert q_dim + kv_dim * 2 == param.shape[0]
+            state_dict[name.replace(".qkv_proj.", ".q_proj.")] = param[:q_dim]
+            state_dict[name.replace(".qkv_proj.", ".k_proj.")] = param[q_dim:-kv_dim]
+            state_dict[name.replace(".qkv_proj.", ".v_proj.")] = param[-kv_dim:]
+        else:
+            state_dict[name] = param
+
+    return state_dict
+
+
 def main():
     if set_affinity is not None:
         set_affinity_code = set_affinity()
@@ -520,21 +709,12 @@ def sname_to_tname(pp_model):
         cfg.enable_delay_scale_loss = args.enable_delay_scale_loss
         register_pp_reshard_information(cfg.num_hidden_layers)
 
-        if args.from_scratch:
-            model = ErnieMoEForCausalLMPipe(cfg)
-        else:
-            model = ErnieMoEForCausalLMPipe.from_pretrained(
-                args.model_name_or_path,
-                config=cfg,
-            )
+        model = ErnieMoEForCausalLMPipe(cfg)
     else:
-        if args.from_scratch:
-            model = ErnieMoEForCausalLM(cfg)
-        else:
-            model = ErnieMoEForCausalLM.from_pretrained(
-                args.model_name_or_path,
-                config=cfg,
-            )
+        model = ErnieMoEForCausalLM(cfg)
+
+    if not args.from_scratch:
+        load_huggingface_checkpoint(model, args)
 
     cfg = model.config
     logger.info(f"using model type:{type(model)}")
@@ -581,6 +761,7 @@ def sname_to_tname(pp_model):
     if args.do_train:
         train_result = trainer.train(resume_from_checkpoint=checkpoint)
         metrics = train_result.metrics
+        unified_checkpoint.get_expected_state_dict = get_expected_state_dict
         trainer.save_model(args.output_dir)
         trainer.log_metrics("train", metrics)
         trainer.save_metrics("train", metrics)
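The two new functions are mirror images of each other: load_huggingface_checkpoint maps HF/torch-layout tensors ([out_features, in_features], with separate q/k/v and gate/up projections) into Paddle's fused, transposed parameters, while get_expected_state_dict converts the Paddle state dict back into HF-style names and layout before saving. A small self-contained round-trip check of that layout logic, using toy dimensions and variable names of my own choosing (not the real ERNIE config):

import paddle

# Toy dimensions, for illustration only.
hidden, head_dim, num_heads, num_kv_heads = 8, 2, 4, 2
q_dim, kv_dim = head_dim * num_heads, head_dim * num_kv_heads

# HF/torch stores projection weights as [out_features, in_features].
q = paddle.rand([q_dim, hidden])
k = paddle.rand([kv_dim, hidden])
v = paddle.rand([kv_dim, hidden])

# Loading direction (as in try_torch_format): transpose each piece to Paddle's
# [in_features, out_features] layout, then concat along the output axis to get
# the fused qkv_proj weight.
fused = paddle.concat([q.T, k.T, v.T], axis=-1)  # [hidden, q_dim + 2 * kv_dim]

# Saving direction (as in get_expected_state_dict): transpose back and slice
# the fused output dimension to recover q/k/v in HF layout.
out = fused.T  # [q_dim + 2 * kv_dim, hidden]
assert paddle.allclose(out[:q_dim], q)
assert paddle.allclose(out[q_dim:-kv_dim], k)
assert paddle.allclose(out[-kv_dim:], v)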
