Skip to content

Commit d47b243

Browse files
malay-nagda and Amir Hussein
authored and committed
perf scripts 25.07 refactor (NVIDIA-NeMo#13875)
* executor cleanup Signed-off-by: Malay Nagda <[email protected]> * restructure perf configs Signed-off-by: Malay Nagda <[email protected]> * Apply isort and black reformatting Signed-off-by: malay-nagda <[email protected]> * restructured methods Signed-off-by: Malay Nagda <[email protected]> * Apply isort and black reformatting Signed-off-by: malay-nagda <[email protected]> * restructured executor slurm Signed-off-by: Malay Nagda <[email protected]> * Apply isort and black reformatting Signed-off-by: malay-nagda <[email protected]> * cleanup Signed-off-by: Malay Nagda <[email protected]> * gpu arg in executor Signed-off-by: Malay Nagda <[email protected]> * import check Signed-off-by: Malay Nagda <[email protected]> * ub registration Signed-off-by: Malay Nagda <[email protected]> * Apply isort and black reformatting Signed-off-by: malay-nagda <[email protected]> * ub registration Signed-off-by: Malay Nagda <[email protected]> * ub registration Signed-off-by: Malay Nagda <[email protected]> * perf env vars Signed-off-by: Malay Nagda <[email protected]> * Apply isort and black reformatting Signed-off-by: malay-nagda <[email protected]> * log cfgs Signed-off-by: Malay Nagda <[email protected]> * cleanup Signed-off-by: Malay Nagda <[email protected]> * cleanup Signed-off-by: Malay Nagda <[email protected]> --------- Signed-off-by: Malay Nagda <[email protected]> Signed-off-by: malay-nagda <[email protected]> Co-authored-by: malay-nagda <[email protected]> Signed-off-by: Amir Hussein <[email protected]>
1 parent ad0297e commit d47b243

30 files changed

+629
-663
lines changed

scripts/performance/diffusion/pretrain_flux_12b.py

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -20,13 +20,8 @@
2020
from nemo.lightning.run.plugins import NsysPlugin, PerfEnvPlugin
2121

2222
from ..argument_parser import parse_cli_args
23-
from ..utils import (
24-
args_sanity_check,
25-
get_user_configs,
26-
set_exp_logging_configs,
27-
set_primary_perf_configs,
28-
slurm_executor,
29-
)
23+
from ..executors import slurm_executor
24+
from ..helpers import args_sanity_check, get_user_configs, set_exp_logging_configs, set_primary_perf_configs
3025

3126

3227
def override_recipe_configs(
@@ -94,6 +89,7 @@ def override_recipe_configs(
9489
exp_name = f"{splitext(basename(__file__))[0]}_{args.compute_dtype}_{exp_config}"
9590

9691
executor = slurm_executor(
92+
args.gpu.lower(),
9793
args.account,
9894
args.partition,
9995
args.log_dir,

scripts/performance/executors.py

Lines changed: 119 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,119 @@
1+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import os
16+
import sys
17+
from typing import Dict, List
18+
19+
import nemo_run as run
20+
from nemo_run.config import get_nemorun_home
21+
22+
from nemo.lightning.base import DEFAULT_NEMO_CACHE_HOME
23+
from nemo.utils import logging
24+
25+
DEFAULT_NEMO_HOME = os.getenv('NEMO_HOME', DEFAULT_NEMO_CACHE_HOME)
26+
27+
28+
def slurm_executor(
    gpu: str,
    account: str,
    partition: str,
    log_dir: str,
    nodes: int,
    num_gpus_per_node: int,
    time_limit: str = "00:30:00",
    container_image: str = "nvcr.io/nvidia/nemo:dev",
    custom_mounts: List[str] = None,
    custom_env_vars: Dict[str, str] = None,
    custom_srun_args: List[str] = None,
    hf_token: str = None,
    nemo_home: str = DEFAULT_NEMO_HOME,
    wandb_key: str = None,
    network: str = None,
) -> run.SlurmExecutor:
    """
    Slurm cluster definition with appropriate cluster params and NeMo container params needed for pre-training
    and fine-tuning experiments.

    Args:
        gpu: GPU type (e.g. "h100", "b200", "gb200"); used to gate GPU-specific env vars.
        account: Slurm account name.
        partition: Slurm partition name.
        log_dir: experiment log directory; must match NEMORUN_HOME or the script exits.
        nodes: number of nodes to request.
        num_gpus_per_node: GPUs (and Slurm tasks) per node; 4 implies a GB200-style node.
        time_limit: Slurm walltime limit.
        container_image: NeMo container to run in.
        custom_mounts: extra container mounts ("src:dst"); defaults to none.
        custom_env_vars: extra env vars, override the perf defaults; defaults to none.
        custom_srun_args: extra srun arguments; defaults to none.
        hf_token: HuggingFace token; when set, online HF downloads are re-enabled.
        nemo_home: NEMO_HOME override; mounted into the container when non-default.
        wandb_key: Weights & Biases API key, exported when set.
        network: Slurm network setting passed through to the executor.

    Returns:
        A configured ``run.SlurmExecutor``.
    """
    PERF_ENV_VARS = {
        "TORCH_NCCL_AVOID_RECORD_STREAMS": "1",  # Disable caching NCCL communication buffer memory
        "TRANSFORMERS_OFFLINE": "1",  # Disable online downloads from HuggingFace (re-enabled below if hf_token is set)
        "TOKENIZERS_PARALLELISM": "False",  # Restrict warning message prints
        "NCCL_NVLS_ENABLE": "0",  # Disable NVLink SHARP to save memory
        "NVTE_FLASH_ATTN": "1",  # Enable Flash Attention, which is needed to enable cuDNN fused attention
        "NVTE_FUSED_ATTN": "1",  # Enable cuDNN fused attention
        "NEMO_LOG_MEMORY_USAGE": "1",  # Print memory allocation
    }

    err_msgs = []
    mounts = []
    # Copy so the caller's list is never mutated; None means "no custom args".
    srun_args = list(custom_srun_args or []) + ["--mpi=pmix"]

    if log_dir != get_nemorun_home():
        err_msgs.append(f"\nRun `export NEMORUN_HOME={log_dir}` in your shell environment and rerun this script.")
    if len(err_msgs) > 0:
        logging.error("\n".join(err_msgs))
        sys.exit(1)

    if gpu.lower() not in ['b200']:
        # TODO: we currently disable PYTORCH_CUDA_ALLOC_CONF="expandable_segments:True"
        # on B200 as it causes an unexpected error. Add back when issue is debugged and fixed.
        PERF_ENV_VARS["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
    PERF_ENV_VARS["NEMORUN_HOME"] = log_dir
    if wandb_key is not None:
        PERF_ENV_VARS["WANDB_API_KEY"] = wandb_key

    if num_gpus_per_node == 4:
        PERF_ENV_VARS["NCCL_NET_GDR_LEVEL"] = "PHB"  # For NCCL 2.25
        # Env var values must be strings, consistent with the rest of PERF_ENV_VARS.
        PERF_ENV_VARS["NCCL_NET_GDR_C2C"] = "1"  # For NCCL 2.26
        srun_args.append("numactl --cpunodebind=$((SLURM_LOCALID/2)) --membind=$((SLURM_LOCALID/2))")
    else:
        srun_args.append("numactl --cpunodebind=$((SLURM_LOCALID/4)) --membind=$((SLURM_LOCALID/4))")

    if nemo_home != DEFAULT_NEMO_CACHE_HOME:  # DO NOT change this to 'DEFAULT_NEMO_HOME'/'NEMO_HOME'
        PERF_ENV_VARS["NEMO_HOME"] = nemo_home
        mounts.extend([f"{nemo_home}:{nemo_home}"])
    if hf_token is not None:
        PERF_ENV_VARS.update({"HF_TOKEN": hf_token, "TRANSFORMERS_OFFLINE": "0"})

    # Custom env vars take precedence over the perf defaults.
    PERF_ENV_VARS |= custom_env_vars or {}
    mounts.extend(custom_mounts or [])

    # add --segment flag to sbatch if job uses GB200 and goes beyond one rack.
    # Pick the largest divisor of `nodes` that is <= 18 (one rack).
    segment = None
    if num_gpus_per_node == 4 and nodes > 18:
        for segment_candidate in range(18, 0, -1):
            if nodes % segment_candidate == 0:
                segment = segment_candidate
                break

    executor = run.SlurmExecutor(
        account=account,
        partition=partition,
        tunnel=run.LocalTunnel(job_dir=os.path.join(log_dir, "experiments")),
        nodes=nodes,
        ntasks_per_node=num_gpus_per_node,
        container_image=container_image,
        container_mounts=mounts,
        env_vars=PERF_ENV_VARS,
        srun_args=srun_args,
        time=time_limit,
        mem="0",
        exclusive=True,
        packager=run.GitArchivePackager(),
        segment=segment,
        network=network,
    )

    return executor

0 commit comments

Comments
 (0)