
Commit 1e14449

zhaochenyang20 authored and committed
[fsdp, sglang] fix: use aggressive empty cache instead (volcengine#3136)
### What does this PR do?

> Add **concise** overview of what this PR aims to achieve or accomplish. Reference related GitHub issues and PRs that help with the review.

### Checklist Before Starting

- [ ] Search for similar PRs. Paste at least one query link here: ...
- [ ] Format the PR title as `[{modules}] {type}: {description}` (This will be checked by the CI)
  - `{modules}` include `fsdp`, `megatron`, `sglang`, `vllm`, `rollout`, `trainer`, `ci`, `training_utils`, `recipe`, `hardware`, `deployment`, `ray`, `worker`, `single_controller`, `misc`, `perf`, `model`, `algo`, `env`, `tool`, `ckpt`, `doc`, `data`
  - If this PR involves multiple modules, separate them with `,` like `[megatron, fsdp, doc]`
  - `{type}` is in `feat`, `fix`, `refactor`, `chore`, `test`
  - If this PR breaks any API (CLI arguments, config, function signature, etc.), add `[BREAKING]` to the beginning of the title.
  - Example: `[BREAKING][fsdp, megatron] feat: dynamic batching`

### Test

> For changes that can not be tested by CI (e.g., algorithm implementation, new model support), validate by experiment(s) and show results like training curve plots, evaluation results, etc.

### API and Usage Example

> Demonstrate how the API changes if any, and provide usage example(s) if possible.

```python
# Add code snippet or script demonstrating how to use this
```

### Design & Code Changes

> Demonstrate the high-level design if this PR is complex, and list the specific changes.

### Checklist Before Submitting

> [!IMPORTANT]
> Please check all the following items before requesting a review, otherwise the reviewer might deprioritize this PR for review.

- [ ] Read the [Contribute Guide](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md).
- [ ] Apply [pre-commit checks](https://github.com/volcengine/verl/blob/main/CONTRIBUTING.md#code-linting-and-formatting): `pre-commit install && pre-commit run --all-files --show-diff-on-failure --color=always`
- [ ] Add / Update [the documentation](https://github.com/volcengine/verl/tree/main/docs).
- [ ] Add unit or end-to-end test(s) to [the CI workflow](https://github.com/volcengine/verl/tree/main/.github/workflows) to cover all the code. If not feasible, explain why: ...
- [ ] Once your PR is ready for CI, send a message in [the `ci-request` channel](https://verl-project.slack.com/archives/C091TCESWB1) in [the `verl` Slack workspace](https://join.slack.com/t/verl-project/shared_invite/zt-3855yhg8g-CTkqXu~hKojPCmo7k_yXTQ). (If not accessible, please try [the Feishu group (飞书群)](https://applink.larkoffice.com/client/chat/chatter/add_by_link?link_token=772jd4f1-cd91-441e-a820-498c6614126a).)

---------

Co-authored-by: zhaochenyang20 <[email protected]>
1 parent 01e4dea commit 1e14449

File tree (4 files changed: +19, -5 lines)

- examples/grpo_trainer/run_qwen2_5_vl-7b-sglang.sh
- examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn.sh
- verl/workers/sharding_manager/fsdp_sglang.py
- verl/workers/sharding_manager/megatron_sglang.py

examples/grpo_trainer/run_qwen2_5_vl-7b-sglang.sh

Lines changed: 4 additions & 0 deletions

```diff
@@ -41,6 +41,10 @@ python3 -m verl.trainer.main_ppo \
     trainer.logger='["console","wandb"]' \
     trainer.project_name='verl_grpo_example_geo3k' \
     trainer.experiment_name='qwen2_5_vl_7b_function_rm' \
+    actor_rollout_ref.ref.strategy=fsdp2 \
+    actor_rollout_ref.actor.strategy=fsdp2 \
+    critic.strategy=fsdp2 \
+    reward_model.strategy=fsdp2 \
     trainer.n_gpus_per_node=8 \
     trainer.nnodes=1 \
     trainer.save_freq=20 \
```

examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn.sh

Lines changed: 4 additions & 0 deletions

```diff
@@ -55,6 +55,10 @@ python3 -m verl.trainer.main_ppo \
     trainer.save_freq=-1 \
     trainer.test_freq=20 \
     trainer.val_before_train=True \
+    actor_rollout_ref.ref.strategy=fsdp2 \
+    actor_rollout_ref.actor.strategy=fsdp2 \
+    critic.strategy=fsdp2 \
+    reward_model.strategy=fsdp2 \
     data.train_files=$HOME/data/gsm8k/train.parquet \
     data.val_files=$HOME/data/gsm8k/test.parquet \
     actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \
```

verl/workers/sharding_manager/fsdp_sglang.py

Lines changed: 6 additions & 3 deletions

```diff
@@ -28,6 +28,7 @@
 from verl.protocol import all_gather_data_proto
 from verl.utils.device import get_device_id, get_torch_device, set_expandable_segments
 from verl.utils.fsdp_utils import fsdp_version, load_fsdp_model_to_gpu, offload_fsdp_model_to_cpu
+from verl.utils.memory_utils import aggressive_empty_cache
 from verl.utils.model import convert_weight_keys
 from verl.utils.profiler import GPUMemoryLogger, log_gpu_memory_usage, simple_timer
 from verl.utils.torch_functional import check_device_is_available
@@ -124,7 +125,7 @@ async def release_memory(self):
 
     @GPUMemoryLogger(role="FSDPSGLangShardingManager enter", logger=logger)
     async def wake_up(self):
-        get_torch_device().empty_cache()
+        aggressive_empty_cache(force_sync=True)
 
         log_gpu_memory_usage("Before state_dict() in sharding manager memory", logger=logger)
         if self.offload_param:
@@ -146,6 +147,8 @@ async def wake_up(self):
 
         # sglang need to set _set_allocator_settings to False
         logger.debug("fsdp sglang sharding_manager _set_allocator_settings to False")
+        # Note(chenyang): SGLang is using torch memory pool to manage memory
+        # which is incompatible with expandable segments
         set_expandable_segments(False)
 
         if self.device_mesh["infer_tp"].get_local_rank() == 0 and self.rollout_config.free_cache_engine:
@@ -161,7 +164,7 @@ async def wake_up(self):
         log_gpu_memory_usage("After sync model weights in sharding manager", logger=logger)
 
         del params
-        get_torch_device().empty_cache()
+        aggressive_empty_cache(force_sync=True)
         log_gpu_memory_usage("After del state_dict and empty_cache in sharding manager", logger=logger)
 
         if (
@@ -187,7 +190,7 @@ async def sleep(self):
         self.module.train()
 
         # add empty cache after each compute
-        get_torch_device().empty_cache()
+        aggressive_empty_cache(force_sync=True)
 
         # always set _set_allocator_settings to True when using sglang
         # it is required by fsdp2 to avoid oom
```
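The hunks above toggle `set_expandable_segments` (imported from `verl.utils.device`) off before waking SGLang, because SGLang's torch memory pool is incompatible with expandable segments, and the comments note it is turned back on for FSDP2 to avoid OOM. The helper's implementation is not part of this commit; purely as a hedged illustration, such a helper would typically route through PyTorch's private allocator-settings API (name, signature, and accepted format vary across PyTorch versions):

```python
# Hypothetical sketch only; the actual verl.utils.device.set_expandable_segments
# may be implemented differently (e.g. via get_torch_device() for non-CUDA backends).
import torch


def set_expandable_segments(enable: bool) -> None:
    """Toggle the CUDA caching allocator's expandable-segments mode."""
    if not torch.cuda.is_available():
        return
    # Private PyTorch API; present in recent releases but subject to change.
    torch.cuda.memory._set_allocator_settings(f"expandable_segments:{enable}")
```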

verl/workers/sharding_manager/megatron_sglang.py

Lines changed: 5 additions & 2 deletions

```diff
@@ -34,6 +34,7 @@
     offload_megatron_model_to_cpu,
     per_tensor_generator,
 )
+from verl.utils.memory_utils import aggressive_empty_cache
 from verl.utils.profiler import GPUMemoryLogger, log_gpu_memory_usage, simple_timer
 from verl.workers.rollout.sglang_rollout.utils import get_named_tensor_buckets
 
@@ -163,6 +164,8 @@ async def release_memory(self):
 
     @GPUMemoryLogger(role="MegatronSGLangShardingManager enter", logger=logger)
     async def wake_up(self):
+        aggressive_empty_cache(force_sync=True)
+
         if self.offload_param:
             load_megatron_model_to_gpu(self.actor_module, load_grad=False)
         if self.bridge is not None:
@@ -178,7 +181,7 @@ async def wake_up(self):
         await self.update_weights(per_tensor_param)
         if self.offload_param:
             offload_megatron_model_to_cpu(self.actor_module)
-        get_torch_device().empty_cache()
+        aggressive_empty_cache(force_sync=True)
         # important: need to manually set the random states of each tp to be identical.
         if self.device_mesh is not None:
             self.torch_random_states = get_torch_device().get_rng_state()
@@ -194,7 +197,7 @@ async def sleep(self):
         for model in self.actor_module:
             model.train()
         # add empty cache after each compute
-        get_torch_device().empty_cache()
+        aggressive_empty_cache(force_sync=True)
 
         # restore random states
         if self.device_mesh is not None:
```
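Across both sharding managers, the plain `get_torch_device().empty_cache()` calls are replaced with `aggressive_empty_cache(force_sync=True)` from `verl.utils.memory_utils`. That helper's body is not shown in this commit; the following is only a sketch of what an "aggressive" variant usually adds over a bare `empty_cache()` (assumed behavior, not the actual verl implementation): Python garbage collection first, an optional device synchronization when `force_sync` is set, and an IPC cache sweep.

```python
# Hypothetical sketch; the real verl.utils.memory_utils.aggressive_empty_cache
# may differ (e.g. use get_torch_device() to also cover NPU and other backends).
import gc

import torch


def aggressive_empty_cache(force_sync: bool = False) -> None:
    """Release as much cached GPU memory as possible.

    Run Python GC so tensors held only by collectable objects are freed,
    then return cached allocator blocks to the driver. With force_sync=True,
    wait for in-flight kernels so their memory is actually reclaimable.
    """
    # Drop Python-level references that may still pin GPU tensors.
    gc.collect()

    if torch.cuda.is_available():
        if force_sync:
            # Ensure pending kernels finish before emptying the cache.
            torch.cuda.synchronize()
        # Return unused cached blocks to the CUDA driver.
        torch.cuda.empty_cache()
        # Also release cross-process (IPC) cached memory.
        torch.cuda.ipc_collect()
```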
