
Commit aa7817a

russellb authored and groenenboomj committed

Set weights_only=True when using torch.load() (vllm-project#12366)

Signed-off-by: Russell Bryant <[email protected]>

1 parent a6221a1 · commit aa7817a

File tree — 4 files changed: +10 -6 lines changed

  vllm/assets/image.py
  vllm/lora/models.py
  vllm/model_executor/model_loader/weight_utils.py
  vllm/prompt_adapter/utils.py
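Every hunk in this commit makes the same change: each torch.load() call now passes weights_only=True, so PyTorch uses its restricted unpickler and only reconstructs tensors and plain containers instead of arbitrary pickled Python objects. That closes the usual attack path where a crafted checkpoint executes code at load time. A minimal sketch of the safe round-trip (the file name below is made up for illustration and is not part of the diff):

import torch

# Save a plain state dict of tensors, then reload it with the restricted
# unpickler. weights_only=True only permits tensors, primitive types, and
# standard containers; anything else is rejected.
state = {"weight": torch.randn(4, 4), "bias": torch.zeros(4)}
torch.save(state, "checkpoint.pt")

loaded = torch.load("checkpoint.pt", map_location="cpu", weights_only=True)
print(loaded["weight"].shape)  # torch.Size([4, 4])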

vllm/assets/image.py

Lines changed: 1 addition & 1 deletion

@@ -27,4 +27,4 @@ def image_embeds(self) -> torch.Tensor:
         """
         image_path = get_vllm_public_assets(filename=f"{self.name}.pt",
                                             s3_prefix=VLM_IMAGES_DIR)
-        return torch.load(image_path, map_location="cpu")
+        return torch.load(image_path, map_location="cpu", weights_only=True)
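For the image-embedding asset this is purely a hardening change: the downloaded .pt file holds tensor data, which loads exactly as before, while a checkpoint that pickles an arbitrary object would now be refused. A rough, self-contained illustration (the class and file name are invented for the example; in recent PyTorch the failure surfaces as pickle.UnpicklingError):

import pickle

import torch


class NotATensor:
    """Stand-in for an arbitrary pickled object hidden in a checkpoint."""


torch.save({"payload": NotATensor()}, "suspicious.pt")

try:
    torch.load("suspicious.pt", map_location="cpu", weights_only=True)
except pickle.UnpicklingError as exc:
    # The restricted unpickler rejects custom classes unless they are
    # explicitly allow-listed (e.g. via torch.serialization.add_safe_globals).
    print("rejected:", exc)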

vllm/lora/models.py

Lines changed: 2 additions & 1 deletion

@@ -272,7 +272,8 @@ def from_local_checkpoint(
                 new_embeddings_tensor_path)
         elif os.path.isfile(new_embeddings_bin_file_path):
             embeddings = torch.load(new_embeddings_bin_file_path,
-                                    map_location=device)
+                                    map_location=device,
+                                    weights_only=True)
 
         rank = config["r"]
         lora_alpha = config["lora_alpha"]

vllm/model_executor/model_loader/weight_utils.py

Lines changed: 5 additions & 3 deletions

@@ -85,7 +85,7 @@ def convert_bin_to_safetensor_file(
     pt_filename: str,
     sf_filename: str,
 ) -> None:
-    loaded = torch.load(pt_filename, map_location="cpu")
+    loaded = torch.load(pt_filename, map_location="cpu", weights_only=True)
     if "state_dict" in loaded:
         loaded = loaded["state_dict"]
     shared = _shared_pointers(loaded)

@@ -373,7 +373,9 @@ def np_cache_weights_iterator(
                     disable=not enable_tqdm,
                     bar_format=_BAR_FORMAT,
             ):
-                state = torch.load(bin_file, map_location="cpu")
+                state = torch.load(bin_file,
+                                   map_location="cpu",
+                                   weights_only=True)
                 for name, param in state.items():
                     param_path = os.path.join(np_folder, name)
                     with open(param_path, "wb") as f:

@@ -422,7 +424,7 @@ def pt_weights_iterator(
             disable=not enable_tqdm,
             bar_format=_BAR_FORMAT,
     ):
-        state = torch.load(bin_file, map_location="cpu")
+        state = torch.load(bin_file, map_location="cpu", weights_only=True)
         yield from state.items()
         del state
         torch.cuda.empty_cache()
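Both iterator hunks keep the shard-streaming structure; only the load call gains the new argument. A simplified sketch of that pattern (not the vLLM implementation — the helper below just walks a directory of *.bin shards):

import glob
import os
from typing import Generator, Tuple

import torch


def bin_weights_iterator(
        folder: str) -> Generator[Tuple[str, torch.Tensor], None, None]:
    """Yield (name, tensor) pairs from every .bin shard in `folder`."""
    for bin_file in sorted(glob.glob(os.path.join(folder, "*.bin"))):
        # Load each shard on CPU with the restricted unpickler, yield its
        # tensors, then drop the shard before moving on to the next one.
        state = torch.load(bin_file, map_location="cpu", weights_only=True)
        yield from state.items()
        del state
        if torch.cuda.is_available():
            torch.cuda.empty_cache()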

vllm/prompt_adapter/utils.py

Lines changed: 2 additions & 1 deletion

@@ -89,6 +89,7 @@ def load_peft_weights(model_id: str,
         adapters_weights = safe_load_file(filename, device=device)
     else:
         adapters_weights = torch.load(filename,
-                                      map_location=torch.device(device))
+                                      map_location=torch.device(device),
+                                      weights_only=True)
 
     return adapters_weights
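load_peft_weights follows the usual "prefer safetensors, fall back to pickle" pattern, and with this change the pickle fallback is restricted as well. A minimal standalone version of that pattern (the function name and adapter file names are illustrative, not vLLM's API):

import os

import torch
from safetensors.torch import load_file as safe_load_file


def load_adapter_weights(path: str, device: str = "cpu") -> dict:
    """Load adapter weights, preferring the safetensors file when present."""
    st_file = os.path.join(path, "adapter_model.safetensors")
    bin_file = os.path.join(path, "adapter_model.bin")
    if os.path.isfile(st_file):
        # safetensors never executes pickled code, so it is safe by design.
        return safe_load_file(st_file, device=device)
    # Pickle-based fallback, now limited to tensor data only.
    return torch.load(bin_file,
                      map_location=torch.device(device),
                      weights_only=True)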
