Skip to content

Commit 5391be5

Browse files
aliafzal and meta-codesync[bot]
authored and committed
Removing unused tests (#3555)
Summary: Pull Request resolved: #3555 These tests have been skipped for quite some time now and they don't seem to impact much. internal torchrec/modules/tests:test_feature_processor_ - torchrec.modules.tests.test_feature_processor_.PositionWeightedModuleTest: test_rematerialize_from_meta (844425044832114) constantly skipping since 2025-05-29, torchrec/modules/tests:test_feature_processor_ - torchrec.modules.tests.test_feature_processor_.PositionWeightedModuleTest: test_rematerialize_from_meta (562950069172161) constantly skipping since 2025-05-29 Reviewed By: kausv Differential Revision: D87352762 fbshipit-source-id: d6f12fa9c9b1e87d099e45bea1b5372a2f589097
1 parent 00f1ab6 commit 5391be5

File tree

1 file changed

+0
-37
lines changed

1 file changed

+0
-37
lines changed

torchrec/modules/tests/test_feature_processor_.py

Lines changed: 0 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -61,23 +61,6 @@ def test_populate_weights(self) -> None:
6161
weighted_features.lengths(), weighted_features_gm_script.lengths()
6262
)
6363

64-
# TODO: this test is not being run
65-
# pyre-ignore
66-
@unittest.skipIf(
67-
torch.cuda.device_count() <= 0,
68-
"Not enough GPUs, this test requires at least one GPU",
69-
)
70-
def test_rematerialize_from_meta(self) -> None:
71-
pw = PositionWeightedModule(max_feature_length=10, device=torch.device("meta"))
72-
self.assertTrue(pw.position_weight.is_meta)
73-
74-
# Re-materialize on cuda
75-
init_parameters(pw, torch.device("cuda"))
76-
self.assertTrue(not pw.position_weight.is_meta)
77-
torch.testing.assert_close(
78-
pw.position_weight, torch.ones_like(pw.position_weight)
79-
)
80-
8164

8265
class PositionWeightedCollectionModuleTest(unittest.TestCase):
8366
def test_populate_weights(self) -> None:
@@ -133,26 +116,6 @@ def test_populate_weights(self) -> None:
133116
empty_fp_kjt.length_per_key(), empty_fp_kjt_gm_script.length_per_key()
134117
)
135118

136-
# TODO: this test is not being run
137-
# pyre-ignore
138-
@unittest.skipIf(
139-
torch.cuda.device_count() <= 0,
140-
"Not enough GPUs, this test requires at least one GPU",
141-
)
142-
def test_rematerialize_from_meta(self) -> None:
143-
pwmc = PositionWeightedModuleCollection(
144-
max_feature_lengths={"f1": 10, "f2": 10},
145-
device=torch.device("meta"),
146-
)
147-
self.assertTrue(all(param.is_meta for param in pwmc.position_weights.values()))
148-
149-
# Re-materialize on cuda
150-
init_parameters(pwmc, torch.device("cuda"))
151-
for key, param in pwmc.position_weights.items():
152-
self.assertTrue(not param.is_meta)
153-
self.assertTrue(pwmc.position_weights_dict[key] is param)
154-
torch.testing.assert_close(param, torch.ones_like(param))
155-
156119
# pyre-ignore
157120
@unittest.skipIf(
158121
torch.cuda.device_count() <= 0,

0 commit comments

Comments
 (0)