Skip to content

Commit b786dad

Browse files
committed
del ut
Signed-off-by: shen-shanshan <[email protected]>
1 parent 880c092 commit b786dad

File tree

1 file changed

+2
-11
lines changed

1 file changed

+2
-11
lines changed

tests/ut/models/test_qwen2_5_vl.py

Lines changed: 2 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,8 @@
 from tests.ut.base import PytestBase
 from vllm_ascend.models.qwen2_5_vl import (
     AscendQwen2_5_VisionAttention, AscendQwen2_5_VisionBlock,
-    AscendQwen2_5_VisionPatchEmbed, AscendQwen2_5_VisionRotaryEmbedding,
-    AscendQwen2_5_VisionTransformer, AscendQwen2_5_VLForConditionalGeneration)
+    AscendQwen2_5_VisionRotaryEmbedding, AscendQwen2_5_VisionTransformer,
+    AscendQwen2_5_VLForConditionalGeneration)


 class TestAscendQwen2_5_VisionAttention(PytestBase):
@@ -215,15 +215,6 @@ def test_vision_block_forward(self, mocker: MockerFixture):
         assert torch.all(x * 3 == output)


-class TestAscendQwen2_5_VisionPatchEmbed(PytestBase):
-
-    def test_forward(self):
-        patch_embed = AscendQwen2_5_VisionPatchEmbed()
-
-        ret = patch_embed(torch.rand((120, 1176)))
-        assert ret.shape == (120, 1152)
-
-
 class TestAscendQwen2_5_VisionRotaryEmbedding(PytestBase):

     def init_rotary_embedding(

0 commit comments

Comments
 (0)