Skip to content
Closed
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions src/frontends/pytorch/src/op/pad.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -134,6 +134,14 @@ OutputVector translate_reflection_pad_nd_fx(const NodeContext& context) {
return translate_pad_common(context, data, paddings, pad_value, "reflect");
}

// Converter for aten.replication_pad{1,2,3}d: edge-replication padding.
// Inputs: (data, paddings). "replicate" mode ignores the pad value, but
// translate_pad_common expects one, so a scalar zero constant is passed.
OutputVector translate_replicate_pad_nd_fx(const NodeContext& context) {
    num_inputs_check(context, 2, 2);
    auto data = context.get_input(0);
    auto paddings = context.const_input<std::vector<int64_t>>(1);
    Output<Node> pad_value = context.mark_node(v0::Constant::create(element::f32, Shape{}, {0}));
    return translate_pad_common(context, data, paddings, pad_value, "replicate");
}

} // namespace op
} // namespace pytorch
} // namespace frontend
Expand Down
8 changes: 8 additions & 0 deletions src/frontends/pytorch/src/op_table.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -305,6 +305,8 @@ OP_CONVERTER(translate_new_zeros_fx);
OP_CONVERTER(translate_ones_fx);
OP_CONVERTER(translate_ones_like_fx);
OP_CONVERTER(translate_reflection_pad_nd_fx);
OP_CONVERTER(translate_replicate_pad_nd_fx);
OP_CONVERTER(translate_reshape_fx);
OP_CONVERTER(translate_repeat_fx);
OP_CONVERTER(translate_rsub_fx);
OP_CONVERTER(translate_scalar_tensor_fx);
Expand Down Expand Up @@ -626,6 +628,9 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_ts() {
{"aten::remainder", op::translate_remainder},
{"aten::repeat", op::translate_1to1_match_2_inputs<opset10::Tile>},
{"aten::repeat_interleave", op::translate_repeat_interleave},
{"aten::replicate_pad1d", op::translate_replicate_pad_nd_fx},
{"aten::replicate_pad2d", op::translate_replicate_pad_nd_fx},
{"aten::replicate_pad3d", op::translate_replicate_pad_nd_fx},
{"aten::reshape", op::translate_reshape},
{"aten::reshape_as", op::translate_reshape_as},
// TO DO: enable behaviour for resolve_conj and resolve_neg complex tensors,
Expand Down Expand Up @@ -945,6 +950,9 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_fx() {
{"aten.reflection_pad1d.default", op::translate_reflection_pad_nd_fx},
{"aten.reflection_pad2d.default", op::translate_reflection_pad_nd_fx},
{"aten.reflection_pad3d.default", op::translate_reflection_pad_nd_fx},
{"aten.replicate_pad1d.default", op::translate_replicate_pad_nd_fx},
{"aten.replicate_pad2d.default", op::translate_replicate_pad_nd_fx},
{"aten.replicate_pad3d.default", op::translate_replicate_pad_nd_fx},
{"aten.relu.default", op::translate_1to1_match_1_inputs<opset10::Relu>},
{"aten.relu_.default", op::inplace_op<op::translate_1to1_match_1_inputs<opset10::Relu>>},
{"aten.repeat.default", op::translate_repeat_fx},
Expand Down
122 changes: 120 additions & 2 deletions tests/layer_tests/pytorch_tests/test_pad.py
Original file line number Diff line number Diff line change
Expand Up @@ -219,9 +219,9 @@ def __init__(self, pads):
if ndim == 1:
self.pad = torch.nn.ReflectionPad1d(pads)
elif ndim == 2:
self.pad = torch.nn.ReflectionPad1d(pads)
self.pad = torch.nn.ReflectionPad2d(pads)
elif ndim == 3:
self.pad = torch.nn.ReflectionPad1d(pads)
self.pad = torch.nn.ReflectionPad3d(pads)
else:
raise Exception("Unsupported pads")

Expand All @@ -244,3 +244,121 @@ def test_reflection_padnd(self, pads, dtype, ie_device, precision, ir_version):
print(ndim)
self._test(*self.create_model(pads), ie_device, precision, ir_version,
kwargs_to_prepare_input={"ndim": ndim, "dtype": dtype})

class TestReplicatePad1D(PytorchLayerTest):
    """Layer test for torch.nn.ReplicationPad1d (aten replication padding, 1D)."""

    def _prepare_input(self, ndim=4, dtype="float32"):
        import numpy as np
        # Take the leading `ndim` dims of a reference shape to build the input.
        shape = [5, 9, 1, 1, 2, 4]
        return (np.random.randn(*shape[:ndim]).astype(dtype),)

    def create_model(self, pads):
        import torch

        class aten_pad(torch.nn.Module):
            def __init__(self, pads):
                super().__init__()
                self.pad = torch.nn.ReplicationPad1d(pads)

            def forward(self, x):
                # ReplicationPad1d pads the last dimension by replicating edges.
                return self.pad(x)

        return aten_pad(pads), None, "aten::pad"

    @pytest.mark.parametrize("dtype", ["float32", "float64", "int32"])
    @pytest.mark.parametrize("pads", [
        1,
        2,
        3,
        (1, 2),
        (2, 1),
        (2, 3),
        (3, 4),
    ])
    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.precommit_torch_export
    def test_replicate_padnd(self, pads, dtype, ie_device, precision, ir_version):
        # ReplicationPad1d expects a 3D (N, C, W) input.
        self._test(*self.create_model(pads), ie_device, precision, ir_version,
                   kwargs_to_prepare_input={"ndim": 3, "dtype": dtype})

class TestReplicatePad2D(PytorchLayerTest):
    """Layer test for torch.nn.ReplicationPad2d (aten replication padding, 2D)."""

    def _prepare_input(self, ndim=4, dtype="float32"):
        import numpy as np
        # Take the leading `ndim` dims of a reference shape to build the input.
        shape = [5, 9, 1, 1, 2, 4]
        return (np.random.randn(*shape[:ndim]).astype(dtype),)

    def create_model(self, pads):
        import torch

        class aten_pad(torch.nn.Module):
            def __init__(self, pads):
                super().__init__()
                self.pad = torch.nn.ReplicationPad2d(pads)

            def forward(self, x):
                # ReplicationPad2d pads the last two dimensions by replicating edges.
                return self.pad(x)

        return aten_pad(pads), None, "aten::pad"

    @pytest.mark.parametrize("dtype", ["float32", "float64", "int32"])
    @pytest.mark.parametrize("pads", [
        1,
        2,
        3,
        (1, 2, 2, 1),
        (2, 1, 3, 4),
        (2, 3, 1, 2),
        (3, 4, 5, 6),
    ])
    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.precommit_torch_export
    def test_replicate_padnd(self, pads, dtype, ie_device, precision, ir_version):
        # ReplicationPad2d expects a 4D (N, C, H, W) input.
        self._test(*self.create_model(pads), ie_device, precision, ir_version,
                   kwargs_to_prepare_input={"ndim": 4, "dtype": dtype})

class TestReplicatePad3D(PytorchLayerTest):
    """Layer test for torch.nn.ReplicationPad3d (aten replication padding, 3D)."""

    def _prepare_input(self, ndim=4, dtype="float32"):
        import numpy as np
        # Take the leading `ndim` dims of a reference shape to build the input.
        shape = [5, 9, 1, 1, 2, 4]
        return (np.random.randn(*shape[:ndim]).astype(dtype),)

    def create_model(self, pads):
        import torch

        class aten_pad(torch.nn.Module):
            def __init__(self, pads):
                super().__init__()
                self.pad = torch.nn.ReplicationPad3d(pads)

            def forward(self, x):
                # ReplicationPad3d pads the last three dimensions by replicating edges.
                return self.pad(x)

        return aten_pad(pads), None, "aten::pad"

    @pytest.mark.parametrize("dtype", ["float32", "float64", "int32"])
    @pytest.mark.parametrize("pads", [
        1,
        2,
        3,
        (1, 2, 2, 1, 3, 4),
        (2, 1, 3, 4, 2, 1),
        (2, 3, 1, 2, 2, 1),
        (3, 4, 5, 6, 1, 2),
    ])
    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.precommit_torch_export
    def test_replicate_padnd(self, pads, dtype, ie_device, precision, ir_version):
        # ReplicationPad3d expects a 5D (N, C, D, H, W) input.
        self._test(*self.create_model(pads), ie_device, precision, ir_version,
                   kwargs_to_prepare_input={"ndim": 5, "dtype": dtype})

Loading