Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -4351,14 +4351,49 @@ bool UniqueConsecutiveOpInferSymbolicShape(
return inverse_dims;
}();

infer_context->SetShapeOrDataForValue(
op->result(0), symbol::TensorShapeOrDataDimExprs{out_dims});
infer_context->SetShapeOrDataForValue(
op->result(1),
return_inverse ? symbol::TensorShapeOrDataDimExprs{inverse_dims} : empty);
infer_context->SetShapeOrDataForValue(
op->result(2),
return_counts ? symbol::TensorShapeOrDataDimExprs{counts_dims} : empty);
const auto &IsZero = [&](const symbol::DimExpr &dim_expr) {
if (dim_expr.isa<int64_t>()) {
return dim_expr.dyn_cast<int64_t>() == static_cast<int64_t>(0);
}
return false;
};
bool size_0 = false;
for (size_t i = 0; i < x_dims_sym.size(); i++) {
if (IsZero(x_dims_sym.at(i))) {
size_0 = true;
break;
}
}

if (size_0) {
if (axes.empty()) {
infer_context->SetShapeOrDataForValue(
op->result(0), symbol::TensorShapeOrDataDimExprs{});
infer_context->SetShapeOrDataForValue(
op->result(1),
return_inverse ? symbol::TensorShapeOrDataDimExprs{x_dims_sym}
: empty);
} else {
infer_context->SetShapeOrDataForValue(
op->result(0), symbol::TensorShapeOrDataDimExprs{x_dims_sym});
infer_context->SetShapeOrDataForValue(
op->result(1),
return_inverse ? symbol::TensorShapeOrDataDimExprs{} : empty);
}
infer_context->SetShapeOrDataForValue(
op->result(2),
return_counts ? symbol::TensorShapeOrDataDimExprs{} : empty);
} else {
infer_context->SetShapeOrDataForValue(
op->result(0), symbol::TensorShapeOrDataDimExprs{out_dims});
infer_context->SetShapeOrDataForValue(
op->result(1),
return_inverse ? symbol::TensorShapeOrDataDimExprs{inverse_dims}
: empty);
infer_context->SetShapeOrDataForValue(
op->result(2),
return_counts ? symbol::TensorShapeOrDataDimExprs{counts_dims} : empty);
}

return true;
}
Expand Down
16 changes: 16 additions & 0 deletions paddle/phi/infermeta/unary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -5918,6 +5918,22 @@ void UniqueConsecutiveInferMeta(const MetaTensor& x,
if (return_counts) {
counts->set_dims({-1});
}
if (x.numel() == 0) {
if (axis.empty()) {
out->set_dims({0});
if (return_inverse) {
index->set_dims(in_dims);
}
} else {
out->set_dims(in_dims);
if (return_inverse) {
index->set_dims({0});
}
}
if (return_counts) {
counts->set_dims({0});
Comment on lines +5922 to +5934
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

这个地方还是有些疑问。
out->set_dims({0}) 设置完后,tensor的shape是不是[0] ? shape [0] 和shape[ ] 是两个不同的shape,shape [0]的dims().size() 为1,是1d Tensor,尽管是0-size。shape [ ]的dims().size() 为0,是0d Tensor也是个标量。 试试out->set_dims({}),让tensor的shape为[ ]
请把

import torch
x = torch.randn([0,1,2])
output, inverse, counts = torch.unique_consecutive(x, return_inverse=True,return_counts=True)
print("x ",x)
print("output ",output)
print("inverse ",inverse)
print("counts ",counts)

转化为paddle的代码,跑一下,将结果贴到PR描述里。

}
}
}

void UniqueInferMeta(const MetaTensor& x,
Expand Down
11 changes: 11 additions & 0 deletions paddle/phi/kernels/cpu/unique_consecutive_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,17 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
DenseTensor* out,
DenseTensor* index,
DenseTensor* counts) {
if (out && out->numel() == 0) {
dev_ctx.template Alloc<T>(out);
if (return_inverse) {
dev_ctx.Alloc(index, dtype);
}
if (return_counts) {
dev_ctx.Alloc(counts, dtype);
}

return;
}
if (dtype == phi::DataType::INT32) {
PADDLE_ENFORCE_LE(
x.numel(),
Expand Down
11 changes: 11 additions & 0 deletions paddle/phi/kernels/gpu/unique_consecutive_kernel.cu
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,17 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
DenseTensor* out,
DenseTensor* index,
DenseTensor* counts) {
if (out && out->numel() == 0) {
dev_ctx.template Alloc<T>(out);
if (return_inverse) {
dev_ctx.Alloc(index, dtype);
}
if (return_counts) {
dev_ctx.Alloc(counts, dtype);
}

return;
}
if (dtype == phi::DataType::INT32) {
PADDLE_ENFORCE_LE(
x.numel() + 1,
Expand Down
33 changes: 33 additions & 0 deletions test/legacy_test/test_unique_consecutive_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -372,6 +372,39 @@ def test_check_output(self):
self.check_output(check_pir=True, check_symbol_infer=False)


class TestUniqueConsecutiveZeroSize(TestUniqueConsecutiveOp):
    """Zero-size input: x has a 0-length leading dimension."""

    def config(self):
        # A shape with a zero-sized axis exercises the 0-size code path
        # of unique_consecutive.
        self.x_size = (0, 2, 4)
        self.x_range = 20
        self.return_inverse = True
        self.return_counts = True
        self.python_api = paddle.unique_consecutive

    def setUp(self):
        self.init_kernel_type()
        self.config()
        self.op_type = "unique_consecutive"
        # Random integer input; with a zero-sized shape this is empty.
        data = np.random.randint(self.x_range, size=self.x_size).astype(
            self.dtype
        )
        # Reference results computed in numpy for comparison.
        expected, expected_inverse, expected_counts = (
            reference_unique_consecutive(
                data, self.return_inverse, self.return_counts
            )
        )
        expected = np.array(expected).astype(self.dtype)
        expected_inverse = expected_inverse.astype(self.dtype).reshape(
            data.shape
        )
        expected_counts = expected_counts.astype(self.dtype)
        self.inputs = {'X': data}
        self.attrs = {
            'return_inverse': self.return_inverse,
            'return_counts': self.return_counts,
            'dtype': paddle.int32,
        }
        self.python_out_sig = ["Out", "Index", "Counts"]
        self.outputs = {
            'Out': expected,
            'Index': expected_inverse,
            'Counts': expected_counts,
        }


if __name__ == "__main__":
    # Run the op tests in static-graph mode, consistent with the
    # rest of the legacy-test suite.
    paddle.enable_static()
    unittest.main()
Loading