Skip to content
7 changes: 5 additions & 2 deletions lite/backends/metal/metal_image.mm
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,11 @@
dim.emplace_back(pad_to_four_dim_[i]);
});
} break;
case 0:
break;
case 0: {
for (int i = 0; i < 4; ++i) {
dim.emplace_back(pad_to_four_dim_[i]);
}
} break;
default:
LOG(FATAL) << "metal_image: Dim size is error";
}
Expand Down
5 changes: 4 additions & 1 deletion lite/operators/gather_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,10 @@ bool GatherOp::InferShapeImpl() const {
inner_dim_size *= input_dim[i];
out_dim_vec.push_back(input_dim[i]);
}
out_dim_vec.push_back(index_size);
auto index_rank = param_.Index->dims().size();
if (index_rank) {
out_dim_vec.push_back(index_size);
}
for (int i = axis_index + 1; i < input_dim.size(); i++) {
outer_dim_size *= input_dim[i];
out_dim_vec.push_back(input_dim[i]);
Expand Down
2 changes: 1 addition & 1 deletion lite/operators/is_empty_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ bool IsEmptyOp::CheckShape() const {
}

bool IsEmptyOp::InferShapeImpl() const {
param_.Out->Resize({1});
param_.Out->Resize(std::vector<int64_t>({}));
return true;
}

Expand Down
2 changes: 1 addition & 1 deletion lite/operators/mean_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ bool MeanOp::CheckShape() const {
}

bool MeanOp::InferShapeImpl() const {
param_.Out->Resize(std::vector<int64_t>{1});
param_.Out->Resize(std::vector<int64_t>{});
return true;
}

Expand Down
4 changes: 0 additions & 4 deletions lite/operators/norm_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,6 @@ bool PNormOpLite::InferShapeImpl() const {
std::vector<int64_t> reduce_dims;
const bool asvector = param_.asvector;
if (asvector) {
reduce_dims.emplace_back(1);
if (keepdim) {
for (int64_t i = 1; i < x_dim.size(); ++i) {
reduce_dims.emplace_back(1);
Expand All @@ -85,9 +84,6 @@ bool PNormOpLite::InferShapeImpl() const {
for (int i = 0; i < x_dim.size(); ++i) {
if (i != axis) reduce_dims.emplace_back(x_dim[i]);
}
if (reduce_dims.size() == 0) {
reduce_dims.emplace_back(1);
}
}
x_dim[axis] = 1;

Expand Down
19 changes: 5 additions & 14 deletions lite/operators/reduce_ops.cc
Original file line number Diff line number Diff line change
Expand Up @@ -39,15 +39,7 @@ bool ReduceOp::CheckShape() const {
auto dims = param_.dim;
auto x_dims = param_.X->dims();
int x_rank = x_dims.size();
// dim at least is [0]
CHECK_GT(dims.size(), 0)
<< "The input dim should be greater than 0. But received the dim = "
<< dims.size();
for (int i = 0; i < dims.size(); i++) {
CHECK(dims[i] <= x_rank && dims[i] + x_rank >= 0)
<< "dims[i] is " << dims[i] << ", x_rank is " << x_rank;
}
return true;
return x_rank >= 0;
}

bool ReduceOp::InferShapeImpl() const {
Expand All @@ -58,10 +50,8 @@ bool ReduceOp::InferShapeImpl() const {
bool keep_dim = param_.keep_dim;

for (int i = 0; i < dims.size(); i++) {
CHECK(dims[i] <= x_rank && dims[i] + x_rank >= 0)
<< "dims[i] is " << dims[i] << ", x_rank is " << x_rank;
if (dims[i] < 0) {
dims[i] = x_rank + dims[i];
dims[i] = x_rank + dims[i] >= 0 ? x_rank + dims[i] : 0;
}
}
// recompute reduce_all
Expand All @@ -79,8 +69,9 @@ bool ReduceOp::InferShapeImpl() const {
if (reduce_all) {
if (keep_dim)
param_.Out->Resize(std::vector<int64_t>(x_rank, 1));
else
param_.Out->Resize(std::vector<int64_t>({1}));
else {
param_.Out->Resize(std::vector<int64_t>({}));
}
} else {
std::vector<int64_t> dims_vector(x_rank, 1);
for (int i = 0; i < x_rank; i++) dims_vector[i] = x_dims[i];
Expand Down
2 changes: 1 addition & 1 deletion lite/tests/kernels/is_empty_compute_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ class IsEmptyComputeTester : public arena::TestCase {
const auto* x = scope->FindTensor(x_);
auto* out = scope->NewTensor(out_);

out->Resize(DDim({1}));
out->Resize(DDim(std::vector<int64_t>{}));
auto* out_data = out->mutable_data<bool>();
out_data[0] = (x->numel() == 0) ? true : false;
}
Expand Down
2 changes: 1 addition & 1 deletion lite/tests/kernels/mean_compute_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ class MeanComputeTester : public arena::TestCase {
auto input = scope->FindTensor(input_);
auto output = scope->NewTensor(output_);

std::vector<int64_t> out_dims{1};
std::vector<int64_t> out_dims{};
output->Resize(out_dims);

auto input_data = input->data<float>();
Expand Down
11 changes: 7 additions & 4 deletions lite/tests/kernels/reduce_all_compute_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -232,16 +232,17 @@ class ReduceAllComputeTester : public arena::TestCase {
}
}
reduce_all_ = (reduce_all_ || full_dim);
if (dim_.size() == 0) {
if (dim_.size() == 0 || x_rank == 0 || dim_.size() == x_rank) {
reduce_all_ = true;
}

std::vector<int64_t> out_dims;
if (reduce_all_) {
if (keep_dim_)
if (keep_dim_) {
out_dims = std::vector<int64_t>(x_rank, 1);
else
out_dims = std::vector<int64_t>{1};
} else {
out_dims = std::vector<int64_t>();
}
} else {
size_t out_rank = keep_dim_ ? x_rank : x_rank - dim_.size();
out_dims.resize(out_rank);
Expand Down Expand Up @@ -345,6 +346,8 @@ void test_reduce_all(Place place, float abs_err) {
default:
x_dims = DDim(std::vector<int64_t>({n, c, h, w}));
}
// 0-D output tensors are not supported by NNAdapter for now.
if (dims == 2 && dim.size() > 1) continue;

int last_dim = dim.back();
if (dim.back() < 0) {
Expand Down
11 changes: 7 additions & 4 deletions lite/tests/kernels/reduce_any_compute_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -232,16 +232,17 @@ class ReduceAnyComputeTester : public arena::TestCase {
}
}
reduce_all_ = (reduce_all_ || full_dim);
if (dim_.size() == 0) {
if (dim_.size() == 0 || x_rank == 0 || dim_.size() == x_rank) {
reduce_all_ = true;
}

std::vector<int64_t> out_dims;
if (reduce_all_) {
if (keep_dim_)
if (keep_dim_) {
out_dims = std::vector<int64_t>(x_rank, 1);
else
out_dims = std::vector<int64_t>{1};
} else {
out_dims = std::vector<int64_t>();
}
} else {
size_t out_rank = keep_dim_ ? x_rank : x_rank - dim_.size();
out_dims.resize(out_rank);
Expand Down Expand Up @@ -347,6 +348,8 @@ void test_reduce_any(Place place, float abs_err) {
x_dims = DDim(std::vector<int64_t>({n, c, h, w}));
}

// 0-D output tensors are not supported by NNAdapter for now.
if (dims == 2 && dim.size() > 1) continue;
int last_dim = dim.back();
if (dim.back() < 0) {
last_dim += x_dims.size();
Expand Down
9 changes: 4 additions & 5 deletions lite/tests/kernels/reduce_max_compute_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -283,16 +283,15 @@ class ReduceMaxComputeTester : public arena::TestCase {
}

std::stable_sort(dim_.begin(), dim_.end());
if (dim_.size() == 0) {
if (dim_.size() == 0 || x_rank == 0 || dim_.size() == x_rank) {
reduce_all_ = true;
}
std::vector<int64_t> out_dims;
if (reduce_all_) {
if (keep_dim_) {
out_dims.push_back(x_rank);
out_dims.push_back(1);
out_dims = std::vector<int64_t>(x_rank, 1);
} else {
out_dims.push_back(1);
out_dims = std::vector<int64_t>();
}
} else {
for (size_t i = 0; i < x_dims_.size(); i++) {
Expand All @@ -313,8 +312,8 @@ class ReduceMaxComputeTester : public arena::TestCase {
if (!keep_dim_ && out_dims.empty()) {
out_dims.push_back(1);
}
out->Resize(DDim(out_dims));
}
out->Resize(DDim(out_dims));

auto* out_data = out->mutable_data<float>();

Expand Down
12 changes: 6 additions & 6 deletions lite/tests/kernels/reduce_mean_compute_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -224,16 +224,15 @@ class ReduceMeanComputeTester : public arena::TestCase {
}

std::stable_sort(dim_.begin(), dim_.end());
if (dim_.size() == 0) {
if (dim_.size() == 0 || x_rank == 0 || dim_.size() == x_rank) {
reduce_all_ = true;
}
std::vector<int64_t> out_dims;
if (reduce_all_) {
if (keep_dim_) {
out_dims.push_back(x_rank);
out_dims.push_back(1);
out_dims = std::vector<int64_t>(x_rank, 1);
} else {
out_dims.push_back(1);
out_dims = std::vector<int64_t>();
}
} else {
for (size_t i = 0; i < x_dims_.size(); i++) {
Expand All @@ -254,8 +253,8 @@ class ReduceMeanComputeTester : public arena::TestCase {
if (!keep_dim_ && out_dims.empty()) {
out_dims.push_back(1);
}
out->Resize(DDim(out_dims));
}
out->Resize(DDim(out_dims));

auto* out_data = out->mutable_data<float>();
size_t new_dims[] = {1, 1, 1, 1};
Expand Down Expand Up @@ -342,7 +341,8 @@ void test_reduce_mean(Place place,
default:
x_dims = DDim(std::vector<int64_t>({n, c, h, w}));
}

// 0-D output tensors are not supported by NNAdapter for now.
if (dims == 2 && dim.size() > 1) continue;
int last_dim = dim.back();
if (dim.back() < 0) {
last_dim += x_dims.size();
Expand Down
9 changes: 4 additions & 5 deletions lite/tests/kernels/reduce_min_compute_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -283,16 +283,15 @@ class ReduceMinComputeTester : public arena::TestCase {
}

std::stable_sort(dim_.begin(), dim_.end());
if (dim_.size() == 0) {
if (dim_.size() == 0 || x_rank == 0 || dim_.size() == x_rank) {
reduce_all_ = true;
}
std::vector<int64_t> out_dims;
if (reduce_all_) {
if (keep_dim_) {
out_dims.push_back(x_rank);
out_dims.push_back(1);
out_dims = std::vector<int64_t>(x_rank, 1);
} else {
out_dims.push_back(1);
out_dims = std::vector<int64_t>();
}
} else {
for (size_t i = 0; i < x_dims_.size(); i++) {
Expand All @@ -313,8 +312,8 @@ class ReduceMinComputeTester : public arena::TestCase {
if (!keep_dim_ && out_dims.empty()) {
out_dims.push_back(1);
}
out->Resize(DDim(out_dims));
}
out->Resize(DDim(out_dims));

auto* out_data = out->mutable_data<float>();

Expand Down
7 changes: 3 additions & 4 deletions lite/tests/kernels/reduce_prod_compute_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -208,15 +208,14 @@ class ReduceProdComputeTester : public arena::TestCase {
}
}
std::stable_sort(dim_.begin(), dim_.end());

std::vector<int64_t> out_dims;
if (reduce_all_ || dim_.size() == 0) {
if (keep_dim_) {
out->Resize({static_cast<int64_t>(x_rank), 1});
out_dims = std::vector<int64_t>(x_rank, 1);
} else {
out->Resize({1});
out_dims = std::vector<int64_t>();
}
} else {
std::vector<int64_t> out_dims;
for (size_t i = 0; i < x_dims_.size(); i++) {
out_dims.push_back(x_dims_[i]);
}
Expand Down
9 changes: 4 additions & 5 deletions lite/tests/kernels/reduce_sum_compute_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -228,12 +228,9 @@ class ReduceSumComputeTester : public arena::TestCase {
std::vector<int64_t> out_dims;
if (reduce_all_) {
if (keep_dim_) {
out_dims.resize(x_rank);
for (int i = 0; i < x_rank; ++i) {
out_dims[i] = 1;
}
out_dims = std::vector<int64_t>(x_rank, 1);
} else {
out_dims.push_back(1);
out_dims = std::vector<int64_t>();
}
} else {
for (int i = 0; i < x_dims_.size(); i++) {
Expand Down Expand Up @@ -331,6 +328,8 @@ void test_reduce_sum(Place place,
if (std::find(dim.begin(), dim.end(), 0) == dim.end() &&
!keep_dim)
continue;
// 0-D output tensors are not supported by NNAdapter for now.
if (reduce_all) continue;
#endif
auto x_dims = DDim(std::vector<int64_t>({n, c, h, w}));
std::unique_ptr<arena::TestCase> tester(
Expand Down
6 changes: 3 additions & 3 deletions lite/tests/unittest_py/op/test_abs_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,9 +93,9 @@ def _teller1(program_config, predictor_config):
if len(in_x_shape) == 0:
return True

self.add_ignore_check_case(_teller1,
IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Only test 0D-tensor on CPU(ARM/Host) now.")
self.add_ignore_check_case(
_teller1, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"0D-tensor is not supported on this target now.")

def test(self, *args, **kwargs):
self.run_and_statis(quant=False, min_success_num=25, max_examples=100)
Expand Down
6 changes: 3 additions & 3 deletions lite/tests/unittest_py/op/test_acos_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,9 +120,9 @@ def _teller1(program_config, predictor_config):
if len(in_x_shape) == 0:
return True

self.add_ignore_check_case(_teller1,
IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Only test 0D-tensor on CPU(ARM/Host) now.")
self.add_ignore_check_case(
_teller1, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"0D-tensor is not supported on this target now.")

def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=100)
Expand Down
6 changes: 3 additions & 3 deletions lite/tests/unittest_py/op/test_asin_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,9 +121,9 @@ def _teller1(program_config, predictor_config):
if len(in_x_shape) == 0:
return True

self.add_ignore_check_case(_teller1,
IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Only test 0D-tensor on CPU(ARM/Host) now.")
self.add_ignore_check_case(
_teller1, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"0D-tensor is not supported on this target now.")

def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=100)
Expand Down
6 changes: 3 additions & 3 deletions lite/tests/unittest_py/op/test_assign_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,9 +84,9 @@ def _teller2(program_config, predictor_config):
if len(in_x_shape) == 0:
return True

self.add_ignore_check_case(_teller2,
IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Only test 0D-tensor on CPU(ARM/Host) now.")
self.add_ignore_check_case(
_teller2, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"0D-tensor is not supported on this target now.")

def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
Expand Down
6 changes: 3 additions & 3 deletions lite/tests/unittest_py/op/test_atan_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,9 +81,9 @@ def _teller1(program_config, predictor_config):
if len(in_x_shape) == 0:
return True

self.add_ignore_check_case(_teller1,
IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Only test 0D-tensor on CPU(ARM/Host) now.")
self.add_ignore_check_case(
_teller1, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"0D-tensor is not supported on this target now.")

def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=100)
Expand Down
Loading