@@ -75,7 +75,7 @@ std::vector<TRShape> shape_infer(const VariadicSplit* op,
sum_of_splits += (*split_lengths)[i];
}
}
-const auto dimension_at_axis = data_shape[axis];
+const auto& dimension_at_axis = data_shape[axis];

if (negative_one_idx >= 0 && dimension_at_axis.is_static()) {
(*split_lengths)[negative_one_idx] = dimension_at_axis.get_length() - sum_of_splits;
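The change above binds a const reference to the indexed Dimension instead of copying it. A minimal standalone sketch of the difference, using a hypothetical Dim type (not the OpenVINO Dimension class) that counts its copies:

#include <cassert>
#include <vector>

// Hypothetical stand-in for a dimension-like type that is not free to copy.
struct Dim {
    long value;
    static int copies;  // counts copy constructions for the demo
    explicit Dim(long v) : value(v) {}
    Dim(const Dim& other) : value(other.value) { ++copies; }
};
int Dim::copies = 0;

int main() {
    std::vector<Dim> data_shape;
    data_shape.reserve(3);
    data_shape.emplace_back(2);
    data_shape.emplace_back(3);
    data_shape.emplace_back(4);

    const auto by_value = data_shape[1];   // auto deduces Dim: one copy is made
    const auto& by_ref = data_shape[1];    // binds to the stored element: no copy

    assert(Dim::copies == 1);
    assert(by_value.value == 3 && by_ref.value == 3);
    return 0;
}

For a trivially cheap type the copy is harmless; for a dimension object that may carry interval bounds and tracking state, the reference avoids needless work on a shape-inference path.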
2 changes: 1 addition & 1 deletion src/core/src/evaluator.hpp
@@ -135,7 +135,7 @@ class Evaluator {
// Request to execute the handler. Pass what we know about the inputs to the
// handler and associate the results with the outputs
std::vector<V> inputs;
-for (auto v : node->input_values()) {
+for (const auto& v : node->input_values()) {
inputs.push_back(evaluator.get_value_map().at(v));
}
std::vector<V> outputs = m_handler(node, inputs);
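The same idea applies inside a range-based for loop: for (auto v : ...) copies every element on each iteration, while for (const auto& v : ...) iterates over references. A small sketch with plain std::string elements (not the Output<Node> handles used in the real code):

#include <iostream>
#include <string>
#include <vector>

int main() {
    const std::vector<std::string> values{"tensor_a", "tensor_b", "tensor_c"};

    // Copies: each iteration constructs a fresh std::string from the element.
    for (auto v : values) {
        std::cout << v.size() << ' ';
    }
    std::cout << '\n';

    // No copies: v is a const reference bound to the stored element.
    for (const auto& v : values) {
        std::cout << v.size() << ' ';
    }
    std::cout << '\n';
    return 0;
}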
4 changes: 2 additions & 2 deletions src/core/src/layout.cpp
@@ -448,7 +448,7 @@ std::tuple<PartialShape, Layout> LayoutUtils::find_squeeze(const Layout& src_lay
res.m_left_size = dst_layout.m_left_size;
int64_t dst_idx = 0;
for (int64_t src_idx = 0; src_idx < src_layout.m_left_size; src_idx++) {
-auto src_dim_name = src_layout.m_index_map.at(src_idx);
+const auto& src_dim_name = src_layout.m_index_map.at(src_idx);
if (dst_layout.has_name(src_dim_name)) {
if (!rank_dynamic) {
res_dims[dst_idx] = src_shape[src_idx];
@@ -496,7 +496,7 @@ std::tuple<PartialShape, Layout, size_t> LayoutUtils::find_unsqueeze(const Layou
res.m_left_size = dst_layout.m_left_size;
int64_t unset_idx = 0;
for (auto i = 0; i < dst_layout.m_left_size; i++) {
-auto dim_name = dst_layout.m_index_map.at(i);
+const auto& dim_name = dst_layout.m_index_map.at(i);
if (src_layout.has_name(dim_name)) {
auto src_idx = src_layout.get_index_by_name(dim_name);
res.m_names[dim_name] = src_idx + dims_cnt;
4 changes: 2 additions & 2 deletions src/core/src/op/adaptive_avg_pool.cpp
@@ -17,8 +17,8 @@ op::v8::AdaptiveAvgPool::AdaptiveAvgPool(const Output<Node>& data, const Output<
void op::v8::AdaptiveAvgPool::validate_and_infer_types() {
OV_OP_SCOPE(v8_AdaptiveAvgPool_validate_and_infer_types);

-const auto output_shape = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)).front();
-set_output_type(0, get_input_element_type(0), output_shape);
+const auto output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this));
+set_output_type(0, get_input_element_type(0), output_shapes[0]);
}

std::shared_ptr<Node> op::v8::AdaptiveAvgPool::clone_with_new_inputs(const OutputVector& new_args) const {
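This file and most of the ones below apply the same refactor: instead of copying the first shape out of the vector returned by shape_infer (the old .front() call materialized a separate output_shape object), the whole returned vector is kept and element [0] is passed straight to set_output_type. A minimal sketch of the two variants, with hypothetical Shape, infer() and set_output() stand-ins rather than the real OpenVINO API:

#include <vector>

// Hypothetical stand-ins; not the real ov::PartialShape / shape_infer signatures.
struct Shape {
    std::vector<long> dims;
};

std::vector<Shape> infer() {
    return {Shape{{1, 3, 224, 224}}};
}

void set_output(const Shape& s) {
    (void)s;  // consumer takes the shape by const reference
}

void before() {
    // Copies element 0 of the temporary vector into a named Shape,
    // then passes the copy on.
    const auto output_shape = infer().front();
    set_output(output_shape);
}

void after() {
    // Keeps the whole result alive in a local; set_output binds directly
    // to element 0, so no extra Shape copy is made.
    const auto output_shapes = infer();
    set_output(output_shapes[0]);
}

int main() {
    before();
    after();
    return 0;
}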
4 changes: 2 additions & 2 deletions src/core/src/op/batch_to_space.cpp
@@ -50,8 +50,8 @@ void BatchToSpace::validate_and_infer_types() {
"block_shape and crops inputs must have integer element type. Got: ",
inputs_integer_et);

-const auto output_shape = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)).front();
-set_output_type(0, data_et, output_shape);
+const auto& output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this));
+set_output_type(0, data_et, output_shapes[0]);
}

std::shared_ptr<Node> BatchToSpace::clone_with_new_inputs(const OutputVector& new_args) const {
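Note the form used here: const auto& output_shapes = shape_infer(...). Binding a const lvalue reference directly to the returned temporary is safe, because the temporary's lifetime is extended to match the reference. What does not get extended is anything reached through a member function call on the temporary. A self-contained sketch of the rule with a plain std::vector (nothing OpenVINO-specific):

#include <cassert>
#include <vector>

std::vector<int> make() {
    return {10, 20, 30};
}

int main() {
    // Safe: the temporary vector returned by make() is lifetime-extended
    // to the scope of the reference 'v'.
    const auto& v = make();
    assert(v.size() == 3 && v[0] == 10);

    // Not equivalent: `const auto& x = make().front();` would bind to an
    // element of a temporary that is destroyed at the end of the statement,
    // leaving x dangling. The original code avoided that by copying
    // (`const auto output_shape = ...front();`), at the cost of the copy.
    return 0;
}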
6 changes: 3 additions & 3 deletions src/core/src/op/broadcast.cpp
@@ -48,8 +48,8 @@ std::pair<bool, ov::AxisSet> ov::op::v3::Broadcast::get_broadcast_axes() const {
bool axes_known = false;

if (get_input_partial_shape(0).is_static() && get_output_partial_shape(0).is_static()) {
-const auto arg_shape = get_input_shape(0);
-const auto result_shape = get_output_shape(0);
+const auto& arg_shape = get_input_shape(0);
+const auto& result_shape = get_output_shape(0);
return get_broadcast_axes_bidirectional(arg_shape, result_shape);
}
return std::make_pair(axes_known, broadcast_axes);
@@ -108,7 +108,7 @@ ov::PartialShape get_result_shape_bidirectional(const ov::Node* this_ptr,

bool ov::op::v3::Broadcast::broadcast_evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
if (get_broadcast_spec().m_type == op::BroadcastType::BIDIRECTIONAL) {
-auto arg_shape = inputs[0].get_shape();
+const auto& arg_shape = inputs[0].get_shape();
ov::Shape target_shape = op::util::BroadcastBase::get_target_shape(inputs[1]);
ov::PartialShape result_shape =
get_result_shape_bidirectional(this, ov::PartialShape{arg_shape}, ov::PartialShape{target_shape});
3 changes: 2 additions & 1 deletion src/core/src/op/concat.cpp
@@ -39,7 +39,8 @@ void Concat::validate_and_infer_types() {
input_shapes.push_back(get_input_partial_shape(i));
}

-const auto output_shape = shape_infer(this, input_shapes).front();
+const auto output_shapes = shape_infer(this, input_shapes);
+const auto& output_shape = output_shapes[0];
if (output_shape.rank().is_static() && (get_concatenation_axis() < 0)) {
set_concatenation_axis(ov::util::normalize(get_axis(), output_shape.size()));
}
4 changes: 2 additions & 2 deletions src/core/src/op/ctc_loss.cpp
@@ -59,8 +59,8 @@ void op::v4::CTCLoss::validate_and_infer_types() {
input_et);
}

-const auto output_shape = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)).front();
-set_output_type(0, logits_type, output_shape);
+const auto& output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this));
+set_output_type(0, logits_type, output_shapes[0]);
}

bool op::v4::CTCLoss::visit_attributes(AttributeVisitor& visitor) {
4 changes: 2 additions & 2 deletions src/core/src/op/depth_to_space.cpp
@@ -42,8 +42,8 @@ std::shared_ptr<Node> DepthToSpace::clone_with_new_inputs(const OutputVector& ne
void DepthToSpace::validate_and_infer_types() {
OV_OP_SCOPE(v0_DepthToSpace_validate_and_infer_types);

-const auto output_shape = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)).front();
-set_output_type(0, get_input_element_type(0), output_shape);
+const auto output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this));
+set_output_type(0, get_input_element_type(0), output_shapes[0]);
}

bool DepthToSpace::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
4 changes: 2 additions & 2 deletions src/core/src/op/gather_tree.cpp
@@ -50,7 +50,7 @@ void op::v1::GatherTree::validate_and_infer_types() {
"Element type of inputs must be numeric. Got: ",
result_et);

-const auto output_shape = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)).front();
-set_output_type(0, result_et, output_shape);
+const auto output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this));
+set_output_type(0, result_et, output_shapes[0]);
}
} // namespace ov
4 changes: 2 additions & 2 deletions src/core/src/op/loop.cpp
@@ -169,8 +169,8 @@ void Loop::validate_and_infer_types() {

auto body_parameter = m_bodies[0]->get_parameters().at(merged_input_description->m_body_parameter_index);

-auto input_partial_shape = input(index).get_partial_shape();
-auto input_type = input(index).get_element_type();
+const auto& input_partial_shape = input(index).get_partial_shape();
+const auto& input_type = input(index).get_element_type();

body_parameter->set_partial_shape(input_partial_shape);
body_parameter->set_element_type(input_type);
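Here the getters are assumed to return const references (which appears to be the case for Input<Node>::get_partial_shape() and get_element_type()); plain auto still deduces a value type and copies the result, while const auto& preserves reference semantics. If a getter instead returned by value, the const reference would simply bind a lifetime-extended temporary, so the change is safe either way. A generic sketch with a hypothetical Holder class:

#include <cassert>
#include <string>

// Hypothetical holder whose getter returns a const reference to a member.
class Holder {
public:
    const std::string& name() const { return name_; }
private:
    std::string name_ = "tensor_0";
};

int main() {
    Holder h;

    auto copy = h.name();        // auto deduces std::string: the member is copied
    const auto& ref = h.name();  // binds to the member itself: no copy

    assert(&ref == &h.name());   // same object as the stored member
    assert(copy == ref);
    return 0;
}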
4 changes: 2 additions & 2 deletions src/core/src/op/max_pool.cpp
@@ -421,9 +421,9 @@ bool MaxPool::evaluate(TensorVector& outputs, const TensorVector& inputs) const
const auto input_shapes = std::vector<PartialShape>{inputs[0].get_shape()};
auto pads_begin = m_pads_begin;
auto pads_end = m_pads_end;
-const auto output_shape = shape_infer(this, input_shapes, pads_begin, pads_end).front();
+const auto output_shapes = shape_infer(this, input_shapes, pads_begin, pads_end);

-outputs[0].set_shape(output_shape.get_shape());
+outputs[0].set_shape(output_shapes[0].get_shape());

return ov::op::maxpool::evaluate_util(this, outputs, inputs, get_dilations(), get_axis());
}
18 changes: 8 additions & 10 deletions src/core/src/op/paged_attention.cpp
@@ -14,14 +14,13 @@ PagedAttentionExtension::PagedAttentionExtension(const ov::OutputVector& args) :
}

void PagedAttentionExtension::validate_and_infer_types() {
-auto value_cache_shape = get_input_partial_shape(4);
+const auto& value_cache_shape = get_input_partial_shape(4);
// m_num_kv_heads = value_cache_shape[1];
// m_head_size = value_cache_shape[2];
// m_block_size = value_cache_shape[3];
NODE_VALIDATION_CHECK(this, value_cache_shape.size() == 4, "Value cache shape must be 4 dims");

// key_cache: shape [num_blocks, num_kv_heads, head_size/x, block_size, x]
-auto key_cache_shape = get_input_partial_shape(3);
NODE_VALIDATION_CHECK(this,
value_cache_shape.size() == 4,
// value_cache_shape[0] == key_cache_shape[0] && // num_blocks
@@ -31,8 +30,8 @@ void PagedAttentionExtension::validate_and_infer_types() {
"Key cache shape must be 4 dims");

// query: shape [batch_size, seq_len, num_heads * head_size]
-auto query_type = get_input_element_type(0);
-auto query_shape = get_input_partial_shape(0);
+const auto& query_type = get_input_element_type(0);
+const auto& query_shape = get_input_partial_shape(0);
NODE_VALIDATION_CHECK(this,
// query_type.is_real() &&
query_shape.size() == 3,
@@ -44,8 +43,8 @@
query_shape);

// key: shape [batch_size, seq_len, num_kv_heads * head_size]
-auto key_type = get_input_element_type(1);
-auto key_shape = get_input_partial_shape(1);
+const auto& key_type = get_input_element_type(1);
+const auto& key_shape = get_input_partial_shape(1);
NODE_VALIDATION_CHECK(this,
// query_type == key_type &&
key_shape.size() == 3,
@@ -57,7 +56,6 @@

// value: shape [batch_size, seq_len, num_kv_heads * head_size]
// auto value_type = get_input_element_type(2);
-auto value_shape = get_input_partial_shape(2);

// is_prompt: boolean scalar
NODE_VALIDATION_CHECK(this,
@@ -70,7 +68,7 @@
get_input_shape(5));

// slot_mapping: shape [batch_size, max_context_len]
-auto slot_mapping_shape = get_input_partial_shape(6);
+const auto& slot_mapping_shape = get_input_partial_shape(6);
NODE_VALIDATION_CHECK(this,
// get_input_element_type(6) == ov::element::i64 &&
slot_mapping_shape.size() == 2,
@@ -91,7 +89,7 @@
get_input_shape(7));

// context_lens: shape [batch_size]
-auto context_lens_shape = get_input_partial_shape(8);
+const auto& context_lens_shape = get_input_partial_shape(8);
NODE_VALIDATION_CHECK(this,
// get_input_element_type(8) == ov::element::i32 &&
context_lens_shape.size() == 1,
@@ -149,4 +147,4 @@ std::shared_ptr<ov::Node> PagedAttentionExtension::clone_with_new_inputs(const o
}

} // namespace op
-} // namespace ov
+} // namespace ov
4 changes: 2 additions & 2 deletions src/core/src/op/reverse.cpp
@@ -63,8 +63,8 @@ void Reverse::validate_and_infer_types() {
"In 'index' mode the second input must contain integer values.");
}

-const auto output_shape = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)).front();
-set_output_type(0, get_input_element_type(0), output_shape);
+const auto output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this));
+set_output_type(0, get_input_element_type(0), output_shapes[0]);
}

std::shared_ptr<ov::Node> Reverse::clone_with_new_inputs(const OutputVector& new_args) const {
4 changes: 2 additions & 2 deletions src/core/src/op/reverse_sequence.cpp
@@ -38,8 +38,8 @@ void op::v0::ReverseSequence::validate_and_infer_types() {
"Sequence lengths element type must be numeric type. Got: ",
seq_lengths_et);

-const auto output_shape = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)).front();
-set_output_type(0, get_input_element_type(0), output_shape);
+const auto& output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this));
+set_output_type(0, get_input_element_type(0), output_shapes[0]);

m_normalized_seq_axis = ov::util::normalize_axis(this, m_seq_axis, get_input_partial_shape(0).rank());
}
4 changes: 2 additions & 2 deletions src/core/src/op/roll.cpp
@@ -28,8 +28,8 @@ void Roll::validate_and_infer_types() {
axes_et.is_dynamic() || axes_et == element::i32 || axes_et == element::i64,
"Axes must have int32 or int64 element type.");

-const auto output_shape = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)).front();
-set_output_type(0, get_input_element_type(0), output_shape);
+const auto output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this));
+set_output_type(0, get_input_element_type(0), output_shapes[0]);
}

bool Roll::visit_attributes(AttributeVisitor& visitor) {
2 changes: 1 addition & 1 deletion src/core/src/op/shape_of.cpp
@@ -120,7 +120,7 @@ void ShapeOf::validate_and_infer_types() {
m_output_type == element::i64 || m_output_type == element::i32,
"Output type must be i32 or i64");
set_input_is_relevant_to_value(0, false);
-const auto input_partial_shape = get_input_partial_shape(0);
+const auto& input_partial_shape = get_input_partial_shape(0);
set_output_type(0, m_output_type, PartialShape{input_partial_shape.rank()});
}

4 changes: 2 additions & 2 deletions src/core/src/op/shuffle_channels.cpp
@@ -41,8 +41,8 @@ size_t ShuffleChannels::get_zero_based_axis() const {
void ShuffleChannels::validate_and_infer_types() {
OV_OP_SCOPE(v0_ShuffleChannels_validate_and_infer_types);

-const auto output_shape = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)).front();
-set_output_type(0, get_input_element_type(0), output_shape);
+const auto output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this));
+set_output_type(0, get_input_element_type(0), output_shapes[0]);
}

std::shared_ptr<Node> ShuffleChannels::clone_with_new_inputs(const OutputVector& new_args) const {
6 changes: 3 additions & 3 deletions src/core/src/op/slice.cpp
@@ -123,9 +123,9 @@ bool Slice::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
shape_infer(this, ov::util::get_tensors_partial_shapes(inputs), make_tensor_accessor(inputs));
outputs[0].set_shape(output_shapes.front().to_shape());

-const auto starts = ov::get_tensor_data_as<int64_t>(inputs[1]);
-const auto steps = ov::get_tensor_data_as<int64_t>(inputs[3]);
-const auto axes = slice_no_axes(this) ? default_axes(starts.size()) : ov::get_tensor_data_as<int64_t>(inputs[4]);
+const auto& starts = ov::get_tensor_data_as<int64_t>(inputs[1]);
+const auto& steps = ov::get_tensor_data_as<int64_t>(inputs[3]);
+const auto& axes = slice_no_axes(this) ? default_axes(starts.size()) : ov::get_tensor_data_as<int64_t>(inputs[4]);

reference::slice(static_cast<const char*>(inputs[0].data()),
inputs[0].get_shape(),
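One nuance in this hunk: if ov::get_tensor_data_as<int64_t> returns its result by value (a freshly built std::vector<int64_t>, which is the assumption here), switching to const auto& does not save the construction of that vector; it only binds a lifetime-extended reference to it instead of naming a local value. Both forms are correct and cost essentially the same, especially with C++17 copy elision. A short sketch under that assumption:

#include <cassert>
#include <cstdint>
#include <vector>

// Assumed stand-in for a helper that builds and returns data by value.
std::vector<int64_t> read_data() {
    return {0, 1, 2, 3};
}

int main() {
    // One vector is constructed either way: with C++17 guaranteed copy
    // elision the prvalue initializes 'a' directly, and 'b' names the
    // lifetime-extended temporary.
    const auto a = read_data();
    const auto& b = read_data();

    assert(a.size() == 4 && b.size() == 4);
    return 0;
}

The conditional expression in the axes line follows the same rule, assuming both branches return a vector by value: the selected temporary is lifetime-extended through the reference.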
4 changes: 2 additions & 2 deletions src/core/src/op/space_to_batch.cpp
@@ -55,8 +55,8 @@ void SpaceToBatch::validate_and_infer_types() {
"pads_end must be an integral number but got (",
pads_end_type,
").");
-const auto output_shape = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)).front();
-set_output_type(0, data_type, output_shape);
+const auto& output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this));
+set_output_type(0, data_type, output_shapes[0]);
}

std::shared_ptr<Node> SpaceToBatch::clone_with_new_inputs(const OutputVector& new_args) const {
4 changes: 2 additions & 2 deletions src/core/src/op/space_to_depth.cpp
@@ -46,8 +46,8 @@ std::shared_ptr<Node> SpaceToDepth::clone_with_new_inputs(const OutputVector& ne
void SpaceToDepth::validate_and_infer_types() {
OV_OP_SCOPE(v0_SpaceToDepth_validate_and_infer_types);

-const auto output_shape = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)).front();
-set_output_type(0, get_input_element_type(0), output_shape);
+const auto& output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this));
+set_output_type(0, get_input_element_type(0), output_shapes[0]);
}

bool SpaceToDepth::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
4 changes: 2 additions & 2 deletions src/core/src/op/strided_slice.cpp
@@ -48,8 +48,8 @@ StridedSlice::StridedSlice(const Output<Node>& data,

namespace {
std::shared_ptr<Node> calculate_default_strides(const Output<Node>& begin, const Output<Node>& end) {
-const auto begin_pshape = begin.get_partial_shape();
-const auto end_pshape = end.get_partial_shape();
+const auto& begin_pshape = begin.get_partial_shape();
+const auto& end_pshape = end.get_partial_shape();

size_t strides_length = 0;
if (begin_pshape.rank().is_static() && begin_pshape.rank().get_length() == 1 && begin_pshape[0].is_static()) {
6 changes: 2 additions & 4 deletions src/core/src/op/tensor_iterator.cpp
@@ -106,14 +106,12 @@ void op::v0::TensorIterator::validate_and_infer_types() {

auto body_parameter = m_bodies[0]->get_parameters().at(merged_input_description->m_body_parameter_index);

-auto body_param_partial_shape = body_parameter->get_partial_shape();
-auto input_partial_shape = inputs().at(index).get_source_output().get_partial_shape();
+const auto& input_partial_shape = inputs().at(index).get_source_output().get_partial_shape();
body_parameter->set_partial_shape(input_partial_shape);
} else if (auto invariant_input_description = ov::as_type_ptr<InvariantInputDescription>(input_description)) {
auto body_parameter = m_bodies[0]->get_parameters().at(invariant_input_description->m_body_parameter_index);

-auto body_param_partial_shape = body_parameter->get_partial_shape();
-auto input_partial_shape = inputs().at(index).get_source_output().get_partial_shape();
+const auto& input_partial_shape = inputs().at(index).get_source_output().get_partial_shape();
body_parameter->set_partial_shape(input_partial_shape);
}
}
2 changes: 1 addition & 1 deletion src/core/src/op/unique.cpp
@@ -142,7 +142,7 @@ void op::v10::Unique::validate_and_infer_types() {

if (input_shape.rank().is_static()) {
const auto normalized_axis = ov::util::normalize_axis(this, axis, input_shape.rank());
-const auto dim_at_axis = input_shape[normalized_axis];
+const auto& dim_at_axis = input_shape[normalized_axis];

Dimension output_dim_at_axis;
Dimension rev_idx_size;