diff --git a/src/plugins/template/src/compiled_model.cpp b/src/plugins/template/src/compiled_model.cpp index 65415c434e0b3c..cbfb51237ecca9 100644 --- a/src/plugins/template/src/compiled_model.cpp +++ b/src/plugins/template/src/compiled_model.cpp @@ -153,11 +153,11 @@ ov::Any ov::template_plugin::CompiledModel::get_property(const std::string& name auto ro_properties = default_ro_properties(); auto rw_properties = default_rw_properties(); - std::vector<ov::PropertyName> supported_properties; + auto supported_properties = decltype(ov::supported_properties)::value_type(); supported_properties.reserve(ro_properties.size() + rw_properties.size()); supported_properties.insert(supported_properties.end(), ro_properties.begin(), ro_properties.end()); supported_properties.insert(supported_properties.end(), rw_properties.begin(), rw_properties.end()); - return decltype(ov::supported_properties)::value_type(supported_properties); + return supported_properties; } return m_cfg.Get(name); diff --git a/src/plugins/template/src/config.cpp b/src/plugins/template/src/config.cpp index cf26fab0f7bcdb..d47e826e826283 100644 --- a/src/plugins/template/src/config.cpp +++ b/src/plugins/template/src/config.cpp @@ -73,7 +73,7 @@ Configuration::Configuration(const ov::AnyMap& config, const Configuration& defa value.as<std::string>()); } } else if (ov::hint::num_requests == key) { - auto tmp_val = value.as<std::string>(); + const auto& tmp_val = value.as<std::string>(); int tmp_i = std::stoi(tmp_val); if (tmp_i >= 0) num_requests = tmp_i; diff --git a/src/plugins/template/src/plugin.cpp b/src/plugins/template/src/plugin.cpp index bfdd4a35600bcb..3cd8c6743d7409 100644 --- a/src/plugins/template/src/plugin.cpp +++ b/src/plugins/template/src/plugin.cpp @@ -271,38 +271,31 @@ ov::Any ov::template_plugin::Plugin::get_property(const std::string& name, const supported_properties.reserve(ro_properties.size() + rw_properties.size()); supported_properties.insert(supported_properties.end(), ro_properties.begin(), ro_properties.end()); 
supported_properties.insert(supported_properties.end(), rw_properties.begin(), rw_properties.end()); - return decltype(ov::supported_properties)::value_type(supported_properties); + return supported_properties; } else if (ov::internal::supported_properties == name) { return decltype(ov::internal::supported_properties)::value_type{ ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}, ov::PropertyName{ov::internal::exclusive_async_requests.name(), ov::PropertyMutability::RW}}; } else if (ov::available_devices == name) { // TODO: fill list of available devices - std::vector<std::string> available_devices = {""}; - return decltype(ov::available_devices)::value_type(available_devices); + return decltype(ov::available_devices)::value_type{{""}}; } else if (ov::device::full_name == name) { - std::string device_name = "Template Device Full Name"; - return decltype(ov::device::full_name)::value_type(device_name); + return decltype(ov::device::full_name)::value_type{"Template Device Full Name"}; } else if (ov::device::architecture == name) { // TODO: return device architecture for device specified by DEVICE_ID config - std::string arch = get_device_name(); - return decltype(ov::device::architecture)::value_type(arch); + return decltype(ov::device::architecture)::value_type{get_device_name()}; } else if (ov::device::type == name) { - return decltype(ov::device::type)::value_type(ov::device::Type::INTEGRATED); + return decltype(ov::device::type)::value_type{ov::device::Type::INTEGRATED}; } else if (ov::internal::caching_properties == name) { - std::vector<ov::PropertyName> caching_properties = {ov::device::architecture}; - return decltype(ov::internal::caching_properties)::value_type(caching_properties); + return decltype(ov::internal::caching_properties)::value_type{ov::device::architecture}; } else if (ov::device::capabilities == name) { // TODO: fill actual list of supported capabilities: e.g. 
Template device supports only FP32 and EXPORT_IMPORT - std::vector<std::string> capabilities = {ov::device::capability::FP32, ov::device::capability::EXPORT_IMPORT}; - return decltype(ov::device::capabilities)::value_type(capabilities); + return decltype(ov::device::capabilities)::value_type{ov::device::capability::FP32, + ov::device::capability::EXPORT_IMPORT}; } else if (ov::execution_devices == name) { - std::string dev = get_device_name(); - return decltype(ov::execution_devices)::value_type{dev}; + return decltype(ov::execution_devices)::value_type{get_device_name()}; } else if (ov::range_for_async_infer_requests == name) { - // TODO: fill with actual values - using uint = unsigned int; - return decltype(ov::range_for_async_infer_requests)::value_type(std::make_tuple(uint{1}, uint{1}, uint{1})); + return decltype(ov::range_for_async_infer_requests)::value_type{1, 1, 1}; } else { return m_cfg.Get(name); } diff --git a/src/plugins/template/src/sync_infer_request.cpp b/src/plugins/template/src/sync_infer_request.cpp index 85e5845052994f..41881e9839adaf 100644 --- a/src/plugins/template/src/sync_infer_request.cpp +++ b/src/plugins/template/src/sync_infer_request.cpp @@ -53,8 +53,9 @@ void collect_variables(const std::shared_ptr<ov::Model>& ov_model, for (const auto& variable : ov_model->get_variables()) { if (!variable_context.get_variable_value(variable)) { - auto shape = variable->get_info().data_shape.is_dynamic() ? ov::Shape{0} - : variable->get_info().data_shape.to_shape(); + const auto& shape = variable->get_info().data_shape.is_dynamic() + ? 
ov::Shape{0} + : variable->get_info().data_shape.to_shape(); ov::Tensor tensor = ov::Tensor(variable->get_info().data_type, shape); variable_context.set_variable_value(variable, std::make_shared<ov::op::util::VariableValue>(tensor)); auto state = @@ -83,7 +84,7 @@ ov::template_plugin::InferRequest::InferRequest(const std::shared_ptrm_cfg.device_id) + "_" + name + "_WaitPipline"), }; - + m_durations = {}; m_executable = get_template_model()->get_template_plugin()->m_backend->compile(get_template_model()->m_model); // Allocate plugin backend specific memory handles @@ -252,11 +253,11 @@ void ov::template_plugin::InferRequest::infer_postprocess() { OPENVINO_ASSERT(get_outputs().size() == m_backend_output_tensors.size()); for (size_t i = 0; i < get_outputs().size(); i++) { const auto& result = get_template_model()->m_model->get_results()[i]; - auto host_tensor = m_backend_output_tensors[i]; + const auto& host_tensor = m_backend_output_tensors[i]; auto tensor = get_tensor(get_outputs()[i]); if (result->get_output_partial_shape(0).is_dynamic()) { ov::Output<const ov::Node> output{result->output(0).get_node(), result->output(0).get_index()}; - allocate_tensor(output, [host_tensor](ov::SoPtr<ov::ITensor>& tensor) { + allocate_tensor(output, [&host_tensor](ov::SoPtr<ov::ITensor>& tensor) { allocate_tensor_impl(tensor, host_tensor.get_element_type(), host_tensor.get_shape()); host_tensor.copy_to(ov::make_tensor(tensor)); });