Skip to content
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 2 additions & 5 deletions src/bindings/js/node/tests/unit/basic.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -345,12 +345,9 @@ describe("ov basic tests.", () => {
});

it("Test importModelSync(stream, device, config: unsupported property) \
throws", () => {
no exception core property not passed to device", () => {
const tmpDir = "/tmp";
assert.throws(
() => core.importModelSync(userStream, "CPU", { CACHE_DIR: tmpDir }),
/Unsupported property CACHE_DIR by CPU plugin./,
);
core.importModelSync(userStream, "CPU", { CACHE_DIR: tmpDir });
});

it("importModel returns promise with CompiledModel", async () => {
Expand Down
124 changes: 25 additions & 99 deletions src/inference/src/dev/core_impl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -401,8 +401,7 @@ ov::Parsed ov::parseDeviceNameIntoConfig(const std::string& deviceName,

// remove core properties for HW devices
if (!ov::is_virtual_device(parsed._deviceName)) {
// note: ov::cache_dir kept as plugin may require it
CoreConfig::remove_core_skip_cache_dir(parsed._config);
CoreConfig::remove_core(parsed._config);
}
return parsed;
}
Expand Down Expand Up @@ -756,21 +755,6 @@ ov::Plugin ov::CoreImpl::get_plugin(const std::string& pluginName) const {
}
}
#endif
// TODO: remove this block of code once GPU removes support of ov::cache_dir
// also, remove device_supports_cache_dir at all
{
OPENVINO_SUPPRESS_DEPRECATED_START
if (device_supports_cache_dir(plugin)) {
auto cacheConfig = coreConfig.get_cache_config_for_device(plugin);
if (cacheConfig._cacheManager) {
desc.defaultConfig[ov::cache_dir.name()] = cacheConfig._cacheDir;
}
} else if (desc.defaultConfig.count(ov::cache_dir.name()) > 0) {
// Remove "CACHE_DIR" from config if it is not supported by plugin
desc.defaultConfig.erase(ov::cache_dir.name());
}
OPENVINO_SUPPRESS_DEPRECATED_END
}

allowNotImplemented([&]() {
// Add device specific value to support device_name.device_id cases
Expand Down Expand Up @@ -844,8 +828,7 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model(const std::shared_ptr<
config_with_batch,
is_proxy_device(patched_device_name));
auto plugin = get_plugin(parsed._deviceName);
// will consume ov::cache_dir if plugin not support it
const auto cache_manager = parsed._core_config.get_cache_config_for_device(plugin, parsed._config)._cacheManager;
const auto cache_manager = parsed._core_config.get_cache_config_for_device(plugin, config)._cacheManager;
auto res = import_compiled_model(plugin, {}, parsed._config, model);
// Skip caching for proxy plugin. HW plugin will load network from the cache
if (res) {
Expand Down Expand Up @@ -890,8 +873,7 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model(const std::shared_ptr<

auto parsed = parseDeviceNameIntoConfig(device_name, coreConfig, config_with_batch, is_proxy_device(device_name));
auto plugin = get_plugin(parsed._deviceName);
// will consume ov::cache_dir if plugin not support it
const auto cache_manager = parsed._core_config.get_cache_config_for_device(plugin, parsed._config)._cacheManager;
const auto cache_manager = parsed._core_config.get_cache_config_for_device(plugin, config)._cacheManager;
auto res = import_compiled_model(plugin, context, parsed._config, model);
// Skip caching for proxy plugin. HW plugin will load network from the cache
if (res) {
Expand Down Expand Up @@ -919,15 +901,14 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model(const std::string& mod
auto parsed = parse_device_config(device_name, coreConfig, config, false);
// in case of compile_model(file_name), we need to clear-up core-level properties
auto plugin = get_plugin(parsed._deviceName);
// will consume ov::cache_dir if plugin not support it
const auto cache_manager = parsed._core_config.get_cache_config_for_device(plugin, parsed._config)._cacheManager;
const auto cache_manager = parsed._core_config.get_cache_config_for_device(plugin, config)._cacheManager;
auto compiled_model = import_compiled_model(plugin, {}, parsed._config, model_path);

if (compiled_model) {
// hint::compiled_blob is set and imported skip compilation
} else if (cache_manager && device_supports_model_caching(plugin, parsed._config) && !is_proxy_device(plugin)) {
// Skip caching for proxy plugin. HW plugin will load network from the cache
CoreConfig::remove_core_skip_cache_dir(parsed._config);
CoreConfig::remove_core(parsed._config);
CacheContent cache_content{cache_manager, parsed._core_config.get_enable_mmap(), model_path};
cache_content.blobId = ov::ModelCache::compute_hash(model_path, create_compile_config(plugin, parsed._config));
const auto lock = cacheGuard.get_hash_lock(cache_content.blobId);
Expand All @@ -949,8 +930,7 @@ ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model(const std::string& mod
OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "Core::compile_model::from_memory");
auto parsed = parseDeviceNameIntoConfig(device_name, coreConfig, config);
auto plugin = get_plugin(parsed._deviceName);
// will consume ov::cache_dir if plugin not support it
const auto cache_manager = parsed._core_config.get_cache_config_for_device(plugin, parsed._config)._cacheManager;
const auto cache_manager = parsed._core_config.get_cache_config_for_device(plugin, config)._cacheManager;
auto compiled_model = import_compiled_model(plugin, {}, parsed._config);
// Skip caching for proxy plugin. HW plugin will load network from the cache
if (compiled_model) {
Expand Down Expand Up @@ -1366,34 +1346,7 @@ void ov::CoreImpl::set_property_for_device(const ov::AnyMap& configMap, const st
std::lock_guard<std::mutex> lock(get_mutex());
created_plugins.reserve(plugins.size());

// TODO: keep only:
// coreConfig.set_and_update(config);
// once GPU remove support of ov::cache_dir
// CoreConfg::set_and_update will drop CACHE_DIR from config map
// and updates core config with new ov::cache_dir
if (deviceName.empty()) {
coreConfig.set_and_update(config);
} else {
OPENVINO_SUPPRESS_DEPRECATED_START
auto cache_it = config.find(ov::cache_dir.name());
if (cache_it != config.end()) {
coreConfig.set_cache_dir_for_device((cache_it->second).as<std::string>(), clearDeviceName);
config.erase(cache_it);
}
OPENVINO_SUPPRESS_DEPRECATED_END
// apply and remove core properties
auto it = config.find(ov::force_tbb_terminate.name());
if (it != config.end()) {
auto flag = it->second.as<bool>();
ov::threading::executor_manager()->set_property({{it->first, flag}});
config.erase(it);
}

it = config.find(ov::enable_mmap.name());
if (it != config.end()) {
config.erase(it);
}
}
coreConfig.set_and_update(config, clearDeviceName);

if (!config.empty()) {
auto base_desc = pluginRegistry.find(clearDeviceName);
Expand Down Expand Up @@ -1430,17 +1383,6 @@ void ov::CoreImpl::set_property_for_device(const ov::AnyMap& configMap, const st
allowNotImplemented([&]() {
std::lock_guard<std::mutex> lock(get_mutex(plugin.first));
auto configCopy = config;
// TODO: remove once GPU remove explicit support of ov::cache_dir
{
OPENVINO_SUPPRESS_DEPRECATED_START
if (device_supports_cache_dir(plugin.second)) {
configCopy[ov::cache_dir.name()] = coreConfig.get_cache_config_for_device(plugin.second)._cacheDir;
} else if (configCopy.count(ov::cache_dir.name()) > 0) {
// Remove "CACHE_DIR" from config if it is not supported by plugin
configCopy.erase(ov::cache_dir.name());
}
OPENVINO_SUPPRESS_DEPRECATED_END
}
// Add device specific value to support device_name.device_id cases
{
if (!parser.get_device_id().empty()) {
Expand Down Expand Up @@ -1513,14 +1455,6 @@ bool ov::CoreImpl::device_supports_model_caching(const ov::Plugin& plugin, const
: plugin.supports_model_caching();
}

bool ov::CoreImpl::device_supports_cache_dir(const ov::Plugin& plugin) const {
try {
return util::contains(plugin.get_property(ov::supported_properties), ov::cache_dir);
} catch (const ov::NotImplemented&) {
return false;
}
}

ov::SoPtr<ov::ICompiledModel> ov::CoreImpl::compile_model_and_cache(ov::Plugin& plugin,
const std::shared_ptr<const ov::Model>& model,
const ov::AnyMap& parsedConfig,
Expand Down Expand Up @@ -1723,10 +1657,11 @@ void ov::CoreConfig::set(const ov::AnyMap& config) {
if (it != config.end()) {
std::lock_guard<std::mutex> lock(_cacheConfigMutex);
// fill global cache config
_cacheConfig = CoreConfig::CacheConfig::create(it->second.as<std::string>());
const auto& cache_dir = it->second.as<std::string>();
_cacheConfig = CoreConfig::CacheConfig::create(cache_dir);
// sets cache config per-device if it's not set explicitly before
for (auto& deviceCfg : _cacheConfigPerDevice) {
deviceCfg.second = CoreConfig::CacheConfig::create(it->second.as<std::string>());
deviceCfg.second = CoreConfig::CacheConfig::create(cache_dir);
}
}

Expand All @@ -1743,8 +1678,17 @@ void ov::CoreConfig::set(const ov::AnyMap& config) {
}
}

void ov::CoreConfig::set_and_update(ov::AnyMap& config) {
set(config);
/// @brief Records core-level properties for the given scope.
/// @param config      Property map to read (not modified).
/// @param device_name Target device; an empty string means core-level scope.
/// When a device name is given, the only core property applied per-device is
/// ov::cache_dir; all other keys are ignored here.
void ov::CoreConfig::set(const ov::AnyMap& config, const std::string& device_name) {
    // Core-level scope: delegate to the global overload.
    if (device_name.empty()) {
        set(config);
        return;
    }
    // Device scope: look for a cache_dir entry; nothing else is device-scoped.
    const auto cache_dir_entry = config.find(ov::cache_dir.name());
    if (cache_dir_entry == config.end()) {
        return;
    }
    // Store a per-device cache configuration under the mutex guarding cache state.
    std::lock_guard<std::mutex> lock(_cacheConfigMutex);
    _cacheConfigPerDevice[device_name] = CoreConfig::CacheConfig::create(cache_dir_entry->second.as<std::string>());
}

// Applies core-level properties from `config` for the given scope (empty
// device_name = core-level), then strips core-only keys from `config` so that
// only plugin-level properties remain for the caller to forward.
void ov::CoreConfig::set_and_update(ov::AnyMap& config, const std::string& device_name) {
// record core-level state (e.g. cache_dir, globally or per-device)
set(config, device_name);
// remove core-only keys from the map (see remove_core)
remove_core(config);
}

Expand All @@ -1754,17 +1698,6 @@ void ov::CoreConfig::remove_core(ov::AnyMap& config) {
}
}

void ov::CoreConfig::remove_core_skip_cache_dir(ov::AnyMap& config) {
for (const auto& name : {ov::enable_mmap.name(), ov::force_tbb_terminate.name(), ov::cache_model_path.name()}) {
config.erase(name);
}
}

void ov::CoreConfig::set_cache_dir_for_device(const std::string& dir, const std::string& name) {
std::lock_guard<std::mutex> lock(_cacheConfigMutex);
_cacheConfigPerDevice[name] = CoreConfig::CacheConfig::create(dir);
}

std::string ov::CoreConfig::get_cache_dir() const {
std::lock_guard<std::mutex> lock(_cacheConfigMutex);
return _cacheConfig._cacheDir;
Expand All @@ -1777,18 +1710,11 @@ bool ov::CoreConfig::get_enable_mmap() const {
// Creating thread-safe copy of config including shared_ptr to ICacheManager
// Passing empty or not-existing name will return global cache config
ov::CoreConfig::CacheConfig ov::CoreConfig::get_cache_config_for_device(const ov::Plugin& plugin,
ov::AnyMap& parsedConfig) const {
const ov::AnyMap& config) const {
// cache_dir is enabled locally in compile_model only
if (parsedConfig.count(ov::cache_dir.name())) {
const auto& cache_dir_val = parsedConfig.at(ov::cache_dir.name()).as<std::string>();
const auto& tempConfig = CoreConfig::CacheConfig::create(cache_dir_val);
// if plugin does not explicitly support cache_dir, and if plugin is not virtual, we need to remove
// it from config
if (!util::contains(plugin.get_property(ov::supported_properties), ov::cache_dir) &&
!ov::is_virtual_device(plugin.get_name())) {
parsedConfig.erase(ov::cache_dir.name());
}
return tempConfig;
if (config.count(ov::cache_dir.name())) {
const auto& cache_dir = config.at(ov::cache_dir.name()).as<std::string>();
return CoreConfig::CacheConfig::create(cache_dir);
} else { // cache_dir is set to Core globally or for the specific device
return get_cache_config_for_device(plugin);
}
Expand Down
15 changes: 5 additions & 10 deletions src/inference/src/dev/core_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -36,28 +36,26 @@ class CoreConfig final {
};

void set(const ov::AnyMap& config);
void set(const ov::AnyMap& config, const std::string& device_name);

/**
* @brief Removes core-level properties from config and triggers new state for core config
* @param config - config to be updated
* @param config config to be updated
* @param device_name device name for which config is applied (empty is for core-level)
*/
void set_and_update(ov::AnyMap& config);

OPENVINO_DEPRECATED("Don't use this method, it will be removed soon")
void set_cache_dir_for_device(const std::string& dir, const std::string& name);
void set_and_update(ov::AnyMap& config, const std::string& device_name);

std::string get_cache_dir() const;

bool get_enable_mmap() const;

CacheConfig get_cache_config_for_device(const ov::Plugin& plugin, ov::AnyMap& parsedConfig) const;
CacheConfig get_cache_config_for_device(const ov::Plugin& plugin, const ov::AnyMap& config) const;

// Creating thread-safe copy of global config including shared_ptr to ICacheManager
CacheConfig get_cache_config_for_device(const ov::Plugin& plugin) const;

// remove core properties
static void remove_core(ov::AnyMap& config);
static void remove_core_skip_cache_dir(ov::AnyMap& config);

private:
mutable std::mutex _cacheConfigMutex;
Expand Down Expand Up @@ -222,9 +220,6 @@ class CoreImpl : public ov::ICore, public std::enable_shared_from_this<ov::ICore
bool device_supports_property(const ov::Plugin& plugin, const ov::PropertyName& key) const;
bool device_supports_internal_property(const ov::Plugin& plugin, const ov::PropertyName& key) const;

OPENVINO_DEPRECATED("Don't use this method, it will be removed soon")
bool device_supports_cache_dir(const ov::Plugin& plugin) const;

ov::AnyMap create_compile_config(const ov::Plugin& plugin, const ov::AnyMap& origConfig) const;
ov::AnyMap create_compile_config(const std::string& device_name, const ov::AnyMap& origConfig) const override {
return create_compile_config(get_plugin(device_name), origConfig);
Expand Down
4 changes: 3 additions & 1 deletion src/inference/src/dev/iplugin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,9 @@ std::shared_ptr<ov::ICompiledModel> ov::IPlugin::compile_model(const std::string
OPENVINO_ASSERT(core);
const auto model = core->read_model(model_path, {}, properties);
auto local_properties = properties;
CoreConfig::remove_core_skip_cache_dir(local_properties);
if (!ov::is_virtual_device(get_device_name())) {
CoreConfig::remove_core(local_properties);
}
return compile_model(model, local_properties);
}

Expand Down
Loading
Loading