diff --git a/base/base/defines.h b/base/base/defines.h index 91c35dc28b60..13001f1f79c5 100644 --- a/base/base/defines.h +++ b/base/base/defines.h @@ -155,6 +155,7 @@ # define TSA_ACQUIRE_SHARED(...) __attribute__((acquire_shared_capability(__VA_ARGS__))) /// function acquires a shared capability, but does not release it # define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure # define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability +# define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability /// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function) /// They use a lambda function to apply function attribute to a single statement. This enable us to suppress warnings locally instead of @@ -182,6 +183,7 @@ # define TSA_ACQUIRE_SHARED(...) # define TSA_TRY_ACQUIRE_SHARED(...) # define TSA_RELEASE_SHARED(...) +# define TSA_SCOPED_LOCKABLE(...) # define TSA_SUPPRESS_WARNING_FOR_READ(x) (x) # define TSA_SUPPRESS_WARNING_FOR_WRITE(x) (x) diff --git a/base/poco/Foundation/include/Poco/Logger.h b/base/poco/Foundation/include/Poco/Logger.h index ffe3766dfec7..cf2027186625 100644 --- a/base/poco/Foundation/include/Poco/Logger.h +++ b/base/poco/Foundation/include/Poco/Logger.h @@ -33,7 +33,8 @@ namespace Poco class Exception; - +class Logger; +using LoggerPtr = std::shared_ptr; class Foundation_API Logger : public Channel /// Logger is a special Channel that acts as the main @@ -870,6 +871,11 @@ class Foundation_API Logger : public Channel /// If the Logger does not yet exist, it is created, based /// on its parent logger. 
+ static LoggerPtr getShared(const std::string & name); + /// Returns a shared pointer to the Logger with the given name. + /// If the Logger does not yet exist, it is created, based + /// on its parent logger. + static Logger & unsafeGet(const std::string & name); /// Returns a reference to the Logger with the given name. /// If the Logger does not yet exist, it is created, based @@ -885,6 +891,11 @@ class Foundation_API Logger : public Channel /// given name. The Logger's Channel and log level as set as /// specified. + static LoggerPtr createShared(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION); + /// Creates and returns a shared pointer to a Logger with the + /// given name. The Logger's Channel and log level as set as + /// specified. + static Logger & root(); /// Returns a reference to the root logger, which is the ultimate /// ancestor of all Loggers. @@ -893,13 +904,6 @@ class Foundation_API Logger : public Channel /// Returns a pointer to the Logger with the given name if it /// exists, or a null pointer otherwise. - static void destroy(const std::string & name); - /// Destroys the logger with the specified name. Does nothing - /// if the logger is not found. - /// - /// After a logger has been destroyed, all references to it - /// become invalid. - static void shutdown(); /// Shuts down the logging framework and releases all /// Loggers. @@ -929,8 +933,6 @@ class Foundation_API Logger : public Channel static const std::string ROOT; /// The name of the root logger (""). 
protected: - typedef std::map LoggerMap; - Logger(const std::string & name, Channel * pChannel, int level); ~Logger(); @@ -938,6 +940,7 @@ class Foundation_API Logger : public Channel void log(const std::string & text, Message::Priority prio, const char * file, int line); static std::string format(const std::string & fmt, int argc, std::string argv[]); + static Logger & unsafeCreate(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION); static Logger & parent(const std::string & name); static void add(Logger * pLogger); static Logger * find(const std::string & name); @@ -950,9 +953,6 @@ class Foundation_API Logger : public Channel std::string _name; Channel * _pChannel; std::atomic_int _level; - - static LoggerMap * _pLoggerMap; - static Mutex _mapMtx; }; diff --git a/base/poco/Foundation/include/Poco/RefCountedObject.h b/base/poco/Foundation/include/Poco/RefCountedObject.h index 4ad32e30cad9..db966089e006 100644 --- a/base/poco/Foundation/include/Poco/RefCountedObject.h +++ b/base/poco/Foundation/include/Poco/RefCountedObject.h @@ -38,15 +38,15 @@ class Foundation_API RefCountedObject /// Creates the RefCountedObject. /// The initial reference count is one. - void duplicate() const; - /// Increments the object's reference count. + size_t duplicate() const; + /// Increments the object's reference count, returns reference count before call. - void release() const throw(); + size_t release() const throw(); /// Decrements the object's reference count /// and deletes the object if the count - /// reaches zero. + /// reaches zero, returns reference count before call. - int referenceCount() const; + size_t referenceCount() const; /// Returns the reference count. 
protected: @@ -57,36 +57,40 @@ class Foundation_API RefCountedObject RefCountedObject(const RefCountedObject &); RefCountedObject & operator=(const RefCountedObject &); - mutable AtomicCounter _counter; + mutable std::atomic _counter; }; // // inlines // -inline int RefCountedObject::referenceCount() const +inline size_t RefCountedObject::referenceCount() const { - return _counter.value(); + return _counter.load(std::memory_order_acquire); } -inline void RefCountedObject::duplicate() const +inline size_t RefCountedObject::duplicate() const { - ++_counter; + return _counter.fetch_add(1, std::memory_order_acq_rel); } -inline void RefCountedObject::release() const throw() +inline size_t RefCountedObject::release() const throw() { + size_t reference_count_before = _counter.fetch_sub(1, std::memory_order_acq_rel); + try { - if (--_counter == 0) + if (reference_count_before == 1) delete this; } catch (...) { poco_unexpected(); } + + return reference_count_before; } diff --git a/base/poco/Foundation/src/Logger.cpp b/base/poco/Foundation/src/Logger.cpp index 3d5de585b4f9..cfc063c89791 100644 --- a/base/poco/Foundation/src/Logger.cpp +++ b/base/poco/Foundation/src/Logger.cpp @@ -20,12 +20,38 @@ #include "Poco/NumberParser.h" #include "Poco/String.h" +#include +#include + +namespace +{ + +std::mutex & getLoggerMutex() +{ + auto get_logger_mutex_placeholder_memory = []() + { + static char buffer[sizeof(std::mutex)]{}; + return buffer; + }; + + static std::mutex * logger_mutex = new (get_logger_mutex_placeholder_memory()) std::mutex(); + return *logger_mutex; +} + +struct LoggerEntry +{ + Poco::Logger * logger; + bool owned_by_shared_ptr = false; +}; + +using LoggerMap = std::unordered_map; +LoggerMap * _pLoggerMap = nullptr; + +} namespace Poco { -Logger::LoggerMap* Logger::_pLoggerMap = 0; -Mutex Logger::_mapMtx; const std::string Logger::ROOT; @@ -73,7 +99,7 @@ void Logger::setProperty(const std::string& name, const std::string& value) 
setChannel(LoggingRegistry::defaultRegistry().channelForName(value)); else if (name == "level") setLevel(value); - else + else Channel::setProperty(name, value); } @@ -112,17 +138,17 @@ void Logger::dump(const std::string& msg, const void* buffer, std::size_t length void Logger::setLevel(const std::string& name, int level) { - Mutex::ScopedLock lock(_mapMtx); + std::lock_guard lock(getLoggerMutex()); if (_pLoggerMap) { std::string::size_type len = name.length(); - for (LoggerMap::iterator it = _pLoggerMap->begin(); it != _pLoggerMap->end(); ++it) + for (auto & it : *_pLoggerMap) { - if (len == 0 || - (it->first.compare(0, len, name) == 0 && (it->first.length() == len || it->first[len] == '.'))) + if (len == 0 || + (it.first.compare(0, len, name) == 0 && (it.first.length() == len || it.first[len] == '.'))) { - it->second->setLevel(level); + it.second.logger->setLevel(level); } } } @@ -131,17 +157,17 @@ void Logger::setLevel(const std::string& name, int level) void Logger::setChannel(const std::string& name, Channel* pChannel) { - Mutex::ScopedLock lock(_mapMtx); + std::lock_guard lock(getLoggerMutex()); if (_pLoggerMap) { std::string::size_type len = name.length(); - for (LoggerMap::iterator it = _pLoggerMap->begin(); it != _pLoggerMap->end(); ++it) + for (auto & it : *_pLoggerMap) { if (len == 0 || - (it->first.compare(0, len, name) == 0 && (it->first.length() == len || it->first[len] == '.'))) + (it.first.compare(0, len, name) == 0 && (it.first.length() == len || it.first[len] == '.'))) { - it->second->setChannel(pChannel); + it.second.logger->setChannel(pChannel); } } } @@ -150,17 +176,17 @@ void Logger::setChannel(const std::string& name, Channel* pChannel) void Logger::setProperty(const std::string& loggerName, const std::string& propertyName, const std::string& value) { - Mutex::ScopedLock lock(_mapMtx); + std::lock_guard lock(getLoggerMutex()); if (_pLoggerMap) { std::string::size_type len = loggerName.length(); - for (LoggerMap::iterator it = 
_pLoggerMap->begin(); it != _pLoggerMap->end(); ++it) + for (auto & it : *_pLoggerMap) { if (len == 0 || - (it->first.compare(0, len, loggerName) == 0 && (it->first.length() == len || it->first[len] == '.'))) + (it.first.compare(0, len, loggerName) == 0 && (it.first.length() == len || it.first[len] == '.'))) { - it->second->setProperty(propertyName, value); + it.second.logger->setProperty(propertyName, value); } } } @@ -280,11 +306,88 @@ void Logger::formatDump(std::string& message, const void* buffer, std::size_t le } +namespace +{ + +struct LoggerDeleter +{ + void operator()(Poco::Logger * logger) + { + std::lock_guard lock(getLoggerMutex()); + + /// If logger infrastructure is destroyed just decrement logger reference count + if (!_pLoggerMap) + { + logger->release(); + return; + } + + auto it = _pLoggerMap->find(logger->name()); + assert(it != _pLoggerMap->end()); + + /** If reference count is 1, this means this shared pointer owns logger + * and need destroy it. + */ + size_t reference_count_before_release = logger->release(); + if (reference_count_before_release == 1) + { + assert(it->second.owned_by_shared_ptr); + _pLoggerMap->erase(it); + } + } +}; + + +inline LoggerPtr makeLoggerPtr(Logger & logger) +{ + return std::shared_ptr(&logger, LoggerDeleter()); +} + +} + + Logger& Logger::get(const std::string& name) { - Mutex::ScopedLock lock(_mapMtx); + std::lock_guard lock(getLoggerMutex()); + + Logger & logger = unsafeGet(name); + + /** If there are already shared pointer created for this logger + * we need to increment Logger reference count and now logger + * is owned by logger infrastructure. 
+ */ + auto it = _pLoggerMap->find(name); + if (it->second.owned_by_shared_ptr) + { + it->second.logger->duplicate(); + it->second.owned_by_shared_ptr = false; + } + + return logger; +} + + +LoggerPtr Logger::getShared(const std::string & name) +{ + std::lock_guard lock(getLoggerMutex()); + bool logger_exists = _pLoggerMap && _pLoggerMap->contains(name); + + Logger & logger = unsafeGet(name); + + /** If logger already exists, then this shared pointer does not own it. + * If logger does not exists, logger infrastructure could be already destroyed + * or logger was created. + */ + if (logger_exists) + { + logger.duplicate(); + } + else if (_pLoggerMap) + { + _pLoggerMap->find(name)->second.owned_by_shared_ptr = true; + } - return unsafeGet(name); + return makeLoggerPtr(logger); } @@ -310,18 +413,24 @@ Logger& Logger::unsafeGet(const std::string& name) Logger& Logger::create(const std::string& name, Channel* pChannel, int level) { - Mutex::ScopedLock lock(_mapMtx); + std::lock_guard lock(getLoggerMutex()); - if (find(name)) throw ExistsException(); - Logger* pLogger = new Logger(name, pChannel, level); - add(pLogger); - return *pLogger; + return unsafeCreate(name, pChannel, level); } +LoggerPtr Logger::createShared(const std::string & name, Channel * pChannel, int level) +{ + std::lock_guard lock(getLoggerMutex()); + + Logger & logger = unsafeCreate(name, pChannel, level); + _pLoggerMap->find(name)->second.owned_by_shared_ptr = true; + + return makeLoggerPtr(logger); +} Logger& Logger::root() { - Mutex::ScopedLock lock(_mapMtx); + std::lock_guard lock(getLoggerMutex()); return unsafeGet(ROOT); } @@ -329,7 +438,7 @@ Logger& Logger::root() Logger* Logger::has(const std::string& name) { - Mutex::ScopedLock lock(_mapMtx); + std::lock_guard lock(getLoggerMutex()); return find(name); } @@ -337,14 +446,18 @@ Logger* Logger::has(const std::string& name) void Logger::shutdown() { - Mutex::ScopedLock lock(_mapMtx); + std::lock_guard lock(getLoggerMutex()); if (_pLoggerMap) { - 
for (LoggerMap::iterator it = _pLoggerMap->begin(); it != _pLoggerMap->end(); ++it) + for (auto & it : *_pLoggerMap) { - it->second->release(); + if (it.second.owned_by_shared_ptr) + continue; + + it.second.logger->release(); } + delete _pLoggerMap; _pLoggerMap = 0; } @@ -357,31 +470,15 @@ Logger* Logger::find(const std::string& name) { LoggerMap::iterator it = _pLoggerMap->find(name); if (it != _pLoggerMap->end()) - return it->second; + return it->second.logger; } return 0; } -void Logger::destroy(const std::string& name) -{ - Mutex::ScopedLock lock(_mapMtx); - - if (_pLoggerMap) - { - LoggerMap::iterator it = _pLoggerMap->find(name); - if (it != _pLoggerMap->end()) - { - it->second->release(); - _pLoggerMap->erase(it); - } - } -} - - void Logger::names(std::vector& names) { - Mutex::ScopedLock lock(_mapMtx); + std::lock_guard lock(getLoggerMutex()); names.clear(); if (_pLoggerMap) @@ -393,6 +490,14 @@ void Logger::names(std::vector& names) } } +Logger& Logger::unsafeCreate(const std::string & name, Channel * pChannel, int level) +{ + if (find(name)) throw ExistsException(); + Logger* pLogger = new Logger(name, pChannel, level); + add(pLogger); + + return *pLogger; +} Logger& Logger::parent(const std::string& name) { @@ -478,7 +583,8 @@ void Logger::add(Logger* pLogger) { if (!_pLoggerMap) _pLoggerMap = new LoggerMap; - _pLoggerMap->insert(LoggerMap::value_type(pLogger->name(), pLogger)); + + _pLoggerMap->emplace(pLogger->name(), LoggerEntry{pLogger, false /*owned_by_shared_ptr*/}); } diff --git a/programs/copier/ClusterCopier.h b/programs/copier/ClusterCopier.h index 063b13e90780..01f8b30f5463 100644 --- a/programs/copier/ClusterCopier.h +++ b/programs/copier/ClusterCopier.h @@ -20,7 +20,7 @@ class ClusterCopier : WithMutableContext const String & host_id_, const String & proxy_database_name_, ContextMutablePtr context_, - Poco::Logger * log_) + LoggerRawPtr log_) : WithMutableContext(context_), task_zookeeper_path(task_path_), host_id(host_id_), @@ -230,7 +230,7 
@@ class ClusterCopier : WithMutableContext bool experimental_use_sample_offset{false}; - Poco::Logger * log; + LoggerRawPtr log; UInt64 max_table_tries = 3; UInt64 max_shard_partition_tries = 3; diff --git a/programs/copier/ZooKeeperStaff.h b/programs/copier/ZooKeeperStaff.h index 3d4a11186e34..d51848811c62 100644 --- a/programs/copier/ZooKeeperStaff.h +++ b/programs/copier/ZooKeeperStaff.h @@ -177,7 +177,7 @@ class CleanStateClock auto watch_callback = [stale = stale] (const Coordination::WatchResponse & rsp) { - auto logger = &Poco::Logger::get("ClusterCopier"); + auto logger = getLogger("ClusterCopier"); if (rsp.error == Coordination::Error::ZOK) { switch (rsp.type) diff --git a/programs/keeper-converter/KeeperConverter.cpp b/programs/keeper-converter/KeeperConverter.cpp index 7d25c1d50176..ea5a2d3c56a4 100644 --- a/programs/keeper-converter/KeeperConverter.cpp +++ b/programs/keeper-converter/KeeperConverter.cpp @@ -27,7 +27,7 @@ int mainEntryClickHouseKeeperConverter(int argc, char ** argv) po::store(po::command_line_parser(argc, argv).options(desc).run(), options); Poco::AutoPtr console_channel(new Poco::ConsoleChannel); - Poco::Logger * logger = &Poco::Logger::get("KeeperConverter"); + LoggerPtr logger = getLogger("KeeperConverter"); logger->setChannel(console_channel); if (options.count("help")) diff --git a/programs/library-bridge/CatBoostLibraryHandlerFactory.cpp b/programs/library-bridge/CatBoostLibraryHandlerFactory.cpp index 6ee078f6c5cf..7ce896636e70 100644 --- a/programs/library-bridge/CatBoostLibraryHandlerFactory.cpp +++ b/programs/library-bridge/CatBoostLibraryHandlerFactory.cpp @@ -13,7 +13,7 @@ CatBoostLibraryHandlerFactory & CatBoostLibraryHandlerFactory::instance() } CatBoostLibraryHandlerFactory::CatBoostLibraryHandlerFactory() - : log(&Poco::Logger::get("CatBoostLibraryHandlerFactory")) + : log(getLogger("CatBoostLibraryHandlerFactory")) { } diff --git a/programs/library-bridge/CatBoostLibraryHandlerFactory.h 
b/programs/library-bridge/CatBoostLibraryHandlerFactory.h index 6ba3fe84ec9e..e29834cbe791 100644 --- a/programs/library-bridge/CatBoostLibraryHandlerFactory.h +++ b/programs/library-bridge/CatBoostLibraryHandlerFactory.h @@ -31,7 +31,7 @@ class CatBoostLibraryHandlerFactory final : private boost::noncopyable /// map: model path --> catboost library handler std::unordered_map library_handlers TSA_GUARDED_BY(mutex); std::mutex mutex; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/programs/library-bridge/ExternalDictionaryLibraryAPI.cpp b/programs/library-bridge/ExternalDictionaryLibraryAPI.cpp index 70cd6fca3751..4fa5c991f0f8 100644 --- a/programs/library-bridge/ExternalDictionaryLibraryAPI.cpp +++ b/programs/library-bridge/ExternalDictionaryLibraryAPI.cpp @@ -9,40 +9,40 @@ const char DICT_LOGGER_NAME[] = "LibraryDictionarySourceExternal"; void ExternalDictionaryLibraryAPI::log(LogLevel level, CString msg) { - auto & logger = Poco::Logger::get(DICT_LOGGER_NAME); + auto logger = getLogger(DICT_LOGGER_NAME); switch (level) { case LogLevel::TRACE: - if (logger.trace()) - logger.trace(msg); + if (logger->trace()) + logger->trace(msg); break; case LogLevel::DEBUG: - if (logger.debug()) - logger.debug(msg); + if (logger->debug()) + logger->debug(msg); break; case LogLevel::INFORMATION: - if (logger.information()) - logger.information(msg); + if (logger->information()) + logger->information(msg); break; case LogLevel::NOTICE: - if (logger.notice()) - logger.notice(msg); + if (logger->notice()) + logger->notice(msg); break; case LogLevel::WARNING: - if (logger.warning()) - logger.warning(msg); + if (logger->warning()) + logger->warning(msg); break; case LogLevel::ERROR: - if (logger.error()) - logger.error(msg); + if (logger->error()) + logger->error(msg); break; case LogLevel::CRITICAL: - if (logger.critical()) - logger.critical(msg); + if (logger->critical()) + logger->critical(msg); break; case LogLevel::FATAL: - if (logger.fatal()) - logger.fatal(msg); + if 
(logger->fatal()) + logger->fatal(msg); break; } } diff --git a/programs/library-bridge/ExternalDictionaryLibraryHandlerFactory.cpp b/programs/library-bridge/ExternalDictionaryLibraryHandlerFactory.cpp index 6acd9af20ed8..1b2b57beeb11 100644 --- a/programs/library-bridge/ExternalDictionaryLibraryHandlerFactory.cpp +++ b/programs/library-bridge/ExternalDictionaryLibraryHandlerFactory.cpp @@ -26,7 +26,7 @@ void ExternalDictionaryLibraryHandlerFactory::create( if (library_handlers.contains(dictionary_id)) { - LOG_WARNING(&Poco::Logger::get("ExternalDictionaryLibraryHandlerFactory"), "Library handler with dictionary id {} already exists", dictionary_id); + LOG_WARNING(getLogger("ExternalDictionaryLibraryHandlerFactory"), "Library handler with dictionary id {} already exists", dictionary_id); return; } diff --git a/programs/library-bridge/LibraryBridgeHandlerFactory.cpp b/programs/library-bridge/LibraryBridgeHandlerFactory.cpp index 4af1f8355e80..e5ab22f2d40d 100644 --- a/programs/library-bridge/LibraryBridgeHandlerFactory.cpp +++ b/programs/library-bridge/LibraryBridgeHandlerFactory.cpp @@ -12,7 +12,7 @@ LibraryBridgeHandlerFactory::LibraryBridgeHandlerFactory( size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) - , log(&Poco::Logger::get(name_)) + , log(getLogger(name_)) , name(name_) , keep_alive_timeout(keep_alive_timeout_) { diff --git a/programs/library-bridge/LibraryBridgeHandlerFactory.h b/programs/library-bridge/LibraryBridgeHandlerFactory.h index 7565052c4cbe..5b0f088bc296 100644 --- a/programs/library-bridge/LibraryBridgeHandlerFactory.h +++ b/programs/library-bridge/LibraryBridgeHandlerFactory.h @@ -19,7 +19,7 @@ class LibraryBridgeHandlerFactory : public HTTPRequestHandlerFactory, WithContex std::unique_ptr createRequestHandler(const HTTPServerRequest & request) override; private: - Poco::Logger * log; + LoggerPtr log; const std::string name; const size_t keep_alive_timeout; }; diff --git 
a/programs/library-bridge/LibraryBridgeHandlers.cpp b/programs/library-bridge/LibraryBridgeHandlers.cpp index 9642dd7ee634..b820f2467664 100644 --- a/programs/library-bridge/LibraryBridgeHandlers.cpp +++ b/programs/library-bridge/LibraryBridgeHandlers.cpp @@ -46,7 +46,7 @@ namespace if (!response.sent()) *response.send() << message << std::endl; - LOG_WARNING(&Poco::Logger::get("LibraryBridge"), fmt::runtime(message)); + LOG_WARNING(getLogger("LibraryBridge"), fmt::runtime(message)); } std::shared_ptr parseColumns(String && column_string) @@ -91,7 +91,7 @@ static void writeData(Block data, OutputFormatPtr format) ExternalDictionaryLibraryBridgeRequestHandler::ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) , keep_alive_timeout(keep_alive_timeout_) - , log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeRequestHandler")) + , log(getLogger("ExternalDictionaryLibraryBridgeRequestHandler")) { } @@ -379,7 +379,7 @@ void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequ ExternalDictionaryLibraryBridgeExistsHandler::ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) , keep_alive_timeout(keep_alive_timeout_) - , log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeExistsHandler")) + , log(getLogger("ExternalDictionaryLibraryBridgeExistsHandler")) { } @@ -418,7 +418,7 @@ CatBoostLibraryBridgeRequestHandler::CatBoostLibraryBridgeRequestHandler( size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) , keep_alive_timeout(keep_alive_timeout_) - , log(&Poco::Logger::get("CatBoostLibraryBridgeRequestHandler")) + , log(getLogger("CatBoostLibraryBridgeRequestHandler")) { } @@ -616,7 +616,7 @@ void CatBoostLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & requ CatBoostLibraryBridgeExistsHandler::CatBoostLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_) : 
WithContext(context_) , keep_alive_timeout(keep_alive_timeout_) - , log(&Poco::Logger::get("CatBoostLibraryBridgeExistsHandler")) + , log(getLogger("CatBoostLibraryBridgeExistsHandler")) { } diff --git a/programs/library-bridge/LibraryBridgeHandlers.h b/programs/library-bridge/LibraryBridgeHandlers.h index 16815e847232..fad8652693c7 100644 --- a/programs/library-bridge/LibraryBridgeHandlers.h +++ b/programs/library-bridge/LibraryBridgeHandlers.h @@ -26,7 +26,7 @@ class ExternalDictionaryLibraryBridgeRequestHandler : public HTTPRequestHandler, static constexpr inline auto FORMAT = "RowBinary"; const size_t keep_alive_timeout; - Poco::Logger * log; + LoggerPtr log; }; @@ -40,7 +40,7 @@ class ExternalDictionaryLibraryBridgeExistsHandler : public HTTPRequestHandler, private: const size_t keep_alive_timeout; - Poco::Logger * log; + LoggerPtr log; }; @@ -69,7 +69,7 @@ class CatBoostLibraryBridgeRequestHandler : public HTTPRequestHandler, WithConte private: const size_t keep_alive_timeout; - Poco::Logger * log; + LoggerPtr log; }; @@ -83,7 +83,7 @@ class CatBoostLibraryBridgeExistsHandler : public HTTPRequestHandler, WithContex private: const size_t keep_alive_timeout; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 5768e744f94e..d284963b7e11 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -172,7 +172,7 @@ void LocalServer::tryInitPath() { // The path is not provided explicitly - use a unique path in the system temporary directory // (or in the current dir if temporary don't exist) - Poco::Logger * log = &logger(); + LoggerRawPtr log = &logger(); std::filesystem::path parent_folder; std::filesystem::path default_path; @@ -567,7 +567,7 @@ void LocalServer::processConfig() tryInitPath(); - Poco::Logger * log = &logger(); + LoggerRawPtr log = &logger(); /// Maybe useless if (config().has("macros")) diff --git a/programs/odbc-bridge/ColumnInfoHandler.h 
b/programs/odbc-bridge/ColumnInfoHandler.h index 3ba8b182ba60..d57ef6dd73ae 100644 --- a/programs/odbc-bridge/ColumnInfoHandler.h +++ b/programs/odbc-bridge/ColumnInfoHandler.h @@ -18,7 +18,7 @@ class ODBCColumnsInfoHandler : public HTTPRequestHandler, WithContext public: ODBCColumnsInfoHandler(size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) - , log(&Poco::Logger::get("ODBCColumnsInfoHandler")) + , log(getLogger("ODBCColumnsInfoHandler")) , keep_alive_timeout(keep_alive_timeout_) { } @@ -26,7 +26,7 @@ class ODBCColumnsInfoHandler : public HTTPRequestHandler, WithContext void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; private: - Poco::Logger * log; + LoggerPtr log; size_t keep_alive_timeout; }; diff --git a/programs/odbc-bridge/IdentifierQuoteHandler.h b/programs/odbc-bridge/IdentifierQuoteHandler.h index d57bbc0ca8ad..9b2927eb6a07 100644 --- a/programs/odbc-bridge/IdentifierQuoteHandler.h +++ b/programs/odbc-bridge/IdentifierQuoteHandler.h @@ -16,7 +16,7 @@ class IdentifierQuoteHandler : public HTTPRequestHandler, WithContext public: IdentifierQuoteHandler(size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) - , log(&Poco::Logger::get("IdentifierQuoteHandler")) + , log(getLogger("IdentifierQuoteHandler")) , keep_alive_timeout(keep_alive_timeout_) { } @@ -24,7 +24,7 @@ class IdentifierQuoteHandler : public HTTPRequestHandler, WithContext void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; private: - Poco::Logger * log; + LoggerPtr log; size_t keep_alive_timeout; }; diff --git a/programs/odbc-bridge/MainHandler.h b/programs/odbc-bridge/MainHandler.h index bc0fca8b9a51..e2488b20901a 100644 --- a/programs/odbc-bridge/MainHandler.h +++ b/programs/odbc-bridge/MainHandler.h @@ -24,7 +24,7 @@ class ODBCHandler : public HTTPRequestHandler, WithContext ContextPtr context_, const String & mode_) : WithContext(context_) - , 
log(&Poco::Logger::get("ODBCHandler")) + , log(getLogger("ODBCHandler")) , keep_alive_timeout(keep_alive_timeout_) , mode(mode_) { @@ -33,7 +33,7 @@ class ODBCHandler : public HTTPRequestHandler, WithContext void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override; private: - Poco::Logger * log; + LoggerPtr log; size_t keep_alive_timeout; String mode; diff --git a/programs/odbc-bridge/ODBCBlockInputStream.cpp b/programs/odbc-bridge/ODBCBlockInputStream.cpp index 3aa3d9a652b6..c46144c3dc83 100644 --- a/programs/odbc-bridge/ODBCBlockInputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockInputStream.cpp @@ -23,7 +23,7 @@ namespace ErrorCodes ODBCSource::ODBCSource( nanodbc::ConnectionHolderPtr connection_holder, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_) : ISource(sample_block) - , log(&Poco::Logger::get("ODBCSource")) + , log(getLogger("ODBCSource")) , max_block_size{max_block_size_} , query(query_str) { diff --git a/programs/odbc-bridge/ODBCBlockInputStream.h b/programs/odbc-bridge/ODBCBlockInputStream.h index 79d5816ad014..dedd98f930f5 100644 --- a/programs/odbc-bridge/ODBCBlockInputStream.h +++ b/programs/odbc-bridge/ODBCBlockInputStream.h @@ -30,7 +30,7 @@ class ODBCSource final : public ISource column.insertFrom(sample_column, 0); } - Poco::Logger * log; + LoggerPtr log; const UInt64 max_block_size; ExternalResultDescription description; diff --git a/programs/odbc-bridge/ODBCBlockOutputStream.cpp b/programs/odbc-bridge/ODBCBlockOutputStream.cpp index eb5901ad3e1f..87c09d1e7571 100644 --- a/programs/odbc-bridge/ODBCBlockOutputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockOutputStream.cpp @@ -19,7 +19,7 @@ ODBCSink::ODBCSink( ContextPtr local_context_, IdentifierQuotingStyle quoting_) : ISink(sample_block_) - , log(&Poco::Logger::get("ODBCSink")) + , log(getLogger("ODBCSink")) , connection_holder(std::move(connection_holder_)) , db_name(remote_database_name_) , 
table_name(remote_table_name_) diff --git a/programs/odbc-bridge/ODBCBlockOutputStream.h b/programs/odbc-bridge/ODBCBlockOutputStream.h index f5e7b4e3a2d5..06edce92e1a6 100644 --- a/programs/odbc-bridge/ODBCBlockOutputStream.h +++ b/programs/odbc-bridge/ODBCBlockOutputStream.h @@ -30,7 +30,7 @@ using ValueType = ExternalResultDescription::ValueType; void consume(Chunk chunk) override; private: - Poco::Logger * log; + LoggerPtr log; nanodbc::ConnectionHolderPtr connection_holder; std::string db_name; diff --git a/programs/odbc-bridge/ODBCHandlerFactory.cpp b/programs/odbc-bridge/ODBCHandlerFactory.cpp index dd21358df8c3..eebb0c24c7a8 100644 --- a/programs/odbc-bridge/ODBCHandlerFactory.cpp +++ b/programs/odbc-bridge/ODBCHandlerFactory.cpp @@ -11,7 +11,7 @@ namespace DB ODBCBridgeHandlerFactory::ODBCBridgeHandlerFactory(const std::string & name_, size_t keep_alive_timeout_, ContextPtr context_) : WithContext(context_) - , log(&Poco::Logger::get(name_)) + , log(getLogger(name_)) , name(name_) , keep_alive_timeout(keep_alive_timeout_) { diff --git a/programs/odbc-bridge/ODBCHandlerFactory.h b/programs/odbc-bridge/ODBCHandlerFactory.h index 3e3da7c9f246..4aaf1b55453c 100644 --- a/programs/odbc-bridge/ODBCHandlerFactory.h +++ b/programs/odbc-bridge/ODBCHandlerFactory.h @@ -22,7 +22,7 @@ class ODBCBridgeHandlerFactory : public HTTPRequestHandlerFactory, WithContext std::unique_ptr createRequestHandler(const HTTPServerRequest & request) override; private: - Poco::Logger * log; + LoggerPtr log; std::string name; size_t keep_alive_timeout; }; diff --git a/programs/odbc-bridge/ODBCPooledConnectionFactory.h b/programs/odbc-bridge/ODBCPooledConnectionFactory.h index e425dea47f7d..a0ff4ae3d964 100644 --- a/programs/odbc-bridge/ODBCPooledConnectionFactory.h +++ b/programs/odbc-bridge/ODBCPooledConnectionFactory.h @@ -92,7 +92,7 @@ T execute(nanodbc::ConnectionHolderPtr connection_holder, std::functionlogTree(log, ""); diff --git a/src/Access/Common/AllowedClientHosts.cpp 
b/src/Access/Common/AllowedClientHosts.cpp index 801ccd3748b9..c677465a7a1c 100644 --- a/src/Access/Common/AllowedClientHosts.cpp +++ b/src/Access/Common/AllowedClientHosts.cpp @@ -514,7 +514,7 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const throw; /// Try to ignore DNS errors: if host cannot be resolved, skip it and try next. LOG_WARNING( - &Poco::Logger::get("AddressPatterns"), + getLogger("AddressPatterns"), "Failed to check if the allowed client hosts contain address {}. {}, code = {}", client_address.toString(), e.displayText(), e.code()); return false; @@ -556,7 +556,7 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const throw; /// Try to ignore DNS errors: if host cannot be resolved, skip it and try next. LOG_WARNING( - &Poco::Logger::get("AddressPatterns"), + getLogger("AddressPatterns"), "Failed to check if the allowed client hosts contain address {}. {}, code = {}", client_address.toString(), e.displayText(), e.code()); return false; diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 04756162b468..f406f4b7fc1f 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -280,7 +280,7 @@ void ContextAccess::setUser(const UserPtr & user_) const } user_name = user->getName(); - trace_log = &Poco::Logger::get("ContextAccess (" + user_name + ")"); + trace_log = getLogger("ContextAccess (" + user_name + ")"); std::vector current_roles, current_roles_with_admin_option; if (params.use_default_roles) diff --git a/src/Access/ContextAccess.h b/src/Access/ContextAccess.h index 63604a03b4e8..842c19d9b203 100644 --- a/src/Access/ContextAccess.h +++ b/src/Access/ContextAccess.h @@ -218,7 +218,7 @@ class ContextAccess : public std::enable_shared_from_this const AccessControl * access_control = nullptr; const Params params; bool is_full_access = false; - mutable Poco::Logger * trace_log = nullptr; + mutable LoggerPtr trace_log = nullptr; mutable UserPtr user; mutable String 
user_name; mutable bool user_was_dropped = false; diff --git a/src/Access/DiskAccessStorage.cpp b/src/Access/DiskAccessStorage.cpp index ef88e8a225f2..61fa2435dff8 100644 --- a/src/Access/DiskAccessStorage.cpp +++ b/src/Access/DiskAccessStorage.cpp @@ -45,7 +45,7 @@ namespace } - AccessEntityPtr tryReadEntityFile(const String & file_path, Poco::Logger & log) + AccessEntityPtr tryReadEntityFile(const String & file_path, LoggerPtr log) { try { @@ -53,7 +53,7 @@ namespace } catch (...) { - tryLogCurrentException(&log); + tryLogCurrentException(log); return nullptr; } } @@ -376,7 +376,7 @@ void DiskAccessStorage::reloadAllAndRebuildLists() continue; const auto access_entity_file_path = getEntityFilePath(directory_path, id); - auto entity = tryReadEntityFile(access_entity_file_path, *getLogger()); + auto entity = tryReadEntityFile(access_entity_file_path, getLogger()); if (!entity) continue; diff --git a/src/Access/ExternalAuthenticators.cpp b/src/Access/ExternalAuthenticators.cpp index e4d4d2acd064..0b1244a1012d 100644 --- a/src/Access/ExternalAuthenticators.cpp +++ b/src/Access/ExternalAuthenticators.cpp @@ -255,7 +255,7 @@ void ExternalAuthenticators::reset() resetImpl(); } -void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log) +void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfiguration & config, LoggerPtr log) { std::scoped_lock lock(mutex); resetImpl(); diff --git a/src/Access/ExternalAuthenticators.h b/src/Access/ExternalAuthenticators.h index 7b47c9351fd9..631dbf5f9749 100644 --- a/src/Access/ExternalAuthenticators.h +++ b/src/Access/ExternalAuthenticators.h @@ -31,7 +31,7 @@ class ExternalAuthenticators { public: void reset(); - void setConfiguration(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log); + void setConfiguration(const Poco::Util::AbstractConfiguration & config, LoggerPtr log); // The name and readiness of the credentials must be verified 
before calling these. bool checkLDAPCredentials(const String & server, const BasicCredentials & credentials, diff --git a/src/Access/GSSAcceptor.cpp b/src/Access/GSSAcceptor.cpp index 7170028e4e64..29eddf513205 100644 --- a/src/Access/GSSAcceptor.cpp +++ b/src/Access/GSSAcceptor.cpp @@ -328,7 +328,7 @@ void GSSAcceptorContext::initHandles() } } -String GSSAcceptorContext::processToken(const String & input_token, Poco::Logger * log) +String GSSAcceptorContext::processToken(const String & input_token, LoggerPtr log) { std::scoped_lock lock(gss_global_mutex); @@ -455,7 +455,7 @@ void GSSAcceptorContext::initHandles() { } -String GSSAcceptorContext::processToken(const String &, Poco::Logger *) +String GSSAcceptorContext::processToken(const String &, LoggerPtr) { throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without GSS-API/Kerberos support"); } diff --git a/src/Access/GSSAcceptor.h b/src/Access/GSSAcceptor.h index ba448ae474e8..8d490fb47ae5 100644 --- a/src/Access/GSSAcceptor.h +++ b/src/Access/GSSAcceptor.h @@ -3,6 +3,7 @@ #include "config.h" #include +#include #include #include @@ -42,7 +43,7 @@ class GSSAcceptorContext const String & getRealm() const; bool isFailed() const; - MAYBE_NORETURN String processToken(const String & input_token, Poco::Logger * log); + MAYBE_NORETURN String processToken(const String & input_token, LoggerPtr log); private: void reset(); diff --git a/src/Access/IAccessStorage.cpp b/src/Access/IAccessStorage.cpp index 9468e8d220ab..eb3adf7dfc07 100644 --- a/src/Access/IAccessStorage.cpp +++ b/src/Access/IAccessStorage.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -563,7 +564,7 @@ UUID IAccessStorage::generateRandomID() } -void IAccessStorage::clearConflictsInEntitiesList(std::vector> & entities, const Poco::Logger * log_) +void IAccessStorage::clearConflictsInEntitiesList(std::vector> & entities, const LoggerPtr log_) { std::unordered_map positions_by_id; 
std::unordered_map positions_by_type_and_name[static_cast(AccessEntityType::MAX)]; @@ -619,12 +620,13 @@ void IAccessStorage::clearConflictsInEntitiesList(std::vector +#include #include #include #include @@ -186,9 +188,9 @@ class IAccessStorage : public boost::noncopyable virtual bool areCredentialsValid(const User & user, const Credentials & credentials, const ExternalAuthenticators & external_authenticators) const; virtual bool isAddressAllowed(const User & user, const Poco::Net::IPAddress & address) const; static UUID generateRandomID(); - Poco::Logger * getLogger() const; + LoggerPtr getLogger() const; static String formatEntityTypeWithName(AccessEntityType type, const String & name) { return AccessEntityTypeInfo::get(type).formatEntityNameWithType(name); } - static void clearConflictsInEntitiesList(std::vector> & entities, const Poco::Logger * log_); + static void clearConflictsInEntitiesList(std::vector> & entities, const LoggerPtr log_); [[noreturn]] void throwNotFound(const UUID & id) const; [[noreturn]] void throwNotFound(AccessEntityType type, const String & name) const; [[noreturn]] static void throwBadCast(const UUID & id, AccessEntityType type, const String & name, AccessEntityType required_type); @@ -207,7 +209,8 @@ class IAccessStorage : public boost::noncopyable private: const String storage_name; - mutable std::atomic log = nullptr; + mutable OnceFlag log_initialized; + mutable LoggerPtr log; }; diff --git a/src/Access/KerberosInit.cpp b/src/Access/KerberosInit.cpp index 772938ad9b29..3cda1c8e13c8 100644 --- a/src/Access/KerberosInit.cpp +++ b/src/Access/KerberosInit.cpp @@ -63,7 +63,7 @@ String KerberosInit::fmtError(krb5_error_code code) const void KerberosInit::init(const String & keytab_file, const String & principal, const String & cache_name) { - auto * log = &Poco::Logger::get("KerberosInit"); + auto log = getLogger("KerberosInit"); LOG_TRACE(log,"Trying to authenticate with Kerberos v5"); krb5_error_code ret; diff --git 
a/src/Access/LDAPClient.cpp b/src/Access/LDAPClient.cpp index 9606656f7329..6a2190d4886c 100644 --- a/src/Access/LDAPClient.cpp +++ b/src/Access/LDAPClient.cpp @@ -531,7 +531,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params) for (size_t i = 0; referrals[i]; ++i) { - LOG_WARNING(&Poco::Logger::get("LDAPClient"), "Received reference during LDAP search but not following it: {}", referrals[i]); + LOG_WARNING(getLogger("LDAPClient"), "Received reference during LDAP search but not following it: {}", referrals[i]); } } diff --git a/src/Access/RowPolicyCache.cpp b/src/Access/RowPolicyCache.cpp index bb9da674477c..13140099a639 100644 --- a/src/Access/RowPolicyCache.cpp +++ b/src/Access/RowPolicyCache.cpp @@ -91,7 +91,7 @@ void RowPolicyCache::PolicyInfo::setPolicy(const RowPolicyPtr & policy_) catch (...) { tryLogCurrentException( - &Poco::Logger::get("RowPolicy"), + getLogger("RowPolicy"), String("Could not parse the condition ") + toString(filter_type) + " of row policy " + backQuote(policy->getName())); } diff --git a/src/Backups/BackupCoordinationRemote.cpp b/src/Backups/BackupCoordinationRemote.cpp index 9b4343a1d3b2..ce331656a53a 100644 --- a/src/Backups/BackupCoordinationRemote.cpp +++ b/src/Backups/BackupCoordinationRemote.cpp @@ -167,14 +167,14 @@ BackupCoordinationRemote::BackupCoordinationRemote( { zookeeper_retries_info = ZooKeeperRetriesInfo( "BackupCoordinationRemote", - &Poco::Logger::get("BackupCoordinationRemote"), + getLogger("BackupCoordinationRemote"), keeper_settings.keeper_max_retries, keeper_settings.keeper_retry_initial_backoff_ms, keeper_settings.keeper_retry_max_backoff_ms); createRootNodes(); stage_sync.emplace( - zookeeper_path + "/stage", [this] { return getZooKeeper(); }, &Poco::Logger::get("BackupCoordination")); + zookeeper_path + "/stage", [this] { return getZooKeeper(); }, getLogger("BackupCoordination")); } BackupCoordinationRemote::~BackupCoordinationRemote() diff --git 
a/src/Backups/BackupCoordinationStageSync.cpp b/src/Backups/BackupCoordinationStageSync.cpp index e47732230752..13a4ba15062c 100644 --- a/src/Backups/BackupCoordinationStageSync.cpp +++ b/src/Backups/BackupCoordinationStageSync.cpp @@ -17,7 +17,7 @@ namespace ErrorCodes } -BackupCoordinationStageSync::BackupCoordinationStageSync(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, Poco::Logger * log_) +BackupCoordinationStageSync::BackupCoordinationStageSync(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, LoggerPtr log_) : zookeeper_path(zookeeper_path_) , get_zookeeper(get_zookeeper_) , log(log_) diff --git a/src/Backups/BackupCoordinationStageSync.h b/src/Backups/BackupCoordinationStageSync.h index 623b58fd9fa6..ce746b81a787 100644 --- a/src/Backups/BackupCoordinationStageSync.h +++ b/src/Backups/BackupCoordinationStageSync.h @@ -10,7 +10,7 @@ namespace DB class BackupCoordinationStageSync { public: - BackupCoordinationStageSync(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, Poco::Logger * log_); + BackupCoordinationStageSync(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, LoggerPtr log_); /// Sets the stage of the current host and signal other hosts if there were other hosts waiting for that. 
void set(const String & current_host, const String & new_stage, const String & message); @@ -33,7 +33,7 @@ class BackupCoordinationStageSync String zookeeper_path; zkutil::GetZooKeeper get_zookeeper; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Backups/BackupEntriesCollector.cpp b/src/Backups/BackupEntriesCollector.cpp index 4d117f54342c..b6826ee01acf 100644 --- a/src/Backups/BackupEntriesCollector.cpp +++ b/src/Backups/BackupEntriesCollector.cpp @@ -83,7 +83,7 @@ BackupEntriesCollector::BackupEntriesCollector( , context(context_) , on_cluster_first_sync_timeout(context->getConfigRef().getUInt64("backups.on_cluster_first_sync_timeout", 180000)) , consistent_metadata_snapshot_timeout(context->getConfigRef().getUInt64("backups.consistent_metadata_snapshot_timeout", 600000)) - , log(&Poco::Logger::get("BackupEntriesCollector")) + , log(getLogger("BackupEntriesCollector")) { } diff --git a/src/Backups/BackupEntriesCollector.h b/src/Backups/BackupEntriesCollector.h index eca0dd8e0784..685e3eede7cc 100644 --- a/src/Backups/BackupEntriesCollector.h +++ b/src/Backups/BackupEntriesCollector.h @@ -95,7 +95,7 @@ class BackupEntriesCollector : private boost::noncopyable ContextPtr context; std::chrono::milliseconds on_cluster_first_sync_timeout; std::chrono::milliseconds consistent_metadata_snapshot_timeout; - Poco::Logger * log; + LoggerPtr log; Strings all_hosts; DDLRenamingMap renaming_map; diff --git a/src/Backups/BackupFileInfo.cpp b/src/Backups/BackupFileInfo.cpp index 24548ca05fed..6ae6c225581f 100644 --- a/src/Backups/BackupFileInfo.cpp +++ b/src/Backups/BackupFileInfo.cpp @@ -114,7 +114,7 @@ String BackupFileInfo::describe() const } -BackupFileInfo buildFileInfoForBackupEntry(const String & file_name, const BackupEntryPtr & backup_entry, const BackupPtr & base_backup, Poco::Logger * log) +BackupFileInfo buildFileInfoForBackupEntry(const String & file_name, const BackupEntryPtr & backup_entry, const BackupPtr & base_backup, LoggerPtr log) { auto 
adjusted_path = removeLeadingSlash(file_name); @@ -132,7 +132,7 @@ BackupFileInfo buildFileInfoForBackupEntry(const String & file_name, const Backu } if (!log) - log = &Poco::Logger::get("FileInfoFromBackupEntry"); + log = getLogger("FileInfoFromBackupEntry"); std::optional base_backup_file_info = getInfoAboutFileFromBaseBackupIfExists(base_backup, adjusted_path); @@ -219,7 +219,7 @@ BackupFileInfos buildFileInfosForBackupEntries(const BackupEntries & backup_entr std::exception_ptr exception; auto thread_group = CurrentThread::getGroup(); - Poco::Logger * log = &Poco::Logger::get("FileInfosFromBackupEntries"); + LoggerPtr log = getLogger("FileInfosFromBackupEntries"); for (size_t i = 0; i != backup_entries.size(); ++i) { diff --git a/src/Backups/BackupFileInfo.h b/src/Backups/BackupFileInfo.h index 96df8ab2e0b4..883ec49acd56 100644 --- a/src/Backups/BackupFileInfo.h +++ b/src/Backups/BackupFileInfo.h @@ -62,7 +62,7 @@ struct BackupFileInfo using BackupFileInfos = std::vector; /// Builds a BackupFileInfo for a specified backup entry. -BackupFileInfo buildFileInfoForBackupEntry(const String & file_name, const BackupEntryPtr & backup_entry, const BackupPtr & base_backup, Poco::Logger * log); +BackupFileInfo buildFileInfoForBackupEntry(const String & file_name, const BackupEntryPtr & backup_entry, const BackupPtr & base_backup, LoggerPtr log); /// Builds a vector of BackupFileInfos for specified backup entries. 
BackupFileInfos buildFileInfosForBackupEntries(const BackupEntries & backup_entries, const BackupPtr & base_backup, ThreadPool & thread_pool); diff --git a/src/Backups/BackupIO_Disk.cpp b/src/Backups/BackupIO_Disk.cpp index cc6076541d0d..c91b2f5a2b7e 100644 --- a/src/Backups/BackupIO_Disk.cpp +++ b/src/Backups/BackupIO_Disk.cpp @@ -14,7 +14,7 @@ namespace ErrorCodes } BackupReaderDisk::BackupReaderDisk(const DiskPtr & disk_, const String & path_) - : disk(disk_), path(path_), log(&Poco::Logger::get("BackupReaderDisk")) + : disk(disk_), path(path_), log(getLogger("BackupReaderDisk")) { } diff --git a/src/Backups/BackupIO_Disk.h b/src/Backups/BackupIO_Disk.h index 600e4f8ff39c..b4cad1250aab 100644 --- a/src/Backups/BackupIO_Disk.h +++ b/src/Backups/BackupIO_Disk.h @@ -24,7 +24,7 @@ class BackupReaderDisk : public IBackupReader private: DiskPtr disk; std::filesystem::path path; - Poco::Logger * log; + LoggerPtr log; }; class BackupWriterDisk : public IBackupWriter diff --git a/src/Backups/BackupIO_File.cpp b/src/Backups/BackupIO_File.cpp index 5bf6d54928d4..a8856c21cf3b 100644 --- a/src/Backups/BackupIO_File.cpp +++ b/src/Backups/BackupIO_File.cpp @@ -12,7 +12,7 @@ namespace fs = std::filesystem; namespace DB { -BackupReaderFile::BackupReaderFile(const String & path_) : path(path_), log(&Poco::Logger::get("BackupReaderFile")) +BackupReaderFile::BackupReaderFile(const String & path_) : path(path_), log(getLogger("BackupReaderFile")) { } diff --git a/src/Backups/BackupIO_File.h b/src/Backups/BackupIO_File.h index e1f4324a39f2..e37c5a6a107f 100644 --- a/src/Backups/BackupIO_File.h +++ b/src/Backups/BackupIO_File.h @@ -21,7 +21,7 @@ class BackupReaderFile : public IBackupReader private: std::filesystem::path path; - Poco::Logger * log; + LoggerPtr log; }; class BackupWriterFile : public IBackupWriter diff --git a/src/Backups/BackupIO_S3.cpp b/src/Backups/BackupIO_S3.cpp index d1f4a15b5524..5f36398d1fd9 100644 --- a/src/Backups/BackupIO_S3.cpp +++ 
b/src/Backups/BackupIO_S3.cpp @@ -105,7 +105,7 @@ BackupReaderS3::BackupReaderS3( , client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_)) , read_settings(context_->getReadSettings()) , request_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString()).request_settings) - , log(&Poco::Logger::get("BackupReaderS3")) + , log(getLogger("BackupReaderS3")) { request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint } @@ -165,7 +165,7 @@ BackupWriterS3::BackupWriterS3( , client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_)) , read_settings(context_->getReadSettings()) , request_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString()).request_settings) - , log(&Poco::Logger::get("BackupWriterS3")) + , log(getLogger("BackupWriterS3")) { request_settings.updateFromSettings(context_->getSettingsRef()); request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint diff --git a/src/Backups/BackupIO_S3.h b/src/Backups/BackupIO_S3.h index 94e612484283..c62781cc310b 100644 --- a/src/Backups/BackupIO_S3.h +++ b/src/Backups/BackupIO_S3.h @@ -31,7 +31,7 @@ class BackupReaderS3 : public IBackupReader std::shared_ptr client; ReadSettings read_settings; S3Settings::RequestSettings request_settings; - Poco::Logger * log; + LoggerPtr log; }; @@ -78,7 +78,7 @@ class BackupWriterS3 : public IBackupWriter std::shared_ptr client; ReadSettings read_settings; S3Settings::RequestSettings request_settings; - Poco::Logger * log; + LoggerPtr log; std::optional supports_batch_delete; }; diff --git a/src/Backups/BackupImpl.cpp b/src/Backups/BackupImpl.cpp index 0ab1bf7f997f..186eb10335f8 100644 --- a/src/Backups/BackupImpl.cpp +++ b/src/Backups/BackupImpl.cpp @@ -115,7 +115,7 @@ BackupImpl::BackupImpl( , version(CURRENT_BACKUP_VERSION) , 
base_backup_info(base_backup_info_) , deduplicate_files(deduplicate_files_) - , log(&Poco::Logger::get("BackupImpl")) + , log(getLogger("BackupImpl")) { open(context_); } diff --git a/src/Backups/BackupImpl.h b/src/Backups/BackupImpl.h index bf94926c46c7..6e1ca48350ba 100644 --- a/src/Backups/BackupImpl.h +++ b/src/Backups/BackupImpl.h @@ -144,7 +144,7 @@ class BackupImpl : public IBackup bool writing_finalized = false; bool deduplicate_files = true; - const Poco::Logger * log; + const LoggerPtr log; }; } diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index 4572e4818298..0650dfc97d88 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -164,7 +164,7 @@ namespace BackupsWorker::BackupsWorker(size_t num_backup_threads, size_t num_restore_threads, bool allow_concurrent_backups_, bool allow_concurrent_restores_) : backups_thread_pool(CurrentMetrics::BackupsThreads, CurrentMetrics::BackupsThreadsActive, num_backup_threads, /* max_free_threads = */ 0, num_backup_threads) , restores_thread_pool(CurrentMetrics::RestoreThreads, CurrentMetrics::RestoreThreadsActive, num_restore_threads, /* max_free_threads = */ 0, num_restore_threads) - , log(&Poco::Logger::get("BackupsWorker")) + , log(getLogger("BackupsWorker")) , allow_concurrent_backups(allow_concurrent_backups_) , allow_concurrent_restores(allow_concurrent_restores_) { diff --git a/src/Backups/BackupsWorker.h b/src/Backups/BackupsWorker.h index d319daf42bdb..1f11a5d02ccc 100644 --- a/src/Backups/BackupsWorker.h +++ b/src/Backups/BackupsWorker.h @@ -140,7 +140,7 @@ class BackupsWorker std::atomic num_active_backups = 0; std::atomic num_active_restores = 0; mutable std::mutex infos_mutex; - Poco::Logger * log; + LoggerPtr log; const bool allow_concurrent_backups; const bool allow_concurrent_restores; }; diff --git a/src/Backups/RestoreCoordinationRemote.cpp b/src/Backups/RestoreCoordinationRemote.cpp index 10d085a696a8..fc4539811ce6 100644 --- 
a/src/Backups/RestoreCoordinationRemote.cpp +++ b/src/Backups/RestoreCoordinationRemote.cpp @@ -29,7 +29,7 @@ RestoreCoordinationRemote::RestoreCoordinationRemote( createRootNodes(); stage_sync.emplace( - zookeeper_path + "/stage", [this] { return getZooKeeper(); }, &Poco::Logger::get("RestoreCoordination")); + zookeeper_path + "/stage", [this] { return getZooKeeper(); }, getLogger("RestoreCoordination")); } RestoreCoordinationRemote::~RestoreCoordinationRemote() diff --git a/src/Backups/RestorerFromBackup.cpp b/src/Backups/RestorerFromBackup.cpp index 77f7512e0d17..2e49f038ca54 100644 --- a/src/Backups/RestorerFromBackup.cpp +++ b/src/Backups/RestorerFromBackup.cpp @@ -95,7 +95,7 @@ RestorerFromBackup::RestorerFromBackup( , context(context_) , on_cluster_first_sync_timeout(context->getConfigRef().getUInt64("backups.on_cluster_first_sync_timeout", 180000)) , create_table_timeout(context->getConfigRef().getUInt64("backups.create_table_timeout", 300000)) - , log(&Poco::Logger::get("RestorerFromBackup")) + , log(getLogger("RestorerFromBackup")) , tables_dependencies("RestorerFromBackup") { } diff --git a/src/Backups/RestorerFromBackup.h b/src/Backups/RestorerFromBackup.h index 93b5a6c76940..3ab19c7618c3 100644 --- a/src/Backups/RestorerFromBackup.h +++ b/src/Backups/RestorerFromBackup.h @@ -76,7 +76,7 @@ class RestorerFromBackup : private boost::noncopyable ContextMutablePtr context; std::chrono::milliseconds on_cluster_first_sync_timeout; std::chrono::milliseconds create_table_timeout; - Poco::Logger * log; + LoggerPtr log; Strings all_hosts; DDLRenamingMap renaming_map; diff --git a/src/BridgeHelper/IBridgeHelper.h b/src/BridgeHelper/IBridgeHelper.h index d4762087cc16..7c0f3ab619a5 100644 --- a/src/BridgeHelper/IBridgeHelper.h +++ b/src/BridgeHelper/IBridgeHelper.h @@ -52,7 +52,7 @@ class IBridgeHelper: protected WithContext virtual const Poco::Util::AbstractConfiguration & getConfig() const = 0; - virtual Poco::Logger * getLog() const = 0; + virtual LoggerPtr 
getLog() const = 0; virtual Poco::Timespan getHTTPTimeout() const = 0; diff --git a/src/BridgeHelper/LibraryBridgeHelper.cpp b/src/BridgeHelper/LibraryBridgeHelper.cpp index 60588951c323..74e7e23fa8cd 100644 --- a/src/BridgeHelper/LibraryBridgeHelper.cpp +++ b/src/BridgeHelper/LibraryBridgeHelper.cpp @@ -8,7 +8,7 @@ namespace DB LibraryBridgeHelper::LibraryBridgeHelper(ContextPtr context_) : IBridgeHelper(context_) , config(context_->getConfigRef()) - , log(&Poco::Logger::get("LibraryBridgeHelper")) + , log(getLogger("LibraryBridgeHelper")) , http_timeout(context_->getGlobalContext()->getSettingsRef().http_receive_timeout.value) , bridge_host(config.getString("library_bridge.host", DEFAULT_HOST)) , bridge_port(config.getUInt("library_bridge.port", DEFAULT_PORT)) diff --git a/src/BridgeHelper/LibraryBridgeHelper.h b/src/BridgeHelper/LibraryBridgeHelper.h index 1723d1f8fb4d..8940f9d1c9ee 100644 --- a/src/BridgeHelper/LibraryBridgeHelper.h +++ b/src/BridgeHelper/LibraryBridgeHelper.h @@ -31,7 +31,7 @@ class LibraryBridgeHelper : public IBridgeHelper const Poco::Util::AbstractConfiguration & getConfig() const override { return config; } - Poco::Logger * getLog() const override { return log; } + LoggerPtr getLog() const override { return log; } Poco::Timespan getHTTPTimeout() const override { return http_timeout; } @@ -40,7 +40,7 @@ class LibraryBridgeHelper : public IBridgeHelper static constexpr inline size_t DEFAULT_PORT = 9012; const Poco::Util::AbstractConfiguration & config; - Poco::Logger * log; + LoggerPtr log; const Poco::Timespan http_timeout; std::string bridge_host; size_t bridge_port; diff --git a/src/BridgeHelper/XDBCBridgeHelper.h b/src/BridgeHelper/XDBCBridgeHelper.h index 00a661a1fc43..cefe6c002a48 100644 --- a/src/BridgeHelper/XDBCBridgeHelper.h +++ b/src/BridgeHelper/XDBCBridgeHelper.h @@ -66,7 +66,7 @@ class XDBCBridgeHelper : public IXDBCBridgeHelper const std::string & connection_string_, bool use_connection_pooling_) : 
IXDBCBridgeHelper(context_->getGlobalContext()) - , log(&Poco::Logger::get(BridgeHelperMixin::getName() + "BridgeHelper")) + , log(getLogger(BridgeHelperMixin::getName() + "BridgeHelper")) , connection_string(connection_string_) , use_connection_pooling(use_connection_pooling_) , http_timeout(http_timeout_) @@ -124,7 +124,7 @@ class XDBCBridgeHelper : public IXDBCBridgeHelper const Poco::Util::AbstractConfiguration & getConfig() const override { return config; } - Poco::Logger * getLog() const override { return log; } + LoggerPtr getLog() const override { return log; } bool startBridgeManually() const override { return BridgeHelperMixin::startBridgeManually(); } @@ -147,7 +147,7 @@ class XDBCBridgeHelper : public IXDBCBridgeHelper private: using Configuration = Poco::Util::AbstractConfiguration; - Poco::Logger * log; + LoggerPtr log; std::string connection_string; bool use_connection_pooling; Poco::Timespan http_timeout; diff --git a/src/Client/Connection.h b/src/Client/Connection.h index b86567e2ed06..6bbfce0e9121 100644 --- a/src/Client/Connection.h +++ b/src/Client/Connection.h @@ -5,6 +5,7 @@ #include #include "config.h" +#include #include #include @@ -233,16 +234,18 @@ class Connection : public IServerConnection { } - Poco::Logger * get() + LoggerPtr get() { - if (!log) - log = &Poco::Logger::get("Connection (" + parent.getDescription() + ")"); + callOnce(log_initialized, [&] { + log = getLogger("Connection (" + parent.getDescription() + ")"); + }); return log; } private: - std::atomic log; + OnceFlag log_initialized; + LoggerPtr log; Connection & parent; }; diff --git a/src/Client/ConnectionEstablisher.cpp b/src/Client/ConnectionEstablisher.cpp index 731ab7c1e91c..5150eddd0e23 100644 --- a/src/Client/ConnectionEstablisher.cpp +++ b/src/Client/ConnectionEstablisher.cpp @@ -23,7 +23,7 @@ ConnectionEstablisher::ConnectionEstablisher( IConnectionPool * pool_, const ConnectionTimeouts * timeouts_, const Settings * settings_, - Poco::Logger * log_, + LoggerPtr 
log_, const QualifiedTableName * table_to_check_) : pool(pool_), timeouts(timeouts_), settings(settings_), log(log_), table_to_check(table_to_check_), is_finished(false) { @@ -109,7 +109,7 @@ ConnectionEstablisherAsync::ConnectionEstablisherAsync( IConnectionPool * pool_, const ConnectionTimeouts * timeouts_, const Settings * settings_, - Poco::Logger * log_, + LoggerPtr log_, const QualifiedTableName * table_to_check_) : connection_establisher(pool_, timeouts_, settings_, log_, table_to_check_) { diff --git a/src/Client/ConnectionEstablisher.h b/src/Client/ConnectionEstablisher.h index 495583ba7ecd..8c4908703dde 100644 --- a/src/Client/ConnectionEstablisher.h +++ b/src/Client/ConnectionEstablisher.h @@ -22,7 +22,7 @@ class ConnectionEstablisher ConnectionEstablisher(IConnectionPool * pool_, const ConnectionTimeouts * timeouts_, const Settings * settings_, - Poco::Logger * log, + LoggerPtr log, const QualifiedTableName * table_to_check = nullptr); /// Establish connection and save it in result, write possible exception message in fail_message. @@ -37,7 +37,7 @@ class ConnectionEstablisher IConnectionPool * pool; const ConnectionTimeouts * timeouts; const Settings * settings; - Poco::Logger * log; + LoggerPtr log; const QualifiedTableName * table_to_check; bool is_finished; @@ -61,7 +61,7 @@ class ConnectionEstablisherAsync ConnectionEstablisherAsync(IConnectionPool * pool_, const ConnectionTimeouts * timeouts_, const Settings * settings_, - Poco::Logger * log_, + LoggerPtr log_, const QualifiedTableName * table_to_check = nullptr); /// Resume establishing connection. 
If the process was not finished, diff --git a/src/Client/ConnectionPool.h b/src/Client/ConnectionPool.h index c3d0955019e7..9809fc207aaf 100644 --- a/src/Client/ConnectionPool.h +++ b/src/Client/ConnectionPool.h @@ -62,7 +62,7 @@ class ConnectionPool : public IConnectionPool, private PoolBase Protocol::Secure secure_, Int64 priority_ = 1) : Base(max_connections_, - &Poco::Logger::get("ConnectionPool (" + host_ + ":" + toString(port_) + ")")), + getLogger("ConnectionPool (" + host_ + ":" + toString(port_) + ")")), host(host_), port(port_), default_database(default_database_), diff --git a/src/Client/ConnectionPoolWithFailover.cpp b/src/Client/ConnectionPoolWithFailover.cpp index c72825a54fed..b60b001b8641 100644 --- a/src/Client/ConnectionPoolWithFailover.cpp +++ b/src/Client/ConnectionPoolWithFailover.cpp @@ -29,7 +29,7 @@ ConnectionPoolWithFailover::ConnectionPoolWithFailover( LoadBalancing load_balancing, time_t decrease_error_period_, size_t max_error_cap_) - : Base(std::move(nested_pools_), decrease_error_period_, max_error_cap_, &Poco::Logger::get("ConnectionPoolWithFailover")) + : Base(std::move(nested_pools_), decrease_error_period_, max_error_cap_, getLogger("ConnectionPoolWithFailover")) , get_priority_load_balancing(load_balancing) { const std::string & local_hostname = getFQDNOrHostName(); diff --git a/src/Client/HedgedConnectionsFactory.cpp b/src/Client/HedgedConnectionsFactory.cpp index 1179aedc285a..a813b53d05f2 100644 --- a/src/Client/HedgedConnectionsFactory.cpp +++ b/src/Client/HedgedConnectionsFactory.cpp @@ -26,7 +26,7 @@ HedgedConnectionsFactory::HedgedConnectionsFactory( const Settings * settings_, const ConnectionTimeouts & timeouts_, std::shared_ptr table_to_check_) - : pool(pool_), settings(settings_), timeouts(timeouts_), table_to_check(table_to_check_), log(&Poco::Logger::get("HedgedConnectionsFactory")) + : pool(pool_), settings(settings_), timeouts(timeouts_), table_to_check(table_to_check_), log(getLogger("HedgedConnectionsFactory")) { 
shuffled_pools = pool->getShuffledPools(settings); for (auto shuffled_pool : shuffled_pools) diff --git a/src/Client/HedgedConnectionsFactory.h b/src/Client/HedgedConnectionsFactory.h index 194e962d5494..d6be6e8ab884 100644 --- a/src/Client/HedgedConnectionsFactory.h +++ b/src/Client/HedgedConnectionsFactory.h @@ -129,7 +129,7 @@ class HedgedConnectionsFactory int last_used_index = -1; bool fallback_to_stale_replicas; Epoll epoll; - Poco::Logger * log; + LoggerPtr log; std::string fail_messages; /// The maximum number of attempts to connect to replicas. diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index 99073d79bcd7..b927e29198a0 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -54,7 +54,7 @@ AsynchronousMetrics::AsynchronousMetrics( int update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_) : update_period(update_period_seconds) - , log(&Poco::Logger::get("AsynchronousMetrics")) + , log(getLogger("AsynchronousMetrics")) , protocol_server_metrics_func(protocol_server_metrics_func_) { #if defined(OS_LINUX) @@ -104,7 +104,7 @@ void AsynchronousMetrics::openSensors() catch (const ErrnoException & e) { LOG_WARNING( - &Poco::Logger::get("AsynchronousMetrics"), + getLogger("AsynchronousMetrics"), "Thermal monitor '{}' exists but could not be read: {}.", thermal_device_index, errnoToString(e.getErrno())); @@ -233,7 +233,7 @@ void AsynchronousMetrics::openSensorsChips() catch (const ErrnoException & e) { LOG_WARNING( - &Poco::Logger::get("AsynchronousMetrics"), + getLogger("AsynchronousMetrics"), "Hardware monitor '{}', sensor '{}' exists but could not be read: {}.", hwmon_name, sensor_index, diff --git a/src/Common/AsynchronousMetrics.h b/src/Common/AsynchronousMetrics.h index d104b872f52b..f48cbb3642dd 100644 --- a/src/Common/AsynchronousMetrics.h +++ b/src/Common/AsynchronousMetrics.h @@ -83,7 +83,7 @@ class AsynchronousMetrics bool first_run = true; 
TimePoint previous_update_time; - Poco::Logger * log; + LoggerPtr log; private: virtual void updateImpl(AsynchronousMetricValues & new_values, TimePoint update_time, TimePoint current_time) = 0; virtual void logImpl(AsynchronousMetricValues &) {} diff --git a/src/Common/AtomicLogger.h b/src/Common/AtomicLogger.h new file mode 100644 index 000000000000..4bda55e070b7 --- /dev/null +++ b/src/Common/AtomicLogger.h @@ -0,0 +1,51 @@ +#pragma once + +#include + +#include +#include +#include + + +/** AtomicLogger allows to atomically change logger. + * Standard library does not have atomic_shared_ptr, and we do not use std::atomic* operations, + * because standard library implementation uses fixed table of mutexes, and it is better to avoid contention here. + */ +class AtomicLogger +{ +public: + explicit AtomicLogger(LoggerPtr logger_) + : logger(std::move(logger_)) + {} + + explicit AtomicLogger(const std::string & log_name) + : AtomicLogger(::getLogger(log_name)) + {} + + void store(LoggerPtr new_logger) + { + std::lock_guard lock(log_mutex); + logger = std::move(new_logger); + } + + void store(const std::string & new_log_name) + { + auto new_logger = ::getLogger(new_log_name); + store(std::move(new_logger)); + } + + LoggerPtr load() const + { + DB::SharedLockGuard lock(log_mutex); + return logger; + } + + String loadName() const + { + DB::SharedLockGuard lock(log_mutex); + return logger->name(); + } +private: + mutable DB::SharedMutex log_mutex; + LoggerPtr logger; +}; diff --git a/src/Common/Config/ConfigProcessor.cpp b/src/Common/Config/ConfigProcessor.cpp index b632ea95928a..e32373c09954 100644 --- a/src/Common/Config/ConfigProcessor.cpp +++ b/src/Common/Config/ConfigProcessor.cpp @@ -65,24 +65,17 @@ ConfigProcessor::ConfigProcessor( , name_pool(new Poco::XML::NamePool(65521)) , dom_parser(name_pool) { - if (log_to_console && !Poco::Logger::has("ConfigProcessor")) + if (log_to_console && !hasLogger("ConfigProcessor")) { channel_ptr = new Poco::ConsoleChannel; - log = 
&Poco::Logger::create("ConfigProcessor", channel_ptr.get(), Poco::Message::PRIO_TRACE); + log = createLogger("ConfigProcessor", channel_ptr.get(), Poco::Message::PRIO_TRACE); } else { - log = &Poco::Logger::get("ConfigProcessor"); + log = getLogger("ConfigProcessor"); } } -ConfigProcessor::~ConfigProcessor() -{ - if (channel_ptr) /// This means we have created a new console logger in the constructor. - Poco::Logger::destroy("ConfigProcessor"); -} - - /// Vector containing the name of the element and a sorted list of attribute names and values /// (except "remove" and "replace" attributes). /// Serves as a unique identifier of the element contents for comparison. diff --git a/src/Common/Config/ConfigProcessor.h b/src/Common/Config/ConfigProcessor.h index aa8ac71446f6..43c283e5fdff 100644 --- a/src/Common/Config/ConfigProcessor.h +++ b/src/Common/Config/ConfigProcessor.h @@ -7,6 +7,8 @@ #include #include +#include + #include #include #include @@ -43,8 +45,6 @@ class ConfigProcessor bool log_to_console = false, const Substitutions & substitutions = Substitutions()); - ~ConfigProcessor(); - /// Perform config includes and substitutions and return the resulting XML-document. 
/// /// Suppose path is "/path/file.xml" @@ -113,7 +113,7 @@ class ConfigProcessor bool throw_on_bad_incl; - Poco::Logger * log; + LoggerPtr log; Poco::AutoPtr channel_ptr; Substitutions substitutions; diff --git a/src/Common/Config/ConfigReloader.h b/src/Common/Config/ConfigReloader.h index 982e21c91e2c..0c20b9cac905 100644 --- a/src/Common/Config/ConfigReloader.h +++ b/src/Common/Config/ConfigReloader.h @@ -71,7 +71,7 @@ class ConfigReloader static constexpr auto reload_interval = std::chrono::seconds(2); - Poco::Logger * log = &Poco::Logger::get("ConfigReloader"); + LoggerPtr log = getLogger("ConfigReloader"); std::string path; std::string include_from_path; diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp index 81e2624d6db9..3611feddc7d6 100644 --- a/src/Common/DNSResolver.cpp +++ b/src/Common/DNSResolver.cpp @@ -103,7 +103,7 @@ DNSResolver::IPAddresses hostByName(const std::string & host) } catch (const Poco::Net::DNSException & e) { - LOG_ERROR(&Poco::Logger::get("DNSResolver"), "Cannot resolve host ({}), error {}: {}.", host, e.code(), e.name()); + LOG_ERROR(getLogger("DNSResolver"), "Cannot resolve host ({}), error {}: {}.", host, e.code(), e.name()); addresses.clear(); } @@ -200,7 +200,7 @@ struct DNSResolver::Impl }; -DNSResolver::DNSResolver() : impl(std::make_unique()), log(&Poco::Logger::get("DNSResolver")) {} +DNSResolver::DNSResolver() : impl(std::make_unique()), log(getLogger("DNSResolver")) {} Poco::Net::IPAddress DNSResolver::resolveHost(const std::string & host) { diff --git a/src/Common/DNSResolver.h b/src/Common/DNSResolver.h index a05456d3de87..185543ef36d3 100644 --- a/src/Common/DNSResolver.h +++ b/src/Common/DNSResolver.h @@ -72,7 +72,7 @@ class DNSResolver : private boost::noncopyable struct Impl; std::unique_ptr impl; - Poco::Logger * log; + LoggerPtr log; /// Updates cached value and returns true it has been changed. 
bool updateHost(const String & host); diff --git a/src/Common/ErrorHandlers.h b/src/Common/ErrorHandlers.h index f55b6c83a698..d89e17c34957 100644 --- a/src/Common/ErrorHandlers.h +++ b/src/Common/ErrorHandlers.h @@ -28,7 +28,7 @@ class ServerErrorHandler : public Poco::ErrorHandler void exception() override { logException(); } private: - Poco::Logger * log = &Poco::Logger::get("ServerErrorHandler"); + LoggerPtr log = getLogger("ServerErrorHandler"); void logException() { diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp index 7e7ccfa48775..ef53b9a1a7ec 100644 --- a/src/Common/Exception.cpp +++ b/src/Common/Exception.cpp @@ -206,8 +206,9 @@ void tryLogCurrentException(const char * log_name, const std::string & start_of_ /// MemoryTracker until the exception will be logged. LockMemoryExceptionInThread lock_memory_tracker(VariableContext::Global); - /// Poco::Logger::get can allocate memory too - tryLogCurrentExceptionImpl(&Poco::Logger::get(log_name), start_of_message); + /// getLogger can allocate memory too + auto logger = getLogger(log_name); + tryLogCurrentExceptionImpl(logger.get(), start_of_message); } void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_message) @@ -222,6 +223,16 @@ void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_ tryLogCurrentExceptionImpl(logger, start_of_message); } +void tryLogCurrentException(LoggerPtr logger, const std::string & start_of_message) +{ + tryLogCurrentException(logger.get(), start_of_message); +} + +void tryLogCurrentException(const AtomicLogger & logger, const std::string & start_of_message) +{ + tryLogCurrentException(logger.load(), start_of_message); +} + static void getNoSpaceLeftInfoMessage(std::filesystem::path path, String & msg) { path = std::filesystem::absolute(path); @@ -479,6 +490,23 @@ void tryLogException(std::exception_ptr e, Poco::Logger * logger, const std::str } } +void tryLogException(std::exception_ptr e, LoggerPtr logger, const 
std::string & start_of_message) +{ + try + { + std::rethrow_exception(std::move(e)); // NOLINT + } + catch (...) + { + tryLogCurrentException(logger, start_of_message); + } +} + +void tryLogException(std::exception_ptr e, const AtomicLogger & logger, const std::string & start_of_message) +{ + tryLogException(e, logger.load(), start_of_message); +} + std::string getExceptionMessage(const Exception & e, bool with_stacktrace, bool check_embedded_stacktrace) { return getExceptionMessageAndPattern(e, with_stacktrace, check_embedded_stacktrace).text; diff --git a/src/Common/Exception.h b/src/Common/Exception.h index 8e50c1114f47..329d6433ee93 100644 --- a/src/Common/Exception.h +++ b/src/Common/Exception.h @@ -8,6 +8,11 @@ #include #include +#include +#include +#include +#include +#include #include #include @@ -211,6 +216,8 @@ using Exceptions = std::vector; */ void tryLogCurrentException(const char * log_name, const std::string & start_of_message = ""); void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_message = ""); +void tryLogCurrentException(LoggerPtr logger, const std::string & start_of_message = ""); +void tryLogCurrentException(const AtomicLogger & logger, const std::string & start_of_message = ""); /** Prints current exception in canonical format. 
@@ -256,6 +263,8 @@ struct ExecutionStatus void tryLogException(std::exception_ptr e, const char * log_name, const std::string & start_of_message = ""); void tryLogException(std::exception_ptr e, Poco::Logger * logger, const std::string & start_of_message = ""); +void tryLogException(std::exception_ptr e, LoggerPtr logger, const std::string & start_of_message = ""); +void tryLogException(std::exception_ptr e, const AtomicLogger & logger, const std::string & start_of_message = ""); std::string getExceptionMessage(const Exception & e, bool with_stacktrace, bool check_embedded_stacktrace = false); PreformattedMessage getExceptionMessageAndPattern(const Exception & e, bool with_stacktrace, bool check_embedded_stacktrace = false); diff --git a/src/Common/FileChecker.h b/src/Common/FileChecker.h index 1beab31ec8f6..5611a54f8662 100644 --- a/src/Common/FileChecker.h +++ b/src/Common/FileChecker.h @@ -46,7 +46,7 @@ class FileChecker size_t getRealFileSize(const String & path_) const; const DiskPtr disk; - const Poco::Logger * log = &Poco::Logger::get("FileChecker"); + const LoggerPtr log = getLogger("FileChecker"); String files_info_path; std::map map; diff --git a/src/Common/FrequencyHolder.h b/src/Common/FrequencyHolder.h index 74098598441e..428c64ce29ca 100644 --- a/src/Common/FrequencyHolder.h +++ b/src/Common/FrequencyHolder.h @@ -88,7 +88,7 @@ class FrequencyHolder void loadEncodingsFrequency() { - Poco::Logger * log = &Poco::Logger::get("EncodingsFrequency"); + LoggerPtr log = getLogger("EncodingsFrequency"); LOG_TRACE(log, "Loading embedded charset frequencies"); @@ -146,7 +146,7 @@ class FrequencyHolder void loadEmotionalDict() { - Poco::Logger * log = &Poco::Logger::get("EmotionalDict"); + LoggerPtr log = getLogger("EmotionalDict"); LOG_TRACE(log, "Loading embedded emotional dictionary"); auto resource = getResource("tonality_ru.zst"); @@ -184,7 +184,7 @@ class FrequencyHolder void loadProgrammingFrequency() { - Poco::Logger * log = 
&Poco::Logger::get("ProgrammingFrequency"); + LoggerPtr log = getLogger("ProgrammingFrequency"); LOG_TRACE(log, "Loading embedded programming languages frequencies loading"); diff --git a/src/Common/LRUCachePolicy.h b/src/Common/LRUCachePolicy.h index 4aee2135af71..f4eea9d351f3 100644 --- a/src/Common/LRUCachePolicy.h +++ b/src/Common/LRUCachePolicy.h @@ -174,7 +174,7 @@ class LRUCachePolicy : public ICachePolicy (1ull << 63)) { - LOG_ERROR(&Poco::Logger::get("LRUCache"), "LRUCache became inconsistent. There must be a bug in it."); + LOG_ERROR(getLogger("LRUCache"), "LRUCache became inconsistent. There must be a bug in it."); abort(); } } diff --git a/src/Common/LRUResourceCache.h b/src/Common/LRUResourceCache.h index 1fe3075a2a31..4ccaa272346d 100644 --- a/src/Common/LRUResourceCache.h +++ b/src/Common/LRUResourceCache.h @@ -235,7 +235,7 @@ class LRUResourceCache else { // should not reach here - LOG_ERROR(&Poco::Logger::get("LRUResourceCache"), "element is in invalid status."); + LOG_ERROR(getLogger("LRUResourceCache"), "element is in invalid status."); abort(); } } @@ -306,7 +306,7 @@ class LRUResourceCache auto it = cells.find(key); if (it == cells.end() || it->second.reference_count == 0) { - LOG_ERROR(&Poco::Logger::get("LRUResourceCache"), "try to release an invalid element"); + LOG_ERROR(getLogger("LRUResourceCache"), "try to release an invalid element"); abort(); } @@ -359,7 +359,7 @@ class LRUResourceCache auto cell_it = cells.find(key); if (cell_it == cells.end()) { - LOG_ERROR(&Poco::Logger::get("LRUResourceCache"), "LRUResourceCache became inconsistent. There must be a bug in it."); + LOG_ERROR(getLogger("LRUResourceCache"), "LRUResourceCache became inconsistent. There must be a bug in it."); abort(); } @@ -379,7 +379,7 @@ class LRUResourceCache if (loss_weight > current_weight + weight) { - LOG_ERROR(&Poco::Logger::get("LRUResourceCache"), "LRUResourceCache became inconsistent. 
There must be a bug in it."); + LOG_ERROR(getLogger("LRUResourceCache"), "LRUResourceCache became inconsistent. There must be a bug in it."); + abort(); } diff --git a/src/Common/Logger.cpp b/src/Common/Logger.cpp new file mode 100644 index 000000000000..c8d557bc3a3b --- /dev/null +++ b/src/Common/Logger.cpp @@ -0,0 +1,27 @@ +#include +#include + +LoggerPtr getLogger(const std::string & name) +{ + return Poco::Logger::getShared(name); +} + +LoggerPtr createLogger(const std::string & name, Poco::Channel * channel, Poco::Message::Priority level) +{ + return Poco::Logger::createShared(name, channel, level); +} + +LoggerRawPtr getRawLogger(const std::string & name) +{ + return &Poco::Logger::get(name); +} + +LoggerRawPtr createRawLogger(const std::string & name, Poco::Channel * channel, Poco::Message::Priority level) +{ + return &Poco::Logger::create(name, channel, level); +} + +bool hasLogger(const std::string & name) +{ + return Poco::Logger::has(name); +} diff --git a/src/Common/Logger.h b/src/Common/Logger.h new file mode 100644 index 000000000000..6dcdea9a9d87 --- /dev/null +++ b/src/Common/Logger.h @@ -0,0 +1,49 @@ +#pragma once + +#include + +#include +#include +#include + +using LoggerPtr = Poco::LoggerPtr; + +using LoggerRawPtr = Poco::Logger *; + +/** RAII wrappers around Poco/Logger.h. + * + * You should use these functions in case Logger instance lifetime needs to be properly + * managed, because otherwise it will leak memory. + * + * For example when Logger is created when table is created and Logger contains table name. + * Then it must be destroyed when underlying table is destroyed. + */ + +/** Get Logger with specified name. If the Logger does not exist, it is created. + * Logger is destroyed, when last shared ptr that refers to Logger with specified name is destroyed. + */ +LoggerPtr getLogger(const std::string & name); + +/** Create Logger with specified name, channel and logging level. + * If Logger already exists, throws exception. 
+ * Logger is destroyed, when last shared ptr that refers to Logger with specified name is destroyed. + */ +LoggerPtr createLogger(const std::string & name, Poco::Channel * channel, Poco::Message::Priority level = Poco::Message::PRIO_INFORMATION); + +/** Create raw Poco::Logger that will not be destroyed before program termination. + * This can be used in cases when specific Logger instance can be a singleton. + * + * For example you need to pass Logger into low-level libraries as raw pointer, and using + * RAII wrapper is inconvenient. + * + * Generally you should always use getLogger functions. + */ + +LoggerRawPtr getRawLogger(const std::string & name); + +LoggerRawPtr createRawLogger(const std::string & name, Poco::Channel * channel, Poco::Message::Priority level = Poco::Message::PRIO_INFORMATION); + +/** Returns true, if currently Logger with specified name is created. + * Otherwise, returns false. + */ +bool hasLogger(const std::string & name); diff --git a/src/Common/LoggingFormatStringHelpers.h b/src/Common/LoggingFormatStringHelpers.h index b29510a2c933..a918430862a3 100644 --- a/src/Common/LoggingFormatStringHelpers.h +++ b/src/Common/LoggingFormatStringHelpers.h @@ -7,6 +7,8 @@ #include #include +#include + struct PreformattedMessage; consteval void formatStringCheckArgsNumImpl(std::string_view str, size_t nargs); template constexpr std::string_view tryGetStaticFormatString(T && x); @@ -173,10 +175,10 @@ class LogFrequencyLimiterIml static time_t last_cleanup; static std::mutex mutex; - Poco::Logger * logger; + LoggerPtr logger; time_t min_interval_s; public: - LogFrequencyLimiterIml(Poco::Logger * logger_, time_t min_interval_s_) : logger(logger_), min_interval_s(min_interval_s_) {} + LogFrequencyLimiterIml(LoggerPtr logger_, time_t min_interval_s_) : logger(logger_), min_interval_s(min_interval_s_) {} LogFrequencyLimiterIml & operator -> () { return *this; } bool is(Poco::Message::Priority priority) { return logger->is(priority); } @@ -188,18 +190,18 @@ 
class LogFrequencyLimiterIml /// Clears messages that were logged last time more than too_old_threshold_s seconds ago static void cleanup(time_t too_old_threshold_s = 600); - Poco::Logger * getLogger() { return logger; } + LoggerPtr getLogger() { return logger; } }; /// This wrapper is useful to save formatted message into a String before sending it to a logger class LogToStrImpl { String & out_str; - Poco::Logger * logger; + LoggerPtr logger; std::unique_ptr maybe_nested; bool propagate_to_actual_log = true; public: - LogToStrImpl(String & out_str_, Poco::Logger * logger_) : out_str(out_str_), logger(logger_) {} + LogToStrImpl(String & out_str_, LoggerPtr logger_) : out_str(out_str_), logger(logger_) {} LogToStrImpl(String & out_str_, std::unique_ptr && maybe_nested_) : out_str(out_str_), logger(maybe_nested_->getLogger()), maybe_nested(std::move(maybe_nested_)) {} LogToStrImpl & operator -> () { return *this; } diff --git a/src/Common/Macros.cpp b/src/Common/Macros.cpp index e5d4be446c13..7956990b8b4d 100644 --- a/src/Common/Macros.cpp +++ b/src/Common/Macros.cpp @@ -13,6 +13,10 @@ namespace ErrorCodes extern const int SYNTAX_ERROR; } +Macros::Macros(const Poco::Util::AbstractConfiguration & config, const String & key, LoggerPtr log) + : Macros(config, key, log.get()) +{} + Macros::Macros(const Poco::Util::AbstractConfiguration & config, const String & root_key, Poco::Logger * log) { Poco::Util::AbstractConfiguration::Keys keys; diff --git a/src/Common/Macros.h b/src/Common/Macros.h index 6ef87dfd51a7..366192f96d77 100644 --- a/src/Common/Macros.h +++ b/src/Common/Macros.h @@ -26,6 +26,7 @@ class Macros { public: Macros() = default; + Macros(const Poco::Util::AbstractConfiguration & config, const String & key, LoggerPtr log = nullptr); Macros(const Poco::Util::AbstractConfiguration & config, const String & key, Poco::Logger * log = nullptr); struct MacroExpansionInfo diff --git a/src/Common/MemoryTracker.cpp b/src/Common/MemoryTracker.cpp index 
e2129e1013e5..a19413800f71 100644 --- a/src/Common/MemoryTracker.cpp +++ b/src/Common/MemoryTracker.cpp @@ -123,14 +123,14 @@ void MemoryTracker::logPeakMemoryUsage() { log_peak_memory_usage_in_destructor = false; const auto * description = description_ptr.load(std::memory_order_relaxed); - LOG_DEBUG(&Poco::Logger::get("MemoryTracker"), + LOG_DEBUG(getLogger("MemoryTracker"), "Peak memory usage{}: {}.", (description ? " " + std::string(description) : ""), ReadableSize(peak)); } void MemoryTracker::logMemoryUsage(Int64 current) const { const auto * description = description_ptr.load(std::memory_order_relaxed); - LOG_DEBUG(&Poco::Logger::get("MemoryTracker"), + LOG_DEBUG(getLogger("MemoryTracker"), "Current memory usage{}: {}.", (description ? " " + std::string(description) : ""), ReadableSize(current)); } @@ -138,7 +138,7 @@ void MemoryTracker::injectFault() const { if (!memoryTrackerCanThrow(level, true)) { - LOG_WARNING(&Poco::Logger::get("MemoryTracker"), + LOG_WARNING(getLogger("MemoryTracker"), "Cannot inject fault at specific point. 
Uncaught exceptions: {}, stack trace:\n{}", std::uncaught_exceptions(), StackTrace().toString()); return; diff --git a/src/Common/NamedCollections/NamedCollectionUtils.cpp b/src/Common/NamedCollections/NamedCollectionUtils.cpp index 6ec09fb8a77f..acb0b0d3286b 100644 --- a/src/Common/NamedCollections/NamedCollectionUtils.cpp +++ b/src/Common/NamedCollections/NamedCollectionUtils.cpp @@ -134,7 +134,7 @@ class LoadFromSQL : private WithContext else { LOG_WARNING( - &Poco::Logger::get("NamedCollectionsLoadFromSQL"), + getLogger("NamedCollectionsLoadFromSQL"), "Unexpected file {} in named collections directory", current_path.filename().string()); } @@ -336,7 +336,7 @@ void loadFromConfigUnlocked(const Poco::Util::AbstractConfiguration & config, st { auto named_collections = LoadFromConfig(config).getAll(); LOG_TRACE( - &Poco::Logger::get("NamedCollectionsUtils"), + getLogger("NamedCollectionsUtils"), "Loaded {} collections from config", named_collections.size()); NamedCollectionFactory::instance().add(std::move(named_collections)); @@ -363,7 +363,7 @@ void loadFromSQLUnlocked(ContextPtr context, std::unique_lock &) { auto named_collections = LoadFromSQL(context).getAll(); LOG_TRACE( - &Poco::Logger::get("NamedCollectionsUtils"), + getLogger("NamedCollectionsUtils"), "Loaded {} collections from SQL", named_collections.size()); NamedCollectionFactory::instance().add(std::move(named_collections)); diff --git a/src/Common/PipeFDs.cpp b/src/Common/PipeFDs.cpp index 21a9ae59972f..5f9569afcc47 100644 --- a/src/Common/PipeFDs.cpp +++ b/src/Common/PipeFDs.cpp @@ -97,7 +97,7 @@ void LazyPipeFDs::setNonBlockingReadWrite() void LazyPipeFDs::tryIncreaseSize(int desired_size) { #if defined(OS_LINUX) - Poco::Logger * log = &Poco::Logger::get("Pipe"); + LoggerPtr log = getLogger("Pipe"); /** Increase pipe size to avoid slowdown during fine-grained trace collection. 
*/ diff --git a/src/Common/PoolBase.h b/src/Common/PoolBase.h index 96a18ee6591d..4f52e199b513 100644 --- a/src/Common/PoolBase.h +++ b/src/Common/PoolBase.h @@ -180,9 +180,9 @@ class PoolBase : private boost::noncopyable protected: - Poco::Logger * log; + LoggerPtr log; - PoolBase(unsigned max_items_, Poco::Logger * log_) + PoolBase(unsigned max_items_, LoggerPtr log_) : max_items(max_items_), log(log_) { items.reserve(max_items); diff --git a/src/Common/PoolWithFailoverBase.h b/src/Common/PoolWithFailoverBase.h index 0e8fbb4e6d1f..6bff58c0a5a3 100644 --- a/src/Common/PoolWithFailoverBase.h +++ b/src/Common/PoolWithFailoverBase.h @@ -57,7 +57,7 @@ class PoolWithFailoverBase : private boost::noncopyable NestedPools nested_pools_, time_t decrease_error_period_, size_t max_error_cap_, - Poco::Logger * log_) + LoggerPtr log_) : nested_pools(std::move(nested_pools_)) , decrease_error_period(decrease_error_period_) , max_error_cap(max_error_cap_) @@ -157,7 +157,7 @@ class PoolWithFailoverBase : private boost::noncopyable /// The time when error counts were last decreased. 
time_t last_error_decrease_time = 0; - Poco::Logger * log; + LoggerPtr log; }; diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index e1af3ad77243..0e4bc6608f10 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -87,7 +87,7 @@ namespace ErrorCodes template QueryProfilerBase::QueryProfilerBase(UInt64 thread_id, int clock_type, UInt32 period, int pause_signal_) - : log(&Poco::Logger::get("QueryProfiler")) + : log(getLogger("QueryProfiler")) , pause_signal(pause_signal_) { #if defined(SANITIZER) diff --git a/src/Common/QueryProfiler.h b/src/Common/QueryProfiler.h index fb2f470b6d65..2d4d01fff607 100644 --- a/src/Common/QueryProfiler.h +++ b/src/Common/QueryProfiler.h @@ -1,5 +1,7 @@ #pragma once +#include + #include #include #include @@ -37,7 +39,7 @@ class QueryProfilerBase private: void tryCleanup(); - Poco::Logger * log; + LoggerPtr log; #if USE_UNWIND /// Timer id from timer_create(2) diff --git a/src/Common/SLRUCachePolicy.h b/src/Common/SLRUCachePolicy.h index e36bca83c616..f5f3ea54b315 100644 --- a/src/Common/SLRUCachePolicy.h +++ b/src/Common/SLRUCachePolicy.h @@ -236,7 +236,7 @@ class SLRUCachePolicy : public ICachePolicy (1ull << 63)) { - LOG_ERROR(&Poco::Logger::get("SLRUCache"), "SLRUCache became inconsistent. There must be a bug in it."); + LOG_ERROR(getLogger("SLRUCache"), "SLRUCache became inconsistent. 
There must be a bug in it."); abort(); } } diff --git a/src/Common/SensitiveDataMasker.cpp b/src/Common/SensitiveDataMasker.cpp index 34db78d00fb9..60e083a4ea53 100644 --- a/src/Common/SensitiveDataMasker.cpp +++ b/src/Common/SensitiveDataMasker.cpp @@ -109,7 +109,7 @@ SensitiveDataMasker::SensitiveDataMasker(const Poco::Util::AbstractConfiguration { Poco::Util::AbstractConfiguration::Keys keys; config.keys(config_prefix, keys); - Poco::Logger * logger = &Poco::Logger::get("SensitiveDataMaskerConfigRead"); + LoggerPtr logger = getLogger("SensitiveDataMaskerConfigRead"); std::set used_names; diff --git a/src/Common/SharedLockGuard.h b/src/Common/SharedLockGuard.h new file mode 100644 index 000000000000..1a60a69c6d4b --- /dev/null +++ b/src/Common/SharedLockGuard.h @@ -0,0 +1,32 @@ +// SharedLockGuard was introduced to master in https://github.com/ClickHouse/ClickHouse/pull/55278 +// since that PR also brings other features, we do not cherry-pick it directly but rather cut out only required part. +// https://github.com/ClickHouse/ClickHouse/blob/04272fa481353c6c197a79c13656af9ed210e163/src/Common/SharedLockGuard.h +// (master on 25-11-2023) + +#pragma once + +#include + +namespace DB +{ + +/** SharedLockGuard provide RAII-style locking mechanism for acquiring shared ownership of the implementation + * of the SharedLockable concept (for example std::shared_mutex or ContextSharedMutex) supplied as the + * constructor argument. Think of it as std::lock_guard which locks shared. + * + * On construction it acquires shared ownership using `lock_shared` method. + * On destruction shared ownership is released using `unlock_shared` method. 
+ */ +template +class TSA_SCOPED_LOCKABLE SharedLockGuard +{ +public: + explicit SharedLockGuard(Mutex & mutex_) TSA_ACQUIRE_SHARED(mutex_) : mutex(mutex_) { mutex_.lock_shared(); } + + ~SharedLockGuard() TSA_RELEASE() { mutex.unlock_shared(); } + +private: + Mutex & mutex; +}; + +} diff --git a/src/Common/ShellCommand.cpp b/src/Common/ShellCommand.cpp index 0616ac6303cd..dd1a5ac7bb3a 100644 --- a/src/Common/ShellCommand.cpp +++ b/src/Common/ShellCommand.cpp @@ -54,9 +54,9 @@ ShellCommand::ShellCommand(pid_t pid_, int & in_fd_, int & out_fd_, int & err_fd { } -Poco::Logger * ShellCommand::getLogger() +LoggerPtr ShellCommand::getLogger() { - return &Poco::Logger::get("ShellCommand"); + return ::getLogger("ShellCommand"); } ShellCommand::~ShellCommand() diff --git a/src/Common/ShellCommand.h b/src/Common/ShellCommand.h index da65d2ae494f..5ebc1daefa1c 100644 --- a/src/Common/ShellCommand.h +++ b/src/Common/ShellCommand.h @@ -97,7 +97,7 @@ class ShellCommand final bool tryWaitProcessWithTimeout(size_t timeout_in_seconds); - static Poco::Logger * getLogger(); + static LoggerPtr getLogger(); /// Print command name and the list of arguments to log. NOTE: No escaping of arguments is performed. static void logCommand(const char * filename, char * const argv[]); diff --git a/src/Common/StatusFile.cpp b/src/Common/StatusFile.cpp index a9ffce7ddf88..b5c48b6a2467 100644 --- a/src/Common/StatusFile.cpp +++ b/src/Common/StatusFile.cpp @@ -56,9 +56,9 @@ StatusFile::StatusFile(std::string path_, FillFunction fill_) } if (!contents.empty()) - LOG_INFO(&Poco::Logger::get("StatusFile"), "Status file {} already exists - unclean restart. Contents:\n{}", path, contents); + LOG_INFO(getLogger("StatusFile"), "Status file {} already exists - unclean restart. 
Contents:\n{}", path, contents); else - LOG_INFO(&Poco::Logger::get("StatusFile"), "Status file {} already exists and is empty - probably unclean hardware restart.", path); + LOG_INFO(getLogger("StatusFile"), "Status file {} already exists and is empty - probably unclean hardware restart.", path); } fd = ::open(path.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0666); @@ -99,10 +99,10 @@ StatusFile::StatusFile(std::string path_, FillFunction fill_) StatusFile::~StatusFile() { if (0 != close(fd)) - LOG_ERROR(&Poco::Logger::get("StatusFile"), "Cannot close file {}, {}", path, errnoToString()); + LOG_ERROR(getLogger("StatusFile"), "Cannot close file {}, {}", path, errnoToString()); if (0 != unlink(path.c_str())) - LOG_ERROR(&Poco::Logger::get("StatusFile"), "Cannot unlink file {}, {}", path, errnoToString()); + LOG_ERROR(getLogger("StatusFile"), "Cannot unlink file {}, {}", path, errnoToString()); } } diff --git a/src/Common/SystemLogBase.h b/src/Common/SystemLogBase.h index c2cedb2ae390..0f98991cdbbd 100644 --- a/src/Common/SystemLogBase.h +++ b/src/Common/SystemLogBase.h @@ -95,7 +95,7 @@ class SystemLogBase : public ISystemLog static const char * getDefaultOrderBy() { return "(event_date, event_time)"; } protected: - Poco::Logger * log; + LoggerPtr log; // Queue is bounded. But its size is quite large to not block in all normal cases. 
std::vector queue; diff --git a/src/Common/TLDListsHolder.cpp b/src/Common/TLDListsHolder.cpp index 623b88f83a54..c3991b869831 100644 --- a/src/Common/TLDListsHolder.cpp +++ b/src/Common/TLDListsHolder.cpp @@ -55,7 +55,7 @@ void TLDListsHolder::parseConfig(const std::string & top_level_domains_path, con Poco::Util::AbstractConfiguration::Keys config_keys; config.keys("top_level_domains_lists", config_keys); - Poco::Logger * log = &Poco::Logger::get("TLDListsHolder"); + LoggerPtr log = getLogger("TLDListsHolder"); for (const auto & key : config_keys) { diff --git a/src/Common/TaskStatsInfoGetter.cpp b/src/Common/TaskStatsInfoGetter.cpp index 25030ee9670a..d8c649e5dde7 100644 --- a/src/Common/TaskStatsInfoGetter.cpp +++ b/src/Common/TaskStatsInfoGetter.cpp @@ -213,7 +213,7 @@ bool checkPermissionsImpl() { /// This error happens all the time when running inside Docker - consider it ok, /// don't create noise with this error. - LOG_DEBUG(&Poco::Logger::get(__PRETTY_FUNCTION__), getCurrentExceptionMessageAndPattern(/* with_stacktrace */ false)); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), getCurrentExceptionMessageAndPattern(/* with_stacktrace */ false)); } else { diff --git a/src/Common/ThreadProfileEvents.cpp b/src/Common/ThreadProfileEvents.cpp index 76a4d8b1adf1..bd1bc509057c 100644 --- a/src/Common/ThreadProfileEvents.cpp +++ b/src/Common/ThreadProfileEvents.cpp @@ -299,7 +299,7 @@ static void enablePerfEvent(int event_fd) { if (ioctl(event_fd, PERF_EVENT_IOC_ENABLE, 0)) { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Can't enable perf event with file descriptor {}: '{}' ({})", event_fd, errnoToString(), errno); } @@ -309,7 +309,7 @@ static void disablePerfEvent(int event_fd) { if (ioctl(event_fd, PERF_EVENT_IOC_DISABLE, 0)) { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Can't disable perf event with file descriptor {}: '{}' ({})", event_fd, errnoToString(), errno); } @@ -319,7 
+319,7 @@ static void releasePerfEvent(int event_fd) { if (close(event_fd)) { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Can't close perf event file descriptor {}: {} ({})", event_fd, errnoToString(), errno); } @@ -332,12 +332,12 @@ static bool validatePerfEventDescriptor(int & fd) if (errno == EBADF) { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Event descriptor {} was closed from the outside; reopening", fd); } else { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Error while checking availability of event descriptor {}: {} ({})", fd, errnoToString(), errno); @@ -415,7 +415,7 @@ bool PerfEventsCounters::processThreadLocalChanges(const std::string & needed_ev bool has_cap_sys_admin = hasLinuxCapability(CAP_SYS_ADMIN); if (perf_event_paranoid >= 3 && !has_cap_sys_admin) { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Not enough permissions to record perf events: " "perf_event_paranoid = {} and CAP_SYS_ADMIN = 0", perf_event_paranoid); @@ -443,7 +443,7 @@ bool PerfEventsCounters::processThreadLocalChanges(const std::string & needed_ev // ENOENT means that the event is not supported. Don't log it, because // this is called for each thread and would be too verbose. Log other // error codes because they might signify an error. 
- LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Failed to open perf event {} (event_type={}, event_config={}): " "'{}' ({})", event_info.settings_name, event_info.event_type, event_info.event_config, errnoToString(), errno); @@ -483,7 +483,7 @@ std::vector PerfEventsCounters::eventIndicesFromString(const std::string } else { - LOG_ERROR(&Poco::Logger::get("PerfEvents"), + LOG_ERROR(getLogger("PerfEvents"), "Unknown perf event name '{}' specified in settings", event_name); } } @@ -530,7 +530,7 @@ void PerfEventsCounters::finalizeProfileEvents(ProfileEvents::Counters & profile if (bytes_read != bytes_to_read) { - LOG_WARNING(&Poco::Logger::get("PerfEvents"), + LOG_WARNING(getLogger("PerfEvents"), "Can't read event value from file descriptor {}: '{}' ({})", fd, errnoToString(), errno); current_values[i] = {}; diff --git a/src/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp index 1b783aa9ec41..60ebe0dd809a 100644 --- a/src/Common/ThreadStatus.cpp +++ b/src/Common/ThreadStatus.cpp @@ -71,7 +71,7 @@ ThreadStatus::ThreadStatus() last_rusage = std::make_unique(); memory_tracker.setDescription("(for thread)"); - log = &Poco::Logger::get("ThreadStatus"); + log = getLogger("ThreadStatus"); current_thread = this; diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index 79474f292ecf..fd46e2388eff 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -1,11 +1,13 @@ #pragma once -#include #include +#include #include #include #include #include +#include + #include #include @@ -213,7 +215,7 @@ class ThreadStatus : public boost::noncopyable using Deleter = std::function; Deleter deleter; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; public: ThreadStatus(); diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp index 67ad553bbd30..63958c52094f 100644 --- a/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -53,7 +53,7 @@ void 
ZooKeeper::init(ZooKeeperArgs args_) { args = std::move(args_); - log = &Poco::Logger::get("ZooKeeper"); + log = getLogger("ZooKeeper"); if (args.implementation == "zookeeper") { @@ -1280,7 +1280,7 @@ Coordination::RequestPtr makeExistsRequest(const std::string & path) } -std::string normalizeZooKeeperPath(std::string zookeeper_path, bool check_starts_with_slash, Poco::Logger * log) +std::string normalizeZooKeeperPath(std::string zookeeper_path, bool check_starts_with_slash, LoggerPtr log) { if (!zookeeper_path.empty() && zookeeper_path.back() == '/') zookeeper_path.resize(zookeeper_path.size() - 1); @@ -1316,7 +1316,7 @@ String extractZooKeeperName(const String & path) return default_zookeeper_name; } -String extractZooKeeperPath(const String & path, bool check_starts_with_slash, Poco::Logger * log) +String extractZooKeeperPath(const String & path, bool check_starts_with_slash, LoggerPtr log) { if (path.empty()) throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "ZooKeeper path should not be empty"); diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h index acd6750fceda..c523fd82c304 100644 --- a/src/Common/ZooKeeper/ZooKeeper.h +++ b/src/Common/ZooKeeper/ZooKeeper.h @@ -589,7 +589,7 @@ class ZooKeeper std::mutex mutex; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; std::shared_ptr zk_log; AtomicStopwatch session_uptime; @@ -661,11 +661,11 @@ class EphemeralNodeHolder using EphemeralNodeHolderPtr = EphemeralNodeHolder::Ptr; -String normalizeZooKeeperPath(std::string zookeeper_path, bool check_starts_with_slash, Poco::Logger * log = nullptr); +String normalizeZooKeeperPath(std::string zookeeper_path, bool check_starts_with_slash, LoggerPtr log = nullptr); String extractZooKeeperName(const String & path); -String extractZooKeeperPath(const String & path, bool check_starts_with_slash, Poco::Logger * log = nullptr); +String extractZooKeeperPath(const String & path, bool check_starts_with_slash, LoggerPtr log = nullptr); String 
getSequentialNodeName(const String & prefix, UInt64 number); diff --git a/src/Common/ZooKeeper/ZooKeeperCommon.cpp b/src/Common/ZooKeeper/ZooKeeperCommon.cpp index 1ee56936889f..aaaecaa122e5 100644 --- a/src/Common/ZooKeeper/ZooKeeperCommon.cpp +++ b/src/Common/ZooKeeper/ZooKeeperCommon.cpp @@ -878,7 +878,7 @@ ZooKeeperRequest::~ZooKeeperRequest() constexpr UInt64 max_request_time_ns = 1000000000ULL; /// 1 sec if (max_request_time_ns < elapsed_ns) { - LOG_TEST(&Poco::Logger::get(__PRETTY_FUNCTION__), "Processing of request xid={} took {} ms", xid, elapsed_ns / 1000000UL); + LOG_TEST(getLogger(__PRETTY_FUNCTION__), "Processing of request xid={} took {} ms", xid, elapsed_ns / 1000000UL); } } @@ -899,7 +899,7 @@ ZooKeeperResponse::~ZooKeeperResponse() constexpr UInt64 max_request_time_ns = 1000000000ULL; /// 1 sec if (max_request_time_ns < elapsed_ns) { - LOG_TEST(&Poco::Logger::get(__PRETTY_FUNCTION__), "Processing of response xid={} took {} ms", xid, elapsed_ns / 1000000UL); + LOG_TEST(getLogger(__PRETTY_FUNCTION__), "Processing of response xid={} took {} ms", xid, elapsed_ns / 1000000UL); } } diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp index 8183569a7187..c98e7fa550f2 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -318,7 +318,7 @@ ZooKeeper::ZooKeeper( std::shared_ptr zk_log_) : args(args_) { - log = &Poco::Logger::get("ZooKeeperClient"); + log = getLogger("ZooKeeperClient"); std::atomic_store(&zk_log, std::move(zk_log_)); if (!args.chroot.empty()) diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.h b/src/Common/ZooKeeper/ZooKeeperImpl.h index 91c5083bda10..c67e8faa068e 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.h +++ b/src/Common/ZooKeeper/ZooKeeperImpl.h @@ -258,7 +258,7 @@ class ZooKeeper final : public IKeeper ThreadFromGlobalPool send_thread; ThreadFromGlobalPool receive_thread; - Poco::Logger * log; + LoggerPtr log; void connect( const Nodes & node, diff 
--git a/src/Common/ZooKeeper/ZooKeeperLock.cpp b/src/Common/ZooKeeper/ZooKeeperLock.cpp index a52c942a35f7..6eadcf224cce 100644 --- a/src/Common/ZooKeeper/ZooKeeperLock.cpp +++ b/src/Common/ZooKeeper/ZooKeeperLock.cpp @@ -24,7 +24,7 @@ ZooKeeperLock::ZooKeeperLock( : zookeeper(zookeeper_) , lock_path(fs::path(lock_prefix_) / lock_name_) , lock_message(lock_message_) - , log(&Poco::Logger::get("zkutil::Lock")) + , log(getLogger("zkutil::Lock")) { zookeeper->createIfNotExists(lock_prefix_, ""); } diff --git a/src/Common/ZooKeeper/ZooKeeperLock.h b/src/Common/ZooKeeper/ZooKeeperLock.h index 755ca1333b85..eb6a21fe63a6 100644 --- a/src/Common/ZooKeeper/ZooKeeperLock.h +++ b/src/Common/ZooKeeper/ZooKeeperLock.h @@ -45,7 +45,7 @@ class ZooKeeperLock std::string lock_path; std::string lock_message; - Poco::Logger * log; + LoggerPtr log; bool locked = false; }; diff --git a/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.h b/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.h index 491f97b52bdc..21c1e550d4d4 100644 --- a/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.h +++ b/src/Common/ZooKeeper/ZooKeeperWithFaultInjection.h @@ -46,7 +46,7 @@ class ZooKeeperWithFaultInjection zk::Ptr keeper_prev; std::unique_ptr fault_policy; std::string name; - Poco::Logger * logger = nullptr; + LoggerPtr logger = nullptr; UInt64 calls_total = 0; UInt64 calls_without_fault_injection = 0; const UInt64 seed = 0; @@ -58,7 +58,7 @@ class ZooKeeperWithFaultInjection double fault_injection_probability, UInt64 fault_injection_seed, std::string name_, - Poco::Logger * logger_) + LoggerPtr logger_) : keeper(keeper_), name(std::move(name_)), logger(logger_), seed(fault_injection_seed) { fault_policy = std::make_unique(fault_injection_probability, fault_injection_seed); @@ -76,7 +76,7 @@ class ZooKeeperWithFaultInjection using Ptr = std::shared_ptr; static ZooKeeperWithFaultInjection::Ptr createInstance( - double fault_injection_probability, UInt64 fault_injection_seed, const zk::Ptr & zookeeper, 
std::string name, Poco::Logger * logger) + double fault_injection_probability, UInt64 fault_injection_seed, const zk::Ptr & zookeeper, std::string name, LoggerPtr logger) { /// validate all parameters here, constructor just accept everything diff --git a/src/Common/callOnce.h b/src/Common/callOnce.h new file mode 100644 index 000000000000..402bb7365a14 --- /dev/null +++ b/src/Common/callOnce.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +namespace DB +{ + +using OnceFlag = std::once_flag; + +template +void callOnce(OnceFlag & flag, Callable && func, Args&&... args) +{ + std::call_once(flag, std::forward(func), std::forward(args)...); +} + +} diff --git a/src/Common/logger_useful.h b/src/Common/logger_useful.h index ba1e2e7789b4..f98b96e6772b 100644 --- a/src/Common/logger_useful.h +++ b/src/Common/logger_useful.h @@ -8,6 +8,9 @@ #include #include +#include +#include + namespace Poco { class Logger; } @@ -16,10 +19,11 @@ namespace Poco { class Logger; } namespace { - [[maybe_unused]] const ::Poco::Logger * getLogger(const ::Poco::Logger * logger) { return logger; }; - [[maybe_unused]] const ::Poco::Logger * getLogger(const std::atomic<::Poco::Logger *> & logger) { return logger.load(); }; - [[maybe_unused]] std::unique_ptr getLogger(std::unique_ptr && logger) { return logger; }; - [[maybe_unused]] std::unique_ptr getLogger(std::unique_ptr && logger) { return logger; }; + [[maybe_unused]] LoggerPtr getLoggerHelper(const LoggerPtr & logger) { return logger; } + [[maybe_unused]] LoggerPtr getLoggerHelper(const AtomicLogger & logger) { return logger.load(); } + [[maybe_unused]] const ::Poco::Logger * getLoggerHelper(const ::Poco::Logger * logger) { return logger; } + [[maybe_unused]] std::unique_ptr getLoggerHelper(std::unique_ptr && logger) { return logger; } + [[maybe_unused]] std::unique_ptr getLoggerHelper(std::unique_ptr && logger) { return logger; } } #define LOG_IMPL_FIRST_ARG(X, ...) 
X @@ -32,7 +36,7 @@ namespace #define LOG_IMPL(logger, priority, PRIORITY, ...) do \ { \ - auto _logger = ::getLogger(logger); \ + auto _logger = ::getLoggerHelper(logger); \ const bool _is_clients_log = (DB::CurrentThread::getGroup() != nullptr) && \ (DB::CurrentThread::get().getClientLogsLevel() >= (priority)); \ if (_is_clients_log || _logger->is((PRIORITY))) \ diff --git a/src/Common/parseRemoteDescription.cpp b/src/Common/parseRemoteDescription.cpp index aa7122ffb4c3..b3510f210575 100644 --- a/src/Common/parseRemoteDescription.cpp +++ b/src/Common/parseRemoteDescription.cpp @@ -177,7 +177,7 @@ std::vector> parseRemoteDescriptionForExternalDataba size_t colon = address.find(':'); if (colon == String::npos) { - LOG_WARNING(&Poco::Logger::get("ParseRemoteDescription"), "Port is not found for host: {}. Using default port {}", address, default_port); + LOG_WARNING(getLogger("ParseRemoteDescription"), "Port is not found for host: {}. Using default port {}", address, default_port); result.emplace_back(std::make_pair(address, default_port)); } else diff --git a/src/Common/tests/gtest_log.cpp b/src/Common/tests/gtest_log.cpp index f92866626f9f..7162414fe96e 100644 --- a/src/Common/tests/gtest_log.cpp +++ b/src/Common/tests/gtest_log.cpp @@ -14,7 +14,7 @@ TEST(Logger, Log) { Poco::Logger::root().setLevel("none"); Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::NullChannel())); - Poco::Logger * log = &Poco::Logger::get("Log"); + LoggerPtr log = getLogger("Log"); /// This test checks that we don't pass this string to fmtlib, because it is the only argument. 
EXPECT_NO_THROW(LOG_INFO(log, fmt::runtime("Hello {} World"))); @@ -31,7 +31,6 @@ TEST(Logger, TestLog) LOG_TEST(log, "Hello World"); EXPECT_EQ(oss.str(), "Hello World\n"); - Poco::Logger::destroy("TestLogger"); } { /// Test logs invisible for other levels @@ -44,8 +43,6 @@ TEST(Logger, TestLog) LOG_TEST(log, "Hello World"); EXPECT_EQ(oss.str(), ""); - - Poco::Logger::destroy(std::string{level} + "_Logger"); } } diff --git a/src/Common/tests/gtest_poolbase.cpp b/src/Common/tests/gtest_poolbase.cpp index 20c3281c9646..879b1b166204 100644 --- a/src/Common/tests/gtest_poolbase.cpp +++ b/src/Common/tests/gtest_poolbase.cpp @@ -18,7 +18,7 @@ class MyPoolBase : public PoolBase using Ptr = PoolBase::Ptr; int last_destroy_value = 0; - MyPoolBase() : PoolBase(100, &Poco::Logger::get("MyPoolBase")) { } + MyPoolBase() : PoolBase(100, getLogger("MyPoolBase")) { } protected: ObjectPtr allocObject() override { return std::make_shared(); } diff --git a/src/Compression/CompressionCodecDeflateQpl.cpp b/src/Compression/CompressionCodecDeflateQpl.cpp index 29d90b7dbd6c..93ccaaeb3083 100644 --- a/src/Compression/CompressionCodecDeflateQpl.cpp +++ b/src/Compression/CompressionCodecDeflateQpl.cpp @@ -31,7 +31,7 @@ DeflateQplJobHWPool::DeflateQplJobHWPool() : random_engine(std::random_device()()) , distribution(0, MAX_HW_JOB_NUMBER - 1) { - Poco::Logger * log = &Poco::Logger::get("DeflateQplJobHWPool"); + LoggerPtr log = getLogger("DeflateQplJobHWPool"); UInt32 job_size = 0; const char * qpl_version = qpl_get_library_version(); @@ -117,7 +117,7 @@ void DeflateQplJobHWPool::unLockJob(UInt32 index) //HardwareCodecDeflateQpl HardwareCodecDeflateQpl::HardwareCodecDeflateQpl() - :log(&Poco::Logger::get("HardwareCodecDeflateQpl")) + :log(getLogger("HardwareCodecDeflateQpl")) { } diff --git a/src/Compression/CompressionCodecDeflateQpl.h b/src/Compression/CompressionCodecDeflateQpl.h index 3171a898311b..7c534f09f98e 100644 --- a/src/Compression/CompressionCodecDeflateQpl.h +++ 
b/src/Compression/CompressionCodecDeflateQpl.h @@ -84,7 +84,7 @@ class HardwareCodecDeflateQpl /// For each submission, push job ID && job object into this map; /// For flush, pop out job ID && job object from this map. Use job ID to release job lock and use job object to check job status till complete. std::map decomp_async_job_map; - Poco::Logger * log; + LoggerPtr log; }; class CompressionCodecDeflateQpl final : public ICompressionCodec diff --git a/src/Compression/CompressionCodecEncrypted.cpp b/src/Compression/CompressionCodecEncrypted.cpp index 022bbd583e46..a7ec9a995b1f 100644 --- a/src/Compression/CompressionCodecEncrypted.cpp +++ b/src/Compression/CompressionCodecEncrypted.cpp @@ -714,7 +714,7 @@ bool CompressionCodecEncrypted::Configuration::tryLoad(const Poco::Util::Abstrac /// if encryption is disabled, print warning about this. void CompressionCodecEncrypted::Configuration::load(const Poco::Util::AbstractConfiguration & config [[maybe_unused]], const String & config_prefix [[maybe_unused]]) { - LOG_WARNING(&Poco::Logger::get("CompressionCodecEncrypted"), "Server was built without SSL support. Encryption is disabled."); + LOG_WARNING(getLogger("CompressionCodecEncrypted"), "Server was built without SSL support. 
Encryption is disabled."); } } diff --git a/src/Coordination/Changelog.cpp b/src/Coordination/Changelog.cpp index d7a57583933d..d10e3c9e65a1 100644 --- a/src/Coordination/Changelog.cpp +++ b/src/Coordination/Changelog.cpp @@ -94,7 +94,7 @@ class ChangelogWriter : existing_changelogs(existing_changelogs_) , log_file_settings(log_file_settings_) , changelogs_dir(changelogs_dir_) - , log(&Poco::Logger::get("Changelog")) + , log(getLogger("Changelog")) { } @@ -375,7 +375,7 @@ class ChangelogWriter const std::filesystem::path changelogs_dir; - Poco::Logger * const log; + LoggerPtr const log; }; struct ChangelogReadResult @@ -411,7 +411,7 @@ class ChangelogReader } /// start_log_index -- all entries with index < start_log_index will be skipped, but accounted into total_entries_read_from_log - ChangelogReadResult readChangelog(IndexToLogEntry & logs, uint64_t start_log_index, Poco::Logger * log) + ChangelogReadResult readChangelog(IndexToLogEntry & logs, uint64_t start_log_index, LoggerPtr log) { ChangelogReadResult result{}; try @@ -508,7 +508,7 @@ class ChangelogReader Changelog::Changelog( const std::string & changelogs_dir_, - Poco::Logger * log_, + LoggerPtr log_, LogFileSettings log_file_settings) : changelogs_dir(changelogs_dir_) , changelogs_detached_dir(changelogs_dir / "detached") diff --git a/src/Coordination/Changelog.h b/src/Coordination/Changelog.h index 2a6645f0f1bf..5e491490ec47 100644 --- a/src/Coordination/Changelog.h +++ b/src/Coordination/Changelog.h @@ -87,7 +87,7 @@ class Changelog public: Changelog( const std::string & changelogs_dir_, - Poco::Logger * log_, + LoggerPtr log_, LogFileSettings log_file_settings); Changelog(Changelog &&) = delete; @@ -171,7 +171,7 @@ class Changelog const std::filesystem::path changelogs_dir; const std::filesystem::path changelogs_detached_dir; const uint64_t rotate_interval; - Poco::Logger * log; + LoggerPtr log; std::mutex writer_mutex; /// Current writer for changelog file diff --git 
a/src/Coordination/FourLetterCommand.cpp b/src/Coordination/FourLetterCommand.cpp index 8a7fdb82fb7c..04c6d6c911e9 100644 --- a/src/Coordination/FourLetterCommand.cpp +++ b/src/Coordination/FourLetterCommand.cpp @@ -191,7 +191,7 @@ void FourLetterCommandFactory::initializeAllowList(KeeperDispatcher & keeper_dis } else { - auto * log = &Poco::Logger::get("FourLetterCommandFactory"); + auto log = getLogger("FourLetterCommandFactory"); LOG_WARNING(log, "Find invalid keeper 4lw command {} when initializing, ignore it.", token); } } diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index 1828182751d8..d470fdce671d 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -53,7 +53,7 @@ namespace ErrorCodes KeeperDispatcher::KeeperDispatcher() : responses_queue(std::numeric_limits::max()) , configuration_and_settings(std::make_shared()) - , log(&Poco::Logger::get("KeeperDispatcher")) + , log(getLogger("KeeperDispatcher")) {} void KeeperDispatcher::requestThread() @@ -813,7 +813,7 @@ Keeper4LWInfo KeeperDispatcher::getKeeper4LWInfo() const void KeeperDispatcher::cleanResources() { #if USE_JEMALLOC - LOG_TRACE(&Poco::Logger::get("KeeperDispatcher"), "Purging unused memory"); + LOG_TRACE(getLogger("KeeperDispatcher"), "Purging unused memory"); Stopwatch watch; mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0); ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurge); diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index 77b5510cbb39..9038c5132ce9 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -75,7 +75,7 @@ class KeeperDispatcher KeeperConfigurationAndSettingsPtr configuration_and_settings; - Poco::Logger * log; + LoggerPtr log; /// Counter for new session_id requests. 
std::atomic internal_session_id_counter{0}; diff --git a/src/Coordination/KeeperLogStore.cpp b/src/Coordination/KeeperLogStore.cpp index d1bd2f9db187..1fdc441543d4 100644 --- a/src/Coordination/KeeperLogStore.cpp +++ b/src/Coordination/KeeperLogStore.cpp @@ -6,7 +6,7 @@ namespace DB KeeperLogStore::KeeperLogStore( const std::string & changelogs_path, LogFileSettings log_file_settings) - : log(&Poco::Logger::get("KeeperLogStore")) + : log(getLogger("KeeperLogStore")) , changelog(changelogs_path, log, log_file_settings) { if (log_file_settings.force_sync) diff --git a/src/Coordination/KeeperLogStore.h b/src/Coordination/KeeperLogStore.h index 108241e024e1..b63980aa7e26 100644 --- a/src/Coordination/KeeperLogStore.h +++ b/src/Coordination/KeeperLogStore.h @@ -74,7 +74,7 @@ class KeeperLogStore : public nuraft::log_store private: mutable std::mutex changelog_lock; - Poco::Logger * log; + LoggerPtr log; Changelog changelog TSA_GUARDED_BY(changelog_lock); }; diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index 269efe914fd8..ec29b01d76c5 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -85,7 +85,7 @@ std::string checkAndGetSuperdigest(const String & user_and_digest) return user_and_digest; } -int32_t getValueOrMaxInt32AndLogWarning(uint64_t value, const std::string & name, Poco::Logger * log) +int32_t getValueOrMaxInt32AndLogWarning(uint64_t value, const std::string & name, LoggerPtr log) { if (value > std::numeric_limits::max()) { @@ -112,7 +112,7 @@ KeeperServer::KeeperServer( KeeperStateMachine::CommitCallback commit_callback) : server_id(configuration_and_settings_->server_id) , coordination_settings(configuration_and_settings_->coordination_settings) - , log(&Poco::Logger::get("KeeperServer")) + , log(getLogger("KeeperServer")) , is_recovering(config.getBool("keeper_server.force_recovery", false)) , keeper_context{std::make_shared()} , 
create_snapshot_on_exit(config.getBool("keeper_server.create_snapshot_on_exit", true)) diff --git a/src/Coordination/KeeperServer.h b/src/Coordination/KeeperServer.h index db4e9c1962e2..26401509b8ad 100644 --- a/src/Coordination/KeeperServer.h +++ b/src/Coordination/KeeperServer.h @@ -44,7 +44,7 @@ class KeeperServer nuraft::ptr last_local_config; - Poco::Logger * log; + LoggerPtr log; /// Callback func which is called by NuRaft on all internal events. /// Used to determine the moment when raft is ready to server new requests diff --git a/src/Coordination/KeeperSnapshotManager.cpp b/src/Coordination/KeeperSnapshotManager.cpp index 8b80db3e5209..e3fc2dc04155 100644 --- a/src/Coordination/KeeperSnapshotManager.cpp +++ b/src/Coordination/KeeperSnapshotManager.cpp @@ -351,7 +351,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial { if (keeper_context->ignore_system_path_on_startup || keeper_context->server_state != KeeperContext::Phase::INIT) { - LOG_ERROR(&Poco::Logger::get("KeeperSnapshotManager"), "{}. Ignoring it", error_msg); + LOG_ERROR(getLogger("KeeperSnapshotManager"), "{}. Ignoring it", error_msg); continue; } else @@ -367,7 +367,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial { if (keeper_context->ignore_system_path_on_startup || keeper_context->server_state != KeeperContext::Phase::INIT) { - LOG_ERROR(&Poco::Logger::get("KeeperSnapshotManager"), "{}. Ignoring it", error_msg); + LOG_ERROR(getLogger("KeeperSnapshotManager"), "{}. Ignoring it", error_msg); node = KeeperStorage::Node{}; } else @@ -408,7 +408,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial { #ifdef NDEBUG /// TODO (alesapin) remove this, it should be always CORRUPTED_DATA. 
- LOG_ERROR(&Poco::Logger::get("KeeperSnapshotManager"), "Children counter in stat.numChildren {}" + LOG_ERROR(getLogger("KeeperSnapshotManager"), "Children counter in stat.numChildren {}" " is different from actual children size {} for node {}", itr.value.stat.numChildren, itr.value.getChildren().size(), itr.key); #else throw Exception(ErrorCodes::LOGICAL_ERROR, "Children counter in stat.numChildren {}" diff --git a/src/Coordination/KeeperSnapshotManagerS3.cpp b/src/Coordination/KeeperSnapshotManagerS3.cpp index db3ff235b67f..5aedd361626c 100644 --- a/src/Coordination/KeeperSnapshotManagerS3.cpp +++ b/src/Coordination/KeeperSnapshotManagerS3.cpp @@ -42,7 +42,7 @@ struct KeeperSnapshotManagerS3::S3Configuration KeeperSnapshotManagerS3::KeeperSnapshotManagerS3() : snapshots_s3_queue(std::numeric_limits::max()) - , log(&Poco::Logger::get("KeeperSnapshotManagerS3")) + , log(getLogger("KeeperSnapshotManagerS3")) , uuid(UUIDHelpers::generateV4()) {} diff --git a/src/Coordination/KeeperSnapshotManagerS3.h b/src/Coordination/KeeperSnapshotManagerS3.h index 197f528b1927..33328248fe2f 100644 --- a/src/Coordination/KeeperSnapshotManagerS3.h +++ b/src/Coordination/KeeperSnapshotManagerS3.h @@ -43,7 +43,7 @@ class KeeperSnapshotManagerS3 std::atomic shutdown_called{false}; - Poco::Logger * log; + LoggerPtr log; UUID uuid; diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp index f13f7413b648..233ee3112c13 100644 --- a/src/Coordination/KeeperStateMachine.cpp +++ b/src/Coordination/KeeperStateMachine.cpp @@ -60,7 +60,7 @@ KeeperStateMachine::KeeperStateMachine( , responses_queue(responses_queue_) , snapshots_queue(snapshots_queue_) , last_committed_idx(0) - , log(&Poco::Logger::get("KeeperStateMachine")) + , log(getLogger("KeeperStateMachine")) , superdigest(superdigest_) , keeper_context(keeper_context_) , snapshot_manager_s3(snapshot_manager_s3_) @@ -131,7 +131,7 @@ void assertDigest( if (!KeeperStorage::checkDigest(first, second)) { 
LOG_FATAL( - &Poco::Logger::get("KeeperStateMachine"), + getLogger("KeeperStateMachine"), "Digest for nodes is not matching after {} request of type '{}'.\nExpected digest - {}, actual digest - {} (digest " "{}). Keeper will terminate to avoid inconsistencies.\nExtra information about the request:\n{}", committing ? "committing" : "preprocessing", @@ -464,7 +464,7 @@ void KeeperStateMachine::save_logical_snp_obj( } } -static int bufferFromFile(Poco::Logger * log, const std::string & path, nuraft::ptr & data_out) +static int bufferFromFile(LoggerPtr log, const std::string & path, nuraft::ptr & data_out) { if (path.empty() || !std::filesystem::exists(path)) { diff --git a/src/Coordination/KeeperStateMachine.h b/src/Coordination/KeeperStateMachine.h index 1de9a3e6fa5c..00dbbcb4f1f8 100644 --- a/src/Coordination/KeeperStateMachine.h +++ b/src/Coordination/KeeperStateMachine.h @@ -144,7 +144,7 @@ class KeeperStateMachine : public nuraft::state_machine /// Last committed Raft log number. std::atomic last_committed_idx; - Poco::Logger * log; + LoggerPtr log; /// Cluster config for our quorum. 
/// It's a copy of config stored in StateManager, but here diff --git a/src/Coordination/KeeperStateManager.cpp b/src/Coordination/KeeperStateManager.cpp index cfb3519e5976..e4d963691e2e 100644 --- a/src/Coordination/KeeperStateManager.cpp +++ b/src/Coordination/KeeperStateManager.cpp @@ -216,7 +216,7 @@ KeeperStateManager::KeeperStateManager( , secure(false) , log_store(nuraft::cs_new(logs_path, LogFileSettings{.force_sync =false, .compress_logs = false, .rotate_interval = 5000})) , server_state_path(state_file_path) - , logger(&Poco::Logger::get("KeeperStateManager")) + , logger(getLogger("KeeperStateManager")) { auto peer_config = nuraft::cs_new(my_server_id, host + ":" + std::to_string(port)); configuration_wrapper.cluster_config = nuraft::cs_new(); @@ -247,7 +247,7 @@ KeeperStateManager::KeeperStateManager( .overallocate_size = coordination_settings->log_file_overallocate_size })) , server_state_path(state_file_path) - , logger(&Poco::Logger::get("KeeperStateManager")) + , logger(getLogger("KeeperStateManager")) { } @@ -448,7 +448,7 @@ ConfigUpdateActions KeeperStateManager::getConfigurationDiff(const Poco::Util::A if (old_endpoint != server_config->get_endpoint()) { LOG_WARNING( - &Poco::Logger::get("RaftConfiguration"), + getLogger("RaftConfiguration"), "Config will be ignored because a server with ID {} is already present in the cluster on a different endpoint ({}). " "The endpoint of the current servers should not be changed. For servers on a new endpoint, please use a new ID.", new_id, diff --git a/src/Coordination/KeeperStateManager.h b/src/Coordination/KeeperStateManager.h index 5d210f8c0ea5..5e54938e478f 100644 --- a/src/Coordination/KeeperStateManager.h +++ b/src/Coordination/KeeperStateManager.h @@ -138,7 +138,7 @@ class KeeperStateManager : public nuraft::state_mgr const std::filesystem::path server_state_path; - Poco::Logger * logger; + LoggerPtr logger; public: /// Parse configuration from xml config. 
diff --git a/src/Coordination/KeeperStorage.cpp b/src/Coordination/KeeperStorage.cpp index 6b2696034f19..d06948c19cd2 100644 --- a/src/Coordination/KeeperStorage.cpp +++ b/src/Coordination/KeeperStorage.cpp @@ -594,7 +594,7 @@ namespace [[noreturn]] void onStorageInconsistency() { LOG_ERROR( - &Poco::Logger::get("KeeperStorage"), + getLogger("KeeperStorage"), "Inconsistency found between uncommitted and committed data. Keeper will terminate to avoid undefined behaviour."); std::terminate(); } @@ -849,7 +849,7 @@ void handleSystemNodeModification(const KeeperContext & keeper_context, std::str "If you still want to ignore it, you can set 'keeper_server.ignore_system_path_on_startup' to true.", error_msg); - LOG_ERROR(&Poco::Logger::get("KeeperStorage"), fmt::runtime(error_msg)); + LOG_ERROR(getLogger("KeeperStorage"), fmt::runtime(error_msg)); } } @@ -2282,7 +2282,7 @@ void KeeperStorage::rollbackRequest(int64_t rollback_zxid, bool allow_missing) } catch (...) { - LOG_FATAL(&Poco::Logger::get("KeeperStorage"), "Failed to rollback log. Terminating to avoid inconsistencies"); + LOG_FATAL(getLogger("KeeperStorage"), "Failed to rollback log. 
Terminating to avoid inconsistencies"); std::terminate(); } } diff --git a/src/Coordination/LoggerWrapper.h b/src/Coordination/LoggerWrapper.h index ae3ff1553b09..5e2378da6f9a 100644 --- a/src/Coordination/LoggerWrapper.h +++ b/src/Coordination/LoggerWrapper.h @@ -25,7 +25,7 @@ class LoggerWrapper : public nuraft::logger public: LoggerWrapper(const std::string & name, LogsLevel level_) - : log(&Poco::Logger::get(name)) + : log(getLogger(name)) , level(level_) { log->setLevel(static_cast(LEVELS.at(level))); @@ -56,7 +56,7 @@ class LoggerWrapper : public nuraft::logger } private: - Poco::Logger * log; + LoggerPtr log; std::atomic level; }; diff --git a/src/Coordination/TinyContext.cpp b/src/Coordination/TinyContext.cpp index 47b0a48dcdaa..7df081412019 100644 --- a/src/Coordination/TinyContext.cpp +++ b/src/Coordination/TinyContext.cpp @@ -40,7 +40,7 @@ void TinyContext::initializeKeeperDispatcher([[maybe_unused]] bool start_async) MultiVersion::Version macros; if (config_ref.has("macros")) - macros = std::make_unique(config_ref, "macros", &Poco::Logger::get("TinyContext")); + macros = std::make_unique(config_ref, "macros", getLogger("TinyContext")); keeper_dispatcher->initialize(config_ref, true, start_async, macros); } } @@ -79,7 +79,7 @@ void TinyContext::updateKeeperConfiguration([[maybe_unused]] const Poco::Util::A MultiVersion::Version macros; if (config_.has("macros")) - macros = std::make_unique(config_, "macros", &Poco::Logger::get("TinyContext")); + macros = std::make_unique(config_, "macros", getLogger("TinyContext")); keeper_dispatcher->updateConfiguration(config_, macros); } diff --git a/src/Coordination/ZooKeeperDataReader.cpp b/src/Coordination/ZooKeeperDataReader.cpp index 5fa67a60b4b2..5d15422a7573 100644 --- a/src/Coordination/ZooKeeperDataReader.cpp +++ b/src/Coordination/ZooKeeperDataReader.cpp @@ -89,7 +89,7 @@ void deserializeACLMap(KeeperStorage & storage, ReadBuffer & in) } } -int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & 
in, Poco::Logger * log) +int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in, LoggerPtr log) { int64_t max_zxid = 0; std::string path; @@ -146,7 +146,7 @@ int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in, Poco::L return max_zxid; } -void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, Poco::Logger * log) +void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, LoggerPtr log) { LOG_INFO(log, "Deserializing storage snapshot {}", snapshot_path); int64_t zxid = getZxidFromName(snapshot_path); @@ -185,7 +185,7 @@ void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::st LOG_INFO(log, "Finished, snapshot ZXID {}", storage.zxid); } -void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, Poco::Logger * log) +void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, LoggerPtr log) { namespace fs = std::filesystem; std::map existing_snapshots; @@ -473,7 +473,7 @@ bool hasErrorsInMultiRequest(Coordination::ZooKeeperRequestPtr request) } -bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, Poco::Logger * /*log*/) +bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, LoggerPtr /*log*/) { int64_t checksum; Coordination::read(checksum, in); @@ -528,7 +528,7 @@ bool deserializeTxn(KeeperStorage & storage, ReadBuffer & in, Poco::Logger * /*l return true; } -void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, Poco::Logger * log) +void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, LoggerPtr log) { ReadBufferFromFile reader(log_path); @@ -552,7 +552,7 @@ void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string LOG_INFO(log, "Finished {} deserialization, totally read {} records", log_path, counter); } -void 
deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, Poco::Logger * log) +void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, LoggerPtr log) { namespace fs = std::filesystem; std::map existing_logs; diff --git a/src/Coordination/ZooKeeperDataReader.h b/src/Coordination/ZooKeeperDataReader.h index 6da6fd498af7..f8c0c54e3947 100644 --- a/src/Coordination/ZooKeeperDataReader.h +++ b/src/Coordination/ZooKeeperDataReader.h @@ -6,12 +6,12 @@ namespace DB { -void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, Poco::Logger * log); +void deserializeKeeperStorageFromSnapshot(KeeperStorage & storage, const std::string & snapshot_path, LoggerPtr log); -void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, Poco::Logger * log); +void deserializeKeeperStorageFromSnapshotsDir(KeeperStorage & storage, const std::string & path, LoggerPtr log); -void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, Poco::Logger * log); +void deserializeLogAndApplyToStorage(KeeperStorage & storage, const std::string & log_path, LoggerPtr log); -void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, Poco::Logger * log); +void deserializeLogsAndApplyToStorage(KeeperStorage & storage, const std::string & path, LoggerPtr log); } diff --git a/src/Coordination/tests/gtest_coordination.cpp b/src/Coordination/tests/gtest_coordination.cpp index b1bea8ddf248..e10c1a8b40fa 100644 --- a/src/Coordination/tests/gtest_coordination.cpp +++ b/src/Coordination/tests/gtest_coordination.cpp @@ -67,7 +67,7 @@ class CoordinationTest : public ::testing::TestWithParam { protected: DB::KeeperContextPtr keeper_context = std::make_shared(); - Poco::Logger * log{&Poco::Logger::get("CoordinationTest")}; + LoggerPtr log{getLogger("CoordinationTest")}; }; TEST_P(CoordinationTest, BuildTest) @@ -1429,7 
+1429,7 @@ void testLogAndStateMachine(Coordination::CoordinationSettingsPtr settings, uint nuraft::async_result::handler_type when_done = [&snapshot_created] (bool & ret, nuraft::ptr &/*exception*/) { snapshot_created = ret; - LOG_INFO(&Poco::Logger::get("CoordinationTest"), "Snapshot finished"); + LOG_INFO(getLogger("CoordinationTest"), "Snapshot finished"); }; state_machine->create_snapshot(s, when_done); diff --git a/src/Core/BackgroundSchedulePool.cpp b/src/Core/BackgroundSchedulePool.cpp index 5384ee7f9610..e7b819354cb0 100644 --- a/src/Core/BackgroundSchedulePool.cpp +++ b/src/Core/BackgroundSchedulePool.cpp @@ -110,7 +110,7 @@ void BackgroundSchedulePoolTaskInfo::execute() static constexpr UInt64 slow_execution_threshold_ms = 200; if (milliseconds >= slow_execution_threshold_ms) - LOG_TRACE(&Poco::Logger::get(log_name), "Execution took {} ms.", milliseconds); + LOG_TRACE(getLogger(log_name), "Execution took {} ms.", milliseconds); { std::lock_guard lock_schedule(schedule_mutex); @@ -154,7 +154,7 @@ BackgroundSchedulePool::BackgroundSchedulePool(size_t size_, CurrentMetrics::Met , size_metric(size_metric_, size_) , thread_name(thread_name_) { - LOG_INFO(&Poco::Logger::get("BackgroundSchedulePool/" + thread_name), "Create BackgroundSchedulePool with {} threads", size_); + LOG_INFO(getLogger("BackgroundSchedulePool/" + thread_name), "Create BackgroundSchedulePool with {} threads", size_); threads.resize(size_); for (auto & thread : threads) @@ -170,7 +170,7 @@ void BackgroundSchedulePool::increaseThreadsCount(size_t new_threads_count) if (new_threads_count < old_threads_count) { - LOG_WARNING(&Poco::Logger::get("BackgroundSchedulePool/" + thread_name), + LOG_WARNING(getLogger("BackgroundSchedulePool/" + thread_name), "Tried to increase the number of threads but the new threads count ({}) is not greater than old one ({})", new_threads_count, old_threads_count); return; } @@ -197,7 +197,7 @@ BackgroundSchedulePool::~BackgroundSchedulePool() 
tasks_cond_var.notify_all(); delayed_tasks_cond_var.notify_all(); - LOG_TRACE(&Poco::Logger::get("BackgroundSchedulePool/" + thread_name), "Waiting for threads to finish."); + LOG_TRACE(getLogger("BackgroundSchedulePool/" + thread_name), "Waiting for threads to finish."); delayed_thread.join(); for (auto & thread : threads) diff --git a/src/Core/BaseSettings.cpp b/src/Core/BaseSettings.cpp index 72a8070e6529..a7e1ab99af7e 100644 --- a/src/Core/BaseSettings.cpp +++ b/src/Core/BaseSettings.cpp @@ -47,8 +47,7 @@ void BaseSettingsHelpers::throwSettingNotFound(std::string_view name) void BaseSettingsHelpers::warningSettingNotFound(std::string_view name) { - static auto * log = &Poco::Logger::get("Settings"); - LOG_WARNING(log, "Unknown setting {}, skipping", name); + LOG_WARNING(getLogger("Settings"), "Unknown setting {}, skipping", name); } } diff --git a/src/Core/MySQL/Authentication.cpp b/src/Core/MySQL/Authentication.cpp index d43568178b06..3fa218cfb588 100644 --- a/src/Core/MySQL/Authentication.cpp +++ b/src/Core/MySQL/Authentication.cpp @@ -102,7 +102,7 @@ void Native41::authenticate( #if USE_SSL -Sha256Password::Sha256Password(RSA & public_key_, RSA & private_key_, Poco::Logger * log_) +Sha256Password::Sha256Password(RSA & public_key_, RSA & private_key_, LoggerPtr log_) : public_key(public_key_), private_key(private_key_), log(log_) { /** Native authentication sent 20 bytes + '\0' character = 21 bytes. 
diff --git a/src/Core/MySQL/Authentication.h b/src/Core/MySQL/Authentication.h index ee6aaac02bcb..3179fa20f593 100644 --- a/src/Core/MySQL/Authentication.h +++ b/src/Core/MySQL/Authentication.h @@ -61,7 +61,7 @@ class Native41 : public IPlugin class Sha256Password : public IPlugin { public: - Sha256Password(RSA & public_key_, RSA & private_key_, Poco::Logger * log_); + Sha256Password(RSA & public_key_, RSA & private_key_, LoggerPtr log_); String getName() override { return "sha256_password"; } @@ -74,7 +74,7 @@ class Sha256Password : public IPlugin private: RSA & public_key; RSA & private_key; - Poco::Logger * log; + LoggerPtr log; String scramble; }; #endif diff --git a/src/Core/PostgreSQL/Connection.cpp b/src/Core/PostgreSQL/Connection.cpp index 5a589a80d021..eea24dd69407 100644 --- a/src/Core/PostgreSQL/Connection.cpp +++ b/src/Core/PostgreSQL/Connection.cpp @@ -9,7 +9,7 @@ namespace postgres Connection::Connection(const ConnectionInfo & connection_info_, bool replication_, size_t num_tries_) : connection_info(connection_info_), replication(replication_), num_tries(num_tries_) - , log(&Poco::Logger::get("PostgreSQLReplicaConnection")) + , log(getLogger("PostgreSQLReplicaConnection")) { if (replication) connection_info = {fmt::format("{} replication=database", connection_info.connection_string), connection_info.host_port}; @@ -65,7 +65,7 @@ void Connection::updateConnection() if (replication) connection->set_variable("default_transaction_isolation", "'repeatable read'"); - LOG_DEBUG(&Poco::Logger::get("PostgreSQLConnection"), "New connection to {}", connection_info.host_port); + LOG_DEBUG(getLogger("PostgreSQLConnection"), "New connection to {}", connection_info.host_port); } void Connection::connect() diff --git a/src/Core/PostgreSQL/Connection.h b/src/Core/PostgreSQL/Connection.h index 96cc19babea0..1351f12053e9 100644 --- a/src/Core/PostgreSQL/Connection.h +++ b/src/Core/PostgreSQL/Connection.h @@ -6,6 +6,7 @@ #include #include +#include #include /** Methods 
to work with PostgreSQL connection object. @@ -59,7 +60,7 @@ class Connection : private boost::noncopyable bool replication; size_t num_tries; - Poco::Logger * log; + LoggerPtr log; }; using ConnectionPtr = std::unique_ptr; diff --git a/src/Core/PostgreSQL/PoolWithFailover.cpp b/src/Core/PostgreSQL/PoolWithFailover.cpp index 22cd88c07647..914f25a2ed17 100644 --- a/src/Core/PostgreSQL/PoolWithFailover.cpp +++ b/src/Core/PostgreSQL/PoolWithFailover.cpp @@ -31,7 +31,7 @@ PoolWithFailover::PoolWithFailover( , max_tries(max_tries_) , auto_close_connection(auto_close_connection_) { - LOG_TRACE(&Poco::Logger::get("PostgreSQLConnectionPool"), "PostgreSQL connection pool size: {}, connection wait timeout: {}, max failover tries: {}", + LOG_TRACE(getLogger("PostgreSQLConnectionPool"), "PostgreSQL connection pool size: {}, connection wait timeout: {}, max failover tries: {}", pool_size, pool_wait_timeout, max_tries_); for (const auto & [priority, configurations] : configurations_by_priority) @@ -55,13 +55,13 @@ PoolWithFailover::PoolWithFailover( , max_tries(max_tries_) , auto_close_connection(auto_close_connection_) { - LOG_TRACE(&Poco::Logger::get("PostgreSQLConnectionPool"), "PostgreSQL connection pool size: {}, connection wait timeout: {}, max failover tries: {}", + LOG_TRACE(getLogger("PostgreSQLConnectionPool"), "PostgreSQL connection pool size: {}, connection wait timeout: {}, max failover tries: {}", pool_size, pool_wait_timeout, max_tries_); /// Replicas have the same priority, but traversed replicas are moved to the end of the queue. 
for (const auto & [host, port] : configuration.addresses) { - LOG_DEBUG(&Poco::Logger::get("PostgreSQLPoolWithFailover"), "Adding address host: {}, port: {} to connection pool", host, port); + LOG_DEBUG(getLogger("PostgreSQLPoolWithFailover"), "Adding address host: {}, port: {} to connection pool", host, port); auto connection_string = formatConnectionString(configuration.database, host, port, configuration.username, configuration.password); replicas_with_priority[0].emplace_back(connection_string, pool_size); } diff --git a/src/Core/PostgreSQL/PoolWithFailover.h b/src/Core/PostgreSQL/PoolWithFailover.h index afef2933d298..906a289d550e 100644 --- a/src/Core/PostgreSQL/PoolWithFailover.h +++ b/src/Core/PostgreSQL/PoolWithFailover.h @@ -63,7 +63,7 @@ using RemoteDescription = std::vector>; size_t max_tries; bool auto_close_connection; std::mutex mutex; - Poco::Logger * log = &Poco::Logger::get("PostgreSQLConnectionPool"); + LoggerPtr log = getLogger("PostgreSQLConnectionPool"); }; using PoolWithFailoverPtr = std::shared_ptr; diff --git a/src/Core/PostgreSQLProtocol.h b/src/Core/PostgreSQLProtocol.h index 8c0654b559fe..538205bc3d23 100644 --- a/src/Core/PostgreSQLProtocol.h +++ b/src/Core/PostgreSQLProtocol.h @@ -883,7 +883,7 @@ class CleartextPasswordAuth : public AuthenticationMethod class AuthenticationManager { private: - Poco::Logger * log = &Poco::Logger::get("AuthenticationManager"); + LoggerPtr log = getLogger("AuthenticationManager"); std::unordered_map> type_to_method = {}; public: diff --git a/src/Core/ServerUUID.h b/src/Core/ServerUUID.h index 36bbf0e63153..b5ea17426cb2 100644 --- a/src/Core/ServerUUID.h +++ b/src/Core/ServerUUID.h @@ -1,12 +1,10 @@ #pragma once + #include +#include #include namespace fs = std::filesystem; -namespace Poco -{ - class Logger; -} namespace DB { diff --git a/src/Core/SettingsQuirks.cpp b/src/Core/SettingsQuirks.cpp index c2314f484c03..136ef3c57d25 100644 --- a/src/Core/SettingsQuirks.cpp +++ b/src/Core/SettingsQuirks.cpp @@ 
-16,7 +16,7 @@ namespace /// /// [1]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=339ddb53d373 /// [2]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=0c54a6a44bf3 -bool nestedEpollWorks(Poco::Logger * log) +bool nestedEpollWorks(LoggerPtr log) { if (Poco::Environment::os() != POCO_OS_LINUX) return true; @@ -47,7 +47,7 @@ namespace DB { /// Update some settings defaults to avoid some known issues. -void applySettingsQuirks(Settings & settings, Poco::Logger * log) +void applySettingsQuirks(Settings & settings, LoggerPtr log) { if (!nestedEpollWorks(log)) { diff --git a/src/Core/SettingsQuirks.h b/src/Core/SettingsQuirks.h index 38def8eebf2b..f6b2a4e33fae 100644 --- a/src/Core/SettingsQuirks.h +++ b/src/Core/SettingsQuirks.h @@ -1,9 +1,6 @@ #pragma once -namespace Poco -{ -class Logger; -} +#include namespace DB { @@ -11,6 +8,6 @@ namespace DB struct Settings; /// Update some settings defaults to avoid some known issues. -void applySettingsQuirks(Settings & settings, Poco::Logger * log = nullptr); +void applySettingsQuirks(Settings & settings, LoggerPtr log = nullptr); } diff --git a/src/Core/SortDescription.cpp b/src/Core/SortDescription.cpp index 66ca1539b713..5545fed9d844 100644 --- a/src/Core/SortDescription.cpp +++ b/src/Core/SortDescription.cpp @@ -107,10 +107,9 @@ static std::string getSortDescriptionDump(const SortDescription & description, c return buffer.str(); } -static Poco::Logger * getLogger() +static LoggerPtr getLogger() { - static Poco::Logger & logger = Poco::Logger::get("SortDescription"); - return &logger; + return ::getLogger("SortDescription"); } void compileSortDescriptionIfNeeded(SortDescription & description, const DataTypes & sort_description_types, bool increase_compile_attempts) diff --git a/src/Core/examples/coro.cpp b/src/Core/examples/coro.cpp index fbccc261e9d1..4698266d7e4c 100644 --- a/src/Core/examples/coro.cpp +++ b/src/Core/examples/coro.cpp @@ -176,7 +176,7 @@ int 
main() Poco::Logger::root().setChannel(app_channel); Poco::Logger::root().setLevel("trace"); - LOG_INFO(&Poco::Logger::get(""), "Starting"); + LOG_INFO(getLogger(""), "Starting"); try { diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index 345d6e765fe3..e31bab81a011 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -194,7 +194,7 @@ class SignalListener : public Poco::Runnable static constexpr int SanitizerTrap = -3; explicit SignalListener(BaseDaemon & daemon_) - : log(&Poco::Logger::get("BaseDaemon")) + : log(getLogger("BaseDaemon")) , daemon(daemon_) { } @@ -269,7 +269,7 @@ class SignalListener : public Poco::Runnable } private: - Poco::Logger * log; + LoggerPtr log; BaseDaemon & daemon; void onTerminate(std::string_view message, UInt32 thread_num) const diff --git a/src/Daemon/SentryWriter.cpp b/src/Daemon/SentryWriter.cpp index 3c62e54b117e..ee3818c22c5c 100644 --- a/src/Daemon/SentryWriter.cpp +++ b/src/Daemon/SentryWriter.cpp @@ -70,7 +70,7 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config) { bool enabled = false; bool debug = config.getBool("send_crash_reports.debug", false); - auto * logger = &Poco::Logger::get("SentryWriter"); + auto logger = getLogger("SentryWriter"); if (config.getBool("send_crash_reports.enabled", false)) { @@ -142,7 +142,7 @@ void SentryWriter::shutdown() void SentryWriter::onFault(int sig, const std::string & error_message, const StackTrace & stack_trace) { - auto * logger = &Poco::Logger::get("SentryWriter"); + auto logger = getLogger("SentryWriter"); if (initialized) { sentry_value_t event = sentry_value_new_message_event(SENTRY_LEVEL_FATAL, "fault", error_message.c_str()); diff --git a/src/Databases/DatabaseDictionary.cpp b/src/Databases/DatabaseDictionary.cpp index 04529ff5293e..e4edc0176be8 100644 --- a/src/Databases/DatabaseDictionary.cpp +++ b/src/Databases/DatabaseDictionary.cpp @@ -50,7 +50,7 @@ namespace DatabaseDictionary::DatabaseDictionary(const String & 
name_, ContextPtr context_) : IDatabase(name_), WithContext(context_->getGlobalContext()) - , log(&Poco::Logger::get("DatabaseDictionary(" + database_name + ")")) + , log(getLogger("DatabaseDictionary(" + database_name + ")")) { } diff --git a/src/Databases/DatabaseDictionary.h b/src/Databases/DatabaseDictionary.h index 425d048aa65b..469801d183e6 100644 --- a/src/Databases/DatabaseDictionary.h +++ b/src/Databases/DatabaseDictionary.h @@ -48,7 +48,7 @@ class DatabaseDictionary final : public IDatabase, WithContext ASTPtr getCreateTableQueryImpl(const String & table_name, ContextPtr context, bool throw_on_error) const override; private: - Poco::Logger * log; + LoggerPtr log; Tables listTables(const FilterByNameFunction & filter_by_name) const; }; diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 199bae4fbb41..8b2323d095a5 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -350,7 +350,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String else { use_table_cache = safeGetLiteralValue(engine_args[4], engine_name); - LOG_WARNING(&Poco::Logger::get("DatabaseFactory"), "A deprecated syntax of PostgreSQL database engine is used"); + LOG_WARNING(getLogger("DatabaseFactory"), "A deprecated syntax of PostgreSQL database engine is used"); is_deprecated_syntax = true; } } diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 01afbdcaa576..80171b6f9452 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -642,7 +642,7 @@ void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const Iterat } ASTPtr DatabaseOnDisk::parseQueryFromMetadata( - Poco::Logger * logger, + LoggerPtr logger, ContextPtr local_context, const String & metadata_file_path, bool throw_on_error /*= true*/, diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h index 0db6a94b86d2..3dbccd09f041 100644 --- 
a/src/Databases/DatabaseOnDisk.h +++ b/src/Databases/DatabaseOnDisk.h @@ -66,7 +66,7 @@ class DatabaseOnDisk : public DatabaseWithOwnTablesBase String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); } String getMetadataPath() const override { return metadata_path; } - static ASTPtr parseQueryFromMetadata(Poco::Logger * log, ContextPtr context, const String & metadata_file_path, bool throw_on_error = true, bool remove_empty = false); + static ASTPtr parseQueryFromMetadata(LoggerPtr log, ContextPtr context, const String & metadata_file_path, bool throw_on_error = true, bool remove_empty = false); /// will throw when the table we want to attach already exists (in active / detached / detached permanently form) void checkMetadataFilenameAvailability(const String & to_table_name) const override; diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index bb98e2bd3bbd..c76bdef68344 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -181,7 +181,7 @@ void cleanupObjectDefinitionFromTemporaryFlags(ASTCreateQuery & query) DatabaseWithOwnTablesBase::DatabaseWithOwnTablesBase(const String & name_, const String & logger, ContextPtr context_) - : IDatabase(name_), WithContext(context_->getGlobalContext()), log(&Poco::Logger::get(logger)) + : IDatabase(name_), WithContext(context_->getGlobalContext()), log(getLogger(logger)) { } diff --git a/src/Databases/DatabasesCommon.h b/src/Databases/DatabasesCommon.h index c5842d7dac31..21395c39a006 100644 --- a/src/Databases/DatabasesCommon.h +++ b/src/Databases/DatabasesCommon.h @@ -45,7 +45,7 @@ class DatabaseWithOwnTablesBase : public IDatabase, protected WithContext protected: Tables tables TSA_GUARDED_BY(mutex); - Poco::Logger * log; + LoggerPtr log; DatabaseWithOwnTablesBase(const String & name_, const String & logger, ContextPtr context); diff --git a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp 
b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp index aaf6b00dc4f1..f6ef6f942309 100644 --- a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp +++ b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp @@ -76,7 +76,7 @@ static BlockIO tryToExecuteQuery(const String & query_to_execute, ContextMutable catch (...) { tryLogCurrentException( - &Poco::Logger::get("MaterializedMySQLSyncThread(" + database + ")"), + getLogger("MaterializedMySQLSyncThread(" + database + ")"), "Query " + query_to_execute + " wasn't finished successfully"); throw; } @@ -214,7 +214,7 @@ MaterializedMySQLSyncThread::MaterializedMySQLSyncThread( MySQLClient && client_, MaterializedMySQLSettings * settings_) : WithContext(context_->getGlobalContext()) - , log(&Poco::Logger::get("MaterializedMySQLSyncThread")) + , log(getLogger("MaterializedMySQLSyncThread")) , database_name(database_name_) , mysql_database_name(mysql_database_name_) , pool(std::move(pool_)) /// NOLINT @@ -446,7 +446,7 @@ static inline void dumpDataForTables( StreamSettings mysql_input_stream_settings(context->getSettingsRef()); String mysql_select_all_query = "SELECT " + rewriteMysqlQueryColumn(connection, mysql_database_name, table_name, context->getSettingsRef()) + " FROM " + backQuoteIfNeed(mysql_database_name) + "." 
+ backQuoteIfNeed(table_name); - LOG_INFO(&Poco::Logger::get("MaterializedMySQLSyncThread(" + database_name + ")"), "mysql_select_all_query is {}", mysql_select_all_query); + LOG_INFO(getLogger("MaterializedMySQLSyncThread(" + database_name + ")"), "mysql_select_all_query is {}", mysql_select_all_query); auto input = std::make_unique(connection, mysql_select_all_query, pipeline.getHeader(), mysql_input_stream_settings); auto counting = std::make_shared(pipeline.getHeader()); Pipe pipe(std::move(input)); @@ -458,7 +458,7 @@ static inline void dumpDataForTables( executor.execute(); const Progress & progress = counting->getProgress(); - LOG_INFO(&Poco::Logger::get("MaterializedMySQLSyncThread(" + database_name + ")"), + LOG_INFO(getLogger("MaterializedMySQLSyncThread(" + database_name + ")"), "Materialize MySQL step 1: dump {}, {} rows, {} in {} sec., {} rows/sec., {}/sec." , table_name, formatReadableQuantity(progress.written_rows), formatReadableSizeWithBinarySuffix(progress.written_bytes) , watch.elapsedSeconds(), formatReadableQuantity(static_cast(progress.written_rows / watch.elapsedSeconds())) diff --git a/src/Databases/MySQL/MaterializedMySQLSyncThread.h b/src/Databases/MySQL/MaterializedMySQLSyncThread.h index 4abea5e72dfb..8e66ecba81af 100644 --- a/src/Databases/MySQL/MaterializedMySQLSyncThread.h +++ b/src/Databases/MySQL/MaterializedMySQLSyncThread.h @@ -54,7 +54,7 @@ class MaterializedMySQLSyncThread : WithContext void assertMySQLAvailable(); private: - Poco::Logger * log; + LoggerPtr log; String database_name; String mysql_database_name; diff --git a/src/Databases/SQLite/DatabaseSQLite.cpp b/src/Databases/SQLite/DatabaseSQLite.cpp index d031fd8e4201..6645cfed96f8 100644 --- a/src/Databases/SQLite/DatabaseSQLite.cpp +++ b/src/Databases/SQLite/DatabaseSQLite.cpp @@ -32,7 +32,7 @@ DatabaseSQLite::DatabaseSQLite( , WithContext(context_->getGlobalContext()) , database_engine_define(database_engine_define_->clone()) , database_path(database_path_) - , 
log(&Poco::Logger::get("DatabaseSQLite")) + , log(getLogger("DatabaseSQLite")) { sqlite_db = openSQLiteDB(database_path_, context_, !is_attach_); } diff --git a/src/Databases/SQLite/DatabaseSQLite.h b/src/Databases/SQLite/DatabaseSQLite.h index a89fbc32c3d7..e5e93bbc8ce3 100644 --- a/src/Databases/SQLite/DatabaseSQLite.h +++ b/src/Databases/SQLite/DatabaseSQLite.h @@ -50,7 +50,7 @@ class DatabaseSQLite final : public IDatabase, WithContext mutable SQLitePtr sqlite_db; - Poco::Logger * log; + LoggerPtr log; bool checkSQLiteTable(const String & table_name) const; diff --git a/src/Databases/SQLite/SQLiteUtils.cpp b/src/Databases/SQLite/SQLiteUtils.cpp index 152370050f1c..a259e7ef7d30 100644 --- a/src/Databases/SQLite/SQLiteUtils.cpp +++ b/src/Databases/SQLite/SQLiteUtils.cpp @@ -21,7 +21,7 @@ void processSQLiteError(const String & message, bool throw_on_error) if (throw_on_error) throw Exception::createDeprecated(message, ErrorCodes::PATH_ACCESS_DENIED); else - LOG_ERROR(&Poco::Logger::get("SQLiteEngine"), fmt::runtime(message)); + LOG_ERROR(getLogger("SQLiteEngine"), fmt::runtime(message)); } String validateSQLiteDatabasePath(const String & path, const String & user_files_path, bool need_check, bool throw_on_error) @@ -53,7 +53,7 @@ SQLitePtr openSQLiteDB(const String & path, ContextPtr context, bool throw_on_er return nullptr; if (!fs::exists(database_path)) - LOG_DEBUG(&Poco::Logger::get("SQLite"), "SQLite database path {} does not exist, will create an empty SQLite database", database_path); + LOG_DEBUG(getLogger("SQLite"), "SQLite database path {} does not exist, will create an empty SQLite database", database_path); sqlite3 * tmp_sqlite_db = nullptr; int status; diff --git a/src/Databases/TablesDependencyGraph.cpp b/src/Databases/TablesDependencyGraph.cpp index 4ea9e646ab9a..9b22069345c9 100644 --- a/src/Databases/TablesDependencyGraph.cpp +++ b/src/Databases/TablesDependencyGraph.cpp @@ -736,10 +736,10 @@ void TablesDependencyGraph::log() const } -Poco::Logger 
* TablesDependencyGraph::getLogger() const +LoggerPtr TablesDependencyGraph::getLogger() const { if (!logger) - logger = &Poco::Logger::get(name_for_logging); + logger = ::getLogger(name_for_logging); return logger; } diff --git a/src/Databases/TablesDependencyGraph.h b/src/Databases/TablesDependencyGraph.h index e5be59d1ee9a..b0dc5cdcd59b 100644 --- a/src/Databases/TablesDependencyGraph.h +++ b/src/Databases/TablesDependencyGraph.h @@ -166,7 +166,7 @@ class TablesDependencyGraph mutable bool levels_calculated = false; const String name_for_logging; - mutable Poco::Logger * logger = nullptr; + mutable LoggerPtr logger = nullptr; Node * findNode(const StorageID & table_id) const; Node * addOrUpdateNode(const StorageID & table_id); @@ -178,7 +178,7 @@ class TablesDependencyGraph void setNeedRecalculateLevels() const; const NodesSortedByLevel & getNodesSortedByLevel() const; - Poco::Logger * getLogger() const; + LoggerPtr getLogger() const; }; } diff --git a/src/Databases/TablesLoader.cpp b/src/Databases/TablesLoader.cpp index 177b32daa2e2..6ebdb7790a9a 100644 --- a/src/Databases/TablesLoader.cpp +++ b/src/Databases/TablesLoader.cpp @@ -28,7 +28,7 @@ namespace ErrorCodes static constexpr size_t PRINT_MESSAGE_EACH_N_OBJECTS = 256; static constexpr size_t PRINT_MESSAGE_EACH_N_SECONDS = 5; -void logAboutProgress(Poco::Logger * log, size_t processed, size_t total, AtomicStopwatch & watch) +void logAboutProgress(LoggerPtr log, size_t processed, size_t total, AtomicStopwatch & watch) { if (processed % PRINT_MESSAGE_EACH_N_OBJECTS == 0 || watch.compareAndRestart(PRINT_MESSAGE_EACH_N_SECONDS)) { @@ -47,7 +47,7 @@ TablesLoader::TablesLoader(ContextMutablePtr global_context_, Databases database , pool(CurrentMetrics::TablesLoaderThreads, CurrentMetrics::TablesLoaderThreadsActive) { metadata.default_database = global_context->getCurrentDatabase(); - log = &Poco::Logger::get("TablesLoader"); + log = getLogger("TablesLoader"); } diff --git a/src/Databases/TablesLoader.h 
b/src/Databases/TablesLoader.h index eb07351bd7f5..37b89e0426a1 100644 --- a/src/Databases/TablesLoader.h +++ b/src/Databases/TablesLoader.h @@ -22,7 +22,7 @@ class AtomicStopwatch; namespace DB { -void logAboutProgress(Poco::Logger * log, size_t processed, size_t total, AtomicStopwatch & watch); +void logAboutProgress(LoggerPtr log, size_t processed, size_t total, AtomicStopwatch & watch); class IDatabase; @@ -70,7 +70,7 @@ class TablesLoader TablesDependencyGraph referential_dependencies; TablesDependencyGraph loading_dependencies; TablesDependencyGraph all_loading_dependencies; - Poco::Logger * log; + LoggerPtr log; std::atomic tables_processed{0}; AtomicStopwatch stopwatch; diff --git a/src/Dictionaries/CacheDictionary.cpp b/src/Dictionaries/CacheDictionary.cpp index c5c88a9f142a..00f81864d88a 100644 --- a/src/Dictionaries/CacheDictionary.cpp +++ b/src/Dictionaries/CacheDictionary.cpp @@ -64,7 +64,7 @@ CacheDictionary::CacheDictionary( update(unit_to_update); }) , dict_lifetime(dict_lifetime_) - , log(&Poco::Logger::get("ExternalDictionaries")) + , log(getLogger("ExternalDictionaries")) , allow_read_expired_keys(allow_read_expired_keys_) , rnd_engine(randomSeed()) { diff --git a/src/Dictionaries/CacheDictionary.h b/src/Dictionaries/CacheDictionary.h index e19c4a66b1f6..04cade4f19f6 100644 --- a/src/Dictionaries/CacheDictionary.h +++ b/src/Dictionaries/CacheDictionary.h @@ -197,7 +197,7 @@ class CacheDictionary final : public IDictionary const DictionaryLifetime dict_lifetime; - Poco::Logger * log; + LoggerPtr log; const bool allow_read_expired_keys; diff --git a/src/Dictionaries/CassandraDictionarySource.cpp b/src/Dictionaries/CassandraDictionarySource.cpp index e0cf2483b3d5..b3bf288ef5af 100644 --- a/src/Dictionaries/CassandraDictionarySource.cpp +++ b/src/Dictionaries/CassandraDictionarySource.cpp @@ -105,7 +105,7 @@ CassandraDictionarySource::CassandraDictionarySource( const DictionaryStructure & dict_struct_, const Configuration & configuration_, const 
Block & sample_block_) - : log(&Poco::Logger::get("CassandraDictionarySource")) + : log(getLogger("CassandraDictionarySource")) , dict_struct(dict_struct_) , configuration(configuration_) , sample_block(sample_block_) diff --git a/src/Dictionaries/CassandraDictionarySource.h b/src/Dictionaries/CassandraDictionarySource.h index 2591b33c6388..3700642fc5b5 100644 --- a/src/Dictionaries/CassandraDictionarySource.h +++ b/src/Dictionaries/CassandraDictionarySource.h @@ -77,7 +77,7 @@ class CassandraDictionarySource final : public IDictionarySource void maybeAllowFiltering(String & query) const; CassSessionShared getSession(); - Poco::Logger * log; + LoggerPtr log; const DictionaryStructure dict_struct; const Configuration configuration; Block sample_block; diff --git a/src/Dictionaries/CassandraHelpers.cpp b/src/Dictionaries/CassandraHelpers.cpp index e93b3fe8d496..4c569d00957a 100644 --- a/src/Dictionaries/CassandraHelpers.cpp +++ b/src/Dictionaries/CassandraHelpers.cpp @@ -47,7 +47,7 @@ void setupCassandraDriverLibraryLogging(CassLogLevel level) { std::call_once(setup_logging_flag, [level]() { - Poco::Logger * logger = &Poco::Logger::get("CassandraDriverLibrary"); + Poco::Logger * logger = getRawLogger("CassandraDriverLibrary"); cass_log_set_level(level); if (level != CASS_LOG_DISABLED) cass_log_set_callback(cassandraLogCallback, logger); diff --git a/src/Dictionaries/ClickHouseDictionarySource.h b/src/Dictionaries/ClickHouseDictionarySource.h index f37fae389ac3..a9eb879260f3 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.h +++ b/src/Dictionaries/ClickHouseDictionarySource.h @@ -83,7 +83,7 @@ class ClickHouseDictionarySource final : public IDictionarySource ContextMutablePtr context; ConnectionPoolWithFailoverPtr pool; const std::string load_all_query; - Poco::Logger * log = &Poco::Logger::get("ClickHouseDictionarySource"); + LoggerPtr log = getLogger("ClickHouseDictionarySource"); /// RegExpTreeDictionary is the only dictionary whose structure of 
attributions differ from the input block. /// For now we need to modify sample_block in the ctor of RegExpTreeDictionary. diff --git a/src/Dictionaries/DictionaryFactory.cpp b/src/Dictionaries/DictionaryFactory.cpp index d091e49d1f01..98f49f7b2be2 100644 --- a/src/Dictionaries/DictionaryFactory.cpp +++ b/src/Dictionaries/DictionaryFactory.cpp @@ -46,7 +46,7 @@ DictionaryPtr DictionaryFactory::create( DictionarySourcePtr source_ptr = DictionarySourceFactory::instance().create( name, config, config_prefix + ".source", dict_struct, global_context, config.getString(config_prefix + ".database", ""), created_from_ddl); - LOG_TRACE(&Poco::Logger::get("DictionaryFactory"), "Created dictionary source '{}' for dictionary '{}'", source_ptr->toString(), name); + LOG_TRACE(getLogger("DictionaryFactory"), "Created dictionary source '{}' for dictionary '{}'", source_ptr->toString(), name); const auto & layout_type = keys.front(); diff --git a/src/Dictionaries/DictionarySourceFactory.cpp b/src/Dictionaries/DictionarySourceFactory.cpp index c9f4bcb74461..15674ad6f2b5 100644 --- a/src/Dictionaries/DictionarySourceFactory.cpp +++ b/src/Dictionaries/DictionarySourceFactory.cpp @@ -65,7 +65,7 @@ namespace } -DictionarySourceFactory::DictionarySourceFactory() : log(&Poco::Logger::get("DictionarySourceFactory")) +DictionarySourceFactory::DictionarySourceFactory() : log(getLogger("DictionarySourceFactory")) { } diff --git a/src/Dictionaries/DictionarySourceFactory.h b/src/Dictionaries/DictionarySourceFactory.h index f4c3fa121633..407f64c405be 100644 --- a/src/Dictionaries/DictionarySourceFactory.h +++ b/src/Dictionaries/DictionarySourceFactory.h @@ -56,7 +56,7 @@ class DictionarySourceFactory : private boost::noncopyable using SourceRegistry = std::unordered_map; SourceRegistry registered_sources; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Dictionaries/DirectDictionary.cpp b/src/Dictionaries/DirectDictionary.cpp index d84967fbae64..916bd24e7010 100644 --- 
a/src/Dictionaries/DirectDictionary.cpp +++ b/src/Dictionaries/DirectDictionary.cpp @@ -118,7 +118,7 @@ Columns DirectDictionary::getColumns( block_key_columns.clear(); } - LOG_DEBUG(&Poco::Logger::get("DirectDictionary"), "read {} blocks with {} rows from pipeline in {} ms", + LOG_DEBUG(getLogger("DirectDictionary"), "read {} blocks with {} rows from pipeline in {} ms", block_num, rows_num, watch.elapsedMilliseconds()); Field value_to_insert; @@ -352,7 +352,7 @@ Pipe DirectDictionary::getSourcePipe( pipe = Pipe(std::make_shared>(std::move(pipeline))); } - LOG_DEBUG(&Poco::Logger::get("DirectDictionary"), "building pipeline for loading keys done in {} ms", watch.elapsedMilliseconds()); + LOG_DEBUG(getLogger("DirectDictionary"), "building pipeline for loading keys done in {} ms", watch.elapsedMilliseconds()); return pipe; } diff --git a/src/Dictionaries/Embedded/RegionsHierarchies.cpp b/src/Dictionaries/Embedded/RegionsHierarchies.cpp index be828b8b2817..05c54c49208e 100644 --- a/src/Dictionaries/Embedded/RegionsHierarchies.cpp +++ b/src/Dictionaries/Embedded/RegionsHierarchies.cpp @@ -6,7 +6,7 @@ RegionsHierarchies::RegionsHierarchies(IRegionsHierarchiesDataProviderPtr data_provider) { - Poco::Logger * log = &Poco::Logger::get("RegionsHierarchies"); + LoggerPtr log = getLogger("RegionsHierarchies"); LOG_DEBUG(log, "Adding default regions hierarchy"); data.emplace("", data_provider->getDefaultHierarchySource()); diff --git a/src/Dictionaries/Embedded/RegionsHierarchy.cpp b/src/Dictionaries/Embedded/RegionsHierarchy.cpp index c266bf7efb8e..b814372574a4 100644 --- a/src/Dictionaries/Embedded/RegionsHierarchy.cpp +++ b/src/Dictionaries/Embedded/RegionsHierarchy.cpp @@ -23,7 +23,7 @@ RegionsHierarchy::RegionsHierarchy(IRegionsHierarchyDataSourcePtr data_source_) void RegionsHierarchy::reload() { - Poco::Logger * log = &Poco::Logger::get("RegionsHierarchy"); + LoggerPtr log = getLogger("RegionsHierarchy"); if (!data_source->isModified()) return; diff --git 
a/src/Dictionaries/Embedded/RegionsNames.cpp b/src/Dictionaries/Embedded/RegionsNames.cpp index 93ca9e6dbc9f..15070a42fbc3 100644 --- a/src/Dictionaries/Embedded/RegionsNames.cpp +++ b/src/Dictionaries/Embedded/RegionsNames.cpp @@ -42,7 +42,7 @@ std::string RegionsNames::dumpSupportedLanguagesNames() void RegionsNames::reload() { - Poco::Logger * log = &Poco::Logger::get("RegionsNames"); + LoggerPtr log = getLogger("RegionsNames"); LOG_DEBUG(log, "Reloading regions names"); RegionID max_region_id = 0; diff --git a/src/Dictionaries/ExecutableDictionarySource.cpp b/src/Dictionaries/ExecutableDictionarySource.cpp index 91f914fb7ad2..4154ae5c3413 100644 --- a/src/Dictionaries/ExecutableDictionarySource.cpp +++ b/src/Dictionaries/ExecutableDictionarySource.cpp @@ -71,7 +71,7 @@ ExecutableDictionarySource::ExecutableDictionarySource( Block & sample_block_, std::shared_ptr coordinator_, ContextPtr context_) - : log(&Poco::Logger::get("ExecutableDictionarySource")) + : log(getLogger("ExecutableDictionarySource")) , dict_struct(dict_struct_) , configuration(configuration_) , sample_block(sample_block_) @@ -93,7 +93,7 @@ ExecutableDictionarySource::ExecutableDictionarySource( } ExecutableDictionarySource::ExecutableDictionarySource(const ExecutableDictionarySource & other) - : log(&Poco::Logger::get("ExecutableDictionarySource")) + : log(getLogger("ExecutableDictionarySource")) , update_time(other.update_time) , dict_struct(other.dict_struct) , configuration(other.configuration) diff --git a/src/Dictionaries/ExecutableDictionarySource.h b/src/Dictionaries/ExecutableDictionarySource.h index 0456d3cafef1..e38b39562f27 100644 --- a/src/Dictionaries/ExecutableDictionarySource.h +++ b/src/Dictionaries/ExecutableDictionarySource.h @@ -64,7 +64,7 @@ class ExecutableDictionarySource final : public IDictionarySource QueryPipeline getStreamForBlock(const Block & block); private: - Poco::Logger * log; + LoggerPtr log; time_t update_time = 0; const DictionaryStructure dict_struct; const 
Configuration configuration; diff --git a/src/Dictionaries/ExecutablePoolDictionarySource.cpp b/src/Dictionaries/ExecutablePoolDictionarySource.cpp index 0cc7696585f6..334dc6b3b3cc 100644 --- a/src/Dictionaries/ExecutablePoolDictionarySource.cpp +++ b/src/Dictionaries/ExecutablePoolDictionarySource.cpp @@ -40,7 +40,7 @@ ExecutablePoolDictionarySource::ExecutablePoolDictionarySource( , sample_block(sample_block_) , coordinator(std::move(coordinator_)) , context(context_) - , log(&Poco::Logger::get("ExecutablePoolDictionarySource")) + , log(getLogger("ExecutablePoolDictionarySource")) { /// Remove keys from sample_block for implicit_key dictionary because /// these columns will not be returned from source @@ -64,7 +64,7 @@ ExecutablePoolDictionarySource::ExecutablePoolDictionarySource(const ExecutableP , sample_block(other.sample_block) , coordinator(other.coordinator) , context(Context::createCopy(other.context)) - , log(&Poco::Logger::get("ExecutablePoolDictionarySource")) + , log(getLogger("ExecutablePoolDictionarySource")) { } diff --git a/src/Dictionaries/ExecutablePoolDictionarySource.h b/src/Dictionaries/ExecutablePoolDictionarySource.h index 1fc10d18b76a..3f1c9f9eb35c 100644 --- a/src/Dictionaries/ExecutablePoolDictionarySource.h +++ b/src/Dictionaries/ExecutablePoolDictionarySource.h @@ -73,7 +73,7 @@ class ExecutablePoolDictionarySource final : public IDictionarySource Block sample_block; std::shared_ptr coordinator; ContextPtr context; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Dictionaries/FileDictionarySource.cpp b/src/Dictionaries/FileDictionarySource.cpp index 86287971428e..16a4ecaee754 100644 --- a/src/Dictionaries/FileDictionarySource.cpp +++ b/src/Dictionaries/FileDictionarySource.cpp @@ -48,7 +48,7 @@ FileDictionarySource::FileDictionarySource(const FileDictionarySource & other) QueryPipeline FileDictionarySource::loadAll() { - LOG_TRACE(&Poco::Logger::get("FileDictionary"), "loadAll {}", toString()); + 
LOG_TRACE(getLogger("FileDictionary"), "loadAll {}", toString()); auto in_ptr = std::make_unique(filepath); auto source = context->getInputFormat(format, *in_ptr, sample_block, max_block_size); source->addBuffer(std::move(in_ptr)); diff --git a/src/Dictionaries/HTTPDictionarySource.cpp b/src/Dictionaries/HTTPDictionarySource.cpp index d2d3b344df54..f86b81c10447 100644 --- a/src/Dictionaries/HTTPDictionarySource.cpp +++ b/src/Dictionaries/HTTPDictionarySource.cpp @@ -32,7 +32,7 @@ HTTPDictionarySource::HTTPDictionarySource( const Poco::Net::HTTPBasicCredentials & credentials_, Block & sample_block_, ContextPtr context_) - : log(&Poco::Logger::get("HTTPDictionarySource")) + : log(getLogger("HTTPDictionarySource")) , update_time(std::chrono::system_clock::from_time_t(0)) , dict_struct(dict_struct_) , configuration(configuration_) @@ -45,7 +45,7 @@ HTTPDictionarySource::HTTPDictionarySource( } HTTPDictionarySource::HTTPDictionarySource(const HTTPDictionarySource & other) - : log(&Poco::Logger::get("HTTPDictionarySource")) + : log(getLogger("HTTPDictionarySource")) , update_time(other.update_time) , dict_struct(other.dict_struct) , configuration(other.configuration) diff --git a/src/Dictionaries/HTTPDictionarySource.h b/src/Dictionaries/HTTPDictionarySource.h index e22aacd89f1f..414372fe7ac2 100644 --- a/src/Dictionaries/HTTPDictionarySource.h +++ b/src/Dictionaries/HTTPDictionarySource.h @@ -66,7 +66,7 @@ class HTTPDictionarySource final : public IDictionarySource // wrap buffer using encoding from made request QueryPipeline createWrappedBuffer(std::unique_ptr http_buffer); - Poco::Logger * log; + LoggerPtr log; LocalDateTime getLastModification() const; diff --git a/src/Dictionaries/HashedDictionary.cpp b/src/Dictionaries/HashedDictionary.cpp index 0e5d18363e9a..1bb0989b567e 100644 --- a/src/Dictionaries/HashedDictionary.cpp +++ b/src/Dictionaries/HashedDictionary.cpp @@ -196,7 +196,7 @@ HashedDictionary::HashedDictionary( const HashedDictionaryConfiguration & 
configuration_, BlockPtr update_field_loaded_block_) : IDictionary(dict_id_) - , log(&Poco::Logger::get("HashedDictionary")) + , log(getLogger("HashedDictionary")) , dict_struct(dict_struct_) , source_ptr(std::move(source_ptr_)) , configuration(configuration_) @@ -1161,7 +1161,7 @@ void registerDictionaryHashed(DictionaryFactory & factory) const std::string dictionary_layout_prefix = ".layout." + dictionary_layout_name; const bool preallocate = config.getBool(config_prefix + dictionary_layout_prefix + ".preallocate", false); if (preallocate) - LOG_WARNING(&Poco::Logger::get("HashedDictionary"), "'prellocate' attribute is obsolete, consider looking at 'shards'"); + LOG_WARNING(getLogger("HashedDictionary"), "'prellocate' attribute is obsolete, consider looking at 'shards'"); Int64 shards = config.getInt(config_prefix + dictionary_layout_prefix + ".shards", 1); if (shards <= 0 || shards > 128) diff --git a/src/Dictionaries/HashedDictionary.h b/src/Dictionaries/HashedDictionary.h index 676836796479..30225ebabc3e 100644 --- a/src/Dictionaries/HashedDictionary.h +++ b/src/Dictionaries/HashedDictionary.h @@ -252,7 +252,7 @@ class HashedDictionary final : public IDictionary void resize(size_t added_rows); - Poco::Logger * log; + LoggerPtr log; const DictionaryStructure dict_struct; const DictionarySourcePtr source_ptr; diff --git a/src/Dictionaries/IPAddressDictionary.cpp b/src/Dictionaries/IPAddressDictionary.cpp index 803f607a3a75..290d4896d646 100644 --- a/src/Dictionaries/IPAddressDictionary.cpp +++ b/src/Dictionaries/IPAddressDictionary.cpp @@ -204,7 +204,7 @@ IPAddressDictionary::IPAddressDictionary( , dict_lifetime(dict_lifetime_) , require_nonempty(require_nonempty_) , access_to_key_from_attributes(dict_struct_.access_to_key_from_attributes) - , logger(&Poco::Logger::get("IPAddressDictionary")) + , logger(getLogger("IPAddressDictionary")) { createAttributes(); loadData(); diff --git a/src/Dictionaries/IPAddressDictionary.h b/src/Dictionaries/IPAddressDictionary.h 
index e1fabb89a7e5..8a0c8e9c0011 100644 --- a/src/Dictionaries/IPAddressDictionary.h +++ b/src/Dictionaries/IPAddressDictionary.h @@ -229,7 +229,7 @@ class IPAddressDictionary final : public IDictionary mutable std::atomic query_count{0}; mutable std::atomic found_count{0}; - Poco::Logger * logger; + LoggerPtr logger; }; } diff --git a/src/Dictionaries/LibraryDictionarySource.cpp b/src/Dictionaries/LibraryDictionarySource.cpp index 7eb4d803fe8f..f6f104ca11da 100644 --- a/src/Dictionaries/LibraryDictionarySource.cpp +++ b/src/Dictionaries/LibraryDictionarySource.cpp @@ -30,7 +30,7 @@ LibraryDictionarySource::LibraryDictionarySource( Block & sample_block_, ContextPtr context_, bool created_from_ddl) - : log(&Poco::Logger::get("LibraryDictionarySource")) + : log(getLogger("LibraryDictionarySource")) , dict_struct{dict_struct_} , config_prefix{config_prefix_} , path{config.getString(config_prefix + ".path", "")} @@ -78,7 +78,7 @@ LibraryDictionarySource::~LibraryDictionarySource() LibraryDictionarySource::LibraryDictionarySource(const LibraryDictionarySource & other) - : log(&Poco::Logger::get("LibraryDictionarySource")) + : log(getLogger("LibraryDictionarySource")) , dict_struct{other.dict_struct} , config_prefix{other.config_prefix} , path{other.path} diff --git a/src/Dictionaries/LibraryDictionarySource.h b/src/Dictionaries/LibraryDictionarySource.h index 57ab9976a3b6..04a3d838577c 100644 --- a/src/Dictionaries/LibraryDictionarySource.h +++ b/src/Dictionaries/LibraryDictionarySource.h @@ -75,7 +75,7 @@ class LibraryDictionarySource final : public IDictionarySource static Field getDictID() { return UUIDHelpers::generateV4(); } - Poco::Logger * log; + LoggerPtr log; const DictionaryStructure dict_struct; const std::string config_prefix; diff --git a/src/Dictionaries/MySQLDictionarySource.cpp b/src/Dictionaries/MySQLDictionarySource.cpp index 730217f96b79..ebf6fd05a102 100644 --- a/src/Dictionaries/MySQLDictionarySource.cpp +++ 
b/src/Dictionaries/MySQLDictionarySource.cpp @@ -173,7 +173,7 @@ MySQLDictionarySource::MySQLDictionarySource( mysqlxx::PoolWithFailoverPtr pool_, const Block & sample_block_, const StreamSettings & settings_) - : log(&Poco::Logger::get("MySQLDictionarySource")) + : log(getLogger("MySQLDictionarySource")) , update_time(std::chrono::system_clock::from_time_t(0)) , dict_struct(dict_struct_) , configuration(configuration_) @@ -187,7 +187,7 @@ MySQLDictionarySource::MySQLDictionarySource( /// copy-constructor is provided in order to support cloneability MySQLDictionarySource::MySQLDictionarySource(const MySQLDictionarySource & other) - : log(&Poco::Logger::get("MySQLDictionarySource")) + : log(getLogger("MySQLDictionarySource")) , update_time(other.update_time) , dict_struct(other.dict_struct) , configuration(other.configuration) diff --git a/src/Dictionaries/MySQLDictionarySource.h b/src/Dictionaries/MySQLDictionarySource.h index 1d43ebfe2ba6..d9eea3f3e261 100644 --- a/src/Dictionaries/MySQLDictionarySource.h +++ b/src/Dictionaries/MySQLDictionarySource.h @@ -82,7 +82,7 @@ class MySQLDictionarySource final : public IDictionarySource // execute invalidate_query. 
expects single cell in result std::string doInvalidateQuery(const std::string & request) const; - Poco::Logger * log; + LoggerPtr log; std::chrono::time_point update_time; const DictionaryStructure dict_struct; diff --git a/src/Dictionaries/NullDictionarySource.cpp b/src/Dictionaries/NullDictionarySource.cpp index 45dcc77f93d0..2d5656e13359 100644 --- a/src/Dictionaries/NullDictionarySource.cpp +++ b/src/Dictionaries/NullDictionarySource.cpp @@ -20,7 +20,7 @@ NullDictionarySource::NullDictionarySource(const NullDictionarySource & other) : QueryPipeline NullDictionarySource::loadAll() { - LOG_TRACE(&Poco::Logger::get("NullDictionarySource"), "loadAll {}", toString()); + LOG_TRACE(getLogger("NullDictionarySource"), "loadAll {}", toString()); return QueryPipeline(std::make_shared(sample_block)); } diff --git a/src/Dictionaries/PolygonDictionaryUtils.cpp b/src/Dictionaries/PolygonDictionaryUtils.cpp index c28c7c15aa7b..77d2da5ad3f8 100644 --- a/src/Dictionaries/PolygonDictionaryUtils.cpp +++ b/src/Dictionaries/PolygonDictionaryUtils.cpp @@ -69,7 +69,7 @@ const FinalCellWithSlabs * FinalCellWithSlabs::find(Coord, Coord) const SlabsPolygonIndex::SlabsPolygonIndex( const std::vector & polygons) - : log(&Poco::Logger::get("SlabsPolygonIndex")), + : log(getLogger("SlabsPolygonIndex")), sorted_x(uniqueX(polygons)) { indexBuild(polygons); diff --git a/src/Dictionaries/PolygonDictionaryUtils.h b/src/Dictionaries/PolygonDictionaryUtils.h index 94b8b9615778..4fb2bdf625a6 100644 --- a/src/Dictionaries/PolygonDictionaryUtils.h +++ b/src/Dictionaries/PolygonDictionaryUtils.h @@ -81,7 +81,7 @@ class SlabsPolygonIndex /** Auxiliary function for adding ring to the index */ void indexAddRing(const Ring & ring, size_t polygon_id); - Poco::Logger * log; + LoggerPtr log; /** Sorted distinct coordinates of all vertices */ std::vector sorted_x; diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index 9f254da0b119..7d9f80f77ddb 
100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -56,7 +56,7 @@ PostgreSQLDictionarySource::PostgreSQLDictionarySource( , configuration(configuration_) , pool(std::move(pool_)) , sample_block(sample_block_) - , log(&Poco::Logger::get("PostgreSQLDictionarySource")) + , log(getLogger("PostgreSQLDictionarySource")) , query_builder(makeExternalQueryBuilder(dict_struct, configuration.schema, configuration.table, configuration.query, configuration.where)) , load_all_query(query_builder.composeLoadAllQuery()) { @@ -69,7 +69,7 @@ PostgreSQLDictionarySource::PostgreSQLDictionarySource(const PostgreSQLDictionar , configuration(other.configuration) , pool(other.pool) , sample_block(other.sample_block) - , log(&Poco::Logger::get("PostgreSQLDictionarySource")) + , log(getLogger("PostgreSQLDictionarySource")) , query_builder(makeExternalQueryBuilder(dict_struct, configuration.schema, configuration.table, configuration.query, configuration.where)) , load_all_query(query_builder.composeLoadAllQuery()) , update_time(other.update_time) diff --git a/src/Dictionaries/PostgreSQLDictionarySource.h b/src/Dictionaries/PostgreSQLDictionarySource.h index 8ecf56a94305..aabd4516f4ee 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.h +++ b/src/Dictionaries/PostgreSQLDictionarySource.h @@ -62,7 +62,7 @@ class PostgreSQLDictionarySource final : public IDictionarySource const Configuration configuration; postgres::PoolWithFailoverPtr pool; Block sample_block; - Poco::Logger * log; + LoggerPtr log; ExternalQueryBuilder query_builder; const std::string load_all_query; std::chrono::time_point update_time; diff --git a/src/Dictionaries/RegExpTreeDictionary.cpp b/src/Dictionaries/RegExpTreeDictionary.cpp index c072ba78d465..0050cab0cc97 100644 --- a/src/Dictionaries/RegExpTreeDictionary.cpp +++ b/src/Dictionaries/RegExpTreeDictionary.cpp @@ -124,7 +124,7 @@ struct RegExpTreeDictionary::RegexTreeNode std::unordered_map 
attributes; }; -std::vector createStringPieces(const String & value, int num_captures, const String & regex, Poco::Logger * logger) +std::vector createStringPieces(const String & value, int num_captures, const String & regex, LoggerPtr logger) { std::vector result; String literal; @@ -369,7 +369,7 @@ RegExpTreeDictionary::RegExpTreeDictionary( source_ptr(source_ptr_), configuration(configuration_), use_vectorscan(use_vectorscan_), - logger(&Poco::Logger::get("RegExpTreeDictionary")) + logger(getLogger("RegExpTreeDictionary")) { if (auto * ch_source = typeid_cast(source_ptr.get())) { diff --git a/src/Dictionaries/RegExpTreeDictionary.h b/src/Dictionaries/RegExpTreeDictionary.h index 17a0c6bbef31..2a036459618f 100644 --- a/src/Dictionaries/RegExpTreeDictionary.h +++ b/src/Dictionaries/RegExpTreeDictionary.h @@ -167,7 +167,7 @@ class RegExpTreeDictionary : public IDictionary hs_database_t* origin_db; #endif - Poco::Logger * logger; + LoggerPtr logger; }; } diff --git a/src/Dictionaries/XDBCDictionarySource.cpp b/src/Dictionaries/XDBCDictionarySource.cpp index 23dc7db508de..7241e259a371 100644 --- a/src/Dictionaries/XDBCDictionarySource.cpp +++ b/src/Dictionaries/XDBCDictionarySource.cpp @@ -67,7 +67,7 @@ XDBCDictionarySource::XDBCDictionarySource( ContextPtr context_, const BridgeHelperPtr bridge_) : WithContext(context_->getGlobalContext()) - , log(&Poco::Logger::get(bridge_->getName() + "DictionarySource")) + , log(getLogger(bridge_->getName() + "DictionarySource")) , update_time(std::chrono::system_clock::from_time_t(0)) , dict_struct(dict_struct_) , configuration(configuration_) @@ -86,7 +86,7 @@ XDBCDictionarySource::XDBCDictionarySource( /// copy-constructor is provided in order to support cloneability XDBCDictionarySource::XDBCDictionarySource(const XDBCDictionarySource & other) : WithContext(other.getContext()) - , log(&Poco::Logger::get(other.bridge_helper->getName() + "DictionarySource")) + , log(getLogger(other.bridge_helper->getName() + 
"DictionarySource")) , update_time(other.update_time) , dict_struct(other.dict_struct) , configuration(other.configuration) diff --git a/src/Dictionaries/XDBCDictionarySource.h b/src/Dictionaries/XDBCDictionarySource.h index 8ca2e172aa6f..6011563c5223 100644 --- a/src/Dictionaries/XDBCDictionarySource.h +++ b/src/Dictionaries/XDBCDictionarySource.h @@ -76,7 +76,7 @@ class XDBCDictionarySource final : public IDictionarySource, WithContext QueryPipeline loadFromQuery(const Poco::URI & url, const Block & required_sample_block, const std::string & query) const; - Poco::Logger * log; + LoggerPtr log; std::chrono::time_point update_time; const DictionaryStructure dict_struct; diff --git a/src/Dictionaries/YAMLRegExpTreeDictionarySource.cpp b/src/Dictionaries/YAMLRegExpTreeDictionarySource.cpp index b9e8fd859ed9..c6d8f1368b58 100644 --- a/src/Dictionaries/YAMLRegExpTreeDictionarySource.cpp +++ b/src/Dictionaries/YAMLRegExpTreeDictionarySource.cpp @@ -284,7 +284,7 @@ Block parseYAMLAsRegExpTree(const YAML::Node & node, const String & key_name, co YAMLRegExpTreeDictionarySource::YAMLRegExpTreeDictionarySource( const String & filepath_, const DictionaryStructure & dict_struct, ContextPtr context_, bool created_from_ddl) - : filepath(filepath_), structure(dict_struct), context(context_), logger(&Poco::Logger::get(kYAMLRegExpTreeDictionarySource)) + : filepath(filepath_), structure(dict_struct), context(context_), logger(getLogger(kYAMLRegExpTreeDictionarySource)) { key_name = (*structure.key)[0].name; diff --git a/src/Dictionaries/YAMLRegExpTreeDictionarySource.h b/src/Dictionaries/YAMLRegExpTreeDictionarySource.h index f5dd9b7d186d..041cbca81c65 100644 --- a/src/Dictionaries/YAMLRegExpTreeDictionarySource.h +++ b/src/Dictionaries/YAMLRegExpTreeDictionarySource.h @@ -64,7 +64,7 @@ class YAMLRegExpTreeDictionarySource : public IDictionarySource ContextPtr context; - Poco::Logger * logger; + LoggerPtr logger; Poco::Timestamp last_modification; Poco::Timestamp 
getLastModification() const; diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index a3b7e4130141..a8341c74ee35 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -151,7 +151,7 @@ class DiskLocalReservation : public IReservation if (disk->reserved_bytes < size) { disk->reserved_bytes = 0; - LOG_ERROR(&Poco::Logger::get("DiskLocal"), "Unbalanced reservations size for disk '{}'.", disk->getName()); + LOG_ERROR(getLogger("DiskLocal"), "Unbalanced reservations size for disk '{}'.", disk->getName()); } else { @@ -159,7 +159,7 @@ class DiskLocalReservation : public IReservation } if (disk->reservation_count == 0) - LOG_ERROR(&Poco::Logger::get("DiskLocal"), "Unbalanced reservation count for disk '{}'.", disk->getName()); + LOG_ERROR(getLogger("DiskLocal"), "Unbalanced reservation count for disk '{}'.", disk->getName()); else --disk->reservation_count; } @@ -499,7 +499,7 @@ DiskLocal::DiskLocal(const String & name_, const String & path_, UInt64 keep_fre : IDisk(name_) , disk_path(path_) , keep_free_space_bytes(keep_free_space_bytes_) - , logger(&Poco::Logger::get("DiskLocal")) + , logger(getLogger("DiskLocal")) { data_source_description.type = DataSourceType::Local; diff --git a/src/Disks/DiskLocal.h b/src/Disks/DiskLocal.h index 14e29904422b..d3fe89d400a8 100644 --- a/src/Disks/DiskLocal.h +++ b/src/Disks/DiskLocal.h @@ -148,7 +148,7 @@ class DiskLocal : public IDisk const String disk_path; const String disk_checker_path = ".disk_checker_file"; std::atomic keep_free_space_bytes; - Poco::Logger * logger; + LoggerPtr logger; DataSourceDescription data_source_description; UInt64 reserved_bytes = 0; diff --git a/src/Disks/DiskLocalCheckThread.cpp b/src/Disks/DiskLocalCheckThread.cpp index 87fcc0d1cf5a..e95c614336bf 100644 --- a/src/Disks/DiskLocalCheckThread.cpp +++ b/src/Disks/DiskLocalCheckThread.cpp @@ -13,7 +13,7 @@ DiskLocalCheckThread::DiskLocalCheckThread(DiskLocal * disk_, ContextPtr context : WithContext(context_) , 
disk(std::move(disk_)) , check_period_ms(local_disk_check_period_ms) - , log(&Poco::Logger::get(fmt::format("DiskLocalCheckThread({})", disk->getName()))) + , log(getLogger(fmt::format("DiskLocalCheckThread({})", disk->getName()))) { task = getContext()->getSchedulePool().createTask(log->name(), [this] { run(); }); } diff --git a/src/Disks/DiskLocalCheckThread.h b/src/Disks/DiskLocalCheckThread.h index eb688d599ca0..046b75531360 100644 --- a/src/Disks/DiskLocalCheckThread.h +++ b/src/Disks/DiskLocalCheckThread.h @@ -29,7 +29,7 @@ class DiskLocalCheckThread : WithContext DiskLocal * disk; size_t check_period_ms; - Poco::Logger * log; + LoggerPtr log; std::atomic need_stop{false}; BackgroundSchedulePool::TaskHolder task; diff --git a/src/Disks/DiskSelector.cpp b/src/Disks/DiskSelector.cpp index 9894e4251a2b..10e7e2cf6da9 100644 --- a/src/Disks/DiskSelector.cpp +++ b/src/Disks/DiskSelector.cpp @@ -121,7 +121,7 @@ DiskSelectorPtr DiskSelector::updateFromConfig( if (num_disks_removed_from_config > 0) { LOG_WARNING( - &Poco::Logger::get("DiskSelector"), + getLogger("DiskSelector"), "{} disappeared from configuration, this change will be applied after restart of ClickHouse", warning.str()); } diff --git a/src/Disks/IDisk.cpp b/src/Disks/IDisk.cpp index 4969cc7c7007..6e27accbebdb 100644 --- a/src/Disks/IDisk.cpp +++ b/src/Disks/IDisk.cpp @@ -29,7 +29,7 @@ bool IDisk::isDirectoryEmpty(const String & path) const void IDisk::copyFile(const String & from_file_path, IDisk & to_disk, const String & to_file_path, const WriteSettings & settings) /// NOLINT { - LOG_DEBUG(&Poco::Logger::get("IDisk"), "Copying from {} (path: {}) {} to {} (path: {}) {}.", + LOG_DEBUG(getLogger("IDisk"), "Copying from {} (path: {}) {} to {} (path: {}) {}.", getName(), getPath(), from_file_path, to_disk.getName(), to_disk.getPath(), to_file_path); auto in = readFile(from_file_path); @@ -144,7 +144,7 @@ void IDisk::startup(ContextPtr context, bool skip_access_check) { if (isReadOnly()) { - 
LOG_DEBUG(&Poco::Logger::get("IDisk"), + LOG_DEBUG(getLogger("IDisk"), "Skip access check for disk {} (read-only disk).", getName()); } diff --git a/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.cpp b/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.cpp index 697b1b873596..167c03ca443c 100644 --- a/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.cpp +++ b/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.cpp @@ -59,9 +59,9 @@ AsynchronousReadIndirectBufferFromRemoteFS::AsynchronousReadIndirectBufferFromRe ? CurrentThread::getQueryId() : "") , current_reader_id(getRandomASCIIString(8)) #ifndef NDEBUG - , log(&Poco::Logger::get("AsynchronousBufferFromRemoteFS")) + , log(getLogger("AsynchronousBufferFromRemoteFS")) #else - , log(&Poco::Logger::get("AsyncBuffer(" + impl->getFileName() + ")")) + , log(getLogger("AsyncBuffer(" + impl->getFileName() + ")")) #endif { ProfileEvents::increment(ProfileEvents::RemoteFSBuffers); diff --git a/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.h b/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.h index 8cb0e2826b4f..8463cc8cbff9 100644 --- a/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.h +++ b/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.h @@ -93,7 +93,7 @@ class AsynchronousReadIndirectBufferFromRemoteFS : public ReadBufferFromFileBase std::optional read_until_position; - Poco::Logger * log; + LoggerPtr log; struct LastPrefetchInfo { diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index 5eaee2e3026f..ff695ea66426 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -49,9 +49,9 @@ CachedOnDiskReadBufferFromFile::CachedOnDiskReadBufferFromFile( std::optional read_until_position_) : ReadBufferFromFileBase(settings_.remote_fs_buffer_size, nullptr, 0, file_size_) #ifndef NDEBUG - , log(&Poco::Logger::get("CachedOnDiskReadBufferFromFile(" + 
source_file_path_ + ")")) + , log(getLogger("CachedOnDiskReadBufferFromFile(" + source_file_path_ + ")")) #else - , log(&Poco::Logger::get("CachedOnDiskReadBufferFromFile")) + , log(getLogger("CachedOnDiskReadBufferFromFile")) #endif , cache_key(cache_key_) , source_file_path(source_file_path_) diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.h b/src/Disks/IO/CachedOnDiskReadBufferFromFile.h index 14e8ea6c7e73..f18e3fe7d154 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.h +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.h @@ -92,7 +92,7 @@ class CachedOnDiskReadBufferFromFile : public ReadBufferFromFileBase static bool canStartFromCache(size_t current_offset, const FileSegment & file_segment); - Poco::Logger * log; + LoggerPtr log; FileCache::Key cache_key; String source_file_path; diff --git a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp index 169bbfac886e..22dbe644cf08 100644 --- a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp @@ -30,7 +30,7 @@ FileSegmentRangeWriter::FileSegmentRangeWriter( const String & source_path_) : cache(cache_) , key(key_) - , log(&Poco::Logger::get("FileSegmentRangeWriter")) + , log(getLogger("FileSegmentRangeWriter")) , cache_log(cache_log_) , query_id(query_id_) , source_path(source_path_) @@ -199,7 +199,7 @@ CachedOnDiskWriteBufferFromFile::CachedOnDiskWriteBufferFromFile( const String & query_id_, const WriteSettings & settings_) : WriteBufferFromFileDecorator(std::move(impl_)) - , log(&Poco::Logger::get("CachedOnDiskWriteBufferFromFile")) + , log(getLogger("CachedOnDiskWriteBufferFromFile")) , cache(cache_) , source_path(source_path_) , key(key_) diff --git a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.h b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.h index 834e584c8db4..c8a8538f1d47 100644 --- a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.h +++ b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.h @@ 
-48,7 +48,7 @@ class FileSegmentRangeWriter FileCache * cache; FileSegment::Key key; - Poco::Logger * log; + LoggerPtr log; std::shared_ptr cache_log; String query_id; String source_path; @@ -83,7 +83,7 @@ class CachedOnDiskWriteBufferFromFile final : public WriteBufferFromFileDecorato private: void cacheData(char * data, size_t size, bool throw_on_error); - Poco::Logger * log; + LoggerPtr log; FileCachePtr cache; String source_path; diff --git a/src/Disks/IO/IOUringReader.cpp b/src/Disks/IO/IOUringReader.cpp index 7bf1982d515b..c4b06afb507f 100644 --- a/src/Disks/IO/IOUringReader.cpp +++ b/src/Disks/IO/IOUringReader.cpp @@ -44,7 +44,7 @@ namespace ErrorCodes } IOUringReader::IOUringReader(uint32_t entries_) - : log(&Poco::Logger::get("IOUringReader")) + : log(getLogger("IOUringReader")) { struct io_uring_probe * probe = io_uring_get_probe(); if (!probe) diff --git a/src/Disks/IO/IOUringReader.h b/src/Disks/IO/IOUringReader.h index e3fcf1164482..5c3cfe39c1b2 100644 --- a/src/Disks/IO/IOUringReader.h +++ b/src/Disks/IO/IOUringReader.h @@ -64,7 +64,7 @@ class IOUringReader final : public IAsynchronousReader return promise.get_future(); } - const Poco::Logger * log; + const LoggerPtr log; public: IOUringReader(uint32_t entries_); diff --git a/src/Disks/IO/ReadBufferFromAzureBlobStorage.h b/src/Disks/IO/ReadBufferFromAzureBlobStorage.h index 711b4ce23f74..95cce40b50c8 100644 --- a/src/Disks/IO/ReadBufferFromAzureBlobStorage.h +++ b/src/Disks/IO/ReadBufferFromAzureBlobStorage.h @@ -65,7 +65,7 @@ class ReadBufferFromAzureBlobStorage : public ReadBufferFromFileBase char * data_ptr; size_t data_capacity; - Poco::Logger * log = &Poco::Logger::get("ReadBufferFromAzureBlobStorage"); + LoggerPtr log = getLogger("ReadBufferFromAzureBlobStorage"); }; } diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp index 8450e740ab54..9c182bfdde24 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp +++ 
b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp @@ -27,7 +27,7 @@ ReadBufferFromRemoteFSGather::ReadBufferFromRemoteFSGather( , blobs_to_read(blobs_to_read_) , settings(settings_) , query_id(CurrentThread::isInitialized() && CurrentThread::get().getQueryContext() != nullptr ? CurrentThread::getQueryId() : "") - , log(&Poco::Logger::get("ReadBufferFromRemoteFSGather")) + , log(getLogger("ReadBufferFromRemoteFSGather")) , enable_cache_log(!query_id.empty() && settings.enable_filesystem_cache_log) { if (blobs_to_read.empty()) diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.h b/src/Disks/IO/ReadBufferFromRemoteFSGather.h index abe57647a488..f5307deeb952 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.h +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.h @@ -76,7 +76,7 @@ friend class ReadIndirectBufferFromRemoteFS; String query_id; - Poco::Logger * log; + LoggerPtr log; SeekableReadBufferPtr current_buf; diff --git a/src/Disks/IO/ReadBufferFromWebServer.cpp b/src/Disks/IO/ReadBufferFromWebServer.cpp index 04f9ce7d53eb..bf0b7724bcf2 100644 --- a/src/Disks/IO/ReadBufferFromWebServer.cpp +++ b/src/Disks/IO/ReadBufferFromWebServer.cpp @@ -27,7 +27,7 @@ ReadBufferFromWebServer::ReadBufferFromWebServer( bool use_external_buffer_, size_t read_until_position_) : ReadBufferFromFileBase(settings_.remote_fs_buffer_size, nullptr, 0) - , log(&Poco::Logger::get("ReadBufferFromWebServer")) + , log(getLogger("ReadBufferFromWebServer")) , context(context_) , url(url_) , buf_size(settings_.remote_fs_buffer_size) diff --git a/src/Disks/IO/ReadBufferFromWebServer.h b/src/Disks/IO/ReadBufferFromWebServer.h index 03cd107bf9a3..a6d5b50e5e8d 100644 --- a/src/Disks/IO/ReadBufferFromWebServer.h +++ b/src/Disks/IO/ReadBufferFromWebServer.h @@ -44,7 +44,7 @@ class ReadBufferFromWebServer : public ReadBufferFromFileBase private: std::unique_ptr initialize(); - Poco::Logger * log; + LoggerPtr log; ContextPtr context; const String url; diff --git 
a/src/Disks/IO/WriteBufferFromAzureBlobStorage.cpp b/src/Disks/IO/WriteBufferFromAzureBlobStorage.cpp index f8ca6b9ab075..b504c13487cc 100644 --- a/src/Disks/IO/WriteBufferFromAzureBlobStorage.cpp +++ b/src/Disks/IO/WriteBufferFromAzureBlobStorage.cpp @@ -27,7 +27,7 @@ WriteBufferFromAzureBlobStorage::WriteBufferFromAzureBlobStorage( size_t buf_size_, const WriteSettings & write_settings_) : BufferWithOwnMemory(buf_size_, nullptr, 0) - , log(&Poco::Logger::get("WriteBufferFromAzureBlobStorage")) + , log(getLogger("WriteBufferFromAzureBlobStorage")) , max_single_part_upload_size(max_single_part_upload_size_) , blob_path(blob_path_) , write_settings(write_settings_) diff --git a/src/Disks/IO/WriteBufferFromAzureBlobStorage.h b/src/Disks/IO/WriteBufferFromAzureBlobStorage.h index 274928093847..c5543c99a6df 100644 --- a/src/Disks/IO/WriteBufferFromAzureBlobStorage.h +++ b/src/Disks/IO/WriteBufferFromAzureBlobStorage.h @@ -42,7 +42,7 @@ class WriteBufferFromAzureBlobStorage : public BufferWithOwnMemory void execWithRetry(std::function func, size_t num_tries, size_t cost = 0); void uploadBlock(const char * data, size_t size); - Poco::Logger * log; + LoggerPtr log; const size_t max_single_part_upload_size; const std::string blob_path; diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp index ad938e345bb7..ba19859de2de 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp @@ -31,7 +31,7 @@ AzureObjectStorage::AzureObjectStorage( : name(name_) , client(std::move(client_)) , settings(std::move(settings_)) - , log(&Poco::Logger::get("AzureObjectStorage")) + , log(getLogger("AzureObjectStorage")) { data_source_description.type = DataSourceType::AzureBlobStorage; data_source_description.description = client.get()->GetUrl(); diff --git 
a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h index 648016fb7320..23010b4196d2 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h @@ -132,7 +132,7 @@ class AzureObjectStorage : public IObjectStorage MultiVersion client; MultiVersion settings; - Poco::Logger * log; + LoggerPtr log; DataSourceDescription data_source_description; }; diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp index 505b26ebb3a1..a0c1012f2029 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp @@ -29,7 +29,7 @@ CachedObjectStorage::CachedObjectStorage( , cache(cache_) , cache_settings(cache_settings_) , cache_config_name(cache_config_name_) - , log(&Poco::Logger::get(getName())) + , log(getLogger(getName())) { cache->initialize(); } diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h index 119dc25c66b1..eb0d11dbdc53 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h @@ -126,7 +126,7 @@ class CachedObjectStorage final : public IObjectStorage FileCachePtr cache; FileCacheSettings cache_settings; std::string cache_config_name; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Disks/ObjectStorages/Cached/registerDiskCache.cpp b/src/Disks/ObjectStorages/Cached/registerDiskCache.cpp index d8c4a9d42fd7..71bee1e2be0b 100644 --- a/src/Disks/ObjectStorages/Cached/registerDiskCache.cpp +++ b/src/Disks/ObjectStorages/Cached/registerDiskCache.cpp @@ -52,7 +52,7 @@ void registerDiskCache(DiskFactory & factory, bool /* global_skip_access_check * disk_object_storage->wrapWithCache(cache, file_cache_settings, name); LOG_INFO( - 
&Poco::Logger::get("DiskCache"), + getLogger("DiskCache"), "Registered cached disk (`{}`) with structure: {}", name, assert_cast(disk_object_storage.get())->getStructure()); diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.cpp b/src/Disks/ObjectStorages/DiskObjectStorage.cpp index 6143f0620b81..1bca04911a35 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorage.cpp @@ -121,7 +121,7 @@ DiskObjectStorage::DiskObjectStorage( uint64_t thread_pool_size_) : IDisk(name_, getAsyncExecutor(log_name, thread_pool_size_)) , object_storage_root_path(object_storage_root_path_) - , log (&Poco::Logger::get("DiskObjectStorage(" + log_name + ")")) + , log (getLogger("DiskObjectStorage(" + log_name + ")")) , metadata_storage(std::move(metadata_storage_)) , object_storage(std::move(object_storage_)) , send_metadata(send_metadata_) diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.h b/src/Disks/ObjectStorages/DiskObjectStorage.h index d6723d1eb715..651ed1efb401 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.h +++ b/src/Disks/ObjectStorages/DiskObjectStorage.h @@ -218,7 +218,7 @@ friend class DiskObjectStorageRemoteMetadataRestoreHelper; DiskTransactionPtr createObjectStorageTransaction(); const String object_storage_root_path; - Poco::Logger * log; + LoggerPtr log; MetadataStoragePtr metadata_storage; ObjectStoragePtr object_storage; diff --git a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp index c0f6fbd9de0c..b7617d1ab26c 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp @@ -290,7 +290,7 @@ struct RemoveRecursiveObjectStorageOperation final : public IDiskObjectStorageOp || e.code() == ErrorCodes::CANNOT_PARSE_INPUT_ASSERTION_FAILED) { LOG_DEBUG( - &Poco::Logger::get("RemoveRecursiveObjectStorageOperation"), + 
getLogger("RemoveRecursiveObjectStorageOperation"), "Can't read metadata because of an exception. Just remove it from the filesystem. Path: {}, exception: {}", metadata_storage.getPath() + path_to_remove, e.message()); @@ -802,7 +802,7 @@ void DiskObjectStorageTransaction::commit() catch (...) { tryLogCurrentException( - &Poco::Logger::get("DiskObjectStorageTransaction"), + getLogger("DiskObjectStorageTransaction"), fmt::format("An error occurred while executing transaction's operation #{} ({})", i, operations_to_execute[i]->getInfoForLog())); for (int64_t j = i; j >= 0; --j) @@ -814,7 +814,7 @@ void DiskObjectStorageTransaction::commit() catch (...) { tryLogCurrentException( - &Poco::Logger::get("DiskObjectStorageTransaction"), + getLogger("DiskObjectStorageTransaction"), fmt::format("An error occurred while undoing transaction's operation #{}", i)); throw; diff --git a/src/Disks/ObjectStorages/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/LocalObjectStorage.cpp index 67e2cc2d74b9..26d8b20555a7 100644 --- a/src/Disks/ObjectStorages/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/LocalObjectStorage.cpp @@ -24,7 +24,7 @@ namespace ErrorCodes } LocalObjectStorage::LocalObjectStorage() - : log(&Poco::Logger::get("LocalObjectStorage")) + : log(getLogger("LocalObjectStorage")) { data_source_description.type = DataSourceType::Local; if (auto block_device_id = tryGetBlockDeviceId("/"); block_device_id.has_value()) diff --git a/src/Disks/ObjectStorages/LocalObjectStorage.h b/src/Disks/ObjectStorages/LocalObjectStorage.h index b04e3fa62855..2e66fb36cb12 100644 --- a/src/Disks/ObjectStorages/LocalObjectStorage.h +++ b/src/Disks/ObjectStorages/LocalObjectStorage.h @@ -86,7 +86,7 @@ class LocalObjectStorage : public IObjectStorage bool isRemote() const override { return false; } private: - Poco::Logger * log; + LoggerPtr log; DataSourceDescription data_source_description; }; diff --git a/src/Disks/ObjectStorages/S3/ProxyListConfiguration.cpp 
b/src/Disks/ObjectStorages/S3/ProxyListConfiguration.cpp index 7c7bc7966ea8..72d040eb1ecb 100644 --- a/src/Disks/ObjectStorages/S3/ProxyListConfiguration.cpp +++ b/src/Disks/ObjectStorages/S3/ProxyListConfiguration.cpp @@ -22,7 +22,7 @@ ClientConfigurationPerRequest ProxyListConfiguration::getConfiguration(const Aws cfg.proxy_host = proxies[index].getHost(); cfg.proxy_port = proxies[index].getPort(); - LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Use proxy: {}", proxies[index].toString()); + LOG_DEBUG(getLogger("AWSClient"), "Use proxy: {}", proxies[index].toString()); return cfg; } diff --git a/src/Disks/ObjectStorages/S3/ProxyResolverConfiguration.cpp b/src/Disks/ObjectStorages/S3/ProxyResolverConfiguration.cpp index 14db39b3f3d0..47e483235122 100644 --- a/src/Disks/ObjectStorages/S3/ProxyResolverConfiguration.cpp +++ b/src/Disks/ObjectStorages/S3/ProxyResolverConfiguration.cpp @@ -26,7 +26,7 @@ ProxyResolverConfiguration::ProxyResolverConfiguration(const Poco::URI & endpoin ClientConfigurationPerRequest ProxyResolverConfiguration::getConfiguration(const Aws::Http::HttpRequest &) { - LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Obtain proxy using resolver: {}", endpoint.toString()); + LOG_DEBUG(getLogger("AWSClient"), "Obtain proxy using resolver: {}", endpoint.toString()); std::lock_guard lock(cache_mutex); @@ -34,7 +34,7 @@ ClientConfigurationPerRequest ProxyResolverConfiguration::getConfiguration(const if (cache_ttl.count() && cache_valid && now <= cache_timestamp + cache_ttl && now >= cache_timestamp) { - LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Use cached proxy: {}://{}:{}", Aws::Http::SchemeMapper::ToString(cached_config.proxy_scheme), cached_config.proxy_host, cached_config.proxy_port); + LOG_DEBUG(getLogger("AWSClient"), "Use cached proxy: {}://{}:{}", Aws::Http::SchemeMapper::ToString(cached_config.proxy_scheme), cached_config.proxy_host, cached_config.proxy_port); return cached_config; } @@ -84,7 +84,7 @@ ClientConfigurationPerRequest 
ProxyResolverConfiguration::getConfiguration(const /// Read proxy host as string from response body. Poco::StreamCopier::copyToString(response_body_stream, proxy_host); - LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Use proxy: {}://{}:{}", proxy_scheme, proxy_host, proxy_port); + LOG_DEBUG(getLogger("AWSClient"), "Use proxy: {}://{}:{}", proxy_scheme, proxy_host, proxy_port); cached_config.proxy_scheme = Aws::Http::SchemeMapper::FromString(proxy_scheme.c_str()); cached_config.proxy_host = proxy_host; diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h index bcdc97983be8..e5c3b8a75ca5 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h @@ -62,7 +62,7 @@ class S3ObjectStorage : public IObjectStorage data_source_description.is_cached = false; data_source_description.is_encrypted = false; - log = &Poco::Logger::get(logger_name); + log = getLogger(logger_name); } public: @@ -175,7 +175,7 @@ class S3ObjectStorage : public IObjectStorage const String version_id; - Poco::Logger * log; + LoggerPtr log; DataSourceDescription data_source_description; }; diff --git a/src/Disks/ObjectStorages/S3/copyS3FileToDisk.cpp b/src/Disks/ObjectStorages/S3/copyS3FileToDisk.cpp index 098e02595f59..76bd7da88299 100644 --- a/src/Disks/ObjectStorages/S3/copyS3FileToDisk.cpp +++ b/src/Disks/ObjectStorages/S3/copyS3FileToDisk.cpp @@ -36,7 +36,7 @@ void copyS3FileToDisk( auto destination_data_source_description = destination_disk->getDataSourceDescription(); if (destination_data_source_description != DataSourceDescription{DataSourceType::S3, s3_client->getInitialEndpoint(), false, false}) { - LOG_TRACE(&Poco::Logger::get("copyS3FileToDisk"), "Copying {} to disk {} through buffers", src_key, destination_disk->getName()); + LOG_TRACE(getLogger("copyS3FileToDisk"), "Copying {} to disk {} through buffers", src_key, destination_disk->getName()); ReadBufferFromS3 read_buffer{s3_client, 
src_bucket, src_key, {}, request_settings, read_settings}; if (*src_offset) read_buffer.seek(*src_offset, SEEK_SET); @@ -46,7 +46,7 @@ void copyS3FileToDisk( return; } - LOG_TRACE(&Poco::Logger::get("copyS3FileToDisk"), "Copying {} to disk {} using native copy", src_key, destination_disk->getName()); + LOG_TRACE(getLogger("copyS3FileToDisk"), "Copying {} to disk {} using native copy", src_key, destination_disk->getName()); String dest_bucket = destination_disk->getObjectStorage()->getObjectsNamespace(); diff --git a/src/Disks/ObjectStorages/S3/diskSettings.cpp b/src/Disks/ObjectStorages/S3/diskSettings.cpp index 09c7185b007c..3fe6faa16535 100644 --- a/src/Disks/ObjectStorages/S3/diskSettings.cpp +++ b/src/Disks/ObjectStorages/S3/diskSettings.cpp @@ -54,7 +54,7 @@ std::shared_ptr getProxyResolverConfiguration( auto proxy_port = proxy_resolver_config.getUInt(prefix + ".proxy_port"); auto cache_ttl = proxy_resolver_config.getUInt(prefix + ".proxy_cache_time", 10); - LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Configured proxy resolver: {}, Scheme: {}, Port: {}", + LOG_DEBUG(getLogger("DiskS3"), "Configured proxy resolver: {}, Scheme: {}, Port: {}", endpoint.toString(), proxy_scheme, proxy_port); return std::make_shared(endpoint, proxy_scheme, proxy_port, cache_ttl); @@ -79,7 +79,7 @@ std::shared_ptr getProxyListConfiguration( proxies.push_back(proxy_uri); - LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Configured proxy: {}", proxy_uri.toString()); + LOG_DEBUG(getLogger("DiskS3"), "Configured proxy: {}", proxy_uri.toString()); } if (!proxies.empty()) diff --git a/src/Disks/ObjectStorages/S3/registerDiskS3.cpp b/src/Disks/ObjectStorages/S3/registerDiskS3.cpp index 1c192a0d89cc..08d9f0cd3463 100644 --- a/src/Disks/ObjectStorages/S3/registerDiskS3.cpp +++ b/src/Disks/ObjectStorages/S3/registerDiskS3.cpp @@ -143,7 +143,7 @@ void registerDiskS3(DiskFactory & factory, bool global_skip_access_check) if (s3_capabilities.support_batch_delete && 
!CheckAccess::checkBatchRemove(*s3_storage, uri.key)) { LOG_WARNING( - &Poco::Logger::get("registerDiskS3"), + getLogger("registerDiskS3"), "Storage for disk {} does not support batch delete operations, " "so `s3_capabilities.support_batch_delete` was automatically turned off during the access check. " "To remove this message set `s3_capabilities.support_batch_delete` for the disk to `false`.", diff --git a/src/Disks/ObjectStorages/StoredObject.cpp b/src/Disks/ObjectStorages/StoredObject.cpp index 20f50d7676d4..7cfb9bf07f9d 100644 --- a/src/Disks/ObjectStorages/StoredObject.cpp +++ b/src/Disks/ObjectStorages/StoredObject.cpp @@ -53,7 +53,7 @@ StoredObject StoredObject::create( catch (...) { LOG_DEBUG( - &Poco::Logger::get("StoredObject"), + getLogger("StoredObject"), "Object does not exist while getting cache path hint (object path: {})", path); diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp index 4822ba6e0da5..fe9d85aa60ff 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp @@ -58,7 +58,7 @@ void WebObjectStorage::initialize(const String & uri_path) const FileData file_data{}; String dir_name = fs::path(uri_path.substr(url.size())) / ""; - LOG_TRACE(&Poco::Logger::get("DiskWeb"), "Adding directory: {}", dir_name); + LOG_TRACE(getLogger("DiskWeb"), "Adding directory: {}", dir_name); while (!metadata_buf.eof()) { @@ -83,7 +83,7 @@ void WebObjectStorage::initialize(const String & uri_path) const file_path = file_path.substr(url.size()); files.emplace(std::make_pair(file_path, file_data)); - LOG_TRACE(&Poco::Logger::get("DiskWeb"), "Adding file: {}, size: {}", file_path, file_data.size); + LOG_TRACE(getLogger("DiskWeb"), "Adding file: {}, size: {}", file_path, file_data.size); } files.emplace(std::make_pair(dir_name, FileData({ .type = FileType::Directory }))); @@ -113,7 +113,7 @@ WebObjectStorage::WebObjectStorage( ContextPtr 
context_) : WithContext(context_->getGlobalContext()) , url(url_) - , log(&Poco::Logger::get("WebObjectStorage")) + , log(getLogger("WebObjectStorage")) { } @@ -121,7 +121,7 @@ bool WebObjectStorage::exists(const StoredObject & object) const { const auto & path = object.absolute_path; - LOG_TRACE(&Poco::Logger::get("DiskWeb"), "Checking existence of path: {}", path); + LOG_TRACE(getLogger("DiskWeb"), "Checking existence of path: {}", path); if (files.find(path) != files.end()) return true; @@ -140,7 +140,7 @@ bool WebObjectStorage::exists(const StoredObject & object) const if (can_throw) throw Exception(ErrorCodes::NETWORK_ERROR, "Cannot load disk metadata. Error: {}", message); - LOG_TRACE(&Poco::Logger::get("DiskWeb"), "Cannot load disk metadata. Error: {}", message); + LOG_TRACE(getLogger("DiskWeb"), "Cannot load disk metadata. Error: {}", message); return false; } } diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.h b/src/Disks/ObjectStorages/Web/WebObjectStorage.h index 2dab8fdb62df..7f92d4641c63 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.h +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.h @@ -118,7 +118,7 @@ class WebObjectStorage : public IObjectStorage, WithContext String url; private: - Poco::Logger * log; + LoggerPtr log; size_t min_bytes_for_seek; }; diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index ec0f201b8014..e94f042de7c7 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -39,7 +39,7 @@ StoragePolicy::StoragePolicy( const String & config_prefix, DiskSelectorPtr disks) : name(std::move(name_)) - , log(&Poco::Logger::get("StoragePolicy (" + name + ")")) + , log(getLogger("StoragePolicy (" + name + ")")) { Poco::Util::AbstractConfiguration::Keys keys; String volumes_prefix = config_prefix + ".volumes"; @@ -93,7 +93,7 @@ StoragePolicy::StoragePolicy(String name_, Volumes volumes_, double move_factor_ : volumes(std::move(volumes_)) , name(std::move(name_)) , 
move_factor(move_factor_) - , log(&Poco::Logger::get("StoragePolicy (" + name + ")")) + , log(getLogger("StoragePolicy (" + name + ")")) { if (volumes.empty()) throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "Storage policy {} must contain at least one Volume.", backQuote(name)); @@ -385,7 +385,7 @@ StoragePolicySelector::StoragePolicySelector( */ policies.emplace(name, std::make_shared(name, config, config_prefix + "." + name, disks)); - LOG_INFO(&Poco::Logger::get("StoragePolicySelector"), "Storage policy {} loaded", backQuote(name)); + LOG_INFO(getLogger("StoragePolicySelector"), "Storage policy {} loaded", backQuote(name)); } /// Add default policy if it isn't explicitly specified. diff --git a/src/Disks/StoragePolicy.h b/src/Disks/StoragePolicy.h index 7e6aff7bbda2..22b1eb00f06f 100644 --- a/src/Disks/StoragePolicy.h +++ b/src/Disks/StoragePolicy.h @@ -106,7 +106,7 @@ class StoragePolicy : public IStoragePolicy void buildVolumeIndices(); - Poco::Logger * log; + LoggerPtr log; }; diff --git a/src/Disks/TemporaryFileOnDisk.cpp b/src/Disks/TemporaryFileOnDisk.cpp index 6fe6fd5a1c9a..edc31ef72397 100644 --- a/src/Disks/TemporaryFileOnDisk.cpp +++ b/src/Disks/TemporaryFileOnDisk.cpp @@ -72,7 +72,7 @@ TemporaryFileOnDisk::~TemporaryFileOnDisk() if (!disk->exists(relative_path)) { - LOG_WARNING(&Poco::Logger::get("TemporaryFileOnDisk"), "Temporary path '{}' does not exist in '{}'", relative_path, disk->getPath()); + LOG_WARNING(getLogger("TemporaryFileOnDisk"), "Temporary path '{}' does not exist in '{}'", relative_path, disk->getPath()); return; } diff --git a/src/Disks/VolumeJBOD.cpp b/src/Disks/VolumeJBOD.cpp index 64bd2619665f..af07a2f543a4 100644 --- a/src/Disks/VolumeJBOD.cpp +++ b/src/Disks/VolumeJBOD.cpp @@ -21,7 +21,7 @@ VolumeJBOD::VolumeJBOD( : IVolume(name_, config, config_prefix, disk_selector) , disks_by_size(disks.begin(), disks.end()) { - Poco::Logger * logger = &Poco::Logger::get("StorageConfiguration"); + LoggerPtr logger = 
getLogger("StorageConfiguration"); auto has_max_bytes = config.has(config_prefix + ".max_data_part_size_bytes"); auto has_max_ratio = config.has(config_prefix + ".max_data_part_size_ratio"); diff --git a/src/Disks/getOrCreateDiskFromAST.cpp b/src/Disks/getOrCreateDiskFromAST.cpp index 637acff7b957..ef6c27d1292a 100644 --- a/src/Disks/getOrCreateDiskFromAST.cpp +++ b/src/Disks/getOrCreateDiskFromAST.cpp @@ -100,7 +100,7 @@ std::string getOrCreateDiskFromDiskAST(const ASTPtr & disk_function, ContextPtr FlattenDiskConfigurationVisitor{data}.visit(ast); auto disk_name = assert_cast(*ast).value.get(); - LOG_TRACE(&Poco::Logger::get("getOrCreateDiskFromDiskAST"), "Result disk name: {}", disk_name); + LOG_TRACE(getLogger("getOrCreateDiskFromDiskAST"), "Result disk name: {}", disk_name); return disk_name; } diff --git a/src/Formats/ProtobufSerializer.cpp b/src/Formats/ProtobufSerializer.cpp index 326ce0da45dc..45c31ed4b0ad 100644 --- a/src/Formats/ProtobufSerializer.cpp +++ b/src/Formats/ProtobufSerializer.cpp @@ -2892,7 +2892,7 @@ namespace { *root_serializer_ptr = message_serializer.get(); #if 0 - LOG_INFO(&Poco::Logger::get("ProtobufSerializer"), "Serialization tree:\n{}", get_root_desc_function(0)); + LOG_INFO(getLogger("ProtobufSerializer"), "Serialization tree:\n{}", get_root_desc_function(0)); #endif return message_serializer; } @@ -2901,7 +2901,7 @@ namespace auto envelope_serializer = std::make_unique(std::move(message_serializer), reader_or_writer); *root_serializer_ptr = envelope_serializer.get(); #if 0 - LOG_INFO(&Poco::Logger::get("ProtobufSerializer"), "Serialization tree:\n{}", get_root_desc_function(0)); + LOG_INFO(getLogger("ProtobufSerializer"), "Serialization tree:\n{}", get_root_desc_function(0)); #endif return envelope_serializer; } diff --git a/src/Functions/UserDefined/ExternalUserDefinedExecutableFunctionsLoader.cpp b/src/Functions/UserDefined/ExternalUserDefinedExecutableFunctionsLoader.cpp index d4ecbf66987f..f26dc381b66f 100644 --- 
a/src/Functions/UserDefined/ExternalUserDefinedExecutableFunctionsLoader.cpp +++ b/src/Functions/UserDefined/ExternalUserDefinedExecutableFunctionsLoader.cpp @@ -95,7 +95,7 @@ namespace } ExternalUserDefinedExecutableFunctionsLoader::ExternalUserDefinedExecutableFunctionsLoader(ContextPtr global_context_) - : ExternalLoader("external user defined function", &Poco::Logger::get("ExternalUserDefinedExecutableFunctionsLoader")) + : ExternalLoader("external user defined function", getLogger("ExternalUserDefinedExecutableFunctionsLoader")) , WithContext(global_context_) { setConfigSettings({"function", "name", "database", "uuid"}); diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.cpp b/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.cpp index d67c48f166d8..00f08a9bcbac 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.cpp @@ -54,7 +54,7 @@ namespace UserDefinedSQLObjectsLoaderFromDisk::UserDefinedSQLObjectsLoaderFromDisk(const ContextPtr & global_context_, const String & dir_path_) : global_context(global_context_) , dir_path{makeDirectoryPathCanonical(dir_path_)} - , log{&Poco::Logger::get("UserDefinedSQLObjectsLoaderFromDisk")} + , log{getLogger("UserDefinedSQLObjectsLoaderFromDisk")} { createDirectory(); } diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.h b/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.h index 7b0bb291f424..be171dfd2947 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.h +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromDisk.h @@ -39,7 +39,7 @@ class UserDefinedSQLObjectsLoaderFromDisk : public IUserDefinedSQLObjectsLoader ContextPtr global_context; String dir_path; - Poco::Logger * log; + LoggerPtr log; std::atomic objects_loaded = false; }; diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.cpp 
b/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.cpp index 284adeb2c2d5..e015c533352c 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.cpp @@ -53,7 +53,7 @@ UserDefinedSQLObjectsLoaderFromZooKeeper::UserDefinedSQLObjectsLoaderFromZooKeep , zookeeper_getter{[global_context_]() { return global_context_->getZooKeeper(); }} , zookeeper_path{zookeeper_path_} , watch_queue{std::make_shared>>(std::numeric_limits::max())} - , log{&Poco::Logger::get("UserDefinedSQLObjectsLoaderFromZooKeeper")} + , log{getLogger("UserDefinedSQLObjectsLoaderFromZooKeeper")} { if (zookeeper_path.empty()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "ZooKeeper path must be non-empty"); diff --git a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.h b/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.h index 38e061fd4d9b..3e35e9024d13 100644 --- a/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.h +++ b/src/Functions/UserDefined/UserDefinedSQLObjectsLoaderFromZooKeeper.h @@ -75,7 +75,7 @@ class UserDefinedSQLObjectsLoaderFromZooKeeper : public IUserDefinedSQLObjectsLo using UserDefinedSQLObjectTypeAndName = std::pair; std::shared_ptr> watch_queue; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Functions/logTrace.cpp b/src/Functions/logTrace.cpp index 55f387cbfeb2..923ea9fd70ef 100644 --- a/src/Functions/logTrace.cpp +++ b/src/Functions/logTrace.cpp @@ -46,7 +46,7 @@ namespace throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First argument for function {} must be Constant string", getName()); - static auto * log = &Poco::Logger::get("FunctionLogTrace"); + static auto log = getLogger("FunctionLogTrace"); LOG_TRACE(log, fmt::runtime(message)); return DataTypeUInt8().createColumnConst(input_rows_count, 0); diff --git a/src/IO/HTTPCommon.cpp b/src/IO/HTTPCommon.cpp index 1b15bf0b94d4..37c1ac52a251 100644 
--- a/src/IO/HTTPCommon.cpp +++ b/src/IO/HTTPCommon.cpp @@ -132,7 +132,7 @@ namespace bool proxy_https_, size_t max_pool_size_, bool resolve_host_ = true) - : Base(static_cast(max_pool_size_), &Poco::Logger::get("HTTPSessionPool")) + : Base(static_cast(max_pool_size_), getLogger("HTTPSessionPool")) , host(host_) , port(port_) , https(https_) @@ -253,7 +253,7 @@ namespace auto msg = Poco::AnyCast(session_data); if (!msg.empty()) { - LOG_TRACE((&Poco::Logger::get("HTTPCommon")), "Failed communicating with {} with error '{}' will try to reconnect session", host, msg); + LOG_TRACE((getLogger("HTTPCommon")), "Failed communicating with {} with error '{}' will try to reconnect session", host, msg); if (resolve_host) { diff --git a/src/IO/ReadBufferFromS3.h b/src/IO/ReadBufferFromS3.h index 84e8d36865cc..34155ecd9f25 100644 --- a/src/IO/ReadBufferFromS3.h +++ b/src/IO/ReadBufferFromS3.h @@ -45,7 +45,7 @@ class ReadBufferFromS3 : public ReadBufferFromFileBase Aws::S3::Model::GetObjectResult read_result; std::unique_ptr impl; - Poco::Logger * log = &Poco::Logger::get("ReadBufferFromS3"); + LoggerPtr log = getLogger("ReadBufferFromS3"); public: ReadBufferFromS3( diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index 784110f735ed..e65f48da3691 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -132,7 +132,7 @@ namespace detail bool http_skip_not_found_url; ReadSettings settings; - Poco::Logger * log; + LoggerPtr log; bool withPartialContent(const Range & range) const { @@ -320,7 +320,7 @@ namespace detail , read_range(read_range_) , http_skip_not_found_url(http_skip_not_found_url_) , settings {settings_} - , log(&Poco::Logger::get("ReadWriteBufferFromHTTP")) + , log(getLogger("ReadWriteBufferFromHTTP")) { if (settings.http_max_tries <= 0 || settings.http_retry_initial_backoff_ms <= 0 || settings.http_retry_initial_backoff_ms >= settings.http_retry_max_backoff_ms) diff --git a/src/IO/S3/AWSLogger.cpp 
b/src/IO/S3/AWSLogger.cpp index 48c30ccf8813..efc5d51b50f9 100644 --- a/src/IO/S3/AWSLogger.cpp +++ b/src/IO/S3/AWSLogger.cpp @@ -3,6 +3,7 @@ #if USE_AWS_S3 #include +#include namespace { @@ -38,7 +39,7 @@ AWSLogger::AWSLogger(bool enable_s3_requests_logging_) : enable_s3_requests_logging(enable_s3_requests_logging_) { for (auto [tag, name] : S3_LOGGER_TAG_NAMES) - tag_loggers[tag] = &Poco::Logger::get(name); + tag_loggers[tag] = getLogger(name); default_logger = tag_loggers[S3_LOGGER_TAG_NAMES[0][0]]; } diff --git a/src/IO/S3/AWSLogger.h b/src/IO/S3/AWSLogger.h index 897c0e8d9648..09762eea7859 100644 --- a/src/IO/S3/AWSLogger.h +++ b/src/IO/S3/AWSLogger.h @@ -4,8 +4,8 @@ #if USE_AWS_S3 #include - -#include +#include +#include namespace DB::S3 { @@ -27,9 +27,9 @@ class AWSLogger final : public Aws::Utils::Logging::LogSystemInterface void Flush() final {} private: - Poco::Logger * default_logger; + LoggerPtr default_logger; bool enable_s3_requests_logging; - std::unordered_map tag_loggers; + std::unordered_map tag_loggers; }; } diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index 500af4c58b72..d1f19ac99fde 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -119,7 +119,7 @@ Client::Client( bool use_virtual_addressing) : Aws::S3::S3Client(credentials_provider, client_configuration, std::move(sign_payloads), use_virtual_addressing) , max_redirects(max_redirects_) - , log(&Poco::Logger::get("S3Client")) + , log(getLogger("S3Client")) { auto * endpoint_provider = dynamic_cast(accessEndpointProvider().get()); endpoint_provider->GetBuiltInParameters().GetParameter("Region").GetString(explicit_region); @@ -136,7 +136,7 @@ Client::Client(const Client & other) , explicit_region(other.explicit_region) , detect_region(other.detect_region) , max_redirects(other.max_redirects) - , log(&Poco::Logger::get("S3Client")) + , log(getLogger("S3Client")) { cache = std::make_shared(*other.cache); ClientCacheRegistry::instance().registerClient(cache); @@ -529,7 
+529,7 @@ void ClientCacheRegistry::clearCacheForAll() } else { - LOG_INFO(&Poco::Logger::get("ClientCacheRegistry"), "Deleting leftover S3 client cache"); + LOG_INFO(getLogger("ClientCacheRegistry"), "Deleting leftover S3 client cache"); it = client_caches.erase(it); } } diff --git a/src/IO/S3/Client.h b/src/IO/S3/Client.h index 5c68fca6f046..5088db9fddbc 100644 --- a/src/IO/S3/Client.h +++ b/src/IO/S3/Client.h @@ -211,7 +211,7 @@ class Client : private Aws::S3::S3Client const size_t max_redirects; - Poco::Logger * log; + LoggerPtr log; }; class ClientFactory diff --git a/src/IO/S3/Credentials.cpp b/src/IO/S3/Credentials.cpp index b6b264e274e8..401127cd04b2 100644 --- a/src/IO/S3/Credentials.cpp +++ b/src/IO/S3/Credentials.cpp @@ -39,7 +39,7 @@ bool areCredentialsEmptyOrExpired(const Aws::Auth::AWSCredentials & credentials, AWSEC2MetadataClient::AWSEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration, const char * endpoint_) : Aws::Internal::AWSHttpResourceClient(client_configuration) , endpoint(endpoint_) - , logger(&Poco::Logger::get("AWSEC2InstanceProfileConfigLoader")) + , logger(getLogger("AWSEC2InstanceProfileConfigLoader")) { } @@ -162,7 +162,7 @@ Aws::String AWSEC2MetadataClient::getCurrentRegion() const std::shared_ptr InitEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration) { Aws::String ec2_metadata_service_endpoint = Aws::Environment::GetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT"); - auto * logger = &Poco::Logger::get("AWSEC2InstanceProfileConfigLoader"); + auto logger = getLogger("AWSEC2InstanceProfileConfigLoader"); if (ec2_metadata_service_endpoint.empty()) { Aws::String ec2_metadata_service_endpoint_mode = Aws::Environment::GetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE"); @@ -200,7 +200,7 @@ std::shared_ptr InitEC2MetadataClient(const Aws::Client::C AWSEC2InstanceProfileConfigLoader::AWSEC2InstanceProfileConfigLoader(const std::shared_ptr & client_, bool use_secure_pull_) : client(client_) , 
use_secure_pull(use_secure_pull_) - , logger(&Poco::Logger::get("AWSEC2InstanceProfileConfigLoader")) + , logger(getLogger("AWSEC2InstanceProfileConfigLoader")) { } @@ -242,7 +242,7 @@ bool AWSEC2InstanceProfileConfigLoader::LoadInternal() AWSInstanceProfileCredentialsProvider::AWSInstanceProfileCredentialsProvider(const std::shared_ptr & config_loader) : ec2_metadata_config_loader(config_loader) , load_frequency_ms(Aws::Auth::REFRESH_THRESHOLD) - , logger(&Poco::Logger::get("AWSInstanceProfileCredentialsProvider")) + , logger(getLogger("AWSInstanceProfileCredentialsProvider")) { LOG_INFO(logger, "Creating Instance with injected EC2MetadataClient and refresh rate."); } @@ -287,7 +287,7 @@ void AWSInstanceProfileCredentialsProvider::refreshIfExpired() AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider::AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider( DB::S3::PocoHTTPClientConfiguration & aws_client_configuration, uint64_t expiration_window_seconds_) - : logger(&Poco::Logger::get("AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider")) + : logger(getLogger("AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider")) , expiration_window_seconds(expiration_window_seconds_) { // check environment variables @@ -420,7 +420,7 @@ S3CredentialsProviderChain::S3CredentialsProviderChain( const Aws::Auth::AWSCredentials & credentials, CredentialsConfiguration credentials_configuration) { - auto * logger = &Poco::Logger::get("S3CredentialsProviderChain"); + auto logger = getLogger("S3CredentialsProviderChain"); /// we don't provide any credentials to avoid signing if (credentials_configuration.no_sign_request) diff --git a/src/IO/S3/Credentials.h b/src/IO/S3/Credentials.h index cd9072f97657..fe20c4aa54ff 100644 --- a/src/IO/S3/Credentials.h +++ b/src/IO/S3/Credentials.h @@ -54,7 +54,7 @@ class AWSEC2MetadataClient : public Aws::Internal::AWSHttpResourceClient const Aws::String endpoint; mutable std::recursive_mutex token_mutex; mutable Aws::String token; - Poco::Logger * logger; + 
LoggerPtr logger; }; std::shared_ptr InitEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration); @@ -72,7 +72,7 @@ class AWSEC2InstanceProfileConfigLoader : public Aws::Config::AWSProfileConfigLo private: std::shared_ptr client; bool use_secure_pull; - Poco::Logger * logger; + LoggerPtr logger; }; class AWSInstanceProfileCredentialsProvider : public Aws::Auth::AWSCredentialsProvider @@ -91,7 +91,7 @@ class AWSInstanceProfileCredentialsProvider : public Aws::Auth::AWSCredentialsPr std::shared_ptr ec2_metadata_config_loader; Int64 load_frequency_ms; - Poco::Logger * logger; + LoggerPtr logger; }; class AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider : public Aws::Auth::AWSCredentialsProvider @@ -117,7 +117,7 @@ class AwsAuthSTSAssumeRoleWebIdentityCredentialsProvider : public Aws::Auth::AWS Aws::String session_name; Aws::String token; bool initialized = false; - Poco::Logger * logger; + LoggerPtr logger; uint64_t expiration_window_seconds; }; diff --git a/src/IO/S3/PocoHTTPClient.cpp b/src/IO/S3/PocoHTTPClient.cpp index b0ab1c52409a..121f02469a8e 100644 --- a/src/IO/S3/PocoHTTPClient.cpp +++ b/src/IO/S3/PocoHTTPClient.cpp @@ -256,7 +256,7 @@ void PocoHTTPClient::makeRequestInternal( Aws::Utils::RateLimits::RateLimiterInterface *, Aws::Utils::RateLimits::RateLimiterInterface *) const { - Poco::Logger * log = &Poco::Logger::get("AWSClient"); + LoggerPtr log = getLogger("AWSClient"); auto uri = request.GetUri().GetURIString(); if (enable_s3_requests_logging) diff --git a/src/IO/S3/copyS3File.cpp b/src/IO/S3/copyS3File.cpp index 6972e9f1c828..ec7c3f66672a 100644 --- a/src/IO/S3/copyS3File.cpp +++ b/src/IO/S3/copyS3File.cpp @@ -53,7 +53,7 @@ namespace const std::optional> & object_metadata_, ThreadPoolCallbackRunner schedule_, bool for_disk_s3_, - const Poco::Logger * log_) + const LoggerPtr log_) : client_ptr(client_ptr_) , dest_bucket(dest_bucket_) , dest_key(dest_key_) @@ -77,7 +77,7 @@ namespace const std::optional> & object_metadata; 
ThreadPoolCallbackRunner schedule; bool for_disk_s3; - const Poco::Logger * log; + const LoggerPtr log; struct UploadPartTask { @@ -415,7 +415,7 @@ namespace const std::optional> & object_metadata_, ThreadPoolCallbackRunner schedule_, bool for_disk_s3_) - : UploadHelper(client_ptr_, dest_bucket_, dest_key_, request_settings_, object_metadata_, schedule_, for_disk_s3_, &Poco::Logger::get("copyDataToS3File")) + : UploadHelper(client_ptr_, dest_bucket_, dest_key_, request_settings_, object_metadata_, schedule_, for_disk_s3_, getLogger("copyDataToS3File")) , create_read_buffer(create_read_buffer_) , offset(offset_) , size(size_) @@ -579,7 +579,7 @@ namespace const std::optional> & object_metadata_, ThreadPoolCallbackRunner schedule_, bool for_disk_s3_) - : UploadHelper(client_ptr_, dest_bucket_, dest_key_, request_settings_, object_metadata_, schedule_, for_disk_s3_, &Poco::Logger::get("copyS3File")) + : UploadHelper(client_ptr_, dest_bucket_, dest_key_, request_settings_, object_metadata_, schedule_, for_disk_s3_, getLogger("copyS3File")) , src_bucket(src_bucket_) , src_key(src_key_) , offset(src_offset_) diff --git a/src/IO/WriteBufferFromHTTP.cpp b/src/IO/WriteBufferFromHTTP.cpp index 355c42a23c98..3142a57b5384 100644 --- a/src/IO/WriteBufferFromHTTP.cpp +++ b/src/IO/WriteBufferFromHTTP.cpp @@ -32,7 +32,7 @@ WriteBufferFromHTTP::WriteBufferFromHTTP( for (const auto & header: additional_headers) request.add(header.name, header.value); - LOG_TRACE((&Poco::Logger::get("WriteBufferToHTTP")), "Sending request to {}", uri.toString()); + LOG_TRACE((getLogger("WriteBufferToHTTP")), "Sending request to {}", uri.toString()); ostr = &session->sendRequest(request); } diff --git a/src/IO/WriteBufferFromS3.h b/src/IO/WriteBufferFromS3.h index e56d590c57a8..1c7a895b95a4 100644 --- a/src/IO/WriteBufferFromS3.h +++ b/src/IO/WriteBufferFromS3.h @@ -112,7 +112,7 @@ class WriteBufferFromS3 final : public BufferWithOwnMemory std::mutex bg_tasks_mutex; std::condition_variable 
bg_tasks_condvar; - Poco::Logger * log = &Poco::Logger::get("WriteBufferFromS3"); + LoggerPtr log = getLogger("WriteBufferFromS3"); WriteSettings write_settings; }; diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index d6fbf072d054..5a569b64e93b 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -106,7 +106,7 @@ class HashTablesStatistics if (const auto hint = cache->get(params.key)) { LOG_TRACE( - &Poco::Logger::get("Aggregator"), + getLogger("Aggregator"), "An entry for key={} found in cache: sum_of_sizes={}, median_size={}", params.key, hint->sum_of_sizes, @@ -130,7 +130,7 @@ class HashTablesStatistics || hint->median_size < median_size) { LOG_TRACE( - &Poco::Logger::get("Aggregator"), + getLogger("Aggregator"), "Statistics updated for key={}: new sum_of_sizes={}, median_size={}", params.key, sum_of_sizes, @@ -230,7 +230,7 @@ void initDataVariantsWithSizeHint( /// But we will also work with the big (i.e. not so cache friendly) HT from the beginning which may result in a slight slowdown. /// So let's just do nothing. LOG_TRACE( - &Poco::Logger::get("Aggregator"), + getLogger("Aggregator"), "No space were preallocated in hash tables because 'max_size_to_preallocate_for_aggregation' has too small value: {}, " "should be at least {}", stats_collecting_params.max_size_to_preallocate_for_aggregation, diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 3edd75e4addf..487d0d464aa9 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -1207,7 +1207,7 @@ class Aggregator final /// How many RAM were used to process the query before processing the first block. Int64 memory_usage_before_aggregation = 0; - Poco::Logger * log = &Poco::Logger::get("Aggregator"); + LoggerPtr log = getLogger("Aggregator"); /// For external aggregation. 
TemporaryDataOnDiskPtr tmp_data; diff --git a/src/Interpreters/AsynchronousInsertQueue.cpp b/src/Interpreters/AsynchronousInsertQueue.cpp index 2dd2409442ee..94fb61976bd6 100644 --- a/src/Interpreters/AsynchronousInsertQueue.cpp +++ b/src/Interpreters/AsynchronousInsertQueue.cpp @@ -376,7 +376,7 @@ try SCOPE_EXIT(CurrentMetrics::sub(CurrentMetrics::PendingAsyncInsert, data->entries.size())); - const auto * log = &Poco::Logger::get("AsynchronousInsertQueue"); + const auto log = getLogger("AsynchronousInsertQueue"); const auto & insert_query = assert_cast(*key.query); auto insert_context = Context::createCopy(global_context); diff --git a/src/Interpreters/AsynchronousInsertQueue.h b/src/Interpreters/AsynchronousInsertQueue.h index 23a2860364d8..1d96555ff37a 100644 --- a/src/Interpreters/AsynchronousInsertQueue.h +++ b/src/Interpreters/AsynchronousInsertQueue.h @@ -130,7 +130,7 @@ class AsynchronousInsertQueue : public WithContext /// Uses async_insert_busy_timeout_ms and processBatchDeadlines() std::vector dump_by_first_update_threads; - Poco::Logger * log = &Poco::Logger::get("AsynchronousInsertQueue"); + LoggerPtr log = getLogger("AsynchronousInsertQueue"); void processBatchDeadlines(size_t shard_num); void scheduleDataProcessingJob(const InsertQuery & key, InsertDataPtr data, ContextPtr global_context); diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 825dc70f9c81..f2a6cb10c9a8 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -35,7 +35,7 @@ FileCache::FileCache( , enable_filesystem_query_cache_limit(cache_settings_.enable_filesystem_query_cache_limit) , enable_bypass_cache_with_threashold(cache_settings_.enable_bypass_cache_with_threashold) , bypass_cache_threashold(cache_settings_.bypass_cache_threashold) - , log(&Poco::Logger::get("FileCache")) + , log(getLogger("FileCache")) , main_priority(std::make_unique()) , stash_priority(std::make_unique()) , 
max_stash_element_size(cache_settings_.max_elements) diff --git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index afafa39c4c62..171586bfeffa 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -145,7 +145,7 @@ using QueryContextPtr = std::shared_ptr; const size_t bypass_cache_threashold; mutable std::mutex mutex; - Poco::Logger * log; + LoggerPtr log; bool is_initialized = false; std::exception_ptr initialization_exception; diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index 8122d7a168e5..9f0599e15226 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -41,9 +41,9 @@ FileSegment::FileSegment( , file_key(key_) , cache(cache_) #ifndef NDEBUG - , log(&Poco::Logger::get(fmt::format("FileSegment({}) : {}", getHexUIntLowercase(key_), range().toString()))) + , log(getLogger(fmt::format("FileSegment({}) : {}", getHexUIntLowercase(key_), range().toString()))) #else - , log(&Poco::Logger::get("FileSegment")) + , log(getLogger("FileSegment")) #endif , segment_kind(settings.kind) , is_unbound(settings.unbounded) diff --git a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index c6d9f90cbeec..2734536d7964 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -330,7 +330,7 @@ friend class StorageSystemFilesystemCache; Key file_key; FileCache * cache; - Poco::Logger * log; + LoggerPtr log; /// "detached" file segment means that it is not owned by cache ("detached" from cache). /// In general case, all file segments are owned by cache. 
diff --git a/src/Interpreters/Cache/LRUFileCachePriority.h b/src/Interpreters/Cache/LRUFileCachePriority.h index 2345d3c47dbe..11e4c5a113fd 100644 --- a/src/Interpreters/Cache/LRUFileCachePriority.h +++ b/src/Interpreters/Cache/LRUFileCachePriority.h @@ -33,7 +33,7 @@ class LRUFileCachePriority : public IFileCachePriority private: LRUQueue queue; - Poco::Logger * log = &Poco::Logger::get("LRUFileCachePriority"); + LoggerPtr log = getLogger("LRUFileCachePriority"); }; class LRUFileCachePriority::LRUFileCacheIterator : public IFileCachePriority::IIterator diff --git a/src/Interpreters/Cache/QueryCache.cpp b/src/Interpreters/Cache/QueryCache.cpp index 4f50b689a313..1dbfe61edef1 100644 --- a/src/Interpreters/Cache/QueryCache.cpp +++ b/src/Interpreters/Cache/QueryCache.cpp @@ -178,7 +178,7 @@ QueryCache::Writer::Writer(Cache & cache_, const Key & key_, if (auto entry = cache.getWithKey(key); entry.has_value() && !IsStale()(entry->key)) { skip_insert = true; /// Key already contained in cache and did not expire yet --> don't replace it - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Skipped insert (non-stale entry found), query: {}", key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "Skipped insert (non-stale entry found), query: {}", key.queryStringFromAst()); } } @@ -200,7 +200,7 @@ void QueryCache::Writer::buffer(Chunk && partial_query_result) { chunks.clear(); /// eagerly free some space skip_insert = true; - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Skipped insert (query result too big), new_entry_size_in_bytes: {} ({}), new_entry_size_in_rows: {} ({}), query: {}", new_entry_size_in_bytes, max_entry_size_in_bytes, new_entry_size_in_rows, max_entry_size_in_rows, key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "Skipped insert (query result too big), new_entry_size_in_bytes: {} ({}), new_entry_size_in_rows: {} ({}), query: {}", new_entry_size_in_bytes, max_entry_size_in_bytes, new_entry_size_in_rows, max_entry_size_in_rows, 
key.queryStringFromAst()); } } @@ -213,14 +213,14 @@ void QueryCache::Writer::finalizeWrite() if (std::chrono::duration_cast(std::chrono::system_clock::now() - query_start_time) < min_query_runtime) { - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Skipped insert (query not expensive enough), query: {}", key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "Skipped insert (query not expensive enough), query: {}", key.queryStringFromAst()); return; } if (auto entry = cache.getWithKey(key); entry.has_value() && !IsStale()(entry->key)) { /// same check as in ctor because a parallel Writer could have inserted the current key in the meantime - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Skipped insert (non-stale entry found), query: {}", key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "Skipped insert (non-stale entry found), query: {}", key.queryStringFromAst()); return; } @@ -233,24 +233,24 @@ QueryCache::Reader::Reader(Cache & cache_, const Key & key, const std::lock_guar if (!entry.has_value()) { - LOG_TRACE(&Poco::Logger::get("QueryCache"), "No entry found for query {}", key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "No entry found for query {}", key.queryStringFromAst()); return; } if (entry->key.username.has_value() && entry->key.username != key.username) { - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Inaccessible entry found for query {}", key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "Inaccessible entry found for query {}", key.queryStringFromAst()); return; } if (IsStale()(entry->key)) { - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Stale entry found for query {}", key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), "Stale entry found for query {}", key.queryStringFromAst()); return; } pipe = Pipe(std::make_shared(entry->key.header, entry->mapped)); - LOG_TRACE(&Poco::Logger::get("QueryCache"), "Entry found for query {}", key.queryStringFromAst()); + LOG_TRACE(getLogger("QueryCache"), 
"Entry found for query {}", key.queryStringFromAst()); } bool QueryCache::Reader::hasCacheEntryForKey() const diff --git a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp index a8dfeb214d05..249c0623b17b 100644 --- a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp +++ b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp @@ -45,7 +45,7 @@ void WriteBufferToFileSegment::nextImpl() } catch (...) { - LOG_WARNING(&Poco::Logger::get("WriteBufferToFileSegment"), "Failed to write to the underlying buffer ({})", file_segment->getInfoForLog()); + LOG_WARNING(getLogger("WriteBufferToFileSegment"), "Failed to write to the underlying buffer ({})", file_segment->getInfoForLog()); throw; } diff --git a/src/Interpreters/ClusterDiscovery.cpp b/src/Interpreters/ClusterDiscovery.cpp index 36b2f17e8a11..f48ba926b4a9 100644 --- a/src/Interpreters/ClusterDiscovery.cpp +++ b/src/Interpreters/ClusterDiscovery.cpp @@ -107,7 +107,7 @@ ClusterDiscovery::ClusterDiscovery( const String & config_prefix) : context(Context::createCopy(context_)) , current_node_name(toString(ServerUUID::get())) - , log(&Poco::Logger::get("ClusterDiscovery")) + , log(getLogger("ClusterDiscovery")) { LOG_DEBUG(log, "Cluster discovery is enabled"); @@ -476,7 +476,7 @@ bool ClusterDiscovery::NodeInfo::parse(const String & data, NodeInfo & result) else { LOG_ERROR( - &Poco::Logger::get("ClusterDiscovery"), + getLogger("ClusterDiscovery"), "Unsupported version '{}' of data in zk node '{}'", ver, data.size() < 1024 ? data : "[data too long]"); } @@ -484,7 +484,7 @@ bool ClusterDiscovery::NodeInfo::parse(const String & data, NodeInfo & result) catch (Poco::Exception & e) { LOG_WARNING( - &Poco::Logger::get("ClusterDiscovery"), + getLogger("ClusterDiscovery"), "Can't parse '{}' from node: {}", data.size() < 1024 ? 
data : "[data too long]", e.displayText()); return false; diff --git a/src/Interpreters/ClusterDiscovery.h b/src/Interpreters/ClusterDiscovery.h index 1c7337406b7d..e37763b0d785 100644 --- a/src/Interpreters/ClusterDiscovery.h +++ b/src/Interpreters/ClusterDiscovery.h @@ -126,7 +126,7 @@ class ClusterDiscovery ThreadFromGlobalPool main_thread; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index 0cf3f3609944..029b3e57c67a 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -154,7 +154,7 @@ void SelectStreamFactory::createForShard( ProfileEvents::increment(ProfileEvents::DistributedConnectionMissingTable); if (shard_info.hasRemoteConnections()) { - LOG_WARNING(&Poco::Logger::get("ClusterProxy::SelectStreamFactory"), + LOG_WARNING(getLogger("ClusterProxy::SelectStreamFactory"), "There is no table {} on local replica of shard {}, will try remote replicas.", main_table.getNameForLogs(), shard_info.shard_num); emplace_remote_stream(); @@ -192,7 +192,7 @@ void SelectStreamFactory::createForShard( /// If we reached this point, local replica is stale. 
ProfileEvents::increment(ProfileEvents::DistributedConnectionStaleReplica); - LOG_WARNING(&Poco::Logger::get("ClusterProxy::SelectStreamFactory"), "Local replica of shard {} is stale (delay: {}s.)", shard_info.shard_num, local_delay); + LOG_WARNING(getLogger("ClusterProxy::SelectStreamFactory"), "Local replica of shard {} is stale (delay: {}s.)", shard_info.shard_num, local_delay); if (!settings.fallback_to_stale_replicas_for_distributed_queries) { diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index 439033f89f26..fed9eda2af9d 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -35,7 +35,7 @@ namespace ErrorCodes namespace ClusterProxy { -ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, ContextPtr context, const Settings & settings, const StorageID & main_table, const SelectQueryInfo * query_info, Poco::Logger * log) +ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, ContextPtr context, const Settings & settings, const StorageID & main_table, const SelectQueryInfo * query_info, LoggerPtr log) { Settings new_settings = settings; new_settings.queue_max_wait_ms = Cluster::saturate(new_settings.queue_max_wait_ms, settings.max_execution_time); @@ -155,7 +155,7 @@ void executeQuery( QueryProcessingStage::Enum processed_stage, const StorageID & main_table, const ASTPtr & table_func_ptr, - SelectStreamFactory & stream_factory, Poco::Logger * log, + SelectStreamFactory & stream_factory, LoggerPtr log, const ASTPtr & query_ast, ContextPtr context, const SelectQueryInfo & query_info, const ExpressionActionsPtr & sharding_key_expr, const std::string & sharding_key_column_name, @@ -325,7 +325,7 @@ void executeQueryWithParallelReplicas( getThrottler(new_context), std::move(scalars), std::move(external_tables), - &Poco::Logger::get("ReadFromParallelRemoteReplicasStep"), + getLogger("ReadFromParallelRemoteReplicasStep"), 
query_info.storage_limits, parallel_group_id); diff --git a/src/Interpreters/ClusterProxy/executeQuery.h b/src/Interpreters/ClusterProxy/executeQuery.h index 41f6da55686b..2e18575b2982 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.h +++ b/src/Interpreters/ClusterProxy/executeQuery.h @@ -35,7 +35,7 @@ class SelectStreamFactory; /// /// @return new Context with adjusted settings ContextMutablePtr updateSettingsForCluster( - const Cluster & cluster, ContextPtr context, const Settings & settings, const StorageID & main_table, const SelectQueryInfo * query_info = nullptr, Poco::Logger * log = nullptr); + const Cluster & cluster, ContextPtr context, const Settings & settings, const StorageID & main_table, const SelectQueryInfo * query_info = nullptr, LoggerPtr log = nullptr); using AdditionalShardFilterGenerator = std::function; /// Execute a distributed query, creating a query plan, from which the query pipeline can be built. @@ -47,7 +47,7 @@ void executeQuery( QueryProcessingStage::Enum processed_stage, const StorageID & main_table, const ASTPtr & table_func_ptr, - SelectStreamFactory & stream_factory, Poco::Logger * log, + SelectStreamFactory & stream_factory, LoggerPtr log, const ASTPtr & query_ast, ContextPtr context, const SelectQueryInfo & query_info, const ExpressionActionsPtr & sharding_key_expr, const std::string & sharding_key_column_name, diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 04aa7b48d1ad..6bd5b7b547b6 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -180,7 +180,7 @@ namespace ErrorCodes */ struct ContextSharedPart : boost::noncopyable { - Poco::Logger * log = &Poco::Logger::get("Context"); + LoggerPtr log = getLogger("Context"); /// For access of most of shared objects. Recursive mutex. 
mutable std::recursive_mutex mutex; @@ -788,7 +788,7 @@ void Context::setPath(const String & path) shared->user_scripts_path = shared->path + "user_scripts/"; } -static void setupTmpPath(Poco::Logger * log, const std::string & path) +static void setupTmpPath(LoggerPtr log, const std::string & path) try { LOG_DEBUG(log, "Setting up {} to store temporary data in it", path); @@ -3366,7 +3366,7 @@ void Context::setDefaultProfiles(const Poco::Util::AbstractConfiguration & confi shared->system_profile_name = config.getString("system_profile", shared->default_profile_name); setCurrentProfile(shared->system_profile_name); - applySettingsQuirks(settings, &Poco::Logger::get("SettingsQuirks")); + applySettingsQuirks(settings, getLogger("SettingsQuirks")); shared->buffer_profile_name = config.getString("buffer_profile", shared->system_profile_name); buffer_context = Context::createCopy(shared_from_this()); diff --git a/src/Interpreters/CrossToInnerJoinVisitor.cpp b/src/Interpreters/CrossToInnerJoinVisitor.cpp index 8755daa1dc88..a327f067a6de 100644 --- a/src/Interpreters/CrossToInnerJoinVisitor.cpp +++ b/src/Interpreters/CrossToInnerJoinVisitor.cpp @@ -254,7 +254,7 @@ void CrossToInnerJoinMatcher::visit(ASTSelectQuery & select, ASTPtr &, Data & da ASTPtr on_expr = makeOnExpression(expr_it->second); if (rewritten = joined.rewriteCrossToInner(on_expr); rewritten) { - LOG_DEBUG(&Poco::Logger::get("CrossToInnerJoin"), "Rewritten '{}' to '{}'", query_before, queryToString(*joined.tableJoin())); + LOG_DEBUG(getLogger("CrossToInnerJoin"), "Rewritten '{}' to '{}'", query_before, queryToString(*joined.tableJoin())); } } diff --git a/src/Interpreters/DDLTask.cpp b/src/Interpreters/DDLTask.cpp index 799f1b0b4f42..7fc91026cae0 100644 --- a/src/Interpreters/DDLTask.cpp +++ b/src/Interpreters/DDLTask.cpp @@ -190,7 +190,7 @@ ContextMutablePtr DDLTaskBase::makeQueryContext(ContextPtr from_context, const Z } -bool DDLTask::findCurrentHostID(ContextPtr global_context, Poco::Logger * log) +bool 
DDLTask::findCurrentHostID(ContextPtr global_context, LoggerPtr log) { bool host_in_hostlist = false; std::exception_ptr first_exception = nullptr; @@ -244,7 +244,7 @@ bool DDLTask::findCurrentHostID(ContextPtr global_context, Poco::Logger * log) return host_in_hostlist; } -void DDLTask::setClusterInfo(ContextPtr context, Poco::Logger * log) +void DDLTask::setClusterInfo(ContextPtr context, LoggerPtr log) { auto * query_on_cluster = dynamic_cast(query.get()); if (!query_on_cluster) diff --git a/src/Interpreters/DDLTask.h b/src/Interpreters/DDLTask.h index 2043de6701e5..e2d92f146973 100644 --- a/src/Interpreters/DDLTask.h +++ b/src/Interpreters/DDLTask.h @@ -139,9 +139,9 @@ struct DDLTask : public DDLTaskBase { DDLTask(const String & name, const String & path) : DDLTaskBase(name, path) {} - bool findCurrentHostID(ContextPtr global_context, Poco::Logger * log); + bool findCurrentHostID(ContextPtr global_context, LoggerPtr log); - void setClusterInfo(ContextPtr context, Poco::Logger * log); + void setClusterInfo(ContextPtr context, LoggerPtr log); String getShardID() const override; diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index c4529af2c516..678e05dfcb9b 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -76,7 +76,7 @@ DDLWorker::DDLWorker( const CurrentMetrics::Metric * max_entry_metric_, const CurrentMetrics::Metric * max_pushed_entry_metric_) : context(Context::createCopy(context_)) - , log(&Poco::Logger::get(logger_name)) + , log(getLogger(logger_name)) , pool_size(pool_size_) , max_entry_metric(max_entry_metric_) , max_pushed_entry_metric(max_pushed_entry_metric_) diff --git a/src/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h index 6cf034edae8c..34f0bab895ac 100644 --- a/src/Interpreters/DDLWorker.h +++ b/src/Interpreters/DDLWorker.h @@ -122,7 +122,7 @@ class DDLWorker void runCleanupThread(); ContextMutablePtr context; - Poco::Logger * log; + LoggerPtr log; std::string host_fqdn; /// 
current host domain name std::string host_fqdn_id; /// host_name:port diff --git a/src/Interpreters/DNSCacheUpdater.cpp b/src/Interpreters/DNSCacheUpdater.cpp index bf88d19b7ef7..6a398e08cdf4 100644 --- a/src/Interpreters/DNSCacheUpdater.cpp +++ b/src/Interpreters/DNSCacheUpdater.cpp @@ -23,7 +23,7 @@ void DNSCacheUpdater::run() /// Reload cluster config if IP of any host has been changed since last update. if (resolver.updateCache(max_consecutive_failures)) { - LOG_INFO(&Poco::Logger::get("DNSCacheUpdater"), "IPs of some hosts have been changed. Will reload cluster config."); + LOG_INFO(getLogger("DNSCacheUpdater"), "IPs of some hosts have been changed. Will reload cluster config."); try { getContext()->reloadClusterConfig(); @@ -44,7 +44,7 @@ void DNSCacheUpdater::run() void DNSCacheUpdater::start() { - LOG_INFO(&Poco::Logger::get("DNSCacheUpdater"), "Update period {} seconds", update_period_seconds); + LOG_INFO(getLogger("DNSCacheUpdater"), "Update period {} seconds", update_period_seconds); task_handle->activateAndSchedule(); } diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 634d87280acf..5e9805b89ecc 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -685,7 +685,7 @@ DatabaseCatalog::DatabaseCatalog(ContextMutablePtr global_context_) , referential_dependencies{"ReferentialDeps"} , loading_dependencies{"LoadingDeps"} , view_dependencies{"ViewDeps"} - , log(&Poco::Logger::get("DatabaseCatalog")) + , log(getLogger("DatabaseCatalog")) { } diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h index 3cd5529aefc9..5d0f48309996 100644 --- a/src/Interpreters/DatabaseCatalog.h +++ b/src/Interpreters/DatabaseCatalog.h @@ -303,7 +303,7 @@ class DatabaseCatalog : boost::noncopyable, WithMutableContext /// View dependencies between a source table and its view. 
TablesDependencyGraph view_dependencies TSA_GUARDED_BY(databases_mutex); - Poco::Logger * log; + LoggerPtr log; std::atomic_bool is_shutting_down = false; diff --git a/src/Interpreters/DirectJoin.cpp b/src/Interpreters/DirectJoin.cpp index e148db1d8e6e..47d1b322ab6b 100644 --- a/src/Interpreters/DirectJoin.cpp +++ b/src/Interpreters/DirectJoin.cpp @@ -67,7 +67,7 @@ DirectKeyValueJoin::DirectKeyValueJoin(std::shared_ptr table_join_, : table_join(table_join_) , storage(storage_) , right_sample_block(right_sample_block_) - , log(&Poco::Logger::get("DirectKeyValueJoin")) + , log(getLogger("DirectKeyValueJoin")) { if (!table_join->oneDisjunct() || table_join->getOnlyClause().key_names_left.size() != 1 || diff --git a/src/Interpreters/DirectJoin.h b/src/Interpreters/DirectJoin.h index bdbd155dc366..0f4c9fe7605c 100644 --- a/src/Interpreters/DirectJoin.h +++ b/src/Interpreters/DirectJoin.h @@ -60,7 +60,7 @@ class DirectKeyValueJoin : public IJoin Block right_sample_block; Block right_sample_block_with_storage_column_names; Block sample_block_with_columns_to_add; - Poco::Logger * log; + LoggerPtr log; }; diff --git a/src/Interpreters/EmbeddedDictionaries.cpp b/src/Interpreters/EmbeddedDictionaries.cpp index 6c0ccce66b57..1435d16cb073 100644 --- a/src/Interpreters/EmbeddedDictionaries.cpp +++ b/src/Interpreters/EmbeddedDictionaries.cpp @@ -125,7 +125,7 @@ EmbeddedDictionaries::EmbeddedDictionaries( ContextPtr context_, const bool throw_on_error) : WithContext(context_) - , log(&Poco::Logger::get("EmbeddedDictionaries")) + , log(getLogger("EmbeddedDictionaries")) , geo_dictionaries_loader(std::move(geo_dictionaries_loader_)) , reload_period(getContext()->getConfigRef().getInt("builtin_dictionaries_reload_interval", 3600)) { diff --git a/src/Interpreters/EmbeddedDictionaries.h b/src/Interpreters/EmbeddedDictionaries.h index 674b3a7f01eb..b1cc6d51707f 100644 --- a/src/Interpreters/EmbeddedDictionaries.h +++ b/src/Interpreters/EmbeddedDictionaries.h @@ -25,7 +25,7 @@ namespace 
DB class EmbeddedDictionaries : WithContext { private: - Poco::Logger * log; + LoggerPtr log; MultiVersion regions_hierarchies; MultiVersion regions_names; diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 73004f29b59e..66e560bf26b2 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -116,7 +116,7 @@ bool allowEarlyConstantFolding(const ActionsDAG & actions, const Settings & sett return true; } -Poco::Logger * getLogger() { return &Poco::Logger::get("ExpressionAnalyzer"); } +LoggerPtr getLogger() { return ::getLogger("ExpressionAnalyzer"); } } diff --git a/src/Interpreters/ExpressionJIT.cpp b/src/Interpreters/ExpressionJIT.cpp index dfc88e970528..103d715bf225 100644 --- a/src/Interpreters/ExpressionJIT.cpp +++ b/src/Interpreters/ExpressionJIT.cpp @@ -38,10 +38,9 @@ static CHJIT & getJITInstance() return jit; } -static Poco::Logger * getLogger() +static LoggerPtr getLogger() { - static Poco::Logger & logger = Poco::Logger::get("ExpressionJIT"); - return &logger; + return ::getLogger("ExpressionJIT"); } class CompiledFunctionHolder : public CompiledExpressionCacheEntry diff --git a/src/Interpreters/ExternalDictionariesLoader.cpp b/src/Interpreters/ExternalDictionariesLoader.cpp index 080878c7d869..0ff0dc436878 100644 --- a/src/Interpreters/ExternalDictionariesLoader.cpp +++ b/src/Interpreters/ExternalDictionariesLoader.cpp @@ -22,7 +22,7 @@ namespace ErrorCodes /// Must not acquire Context lock in constructor to avoid possibility of deadlocks. 
ExternalDictionariesLoader::ExternalDictionariesLoader(ContextPtr global_context_) - : ExternalLoader("external dictionary", &Poco::Logger::get("ExternalDictionariesLoader")) + : ExternalLoader("external dictionary", getLogger("ExternalDictionariesLoader")) , WithContext(global_context_) { setConfigSettings({"dictionary", "name", "database", "uuid"}); diff --git a/src/Interpreters/ExternalLoader.cpp b/src/Interpreters/ExternalLoader.cpp index 04a116ec0c73..b7c441a8d437 100644 --- a/src/Interpreters/ExternalLoader.cpp +++ b/src/Interpreters/ExternalLoader.cpp @@ -101,7 +101,7 @@ namespace class ExternalLoader::LoadablesConfigReader : private boost::noncopyable { public: - LoadablesConfigReader(const String & type_name_, Poco::Logger * log_) + LoadablesConfigReader(const String & type_name_, LoggerPtr log_) : type_name(type_name_), log(log_) { } @@ -383,7 +383,7 @@ class ExternalLoader::LoadablesConfigReader : private boost::noncopyable } const String type_name; - Poco::Logger * log; + LoggerPtr log; std::mutex mutex; ExternalLoaderConfigSettings settings; @@ -407,7 +407,7 @@ class ExternalLoader::LoadingDispatcher : private boost::noncopyable LoadingDispatcher( const CreateObjectFunction & create_object_function_, const String & type_name_, - Poco::Logger * log_) + LoggerPtr log_) : create_object(create_object_function_) , type_name(type_name_) , log(log_) @@ -1197,7 +1197,7 @@ class ExternalLoader::LoadingDispatcher : private boost::noncopyable const CreateObjectFunction create_object; const String type_name; - Poco::Logger * log; + LoggerPtr log; mutable std::mutex mutex; std::condition_variable event; @@ -1277,7 +1277,7 @@ class ExternalLoader::PeriodicUpdater : private boost::noncopyable }; -ExternalLoader::ExternalLoader(const String & type_name_, Poco::Logger * log_) +ExternalLoader::ExternalLoader(const String & type_name_, LoggerPtr log_) : config_files_reader(std::make_unique(type_name_, log_)) , loading_dispatcher(std::make_unique( [this](auto && a, auto 
&& b, auto && c) { return createObject(a, b, c); }, diff --git a/src/Interpreters/ExternalLoader.h b/src/Interpreters/ExternalLoader.h index 3ce7c40ef036..4d16e409f822 100644 --- a/src/Interpreters/ExternalLoader.h +++ b/src/Interpreters/ExternalLoader.h @@ -83,7 +83,7 @@ class ExternalLoader template static constexpr bool is_vector_load_result_type = std::is_same_v || std::is_same_v; - ExternalLoader(const String & type_name_, Poco::Logger * log); + ExternalLoader(const String & type_name_, LoggerPtr log); virtual ~ExternalLoader(); /// Adds a repository which will be used to read configurations from. @@ -229,7 +229,7 @@ class ExternalLoader std::unique_ptr periodic_updater; const String type_name; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Interpreters/FullSortingMergeJoin.h b/src/Interpreters/FullSortingMergeJoin.h index a94d7a7dfc65..1257799cd713 100644 --- a/src/Interpreters/FullSortingMergeJoin.h +++ b/src/Interpreters/FullSortingMergeJoin.h @@ -24,7 +24,7 @@ class FullSortingMergeJoin : public IJoin : table_join(table_join_) , right_sample_block(right_sample_block_) { - LOG_TRACE(&Poco::Logger::get("FullSortingMergeJoin"), "Will use full sorting merge join"); + LOG_TRACE(getLogger("FullSortingMergeJoin"), "Will use full sorting merge join"); } const TableJoin & getTableJoin() const override { return *table_join; } diff --git a/src/Interpreters/GraceHashJoin.cpp b/src/Interpreters/GraceHashJoin.cpp index 7795061072c5..b85173ca2bb3 100644 --- a/src/Interpreters/GraceHashJoin.cpp +++ b/src/Interpreters/GraceHashJoin.cpp @@ -121,7 +121,7 @@ class GraceHashJoin::FileBucket : boost::noncopyable public: using BucketLock = std::unique_lock; - explicit FileBucket(size_t bucket_index_, TemporaryFileStream & left_file_, TemporaryFileStream & right_file_, Poco::Logger * log_) + explicit FileBucket(size_t bucket_index_, TemporaryFileStream & left_file_, TemporaryFileStream & right_file_, LoggerPtr log_) : idx{bucket_index_} , left_file{left_file_} , 
right_file{right_file_} @@ -223,7 +223,7 @@ class GraceHashJoin::FileBucket : boost::noncopyable std::atomic state; - Poco::Logger * log; + LoggerPtr log; }; namespace @@ -261,7 +261,7 @@ GraceHashJoin::GraceHashJoin( const Block & right_sample_block_, TemporaryDataOnDiskScopePtr tmp_data_, bool any_take_last_row_) - : log{&Poco::Logger::get("GraceHashJoin")} + : log{getLogger("GraceHashJoin")} , context{context_} , table_join{std::move(table_join_)} , left_sample_block{left_sample_block_} diff --git a/src/Interpreters/GraceHashJoin.h b/src/Interpreters/GraceHashJoin.h index b8d83f4cad0b..38729e2478fb 100644 --- a/src/Interpreters/GraceHashJoin.h +++ b/src/Interpreters/GraceHashJoin.h @@ -120,7 +120,7 @@ class GraceHashJoin final : public IJoin /// Structure block to store in the HashJoin according to sample_block. Block prepareRightBlock(const Block & block); - Poco::Logger * log; + LoggerPtr log; ContextPtr context; std::shared_ptr table_join; Block left_sample_block; diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index 697940445ea6..032a02481af8 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -219,7 +219,7 @@ HashJoin::HashJoin(std::shared_ptr table_join_, const Block & right_s , asof_inequality(table_join->getAsofInequality()) , data(std::make_shared()) , right_sample_block(right_sample_block_) - , log(&Poco::Logger::get("HashJoin")) + , log(getLogger("HashJoin")) { LOG_DEBUG(log, "({}) Datatype: {}, kind: {}, strictness: {}, right header: {}", fmt::ptr(this), data->type, kind, strictness, right_sample_block.dumpStructure()); LOG_DEBUG(log, "({}) Keys: {}", fmt::ptr(this), TableJoin::formatClauses(table_join->getClauses(), true)); diff --git a/src/Interpreters/HashJoin.h b/src/Interpreters/HashJoin.h index b29b6e617c82..b65aed781784 100644 --- a/src/Interpreters/HashJoin.h +++ b/src/Interpreters/HashJoin.h @@ -404,7 +404,7 @@ class HashJoin : public IJoin /// Left table column names that are sources 
for required_right_keys columns std::vector required_right_keys_sources; - Poco::Logger * log; + LoggerPtr log; /// Should be set via setLock to protect hash table from modification from StorageJoin /// If set HashJoin instance is not available for modification (addJoinedBlock) diff --git a/src/Interpreters/InternalTextLogsQueue.cpp b/src/Interpreters/InternalTextLogsQueue.cpp index 3be58a11beba..ca8461937ac3 100644 --- a/src/Interpreters/InternalTextLogsQueue.cpp +++ b/src/Interpreters/InternalTextLogsQueue.cpp @@ -43,7 +43,7 @@ void InternalTextLogsQueue::pushBlock(Block && log_block) if (blocksHaveEqualStructure(sample_block, log_block)) (void)(emplace(log_block.mutateColumns())); else - LOG_WARNING(&Poco::Logger::get("InternalTextLogsQueue"), "Log block have different structure"); + LOG_WARNING(getLogger("InternalTextLogsQueue"), "Log block have different structure"); } std::string_view InternalTextLogsQueue::getPriorityName(int priority) diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 77179ad2f321..d4893447eee8 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1167,7 +1167,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) } else if (create.attach && !create.attach_short_syntax && getContext()->getClientInfo().query_kind != ClientInfo::QueryKind::SECONDARY_QUERY) { - auto * log = &Poco::Logger::get("InterpreterCreateQuery"); + auto log = getLogger("InterpreterCreateQuery"); LOG_WARNING(log, "ATTACH TABLE query with full table definition is not recommended: " "use either ATTACH TABLE {}; to attach existing table " "or CREATE TABLE {} ; to create new table " diff --git a/src/Interpreters/InterpreterKillQueryQuery.cpp b/src/Interpreters/InterpreterKillQueryQuery.cpp index 3330159aff51..0980e1bb3018 100644 --- a/src/Interpreters/InterpreterKillQueryQuery.cpp +++ b/src/Interpreters/InterpreterKillQueryQuery.cpp @@ -161,7 
+161,7 @@ class SyncKillQuerySource : public ISource if (curr_process.processed) continue; - LOG_DEBUG(&Poco::Logger::get("KillQuery"), "Will kill query {} (synchronously)", curr_process.query_id); + LOG_DEBUG(getLogger("KillQuery"), "Will kill query {} (synchronously)", curr_process.query_id); auto code = process_list.sendCancelToQuery(curr_process.query_id, curr_process.user, true); @@ -229,7 +229,7 @@ BlockIO InterpreterKillQueryQuery::execute() for (const auto & query_desc : queries_to_stop) { if (!query.test) - LOG_DEBUG(&Poco::Logger::get("KillQuery"), "Will kill query {} (asynchronously)", query_desc.query_id); + LOG_DEBUG(getLogger("KillQuery"), "Will kill query {} (asynchronously)", query_desc.query_id); auto code = (query.test) ? CancellationCode::Unknown : process_list.sendCancelToQuery(query_desc.query_id, query_desc.user, true); insertResultRow(query_desc.source_num, code, processes_block, header, res_columns); } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index c034d41d3c39..b3fbadecebf7 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -373,7 +373,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( : IInterpreterUnionOrSelectQuery(options_.modify_inplace ? query_ptr_ : query_ptr_->clone(), context_, options_) , storage(storage_) , input_pipe(std::move(input_pipe_)) - , log(&Poco::Logger::get("InterpreterSelectQuery")) + , log(getLogger("InterpreterSelectQuery")) , metadata_snapshot(metadata_snapshot_) , prepared_sets(prepared_sets_) { diff --git a/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h index 58fddb8ffe98..f0c9ebd1a2db 100644 --- a/src/Interpreters/InterpreterSelectQuery.h +++ b/src/Interpreters/InterpreterSelectQuery.h @@ -239,7 +239,7 @@ class InterpreterSelectQuery : public IInterpreterUnionOrSelectQuery /// Used when we read from prepared input, not table or subquery. 
std::optional input_pipe; - Poco::Logger * log; + LoggerPtr log; StorageMetadataPtr metadata_snapshot; StorageSnapshotPtr storage_snapshot; diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index cd3e5579e120..6ccb48dbbfdd 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -201,7 +201,7 @@ void InterpreterSystemQuery::startStopAction(StorageActionBlockType action_type, void InterpreterSystemQuery::startStopActionInDatabase(StorageActionBlockType action_type, bool start, const String & database_name, const DatabasePtr & database, - const ContextPtr & local_context, Poco::Logger * log) + const ContextPtr & local_context, LoggerPtr log) { auto manager = local_context->getActionLocksManager(); auto access = local_context->getAccess(); @@ -231,7 +231,7 @@ void InterpreterSystemQuery::startStopActionInDatabase(StorageActionBlockType ac InterpreterSystemQuery::InterpreterSystemQuery(const ASTPtr & query_ptr_, ContextMutablePtr context_) - : WithMutableContext(context_), query_ptr(query_ptr_->clone()), log(&Poco::Logger::get("InterpreterSystemQuery")) + : WithMutableContext(context_), query_ptr(query_ptr_->clone()), log(getLogger("InterpreterSystemQuery")) { } diff --git a/src/Interpreters/InterpreterSystemQuery.h b/src/Interpreters/InterpreterSystemQuery.h index 8a1cdaf8edd9..87ab2ad84fcc 100644 --- a/src/Interpreters/InterpreterSystemQuery.h +++ b/src/Interpreters/InterpreterSystemQuery.h @@ -42,11 +42,11 @@ class InterpreterSystemQuery : public IInterpreter, WithMutableContext static void startStopActionInDatabase(StorageActionBlockType action_type, bool start, const String & database_name, const DatabasePtr & database, - const ContextPtr & local_context, Poco::Logger * log); + const ContextPtr & local_context, LoggerPtr log); private: ASTPtr query_ptr; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; StorageID table_id = StorageID::createEmpty(); 
/// Will be set up if query contains table name VolumePtr volume_ptr; diff --git a/src/Interpreters/InterserverCredentials.cpp b/src/Interpreters/InterserverCredentials.cpp index 094b58789a8d..c344732a2620 100644 --- a/src/Interpreters/InterserverCredentials.cpp +++ b/src/Interpreters/InterserverCredentials.cpp @@ -35,7 +35,7 @@ InterserverCredentials::CurrentCredentials InterserverCredentials::parseCredenti const Poco::Util::AbstractConfiguration & config, const std::string & root_tag) { - auto * log = &Poco::Logger::get("InterserverCredentials"); + auto log = getLogger("InterserverCredentials"); CurrentCredentials store; store.emplace_back(current_user_, current_password_); if (config.getBool(root_tag + ".allow_empty", false)) diff --git a/src/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp index 80b2fe5302c4..796251c84e48 100644 --- a/src/Interpreters/JoinedTables.cpp +++ b/src/Interpreters/JoinedTables.cpp @@ -334,7 +334,7 @@ std::shared_ptr JoinedTables::makeTableJoin(const ASTSelectQuery & se auto dictionary = dictionary_helper.getDictionary(dictionary_name); if (!dictionary) { - LOG_TRACE(&Poco::Logger::get("JoinedTables"), "Can't use dictionary join: dictionary '{}' was not found", dictionary_name); + LOG_TRACE(getLogger("JoinedTables"), "Can't use dictionary join: dictionary '{}' was not found", dictionary_name); return nullptr; } diff --git a/src/Interpreters/MergeJoin.cpp b/src/Interpreters/MergeJoin.cpp index a5ab6b25d02e..0001754f6552 100644 --- a/src/Interpreters/MergeJoin.cpp +++ b/src/Interpreters/MergeJoin.cpp @@ -479,7 +479,7 @@ MergeJoin::MergeJoin(std::shared_ptr table_join_, const Block & right , max_joined_block_rows(table_join->maxJoinedBlockRows()) , max_rows_in_right_block(table_join->maxRowsInRightBlock()) , max_files_to_merge(table_join->maxFilesToMerge()) - , log(&Poco::Logger::get("MergeJoin")) + , log(getLogger("MergeJoin")) { switch (table_join->strictness()) { diff --git a/src/Interpreters/MergeJoin.h 
b/src/Interpreters/MergeJoin.h index 8b5d884a0e6a..445c63335833 100644 --- a/src/Interpreters/MergeJoin.h +++ b/src/Interpreters/MergeJoin.h @@ -116,7 +116,7 @@ class MergeJoin : public IJoin Names lowcard_right_keys; - Poco::Logger * log; + LoggerPtr log; void changeLeftColumns(Block & block, MutableColumns && columns) const; void addRightColumns(Block & block, MutableColumns && columns); diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index ed875c534129..a678deadfa2f 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -1144,7 +1144,7 @@ void MutationsInterpreter::Source::read( createMergeTreeSequentialSource( plan, *data, storage_snapshot, part, std::move(virtual_columns.columns_to_read), apply_deleted_mask_, filter, context_, - &Poco::Logger::get("MutationsInterpreter")); + getLogger("MutationsInterpreter")); virtual_columns.addVirtuals(plan); } diff --git a/src/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp index cf231cbd7c9a..d14d9144b3d8 100644 --- a/src/Interpreters/PartLog.cpp +++ b/src/Interpreters/PartLog.cpp @@ -246,7 +246,7 @@ bool PartLog::addNewParts( } catch (...) { - tryLogCurrentException(part_log ? part_log->log : &Poco::Logger::get("PartLog"), __PRETTY_FUNCTION__); + tryLogCurrentException(part_log ? 
part_log->log : getLogger("PartLog"), __PRETTY_FUNCTION__); return false; } diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp index d66d4bdea644..16b5457c3b86 100644 --- a/src/Interpreters/ProcessList.cpp +++ b/src/Interpreters/ProcessList.cpp @@ -86,7 +86,7 @@ ProcessList::insert(const String & query_, const IAST * ast, ContextMutablePtr q if (!is_unlimited_query && max_size && processes.size() >= max_size) { if (queue_max_wait_ms) - LOG_WARNING(&Poco::Logger::get("ProcessList"), "Too many simultaneous queries, will wait {} ms.", queue_max_wait_ms); + LOG_WARNING(getLogger("ProcessList"), "Too many simultaneous queries, will wait {} ms.", queue_max_wait_ms); if (!queue_max_wait_ms || !have_space.wait_for(lock, std::chrono::milliseconds(queue_max_wait_ms), [&]{ return processes.size() < max_size; })) throw Exception(ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES, "Too many simultaneous queries. Maximum: {}", max_size); } @@ -292,7 +292,7 @@ ProcessListEntry::~ProcessListEntry() auto user_process_list_it = parent.user_to_queries.find(user); if (user_process_list_it == parent.user_to_queries.end()) { - LOG_ERROR(&Poco::Logger::get("ProcessList"), "Logical error: cannot find user in ProcessList"); + LOG_ERROR(getLogger("ProcessList"), "Logical error: cannot find user in ProcessList"); std::terminate(); } @@ -317,7 +317,7 @@ ProcessListEntry::~ProcessListEntry() if (!found) { - LOG_ERROR(&Poco::Logger::get("ProcessList"), "Logical error: cannot find query by query_id and pointer to ProcessListElement in ProcessListForUser"); + LOG_ERROR(getLogger("ProcessList"), "Logical error: cannot find query by query_id and pointer to ProcessListElement in ProcessListForUser"); std::terminate(); } diff --git a/src/Interpreters/Session.cpp b/src/Interpreters/Session.cpp index f0bb339e278c..f81411887ff7 100644 --- a/src/Interpreters/Session.cpp +++ b/src/Interpreters/Session.cpp @@ -265,7 +265,7 @@ class NamedSessionsStorage ThreadFromGlobalPool thread; bool 
quit = false; - Poco::Logger * log = &Poco::Logger::get("NamedSessionsStorage"); + LoggerPtr log = getLogger("NamedSessionsStorage"); }; @@ -282,7 +282,7 @@ void Session::shutdownNamedSessions() Session::Session(const ContextPtr & global_context_, ClientInfo::Interface interface_, bool is_secure, const std::string & certificate) : auth_id(UUIDHelpers::generateV4()), global_context(global_context_), - log(&Poco::Logger::get(String{magic_enum::enum_name(interface_)} + "-Session")) + log(getLogger(String{magic_enum::enum_name(interface_)} + "-Session")) { prepared_client_info.emplace(); prepared_client_info->interface = interface_; diff --git a/src/Interpreters/Session.h b/src/Interpreters/Session.h index 443867806d61..0072b420488b 100644 --- a/src/Interpreters/Session.h +++ b/src/Interpreters/Session.h @@ -100,7 +100,7 @@ class Session std::shared_ptr named_session; bool named_session_created = false; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; }; } diff --git a/src/Interpreters/Set.h b/src/Interpreters/Set.h index 00eff614c7cf..1259d6e9fecf 100644 --- a/src/Interpreters/Set.h +++ b/src/Interpreters/Set.h @@ -32,7 +32,7 @@ class Set /// store all set elements in explicit form. /// This is needed for subsequent use for index. Set(const SizeLimits & limits_, bool fill_set_elements_, bool transform_null_in_) - : log(&Poco::Logger::get("Set")), + : log(getLogger("Set")), limits(limits_), fill_set_elements(fill_set_elements_), transform_null_in(transform_null_in_) { } @@ -103,7 +103,7 @@ class Set /// Types for set_elements. 
DataTypes set_elements_types; - Poco::Logger * log; + LoggerPtr log; /// Limitations on the maximum size of the set SizeLimits limits; diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index 78513920236a..866fe09d5e76 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -115,13 +115,13 @@ std::shared_ptr createSystemLog( { if (!config.has(config_prefix)) { - LOG_DEBUG(&Poco::Logger::get("SystemLog"), + LOG_DEBUG(getLogger("SystemLog"), "Not creating {}.{} since corresponding section '{}' is missing from config", default_database_name, default_table_name, config_prefix); return {}; } - LOG_DEBUG(&Poco::Logger::get("SystemLog"), + LOG_DEBUG(getLogger("SystemLog"), "Creating {}.{} from {}", default_database_name, default_table_name, config_prefix); String database = config.getString(config_prefix + ".database", default_database_name); @@ -130,7 +130,7 @@ std::shared_ptr createSystemLog( if (database != default_database_name) { /// System tables must be loaded before other tables, but loading order is undefined for all databases except `system` - LOG_ERROR(&Poco::Logger::get("SystemLog"), "Custom database name for a system table specified in config." + LOG_ERROR(getLogger("SystemLog"), "Custom database name for a system table specified in config." " Table `{}` will be created in `system` database instead of `{}`", table, database); database = default_database_name; } @@ -310,7 +310,7 @@ SystemLog::SystemLog( , flush_interval_milliseconds(flush_interval_milliseconds_) { assert(database_name_ == DatabaseCatalog::SYSTEM_DATABASE); - log = &Poco::Logger::get("SystemLog (" + database_name_ + "." + table_name_ + ")"); + log = getLogger("SystemLog (" + database_name_ + "." 
+ table_name_ + ")"); } template diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp index dd525f9ab21d..107a8f0da5bc 100644 --- a/src/Interpreters/TableJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -453,12 +453,12 @@ TableJoin::createConvertingActions( for (const auto & col : dag->getResultColumns()) output_cols.push_back(col.name + ": " + col.type->getName()); - LOG_DEBUG(&Poco::Logger::get("TableJoin"), "{} JOIN converting actions: [{}] -> [{}]", + LOG_DEBUG(getLogger("TableJoin"), "{} JOIN converting actions: [{}] -> [{}]", side, fmt::join(input_cols, ", "), fmt::join(output_cols, ", ")); } else { - LOG_DEBUG(&Poco::Logger::get("TableJoin"), "{} JOIN converting actions: empty", side); + LOG_DEBUG(getLogger("TableJoin"), "{} JOIN converting actions: empty", side); return; } }; @@ -547,7 +547,7 @@ void TableJoin::inferJoinKeyCommonType(const LeftNamesAndTypes & left, const Rig if (!left_type_map.empty() || !right_type_map.empty()) { LOG_TRACE( - &Poco::Logger::get("TableJoin"), + getLogger("TableJoin"), "Infer supertype for joined columns. Left: [{}], Right: [{}]", formatTypeMap(left_type_map, left_types), formatTypeMap(right_type_map, right_types)); @@ -703,7 +703,7 @@ static void addJoinConditionWithAnd(ASTPtr & current_cond, const ASTPtr & new_co void TableJoin::addJoinCondition(const ASTPtr & ast, bool is_left) { auto & cond_ast = is_left ? clauses.back().on_filter_condition_left : clauses.back().on_filter_condition_right; - LOG_TRACE(&Poco::Logger::get("TableJoin"), "Adding join condition for {} table: {} -> {}", + LOG_TRACE(getLogger("TableJoin"), "Adding join condition for {} table: {} -> {}", (is_left ? "left" : "right"), ast ? queryToString(ast) : "NULL", cond_ast ? 
queryToString(cond_ast) : "NULL"); addJoinConditionWithAnd(cond_ast, ast); } diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index c57de88d964c..ddb44bf4cac4 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -124,7 +124,7 @@ struct TemporaryFileStream::OutputWriter , out_compressed_buf(*out_buf) , out_writer(out_compressed_buf, DBMS_TCP_PROTOCOL_VERSION, header_) { - LOG_TEST(&Poco::Logger::get("TemporaryFileStream"), "Writing to temporary file {}", path); + LOG_TEST(getLogger("TemporaryFileStream"), "Writing to temporary file {}", path); } OutputWriter(std::unique_ptr out_buf_, const Block & header_) @@ -132,7 +132,7 @@ struct TemporaryFileStream::OutputWriter , out_compressed_buf(*out_buf) , out_writer(out_compressed_buf, DBMS_TCP_PROTOCOL_VERSION, header_) { - LOG_TEST(&Poco::Logger::get("TemporaryFileStream"), + LOG_TEST(getLogger("TemporaryFileStream"), "Writing to temporary file {}", static_cast(out_buf.get())->getFileName()); } @@ -198,7 +198,7 @@ struct TemporaryFileStream::InputReader , in_compressed_buf(in_file_buf) , in_reader(in_compressed_buf, header_, DBMS_TCP_PROTOCOL_VERSION) { - LOG_TEST(&Poco::Logger::get("TemporaryFileStream"), "Reading {} from {}", header_.dumpStructure(), path); + LOG_TEST(getLogger("TemporaryFileStream"), "Reading {} from {}", header_.dumpStructure(), path); } explicit InputReader(const String & path) @@ -206,7 +206,7 @@ struct TemporaryFileStream::InputReader , in_compressed_buf(in_file_buf) , in_reader(in_compressed_buf, DBMS_TCP_PROTOCOL_VERSION) { - LOG_TEST(&Poco::Logger::get("TemporaryFileStream"), "Reading from {}", path); + LOG_TEST(getLogger("TemporaryFileStream"), "Reading from {}", path); } Block read() diff --git a/src/Interpreters/TraceCollector.cpp b/src/Interpreters/TraceCollector.cpp index 49588d490f5e..919bce6cbbec 100644 --- a/src/Interpreters/TraceCollector.cpp +++ b/src/Interpreters/TraceCollector.cpp @@ 
-35,7 +35,7 @@ TraceCollector::~TraceCollector() try { if (!thread.joinable()) - LOG_ERROR(&Poco::Logger::get("TraceCollector"), "TraceCollector thread is malformed and cannot be joined"); + LOG_ERROR(getLogger("TraceCollector"), "TraceCollector thread is malformed and cannot be joined"); else stop(); diff --git a/src/Interpreters/TransactionLog.cpp b/src/Interpreters/TransactionLog.cpp index 6257e617d4ac..466bfc54dab9 100644 --- a/src/Interpreters/TransactionLog.cpp +++ b/src/Interpreters/TransactionLog.cpp @@ -21,7 +21,7 @@ namespace ErrorCodes extern const int UNKNOWN_STATUS_OF_TRANSACTION; } -static void tryWriteEventToSystemLog(Poco::Logger * log, ContextPtr context, +static void tryWriteEventToSystemLog(LoggerPtr log, ContextPtr context, TransactionsInfoLogElement::Type type, const TransactionID & tid, CSN csn = Tx::UnknownCSN) try { @@ -44,7 +44,7 @@ catch (...) TransactionLog::TransactionLog() : global_context(Context::getGlobalContextInstance()) - , log(&Poco::Logger::get("TransactionLog")) + , log(getLogger("TransactionLog")) , zookeeper_path(global_context->getConfigRef().getString("transaction_log.zookeeper_path", "/clickhouse/txn")) , zookeeper_path_log(zookeeper_path + "/log") , fault_probability_before_commit(global_context->getConfigRef().getDouble("transaction_log.fault_probability_before_commit", 0)) diff --git a/src/Interpreters/TransactionLog.h b/src/Interpreters/TransactionLog.h index 6e8777d85198..58847553dfda 100644 --- a/src/Interpreters/TransactionLog.h +++ b/src/Interpreters/TransactionLog.h @@ -154,7 +154,7 @@ class TransactionLog final : public SingletonHelper CSN getCSNImpl(const TIDHash & tid_hash, const std::atomic * failback_with_strict_load_csn = nullptr) const; const ContextPtr global_context; - Poco::Logger * const log; + LoggerPtr const log; /// The newest snapshot available for reading std::atomic latest_snapshot; diff --git a/src/Interpreters/TransactionVersionMetadata.cpp b/src/Interpreters/TransactionVersionMetadata.cpp index 
01735a798b91..7bedca5d5c75 100644 --- a/src/Interpreters/TransactionVersionMetadata.cpp +++ b/src/Interpreters/TransactionVersionMetadata.cpp @@ -23,7 +23,7 @@ namespace ErrorCodes VersionMetadata::VersionMetadata() { /// It would be better to make it static, but static loggers do not work for some reason (initialization order?) - log = &Poco::Logger::get("VersionMetadata"); + log = getLogger("VersionMetadata"); } /// It can be used for introspection purposes only diff --git a/src/Interpreters/TransactionVersionMetadata.h b/src/Interpreters/TransactionVersionMetadata.h index 18ac445cc29d..4309975d195b 100644 --- a/src/Interpreters/TransactionVersionMetadata.h +++ b/src/Interpreters/TransactionVersionMetadata.h @@ -72,7 +72,7 @@ struct VersionMetadata String toString(bool one_line = true) const; - Poco::Logger * log; + LoggerPtr log; VersionMetadata(); }; diff --git a/src/Interpreters/TransactionsInfoLog.cpp b/src/Interpreters/TransactionsInfoLog.cpp index b62cd4672d84..b4c46045319b 100644 --- a/src/Interpreters/TransactionsInfoLog.cpp +++ b/src/Interpreters/TransactionsInfoLog.cpp @@ -88,7 +88,7 @@ void TransactionsInfoLogElement::appendToBlock(MutableColumns & columns) const } -void tryWriteEventToSystemLog(Poco::Logger * log, +void tryWriteEventToSystemLog(LoggerPtr log, TransactionsInfoLogElement::Type type, const TransactionID & tid, const TransactionInfoContext & context) try diff --git a/src/Interpreters/TransactionsInfoLog.h b/src/Interpreters/TransactionsInfoLog.h index fc3783b59163..d1a591669f82 100644 --- a/src/Interpreters/TransactionsInfoLog.h +++ b/src/Interpreters/TransactionsInfoLog.h @@ -53,7 +53,7 @@ class TransactionsInfoLog : public SystemLog }; -void tryWriteEventToSystemLog(Poco::Logger * log, TransactionsInfoLogElement::Type type, +void tryWriteEventToSystemLog(LoggerPtr log, TransactionsInfoLogElement::Type type, const TransactionID & tid, const TransactionInfoContext & context); } diff --git a/src/Interpreters/TreeRewriter.cpp 
b/src/Interpreters/TreeRewriter.cpp index 70e9cace02f1..b35b97a0a804 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -592,13 +592,13 @@ bool tryJoinOnConst(TableJoin & analyzed_join, const ASTPtr & on_expression, Con if (eval_const_res.value()) { /// JOIN ON 1 == 1 - LOG_DEBUG(&Poco::Logger::get("TreeRewriter"), "Join on constant executed as cross join"); + LOG_DEBUG(getLogger("TreeRewriter"), "Join on constant executed as cross join"); analyzed_join.resetToCross(); } else { /// JOIN ON 1 != 1 - LOG_DEBUG(&Poco::Logger::get("TreeRewriter"), "Join on constant executed as empty join"); + LOG_DEBUG(getLogger("TreeRewriter"), "Join on constant executed as empty join"); analyzed_join.resetKeys(); } return true; diff --git a/src/Interpreters/executeDDLQueryOnCluster.cpp b/src/Interpreters/executeDDLQueryOnCluster.cpp index c40b9c779c92..0e5e1dc4978c 100644 --- a/src/Interpreters/executeDDLQueryOnCluster.cpp +++ b/src/Interpreters/executeDDLQueryOnCluster.cpp @@ -212,7 +212,7 @@ class DDLQueryStatusSource final : public ISource String node_path; ContextPtr context; Stopwatch watch; - Poco::Logger * log; + LoggerPtr log; NameSet waiting_hosts; /// hosts from task host list NameSet finished_hosts; /// finished hosts from host list @@ -296,7 +296,7 @@ DDLQueryStatusSource::DDLQueryStatusSource( , node_path(zk_node_path) , context(context_) , watch(CLOCK_MONOTONIC_COARSE) - , log(&Poco::Logger::get("DDLQueryStatusSource")) + , log(getLogger("DDLQueryStatusSource")) { auto output_mode = context->getSettingsRef().distributed_ddl_output_mode; throw_on_timeout = output_mode == DistributedDDLOutputMode::THROW || output_mode == DistributedDDLOutputMode::NONE; diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index e60510dc5f3f..9020430cbb2b 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -113,7 +113,7 @@ static void logQuery(const String & query, ContextPtr context, 
bool internal, Qu { if (internal) { - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "(internal) {} (stage: {})", toOneLineQuery(query), QueryProcessingStage::toString(stage)); + LOG_DEBUG(getLogger("executeQuery"), "(internal) {} (stage: {})", toOneLineQuery(query), QueryProcessingStage::toString(stage)); } else { @@ -136,7 +136,7 @@ static void logQuery(const String & query, ContextPtr context, bool internal, Qu if (auto txn = context->getCurrentTransaction()) transaction_info = fmt::format(" (TID: {}, TIDH: {})", txn->tid, txn->tid.getHash()); - LOG_DEBUG(&Poco::Logger::get("executeQuery"), "(from {}{}{}){}{} {} (stage: {})", + LOG_DEBUG(getLogger("executeQuery"), "(from {}{}{}){}{} {} (stage: {})", client_info.current_address.toString(), (current_user != "default" ? ", user: " + current_user : ""), (!initial_query_id.empty() && current_query_id != initial_query_id ? ", initial_query_id: " + initial_query_id : std::string()), @@ -147,7 +147,7 @@ static void logQuery(const String & query, ContextPtr context, bool internal, Qu if (client_info.client_trace_context.trace_id != UUID()) { - LOG_TRACE(&Poco::Logger::get("executeQuery"), + LOG_TRACE(getLogger("executeQuery"), "OpenTelemetry traceparent '{}'", client_info.client_trace_context.composeTraceparentHeader()); } @@ -202,9 +202,9 @@ static void logException(ContextPtr context, QueryLogElement & elem, bool log_er elem.stack_trace); if (log_error) - LOG_ERROR(&Poco::Logger::get("executeQuery"), message); + LOG_ERROR(getLogger("executeQuery"), message); else - LOG_INFO(&Poco::Logger::get("executeQuery"), message); + LOG_INFO(getLogger("executeQuery"), message); } static void onExceptionBeforeStart( @@ -566,7 +566,7 @@ static std::tuple executeQueryImpl( bool async_insert = false; auto * queue = context->getAsynchronousInsertQueue(); - auto * logger = &Poco::Logger::get("executeQuery"); + auto logger = getLogger("executeQuery"); if (insert_query && settings.async_insert) { @@ -969,7 +969,7 @@ static std::tuple 
executeQueryImpl( double elapsed_seconds = static_cast(info.elapsed_microseconds) / 1000000.0; double rows_per_second = static_cast(elem.read_rows) / elapsed_seconds; LOG_DEBUG( - &Poco::Logger::get("executeQuery"), + getLogger("executeQuery"), "Read {} rows, {} in {} sec., {} rows/sec., {}/sec.", elem.read_rows, ReadableSize(elem.read_bytes), diff --git a/src/Interpreters/loadMetadata.cpp b/src/Interpreters/loadMetadata.cpp index 83af2684322e..29257c615bd2 100644 --- a/src/Interpreters/loadMetadata.cpp +++ b/src/Interpreters/loadMetadata.cpp @@ -160,7 +160,7 @@ static void checkIncompleteOrdinaryToAtomicConversion(ContextPtr context, const void loadMetadata(ContextMutablePtr context, const String & default_database_name) { - Poco::Logger * log = &Poco::Logger::get("loadMetadata"); + LoggerPtr log = getLogger("loadMetadata"); String path = context->getPath() + "metadata"; @@ -267,7 +267,7 @@ static void loadSystemDatabaseImpl(ContextMutablePtr context, const String & dat } } -static void convertOrdinaryDatabaseToAtomic(Poco::Logger * log, ContextMutablePtr context, const DatabasePtr & database, +static void convertOrdinaryDatabaseToAtomic(LoggerPtr log, ContextMutablePtr context, const DatabasePtr & database, const String & name, const String tmp_name) { /// It's kind of C++ script that creates temporary database with Atomic engine, @@ -346,7 +346,7 @@ static void convertOrdinaryDatabaseToAtomic(Poco::Logger * log, ContextMutablePt /// Can be called only during server startup when there are no queries from users. 
static void maybeConvertOrdinaryDatabaseToAtomic(ContextMutablePtr context, const String & database_name, bool tables_started) { - Poco::Logger * log = &Poco::Logger::get("loadMetadata"); + LoggerPtr log = getLogger("loadMetadata"); auto database = DatabaseCatalog::instance().getDatabase(database_name); if (!database) @@ -456,14 +456,14 @@ void convertDatabasesEnginesIfNeed(ContextMutablePtr context) if (!fs::exists(convert_flag_path)) return; - LOG_INFO(&Poco::Logger::get("loadMetadata"), "Found convert_ordinary_to_atomic file in flags directory, " + LOG_INFO(getLogger("loadMetadata"), "Found convert_ordinary_to_atomic file in flags directory, " "will try to convert all Ordinary databases to Atomic"); for (const auto & [name, _] : DatabaseCatalog::instance().getDatabases()) if (name != DatabaseCatalog::SYSTEM_DATABASE) maybeConvertOrdinaryDatabaseToAtomic(context, name, /* tables_started */ true); - LOG_INFO(&Poco::Logger::get("loadMetadata"), "Conversion finished, removing convert_ordinary_to_atomic flag"); + LOG_INFO(getLogger("loadMetadata"), "Conversion finished, removing convert_ordinary_to_atomic flag"); fs::remove(convert_flag_path); } diff --git a/src/Parsers/DumpASTNode.h b/src/Parsers/DumpASTNode.h index 60fcece55904..5efc0e018f47 100644 --- a/src/Parsers/DumpASTNode.h +++ b/src/Parsers/DumpASTNode.h @@ -165,7 +165,7 @@ class DebugASTLog : log(nullptr) { if constexpr (_enable) - log = &Poco::Logger::get("AST"); + log = getLogger("AST"); } ~DebugASTLog() @@ -177,7 +177,7 @@ class DebugASTLog WriteBuffer * stream() { return (_enable ? 
&buf : nullptr); } private: - Poco::Logger * log; + LoggerPtr log; WriteBufferFromOwnString buf; }; diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 0dc0178f7680..f00a8ed032e0 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -984,7 +984,7 @@ void Planner::buildQueryPlanIfNeeded() if (query_plan.isInitialized()) return; - LOG_TRACE(&Poco::Logger::get("Planner"), "Query {} to stage {}{}", + LOG_TRACE(getLogger("Planner"), "Query {} to stage {}{}", query_tree->formatConvertedASTForErrorMessage(), QueryProcessingStage::toString(select_query_options.to_stage), select_query_options.only_analyze ? " only analyze" : ""); @@ -1139,7 +1139,7 @@ void Planner::buildPlanForQueryNode() auto from_stage = join_tree_query_plan.from_stage; query_plan = std::move(join_tree_query_plan.query_plan); - LOG_TRACE(&Poco::Logger::get("Planner"), "Query {} from stage {} to stage {}{}", + LOG_TRACE(getLogger("Planner"), "Query {} from stage {} to stage {}{}", query_tree->formatConvertedASTForErrorMessage(), QueryProcessingStage::toString(from_stage), QueryProcessingStage::toString(select_query_options.to_stage), diff --git a/src/Processors/Executors/PipelineExecutor.h b/src/Processors/Executors/PipelineExecutor.h index 147e5b2744a5..63c85afee3ce 100644 --- a/src/Processors/Executors/PipelineExecutor.h +++ b/src/Processors/Executors/PipelineExecutor.h @@ -83,7 +83,7 @@ class PipelineExecutor std::atomic_bool cancelled = false; std::atomic_bool cancelled_reading = false; - Poco::Logger * log = &Poco::Logger::get("PipelineExecutor"); + LoggerPtr log = getLogger("PipelineExecutor"); /// Now it's used to check if query was killed. 
QueryStatusPtr process_list_element; diff --git a/src/Processors/Formats/IRowInputFormat.cpp b/src/Processors/Formats/IRowInputFormat.cpp index 176fd5b2a565..f177c2c4c48f 100644 --- a/src/Processors/Formats/IRowInputFormat.cpp +++ b/src/Processors/Formats/IRowInputFormat.cpp @@ -231,7 +231,7 @@ Chunk IRowInputFormat::generate() { if (num_errors && (params.allow_errors_num > 0 || params.allow_errors_ratio > 0)) { - Poco::Logger * log = &Poco::Logger::get("IRowInputFormat"); + LoggerPtr log = getLogger("IRowInputFormat"); LOG_DEBUG(log, "Skipped {} rows with errors while reading the input stream", num_errors); } diff --git a/src/Processors/Formats/ISchemaReader.cpp b/src/Processors/Formats/ISchemaReader.cpp index c96cb373a2d8..e01117d13bf4 100644 --- a/src/Processors/Formats/ISchemaReader.cpp +++ b/src/Processors/Formats/ISchemaReader.cpp @@ -72,7 +72,7 @@ void IIRowSchemaReader::setContext(ContextPtr & context) } else { - LOG_WARNING(&Poco::Logger::get("IIRowSchemaReader"), "Couldn't parse schema inference hints: {}. This setting will be ignored", hints_parsing_error); + LOG_WARNING(getLogger("IIRowSchemaReader"), "Couldn't parse schema inference hints: {}. This setting will be ignored", hints_parsing_error); } } diff --git a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp index c2602a4d1d52..b70b069145e2 100644 --- a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp @@ -918,7 +918,7 @@ class AvroConfluentRowInputFormat::SchemaRegistry try { Poco::URI url(base_url, "/schemas/ids/" + std::to_string(id)); - LOG_TRACE((&Poco::Logger::get("AvroConfluentRowInputFormat")), "Fetching schema id = {}", id); + LOG_TRACE((getLogger("AvroConfluentRowInputFormat")), "Fetching schema id = {}", id); /// One second for connect/send/receive. Just in case. 
ConnectionTimeouts timeouts({1, 0}, {1, 0}, {1, 0}); @@ -945,7 +945,7 @@ class AvroConfluentRowInputFormat::SchemaRegistry Poco::JSON::Parser parser; auto json_body = parser.parse(*response_body).extract(); auto schema = json_body->getValue("schema"); - LOG_TRACE((&Poco::Logger::get("AvroConfluentRowInputFormat")), "Successfully fetched schema id = {}\n{}", id, schema); + LOG_TRACE((getLogger("AvroConfluentRowInputFormat")), "Successfully fetched schema id = {}\n{}", id, schema); return avro::compileJsonSchemaFromString(schema); } catch (const Exception &) diff --git a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h index 790d05e83dd6..2bfaf4b8becf 100644 --- a/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h +++ b/src/Processors/Formats/Impl/ParallelFormattingOutputFormat.h @@ -84,7 +84,7 @@ class ParallelFormattingOutputFormat : public IOutputFormat , pool(CurrentMetrics::ParallelFormattingOutputFormatThreads, CurrentMetrics::ParallelFormattingOutputFormatThreadsActive, params.max_threads_for_parallel_formatting) { - LOG_TEST(&Poco::Logger::get("ParallelFormattingOutputFormat"), "Parallel formatting is being used"); + LOG_TEST(getLogger("ParallelFormattingOutputFormat"), "Parallel formatting is being used"); NullWriteBuffer buf; save_totals_and_extremes_in_statistics = internal_formatter_creator(buf)->areTotalsAndExtremesUsedInFinalize(); diff --git a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h index a0408c34d0fa..fe8707585918 100644 --- a/src/Processors/Formats/Impl/ParallelParsingInputFormat.h +++ b/src/Processors/Formats/Impl/ParallelParsingInputFormat.h @@ -108,7 +108,7 @@ class ParallelParsingInputFormat : public IInputFormat // bump into reader thread on wraparound. 
processing_units.resize(params.max_threads + 2); - LOG_TRACE(&Poco::Logger::get("ParallelParsingInputFormat"), "Parallel parsing is used"); + LOG_TRACE(getLogger("ParallelParsingInputFormat"), "Parallel parsing is used"); } ~ParallelParsingInputFormat() override diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp index 8f2affee7f88..c85826e84381 100644 --- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp @@ -476,7 +476,7 @@ bool ValuesBlockInputFormat::parseExpression(IColumn & column, size_t column_idx &found_in_cache, delimiter); - LOG_TEST(&Poco::Logger::get("ValuesBlockInputFormat"), "Will use an expression template to parse column {}: {}", + LOG_TEST(getLogger("ValuesBlockInputFormat"), "Will use an expression template to parse column {}: {}", column_idx, structure->dumpTemplate()); templates[column_idx].emplace(structure); diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp index 0c23dd51f3c8..acbe3b148306 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp @@ -27,7 +27,7 @@ CollapsingSortedAlgorithm::CollapsingSortedAlgorithm( const String & sign_column, bool only_positive_sign_, size_t max_block_size, - Poco::Logger * log_, + LoggerPtr log_, WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes) : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, max_row_refs) diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h index f457af05bd56..5b86596a4666 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h @@ -33,7 
+33,7 @@ class CollapsingSortedAlgorithm final : public IMergingAlgorithmWithSharedChunks const String & sign_column, bool only_positive_sign_, /// For select final. Skip rows with sum(sign) < 0. size_t max_block_size, - Poco::Logger * log_, + LoggerPtr log_, WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); @@ -62,7 +62,7 @@ class CollapsingSortedAlgorithm final : public IMergingAlgorithmWithSharedChunks PODArray current_row_sources; /// Sources of rows with the current primary key size_t count_incorrect_data = 0; /// To prevent too many error messages from writing to the log. - Poco::Logger * log; + LoggerPtr log; void reportIncorrectData(); void insertRow(RowRef & row); diff --git a/src/Processors/Merges/Algorithms/RowRef.h b/src/Processors/Merges/Algorithms/RowRef.h index cf741c1b53b8..caebd29f8e68 100644 --- a/src/Processors/Merges/Algorithms/RowRef.h +++ b/src/Processors/Merges/Algorithms/RowRef.h @@ -83,7 +83,7 @@ class SharedChunkAllocator { if (free_chunks.size() != chunks.size()) { - LOG_ERROR(&Poco::Logger::get("SharedChunkAllocator"), "SharedChunkAllocator was destroyed before RowRef was released. StackTrace: {}", StackTrace().toString()); + LOG_ERROR(getLogger("SharedChunkAllocator"), "SharedChunkAllocator was destroyed before RowRef was released. StackTrace: {}", StackTrace().toString()); return; } @@ -100,7 +100,7 @@ class SharedChunkAllocator /// This may happen if allocator was removed before chunks. /// Log message and exit, because we don't want to throw exception in destructor. - LOG_ERROR(&Poco::Logger::get("SharedChunkAllocator"), "SharedChunkAllocator was destroyed before RowRef was released. StackTrace: {}", StackTrace().toString()); + LOG_ERROR(getLogger("SharedChunkAllocator"), "SharedChunkAllocator was destroyed before RowRef was released. 
StackTrace: {}", StackTrace().toString()); return; } diff --git a/src/Processors/Merges/CollapsingSortedTransform.h b/src/Processors/Merges/CollapsingSortedTransform.h index abe3eefb401a..6136f4bee025 100644 --- a/src/Processors/Merges/CollapsingSortedTransform.h +++ b/src/Processors/Merges/CollapsingSortedTransform.h @@ -27,7 +27,7 @@ class CollapsingSortedTransform final : public IMergingTransform 0) { - LOG_DEBUG(&Poco::Logger::get("QueryPlanOptimizations"), "Pushed down filter {} to the {} side of join", split_filter_column_name, kind); + LOG_DEBUG(getLogger("QueryPlanOptimizations"), "Pushed down filter {} to the {} side of join", split_filter_column_name, kind); } return updated_steps; }; diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp index 557b76293ea6..710432d0e10f 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrewhere.cpp @@ -151,7 +151,7 @@ void optimizePrewhere(Stack & stack, QueryPlan::Nodes & nodes) storage_metadata, queried_columns, storage.supportedPrewhereColumns(), - &Poco::Logger::get("QueryPlanOptimizePrewhere")}; + getLogger("QueryPlanOptimizePrewhere")}; auto optimize_result = where_optimizer.optimize(filter_step->getExpression(), filter_step->getFilterColumnName(), diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp index 745899103ea0..6a5deecab098 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp @@ -135,7 +135,7 @@ std::optional matchAggregateFunctions( if (it == projection_aggregate_functions.end()) { // LOG_TRACE( - // &Poco::Logger::get("optimizeUseProjections"), + // getLogger("optimizeUseProjections"), // "Cannot match agg func {} by name {}", // 
aggregate.column_name, aggregate.function->getName()); @@ -163,7 +163,7 @@ std::optional matchAggregateFunctions( /// not match. if (!candidate.function->getStateType()->equals(*aggregate.function->getStateType())) { - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Cannot match agg func {} vs {} by state {} vs {}", + // LOG_TRACE(getLogger("optimizeUseProjections"), "Cannot match agg func {} vs {} by state {} vs {}", // aggregate.column_name, candidate.column_name, // candidate.function->getStateType()->getName(), aggregate.function->getStateType()->getName()); continue; @@ -212,7 +212,7 @@ std::optional matchAggregateFunctions( if (mt == matches.end()) { // LOG_TRACE( - // &Poco::Logger::get("optimizeUseProjections"), + // getLogger("optimizeUseProjections"), // "Cannot match agg func {} vs {} : can't match arg {} vs {} : no node in map", // aggregate.column_name, candidate.column_name, query_name, proj_name); @@ -223,7 +223,7 @@ std::optional matchAggregateFunctions( if (node_match.node != proj_node || node_match.monotonicity) { // LOG_TRACE( - // &Poco::Logger::get("optimizeUseProjections"), + // getLogger("optimizeUseProjections"), // "Cannot match agg func {} vs {} : can't match arg {} vs {} : no match or monotonicity", // aggregate.column_name, candidate.column_name, query_name, proj_name); @@ -303,7 +303,7 @@ ActionsDAGPtr analyzeAggregateProjection( // for (const auto & [node, match] : matches) // { - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Match {} {} -> {} {} (with monotonicity : {})", + // LOG_TRACE(getLogger("optimizeUseProjections"), "Match {} {} -> {} {} (with monotonicity : {})", // static_cast(node), node->result_name, // static_cast(match.node), (match.node ? match.node->result_name : ""), match.monotonicity != std::nullopt); // } @@ -397,7 +397,7 @@ ActionsDAGPtr analyzeAggregateProjection( /// Not a match and there is no matched child. 
if (frame.node->type == ActionsDAG::ActionType::INPUT) { - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Cannot find match for {}", frame.node->result_name); + // LOG_TRACE(getLogger("optimizeUseProjections"), "Cannot find match for {}", frame.node->result_name); return {}; } @@ -407,7 +407,7 @@ ActionsDAGPtr analyzeAggregateProjection( } } - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Folding actions by projection"); + // LOG_TRACE(getLogger("optimizeUseProjections"), "Folding actions by projection"); auto proj_dag = query.dag->foldActionsByProjection(new_inputs, query_key_nodes); appendAggregateFunctions(*proj_dag, aggregates, *matched_aggregates); @@ -470,7 +470,7 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( if (!can_use_minmax_projection && agg_projections.empty()) return candidates; - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Has agg projection"); + // LOG_TRACE(getLogger("optimizeUseProjections"), "Has agg projection"); QueryDAG dag; if (!dag.build(*node.children.front())) @@ -478,23 +478,23 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( auto query_index = buildDAGIndex(*dag.dag); - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Query DAG: {}", dag.dag->dumpDAG()); + // LOG_TRACE(getLogger("optimizeUseProjections"), "Query DAG: {}", dag.dag->dumpDAG()); candidates.has_filter = dag.filter_node; if (can_use_minmax_projection) { const auto * projection = &*(metadata->minmax_count_projection); - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Try projection {}", projection->name); + // LOG_TRACE(getLogger("optimizeUseProjections"), "Try projection {}", projection->name); auto info = getAggregatingProjectionInfo(*projection, context, metadata, key_virtual_columns); - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Projection DAG {}", info.before_aggregation->dumpDAG()); + // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection DAG 
{}", info.before_aggregation->dumpDAG()); if (auto proj_dag = analyzeAggregateProjection(info, dag, query_index, keys, aggregates)) { - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Projection analyzed DAG {}", proj_dag->dumpDAG()); + // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection analyzed DAG {}", proj_dag->dumpDAG()); AggregateProjectionCandidate candidate{.info = std::move(info), .dag = std::move(proj_dag)}; MergeTreeData::DataPartsVector minmax_projection_normal_parts; - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Projection sample block {}", sample_block.dumpStructure()); + // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection sample block {}", sample_block.dumpStructure()); auto block = reading.getMergeTreeData().getMinMaxCountProjectionBlock( metadata, candidate.dag->getRequiredColumnsNames(), @@ -505,7 +505,7 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( max_added_blocks.get(), context); - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Projection sample block 2 {}", block.dumpStructure()); + // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection sample block 2 {}", block.dumpStructure()); if (block) { @@ -524,12 +524,12 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( candidates.real.reserve(agg_projections.size()); for (const auto * projection : agg_projections) { - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Try projection {}", projection->name); + // LOG_TRACE(getLogger("optimizeUseProjections"), "Try projection {}", projection->name); auto info = getAggregatingProjectionInfo(*projection, context, metadata, key_virtual_columns); - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Projection DAG {}", info.before_aggregation->dumpDAG()); + // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection DAG {}", info.before_aggregation->dumpDAG()); if (auto proj_dag = analyzeAggregateProjection(info, dag, query_index, keys, 
aggregates)) { - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Projection analyzed DAG {}", proj_dag->dumpDAG()); + // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection analyzed DAG {}", proj_dag->dumpDAG()); AggregateProjectionCandidate candidate{.info = std::move(info), .dag = std::move(proj_dag)}; candidate.projection = projection; candidates.real.emplace_back(std::move(candidate)); @@ -622,7 +622,7 @@ bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & /// Add reading from projection step. if (candidates.minmax_projection) { - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Minmax proj block {}", + // LOG_TRACE(getLogger("optimizeUseProjections"), "Minmax proj block {}", // candidates.minmax_projection->block.dumpStructure()); Pipe pipe(std::make_shared(std::move(candidates.minmax_projection->block))); @@ -666,7 +666,7 @@ bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & reading->setAnalyzedResult(std::move(best_candidate->merge_tree_ordinary_select_result_ptr)); } - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Projection reading header {}", + // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection reading header {}", // projection_reading->getOutputStream().header.dumpStructure()); projection_reading->setStepDescription(best_candidate->projection->name); diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp index 496308c8071e..5086bda1fd73 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp @@ -112,7 +112,7 @@ bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes) if (query.dag) { query.dag->removeUnusedActions(); - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Query DAG: {}", query.dag->dumpDAG()); + // 
LOG_TRACE(getLogger("optimizeUseProjections"), "Query DAG: {}", query.dag->dumpDAG()); } } @@ -128,7 +128,7 @@ bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes) auto ordinary_reading_select_result = reading->selectRangesToRead(parts); size_t ordinary_reading_marks = ordinary_reading_select_result->marks(); - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), + // LOG_TRACE(getLogger("optimizeUseProjections"), // "Marks for ordinary reading {}", ordinary_reading_marks); std::shared_ptr max_added_blocks = getMaxAddedBlocks(reading); @@ -152,7 +152,7 @@ bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes) if (!analyzed) continue; - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), + // LOG_TRACE(getLogger("optimizeUseProjections"), // "Marks for projection {} {}", projection->name ,candidate.sum_marks); if (candidate.sum_marks >= ordinary_reading_marks) @@ -173,7 +173,7 @@ bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes) storage_snapshot->storage, storage_snapshot->metadata, storage_snapshot->object_columns); //, storage_snapshot->data); proj_snapshot->addProjection(best_candidate->projection); - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Proj snapshot {}", + // LOG_TRACE(getLogger("optimizeUseProjections"), "Proj snapshot {}", // proj_snapshot->getColumns(GetColumnsOptions::Kind::All).toString()); auto query_info_copy = query_info; @@ -201,7 +201,7 @@ bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes) if (has_ordinary_parts) reading->setAnalyzedResult(std::move(best_candidate->merge_tree_ordinary_select_result_ptr)); - // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Projection reading header {}", + // LOG_TRACE(getLogger("optimizeUseProjections"), "Projection reading header {}", // projection_reading->getOutputStream().header.dumpStructure()); projection_reading->setStepDescription(best_candidate->projection->name); diff --git 
a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp index c9a0270f6e7a..d06971698a4f 100644 --- a/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp +++ b/src/Processors/QueryPlan/Optimizations/removeRedundantDistinct.cpp @@ -39,14 +39,14 @@ namespace else ss << value; - LOG_DEBUG(&Poco::Logger::get("redundantDistinct"), "{}{}{}", key, separator, ss.str()); + LOG_DEBUG(getLogger("redundantDistinct"), "{}{}{}", key, separator, ss.str()); } } void logActionsDAG(const String & prefix, const ActionsDAGPtr & actions) { if constexpr (debug_logging_enabled) - LOG_DEBUG(&Poco::Logger::get("redundantDistinct"), "{} :\n{}", prefix, actions->dumpDAG()); + LOG_DEBUG(getLogger("redundantDistinct"), "{} :\n{}", prefix, actions->dumpDAG()); } using DistinctColumns = std::set; diff --git a/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp b/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp index 41e30dee83e5..aa87e697f66e 100644 --- a/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp +++ b/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp @@ -111,7 +111,7 @@ class QueryPlanVisitor { const IQueryPlanStep * current_step = node->step.get(); LOG_DEBUG( - &Poco::Logger::get("QueryPlanVisitor"), + getLogger("QueryPlanVisitor"), "{}: {}: {}", prefix, getStepId(current_step), diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index ae27d7344f72..dfab82a11ccb 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -181,7 +181,7 @@ ReadFromMergeTree::ReadFromMergeTree( size_t num_streams_, bool sample_factor_column_queried_, std::shared_ptr max_block_numbers_to_read_, - Poco::Logger * log_, + LoggerPtr log_, MergeTreeDataSelectAnalysisResultPtr analyzed_result_ptr_, bool enable_parallel_reading) : 
SourceStepWithFilter(DataStream{.header = IMergeTreeSelectAlgorithm::transformHeader( @@ -426,7 +426,7 @@ Pipe ReadFromMergeTree::readFromPool( false); } - auto * logger = &Poco::Logger::get(data.getLogName() + " (SelectExecutor)"); + auto logger = getLogger(data.getLogName() + " (SelectExecutor)"); LOG_DEBUG(logger, "Reading approx. {} rows with {} streams", total_rows, max_streams); for (size_t i = 0; i < max_streams; ++i) @@ -1158,7 +1158,7 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( const MergeTreeData & data, const Names & real_column_names, bool sample_factor_column_queried, - Poco::Logger * log) + LoggerPtr log) { const auto & settings = context->getSettingsRef(); if (settings.allow_experimental_analyzer || settings.query_plan_optimize_primary_key) @@ -1236,7 +1236,7 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToReadImpl( const MergeTreeData & data, const Names & real_column_names, bool sample_factor_column_queried, - Poco::Logger * log) + LoggerPtr log) { AnalysisResult result; const auto & settings = context->getSettingsRef(); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 5cbc7bf5c247..eab0d7d4ae1b 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -107,7 +107,7 @@ class ReadFromMergeTree final : public SourceStepWithFilter size_t num_streams_, bool sample_factor_column_queried_, std::shared_ptr max_block_numbers_to_read_, - Poco::Logger * log_, + LoggerPtr log_, MergeTreeDataSelectAnalysisResultPtr analyzed_result_ptr_, bool enable_parallel_reading ); @@ -145,7 +145,7 @@ class ReadFromMergeTree final : public SourceStepWithFilter const MergeTreeData & data, const Names & real_column_names, bool sample_factor_column_queried, - Poco::Logger * log); + LoggerPtr log); MergeTreeDataSelectAnalysisResultPtr selectRangesToRead(MergeTreeData::DataPartsVector parts) const; @@ 
-190,7 +190,7 @@ class ReadFromMergeTree final : public SourceStepWithFilter const MergeTreeData & data, const Names & real_column_names, bool sample_factor_column_queried, - Poco::Logger * log); + LoggerPtr log); int getSortDirection() const { @@ -229,7 +229,7 @@ class ReadFromMergeTree final : public SourceStepWithFilter std::shared_ptr max_block_numbers_to_read; - Poco::Logger * log; + LoggerPtr log; UInt64 selected_parts = 0; UInt64 selected_rows = 0; UInt64 selected_marks = 0; diff --git a/src/Processors/QueryPlan/ReadFromRemote.cpp b/src/Processors/QueryPlan/ReadFromRemote.cpp index 63325488e905..dab76758ff4a 100644 --- a/src/Processors/QueryPlan/ReadFromRemote.cpp +++ b/src/Processors/QueryPlan/ReadFromRemote.cpp @@ -104,7 +104,7 @@ ReadFromRemote::ReadFromRemote( ThrottlerPtr throttler_, Scalars scalars_, Tables external_tables_, - Poco::Logger * log_, + LoggerPtr log_, UInt32 shard_count_, std::shared_ptr storage_limits_) : ISourceStep(DataStream{.header = std::move(header_)}) @@ -168,7 +168,7 @@ void ReadFromRemote::addLazyPipe(Pipes & pipes, const ClusterProxy::SelectStream catch (const Exception & ex) { if (ex.code() == ErrorCodes::ALL_CONNECTION_TRIES_FAILED) - LOG_WARNING(&Poco::Logger::get("ClusterProxy::SelectStreamFactory"), + LOG_WARNING(getLogger("ClusterProxy::SelectStreamFactory"), "Connections to remote replicas of local shard {} failed, will use stale local replica", shard.shard_info.shard_num); else throw; @@ -280,7 +280,7 @@ ReadFromParallelRemoteReplicasStep::ReadFromParallelRemoteReplicasStep( ThrottlerPtr throttler_, Scalars scalars_, Tables external_tables_, - Poco::Logger * log_, + LoggerPtr log_, std::shared_ptr storage_limits_, UUID uuid_) : ISourceStep(DataStream{.header = std::move(header_)}) @@ -326,7 +326,7 @@ void ReadFromParallelRemoteReplicasStep::initializePipeline(QueryPipelineBuilder size_t all_replicas_count = current_settings.max_parallel_replicas; if (all_replicas_count > shard_info.all_addresses.size()) { - 
LOG_INFO(&Poco::Logger::get("ReadFromParallelRemoteReplicasStep"), + LOG_INFO(getLogger("ReadFromParallelRemoteReplicasStep"), "The number of replicas requested ({}) is bigger than the real number available in the cluster ({}). "\ "Will use the latter number to execute the query.", current_settings.max_parallel_replicas, shard_info.all_addresses.size()); all_replicas_count = shard_info.all_addresses.size(); diff --git a/src/Processors/QueryPlan/ReadFromRemote.h b/src/Processors/QueryPlan/ReadFromRemote.h index e1979ee1aaa8..707145e079d7 100644 --- a/src/Processors/QueryPlan/ReadFromRemote.h +++ b/src/Processors/QueryPlan/ReadFromRemote.h @@ -32,7 +32,7 @@ class ReadFromRemote final : public ISourceStep ThrottlerPtr throttler_, Scalars scalars_, Tables external_tables_, - Poco::Logger * log_, + LoggerPtr log_, UInt32 shard_count_, std::shared_ptr storage_limits_); @@ -53,7 +53,7 @@ class ReadFromRemote final : public ISourceStep Scalars scalars; Tables external_tables; std::shared_ptr storage_limits; - Poco::Logger * log; + LoggerPtr log; UInt32 shard_count; void addLazyPipe(Pipes & pipes, const ClusterProxy::SelectStreamFactory::Shard & shard); @@ -76,7 +76,7 @@ class ReadFromParallelRemoteReplicasStep : public ISourceStep ThrottlerPtr throttler_, Scalars scalars_, Tables external_tables_, - Poco::Logger * log_, + LoggerPtr log_, std::shared_ptr storage_limits_, UUID uuid); @@ -103,7 +103,7 @@ class ReadFromParallelRemoteReplicasStep : public ISourceStep Tables external_tables; std::shared_ptr storage_limits; - Poco::Logger * log; + LoggerPtr log; UUID uuid; }; diff --git a/src/Processors/Sources/MySQLSource.cpp b/src/Processors/Sources/MySQLSource.cpp index 9c7e83b3869e..982ba7046444 100644 --- a/src/Processors/Sources/MySQLSource.cpp +++ b/src/Processors/Sources/MySQLSource.cpp @@ -56,7 +56,7 @@ MySQLSource::MySQLSource( const Block & sample_block, const StreamSettings & settings_) : ISource(sample_block.cloneEmpty()) - , log(&Poco::Logger::get("MySQLSource")) + 
, log(getLogger("MySQLSource")) , connection{std::make_unique(entry, query_str)} , settings{std::make_unique(settings_)} { @@ -67,7 +67,7 @@ MySQLSource::MySQLSource( /// For descendant MySQLWithFailoverSource MySQLSource::MySQLSource(const Block &sample_block_, const StreamSettings & settings_) : ISource(sample_block_.cloneEmpty()) - , log(&Poco::Logger::get("MySQLSource")) + , log(getLogger("MySQLSource")) , settings(std::make_unique(settings_)) { description.init(sample_block_); diff --git a/src/Processors/Sources/MySQLSource.h b/src/Processors/Sources/MySQLSource.h index c4d447886c04..fc26ffa3645e 100644 --- a/src/Processors/Sources/MySQLSource.h +++ b/src/Processors/Sources/MySQLSource.h @@ -50,7 +50,7 @@ class MySQLSource : public ISource mysqlxx::UseQueryResult result; }; - Poco::Logger * log; + LoggerPtr log; std::unique_ptr connection; const std::unique_ptr settings; diff --git a/src/Processors/Transforms/AggregatingInOrderTransform.h b/src/Processors/Transforms/AggregatingInOrderTransform.h index af63ac61c3c5..5d50e97f5524 100644 --- a/src/Processors/Transforms/AggregatingInOrderTransform.h +++ b/src/Processors/Transforms/AggregatingInOrderTransform.h @@ -83,7 +83,7 @@ class AggregatingInOrderTransform : public IProcessor Chunk current_chunk; Chunk to_push_chunk; - Poco::Logger * log = &Poco::Logger::get("AggregatingInOrderTransform"); + LoggerPtr log = getLogger("AggregatingInOrderTransform"); }; diff --git a/src/Processors/Transforms/AggregatingTransform.h b/src/Processors/Transforms/AggregatingTransform.h index 048b69adae68..0715f4219d79 100644 --- a/src/Processors/Transforms/AggregatingTransform.h +++ b/src/Processors/Transforms/AggregatingTransform.h @@ -177,7 +177,7 @@ class AggregatingTransform : public IProcessor Processors processors; AggregatingTransformParamsPtr params; - Poco::Logger * log = &Poco::Logger::get("AggregatingTransform"); + LoggerPtr log = getLogger("AggregatingTransform"); ColumnRawPtrs key_columns; Aggregator::AggregateColumns 
aggregate_columns; diff --git a/src/Processors/Transforms/ColumnGathererTransform.cpp b/src/Processors/Transforms/ColumnGathererTransform.cpp index 7c2b93faa913..d7f52a538e1b 100644 --- a/src/Processors/Transforms/ColumnGathererTransform.cpp +++ b/src/Processors/Transforms/ColumnGathererTransform.cpp @@ -128,7 +128,7 @@ ColumnGathererTransform::ColumnGathererTransform( : IMergingTransform( num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false, num_inputs, row_sources_buf_, block_preferred_size_) - , log(&Poco::Logger::get("ColumnGathererStream")) + , log(getLogger("ColumnGathererStream")) { if (header.columns() != 1) throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "Header should have 1 column, but contains {}", diff --git a/src/Processors/Transforms/ColumnGathererTransform.h b/src/Processors/Transforms/ColumnGathererTransform.h index da6dc877abff..283a74092cfd 100644 --- a/src/Processors/Transforms/ColumnGathererTransform.h +++ b/src/Processors/Transforms/ColumnGathererTransform.h @@ -119,7 +119,7 @@ class ColumnGathererTransform final : public IMergingTransformcheckTypesOfKeys(header); join->initialize(header); ExtraBlockPtr tmp; join->joinBlock(header, tmp); - LOG_DEBUG(&Poco::Logger::get("JoiningTransform"), "After join block: '{}'", header.dumpStructure()); + LOG_DEBUG(getLogger("JoiningTransform"), "After join block: '{}'", header.dumpStructure()); return header; } diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index abeef0f9a474..d4266f90fd7c 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -273,7 +273,7 @@ MergeJoinAlgorithm::MergeJoinAlgorithm( size_t max_block_size_) : table_join(table_join_) , max_block_size(max_block_size_) - , log(&Poco::Logger::get("MergeJoinAlgorithm")) + , log(getLogger("MergeJoinAlgorithm")) { if (input_headers.size() != 2) throw 
Exception(ErrorCodes::LOGICAL_ERROR, "MergeJoinAlgorithm requires exactly two inputs"); @@ -847,7 +847,7 @@ MergeJoinTransform::MergeJoinTransform( /* always_read_till_end_= */ false, /* empty_chunk_on_finish_= */ true, table_join, input_headers, max_block_size) - , log(&Poco::Logger::get("MergeJoinTransform")) + , log(getLogger("MergeJoinTransform")) { LOG_TRACE(log, "Use MergeJoinTransform"); } diff --git a/src/Processors/Transforms/MergeJoinTransform.h b/src/Processors/Transforms/MergeJoinTransform.h index 58ac652cb185..9b5838ec0a18 100644 --- a/src/Processors/Transforms/MergeJoinTransform.h +++ b/src/Processors/Transforms/MergeJoinTransform.h @@ -276,7 +276,7 @@ class MergeJoinAlgorithm final : public IMergingAlgorithm Statistic stat; - Poco::Logger * log; + LoggerPtr log; }; class MergeJoinTransform final : public IMergingTransform @@ -296,7 +296,7 @@ class MergeJoinTransform final : public IMergingTransform protected: void onFinish() override; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index eebdd678a4be..ab06ad8b2e23 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -29,7 +29,7 @@ namespace DB class BufferingToFileTransform : public IAccumulatingTransform { public: - BufferingToFileTransform(const Block & header, TemporaryFileStream & tmp_stream_, Poco::Logger * log_) + BufferingToFileTransform(const Block & header, TemporaryFileStream & tmp_stream_, LoggerPtr log_) : IAccumulatingTransform(header, header) , tmp_stream(tmp_stream_) , log(log_) @@ -72,7 +72,7 @@ class BufferingToFileTransform : public IAccumulatingTransform private: TemporaryFileStream & tmp_stream; - Poco::Logger * log; + LoggerPtr log; }; MergeSortingTransform::MergeSortingTransform( diff --git a/src/Processors/Transforms/MergeSortingTransform.h b/src/Processors/Transforms/MergeSortingTransform.h 
index c64c93393ce0..6e8a4bdadae6 100644 --- a/src/Processors/Transforms/MergeSortingTransform.h +++ b/src/Processors/Transforms/MergeSortingTransform.h @@ -51,7 +51,7 @@ class MergeSortingTransform : public SortingTransform size_t sum_rows_in_blocks = 0; size_t sum_bytes_in_blocks = 0; - Poco::Logger * log = &Poco::Logger::get("MergeSortingTransform"); + LoggerPtr log = getLogger("MergeSortingTransform"); /// If remerge doesn't save memory at least several times, mark it as useless and don't do it anymore. bool remerge_is_useful = true; diff --git a/src/Processors/Transforms/MergingAggregatedTransform.h b/src/Processors/Transforms/MergingAggregatedTransform.h index 73e0d8cd0132..ade76b2f3048 100644 --- a/src/Processors/Transforms/MergingAggregatedTransform.h +++ b/src/Processors/Transforms/MergingAggregatedTransform.h @@ -21,7 +21,7 @@ class MergingAggregatedTransform : public IAccumulatingTransform private: AggregatingTransformParamsPtr params; - Poco::Logger * log = &Poco::Logger::get("MergingAggregatedTransform"); + LoggerPtr log = getLogger("MergingAggregatedTransform"); size_t max_threads; AggregatedDataVariants data_variants; diff --git a/src/Processors/Transforms/TTLCalcTransform.cpp b/src/Processors/Transforms/TTLCalcTransform.cpp index 31fb61239ef8..2b4ed96d4e38 100644 --- a/src/Processors/Transforms/TTLCalcTransform.cpp +++ b/src/Processors/Transforms/TTLCalcTransform.cpp @@ -13,7 +13,7 @@ TTLCalcTransform::TTLCalcTransform( bool force_) : IAccumulatingTransform(header_, header_) , data_part(data_part_) - , log(&Poco::Logger::get(storage_.getLogName() + " (TTLCalcTransform)")) + , log(getLogger(storage_.getLogName() + " (TTLCalcTransform)")) { auto old_ttl_infos = data_part->ttl_infos; diff --git a/src/Processors/Transforms/TTLCalcTransform.h b/src/Processors/Transforms/TTLCalcTransform.h index 495879400dce..baa31c01c526 100644 --- a/src/Processors/Transforms/TTLCalcTransform.h +++ b/src/Processors/Transforms/TTLCalcTransform.h @@ -38,7 +38,7 @@ class 
TTLCalcTransform : public IAccumulatingTransform /// ttl_infos and empty_columns are updating while reading const MergeTreeData::MutableDataPartPtr & data_part; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Processors/Transforms/TTLTransform.cpp b/src/Processors/Transforms/TTLTransform.cpp index 3250d012d5cf..cc29d2bb8399 100644 --- a/src/Processors/Transforms/TTLTransform.cpp +++ b/src/Processors/Transforms/TTLTransform.cpp @@ -25,7 +25,7 @@ TTLTransform::TTLTransform( bool force_) : IAccumulatingTransform(header_, header_) , data_part(data_part_) - , log(&Poco::Logger::get(storage_.getLogName() + " (TTLTransform)")) + , log(getLogger(storage_.getLogName() + " (TTLTransform)")) { auto old_ttl_infos = data_part->ttl_infos; diff --git a/src/Processors/Transforms/TTLTransform.h b/src/Processors/Transforms/TTLTransform.h index 3f0dffd19989..3606db7f4c2c 100644 --- a/src/Processors/Transforms/TTLTransform.h +++ b/src/Processors/Transforms/TTLTransform.h @@ -42,7 +42,7 @@ class TTLTransform : public IAccumulatingTransform /// ttl_infos and empty_columns are updating while reading const MergeTreeData::MutableDataPartPtr & data_part; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Processors/Transforms/buildPushingToViewsChain.cpp b/src/Processors/Transforms/buildPushingToViewsChain.cpp index 5ab1e811efb0..b76bc7411988 100644 --- a/src/Processors/Transforms/buildPushingToViewsChain.cpp +++ b/src/Processors/Transforms/buildPushingToViewsChain.cpp @@ -261,7 +261,7 @@ Chain buildPushingToViewsChain( if (view == nullptr) { LOG_WARNING( - &Poco::Logger::get("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName()); + getLogger("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName()); continue; } @@ -310,7 +310,7 @@ Chain buildPushingToViewsChain( // In case the materialized view is dropped/detached at this point, we register a warning and ignore it 
assert(materialized_view->is_dropped || materialized_view->is_detached); LOG_WARNING( - &Poco::Logger::get("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName()); + getLogger("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName()); continue; } @@ -796,14 +796,14 @@ void FinalizingViewsTransform::work() /// Exception will be ignored, it is saved here for the system.query_views_log if (materialized_views_ignore_errors) - tryLogException(view.exception, &Poco::Logger::get("PushingToViews"), "Cannot push to the storage, ignoring the error"); + tryLogException(view.exception, getLogger("PushingToViews"), "Cannot push to the storage, ignoring the error"); } else { view.runtime_stats->setStatus(QueryViewsLogElement::ViewStatus::QUERY_FINISH); LOG_TRACE( - &Poco::Logger::get("PushingToViews"), + getLogger("PushingToViews"), "Pushing ({}) from {} to {} took {} ms.", views_data->max_threads <= 1 ? "sequentially" : ("parallel " + std::to_string(views_data->max_threads)), views_data->source_storage_id.getNameForLogs(), diff --git a/src/QueryPipeline/RemoteQueryExecutor.h b/src/QueryPipeline/RemoteQueryExecutor.h index 576efb97bb13..37bbd4b8380e 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.h +++ b/src/QueryPipeline/RemoteQueryExecutor.h @@ -175,7 +175,7 @@ class RemoteQueryExecutor void setMainTable(StorageID main_table_) { main_table = std::move(main_table_); } - void setLogger(Poco::Logger * logger) { log = logger; } + void setLogger(LoggerPtr logger) { log = logger; } const Block & getHeader() const { return header; } @@ -261,7 +261,7 @@ class RemoteQueryExecutor PoolMode pool_mode = PoolMode::GET_MANY; StorageID main_table = StorageID::createEmpty(); - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; /// Send all scalars to remote servers void sendScalars(); diff --git a/src/Server/CertificateReloader.cpp b/src/Server/CertificateReloader.cpp index 8795d4807de1..c974f450c9a3 
100644 --- a/src/Server/CertificateReloader.cpp +++ b/src/Server/CertificateReloader.cpp @@ -105,7 +105,7 @@ CertificateReloader::Data::Data(std::string cert_path, std::string key_path, std } -bool CertificateReloader::File::changeIfModified(std::string new_path, Poco::Logger * logger) +bool CertificateReloader::File::changeIfModified(std::string new_path, LoggerPtr logger) { std::error_code ec; std::filesystem::file_time_type new_modification_time = std::filesystem::last_write_time(new_path, ec); diff --git a/src/Server/CertificateReloader.h b/src/Server/CertificateReloader.h index 9f04179b8d6c..028914e682f1 100644 --- a/src/Server/CertificateReloader.h +++ b/src/Server/CertificateReloader.h @@ -14,6 +14,7 @@ #include #include #include +#include namespace DB @@ -51,7 +52,7 @@ class CertificateReloader private: CertificateReloader() = default; - Poco::Logger * log = &Poco::Logger::get("CertificateReloader"); + LoggerPtr log = getLogger("CertificateReloader"); struct File { @@ -61,7 +62,7 @@ class CertificateReloader std::string path; std::filesystem::file_time_type modification_time; - bool changeIfModified(std::string new_path, Poco::Logger * logger); + bool changeIfModified(std::string new_path, LoggerPtr logger); }; File cert_file{"certificate"}; diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index a6924310c3c6..610827b5279c 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -75,7 +75,7 @@ namespace static std::once_flag once_flag; std::call_once(once_flag, [&config] { - static Poco::Logger * logger = &Poco::Logger::get("grpc"); + static LoggerPtr logger = getLogger("grpc"); gpr_set_log_function([](gpr_log_func_args* args) { if (args->severity == GPR_LOG_SEVERITY_DEBUG) @@ -612,7 +612,7 @@ namespace class Call { public: - Call(CallType call_type_, std::unique_ptr responder_, IServer & iserver_, Poco::Logger * log_); + Call(CallType call_type_, std::unique_ptr responder_, IServer & iserver_, LoggerPtr log_); ~Call(); 
void start(const std::function & on_finish_call_callback); @@ -654,7 +654,7 @@ namespace const CallType call_type; std::unique_ptr responder; IServer & iserver; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; std::optional session; ContextMutablePtr query_context; @@ -715,7 +715,7 @@ namespace ThreadFromGlobalPool call_thread; }; - Call::Call(CallType call_type_, std::unique_ptr responder_, IServer & iserver_, Poco::Logger * log_) + Call::Call(CallType call_type_, std::unique_ptr responder_, IServer & iserver_, LoggerPtr log_) : call_type(call_type_), responder(std::move(responder_)), iserver(iserver_), log(log_) { } @@ -1840,7 +1840,7 @@ class GRPCServer::Runner GRPCServer::GRPCServer(IServer & iserver_, const Poco::Net::SocketAddress & address_to_listen_) : iserver(iserver_) , address_to_listen(address_to_listen_) - , log(&Poco::Logger::get("GRPCServer")) + , log(getLogger("GRPCServer")) , runner(std::make_unique(*this)) {} diff --git a/src/Server/GRPCServer.h b/src/Server/GRPCServer.h index 359a2506e953..a9c8161298fc 100644 --- a/src/Server/GRPCServer.h +++ b/src/Server/GRPCServer.h @@ -5,6 +5,7 @@ #if USE_GRPC #include #include +#include #include "clickhouse_grpc.grpc.pb.h" namespace Poco { class Logger; } @@ -46,7 +47,7 @@ class GRPCServer IServer & iserver; const Poco::Net::SocketAddress address_to_listen; - Poco::Logger * log; + LoggerPtr log; GRPCService grpc_service; std::unique_ptr grpc_server; std::unique_ptr queue; diff --git a/src/Server/HTTP/HTTPServerRequest.cpp b/src/Server/HTTP/HTTPServerRequest.cpp index 891ac39c9315..873972e4b60d 100644 --- a/src/Server/HTTP/HTTPServerRequest.cpp +++ b/src/Server/HTTP/HTTPServerRequest.cpp @@ -65,7 +65,7 @@ HTTPServerRequest::HTTPServerRequest(HTTPContextPtr context, HTTPServerResponse { stream = std::move(in); if (!startsWith(getContentType(), "multipart/form-data")) - LOG_WARNING(LogFrequencyLimiter(&Poco::Logger::get("HTTPServerRequest"), 10), "Got an HTTP request with no content length " + 
LOG_WARNING(LogFrequencyLimiter(getLogger("HTTPServerRequest"), 10), "Got an HTTP request with no content length " "and no chunked/multipart encoding, it may be impossible to distinguish graceful EOF from abnormal connection loss"); } else diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index bfdc067f733d..cf119f37f75a 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -130,7 +130,7 @@ bool tryAddHttpOptionHeadersFromConfig(HTTPServerResponse & response, const Poco { /// If there is empty header name, it will not be processed and message about it will be in logs if (config.getString("http_options_response." + config_key + ".name", "").empty()) - LOG_WARNING(&Poco::Logger::get("processOptionsRequest"), "Empty header was found in config. It will not be processed."); + LOG_WARNING(getLogger("processOptionsRequest"), "Empty header was found in config. It will not be processed."); else response.add(config.getString("http_options_response." + config_key + ".name", ""), config.getString("http_options_response." + config_key + ".value", "")); @@ -313,7 +313,7 @@ void HTTPHandler::pushDelayedResults(Output & used_output) HTTPHandler::HTTPHandler(IServer & server_, const std::string & name, const std::optional & content_type_override_) : server(server_) - , log(&Poco::Logger::get(name)) + , log(getLogger(name)) , default_settings(server.context()->getSettingsRef()) , content_type_override(content_type_override_) { diff --git a/src/Server/HTTPHandler.h b/src/Server/HTTPHandler.h index 5eda5927538e..5ec7d2e86e9a 100644 --- a/src/Server/HTTPHandler.h +++ b/src/Server/HTTPHandler.h @@ -88,7 +88,7 @@ class HTTPHandler : public HTTPRequestHandler }; IServer & server; - Poco::Logger * log; + LoggerPtr log; /// It is the name of the server that will be sent in an http-header X-ClickHouse-Server-Display-Name. 
String server_display_name; diff --git a/src/Server/HTTPRequestHandlerFactoryMain.cpp b/src/Server/HTTPRequestHandlerFactoryMain.cpp index 61a2909d30fd..15c0286c6c13 100644 --- a/src/Server/HTTPRequestHandlerFactoryMain.cpp +++ b/src/Server/HTTPRequestHandlerFactoryMain.cpp @@ -7,7 +7,7 @@ namespace DB { HTTPRequestHandlerFactoryMain::HTTPRequestHandlerFactoryMain(const std::string & name_) - : log(&Poco::Logger::get(name_)), name(name_) + : log(getLogger(name_)), name(name_) { } diff --git a/src/Server/HTTPRequestHandlerFactoryMain.h b/src/Server/HTTPRequestHandlerFactoryMain.h index b0e57bd6b3b7..056b9f7ef0c1 100644 --- a/src/Server/HTTPRequestHandlerFactoryMain.h +++ b/src/Server/HTTPRequestHandlerFactoryMain.h @@ -18,7 +18,7 @@ class HTTPRequestHandlerFactoryMain : public HTTPRequestHandlerFactory std::unique_ptr createRequestHandler(const HTTPServerRequest & request) override; private: - Poco::Logger * log; + LoggerPtr log; std::string name; std::vector child_factories; diff --git a/src/Server/InterserverIOHTTPHandler.h b/src/Server/InterserverIOHTTPHandler.h index da5b286b9e5e..040896968a2c 100644 --- a/src/Server/InterserverIOHTTPHandler.h +++ b/src/Server/InterserverIOHTTPHandler.h @@ -26,7 +26,7 @@ class InterserverIOHTTPHandler : public HTTPRequestHandler public: explicit InterserverIOHTTPHandler(IServer & server_) : server(server_) - , log(&Poco::Logger::get("InterserverIOHTTPHandler")) + , log(getLogger("InterserverIOHTTPHandler")) { } @@ -39,7 +39,7 @@ class InterserverIOHTTPHandler : public HTTPRequestHandler }; IServer & server; - Poco::Logger * log; + LoggerPtr log; CurrentMetrics::Increment metric_increment{CurrentMetrics::InterserverConnection}; diff --git a/src/Server/KeeperTCPHandler.cpp b/src/Server/KeeperTCPHandler.cpp index dec11b5a7136..0b12c65e8837 100644 --- a/src/Server/KeeperTCPHandler.cpp +++ b/src/Server/KeeperTCPHandler.cpp @@ -219,7 +219,7 @@ KeeperTCPHandler::KeeperTCPHandler( Poco::Timespan send_timeout_, const 
Poco::Net::StreamSocket & socket_) : Poco::Net::TCPServerConnection(socket_) - , log(&Poco::Logger::get("KeeperTCPHandler")) + , log(getLogger("KeeperTCPHandler")) , keeper_dispatcher(keeper_dispatcher_) , operation_timeout( 0, diff --git a/src/Server/KeeperTCPHandler.h b/src/Server/KeeperTCPHandler.h index ffdd50b805ab..6e0142fdac5f 100644 --- a/src/Server/KeeperTCPHandler.h +++ b/src/Server/KeeperTCPHandler.h @@ -61,7 +61,7 @@ class KeeperTCPHandler : public Poco::Net::TCPServerConnection ~KeeperTCPHandler() override; private: - Poco::Logger * log; + LoggerPtr log; std::shared_ptr keeper_dispatcher; Poco::Timespan operation_timeout; Poco::Timespan min_session_timeout; diff --git a/src/Server/KeeperTCPHandlerFactory.h b/src/Server/KeeperTCPHandlerFactory.h index 36f284442c6c..239bf8b55247 100644 --- a/src/Server/KeeperTCPHandlerFactory.h +++ b/src/Server/KeeperTCPHandlerFactory.h @@ -17,7 +17,7 @@ class KeeperTCPHandlerFactory : public TCPServerConnectionFactory private: ConfigGetter config_getter; std::shared_ptr keeper_dispatcher; - Poco::Logger * log; + LoggerPtr log; Poco::Timespan receive_timeout; Poco::Timespan send_timeout; @@ -37,7 +37,7 @@ class KeeperTCPHandlerFactory : public TCPServerConnectionFactory bool secure) : config_getter(config_getter_) , keeper_dispatcher(keeper_dispatcher_) - , log(&Poco::Logger::get(std::string{"KeeperTCP"} + (secure ? "S" : "") + "HandlerFactory")) + , log(getLogger(std::string{"KeeperTCP"} + (secure ? 
"S" : "") + "HandlerFactory")) , receive_timeout(/* seconds = */ receive_timeout_seconds, /* microseconds = */ 0) , send_timeout(/* seconds = */ send_timeout_seconds, /* microseconds = */ 0) { diff --git a/src/Server/MySQLHandler.cpp b/src/Server/MySQLHandler.cpp index 40cc51f8aae5..1b782878243e 100644 --- a/src/Server/MySQLHandler.cpp +++ b/src/Server/MySQLHandler.cpp @@ -71,7 +71,7 @@ MySQLHandler::MySQLHandler( : Poco::Net::TCPServerConnection(socket_) , server(server_) , tcp_server(tcp_server_) - , log(&Poco::Logger::get("MySQLHandler")) + , log(getLogger("MySQLHandler")) , connection_id(connection_id_) , auth_plugin(new MySQLProtocol::Authentication::Native41()) { diff --git a/src/Server/MySQLHandler.h b/src/Server/MySQLHandler.h index 3366e8792c9e..44b52c304c0e 100644 --- a/src/Server/MySQLHandler.h +++ b/src/Server/MySQLHandler.h @@ -61,7 +61,7 @@ class MySQLHandler : public Poco::Net::TCPServerConnection IServer & server; TCPServer & tcp_server; - Poco::Logger * log; + LoggerPtr log; uint32_t connection_id = 0; uint32_t server_capabilities = 0; diff --git a/src/Server/MySQLHandlerFactory.cpp b/src/Server/MySQLHandlerFactory.cpp index deadb10f9a95..7d2ee4e835ae 100644 --- a/src/Server/MySQLHandlerFactory.cpp +++ b/src/Server/MySQLHandlerFactory.cpp @@ -23,7 +23,7 @@ namespace ErrorCodes MySQLHandlerFactory::MySQLHandlerFactory(IServer & server_) : server(server_) - , log(&Poco::Logger::get("MySQLHandlerFactory")) + , log(getLogger("MySQLHandlerFactory")) { #if USE_SSL try diff --git a/src/Server/MySQLHandlerFactory.h b/src/Server/MySQLHandlerFactory.h index fa4ce93f7655..df891261cb93 100644 --- a/src/Server/MySQLHandlerFactory.h +++ b/src/Server/MySQLHandlerFactory.h @@ -19,7 +19,7 @@ class MySQLHandlerFactory : public TCPServerConnectionFactory { private: IServer & server; - Poco::Logger * log; + LoggerPtr log; #if USE_SSL struct RSADeleter diff --git a/src/Server/PostgreSQLHandler.h b/src/Server/PostgreSQLHandler.h index 6fc128e3883e..143cdfcd7620 100644 
--- a/src/Server/PostgreSQLHandler.h +++ b/src/Server/PostgreSQLHandler.h @@ -39,7 +39,7 @@ class PostgreSQLHandler : public Poco::Net::TCPServerConnection void run() final; private: - Poco::Logger * log = &Poco::Logger::get("PostgreSQLHandler"); + LoggerPtr log = getLogger("PostgreSQLHandler"); IServer & server; TCPServer & tcp_server; diff --git a/src/Server/PostgreSQLHandlerFactory.cpp b/src/Server/PostgreSQLHandlerFactory.cpp index 6f2124861e7e..588c7910a886 100644 --- a/src/Server/PostgreSQLHandlerFactory.cpp +++ b/src/Server/PostgreSQLHandlerFactory.cpp @@ -7,7 +7,7 @@ namespace DB PostgreSQLHandlerFactory::PostgreSQLHandlerFactory(IServer & server_) : server(server_) - , log(&Poco::Logger::get("PostgreSQLHandlerFactory")) + , log(getLogger("PostgreSQLHandlerFactory")) { auth_methods = { diff --git a/src/Server/PostgreSQLHandlerFactory.h b/src/Server/PostgreSQLHandlerFactory.h index 35046325386b..f70ffd8d3304 100644 --- a/src/Server/PostgreSQLHandlerFactory.h +++ b/src/Server/PostgreSQLHandlerFactory.h @@ -14,7 +14,7 @@ class PostgreSQLHandlerFactory : public TCPServerConnectionFactory { private: IServer & server; - Poco::Logger * log; + LoggerPtr log; #if USE_SSL bool ssl_enabled = true; diff --git a/src/Server/ProxyV1Handler.h b/src/Server/ProxyV1Handler.h index e56f4cd35451..d8cb8a785c9f 100644 --- a/src/Server/ProxyV1Handler.h +++ b/src/Server/ProxyV1Handler.h @@ -14,7 +14,7 @@ class ProxyV1Handler : public Poco::Net::TCPServerConnection using StreamSocket = Poco::Net::StreamSocket; public: explicit ProxyV1Handler(const StreamSocket & socket, IServer & server_, const std::string & conf_name_, TCPProtocolStackData & stack_data_) - : Poco::Net::TCPServerConnection(socket), log(&Poco::Logger::get("ProxyV1Handler")), server(server_), conf_name(conf_name_), stack_data(stack_data_) {} + : Poco::Net::TCPServerConnection(socket), log(getLogger("ProxyV1Handler")), server(server_), conf_name(conf_name_), stack_data(stack_data_) {} void run() override; @@ -22,7 
+22,7 @@ class ProxyV1Handler : public Poco::Net::TCPServerConnection bool readWord(int max_len, std::string & word, bool & eol); private: - Poco::Logger * log; + LoggerPtr log; IServer & server; std::string conf_name; TCPProtocolStackData & stack_data; diff --git a/src/Server/ProxyV1HandlerFactory.h b/src/Server/ProxyV1HandlerFactory.h index 028596d745d0..0398c8c1ccff 100644 --- a/src/Server/ProxyV1HandlerFactory.h +++ b/src/Server/ProxyV1HandlerFactory.h @@ -16,7 +16,7 @@ class ProxyV1HandlerFactory : public TCPServerConnectionFactory { private: IServer & server; - Poco::Logger * log; + LoggerPtr log; std::string conf_name; class DummyTCPHandler : public Poco::Net::TCPServerConnection @@ -28,7 +28,7 @@ class ProxyV1HandlerFactory : public TCPServerConnectionFactory public: explicit ProxyV1HandlerFactory(IServer & server_, const std::string & conf_name_) - : server(server_), log(&Poco::Logger::get("ProxyV1HandlerFactory")), conf_name(conf_name_) + : server(server_), log(getLogger("ProxyV1HandlerFactory")), conf_name(conf_name_) { } diff --git a/src/Server/ReplicasStatusHandler.cpp b/src/Server/ReplicasStatusHandler.cpp index 8c0ab0c1a3bd..e9b77f7061cd 100644 --- a/src/Server/ReplicasStatusHandler.cpp +++ b/src/Server/ReplicasStatusHandler.cpp @@ -111,7 +111,7 @@ void ReplicasStatusHandler::handleRequest(HTTPServerRequest & request, HTTPServe } catch (...) 
{ - LOG_ERROR((&Poco::Logger::get("ReplicasStatusHandler")), "Cannot send exception to client"); + LOG_ERROR((getLogger("ReplicasStatusHandler")), "Cannot send exception to client"); } } } diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 6eadd9a54f0f..7da11a06fea2 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -123,7 +123,7 @@ TCPHandler::TCPHandler(IServer & server_, TCPServer & tcp_server_, const Poco::N , server(server_) , tcp_server(tcp_server_) , parse_proxy_protocol(parse_proxy_protocol_) - , log(&Poco::Logger::get("TCPHandler")) + , log(getLogger("TCPHandler")) , server_display_name(std::move(server_display_name_)) { } @@ -132,7 +132,7 @@ TCPHandler::TCPHandler(IServer & server_, TCPServer & tcp_server_, const Poco::N : Poco::Net::TCPServerConnection(socket_) , server(server_) , tcp_server(tcp_server_) - , log(&Poco::Logger::get("TCPHandler")) + , log(getLogger("TCPHandler")) , forwarded_for(stack_data.forwarded_for) , certificate(stack_data.certificate) , default_database(stack_data.default_database) diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index d900bc71ab65..3d5b2355e6e6 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -159,7 +159,7 @@ class TCPHandler : public Poco::Net::TCPServerConnection IServer & server; TCPServer & tcp_server; bool parse_proxy_protocol = false; - Poco::Logger * log; + LoggerPtr log; String forwarded_for; String certificate; diff --git a/src/Server/TCPHandlerFactory.h b/src/Server/TCPHandlerFactory.h index fde04c6e0ab4..a8f0b11e6daa 100644 --- a/src/Server/TCPHandlerFactory.h +++ b/src/Server/TCPHandlerFactory.h @@ -18,7 +18,7 @@ class TCPHandlerFactory : public TCPServerConnectionFactory private: IServer & server; bool parse_proxy_protocol = false; - Poco::Logger * log; + LoggerPtr log; std::string server_display_name; class DummyTCPHandler : public Poco::Net::TCPServerConnection @@ -35,7 +35,7 @@ class TCPHandlerFactory : public 
TCPServerConnectionFactory */ TCPHandlerFactory(IServer & server_, bool secure_, bool parse_proxy_protocol_) : server(server_), parse_proxy_protocol(parse_proxy_protocol_) - , log(&Poco::Logger::get(std::string("TCP") + (secure_ ? "S" : "") + "HandlerFactory")) + , log(getLogger(std::string("TCP") + (secure_ ? "S" : "") + "HandlerFactory")) { server_display_name = server.config().getString("display_name", getFQDNOrHostName()); } diff --git a/src/Server/TCPProtocolStackFactory.h b/src/Server/TCPProtocolStackFactory.h index 7373e6e1c4ea..b76bb8d72fdf 100644 --- a/src/Server/TCPProtocolStackFactory.h +++ b/src/Server/TCPProtocolStackFactory.h @@ -23,7 +23,7 @@ class TCPProtocolStackFactory : public TCPServerConnectionFactory { private: IServer & server [[maybe_unused]]; - Poco::Logger * log; + LoggerPtr log; std::string conf_name; std::vector stack; AllowedClientHosts allowed_client_hosts; @@ -38,7 +38,7 @@ class TCPProtocolStackFactory : public TCPServerConnectionFactory public: template explicit TCPProtocolStackFactory(IServer & server_, const std::string & conf_name_, T... factory) - : server(server_), log(&Poco::Logger::get("TCPProtocolStackFactory")), conf_name(conf_name_), stack({factory...}) + : server(server_), log(getLogger("TCPProtocolStackFactory")), conf_name(conf_name_), stack({factory...}) { const auto & config = server.config(); /// Fill list of allowed hosts. 
diff --git a/src/Server/TLSHandlerFactory.h b/src/Server/TLSHandlerFactory.h index 9e3002d29719..19602c7d25e9 100644 --- a/src/Server/TLSHandlerFactory.h +++ b/src/Server/TLSHandlerFactory.h @@ -19,7 +19,7 @@ class TLSHandlerFactory : public TCPServerConnectionFactory { private: IServer & server; - Poco::Logger * log; + LoggerPtr log; std::string conf_name; class DummyTCPHandler : public Poco::Net::TCPServerConnection @@ -31,7 +31,7 @@ class TLSHandlerFactory : public TCPServerConnectionFactory public: explicit TLSHandlerFactory(IServer & server_, const std::string & conf_name_) - : server(server_), log(&Poco::Logger::get("TLSHandlerFactory")), conf_name(conf_name_) + : server(server_), log(getLogger("TLSHandlerFactory")), conf_name(conf_name_) { } diff --git a/src/Storages/Cache/ExternalDataSourceCache.h b/src/Storages/Cache/ExternalDataSourceCache.h index 937801c47671..a5dea2f63db8 100644 --- a/src/Storages/Cache/ExternalDataSourceCache.h +++ b/src/Storages/Cache/ExternalDataSourceCache.h @@ -91,7 +91,7 @@ class ExternalDataSourceCache : private boost::noncopyable std::mutex mutex; std::unique_ptr lru_caches; - Poco::Logger * log = &Poco::Logger::get("ExternalDataSourceCache"); + LoggerPtr log = getLogger("ExternalDataSourceCache"); String calculateLocalPath(IRemoteFileMetadataPtr meta) const; diff --git a/src/Storages/Cache/RemoteCacheController.cpp b/src/Storages/Cache/RemoteCacheController.cpp index b72f5336ea47..403d0c8e43b9 100644 --- a/src/Storages/Cache/RemoteCacheController.cpp +++ b/src/Storages/Cache/RemoteCacheController.cpp @@ -20,7 +20,7 @@ namespace ErrorCodes std::shared_ptr RemoteCacheController::recover(const std::filesystem::path & local_path_) { - auto * log = &Poco::Logger::get("RemoteCacheController"); + auto log = getLogger("RemoteCacheController"); if (!std::filesystem::exists(local_path_ / "data.bin")) { diff --git a/src/Storages/Cache/RemoteCacheController.h b/src/Storages/Cache/RemoteCacheController.h index 18732acc2737..414663dd9904 
100644 --- a/src/Storages/Cache/RemoteCacheController.h +++ b/src/Storages/Cache/RemoteCacheController.h @@ -117,7 +117,7 @@ class RemoteCacheController //std::shared_ptr remote_read_buffer; std::unique_ptr data_file_writer; - Poco::Logger * log = &Poco::Logger::get("RemoteCacheController"); + LoggerPtr log = getLogger("RemoteCacheController"); }; using RemoteCacheControllerPtr = std::shared_ptr; diff --git a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp index 9a9a6651bc46..7b3d059d4d61 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp @@ -53,7 +53,7 @@ namespace { template -ConnectionPoolPtrs createPoolsForAddresses(const std::string & name, PoolFactory && factory, const Cluster::ShardsInfo & shards_info, Poco::Logger * log) +ConnectionPoolPtrs createPoolsForAddresses(const std::string & name, PoolFactory && factory, const Cluster::ShardsInfo & shards_info, LoggerPtr log) { ConnectionPoolPtrs pools; @@ -136,7 +136,7 @@ DistributedAsyncInsertDirectoryQueue::DistributedAsyncInsertDirectoryQueue( , default_sleep_time(storage.getDistributedSettingsRef().monitor_sleep_time_ms.totalMilliseconds()) , sleep_time(default_sleep_time) , max_sleep_time(storage.getDistributedSettingsRef().monitor_max_sleep_time_ms.totalMilliseconds()) - , log(&Poco::Logger::get(getLoggerName())) + , log(getLogger(getLoggerName())) , monitor_blocker(monitor_blocker_) , metric_pending_files(CurrentMetrics::DistributedFilesToInsert, 0) , metric_broken_files(CurrentMetrics::BrokenDistributedFilesToInsert, 0) diff --git a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.h b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.h index de8bb8138242..6039f65c2281 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.h +++ 
b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.h @@ -144,7 +144,7 @@ class DistributedAsyncInsertDirectoryQueue const std::chrono::milliseconds max_sleep_time; std::chrono::time_point last_decrease_time {std::chrono::system_clock::now()}; std::mutex mutex; - Poco::Logger * log; + LoggerPtr log; ActionBlocker & monitor_blocker; BackgroundSchedulePoolTaskHolder task_handle; diff --git a/src/Storages/Distributed/DistributedAsyncInsertHeader.cpp b/src/Storages/Distributed/DistributedAsyncInsertHeader.cpp index 018c1d863bba..166d6ab75edc 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertHeader.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertHeader.cpp @@ -18,7 +18,7 @@ namespace ErrorCodes extern const int CHECKSUM_DOESNT_MATCH; } -DistributedAsyncInsertHeader DistributedAsyncInsertHeader::read(ReadBufferFromFile & in, Poco::Logger * log) +DistributedAsyncInsertHeader DistributedAsyncInsertHeader::read(ReadBufferFromFile & in, LoggerPtr log) { DistributedAsyncInsertHeader distributed_header; diff --git a/src/Storages/Distributed/DistributedAsyncInsertHeader.h b/src/Storages/Distributed/DistributedAsyncInsertHeader.h index a7330fa5ef1b..fb4b46964637 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertHeader.h +++ b/src/Storages/Distributed/DistributedAsyncInsertHeader.h @@ -38,7 +38,7 @@ struct DistributedAsyncInsertHeader std::string block_header_string; Block block_header; - static DistributedAsyncInsertHeader read(ReadBufferFromFile & in, Poco::Logger * log); + static DistributedAsyncInsertHeader read(ReadBufferFromFile & in, LoggerPtr log); OpenTelemetry::TracingContextHolderPtr createTracingContextHolder(const char * function, std::shared_ptr open_telemetry_span_log) const; }; diff --git a/src/Storages/Distributed/DistributedAsyncInsertHelpers.cpp b/src/Storages/Distributed/DistributedAsyncInsertHelpers.cpp index 98073ba1e089..a9bdef31711d 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertHelpers.cpp +++ 
b/src/Storages/Distributed/DistributedAsyncInsertHelpers.cpp @@ -72,7 +72,7 @@ void writeRemoteConvert( RemoteInserter & remote, bool compression_expected, ReadBufferFromFile & in, - Poco::Logger * log) + LoggerPtr log) { if (!remote.getHeader()) { diff --git a/src/Storages/Distributed/DistributedAsyncInsertHelpers.h b/src/Storages/Distributed/DistributedAsyncInsertHelpers.h index 9543450418ca..202d9ff6fff8 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertHelpers.h +++ b/src/Storages/Distributed/DistributedAsyncInsertHelpers.h @@ -1,9 +1,7 @@ #pragma once -namespace Poco -{ -class Logger; -} +#include + namespace DB { @@ -30,6 +28,6 @@ void writeRemoteConvert( RemoteInserter & remote, bool compression_expected, ReadBufferFromFile & in, - Poco::Logger * log); + LoggerPtr log); } diff --git a/src/Storages/Distributed/DistributedAsyncInsertSource.cpp b/src/Storages/Distributed/DistributedAsyncInsertSource.cpp index 7992636ac112..33e53da2857f 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertSource.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertSource.cpp @@ -10,7 +10,7 @@ namespace DB struct DistributedAsyncInsertSource::Data { - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; ReadBufferFromFile in; CompressedReadBuffer decompressing_in; @@ -19,7 +19,7 @@ struct DistributedAsyncInsertSource::Data Block first_block; explicit Data(const String & file_name) - : log(&Poco::Logger::get("DistributedAsyncInsertSource")) + : log(getLogger("DistributedAsyncInsertSource")) , in(file_name) , decompressing_in(in) , block_in(decompressing_in, DistributedAsyncInsertHeader::read(in, log).revision) diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index 720a951299a1..178b9da7fb89 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -63,7 +63,7 @@ namespace ErrorCodes extern const int ABORTED; } -static Block adoptBlock(const Block & 
header, const Block & block, Poco::Logger * log) +static Block adoptBlock(const Block & header, const Block & block, LoggerPtr log) { if (blocksHaveEqualStructure(header, block)) return block; @@ -85,7 +85,7 @@ static Block adoptBlock(const Block & header, const Block & block, Poco::Logger } -static void writeBlockConvert(PushingPipelineExecutor & executor, const Block & block, size_t repeats, Poco::Logger * log) +static void writeBlockConvert(PushingPipelineExecutor & executor, const Block & block, size_t repeats, LoggerPtr log) { Block adopted_block = adoptBlock(executor.getHeader(), block, log); for (size_t i = 0; i < repeats; ++i) @@ -127,7 +127,7 @@ DistributedSink::DistributedSink( , insert_timeout(insert_timeout_) , main_table(main_table_) , columns_to_send(columns_to_send_.begin(), columns_to_send_.end()) - , log(&Poco::Logger::get("DistributedSink")) + , log(getLogger("DistributedSink")) { const auto & settings = context->getSettingsRef(); if (settings.max_distributed_depth && context->getClientInfo().distributed_depth >= settings.max_distributed_depth) diff --git a/src/Storages/Distributed/DistributedSink.h b/src/Storages/Distributed/DistributedSink.h index 1bb4419e1a56..654c1db354f3 100644 --- a/src/Storages/Distributed/DistributedSink.h +++ b/src/Storages/Distributed/DistributedSink.h @@ -152,7 +152,7 @@ class DistributedSink : public SinkToStorage std::atomic finished_jobs_count{0}; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/FileLog/FileLogDirectoryWatcher.cpp b/src/Storages/FileLog/FileLogDirectoryWatcher.cpp index 3c5342a1e832..57121b075071 100644 --- a/src/Storages/FileLog/FileLogDirectoryWatcher.cpp +++ b/src/Storages/FileLog/FileLogDirectoryWatcher.cpp @@ -5,7 +5,7 @@ namespace DB FileLogDirectoryWatcher::FileLogDirectoryWatcher(const std::string & path_, StorageFileLog & storage_, ContextPtr context_) : path(path_) , storage(storage_) - , log(&Poco::Logger::get("FileLogDirectoryWatcher(" + path + ")")) + , 
log(getLogger("FileLogDirectoryWatcher(" + path + ")")) , dw(std::make_unique(*this, path, context_)) { } diff --git a/src/Storages/FileLog/FileLogDirectoryWatcher.h b/src/Storages/FileLog/FileLogDirectoryWatcher.h index 4368f284c9dc..77e5d62616f3 100644 --- a/src/Storages/FileLog/FileLogDirectoryWatcher.h +++ b/src/Storages/FileLog/FileLogDirectoryWatcher.h @@ -66,7 +66,7 @@ class FileLogDirectoryWatcher /// accessed in thread created by dw. Events events; - Poco::Logger * log; + LoggerPtr log; std::mutex mutex; diff --git a/src/Storages/FileLog/ReadBufferFromFileLog.cpp b/src/Storages/FileLog/ReadBufferFromFileLog.cpp index 23db01eaefc7..d7b7aae65397 100644 --- a/src/Storages/FileLog/ReadBufferFromFileLog.cpp +++ b/src/Storages/FileLog/ReadBufferFromFileLog.cpp @@ -22,7 +22,7 @@ ReadBufferFromFileLog::ReadBufferFromFileLog( size_t stream_number_, size_t max_streams_number_) : ReadBuffer(nullptr, 0) - , log(&Poco::Logger::get("ReadBufferFromFileLog " + toString(stream_number_))) + , log(getLogger("ReadBufferFromFileLog " + toString(stream_number_))) , storage(storage_) , batch_size(max_batch_size) , poll_timeout(poll_timeout_) diff --git a/src/Storages/FileLog/ReadBufferFromFileLog.h b/src/Storages/FileLog/ReadBufferFromFileLog.h index 5991fe29b70e..a6cfe65bd4bc 100644 --- a/src/Storages/FileLog/ReadBufferFromFileLog.h +++ b/src/Storages/FileLog/ReadBufferFromFileLog.h @@ -43,7 +43,7 @@ class ReadBufferFromFileLog : public ReadBuffer BufferStatus buffer_status = BufferStatus::INIT; - Poco::Logger * log; + LoggerPtr log; StorageFileLog & storage; diff --git a/src/Storages/FileLog/StorageFileLog.cpp b/src/Storages/FileLog/StorageFileLog.cpp index dae6f6a7ca99..6b1a72f64189 100644 --- a/src/Storages/FileLog/StorageFileLog.cpp +++ b/src/Storages/FileLog/StorageFileLog.cpp @@ -65,7 +65,7 @@ StorageFileLog::StorageFileLog( , path(path_) , metadata_base_path(std::filesystem::path(metadata_base_path_) / "metadata") , format_name(format_name_) - , 
log(&Poco::Logger::get("StorageFileLog (" + table_id_.table_name + ")")) + , log(getLogger("StorageFileLog (" + table_id_.table_name + ")")) , disk(getContext()->getStoragePolicy("default")->getDisks().at(0)) , milliseconds_to_wait(filelog_settings->poll_directory_watch_events_backoff_init.totalMilliseconds()) { diff --git a/src/Storages/FileLog/StorageFileLog.h b/src/Storages/FileLog/StorageFileLog.h index c0c5ac904b53..804d13cb1734 100644 --- a/src/Storages/FileLog/StorageFileLog.h +++ b/src/Storages/FileLog/StorageFileLog.h @@ -148,7 +148,7 @@ class StorageFileLog final : public IStorage, WithContext FileInfos file_infos; const String format_name; - Poco::Logger * log; + LoggerPtr log; DiskPtr disk; diff --git a/src/Storages/Freeze.cpp b/src/Storages/Freeze.cpp index d2e19551c923..f8e9fccaade5 100644 --- a/src/Storages/Freeze.cpp +++ b/src/Storages/Freeze.cpp @@ -76,7 +76,7 @@ bool FreezeMetaData::load(DiskPtr data_disk, const String & path) readIntText(version, buffer); if (version < 1 || version > 2) { - LOG_ERROR(&Poco::Logger::get("FreezeMetaData"), "Unknown frozen metadata version: {}", version); + LOG_ERROR(getLogger("FreezeMetaData"), "Unknown frozen metadata version: {}", version); return false; } DB::assertChar('\n', buffer); diff --git a/src/Storages/Freeze.h b/src/Storages/Freeze.h index a64be7465dd2..5775653aaeaa 100644 --- a/src/Storages/Freeze.h +++ b/src/Storages/Freeze.h @@ -38,7 +38,7 @@ class Unfreezer private: ContextPtr local_context; zkutil::ZooKeeperPtr zookeeper; - Poco::Logger * log = &Poco::Logger::get("Unfreezer"); + LoggerPtr log = getLogger("Unfreezer"); static constexpr std::string_view backup_directory_prefix = "shadow"; static bool removeFreezedPart(DiskPtr disk, const String & path, const String & part_name, ContextPtr local_context, zkutil::ZooKeeperPtr zookeeper); }; diff --git a/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.cpp b/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.cpp index c683d59579b8..0c1633d6e545 100644 --- 
a/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.cpp +++ b/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.cpp @@ -44,7 +44,7 @@ AsynchronousReadBufferFromHDFS::AsynchronousReadBufferFromHDFS( , prefetch_buffer(settings_.remote_fs_buffer_size) , read_until_position(impl->getFileSize()) , use_prefetch(settings_.remote_fs_prefetch) - , log(&Poco::Logger::get("AsynchronousReadBufferFromHDFS")) + , log(getLogger("AsynchronousReadBufferFromHDFS")) { ProfileEvents::increment(ProfileEvents::RemoteFSBuffers); } diff --git a/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.h b/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.h index 07d32194a932..2c47b6b58561 100644 --- a/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.h +++ b/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.h @@ -62,7 +62,7 @@ class AsynchronousReadBufferFromHDFS : public BufferWithOwnMemory read_until_position; bool use_prefetch; - Poco::Logger * log; + LoggerPtr log; /// Metrics to profile prefetch Stopwatch interval_watch; diff --git a/src/Storages/HDFS/HDFSCommon.cpp b/src/Storages/HDFS/HDFSCommon.cpp index 932e80831fed..a0ab7ba2f7f9 100644 --- a/src/Storages/HDFS/HDFSCommon.cpp +++ b/src/Storages/HDFS/HDFSCommon.cpp @@ -55,7 +55,7 @@ void HDFSBuilderWrapper::loadFromConfig(const Poco::Util::AbstractConfiguration need_kinit = true; hadoop_kerberos_keytab = config.getString(key_path); #else // USE_KRB5 - LOG_WARNING(&Poco::Logger::get("HDFSClient"), "hadoop_kerberos_keytab parameter is ignored because ClickHouse was built without support of krb5 library."); + LOG_WARNING(getLogger("HDFSClient"), "hadoop_kerberos_keytab parameter is ignored because ClickHouse was built without support of krb5 library."); #endif // USE_KRB5 continue; } @@ -66,7 +66,7 @@ void HDFSBuilderWrapper::loadFromConfig(const Poco::Util::AbstractConfiguration hadoop_kerberos_principal = config.getString(key_path); hdfsBuilderSetPrincipal(hdfs_builder, hadoop_kerberos_principal.c_str()); #else // USE_KRB5 - 
LOG_WARNING(&Poco::Logger::get("HDFSClient"), "hadoop_kerberos_principal parameter is ignored because ClickHouse was built without support of krb5 library."); + LOG_WARNING(getLogger("HDFSClient"), "hadoop_kerberos_principal parameter is ignored because ClickHouse was built without support of krb5 library."); #endif // USE_KRB5 continue; } @@ -81,7 +81,7 @@ void HDFSBuilderWrapper::loadFromConfig(const Poco::Util::AbstractConfiguration hadoop_security_kerberos_ticket_cache_path = config.getString(key_path); // standard param - pass further #else // USE_KRB5 - LOG_WARNING(&Poco::Logger::get("HDFSClient"), "hadoop.security.kerberos.ticket.cache.path parameter is ignored because ClickHouse was built without support of krb5 library."); + LOG_WARNING(getLogger("HDFSClient"), "hadoop.security.kerberos.ticket.cache.path parameter is ignored because ClickHouse was built without support of krb5 library."); #endif // USE_KRB5 } @@ -95,7 +95,7 @@ void HDFSBuilderWrapper::loadFromConfig(const Poco::Util::AbstractConfiguration #if USE_KRB5 void HDFSBuilderWrapper::runKinit() { - LOG_DEBUG(&Poco::Logger::get("HDFSClient"), "Running KerberosInit"); + LOG_DEBUG(getLogger("HDFSClient"), "Running KerberosInit"); try { kerberosInit(hadoop_kerberos_keytab,hadoop_kerberos_principal,hadoop_security_kerberos_ticket_cache_path); @@ -104,7 +104,7 @@ void HDFSBuilderWrapper::runKinit() { throw Exception(ErrorCodes::KERBEROS_ERROR, "KerberosInit failure: {}", getExceptionMessage(e, false)); } - LOG_DEBUG(&Poco::Logger::get("HDFSClient"), "Finished KerberosInit"); + LOG_DEBUG(getLogger("HDFSClient"), "Finished KerberosInit"); } #endif // USE_KRB5 diff --git a/src/Storages/HDFS/StorageHDFS.h b/src/Storages/HDFS/StorageHDFS.h index e22e01b6242d..49f366f30520 100644 --- a/src/Storages/HDFS/StorageHDFS.h +++ b/src/Storages/HDFS/StorageHDFS.h @@ -97,7 +97,7 @@ class StorageHDFS final : public IStorage, WithContext bool is_path_with_globs; NamesAndTypesList virtual_columns; - Poco::Logger * log = 
&Poco::Logger::get("StorageHDFS"); + LoggerPtr log = getLogger("StorageHDFS"); }; class PullingPipelineExecutor; diff --git a/src/Storages/Hive/HiveCommon.cpp b/src/Storages/Hive/HiveCommon.cpp index 609adcf65c94..b58302f262ec 100644 --- a/src/Storages/Hive/HiveCommon.cpp +++ b/src/Storages/Hive/HiveCommon.cpp @@ -25,7 +25,7 @@ static const int hive_metastore_client_recv_timeout_ms = 10000; static const int hive_metastore_client_send_timeout_ms = 10000; ThriftHiveMetastoreClientPool::ThriftHiveMetastoreClientPool(ThriftHiveMetastoreClientBuilder builder_) - : PoolBase(max_hive_metastore_client_connections, &Poco::Logger::get("ThriftHiveMetastoreClientPool")), builder(builder_) + : PoolBase(max_hive_metastore_client_connections, getLogger("ThriftHiveMetastoreClientPool")), builder(builder_) { } diff --git a/src/Storages/Hive/HiveCommon.h b/src/Storages/Hive/HiveCommon.h index e2c19fb1684e..0f9d3364ffd4 100644 --- a/src/Storages/Hive/HiveCommon.h +++ b/src/Storages/Hive/HiveCommon.h @@ -115,7 +115,7 @@ class HiveMetastoreClient const bool empty_partition_keys; const HiveFilesCachePtr hive_files_cache; - Poco::Logger * log = &Poco::Logger::get("HiveMetastoreClient"); + LoggerPtr log = getLogger("HiveMetastoreClient"); }; @@ -138,7 +138,7 @@ class HiveMetastoreClient CacheBase table_metadata_cache; ThriftHiveMetastoreClientPool client_pool; - Poco::Logger * log = &Poco::Logger::get("HiveMetastoreClient"); + LoggerPtr log = getLogger("HiveMetastoreClient"); }; using HiveMetastoreClientPtr = std::shared_ptr; diff --git a/src/Storages/Hive/StorageHive.cpp b/src/Storages/Hive/StorageHive.cpp index 71830e8bc86c..dfe354c4d635 100644 --- a/src/Storages/Hive/StorageHive.cpp +++ b/src/Storages/Hive/StorageHive.cpp @@ -399,7 +399,7 @@ class StorageHiveSource : public ISource, WithContext bool generate_chunk_from_metadata{false}; UInt64 current_file_remained_rows = 0; - Poco::Logger * log = &Poco::Logger::get("StorageHive"); + LoggerPtr log = getLogger("StorageHive"); }; diff 
--git a/src/Storages/Hive/StorageHive.h b/src/Storages/Hive/StorageHive.h index 4600d929c6ab..55856d3d3141 100644 --- a/src/Storages/Hive/StorageHive.h +++ b/src/Storages/Hive/StorageHive.h @@ -156,7 +156,7 @@ class StorageHive final : public IStorage, WithContext std::shared_ptr storage_settings; - Poco::Logger * log = &Poco::Logger::get("StorageHive"); + LoggerPtr log = getLogger("StorageHive"); }; } diff --git a/src/Storages/IMessageProducer.cpp b/src/Storages/IMessageProducer.cpp index cf3146960417..c723ec77b700 100644 --- a/src/Storages/IMessageProducer.cpp +++ b/src/Storages/IMessageProducer.cpp @@ -4,7 +4,7 @@ namespace DB { -IMessageProducer::IMessageProducer(Poco::Logger * log_) : log(log_) +IMessageProducer::IMessageProducer(LoggerPtr log_) : log(log_) { } diff --git a/src/Storages/IMessageProducer.h b/src/Storages/IMessageProducer.h index 12580d5f94a3..c769c3251916 100644 --- a/src/Storages/IMessageProducer.h +++ b/src/Storages/IMessageProducer.h @@ -16,7 +16,7 @@ namespace DB class IMessageProducer { public: - explicit IMessageProducer(Poco::Logger * log_); + explicit IMessageProducer(LoggerPtr log_); /// Do some preparations. virtual void start(const ContextPtr & context) = 0; @@ -30,14 +30,14 @@ class IMessageProducer virtual ~IMessageProducer() = default; protected: - Poco::Logger * log; + LoggerPtr log; }; /// Implements interface for concurrent message producing. class AsynchronousMessageProducer : public IMessageProducer { public: - explicit AsynchronousMessageProducer(Poco::Logger * log_) : IMessageProducer(log_) {} + explicit AsynchronousMessageProducer(LoggerPtr log_) : IMessageProducer(log_) {} /// Create and schedule task in BackgroundSchedulePool that will produce messages. 
void start(const ContextPtr & context) override; diff --git a/src/Storages/IStorageDataLake.h b/src/Storages/IStorageDataLake.h index 9e322377fbd6..7fb749eba191 100644 --- a/src/Storages/IStorageDataLake.h +++ b/src/Storages/IStorageDataLake.h @@ -39,7 +39,7 @@ class IStorageDataLake : public Storage std::optional format_settings_) : Storage( getAdjustedConfiguration( - context_, Storage::updateConfiguration(context_, configuration_), &Poco::Logger::get("Storage" + String(name))), + context_, Storage::updateConfiguration(context_, configuration_), getLogger("Storage" + String(name))), table_id_, columns_, constraints_, @@ -57,13 +57,13 @@ class IStorageDataLake : public Storage { Storage::updateConfiguration(ctx, configuration); - auto new_configuration = getAdjustedConfiguration(ctx, configuration, &Poco::Logger::get("Storage" + String(name))); + auto new_configuration = getAdjustedConfiguration(ctx, configuration, getLogger("Storage" + String(name))); return Storage::getTableStructureFromData(new_configuration, format_settings, ctx, /*object_infos*/ nullptr); } static Configuration - getAdjustedConfiguration(const ContextPtr & context, const Configuration & configuration, Poco::Logger * log) + getAdjustedConfiguration(const ContextPtr & context, const Configuration & configuration, LoggerPtr log) { MetadataParser parser{configuration, context}; diff --git a/src/Storages/Kafka/KafkaConsumer.cpp b/src/Storages/Kafka/KafkaConsumer.cpp index 4a14e1292fa2..fb8cf11c83b3 100644 --- a/src/Storages/Kafka/KafkaConsumer.cpp +++ b/src/Storages/Kafka/KafkaConsumer.cpp @@ -47,7 +47,7 @@ const auto DRAIN_TIMEOUT_MS = 5000ms; KafkaConsumer::KafkaConsumer( ConsumerPtr consumer_, - Poco::Logger * log_, + LoggerPtr log_, size_t max_batch_size, size_t poll_timeout_, bool intermediate_commit_, diff --git a/src/Storages/Kafka/KafkaConsumer.h b/src/Storages/Kafka/KafkaConsumer.h index feda51a682e7..f5e6f25c5b06 100644 --- a/src/Storages/Kafka/KafkaConsumer.h +++ 
b/src/Storages/Kafka/KafkaConsumer.h @@ -27,7 +27,7 @@ class KafkaConsumer public: KafkaConsumer( ConsumerPtr consumer_, - Poco::Logger * log_, + LoggerPtr log_, size_t max_batch_size, size_t poll_timeout_, bool intermediate_commit_, @@ -85,7 +85,7 @@ class KafkaConsumer }; ConsumerPtr consumer; - Poco::Logger * log; + LoggerPtr log; const size_t batch_size = 1; const size_t poll_timeout = 0; size_t offsets_stored = 0; diff --git a/src/Storages/Kafka/KafkaProducer.cpp b/src/Storages/Kafka/KafkaProducer.cpp index 5ef5d080ddfc..e46055055795 100644 --- a/src/Storages/Kafka/KafkaProducer.cpp +++ b/src/Storages/Kafka/KafkaProducer.cpp @@ -18,7 +18,7 @@ namespace DB KafkaProducer::KafkaProducer( ProducerPtr producer_, const std::string & topic_, std::chrono::milliseconds poll_timeout, std::atomic & shutdown_called_, const Block & header) - : IMessageProducer(&Poco::Logger::get("KafkaProducer")) + : IMessageProducer(getLogger("KafkaProducer")) , producer(producer_) , topic(topic_) , timeout(poll_timeout) diff --git a/src/Storages/Kafka/KafkaSource.cpp b/src/Storages/Kafka/KafkaSource.cpp index 3c5253f7640a..bc6c92e78f61 100644 --- a/src/Storages/Kafka/KafkaSource.cpp +++ b/src/Storages/Kafka/KafkaSource.cpp @@ -33,7 +33,7 @@ KafkaSource::KafkaSource( const StorageSnapshotPtr & storage_snapshot_, const ContextPtr & context_, const Names & columns, - Poco::Logger * log_, + LoggerPtr log_, size_t max_block_size_, bool commit_in_suffix_) : ISource(storage_snapshot_->getSampleBlockForColumns(columns)) diff --git a/src/Storages/Kafka/KafkaSource.h b/src/Storages/Kafka/KafkaSource.h index 3d2edd4ebd18..125a1661f2fb 100644 --- a/src/Storages/Kafka/KafkaSource.h +++ b/src/Storages/Kafka/KafkaSource.h @@ -22,7 +22,7 @@ class KafkaSource : public ISource const StorageSnapshotPtr & storage_snapshot_, const ContextPtr & context_, const Names & columns, - Poco::Logger * log_, + LoggerPtr log_, size_t max_block_size_, bool commit_in_suffix = false); ~KafkaSource() override; @@ -41,7 
+41,7 @@ class KafkaSource : public ISource StorageSnapshotPtr storage_snapshot; ContextPtr context; Names column_names; - Poco::Logger * log; + LoggerPtr log; UInt64 max_block_size; KafkaConsumerPtr consumer; diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index 3381561eb1b6..8fda35876416 100644 --- a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -259,7 +259,7 @@ StorageKafka::StorageKafka( , max_rows_per_message(kafka_settings->kafka_max_rows_per_message.value) , schema_name(getContext()->getMacros()->expand(kafka_settings->kafka_schema.value)) , num_consumers(kafka_settings->kafka_num_consumers.value) - , log(&Poco::Logger::get("StorageKafka (" + table_id_.table_name + ")")) + , log(getLogger("StorageKafka (" + table_id_.table_name + ")")) , semaphore(0, static_cast(num_consumers)) , intermediate_commit(kafka_settings->kafka_commit_every_batch.value) , settings_adjustments(createSettingsAdjustments()) diff --git a/src/Storages/Kafka/StorageKafka.h b/src/Storages/Kafka/StorageKafka.h index 3559129cf744..e55ae2ed70c1 100644 --- a/src/Storages/Kafka/StorageKafka.h +++ b/src/Storages/Kafka/StorageKafka.h @@ -86,7 +86,7 @@ class StorageKafka final : public IStorage, WithContext const size_t max_rows_per_message; const String schema_name; const size_t num_consumers; /// total number of consumers - Poco::Logger * log; + LoggerPtr log; Poco::Semaphore semaphore; const bool intermediate_commit; const SettingsChanges settings_adjustments; diff --git a/src/Storages/LiveView/StorageLiveView.cpp b/src/Storages/LiveView/StorageLiveView.cpp index e0c5677f4305..79fb17f366eb 100644 --- a/src/Storages/LiveView/StorageLiveView.cpp +++ b/src/Storages/LiveView/StorageLiveView.cpp @@ -209,7 +209,7 @@ StorageLiveView::StorageLiveView( live_view_context = Context::createCopy(getContext()); live_view_context->makeQueryContext(); - log = &Poco::Logger::get("StorageLiveView (" + table_id_.database_name + "." 
+ table_id_.table_name + ")"); + log = getLogger("StorageLiveView (" + table_id_.database_name + "." + table_id_.table_name + ")"); StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); diff --git a/src/Storages/LiveView/StorageLiveView.h b/src/Storages/LiveView/StorageLiveView.h index 002cbf96ebe6..3f2590527f7a 100644 --- a/src/Storages/LiveView/StorageLiveView.h +++ b/src/Storages/LiveView/StorageLiveView.h @@ -184,7 +184,7 @@ using MilliSeconds = std::chrono::milliseconds; ContextMutablePtr live_view_context; - Poco::Logger * log; + LoggerPtr log; bool is_periodically_refreshed = false; Seconds periodic_live_view_refresh; diff --git a/src/Storages/MeiliSearch/StorageMeiliSearch.cpp b/src/Storages/MeiliSearch/StorageMeiliSearch.cpp index fe8cc74f5773..c1a619f88662 100644 --- a/src/Storages/MeiliSearch/StorageMeiliSearch.cpp +++ b/src/Storages/MeiliSearch/StorageMeiliSearch.cpp @@ -34,7 +34,7 @@ StorageMeiliSearch::StorageMeiliSearch( const ColumnsDescription & columns_, const ConstraintsDescription & constraints_, const String & comment) - : IStorage(table_id), config{config_}, log(&Poco::Logger::get("StorageMeiliSearch (" + table_id.table_name + ")")) + : IStorage(table_id), config{config_}, log(getLogger("StorageMeiliSearch (" + table_id.table_name + ")")) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); diff --git a/src/Storages/MeiliSearch/StorageMeiliSearch.h b/src/Storages/MeiliSearch/StorageMeiliSearch.h index 30ff2f9b9faa..4483f5fe3f51 100644 --- a/src/Storages/MeiliSearch/StorageMeiliSearch.h +++ b/src/Storages/MeiliSearch/StorageMeiliSearch.h @@ -33,7 +33,7 @@ class StorageMeiliSearch final : public IStorage private: MeiliSearchConfiguration config; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/MergeTree/AsyncBlockIDsCache.cpp b/src/Storages/MergeTree/AsyncBlockIDsCache.cpp index 7a8a4cd43470..6e0e590fb6e9 100644 --- a/src/Storages/MergeTree/AsyncBlockIDsCache.cpp +++ 
b/src/Storages/MergeTree/AsyncBlockIDsCache.cpp @@ -78,7 +78,7 @@ AsyncBlockIDsCache::AsyncBlockIDsCache(StorageReplicatedMergeTree & storage_) update_min_interval(storage.getSettings()->async_block_ids_cache_min_update_interval_ms), path(storage.zookeeper_path + "/async_blocks"), log_name(storage.getStorageID().getFullTableName() + " (AsyncBlockIDsCache)"), - log(&Poco::Logger::get(log_name)) + log(getLogger(log_name)) { task = storage.getContext()->getSchedulePool().createTask(log_name, [this]{ update(); }); } diff --git a/src/Storages/MergeTree/AsyncBlockIDsCache.h b/src/Storages/MergeTree/AsyncBlockIDsCache.h index a661d00f8a61..f290be847252 100644 --- a/src/Storages/MergeTree/AsyncBlockIDsCache.h +++ b/src/Storages/MergeTree/AsyncBlockIDsCache.h @@ -46,7 +46,7 @@ class AsyncBlockIDsCache BackgroundSchedulePool::TaskHolder task; const String log_name; - Poco::Logger * log; + LoggerPtr log; }; using AsyncBlockIDsCachePtr = std::shared_ptr; diff --git a/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp b/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp index 175df9b6e28d..9148d9c97e5e 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp +++ b/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp @@ -52,7 +52,7 @@ std::string DataPartStorageOnDiskBase::getRelativePath() const return fs::path(root_path) / part_dir / ""; } -std::optional DataPartStorageOnDiskBase::getRelativePathForPrefix(Poco::Logger * log, const String & prefix, bool detached, bool broken) const +std::optional DataPartStorageOnDiskBase::getRelativePathForPrefix(LoggerPtr log, const String & prefix, bool detached, bool broken) const { assert(!broken || detached); String res; @@ -420,7 +420,7 @@ MutableDataPartStoragePtr DataPartStorageOnDiskBase::clonePart( const std::string & to, const std::string & dir_path, const DiskPtr & disk, - Poco::Logger * log) const + LoggerPtr log) const { String path_to_clone = fs::path(to) / dir_path / ""; @@ -441,7 +441,7 @@ 
MutableDataPartStoragePtr DataPartStorageOnDiskBase::clonePart( void DataPartStorageOnDiskBase::rename( std::string new_root_path, std::string new_part_dir, - Poco::Logger * log, + LoggerPtr log, bool remove_new_dir_if_exists, bool fsync_part_dir) { @@ -498,7 +498,7 @@ void DataPartStorageOnDiskBase::remove( const MergeTreeDataPartChecksums & checksums, std::list projections, bool is_temp, - Poco::Logger * log) + LoggerPtr log) { /// NOTE We rename part to delete_tmp_ instead of delete_tmp_ to avoid race condition /// when we try to remove two parts with the same name, but different relative paths, @@ -649,7 +649,7 @@ void DataPartStorageOnDiskBase::clearDirectory( const CanRemoveDescription & can_remove_description, const MergeTreeDataPartChecksums & checksums, bool is_temp, - Poco::Logger * log) + LoggerPtr log) { auto disk = volume->getDisk(); auto [can_remove_shared_data, names_not_to_remove] = can_remove_description; diff --git a/src/Storages/MergeTree/DataPartStorageOnDiskBase.h b/src/Storages/MergeTree/DataPartStorageOnDiskBase.h index 7c408dcf381d..a408f7c79002 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDiskBase.h +++ b/src/Storages/MergeTree/DataPartStorageOnDiskBase.h @@ -25,7 +25,7 @@ class DataPartStorageOnDiskBase : public IDataPartStorage UInt64 calculateTotalSizeOnDisk() const override; /// Returns path to place detached part in or nullopt if we don't need to detach part (if it already exists and has the same content) - std::optional getRelativePathForPrefix(Poco::Logger * log, const String & prefix, bool detached, bool broken) const override; + std::optional getRelativePathForPrefix(LoggerPtr log, const String & prefix, bool detached, bool broken) const override; /// Returns true if detached part already exists and has the same content (compares checksums.txt and the list of files) bool looksLikeBrokenDetachedPartHasTheSameContent(const String & detached_part_path, std::optional & original_checksums_content, @@ -68,12 +68,12 @@ class 
DataPartStorageOnDiskBase : public IDataPartStorage const std::string & to, const std::string & dir_path, const DiskPtr & disk, - Poco::Logger * log) const override; + LoggerPtr log) const override; void rename( std::string new_root_path, std::string new_part_dir, - Poco::Logger * log, + LoggerPtr log, bool remove_new_dir_if_exists, bool fsync_part_dir) override; @@ -82,7 +82,7 @@ class DataPartStorageOnDiskBase : public IDataPartStorage const MergeTreeDataPartChecksums & checksums, std::list projections, bool is_temp, - Poco::Logger * log) override; + LoggerPtr log) override; void changeRootPath(const std::string & from_root, const std::string & to_root) override; void createDirectories() override; @@ -122,7 +122,7 @@ class DataPartStorageOnDiskBase : public IDataPartStorage const CanRemoveDescription & can_remove_description, const MergeTreeDataPartChecksums & checksums, bool is_temp, - Poco::Logger * log); + LoggerPtr log); /// For names of expected data part files returns the actual names /// of files in filesystem to which data of these files is written. 
diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index c6efe9c95894..5842beefea8b 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -100,7 +100,7 @@ struct ReplicatedFetchReadCallback Service::Service(StorageReplicatedMergeTree & data_) : data(data_) - , log(&Poco::Logger::get(data.getStorageID().getNameForLogs() + " (Replicated PartsService)")) + , log(getLogger(data.getStorageID().getNameForLogs() + " (Replicated PartsService)")) {} std::string Service::getId(const std::string & node_id) const @@ -361,7 +361,7 @@ MergeTreeData::DataPartPtr Service::findPart(const String & name) Fetcher::Fetcher(StorageReplicatedMergeTree & data_) : data(data_) - , log(&Poco::Logger::get(data.getStorageID().getNameForLogs() + " (Fetcher)")) + , log(getLogger(data.getStorageID().getNameForLogs() + " (Fetcher)")) {} MergeTreeData::MutableDataPartPtr Fetcher::fetchSelectedPart( diff --git a/src/Storages/MergeTree/DataPartsExchange.h b/src/Storages/MergeTree/DataPartsExchange.h index 20c15039a2db..75d60204d31a 100644 --- a/src/Storages/MergeTree/DataPartsExchange.h +++ b/src/Storages/MergeTree/DataPartsExchange.h @@ -55,7 +55,7 @@ class Service final : public InterserverIOEndpoint /// StorageReplicatedMergeTree::shutdown() waits for all parts exchange handlers to finish, /// so Service will never access dangling reference to storage StorageReplicatedMergeTree & data; - Poco::Logger * log; + LoggerPtr log; }; /** Client for getting the parts from the table *MergeTree. 
@@ -136,7 +136,7 @@ class Fetcher final : private boost::noncopyable ThrottlerPtr throttler); StorageReplicatedMergeTree & data; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp index 996d2bc46a5c..a63218fc028d 100644 --- a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp +++ b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp @@ -64,7 +64,7 @@ std::optional createEphemeralLockInZooKeeper( { const String & failed_op_path = deduplication_path[failed_idx / 2]; LOG_DEBUG( - &Poco::Logger::get("createEphemeralLockInZooKeeper"), + getLogger("createEphemeralLockInZooKeeper"), "Deduplication path already exists: deduplication_path={}", failed_op_path); return EphemeralLockInZooKeeper{"", nullptr, "", failed_op_path}; @@ -73,7 +73,7 @@ std::optional createEphemeralLockInZooKeeper( else if (responses[0]->error == Coordination::Error::ZNODEEXISTS) { LOG_DEBUG( - &Poco::Logger::get("createEphemeralLockInZooKeeper"), + getLogger("createEphemeralLockInZooKeeper"), "Deduplication path already exists: deduplication_path={}", deduplication_path); return {}; @@ -119,7 +119,7 @@ EphemeralLockInZooKeeper::~EphemeralLockInZooKeeper() { if (Coordination::isHardwareError(e.code)) LOG_DEBUG( - &Poco::Logger::get("EphemeralLockInZooKeeper"), + getLogger("EphemeralLockInZooKeeper"), "ZooKeeper communication error during unlock: code={} message='{}'", e.code, e.message()); @@ -130,7 +130,7 @@ EphemeralLockInZooKeeper::~EphemeralLockInZooKeeper() /// But it's possible that the multi op request can be executed on server side, and client will not get response due to network issue. 
/// In such case, assumeUnlocked() will not be called, so we'll get ZNONODE error here since the noded is already deleted LOG_DEBUG( - &Poco::Logger::get("EphemeralLockInZooKeeper"), + getLogger("EphemeralLockInZooKeeper"), "ZooKeeper node was already deleted: code={} message={}", e.code, e.message()); @@ -168,7 +168,7 @@ EphemeralLocksInAllPartitions::EphemeralLocksInAllPartitions( Coordination::Error rc = zookeeper->tryMulti(lock_ops, lock_responses); if (rc == Coordination::Error::ZBADVERSION) { - LOG_TRACE(&Poco::Logger::get("EphemeralLocksInAllPartitions"), "Someone has inserted a block in a new partition while we were creating locks. Retry."); + LOG_TRACE(getLogger("EphemeralLocksInAllPartitions"), "Someone has inserted a block in a new partition while we were creating locks. Retry."); continue; } else if (rc != Coordination::Error::ZOK) diff --git a/src/Storages/MergeTree/IDataPartStorage.h b/src/Storages/MergeTree/IDataPartStorage.h index f92784cb0dad..649af4cc4a97 100644 --- a/src/Storages/MergeTree/IDataPartStorage.h +++ b/src/Storages/MergeTree/IDataPartStorage.h @@ -133,12 +133,12 @@ class IDataPartStorage : public boost::noncopyable const MergeTreeDataPartChecksums & checksums, std::list projections, bool is_temp, - Poco::Logger * log) = 0; + LoggerPtr log) = 0; /// Get a name like 'prefix_partdir_tryN' which does not exist in a root dir. /// TODO: remove it. virtual std::optional getRelativePathForPrefix( - Poco::Logger * log, const String & prefix, bool detached, bool broken) const = 0; + LoggerPtr log, const String & prefix, bool detached, bool broken) const = 0; /// Reset part directory, used for in-memory parts. /// TODO: remove it. @@ -223,7 +223,7 @@ class IDataPartStorage : public boost::noncopyable const std::string & to, const std::string & dir_path, const DiskPtr & disk, - Poco::Logger * log) const = 0; + LoggerPtr log) const = 0; /// Change part's root. from_root should be a prefix path of current root path. 
/// Right now, this is needed for rename table query. @@ -271,7 +271,7 @@ class IDataPartStorage : public boost::noncopyable virtual void rename( std::string new_root_path, std::string new_part_dir, - Poco::Logger * log, + LoggerPtr log, bool remove_new_dir_if_exists, bool fsync_part_dir) = 0; diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 734814fdc77a..401170fd7880 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -1591,7 +1591,7 @@ try metadata_manager->deleteAll(true); metadata_manager->assertAllDeleted(true); - getDataPartStorage().rename(to.parent_path(), to.filename(), storage.log, remove_new_dir_if_exists, fsync_dir); + getDataPartStorage().rename(to.parent_path(), to.filename(), storage.log.load(), remove_new_dir_if_exists, fsync_dir); metadata_manager->updateAll(true); auto new_projection_root_path = to.string(); @@ -1699,7 +1699,7 @@ void IMergeTreeDataPart::remove() } bool is_temporary_part = is_temp || state == MergeTreeDataPartState::Temporary; - getDataPartStorage().remove(std::move(can_remove_callback), checksums, projection_checksums, is_temporary_part, storage.log); + getDataPartStorage().remove(std::move(can_remove_callback), checksums, projection_checksums, is_temporary_part, storage.log.load()); } std::optional IMergeTreeDataPart::getRelativePathForPrefix(const String & prefix, bool detached, bool broken) const @@ -1716,7 +1716,7 @@ std::optional IMergeTreeDataPart::getRelativePathForPrefix(const String if (detached && parent_part) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot detach projection"); - return getDataPartStorage().getRelativePathForPrefix(storage.log, prefix, detached, broken); + return getDataPartStorage().getRelativePathForPrefix(storage.log.load(), prefix, detached, broken); } std::optional IMergeTreeDataPart::getRelativePathForDetachedPart(const String & prefix, bool broken) const @@ -1773,7 
+1773,8 @@ MutableDataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & di throw Exception(ErrorCodes::LOGICAL_ERROR, "Can not clone data part {} to empty directory.", name); String path_to_clone = fs::path(storage.relative_data_path) / directory_name / ""; - return getDataPartStorage().clonePart(path_to_clone, getDataPartStorage().getPartDirectory(), disk, storage.log); + + return getDataPartStorage().clonePart(path_to_clone, getDataPartStorage().getPartDirectory(), disk, storage.log.load()); } void IMergeTreeDataPart::checkConsistencyBase() const diff --git a/src/Storages/MergeTree/LeaderElection.h b/src/Storages/MergeTree/LeaderElection.h index f694ecab8e32..8c9672759754 100644 --- a/src/Storages/MergeTree/LeaderElection.h +++ b/src/Storages/MergeTree/LeaderElection.h @@ -19,7 +19,7 @@ namespace zkutil * For now, every replica can become leader if there is no leader among replicas with old version. */ -void checkNoOldLeaders(Poco::Logger * log, ZooKeeper & zookeeper, const String path) +void checkNoOldLeaders(LoggerPtr log, ZooKeeper & zookeeper, const String path) { /// Previous versions (before 21.12) used to create ephemeral sequential node path/leader_election- /// Replica with the lexicographically smallest node name becomes leader (before 20.6) or enables multi-leader mode (since 20.6) diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index ed1b83048e14..c1b84c1cb49f 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -25,7 +25,7 @@ MergeFromLogEntryTask::MergeFromLogEntryTask( StorageReplicatedMergeTree & storage_, IExecutableTask::TaskResultCallback & task_result_callback_) : ReplicatedMergeMutateTaskBase( - &Poco::Logger::get( + getLogger( storage_.getStorageID().getShortName() + "::" + selected_entry_->log_entry->new_part_name + " (MergeFromLogEntryTask)"), storage_, selected_entry_, diff --git 
a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 9b777345c1da..9a9ef92ce732 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -213,7 +213,7 @@ class MergeTask size_t sum_compressed_bytes_upper_bound{0}; bool blocks_are_granules_size{false}; - Poco::Logger * log{&Poco::Logger::get("MergeTask::PrepareStage")}; + LoggerPtr log{getLogger("MergeTask::PrepareStage")}; /// Dependencies for next stages std::list::const_iterator it_name_and_type; @@ -340,7 +340,7 @@ class MergeTask MergeTasks tasks_for_projections; MergeTasks::iterator projections_iterator; - Poco::Logger * log{&Poco::Logger::get("MergeTask::MergeProjectionsStage")}; + LoggerPtr log{getLogger("MergeTask::MergeProjectionsStage")}; }; using MergeProjectionsRuntimeContextPtr = std::shared_ptr; diff --git a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h index a27fb18c0fe2..a3efe939020e 100644 --- a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h +++ b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h @@ -336,7 +336,7 @@ class MergeTreeBackgroundExecutor final : boost::noncopyable std::condition_variable has_tasks TSA_GUARDED_BY(mutex); bool shutdown TSA_GUARDED_BY(mutex) = false; ThreadPool pool; - Poco::Logger * log = &Poco::Logger::get("MergeTreeBackgroundExecutor"); + LoggerPtr log = getLogger("MergeTreeBackgroundExecutor"); }; extern template class MergeTreeBackgroundExecutor; diff --git a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h index a9ade25646d2..956f40238764 100644 --- a/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeBaseSelectProcessor.h @@ -187,7 +187,7 @@ class IMergeTreeSelectAlgorithm size_t min_marks_to_read = 0; private: - Poco::Logger * log = &Poco::Logger::get("MergeTreeBaseSelectProcessor"); + LoggerPtr log = 
getLogger("MergeTreeBaseSelectProcessor"); std::atomic is_cancelled{false}; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 9287c30feb12..157b6849b902 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -305,8 +305,7 @@ MergeTreeData::MergeTreeData( , merging_params(merging_params_) , require_part_metadata(require_part_metadata_) , broken_part_callback(broken_part_callback_) - , log_name(std::make_shared(table_id_.getNameForLogs())) - , log(&Poco::Logger::get(*log_name)) + , log(table_id_.getNameForLogs()) , storage_settings(std::move(storage_settings_)) , pinned_part_uuids(std::make_shared()) , data_parts_by_info(data_parts_indexes.get()) @@ -1102,7 +1101,7 @@ MergeTreeData::PartLoadingTree::build(PartLoadingInfos nodes) } static std::optional calculatePartSizeSafe( - const MergeTreeData::DataPartPtr & part, Poco::Logger * log) + const MergeTreeData::DataPartPtr & part, const LoggerPtr & log) { try { @@ -1182,7 +1181,7 @@ MergeTreeData::LoadPartResult MergeTreeData::loadDataPart( if (part_disk_ptr->exists(marker_path)) { /// NOTE: getBytesOnDisk() cannot be used here, since it may be zero if checksums.txt does not exist. - res.size_of_part = calculatePartSizeSafe(res.part, log); + res.size_of_part = calculatePartSizeSafe(res.part, log.load()); res.is_broken = true; auto part_size_str = res.size_of_part ? formatReadableSizeWithBinarySuffix(*res.size_of_part) : "failed to calculate size"; @@ -1226,7 +1225,7 @@ MergeTreeData::LoadPartResult MergeTreeData::loadDataPart( /// Ignore broken parts that can appear as a result of hard server restart. if (res.is_broken) { - res.size_of_part = calculatePartSizeSafe(res.part, log); + res.size_of_part = calculatePartSizeSafe(res.part, log.load()); auto part_size_str = res.size_of_part ? 
formatReadableSizeWithBinarySuffix(*res.size_of_part) : "failed to calculate size"; LOG_ERROR(log, @@ -1989,7 +1988,7 @@ size_t MergeTreeData::clearOldTemporaryDirectories(const String & root_path, siz { /// Actually we don't rely on temporary_directories_lifetime when removing old temporaries directories, /// it's just an extra level of protection just in case we have a bug. - LOG_INFO(LogFrequencyLimiter(log, 10), "{} is in use (by merge/mutation/INSERT) (consider increasing temporary_directories_lifetime setting)", full_path); + LOG_INFO(LogFrequencyLimiter(log.load(), 10), "{} is in use (by merge/mutation/INSERT) (consider increasing temporary_directories_lifetime setting)", full_path); continue; } else @@ -2652,8 +2651,7 @@ void MergeTreeData::rename(const String & new_table_path, const StorageID & new_ void MergeTreeData::renameInMemory(const StorageID & new_table_id) { IStorage::renameInMemory(new_table_id); - std::atomic_store(&log_name, std::make_shared(new_table_id.getNameForLogs())); - log = &Poco::Logger::get(*log_name); + log.store(new_table_id.getNameForLogs()); } void MergeTreeData::dropAllData() @@ -5745,13 +5743,13 @@ ReservationPtr MergeTreeData::tryReserveSpacePreferringTTLRules( log, "Would like to reserve space on volume '{}' by TTL rule of table '{}' but volume was not found", move_ttl_entry->destination_name, - *std::atomic_load(&log_name)); + log.loadName()); else if (move_ttl_entry->destination_type == DataDestinationType::DISK && !move_ttl_entry->if_exists) LOG_WARNING( log, "Would like to reserve space on disk '{}' by TTL rule of table '{}' but disk was not found", move_ttl_entry->destination_name, - *std::atomic_load(&log_name)); + log.loadName()); } else if (is_insert && !perform_ttl_move_on_insert) { @@ -5760,7 +5758,7 @@ ReservationPtr MergeTreeData::tryReserveSpacePreferringTTLRules( "TTL move on insert to {} {} for table {} is disabled", (move_ttl_entry->destination_type == DataDestinationType::VOLUME ? 
"volume" : "disk"), move_ttl_entry->destination_name, - *std::atomic_load(&log_name)); + log.loadName()); } else { @@ -5776,13 +5774,13 @@ ReservationPtr MergeTreeData::tryReserveSpacePreferringTTLRules( log, "Would like to reserve space on volume '{}' by TTL rule of table '{}' but there is not enough space", move_ttl_entry->destination_name, - *std::atomic_load(&log_name)); + log.loadName()); else if (move_ttl_entry->destination_type == DataDestinationType::DISK) LOG_WARNING( log, "Would like to reserve space on disk '{}' by TTL rule of table '{}' but there is not enough space", move_ttl_entry->destination_name, - *std::atomic_load(&log_name)); + log.loadName()); } } } @@ -7892,7 +7890,7 @@ bool MergeTreeData::insertQueryIdOrThrowNoLock(const String & query_id, size_t m throw Exception( ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES, "Too many simultaneous queries for table {}. Maximum is: {}", - *std::atomic_load(&log_name), + log.loadName(), max_queries); query_id_set.insert(query_id); return true; @@ -8084,7 +8082,7 @@ ReservationPtr MergeTreeData::balancedReservation( } // Record submerging big parts in the tagger to clean them up. - tagger_ptr->emplace(*this, part_name, std::move(covered_parts), log); + tagger_ptr->emplace(*this, part_name, std::move(covered_parts), log.load()); } } } diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index f833625d37cc..442665b63522 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -450,7 +451,7 @@ class MergeTreeData : public IStorage, public WithMutableContext /// Load the set of data parts from disk. Call once - immediately after the object is created. 
void loadDataParts(bool skip_sanity_checks); - String getLogName() const { return *std::atomic_load(&log_name); } + String getLogName() const { return log.loadName(); } Int64 getMaxBlockNumber() const; @@ -1083,10 +1084,7 @@ class MergeTreeData : public IStorage, public WithMutableContext /// Engine-specific methods BrokenPartCallback broken_part_callback; - /// log_name will change during table RENAME. Use atomic_shared_ptr to allow concurrent RW. - /// NOTE clang-14 doesn't have atomic_shared_ptr yet. Use std::atomic* operations for now. - std::shared_ptr log_name; - std::atomic log; + AtomicLogger log; /// Storage settings. /// Use get and set to receive readonly versions. @@ -1534,10 +1532,10 @@ struct CurrentlySubmergingEmergingTagger MergeTreeData & storage; String emerging_part_name; MergeTreeData::DataPartsVector submerging_parts; - Poco::Logger * log; + LoggerPtr log; CurrentlySubmergingEmergingTagger( - MergeTreeData & storage_, const String & name_, MergeTreeData::DataPartsVector && parts_, Poco::Logger * log_) + MergeTreeData & storage_, const String & name_, MergeTreeData::DataPartsVector && parts_, LoggerPtr log_) : storage(storage_), emerging_part_name(name_), submerging_parts(std::move(parts_)), log(log_) { } diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 8300abd51bdc..c7075536e9f9 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -66,7 +66,7 @@ static const double DISK_USAGE_COEFFICIENT_TO_SELECT = 2; static const double DISK_USAGE_COEFFICIENT_TO_RESERVE = 1.1; MergeTreeDataMergerMutator::MergeTreeDataMergerMutator(MergeTreeData & data_) - : data(data_), log(&Poco::Logger::get(data.getLogName() + " (MergerMutator)")) + : data(data_), log(getLogger(data.getLogName() + " (MergerMutator)")) { } diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h 
b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index 7c96fcfaeb3c..5af518446b6e 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -159,7 +159,7 @@ public : private: MergeTreeData & data; - Poco::Logger * log; + LoggerPtr log; /// When the last time you wrote to the log that the disk space was running out (not to write about this too often). time_t disk_space_warning_time = 0; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 22df8f298c4e..5a9c9ca92aaf 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -72,7 +72,7 @@ namespace ErrorCodes MergeTreeDataSelectExecutor::MergeTreeDataSelectExecutor(const MergeTreeData & data_) - : data(data_), log(&Poco::Logger::get(data.getLogName() + " (SelectExecutor)")) + : data(data_), log(getLogger(data.getLogName() + " (SelectExecutor)")) { } @@ -81,7 +81,7 @@ size_t MergeTreeDataSelectExecutor::getApproximateTotalRowsToRead( const StorageMetadataPtr & metadata_snapshot, const KeyCondition & key_condition, const Settings & settings, - Poco::Logger * log) + LoggerPtr log) { size_t rows_count = 0; @@ -489,7 +489,7 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling( const StorageMetadataPtr & metadata_snapshot, ContextPtr context, bool sample_factor_column_queried, - Poco::Logger * log) + LoggerPtr log) { const Settings & settings = context->getSettingsRef(); /// Sampling. 
@@ -800,7 +800,7 @@ void MergeTreeDataSelectExecutor::filterPartsByPartition( const SelectQueryInfo & query_info, const ContextPtr & context, const PartitionIdToMaxBlock * max_block_numbers_to_read, - Poco::Logger * log, + LoggerPtr log, ReadFromMergeTree::IndexStats & index_stats) { const Settings & settings = context->getSettingsRef(); @@ -891,7 +891,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd const ContextPtr & context, const KeyCondition & key_condition, const MergeTreeReaderSettings & reader_settings, - Poco::Logger * log, + LoggerPtr log, size_t num_streams, ReadFromMergeTree::IndexStats & index_stats, bool use_skip_indexes) @@ -1408,7 +1408,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( const StorageMetadataPtr & metadata_snapshot, const KeyCondition & key_condition, const Settings & settings, - Poco::Logger * log) + LoggerPtr log) { MarkRanges res; @@ -1613,7 +1613,7 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingIndex( size_t & granules_dropped, MarkCache * mark_cache, UncompressedCache * uncompressed_cache, - Poco::Logger * log) + LoggerPtr log) { if (!index_helper->getDeserializedFormat(part->getDataPartStorage(), index_helper->getFileName())) { @@ -1742,7 +1742,7 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingMergedIndex( size_t & granules_dropped, MarkCache * mark_cache, UncompressedCache * uncompressed_cache, - Poco::Logger * log) + LoggerPtr log) { for (const auto & index_helper : indices) { @@ -1895,7 +1895,7 @@ void MergeTreeDataSelectExecutor::selectPartsToReadWithUUIDFilter( const PartitionIdToMaxBlock * max_block_numbers_to_read, ContextPtr query_context, PartFilterCounters & counters, - Poco::Logger * log) + LoggerPtr log) { /// process_parts prepare parts that have to be read for the query, /// returns false if duplicated parts' UUID have been met diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h 
b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h index a337574bb64e..d45bbb266f92 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h @@ -71,11 +71,11 @@ class MergeTreeDataSelectExecutor const StorageMetadataPtr & metadata_snapshot, const KeyCondition & key_condition, const Settings & settings, - Poco::Logger * log); + LoggerPtr log); private: const MergeTreeData & data; - Poco::Logger * log; + LoggerPtr log; /// Get the approximate value (bottom estimate - only by full marks) of the number of rows falling under the index. static size_t getApproximateTotalRowsToRead( @@ -83,7 +83,7 @@ class MergeTreeDataSelectExecutor const StorageMetadataPtr & metadata_snapshot, const KeyCondition & key_condition, const Settings & settings, - Poco::Logger * log); + LoggerPtr log); static MarkRanges filterMarksUsingIndex( MergeTreeIndexPtr index_helper, @@ -96,7 +96,7 @@ class MergeTreeDataSelectExecutor size_t & granules_dropped, MarkCache * mark_cache, UncompressedCache * uncompressed_cache, - Poco::Logger * log); + LoggerPtr log); static MarkRanges filterMarksUsingMergedIndex( MergeTreeIndices indices, @@ -109,7 +109,7 @@ class MergeTreeDataSelectExecutor size_t & granules_dropped, MarkCache * mark_cache, UncompressedCache * uncompressed_cache, - Poco::Logger * log); + LoggerPtr log); struct PartFilterCounters { @@ -143,7 +143,7 @@ class MergeTreeDataSelectExecutor const PartitionIdToMaxBlock * max_block_numbers_to_read, ContextPtr query_context, PartFilterCounters & counters, - Poco::Logger * log); + LoggerPtr log); public: /// For given number rows and bytes, get the number of marks to read. 
@@ -180,7 +180,7 @@ class MergeTreeDataSelectExecutor const SelectQueryInfo & query_info, const ContextPtr & context, const PartitionIdToMaxBlock * max_block_numbers_to_read, - Poco::Logger * log, + LoggerPtr log, ReadFromMergeTree::IndexStats & index_stats); /// Filter parts using primary key and secondary indexes. @@ -193,7 +193,7 @@ class MergeTreeDataSelectExecutor const ContextPtr & context, const KeyCondition & key_condition, const MergeTreeReaderSettings & reader_settings, - Poco::Logger * log, + LoggerPtr log, size_t num_streams, ReadFromMergeTree::IndexStats & index_stats, bool use_skip_indexes); @@ -210,7 +210,7 @@ class MergeTreeDataSelectExecutor const StorageMetadataPtr & metadata_snapshot, ContextPtr context, bool sample_factor_column_queried, - Poco::Logger * log); + LoggerPtr log); /// Check query limits: max_partitions_to_read, max_concurrent_queries. /// Also, return QueryIdHolder. If not null, we should keep it until query finishes. diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 37cfe4d065e0..c6083fe8bc36 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -284,7 +284,7 @@ Block MergeTreeDataWriter::mergeBlock( case MergeTreeData::MergingParams::Collapsing: return std::make_shared( block, 1, sort_description, merging_params.sign_column, - false, block_size + 1, &Poco::Logger::get("MergeTreeDataWriter")); + false, block_size + 1, getLogger("MergeTreeDataWriter")); case MergeTreeData::MergingParams::Summing: return std::make_shared( block, 1, sort_description, merging_params.columns_to_sum, @@ -552,7 +552,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPartImpl( bool is_temp, IMergeTreeDataPart * parent_part, const MergeTreeData & data, - Poco::Logger * log, + LoggerPtr log, Block block, const ProjectionDescription & projection) { @@ -662,7 +662,7 @@ MergeTreeDataWriter::TemporaryPart 
MergeTreeDataWriter::writeProjectionPartImpl( MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPart( const MergeTreeData & data, - Poco::Logger * log, + LoggerPtr log, Block block, const ProjectionDescription & projection, IMergeTreeDataPart * parent_part) @@ -681,7 +681,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeProjectionPart( /// projection part merges. MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempProjectionPart( const MergeTreeData & data, - Poco::Logger * log, + LoggerPtr log, Block block, const ProjectionDescription & projection, IMergeTreeDataPart * parent_part, diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.h b/src/Storages/MergeTree/MergeTreeDataWriter.h index 80b02831ab1e..55a701d9a297 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.h +++ b/src/Storages/MergeTree/MergeTreeDataWriter.h @@ -44,8 +44,9 @@ class MergeTreeDataWriter public: explicit MergeTreeDataWriter(MergeTreeData & data_) : data(data_) - , log(&Poco::Logger::get(data.getLogName() + " (Writer)")) - {} + , log(getLogger(data.getLogName() + " (Writer)")) + { + } /** Split the block to blocks, each of them must be written as separate part. * (split rows by partition) @@ -89,7 +90,7 @@ class MergeTreeDataWriter /// For insertion. static TemporaryPart writeProjectionPart( const MergeTreeData & data, - Poco::Logger * log, + LoggerPtr log, Block block, const ProjectionDescription & projection, IMergeTreeDataPart * parent_part); @@ -97,7 +98,7 @@ class MergeTreeDataWriter /// For mutation: MATERIALIZE PROJECTION. 
static TemporaryPart writeTempProjectionPart( const MergeTreeData & data, - Poco::Logger * log, + LoggerPtr log, Block block, const ProjectionDescription & projection, IMergeTreeDataPart * parent_part, @@ -124,12 +125,12 @@ class MergeTreeDataWriter bool is_temp, IMergeTreeDataPart * parent_part, const MergeTreeData & data, - Poco::Logger * log, + LoggerPtr log, Block block, const ProjectionDescription & projection); MergeTreeData & data; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/MergeTree/MergeTreeInOrderSelectProcessor.h b/src/Storages/MergeTree/MergeTreeInOrderSelectProcessor.h index f7c3f2946581..83d90a9362c0 100644 --- a/src/Storages/MergeTree/MergeTreeInOrderSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeInOrderSelectProcessor.h @@ -26,7 +26,7 @@ class MergeTreeInOrderSelectAlgorithm final : public MergeTreeSelectAlgorithm bool getNewTaskImpl() override; void finalizeNewTask() override {} - Poco::Logger * log = &Poco::Logger::get("MergeTreeInOrderSelectProcessor"); + LoggerPtr log = getLogger("MergeTreeInOrderSelectProcessor"); }; } diff --git a/src/Storages/MergeTree/MergeTreeMetadataCache.h b/src/Storages/MergeTree/MergeTreeMetadataCache.h index 57fb9ed88c46..f57c6c643e3b 100644 --- a/src/Storages/MergeTree/MergeTreeMetadataCache.h +++ b/src/Storages/MergeTree/MergeTreeMetadataCache.h @@ -3,6 +3,7 @@ #include "config.h" #if USE_ROCKSDB +#include #include #include #include @@ -36,7 +37,7 @@ class MergeTreeMetadataCache void shutdown(); private: std::unique_ptr rocksdb; - Poco::Logger * log = &Poco::Logger::get("MergeTreeMetadataCache"); + LoggerPtr log = getLogger("MergeTreeMetadataCache"); }; using MergeTreeMetadataCachePtr = std::shared_ptr; diff --git a/src/Storages/MergeTree/MergeTreePartsMover.h b/src/Storages/MergeTree/MergeTreePartsMover.h index 82fd271ee5f6..33989689c947 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.h +++ b/src/Storages/MergeTree/MergeTreePartsMover.h @@ -47,7 +47,7 @@ class 
MergeTreePartsMover explicit MergeTreePartsMover(MergeTreeData * data_) : data(data_) - , log(&Poco::Logger::get("MergeTreePartsMover")) + , log(getLogger("MergeTreePartsMover")) { } @@ -80,7 +80,7 @@ class MergeTreePartsMover private: MergeTreeData * data; - Poco::Logger * log; + LoggerPtr log; }; diff --git a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp index d76b8522f423..e7adf8979764 100644 --- a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp @@ -41,7 +41,7 @@ MergeTreePrefetchedReadPool::MergeTreePrefetchedReadPool( bool is_remote_read_, const MergeTreeSettings & storage_settings_) : WithContext(context_) - , log(&Poco::Logger::get("MergeTreePrefetchedReadPool(" + (parts_.empty() ? "" : parts_.front().data_part->storage.getStorageID().getNameForLogs()) + ")")) + , log(getLogger("MergeTreePrefetchedReadPool(" + (parts_.empty() ? "" : parts_.front().data_part->storage.getStorageID().getNameForLogs()) + ")")) , header(storage_snapshot_->getSampleBlockForColumns(column_names_)) , mark_cache(context_->getGlobalContext()->getMarkCache().get()) , uncompressed_cache(use_uncompressed_cache_ ? 
context_->getGlobalContext()->getUncompressedCache().get() : nullptr) diff --git a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.h b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.h index 98cfe28c5639..610e37547b57 100644 --- a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.h +++ b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.h @@ -74,7 +74,7 @@ class MergeTreePrefetchedReadPool : public IMergeTreeReadPool, private WithConte static std::string dumpTasks(const ThreadsTasks & tasks); - Poco::Logger * log; + LoggerPtr log; Block header; MarkCache * mark_cache; diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.h b/src/Storages/MergeTree/MergeTreeRangeReader.h index 5ffd464cfe27..6c5051ba71f0 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.h +++ b/src/Storages/MergeTree/MergeTreeRangeReader.h @@ -218,7 +218,7 @@ class MergeTreeRangeReader using RangesInfo = std::vector; - explicit ReadResult(Poco::Logger * log_) : log(log_) {} + explicit ReadResult(LoggerPtr log_) : log(log_) {} static size_t getLastMark(const MergeTreeRangeReader::ReadResult::RangesInfo & ranges); @@ -285,7 +285,7 @@ class MergeTreeRangeReader size_t countZeroTails(const IColumn::Filter & filter, NumRows & zero_tails, bool can_read_incomplete_granules) const; static size_t numZerosInTail(const UInt8 * begin, const UInt8 * end); - Poco::Logger * log; + LoggerPtr log; }; ReadResult read(size_t max_rows, MarkRanges & ranges); @@ -312,7 +312,7 @@ class MergeTreeRangeReader bool is_initialized = false; Names non_const_virtual_column_names; - Poco::Logger * log = &Poco::Logger::get("MergeTreeRangeReader"); + LoggerPtr log = getLogger("MergeTreeRangeReader"); }; } diff --git a/src/Storages/MergeTree/MergeTreeReadPool.h b/src/Storages/MergeTree/MergeTreeReadPool.h index b3356ec3351c..e484494976fc 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.h +++ b/src/Storages/MergeTree/MergeTreeReadPool.h @@ -157,7 +157,7 @@ class MergeTreeReadPool : public IMergeTreeReadPool 
std::vector threads_tasks; std::set remaining_thread_tasks; - Poco::Logger * log = &Poco::Logger::get("MergeTreeReadPool"); + LoggerPtr log = getLogger("MergeTreeReadPool"); }; @@ -212,7 +212,7 @@ class MergeTreeReadPoolParallelReplicas : public IMergeTreeReadPool RangesInDataPartsDescription buffered_ranges; size_t threads; bool no_more_tasks_available{false}; - Poco::Logger * log = &Poco::Logger::get("MergeTreeReadPoolParallelReplicas"); + LoggerPtr log = getLogger("MergeTreeReadPoolParallelReplicas"); std::mutex mutex; diff --git a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h index fd25748050a2..056304632e3a 100644 --- a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.h @@ -37,7 +37,7 @@ class MergeTreeReverseSelectAlgorithm final : public MergeTreeSelectAlgorithm /// Used for parallel replicas bool no_more_tasks{false}; - Poco::Logger * log = &Poco::Logger::get("MergeTreeReverseSelectProcessor"); + LoggerPtr log = getLogger("MergeTreeReverseSelectProcessor"); }; } diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index 4539e0b36c59..a68cca31d181 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -61,7 +61,7 @@ class MergeTreeSequentialSource : public ISource /// Should read using direct IO bool read_with_direct_io; - Poco::Logger * log = &Poco::Logger::get("MergeTreeSequentialSource"); + LoggerPtr log = getLogger("MergeTreeSequentialSource"); std::optional mark_ranges; @@ -276,7 +276,7 @@ class ReadFromPart final : public ISourceStep bool apply_deleted_mask_, ActionsDAGPtr filter_, ContextPtr context_, - Poco::Logger * log_) + LoggerPtr log_) : ISourceStep(DataStream{.header = storage_snapshot_->getSampleBlockForColumns(columns_to_read_)}) , storage(storage_) , 
storage_snapshot(storage_snapshot_) @@ -328,7 +328,7 @@ class ReadFromPart final : public ISourceStep bool apply_deleted_mask; ActionsDAGPtr filter; ContextPtr context; - Poco::Logger * log; + LoggerPtr log; }; void createMergeTreeSequentialSource( @@ -340,7 +340,7 @@ void createMergeTreeSequentialSource( bool apply_deleted_mask, ActionsDAGPtr filter, ContextPtr context, - Poco::Logger * log) + LoggerPtr log) { auto reading = std::make_unique( storage, storage_snapshot, std::move(data_part), std::move(columns_to_read), apply_deleted_mask, filter, std::move(context), log); diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.h b/src/Storages/MergeTree/MergeTreeSequentialSource.h index fb249568e8f7..ca1dee65b133 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.h +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.h @@ -31,6 +31,6 @@ void createMergeTreeSequentialSource( bool apply_deleted_mask, ActionsDAGPtr filter, ContextPtr context, - Poco::Logger * log); + LoggerPtr log); } diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index 479e50fdebbd..b0e67f844202 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -65,7 +65,7 @@ void MergeTreeSettings::loadFromQuery(ASTStorage & storage_def, ContextPtr conte if (ast && isDiskFunction(ast)) { auto disk_name = getOrCreateDiskFromDiskAST(ast, context); - LOG_TRACE(&Poco::Logger::get("MergeTreeSettings"), "Created custom disk {}", disk_name); + LOG_TRACE(getLogger("MergeTreeSettings"), "Created custom disk {}", disk_name); value = disk_name; } } diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index 1620ba98d589..bf899968f24b 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -28,7 +28,7 @@ MergeTreeWhereOptimizer::MergeTreeWhereOptimizer( const 
StorageMetadataPtr & metadata_snapshot, const Names & queried_columns_, const std::optional & supported_columns_, - Poco::Logger * log_) + LoggerPtr log_) : table_columns{collections::map( metadata_snapshot->getColumns().getAllPhysical(), [](const NameAndTypePair & col) { return col.name; })} , queried_columns{queried_columns_} diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.h b/src/Storages/MergeTree/MergeTreeWhereOptimizer.h index 18555a72db18..7b75f07ca49a 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.h +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.h @@ -39,7 +39,7 @@ class MergeTreeWhereOptimizer : private boost::noncopyable const StorageMetadataPtr & metadata_snapshot, const Names & queried_columns_, const std::optional & supported_columns_, - Poco::Logger * log_); + LoggerPtr log_); void optimize(SelectQueryInfo & select_query_info, const ContextPtr & context) const; @@ -141,7 +141,7 @@ class MergeTreeWhereOptimizer : private boost::noncopyable const Names queried_columns; const std::optional supported_columns; const NameSet sorting_key_names; - Poco::Logger * log; + LoggerPtr log; std::unordered_map column_sizes; UInt64 total_size_of_queried_columns = 0; }; diff --git a/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp b/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp index fabf2acdad3c..b332aa7ee9fc 100644 --- a/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp +++ b/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp @@ -35,7 +35,7 @@ MergeTreeWriteAheadLog::MergeTreeWriteAheadLog( , name(name_) , path(storage.getRelativeDataPath() + name_) , pool(storage.getContext()->getSchedulePool()) - , log(&Poco::Logger::get(storage.getLogName() + " (WriteAheadLog)")) + , log(getLogger(storage.getLogName() + " (WriteAheadLog)")) { init(); sync_task = pool.createTask("MergeTreeWriteAheadLog::sync", [this] diff --git a/src/Storages/MergeTree/MergeTreeWriteAheadLog.h b/src/Storages/MergeTree/MergeTreeWriteAheadLog.h index 
eba7698b9f9e..b107cfc0eae4 100644 --- a/src/Storages/MergeTree/MergeTreeWriteAheadLog.h +++ b/src/Storages/MergeTree/MergeTreeWriteAheadLog.h @@ -100,7 +100,7 @@ class MergeTreeWriteAheadLog mutable std::mutex write_mutex; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 903e4ae7af3c..4ec2f32874ad 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -159,7 +159,7 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( /// Finish columns serialization. writer->fillChecksums(checksums); - LOG_TRACE(&Poco::Logger::get("MergedBlockOutputStream"), "filled checksums {}", new_part->getNameWithState()); + LOG_TRACE(getLogger("MergedBlockOutputStream"), "filled checksums {}", new_part->getNameWithState()); for (const auto & [projection_name, projection_part] : new_part->getProjectionParts()) checksums.addFile( diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.h b/src/Storages/MergeTree/MutateFromLogEntryTask.h index c823df3b9993..a060bf48dedd 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.h +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.h @@ -20,7 +20,7 @@ class MutateFromLogEntryTask : public ReplicatedMergeMutateTaskBase StorageReplicatedMergeTree & storage_, Callback && task_result_callback_) : ReplicatedMergeMutateTaskBase( - &Poco::Logger::get(storage_.getStorageID().getShortName() + "::" + selected_entry_->log_entry->new_part_name + " (MutateFromLogEntryTask)"), + getLogger(storage_.getStorageID().getShortName() + "::" + selected_entry_->log_entry->new_part_name + " (MutateFromLogEntryTask)"), storage_, selected_entry_, task_result_callback_) diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 9bd0f148d6cd..a64a5025b0af 100644 --- 
a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -108,7 +108,7 @@ bool MutatePlainMergeTreeTask::executeStep() if (merge_mutate_entry->txn) merge_mutate_entry->txn->onException(); PreformattedMessage exception_message = getCurrentExceptionMessageAndPattern(/* with_stacktrace */ false); - LOG_ERROR(&Poco::Logger::get("MutatePlainMergeTreeTask"), exception_message); + LOG_ERROR(getLogger("MutatePlainMergeTreeTask"), exception_message); storage.updateMutationEntriesErrors(future_part, false, exception_message.text); write_part_log(ExecutionStatus::fromCurrentException("", true)); tryLogCurrentException(__PRETTY_FUNCTION__); diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 55666614759f..11771ecfeb31 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -763,7 +763,7 @@ struct MutationContext TableLockHolder * holder; MergeListEntry * mutate_entry; - Poco::Logger * log{&Poco::Logger::get("MutateTask")}; + LoggerPtr log{getLogger("MutateTask")}; FutureMergedMutatedPartPtr future_part; MergeTreeData::DataPartPtr source_part; @@ -842,7 +842,7 @@ class MergeProjectionPartsTask : public IExecutableTask , projection(projection_) , block_num(block_num_) , ctx(ctx_) - , log(&Poco::Logger::get("MergeProjectionPartsTask")) + , log(getLogger("MergeProjectionPartsTask")) { LOG_DEBUG(log, "Selected {} projection_parts from {} to {}", parts.size(), parts.front()->name, parts.back()->name); level_parts[current_level] = std::move(parts); @@ -947,7 +947,7 @@ class MergeProjectionPartsTask : public IExecutableTask size_t & block_num; MutationContextPtr ctx; - Poco::Logger * log; + LoggerPtr log; std::map level_parts; size_t current_level = 0; diff --git a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp index 3ef064ff7439..9618997579ee 100644 --- 
a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp +++ b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp @@ -109,7 +109,7 @@ class DefaultCoordinator : public ParallelReplicasReadingCoordinator::ImplInterf /// Once all task will be done by some replica, it can steal tasks std::vector reading_state; - Poco::Logger * log = &Poco::Logger::get("DefaultCoordinator"); + LoggerPtr log = getLogger("DefaultCoordinator"); std::atomic state_initialized{false}; @@ -342,7 +342,7 @@ class InOrderCoordinator : public ParallelReplicasReadingCoordinator::ImplInterf Parts all_parts_to_read; - Poco::Logger * log = &Poco::Logger::get(fmt::format("{}{}", magic_enum::enum_name(mode), "Coordinator")); + LoggerPtr log = getLogger(fmt::format("{}{}", magic_enum::enum_name(mode), "Coordinator")); }; diff --git a/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.cpp b/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.cpp index 3b382b7b32d4..cf5a0523819b 100644 --- a/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.cpp +++ b/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.cpp @@ -20,7 +20,7 @@ PartMovesBetweenShardsOrchestrator::PartMovesBetweenShardsOrchestrator(StorageRe : storage(storage_) , zookeeper_path(storage.zookeeper_path) , logger_name(storage.getStorageID().getFullTableName() + " (PartMovesBetweenShardsOrchestrator)") - , log(&Poco::Logger::get(logger_name)) + , log(getLogger(logger_name)) , entries_znode_path(zookeeper_path + "/part_moves_shard") { /// Schedule pool is not designed for long-running tasks. TODO replace with a separate thread? 
diff --git a/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.h b/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.h index 24454b897afc..4f9b5725f301 100644 --- a/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.h +++ b/src/Storages/MergeTree/PartMovesBetweenShardsOrchestrator.h @@ -177,7 +177,7 @@ class PartMovesBetweenShardsOrchestrator String zookeeper_path; String logger_name; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; std::atomic need_stop{false}; BackgroundSchedulePool::TaskHolder task; diff --git a/src/Storages/MergeTree/PartitionPruner.cpp b/src/Storages/MergeTree/PartitionPruner.cpp index 61293888f10d..9abe9bc142e9 100644 --- a/src/Storages/MergeTree/PartitionPruner.cpp +++ b/src/Storages/MergeTree/PartitionPruner.cpp @@ -54,7 +54,7 @@ bool PartitionPruner::canBePruned(const IMergeTreeDataPart & part) { WriteBufferFromOwnString buf; part.partition.serializeText(part.storage, buf, FormatSettings{}); - LOG_TRACE(&Poco::Logger::get("PartitionPruner"), "Partition {} gets pruned", buf.str()); + LOG_TRACE(getLogger("PartitionPruner"), "Partition {} gets pruned", buf.str()); } } diff --git a/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.h b/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.h index d9a1cbff166b..91253531cb92 100644 --- a/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.h +++ b/src/Storages/MergeTree/ReplicatedMergeMutateTaskBase.h @@ -18,7 +18,7 @@ class ReplicatedMergeMutateTaskBase : public IExecutableTask { public: ReplicatedMergeMutateTaskBase( - Poco::Logger * log_, + LoggerPtr log_, StorageReplicatedMergeTree & storage_, ReplicatedMergeTreeQueue::SelectedEntryPtr & selected_entry_, IExecutableTask::TaskResultCallback & task_result_callback_) @@ -58,7 +58,7 @@ class ReplicatedMergeMutateTaskBase : public IExecutableTask ReplicatedMergeTreeQueue::SelectedEntryPtr selected_entry; ReplicatedMergeTreeLogEntry & entry; MergeList::EntryPtr merge_mutate_entry{nullptr}; - Poco::Logger * log; + 
LoggerPtr log; StorageReplicatedMergeTree & storage; /// ProfileEvents for current part will be stored here ProfileEvents::Counters profile_counters; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp index c859c9948186..67f8e1cd80c2 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.cpp @@ -14,7 +14,7 @@ namespace ErrorCodes ReplicatedMergeTreeAttachThread::ReplicatedMergeTreeAttachThread(StorageReplicatedMergeTree & storage_) : storage(storage_) , log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeAttachThread)") - , log(&Poco::Logger::get(log_name)) + , log(getLogger(log_name)) { task = storage.getContext()->getSchedulePool().createTask(log_name, [this] { run(); }); const auto storage_settings = storage.getSettings(); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h index a491a06d6a51..4f7b1026a69c 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeAttachThread.h @@ -35,7 +35,7 @@ class ReplicatedMergeTreeAttachThread BackgroundSchedulePool::TaskHolder task; std::string log_name; - Poco::Logger * log; + LoggerPtr log; std::atomic first_try_done{false}; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp index 0409cadc1e91..6e558c7dc3b7 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp @@ -24,7 +24,7 @@ namespace ErrorCodes ReplicatedMergeTreeCleanupThread::ReplicatedMergeTreeCleanupThread(StorageReplicatedMergeTree & storage_) : storage(storage_) , log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeCleanupThread)") - , 
log(&Poco::Logger::get(log_name)) + , log(getLogger(log_name)) { task = storage.getContext()->getSchedulePool().createTask(log_name, [this]{ run(); }); } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h index 35838625bbec..ea345d888c72 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h @@ -35,7 +35,7 @@ class ReplicatedMergeTreeCleanupThread private: StorageReplicatedMergeTree & storage; String log_name; - Poco::Logger * log; + LoggerPtr log; BackgroundSchedulePool::TaskHolder task; pcg64 rng{randomSeed()}; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp index 0882ff5a0bce..b94794145eb0 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp @@ -28,7 +28,7 @@ static const auto PART_CHECK_ERROR_SLEEP_MS = 5 * 1000; ReplicatedMergeTreePartCheckThread::ReplicatedMergeTreePartCheckThread(StorageReplicatedMergeTree & storage_) : storage(storage_) , log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreePartCheckThread)") - , log(&Poco::Logger::get(log_name)) + , log(getLogger(log_name)) { task = storage.getContext()->getSchedulePool().createTask(log_name, [this] { run(); }); task->schedule(); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h index 170b2ca1f60e..4a730c017bd2 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h @@ -102,7 +102,7 @@ class ReplicatedMergeTreePartCheckThread StorageReplicatedMergeTree & storage; String log_name; - Poco::Logger * log; + LoggerPtr log; using StringSet = std::set; using PartToCheck = std::pair; /// The 
name of the part and the minimum time to check (or zero, if not important). diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index c0aac96dd310..fc2a318e5002 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -34,7 +34,7 @@ ReplicatedMergeTreeQueue::ReplicatedMergeTreeQueue(StorageReplicatedMergeTree & zookeeper_path = storage.zookeeper_path; replica_path = storage.replica_path; logger_name = storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeQueue)"; - log = &Poco::Logger::get(logger_name); + log = getLogger(logger_name); } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index 36552129690a..58cbf4e2b63e 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -68,7 +68,7 @@ class ReplicatedMergeTreeQueue String zookeeper_path; String replica_path; String logger_name; - Poco::Logger * log = nullptr; + LoggerPtr log = nullptr; /// Protects the queue, future_parts and other queue state variables. 
mutable std::mutex state_mutex; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index 357b9e0125aa..1356c3bb2455 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -33,7 +33,7 @@ static String generateActiveNodeIdentifier() ReplicatedMergeTreeRestartingThread::ReplicatedMergeTreeRestartingThread(StorageReplicatedMergeTree & storage_) : storage(storage_) , log_name(storage.getStorageID().getFullTableName() + " (ReplicatedMergeTreeRestartingThread)") - , log(&Poco::Logger::get(log_name)) + , log(getLogger(log_name)) , active_node_identifier(generateActiveNodeIdentifier()) { const auto storage_settings = storage.getSettings(); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h index 573d5ef57cf9..81dfff0c7274 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h @@ -40,7 +40,7 @@ class ReplicatedMergeTreeRestartingThread private: StorageReplicatedMergeTree & storage; String log_name; - Poco::Logger * log; + LoggerPtr log; std::atomic need_stop {false}; /// The random data we wrote into `/replicas/me/is_active`. 
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index b0c23250a452..5d51414c4b76 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -45,7 +45,7 @@ struct ReplicatedMergeTreeSinkImpl::DelayedChunk { struct Partition { - Poco::Logger * log; + LoggerPtr log; MergeTreeDataWriter::TemporaryPart temp_part; UInt64 elapsed_ns; BlockIDsType block_id; @@ -57,7 +57,7 @@ struct ReplicatedMergeTreeSinkImpl::DelayedChunk ProfileEvents::Counters part_counters; Partition() = default; - Partition(Poco::Logger * log_, + Partition(LoggerPtr log_, MergeTreeDataWriter::TemporaryPart && temp_part_, UInt64 elapsed_ns_, BlockIDsType && block_id_, @@ -207,7 +207,7 @@ std::vector testSelfDeduplicate(std::vector data, std::vector::DelayedChunk::Partition part( - &Poco::Logger::get("testSelfDeduplicate"), MergeTreeDataWriter::TemporaryPart(), 0, std::move(hashes), std::move(block1), std::nullopt, std::move(profile_counters)); + getLogger("testSelfDeduplicate"), MergeTreeDataWriter::TemporaryPart(), 0, std::move(hashes), std::move(block1), std::nullopt, std::move(profile_counters)); part.filterSelfDuplicate(); @@ -280,7 +280,7 @@ ReplicatedMergeTreeSinkImpl::ReplicatedMergeTreeSinkImpl( , is_attach(is_attach_) , quorum_parallel(quorum_parallel_) , deduplicate(deduplicate_) - , log(&Poco::Logger::get(storage.getLogName() + " (Replicated OutputStream)")) + , log(getLogger(storage.getLogName() + " (Replicated OutputStream)")) , context(context_) , storage_snapshot(storage.getStorageSnapshotWithoutParts(metadata_snapshot)) { diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h index 3777a9f7285b..5906575e21ec 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h @@ -124,7 +124,7 @@ class ReplicatedMergeTreeSinkImpl : public SinkToStorage 
bool last_block_is_duplicate = false; using Logger = Poco::Logger; - Poco::Logger * log; + LoggerPtr log; ContextPtr context; StorageSnapshotPtr storage_snapshot; diff --git a/src/Storages/MergeTree/ZooKeeperRetries.h b/src/Storages/MergeTree/ZooKeeperRetries.h index 1a4d394857f1..409799a9b963 100644 --- a/src/Storages/MergeTree/ZooKeeperRetries.h +++ b/src/Storages/MergeTree/ZooKeeperRetries.h @@ -15,7 +15,7 @@ namespace ErrorCodes struct ZooKeeperRetriesInfo { ZooKeeperRetriesInfo() = default; - ZooKeeperRetriesInfo(std::string name_, Poco::Logger * logger_, UInt64 max_retries_, UInt64 initial_backoff_ms_, UInt64 max_backoff_ms_) + ZooKeeperRetriesInfo(std::string name_, LoggerPtr logger_, UInt64 max_retries_, UInt64 initial_backoff_ms_, UInt64 max_backoff_ms_) : name(std::move(name_)) , logger(logger_) , max_retries(max_retries_) @@ -25,7 +25,7 @@ struct ZooKeeperRetriesInfo } std::string name; - Poco::Logger * logger = nullptr; + LoggerPtr logger = nullptr; UInt64 max_retries = 0; UInt64 curr_backoff_ms = 0; UInt64 max_backoff_ms = 0; diff --git a/src/Storages/MessageQueueSink.cpp b/src/Storages/MessageQueueSink.cpp index 1aa19c9ccde5..4fb81d690707 100644 --- a/src/Storages/MessageQueueSink.cpp +++ b/src/Storages/MessageQueueSink.cpp @@ -20,7 +20,7 @@ MessageQueueSink::MessageQueueSink( void MessageQueueSink::onStart() { LOG_TEST( - &Poco::Logger::get("MessageQueueSink"), + getLogger("MessageQueueSink"), "Executing startup for MessageQueueSink"); initialize(); diff --git a/src/Storages/NATS/NATSConnection.cpp b/src/Storages/NATS/NATSConnection.cpp index 70b3599aa090..d7ad0cf8219e 100644 --- a/src/Storages/NATS/NATSConnection.cpp +++ b/src/Storages/NATS/NATSConnection.cpp @@ -13,7 +13,7 @@ static const auto RETRIES_MAX = 20; static const auto CONNECTED_TO_BUFFER_SIZE = 256; -NATSConnectionManager::NATSConnectionManager(const NATSConfiguration & configuration_, Poco::Logger * log_) +NATSConnectionManager::NATSConnectionManager(const NATSConfiguration & 
configuration_, LoggerPtr log_) : configuration(configuration_) , log(log_) , event_handler(loop.getLoop(), log) @@ -115,8 +115,8 @@ void NATSConnectionManager::connectImpl() } natsOptions_SetMaxReconnect(options, configuration.max_reconnect); natsOptions_SetReconnectWait(options, configuration.reconnect_wait); - natsOptions_SetDisconnectedCB(options, disconnectedCallback, log); - natsOptions_SetReconnectedCB(options, reconnectedCallback, log); + natsOptions_SetDisconnectedCB(options, disconnectedCallback, log.get()); + natsOptions_SetReconnectedCB(options, reconnectedCallback, log.get()); natsStatus status; { auto lock = event_handler.setThreadLocalLoop(); diff --git a/src/Storages/NATS/NATSConnection.h b/src/Storages/NATS/NATSConnection.h index b49070473b28..c350f395a927 100644 --- a/src/Storages/NATS/NATSConnection.h +++ b/src/Storages/NATS/NATSConnection.h @@ -24,7 +24,7 @@ struct NATSConfiguration class NATSConnectionManager { public: - NATSConnectionManager(const NATSConfiguration & configuration_, Poco::Logger * log_); + NATSConnectionManager(const NATSConfiguration & configuration_, LoggerPtr log_); ~NATSConnectionManager(); bool isConnected(); @@ -54,7 +54,7 @@ class NATSConnectionManager static void reconnectedCallback(natsConnection * nc, void * log); NATSConfiguration configuration; - Poco::Logger * log; + LoggerPtr log; UVLoop loop; NATSHandler event_handler; diff --git a/src/Storages/NATS/NATSConsumer.cpp b/src/Storages/NATS/NATSConsumer.cpp index c7b40973b72e..136cb13ddfac 100644 --- a/src/Storages/NATS/NATSConsumer.cpp +++ b/src/Storages/NATS/NATSConsumer.cpp @@ -21,7 +21,7 @@ NATSConsumer::NATSConsumer( StorageNATS & storage_, std::vector & subjects_, const String & subscribe_queue_name, - Poco::Logger * log_, + LoggerPtr log_, uint32_t queue_size_, const std::atomic & stopped_) : connection(connection_) diff --git a/src/Storages/NATS/NATSConsumer.h b/src/Storages/NATS/NATSConsumer.h index a6f950329aab..0bededf83431 100644 --- 
a/src/Storages/NATS/NATSConsumer.h +++ b/src/Storages/NATS/NATSConsumer.h @@ -24,7 +24,7 @@ class NATSConsumer StorageNATS & storage_, std::vector & subjects_, const String & subscribe_queue_name, - Poco::Logger * log_, + LoggerPtr log_, uint32_t queue_size_, const std::atomic & stopped_); @@ -57,7 +57,7 @@ class NATSConsumer StorageNATS & storage; std::vector subscriptions; std::vector subjects; - Poco::Logger * log; + LoggerPtr log; const std::atomic & stopped; bool subscribed = false; diff --git a/src/Storages/NATS/NATSHandler.cpp b/src/Storages/NATS/NATSHandler.cpp index 7006e5633a92..03f1fc1a4955 100644 --- a/src/Storages/NATS/NATSHandler.cpp +++ b/src/Storages/NATS/NATSHandler.cpp @@ -12,7 +12,7 @@ namespace DB static const auto MAX_THREAD_WORK_DURATION_MS = 60000; -NATSHandler::NATSHandler(uv_loop_t * loop_, Poco::Logger * log_) : +NATSHandler::NATSHandler(uv_loop_t * loop_, LoggerPtr log_) : loop(loop_), log(log_), loop_running(false), diff --git a/src/Storages/NATS/NATSHandler.h b/src/Storages/NATS/NATSHandler.h index e3894c888a3c..6f9ec398cfae 100644 --- a/src/Storages/NATS/NATSHandler.h +++ b/src/Storages/NATS/NATSHandler.h @@ -6,7 +6,7 @@ #include #include #include -#include +#include namespace DB { @@ -23,7 +23,7 @@ using LockPtr = std::unique_ptr>; class NATSHandler { public: - NATSHandler(uv_loop_t * loop_, Poco::Logger * log_); + NATSHandler(uv_loop_t * loop_, LoggerPtr log_); ~NATSHandler(); @@ -47,7 +47,7 @@ class NATSHandler private: uv_loop_t * loop; natsOptions * opts = nullptr; - Poco::Logger * log; + LoggerPtr log; std::atomic loop_running; std::atomic loop_state; diff --git a/src/Storages/NATS/NATSProducer.cpp b/src/Storages/NATS/NATSProducer.cpp index a8510149baf2..fb8abb016f80 100644 --- a/src/Storages/NATS/NATSProducer.cpp +++ b/src/Storages/NATS/NATSProducer.cpp @@ -23,7 +23,7 @@ NATSProducer::NATSProducer( const NATSConfiguration & configuration_, const String & subject_, std::atomic & shutdown_called_, - Poco::Logger * log_) + 
LoggerPtr log_) : AsynchronousMessageProducer(log_) , connection(configuration_, log_) , subject(subject_) diff --git a/src/Storages/NATS/NATSProducer.h b/src/Storages/NATS/NATSProducer.h index 0303d05969b2..6923553a551b 100644 --- a/src/Storages/NATS/NATSProducer.h +++ b/src/Storages/NATS/NATSProducer.h @@ -20,7 +20,7 @@ class NATSProducer : public AsynchronousMessageProducer const NATSConfiguration & configuration_, const String & subject_, std::atomic & shutdown_called_, - Poco::Logger * log_); + LoggerPtr log_); void produce(const String & message, size_t rows_in_message, const Columns & columns, size_t last_row) override; diff --git a/src/Storages/NATS/StorageNATS.cpp b/src/Storages/NATS/StorageNATS.cpp index aa4ec77b0d84..a063b522982d 100644 --- a/src/Storages/NATS/StorageNATS.cpp +++ b/src/Storages/NATS/StorageNATS.cpp @@ -58,7 +58,7 @@ StorageNATS::StorageNATS( , schema_name(getContext()->getMacros()->expand(nats_settings->nats_schema)) , num_consumers(nats_settings->nats_num_consumers.value) , max_rows_per_message(nats_settings->nats_max_rows_per_message) - , log(&Poco::Logger::get("StorageNATS (" + table_id_.table_name + ")")) + , log(getLogger("StorageNATS (" + table_id_.table_name + ")")) , semaphore(0, static_cast(num_consumers)) , queue_size(std::max(QUEUE_SIZE, static_cast(getMaxBlockSize()))) , is_attach(is_attach_) diff --git a/src/Storages/NATS/StorageNATS.h b/src/Storages/NATS/StorageNATS.h index 518d81fb1458..92e40f212308 100644 --- a/src/Storages/NATS/StorageNATS.h +++ b/src/Storages/NATS/StorageNATS.h @@ -78,7 +78,7 @@ class StorageNATS final : public IStorage, WithContext size_t num_consumers; size_t max_rows_per_message; - Poco::Logger * log; + LoggerPtr log; NATSConnectionManagerPtr connection; /// Connection for all consumers NATSConfiguration configuration; diff --git a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp index 8b11e6c61d00..a971f3adba7e 100644 --- 
a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp @@ -34,7 +34,7 @@ MaterializedPostgreSQLConsumer::MaterializedPostgreSQLConsumer( bool schema_as_a_part_of_table_name_, StorageInfos storages_info_, const String & name_for_logger) - : log(&Poco::Logger::get("PostgreSQLReplicaConsumer(" + name_for_logger + ")")) + : log(getLogger("PostgreSQLReplicaConsumer(" + name_for_logger + ")")) , context(context_) , replication_slot_name(replication_slot_name_) , publication_name(publication_name_) @@ -59,7 +59,7 @@ MaterializedPostgreSQLConsumer::StorageData::StorageData(const StorageInfo & sto : storage(storage_info.storage), buffer(storage_info.storage->getInMemoryMetadataPtr(), storage_info.attributes) { auto table_id = storage_info.storage->getStorageID(); - LOG_TRACE(&Poco::Logger::get("StorageMaterializedPostgreSQL"), + LOG_TRACE(getLogger("StorageMaterializedPostgreSQL"), "New buffer for table {}, number of attributes: {}, number if columns: {}, structure: {}", table_id.getNameForLogs(), buffer.attributes.size(), buffer.getColumnsNum(), buffer.description.sample_block.dumpStructure()); } diff --git a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h index 3412e6e422f4..5ffeeb430394 100644 --- a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h @@ -130,7 +130,7 @@ class MaterializedPostgreSQLConsumer return (static_cast(upper_half) << 32) + lower_half; } - Poco::Logger * log; + LoggerPtr log; ContextPtr context; const std::string replication_slot_name, publication_name; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 998db4ea79ec..102d5823b5b3 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ 
-68,7 +68,7 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( const MaterializedPostgreSQLSettings & replication_settings, bool is_materialized_postgresql_database_) : WithContext(context_->getGlobalContext()) - , log(&Poco::Logger::get("PostgreSQLReplicationHandler")) + , log(getLogger("PostgreSQLReplicationHandler")) , is_attach(is_attach_) , postgres_database(postgres_database_) , postgres_schema(replication_settings.materialized_postgresql_schema) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 10a196cf31b9..8dbcd37c4308 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -101,7 +101,7 @@ friend class TemporaryReplicationSlot; void assertInitialized() const; - Poco::Logger * log; + LoggerPtr log; /// If it is not attach, i.e. a create query, then if publication already exists - always drop it. bool is_attach; diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index d194c8b8201b..e9a91cb5c72e 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -61,7 +61,7 @@ StorageMaterializedPostgreSQL::StorageMaterializedPostgreSQL( std::unique_ptr replication_settings) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) - , log(&Poco::Logger::get("StorageMaterializedPostgreSQL(" + postgres::formatNameForLogs(remote_database_name, remote_table_name_) + ")")) + , log(getLogger("StorageMaterializedPostgreSQL(" + postgres::formatNameForLogs(remote_database_name, remote_table_name_) + ")")) , is_materialized_postgresql_database(false) , has_nested(false) , nested_context(makeNestedTableContext(context_->getGlobalContext())) @@ -102,7 +102,7 @@ StorageMaterializedPostgreSQL::StorageMaterializedPostgreSQL( const String & 
postgres_table_name) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) - , log(&Poco::Logger::get("StorageMaterializedPostgreSQL(" + postgres::formatNameForLogs(postgres_database_name, postgres_table_name) + ")")) + , log(getLogger("StorageMaterializedPostgreSQL(" + postgres::formatNameForLogs(postgres_database_name, postgres_table_name) + ")")) , is_materialized_postgresql_database(true) , has_nested(false) , nested_context(makeNestedTableContext(context_->getGlobalContext())) @@ -121,7 +121,7 @@ StorageMaterializedPostgreSQL::StorageMaterializedPostgreSQL( const String & postgres_table_name) : IStorage(StorageID(nested_storage_->getStorageID().database_name, nested_storage_->getStorageID().table_name)) , WithContext(context_->getGlobalContext()) - , log(&Poco::Logger::get("StorageMaterializedPostgreSQL(" + postgres::formatNameForLogs(postgres_database_name, postgres_table_name) + ")")) + , log(getLogger("StorageMaterializedPostgreSQL(" + postgres::formatNameForLogs(postgres_database_name, postgres_table_name) + ")")) , is_materialized_postgresql_database(true) , has_nested(true) , nested_context(makeNestedTableContext(context_->getGlobalContext())) @@ -142,7 +142,7 @@ StoragePtr StorageMaterializedPostgreSQL::createTemporary() const auto tmp_storage = DatabaseCatalog::instance().tryGetTable(tmp_table_id, nested_context); if (tmp_storage) { - LOG_TRACE(&Poco::Logger::get("MaterializedPostgreSQLStorage"), "Temporary table {} already exists, dropping", tmp_table_id.getNameForLogs()); + LOG_TRACE(getLogger("MaterializedPostgreSQLStorage"), "Temporary table {} already exists, dropping", tmp_table_id.getNameForLogs()); InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), getContext(), tmp_table_id, /* no delay */true); } diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h index af0adb10f9f6..da0397f17e08 100644 --- 
a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.h @@ -138,7 +138,7 @@ class StorageMaterializedPostgreSQL final : public IStorage, WithContext String getNestedTableName() const; - Poco::Logger * log; + LoggerPtr log; /// Not nullptr only for single MaterializedPostgreSQL storage, because for MaterializedPostgreSQL /// database engine there is one replication handler for all tables. diff --git a/src/Storages/RabbitMQ/RabbitMQConnection.cpp b/src/Storages/RabbitMQ/RabbitMQConnection.cpp index 13d065774a27..98ceba42676b 100644 --- a/src/Storages/RabbitMQ/RabbitMQConnection.cpp +++ b/src/Storages/RabbitMQ/RabbitMQConnection.cpp @@ -11,7 +11,7 @@ static const auto CONNECT_SLEEP = 200; static const auto RETRIES_MAX = 20; -RabbitMQConnection::RabbitMQConnection(const RabbitMQConfiguration & configuration_, Poco::Logger * log_) +RabbitMQConnection::RabbitMQConnection(const RabbitMQConfiguration & configuration_, LoggerPtr log_) : configuration(configuration_) , log(log_) , event_handler(loop.getLoop(), log) diff --git a/src/Storages/RabbitMQ/RabbitMQConnection.h b/src/Storages/RabbitMQ/RabbitMQConnection.h index 698230b16f4a..5adb64561948 100644 --- a/src/Storages/RabbitMQ/RabbitMQConnection.h +++ b/src/Storages/RabbitMQ/RabbitMQConnection.h @@ -22,7 +22,7 @@ struct RabbitMQConfiguration class RabbitMQConnection { public: - RabbitMQConnection(const RabbitMQConfiguration & configuration_, Poco::Logger * log_); + RabbitMQConnection(const RabbitMQConfiguration & configuration_, LoggerPtr log_); bool isConnected(); @@ -51,7 +51,7 @@ class RabbitMQConnection void disconnectImpl(bool immediately = false); RabbitMQConfiguration configuration; - Poco::Logger * log; + LoggerPtr log; UVLoop loop; /// Preserve order of destruction here: diff --git a/src/Storages/RabbitMQ/RabbitMQConsumer.cpp b/src/Storages/RabbitMQ/RabbitMQConsumer.cpp index 65063e004a5e..1b51e1ebb2f3 100644 --- 
a/src/Storages/RabbitMQ/RabbitMQConsumer.cpp +++ b/src/Storages/RabbitMQ/RabbitMQConsumer.cpp @@ -23,7 +23,7 @@ RabbitMQConsumer::RabbitMQConsumer( std::vector & queues_, size_t channel_id_base_, const String & channel_base_, - Poco::Logger * log_, + LoggerPtr log_, uint32_t queue_size_) : event_handler(event_handler_) , queues(queues_) diff --git a/src/Storages/RabbitMQ/RabbitMQConsumer.h b/src/Storages/RabbitMQ/RabbitMQConsumer.h index c7adb8562128..b87a3e095466 100644 --- a/src/Storages/RabbitMQ/RabbitMQConsumer.h +++ b/src/Storages/RabbitMQ/RabbitMQConsumer.h @@ -31,7 +31,7 @@ class RabbitMQConsumer std::vector & queues_, size_t channel_id_base_, const String & channel_base_, - Poco::Logger * log_, + LoggerPtr log_, uint32_t queue_size_); struct AckTracker @@ -95,7 +95,7 @@ class RabbitMQConsumer std::vector queues; const String channel_base; const size_t channel_id_base; - Poco::Logger * log; + LoggerPtr log; std::atomic stopped; String channel_id; diff --git a/src/Storages/RabbitMQ/RabbitMQHandler.cpp b/src/Storages/RabbitMQ/RabbitMQHandler.cpp index 745af0d20e3a..be352f26f7be 100644 --- a/src/Storages/RabbitMQ/RabbitMQHandler.cpp +++ b/src/Storages/RabbitMQ/RabbitMQHandler.cpp @@ -8,7 +8,7 @@ namespace DB /* The object of this class is shared between concurrent consumers (who share the same connection == share the same * event loop and handler). 
*/ -RabbitMQHandler::RabbitMQHandler(uv_loop_t * loop_, Poco::Logger * log_) : +RabbitMQHandler::RabbitMQHandler(uv_loop_t * loop_, LoggerPtr log_) : AMQP::LibUvHandler(loop_), loop(loop_), log(log_), diff --git a/src/Storages/RabbitMQ/RabbitMQHandler.h b/src/Storages/RabbitMQ/RabbitMQHandler.h index 4223732a4a07..244692cf8009 100644 --- a/src/Storages/RabbitMQ/RabbitMQHandler.h +++ b/src/Storages/RabbitMQ/RabbitMQHandler.h @@ -24,7 +24,7 @@ class RabbitMQHandler : public AMQP::LibUvHandler { public: - RabbitMQHandler(uv_loop_t * loop_, Poco::Logger * log_); + RabbitMQHandler(uv_loop_t * loop_, LoggerPtr log_); void onError(AMQP::TcpConnection * connection, const char * message) override; void onReady(AMQP::TcpConnection * connection) override; @@ -50,7 +50,7 @@ class RabbitMQHandler : public AMQP::LibUvHandler private: uv_loop_t * loop; - Poco::Logger * log; + LoggerPtr log; std::atomic connection_running, loop_running; std::atomic loop_state; diff --git a/src/Storages/RabbitMQ/RabbitMQProducer.cpp b/src/Storages/RabbitMQ/RabbitMQProducer.cpp index 246569060d00..7ad83213b9b1 100644 --- a/src/Storages/RabbitMQ/RabbitMQProducer.cpp +++ b/src/Storages/RabbitMQ/RabbitMQProducer.cpp @@ -31,7 +31,7 @@ RabbitMQProducer::RabbitMQProducer( const size_t channel_id_base_, const bool persistent_, std::atomic & shutdown_called_, - Poco::Logger * log_) + LoggerPtr log_) : AsynchronousMessageProducer(log_) , connection(configuration_, log_) , routing_keys(routing_keys_) diff --git a/src/Storages/RabbitMQ/RabbitMQProducer.h b/src/Storages/RabbitMQ/RabbitMQProducer.h index 70afbbb9b903..a790eda0d085 100644 --- a/src/Storages/RabbitMQ/RabbitMQProducer.h +++ b/src/Storages/RabbitMQ/RabbitMQProducer.h @@ -24,7 +24,7 @@ class RabbitMQProducer : public AsynchronousMessageProducer const size_t channel_id_base_, const bool persistent_, std::atomic & shutdown_called_, - Poco::Logger * log_); + LoggerPtr log_); void produce(const String & message, size_t rows_in_message, const Columns & 
columns, size_t last_row) override; diff --git a/src/Storages/RabbitMQ/RabbitMQSource.cpp b/src/Storages/RabbitMQ/RabbitMQSource.cpp index d755dff32021..c52321632001 100644 --- a/src/Storages/RabbitMQ/RabbitMQSource.cpp +++ b/src/Storages/RabbitMQ/RabbitMQSource.cpp @@ -67,7 +67,7 @@ RabbitMQSource::RabbitMQSource( , ack_in_suffix(ack_in_suffix_) , non_virtual_header(std::move(headers.first)) , virtual_header(std::move(headers.second)) - , log(&Poco::Logger::get("RabbitMQSource")) + , log(getLogger("RabbitMQSource")) , max_execution_time_ms(max_execution_time_) { storage.incrementReader(); diff --git a/src/Storages/RabbitMQ/RabbitMQSource.h b/src/Storages/RabbitMQ/RabbitMQSource.h index 5d2d8b256302..44ddc12a46f7 100644 --- a/src/Storages/RabbitMQ/RabbitMQSource.h +++ b/src/Storages/RabbitMQ/RabbitMQSource.h @@ -45,7 +45,7 @@ class RabbitMQSource : public ISource const Block non_virtual_header; const Block virtual_header; - Poco::Logger * log; + LoggerPtr log; RabbitMQConsumerPtr consumer; uint64_t max_execution_time_ms = 0; diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp index 7999d4af71a5..4a30895f1ecb 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp @@ -85,7 +85,7 @@ StorageRabbitMQ::StorageRabbitMQ( , persistent(rabbitmq_settings->rabbitmq_persistent.value) , use_user_setup(rabbitmq_settings->rabbitmq_queue_consume.value) , hash_exchange(num_consumers > 1 || num_queues > 1) - , log(&Poco::Logger::get("StorageRabbitMQ (" + table_id_.table_name + ")")) + , log(getLogger("StorageRabbitMQ (" + table_id_.table_name + ")")) , semaphore(0, static_cast(num_consumers)) , unique_strbase(getRandomName()) , queue_size(std::max(QUEUE_SIZE, static_cast(getMaxBlockSize()))) diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.h b/src/Storages/RabbitMQ/StorageRabbitMQ.h index c6cb340619c4..5ff681919f49 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.h +++ 
b/src/Storages/RabbitMQ/StorageRabbitMQ.h @@ -103,7 +103,7 @@ class StorageRabbitMQ final: public IStorage, WithContext bool use_user_setup; bool hash_exchange; - Poco::Logger * log; + LoggerPtr log; RabbitMQConnectionPtr connection; /// Connection for all consumers RabbitMQConfiguration configuration; diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index a69f26203e94..85e13c039808 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -138,7 +138,7 @@ StorageBuffer::StorageBuffer( , flush_thresholds(flush_thresholds_) , destination_id(destination_id_) , allow_materialized(allow_materialized_) - , log(&Poco::Logger::get("StorageBuffer (" + table_id_.getFullTableName() + ")")) + , log(getLogger("StorageBuffer (" + table_id_.getFullTableName() + ")")) , bg_pool(getContext()->getBufferFlushSchedulePool()) { StorageInMemoryMetadata storage_metadata; @@ -434,7 +434,7 @@ void StorageBuffer::read( } -static void appendBlock(Poco::Logger * log, const Block & from, Block & to) +static void appendBlock(LoggerPtr log, const Block & from, Block & to) { size_t rows = from.rows(); size_t old_rows = to.rows(); diff --git a/src/Storages/StorageBuffer.h b/src/Storages/StorageBuffer.h index 83d2376216b6..9946ae163e5d 100644 --- a/src/Storages/StorageBuffer.h +++ b/src/Storages/StorageBuffer.h @@ -157,7 +157,7 @@ friend class BufferSink; Writes lifetime_writes; Writes total_writes; - Poco::Logger * log; + LoggerPtr log; void flushAllBuffers(bool check_thresholds = true); bool flushBuffer(Buffer & buffer, bool check_thresholds, bool locked = false); diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 4dddd22fae5f..f3dad7f57a10 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -325,7 +325,7 @@ StorageDistributed::StorageDistributed( , remote_database(remote_database_) , remote_table(remote_table_) , 
remote_table_function_ptr(remote_table_function_ptr_) - , log(&Poco::Logger::get("StorageDistributed (" + id_.table_name + ")")) + , log(getLogger("StorageDistributed (" + id_.table_name + ")")) , owned_cluster(std::move(owned_cluster_)) , cluster_name(getContext()->getMacros()->expand(cluster_name_)) , has_sharding_key(sharding_key_) diff --git a/src/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h index 3a7fae447083..26819f1807ef 100644 --- a/src/Storages/StorageDistributed.h +++ b/src/Storages/StorageDistributed.h @@ -220,7 +220,7 @@ class StorageDistributed final : public IStorage, WithContext String remote_table; ASTPtr remote_table_function_ptr; - Poco::Logger * log; + LoggerPtr log; /// Used to implement TableFunctionRemote. std::shared_ptr owned_cluster; diff --git a/src/Storages/StorageExecutable.cpp b/src/Storages/StorageExecutable.cpp index 9fabf1a9fb64..5824a7ed73f8 100644 --- a/src/Storages/StorageExecutable.cpp +++ b/src/Storages/StorageExecutable.cpp @@ -79,7 +79,7 @@ StorageExecutable::StorageExecutable( : IStorage(table_id_) , settings(settings_) , input_queries(input_queries_) - , log(settings.is_executable_pool ? &Poco::Logger::get("StorageExecutablePool") : &Poco::Logger::get("StorageExecutable")) + , log(settings.is_executable_pool ? 
getLogger("StorageExecutablePool") : getLogger("StorageExecutable")) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns); diff --git a/src/Storages/StorageExecutable.h b/src/Storages/StorageExecutable.h index 2393920fa3cf..7ad1f2d2d59a 100644 --- a/src/Storages/StorageExecutable.h +++ b/src/Storages/StorageExecutable.h @@ -46,7 +46,7 @@ class StorageExecutable final : public IStorage private: ExecutableSettings settings; std::vector input_queries; - Poco::Logger * log; + LoggerPtr log; std::unique_ptr coordinator; }; diff --git a/src/Storages/StorageFile.h b/src/Storages/StorageFile.h index cf81a6e45d14..8b96fa030244 100644 --- a/src/Storages/StorageFile.h +++ b/src/Storages/StorageFile.h @@ -124,7 +124,7 @@ friend class partitionedstoragefilesink; mutable std::shared_timed_mutex rwlock; - Poco::Logger * log = &Poco::Logger::get("StorageFile"); + LoggerPtr log = getLogger("StorageFile"); /// Total number of bytes to read (sums for multiple files in case of globs). Needed for progress bar. 
size_t total_bytes_to_read = 0; diff --git a/src/Storages/StorageHudi.cpp b/src/Storages/StorageHudi.cpp index 1dd2684d59b8..f6d8d668b3a0 100644 --- a/src/Storages/StorageHudi.cpp +++ b/src/Storages/StorageHudi.cpp @@ -23,7 +23,7 @@ namespace ErrorCodes template HudiMetadataParser::HudiMetadataParser(const Configuration & configuration_, ContextPtr context_) - : configuration(configuration_), context(context_), log(&Poco::Logger::get("StorageHudi")) + : configuration(configuration_), context(context_), log(getLogger("StorageHudi")) { } diff --git a/src/Storages/StorageHudi.h b/src/Storages/StorageHudi.h index 2bdd5126dc3d..d7d9f82e8be4 100644 --- a/src/Storages/StorageHudi.h +++ b/src/Storages/StorageHudi.h @@ -26,7 +26,7 @@ class HudiMetadataParser private: Configuration configuration; ContextPtr context; - Poco::Logger * log; + LoggerPtr log; }; struct StorageHudiName diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp index 23bcdd23484f..dc60ab211553 100644 --- a/src/Storages/StorageJoin.cpp +++ b/src/Storages/StorageJoin.cpp @@ -103,7 +103,7 @@ void StorageJoin::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPt if (disk->exists(path)) disk->removeRecursive(path); else - LOG_INFO(&Poco::Logger::get("StorageJoin"), "Path {} is already removed from disk {}", path, disk->getName()); + LOG_INFO(getLogger("StorageJoin"), "Path {} is already removed from disk {}", path, disk->getName()); disk->createDirectories(path); disk->createDirectories(fs::path(path) / "tmp/"); diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index f570f132463e..7e9901ad93ae 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -251,7 +251,7 @@ StorageKeeperMap::StorageKeeperMap( , primary_key(primary_key_) , zookeeper_name(zkutil::extractZooKeeperName(root_path_)) , keys_limit(keys_limit_) - , log(&Poco::Logger::get(fmt::format("StorageKeeperMap ({})", table_id.getNameForLogs()))) + , 
log(getLogger(fmt::format("StorageKeeperMap ({})", table_id.getNameForLogs()))) { std::string path_prefix = context_->getConfigRef().getString("keeper_map_path_prefix", ""); if (path_prefix.empty()) diff --git a/src/Storages/StorageKeeperMap.h b/src/Storages/StorageKeeperMap.h index a16c662e547a..6a3f95c19e29 100644 --- a/src/Storages/StorageKeeperMap.h +++ b/src/Storages/StorageKeeperMap.h @@ -136,7 +136,7 @@ class StorageKeeperMap final : public IStorage, public IKeyValueEntity, WithCont mutable std::mutex init_mutex; mutable std::optional table_is_valid; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index abab2b2dc68c..aa539d3145e2 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -67,7 +67,7 @@ namespace ActionLocks extern const StorageActionBlockType PartsMove; } -static MergeTreeTransactionPtr tryGetTransactionForMutation(const MergeTreeMutationEntry & mutation, Poco::Logger * log = nullptr) +static MergeTreeTransactionPtr tryGetTransactionForMutation(const MergeTreeMutationEntry & mutation, LoggerPtr log = nullptr) { assert(!mutation.tid.isEmpty()); if (mutation.tid.isPrehistoric()) @@ -612,8 +612,9 @@ std::optional StorageMergeTree::getIncompleteMutationsS const auto & mutation_entry = current_mutation_it->second; - auto txn = tryGetTransactionForMutation(mutation_entry, log); + auto txn = tryGetTransactionForMutation(mutation_entry, log.load()); assert(txn || mutation_entry.tid.isPrehistoric()); + auto data_parts = getVisibleDataPartsVector(txn); for (const auto & data_part : data_parts) { @@ -729,7 +730,7 @@ CancellationCode StorageMergeTree::killMutation(const String & mutation_id) if (!to_kill) return CancellationCode::NotFound; - if (auto txn = tryGetTransactionForMutation(*to_kill, log)) + if (auto txn = tryGetTransactionForMutation(*to_kill, log.load())) { LOG_TRACE(log, "Cancelling transaction {} which had started mutation {}", 
to_kill->tid, mutation_id); TransactionLog::instance().rollbackTransaction(txn); @@ -1087,7 +1088,7 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMutate( } TransactionID first_mutation_tid = mutations_begin_it->second.tid; - MergeTreeTransactionPtr txn = tryGetTransactionForMutation(mutations_begin_it->second, log); + MergeTreeTransactionPtr txn = tryGetTransactionForMutation(mutations_begin_it->second, log.load()); assert(txn || first_mutation_tid.isPrehistoric()); if (txn) diff --git a/src/Storages/StorageMySQL.cpp b/src/Storages/StorageMySQL.cpp index 232ff87d9ed0..ad850f35fed7 100644 --- a/src/Storages/StorageMySQL.cpp +++ b/src/Storages/StorageMySQL.cpp @@ -62,7 +62,7 @@ StorageMySQL::StorageMySQL( , on_duplicate_clause{on_duplicate_clause_} , mysql_settings(mysql_settings_) , pool(std::make_shared(pool_)) - , log(&Poco::Logger::get("StorageMySQL (" + table_id_.table_name + ")")) + , log(getLogger("StorageMySQL (" + table_id_.table_name + ")")) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); diff --git a/src/Storages/StorageMySQL.h b/src/Storages/StorageMySQL.h index ae3d2f935b66..6b29771b843b 100644 --- a/src/Storages/StorageMySQL.h +++ b/src/Storages/StorageMySQL.h @@ -87,7 +87,7 @@ class StorageMySQL final : public IStorage, WithContext mysqlxx::PoolWithFailoverPtr pool; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/StoragePostgreSQL.cpp b/src/Storages/StoragePostgreSQL.cpp index 8e1a799fa07e..5816843173f6 100644 --- a/src/Storages/StoragePostgreSQL.cpp +++ b/src/Storages/StoragePostgreSQL.cpp @@ -67,7 +67,7 @@ StoragePostgreSQL::StoragePostgreSQL( , remote_table_schema(remote_table_schema_) , on_conflict(on_conflict_) , pool(std::move(pool_)) - , log(&Poco::Logger::get("StoragePostgreSQL (" + table_id_.table_name + ")")) + , log(getLogger("StoragePostgreSQL (" + table_id_.table_name + ")")) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); diff --git 
a/src/Storages/StoragePostgreSQL.h b/src/Storages/StoragePostgreSQL.h index b3ff342da105..f038bbab2644 100644 --- a/src/Storages/StoragePostgreSQL.h +++ b/src/Storages/StoragePostgreSQL.h @@ -72,7 +72,7 @@ class StoragePostgreSQL final : public IStorage String on_conflict; postgres::PoolWithFailoverPtr pool; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 1214ce7bcb33..d8471b36fd27 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -283,7 +283,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( attach, [this] (const std::string & name) { enqueuePartForCheck(name); }) , zookeeper_name(zkutil::extractZooKeeperName(zookeeper_path_)) - , zookeeper_path(zkutil::extractZooKeeperPath(zookeeper_path_, /* check_starts_with_slash */ !attach, log)) + , zookeeper_path(zkutil::extractZooKeeperPath(zookeeper_path_, /* check_starts_with_slash */ !attach, log.load())) , replica_name(replica_name_) , replica_path(fs::path(zookeeper_path) / "replicas" / replica_name_) , reader(*this) @@ -708,7 +708,7 @@ bool StorageReplicatedMergeTree::createTableIfNotExists(const StorageMetadataPtr else { auto metadata_drop_lock = zkutil::EphemeralNodeHolder::existing(drop_lock_path, *zookeeper); - if (!removeTableNodesFromZooKeeper(zookeeper, zookeeper_path, metadata_drop_lock, log)) + if (!removeTableNodesFromZooKeeper(zookeeper, zookeeper_path, metadata_drop_lock, log.load())) { /// Someone is recursively removing table right now, we cannot create new table until old one is removed continue; @@ -934,12 +934,12 @@ void StorageReplicatedMergeTree::drop() { /// Session could expire, get it again zookeeper = getZooKeeperIfTableShutDown(); - dropReplica(zookeeper, zookeeper_path, replica_name, log, getSettings(), &has_metadata_in_zookeeper); + dropReplica(zookeeper, zookeeper_path, replica_name, log.load(), getSettings(), 
&has_metadata_in_zookeeper); } } void StorageReplicatedMergeTree::dropReplica(zkutil::ZooKeeperPtr zookeeper, const String & zookeeper_path, const String & replica, - Poco::Logger * logger, MergeTreeSettingsPtr table_settings, std::optional * has_metadata_out) + LoggerPtr logger, MergeTreeSettingsPtr table_settings, std::optional * has_metadata_out) { if (zookeeper->expired()) throw Exception(ErrorCodes::TABLE_WAS_NOT_DROPPED, "Table was not dropped because ZooKeeper session has expired."); @@ -1058,7 +1058,7 @@ void StorageReplicatedMergeTree::dropReplica(zkutil::ZooKeeperPtr zookeeper, con } bool StorageReplicatedMergeTree::removeTableNodesFromZooKeeper(zkutil::ZooKeeperPtr zookeeper, - const String & zookeeper_path, const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock, Poco::Logger * logger) + const String & zookeeper_path, const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock, LoggerPtr logger) { bool completely_removed = false; @@ -3616,7 +3616,7 @@ void StorageReplicatedMergeTree::startBeingLeader() return; } - zkutil::checkNoOldLeaders(log, *zookeeper, fs::path(zookeeper_path) / "leader_election"); + zkutil::checkNoOldLeaders(log.load(), *zookeeper, fs::path(zookeeper_path) / "leader_election"); LOG_INFO(log, "Became leader"); is_leader = true; @@ -8268,7 +8268,7 @@ StorageReplicatedMergeTree::unlockSharedData(const IMergeTreeDataPart & part, co return unlockSharedDataByID( part.getUniqueId(), shared_id, part.name, replica_name, - part.getDataPartStorage().getDiskType(), zookeeper, *getSettings(), log, zookeeper_path, format_version); + part.getDataPartStorage().getDiskType(), zookeeper, *getSettings(), log.load(), zookeeper_path, format_version); } namespace @@ -8284,7 +8284,7 @@ namespace /// But sometimes we need an opposite. When we deleting all_0_0_0_1 it can be non replicated to other replicas, so we are the only owner of this part. /// In this case when we will drop all_0_0_0_1 we will drop blobs for all_0_0_0. 
But it will lead to dataloss. For such case we need to check that other replicas /// still need parent part. -std::pair getParentLockedBlobs(const ZooKeeperWithFaultInjectionPtr & zookeeper_ptr, const std::string & zero_copy_part_path_prefix, const std::string & part_info_str, MergeTreeDataFormatVersion format_version, Poco::Logger * log) +std::pair getParentLockedBlobs(const ZooKeeperWithFaultInjectionPtr & zookeeper_ptr, const std::string & zero_copy_part_path_prefix, const std::string & part_info_str, MergeTreeDataFormatVersion format_version, LoggerPtr log) { NameSet files_not_to_remove; @@ -8347,7 +8347,7 @@ std::pair getParentLockedBlobs(const ZooKeeperWithFaultInjectionP std::pair StorageReplicatedMergeTree::unlockSharedDataByID( String part_id, const String & table_uuid, const String & part_name, const String & replica_name_, const std::string & disk_type, const ZooKeeperWithFaultInjectionPtr & zookeeper_ptr, const MergeTreeSettings & settings, - Poco::Logger * logger, const String & zookeeper_path_old, MergeTreeDataFormatVersion data_format_version) + LoggerPtr logger, const String & zookeeper_path_old, MergeTreeDataFormatVersion data_format_version) { boost::replace_all(part_id, "/", "_"); @@ -9020,7 +9020,7 @@ bool StorageReplicatedMergeTree::removeSharedDetachedPart(DiskPtr disk, const St detached_replica_name, toString(disk->getDataSourceDescription().type), std::make_shared(zookeeper), local_context->getReplicatedMergeTreeSettings(), - &Poco::Logger::get("StorageReplicatedMergeTree"), + getLogger("StorageReplicatedMergeTree"), detached_zookeeper_path, MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING); diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 21df2f1c189e..ef6e503dbab9 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -229,11 +229,11 @@ class StorageReplicatedMergeTree final : public MergeTreeData /** Remove a specific 
replica from zookeeper. */ static void dropReplica(zkutil::ZooKeeperPtr zookeeper, const String & zookeeper_path, const String & replica, - Poco::Logger * logger, MergeTreeSettingsPtr table_settings = nullptr, std::optional * has_metadata_out = nullptr); + LoggerPtr logger, MergeTreeSettingsPtr table_settings = nullptr, std::optional * has_metadata_out = nullptr); /// Removes table from ZooKeeper after the last replica was dropped static bool removeTableNodesFromZooKeeper(zkutil::ZooKeeperPtr zookeeper, const String & zookeeper_path, - const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock, Poco::Logger * logger); + const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock, LoggerPtr logger); /// Schedules job to execute in background pool (merge, mutate, drop range and so on) bool scheduleDataProcessingJob(BackgroundJobsAssignee & assignee) override; @@ -273,7 +273,7 @@ class StorageReplicatedMergeTree final : public MergeTreeData const std::string & disk_type, const ZooKeeperWithFaultInjectionPtr & zookeeper_, const MergeTreeSettings & settings, - Poco::Logger * logger, + LoggerPtr logger, const String & zookeeper_path_old, MergeTreeDataFormatVersion data_format_version); diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index 5c3d5b3d3c8c..616388799617 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -1236,7 +1236,7 @@ void StorageS3::truncate(const ASTPtr & /* query */, const StorageMetadataPtr &, } for (const auto & error : response.GetResult().GetErrors()) - LOG_WARNING(&Poco::Logger::get("StorageS3"), "Failed to delete {}, error: {}", error.GetKey(), error.GetMessage()); + LOG_WARNING(getLogger("StorageS3"), "Failed to delete {}, error: {}", error.GetKey(), error.GetMessage()); } diff --git a/src/Storages/StorageS3.h b/src/Storages/StorageS3.h index 756247880392..f231c767c09b 100644 --- a/src/Storages/StorageS3.h +++ b/src/Storages/StorageS3.h @@ -220,7 +220,7 @@ class StorageS3Source : public ISource, 
WithContext std::shared_ptr file_iterator; size_t download_thread_num = 1; - Poco::Logger * log = &Poco::Logger::get("StorageS3Source"); + LoggerPtr log = getLogger("StorageS3Source"); ThreadPool create_reader_pool; ThreadPoolCallbackRunner create_reader_scheduler; diff --git a/src/Storages/StorageS3Cluster.cpp b/src/Storages/StorageS3Cluster.cpp index 5cefc065f217..ad0c66c34e6d 100644 --- a/src/Storages/StorageS3Cluster.cpp +++ b/src/Storages/StorageS3Cluster.cpp @@ -49,7 +49,7 @@ StorageS3Cluster::StorageS3Cluster( ContextPtr context_, bool structure_argument_was_provided_) : IStorageCluster(table_id_) - , log(&Poco::Logger::get("StorageS3Cluster (" + table_id_.table_name + ")")) + , log(getLogger("StorageS3Cluster (" + table_id_.table_name + ")")) , s3_configuration{configuration_} , cluster_name(configuration_.cluster_name) , format_name(configuration_.format) diff --git a/src/Storages/StorageS3Cluster.h b/src/Storages/StorageS3Cluster.h index 98a0bde260e2..155339b64657 100644 --- a/src/Storages/StorageS3Cluster.h +++ b/src/Storages/StorageS3Cluster.h @@ -48,7 +48,7 @@ class StorageS3Cluster : public IStorageCluster ClusterPtr getCluster(ContextPtr context) const override; private: - Poco::Logger * log; + LoggerPtr log; StorageS3::Configuration s3_configuration; String cluster_name; String format_name; diff --git a/src/Storages/StorageSQLite.cpp b/src/Storages/StorageSQLite.cpp index 10eba370d264..d0d30ea27583 100644 --- a/src/Storages/StorageSQLite.cpp +++ b/src/Storages/StorageSQLite.cpp @@ -41,7 +41,7 @@ StorageSQLite::StorageSQLite( , remote_table_name(remote_table_name_) , database_path(database_path_) , sqlite_db(sqlite_db_) - , log(&Poco::Logger::get("StorageSQLite (" + table_id_.table_name + ")")) + , log(getLogger("StorageSQLite (" + table_id_.table_name + ")")) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); diff --git a/src/Storages/StorageSQLite.h b/src/Storages/StorageSQLite.h index a021c00f627e..a4e2da5c7ab2 
100644 --- a/src/Storages/StorageSQLite.h +++ b/src/Storages/StorageSQLite.h @@ -46,7 +46,7 @@ class StorageSQLite final : public IStorage, public WithContext String remote_table_name; String database_path; SQLitePtr sqlite_db; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/StorageSet.cpp b/src/Storages/StorageSet.cpp index 7c5ba497ec9f..ce70dab68cba 100644 --- a/src/Storages/StorageSet.cpp +++ b/src/Storages/StorageSet.cpp @@ -165,7 +165,7 @@ void StorageSet::truncate(const ASTPtr &, const StorageMetadataPtr & metadata_sn if (disk->exists(path)) disk->removeRecursive(path); else - LOG_INFO(&Poco::Logger::get("StorageSet"), "Path {} is already removed from disk {}", path, disk->getName()); + LOG_INFO(getLogger("StorageSet"), "Path {} is already removed from disk {}", path, disk->getName()); disk->createDirectories(path); disk->createDirectories(fs::path(path) / "tmp/"); @@ -226,7 +226,7 @@ void StorageSetOrJoinBase::restoreFromFile(const String & file_path) finishInsert(); /// TODO Add speed, compressed bytes, data volume in memory, compression ratio ... Generalize all statistics logging in project. - LOG_INFO(&Poco::Logger::get("StorageSetOrJoinBase"), "Loaded from backup file {}. {} rows, {}. State has {} unique rows.", + LOG_INFO(getLogger("StorageSetOrJoinBase"), "Loaded from backup file {}. {} rows, {}. 
State has {} unique rows.", file_path, info.rows, ReadableSize(info.bytes), getSize(ctx)); } diff --git a/src/Storages/StorageStripeLog.cpp b/src/Storages/StorageStripeLog.cpp index 034ab5a7f438..1e3f3f68db40 100644 --- a/src/Storages/StorageStripeLog.cpp +++ b/src/Storages/StorageStripeLog.cpp @@ -274,7 +274,7 @@ StorageStripeLog::StorageStripeLog( , index_file_path(table_path + "index.mrk") , file_checker(disk, table_path + "sizes.json") , max_compress_block_size(context_->getSettings().max_compress_block_size) - , log(&Poco::Logger::get("StorageStripeLog")) + , log(getLogger("StorageStripeLog")) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); diff --git a/src/Storages/StorageStripeLog.h b/src/Storages/StorageStripeLog.h index 3f1b4ed0ad5c..f897afe5e0fb 100644 --- a/src/Storages/StorageStripeLog.h +++ b/src/Storages/StorageStripeLog.h @@ -108,7 +108,7 @@ friend class StripeLogSink; mutable std::shared_timed_mutex rwlock; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 81624dc65707..d29baf2e2ae9 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -360,7 +360,7 @@ namespace catch (const Poco::Exception & e) { LOG_TRACE( - &Poco::Logger::get("StorageURLSource"), + getLogger("StorageURLSource"), "HTTP HEAD request to `{}` failed at try {}/{}. 
" "Error: {}.", request_uri.toString(), @@ -379,16 +379,16 @@ namespace || (res.has("Content-Range") && res.get("Content-Range").starts_with("bytes")); if (supports_ranges) - LOG_TRACE(&Poco::Logger::get("StorageURLSource"), "HTTP Range is supported"); + LOG_TRACE(getLogger("StorageURLSource"), "HTTP Range is supported"); else - LOG_TRACE(&Poco::Logger::get("StorageURLSource"), "HTTP Range is not supported"); + LOG_TRACE(getLogger("StorageURLSource"), "HTTP Range is not supported"); if (supports_ranges && res.getStatus() == Poco::Net::HTTPResponse::HTTP_PARTIAL_CONTENT && res.hasContentLength()) { LOG_TRACE( - &Poco::Logger::get("StorageURLSource"), + getLogger("StorageURLSource"), "Using ParallelReadBuffer with {} workers with chunks of {} bytes", download_threads, settings.max_download_buffer_size); @@ -422,14 +422,14 @@ namespace catch (const Poco::Exception & e) { LOG_TRACE( - &Poco::Logger::get("StorageURLSource"), + getLogger("StorageURLSource"), "Failed to setup ParallelReadBuffer because of an exception:\n{}.\nFalling back to the single-threaded " "buffer", e.displayText()); } } - LOG_TRACE(&Poco::Logger::get("StorageURLSource"), "Using single-threaded read buffer"); + LOG_TRACE(getLogger("StorageURLSource"), "Using single-threaded read buffer"); return wrapReadBufferWithCompressionMethod( std::make_unique( @@ -1008,7 +1008,7 @@ StorageURLWithFailover::StorageURLWithFailover( { Poco::URI poco_uri(uri_option); context_->getRemoteHostFilter().checkURL(poco_uri); - LOG_DEBUG(&Poco::Logger::get("StorageURLDistributed"), "Adding URL option: {}", uri_option); + LOG_DEBUG(getLogger("StorageURLDistributed"), "Adding URL option: {}", uri_option); uri_options.emplace_back(uri_option); } } diff --git a/src/Storages/StorageXDBC.cpp b/src/Storages/StorageXDBC.cpp index 4dd4f122ae69..441678e5c6a8 100644 --- a/src/Storages/StorageXDBC.cpp +++ b/src/Storages/StorageXDBC.cpp @@ -45,7 +45,7 @@ StorageXDBC::StorageXDBC( , bridge_helper(bridge_helper_) , 
remote_database_name(remote_database_name_) , remote_table_name(remote_table_name_) - , log(&Poco::Logger::get("Storage" + bridge_helper->getName())) + , log(getLogger("Storage" + bridge_helper->getName())) { uri = bridge_helper->getMainURI().toString(); } diff --git a/src/Storages/StorageXDBC.h b/src/Storages/StorageXDBC.h index 4dd685b02863..3b5e11485e7a 100644 --- a/src/Storages/StorageXDBC.h +++ b/src/Storages/StorageXDBC.h @@ -46,7 +46,7 @@ class StorageXDBC : public IStorageURLBase std::string remote_database_name; std::string remote_table_name; - Poco::Logger * log; + LoggerPtr log; std::string getReadMethod() const override; diff --git a/src/Storages/System/StorageSystemDatabases.cpp b/src/Storages/System/StorageSystemDatabases.cpp index 4d1f6c171db5..e86a66554948 100644 --- a/src/Storages/System/StorageSystemDatabases.cpp +++ b/src/Storages/System/StorageSystemDatabases.cpp @@ -50,7 +50,7 @@ static String getEngineFull(const DatabasePtr & database) return {}; guard.reset(); - LOG_TRACE(&Poco::Logger::get("StorageSystemDatabases"), "Failed to lock database {} ({}), will retry", name, database->getUUID()); + LOG_TRACE(getLogger("StorageSystemDatabases"), "Failed to lock database {} ({}), will retry", name, database->getUUID()); } ASTPtr ast = database->getCreateDatabaseQuery(); diff --git a/src/Storages/System/StorageSystemStackTrace.cpp b/src/Storages/System/StorageSystemStackTrace.cpp index 26411bf3bcbc..47240c240e3e 100644 --- a/src/Storages/System/StorageSystemStackTrace.cpp +++ b/src/Storages/System/StorageSystemStackTrace.cpp @@ -221,7 +221,7 @@ namespace StorageSystemStackTrace::StorageSystemStackTrace(const StorageID & table_id_) : IStorage(table_id_) - , log(&Poco::Logger::get("StorageSystemStackTrace")) + , log(getLogger("StorageSystemStackTrace")) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(ColumnsDescription({ diff --git a/src/Storages/System/StorageSystemStackTrace.h b/src/Storages/System/StorageSystemStackTrace.h 
index 9133a86aa555..8da04b1fa60d 100644 --- a/src/Storages/System/StorageSystemStackTrace.h +++ b/src/Storages/System/StorageSystemStackTrace.h @@ -39,7 +39,7 @@ class StorageSystemStackTrace final : public IStorage protected: mutable std::mutex mutex; - Poco::Logger * log; + LoggerPtr log; }; } diff --git a/src/Storages/UVLoop.h b/src/Storages/UVLoop.h index 4945e1b56fac..dd1d64973d12 100644 --- a/src/Storages/UVLoop.h +++ b/src/Storages/UVLoop.h @@ -63,7 +63,7 @@ class UVLoop : public boost::noncopyable private: std::unique_ptr loop_ptr; - Poco::Logger * log = &Poco::Logger::get("UVLoop"); + LoggerPtr log = getLogger("UVLoop"); static void onUVWalkClosingCallback(uv_handle_t * handle, void *) { diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index 3471e4ea6bfc..95c953118e29 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -1155,7 +1155,7 @@ StorageWindowView::StorageWindowView( bool attach_) : IStorage(table_id_) , WithContext(context_->getGlobalContext()) - , log(&Poco::Logger::get(fmt::format("StorageWindowView({}.{})", table_id_.database_name, table_id_.table_name))) + , log(getLogger(fmt::format("StorageWindowView({}.{})", table_id_.database_name, table_id_.table_name))) , fire_signal_timeout_s(context_->getSettingsRef().wait_for_window_view_fire_signal_timeout.totalSeconds()) , clean_interval_usec(context_->getSettingsRef().window_view_clean_interval.totalMicroseconds()) { diff --git a/src/Storages/WindowView/StorageWindowView.h b/src/Storages/WindowView/StorageWindowView.h index 847a4945d0e7..e08eb0b074bc 100644 --- a/src/Storages/WindowView/StorageWindowView.h +++ b/src/Storages/WindowView/StorageWindowView.h @@ -177,7 +177,7 @@ class StorageWindowView final : public IStorage, WithContext const Block & getOutputHeader() const; private: - Poco::Logger * log; + LoggerPtr log; /// Stored query, e.g. 
SELECT * FROM * GROUP BY tumble(now(), *) ASTPtr select_query; diff --git a/src/TableFunctions/Hive/TableFunctionHive.h b/src/TableFunctions/Hive/TableFunctionHive.h index ec09a87a8762..11e4efcaa665 100644 --- a/src/TableFunctions/Hive/TableFunctionHive.h +++ b/src/TableFunctions/Hive/TableFunctionHive.h @@ -24,7 +24,7 @@ class TableFunctionHive : public ITableFunction void parseArguments(const ASTPtr & ast_function_, ContextPtr context_) override; private: - Poco::Logger * logger = &Poco::Logger::get("TableFunctionHive"); + LoggerPtr logger = getLogger("TableFunctionHive"); String cluster_name; String hive_metastore_url; diff --git a/tests/config/users.d/readonly.xml b/tests/config/users.d/readonly.xml index 0fe1e3fe6d94..799de11decfc 100644 --- a/tests/config/users.d/readonly.xml +++ b/tests/config/users.d/readonly.xml @@ -9,7 +9,8 @@ - + + ::1 127.0.0.1 diff --git a/tests/config/users.d/session_log_test.xml b/tests/config/users.d/session_log_test.xml index daddaa6e4b9f..f93b0efd8284 100644 --- a/tests/config/users.d/session_log_test.xml +++ b/tests/config/users.d/session_log_test.xml @@ -17,8 +17,9 @@ - - + + + ::1 127.0.0.1 diff --git a/utils/keeper-data-dumper/main.cpp b/utils/keeper-data-dumper/main.cpp index e82b21079fea..fb480bf360a9 100644 --- a/utils/keeper-data-dumper/main.cpp +++ b/utils/keeper-data-dumper/main.cpp @@ -58,7 +58,7 @@ int main(int argc, char *argv[]) Poco::Logger::root().setChannel(channel); Poco::Logger::root().setLevel("trace"); } - auto * logger = &Poco::Logger::get("keeper-dumper"); + auto logger = getLogger("keeper-dumper"); ResponsesQueue queue(std::numeric_limits::max()); SnapshotsQueue snapshots_queue{1}; CoordinationSettingsPtr settings = std::make_shared();