Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions programs/server/Server.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -157,6 +157,10 @@
# include <azure/core/diagnostics/logger.hpp>
#endif

#if USE_PARQUET
# include <Processors/Formats/Impl/ParquetFileMetaDataCache.h>
#endif


#include <incbin.h>
/// A minimal file used when the server is run without installation
Expand Down Expand Up @@ -316,6 +320,7 @@ namespace ServerSetting
extern const ServerSettingsUInt64 page_cache_max_size;
extern const ServerSettingsDouble page_cache_free_memory_ratio;
extern const ServerSettingsUInt64 page_cache_lookahead_blocks;
extern const ServerSettingsUInt64 input_format_parquet_metadata_cache_max_size;
}

}
Expand Down Expand Up @@ -2376,6 +2381,7 @@ try
dns_cache_updater->start();

auto replicas_reconnector = ReplicasReconnector::init(global_context);
ParquetFileMetaDataCache::instance()->setMaxSizeInBytes(server_settings[ServerSetting::input_format_parquet_metadata_cache_max_size]);

/// Set current database name before loading tables and databases because
/// system logs may copy global context.
Expand Down
1 change: 1 addition & 0 deletions src/Access/Common/AccessType.h
Original file line number Diff line number Diff line change
Expand Up @@ -182,6 +182,7 @@ enum class AccessType : uint8_t
M(SYSTEM_DROP_SCHEMA_CACHE, "SYSTEM DROP SCHEMA CACHE, DROP SCHEMA CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_FORMAT_SCHEMA_CACHE, "SYSTEM DROP FORMAT SCHEMA CACHE, DROP FORMAT SCHEMA CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_S3_CLIENT_CACHE, "SYSTEM DROP S3 CLIENT, DROP S3 CLIENT CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_PARQUET_METADATA_CACHE, "SYSTEM DROP PARQUET METADATA CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_CACHE, "DROP CACHE", GROUP, SYSTEM) \
M(SYSTEM_RELOAD_CONFIG, "RELOAD CONFIG", GLOBAL, SYSTEM_RELOAD) \
M(SYSTEM_RELOAD_USERS, "RELOAD USERS", GLOBAL, SYSTEM_RELOAD) \
Expand Down
3 changes: 2 additions & 1 deletion src/Common/ProfileEvents.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -968,7 +968,8 @@ The server successfully detected this situation and will download merged part fr
M(FilterTransformPassedRows, "Number of rows that passed the filter in the query", ValueType::Number) \
M(FilterTransformPassedBytes, "Number of bytes that passed the filter in the query", ValueType::Bytes) \
M(QueryPreempted, "How many times tasks are paused and waiting due to 'priority' setting", ValueType::Number) \

M(ParquetMetaDataCacheHits, "Number of times a parquet file metadata lookup hit the parquet metadata cache.", ValueType::Number) \
M(ParquetMetaDataCacheMisses, "Number of times a parquet file metadata lookup missed the parquet metadata cache.", ValueType::Number) \

#ifdef APPLY_FOR_EXTERNAL_EVENTS
#define APPLY_FOR_EVENTS(M) APPLY_FOR_BUILTIN_EVENTS(M) APPLY_FOR_EXTERNAL_EVENTS(M)
Expand Down
2 changes: 1 addition & 1 deletion src/Core/FormatFactorySettings.h
Original file line number Diff line number Diff line change
Expand Up @@ -1301,7 +1301,7 @@ Set the quoting rule for identifiers in SHOW CREATE query
DECLARE(IdentifierQuotingStyle, show_create_query_identifier_quoting_style, IdentifierQuotingStyle::Backticks, R"(
Set the quoting style for identifiers in SHOW CREATE query
)", 0) \

DECLARE(Bool, input_format_parquet_use_metadata_cache, true, R"(Enable parquet file metadata caching)", 0) \
// End of FORMAT_FACTORY_SETTINGS

#define OBSOLETE_FORMAT_SETTINGS(M, ALIAS) \
Expand Down
5 changes: 2 additions & 3 deletions src/Core/ServerSettings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1019,9 +1019,8 @@ namespace DB
<wait_dictionaries_load_at_startup>true</wait_dictionaries_load_at_startup>
```
)", 0) \
DECLARE(Bool, storage_shared_set_join_use_inner_uuid, false, "If enabled, an inner UUID is generated during the creation of SharedSet and SharedJoin. ClickHouse Cloud only", 0)


DECLARE(Bool, storage_shared_set_join_use_inner_uuid, false, "If enabled, an inner UUID is generated during the creation of SharedSet and SharedJoin. ClickHouse Cloud only", 0) \
DECLARE(UInt64, input_format_parquet_metadata_cache_max_size, 500000000, "Maximum size of parquet file metadata cache", 0) \
// clang-format on

/// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in dumpToSystemServerSettingsColumns below
Expand Down
5 changes: 5 additions & 0 deletions src/Core/SettingsChangesHistory.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,11 @@ const VersionToSettingsChangesMap & getSettingsChangesHistory()
{"parallel_replicas_for_cluster_engines", false, true, "New setting."},
/// Release closed. Please use 25.4
});
addSettingsChanges(settings_changes_history, "24.12.2.20000",
{
// Altinity Antalya modifications atop of 24.12
{"input_format_parquet_use_metadata_cache", true, true, "New setting, turned ON by default"}, // https://github.com/Altinity/ClickHouse/pull/586
});
addSettingsChanges(settings_changes_history, "25.2",
{
/// Release closed. Please use 25.3
Expand Down
15 changes: 15 additions & 0 deletions src/Interpreters/InterpreterSystemQuery.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,10 @@
#include <Formats/ProtobufSchemas.h>
#endif

#if USE_PARQUET
#include <Processors/Formats/Impl/ParquetFileMetaDataCache.h>
#endif

#if USE_AWS_S3
#include <IO/S3/Client.h>
#endif
Expand Down Expand Up @@ -419,6 +423,16 @@ BlockIO InterpreterSystemQuery::execute()
getContext()->clearQueryResultCache(query.query_result_cache_tag);
break;
}
case Type::DROP_PARQUET_METADATA_CACHE:
{
#if USE_PARQUET
getContext()->checkAccess(AccessType::SYSTEM_DROP_PARQUET_METADATA_CACHE);
ParquetFileMetaDataCache::instance()->clear();
break;
#else
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "The server was compiled without the support for Parquet");
#endif
}
case Type::DROP_COMPILED_EXPRESSION_CACHE:
#if USE_EMBEDDED_COMPILER
getContext()->checkAccess(AccessType::SYSTEM_DROP_COMPILED_EXPRESSION_CACHE);
Expand Down Expand Up @@ -1464,6 +1478,7 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster()
case Type::DROP_PAGE_CACHE:
case Type::DROP_SCHEMA_CACHE:
case Type::DROP_FORMAT_SCHEMA_CACHE:
case Type::DROP_PARQUET_METADATA_CACHE:
case Type::DROP_S3_CLIENT_CACHE:
{
required_access.emplace_back(AccessType::SYSTEM_DROP_CACHE);
Expand Down
1 change: 1 addition & 0 deletions src/Parsers/ASTSystemQuery.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -456,6 +456,7 @@ void ASTSystemQuery::formatImpl(WriteBuffer & ostr, const FormatSettings & setti
case Type::DROP_SKIPPING_INDEX_CACHE:
case Type::DROP_COMPILED_EXPRESSION_CACHE:
case Type::DROP_S3_CLIENT_CACHE:
case Type::DROP_PARQUET_METADATA_CACHE:
case Type::RESET_COVERAGE:
case Type::RESTART_REPLICAS:
case Type::JEMALLOC_PURGE:
Expand Down
1 change: 1 addition & 0 deletions src/Parsers/ASTSystemQuery.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ class ASTSystemQuery : public IAST, public ASTQueryWithOnCluster
DROP_SCHEMA_CACHE,
DROP_FORMAT_SCHEMA_CACHE,
DROP_S3_CLIENT_CACHE,
DROP_PARQUET_METADATA_CACHE,
STOP_LISTEN,
START_LISTEN,
RESTART_REPLICAS,
Expand Down
3 changes: 3 additions & 0 deletions src/Processors/Formats/IInputFormat.h
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,9 @@ class IInputFormat : public SourceWithKeyCondition

void needOnlyCount() { need_only_count = true; }

/// Set additional info/key/id related to underlying storage of the ReadBuffer
virtual void setStorageRelatedUniqueKey(const Settings & /*settings*/, const String & /*key*/) {}

protected:
ReadBuffer & getReadBuffer() const { chassert(in); return *in; }

Expand Down
76 changes: 75 additions & 1 deletion src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,9 @@
#if USE_PARQUET

#include <Columns/ColumnNullable.h>
#include <Core/Settings.h>
#include <Core/ServerSettings.h>
#include <Common/ProfileEvents.h>
#include <Common/logger_useful.h>
#include <Common/ThreadPool.h>
#include <Formats/FormatFactory.h>
Expand All @@ -28,13 +31,16 @@
#include <Common/FieldAccurateComparison.h>
#include <Processors/Formats/Impl/Parquet/ParquetRecordReader.h>
#include <Processors/Formats/Impl/Parquet/parquetBloomFilterHash.h>
#include <Processors/Formats/Impl/ParquetFileMetaDataCache.h>
#include <Interpreters/convertFieldToType.h>

#include <boost/algorithm/string/case_conv.hpp>

namespace ProfileEvents
{
extern const Event ParquetFetchWaitTimeMicroseconds;
extern const Event ParquetMetaDataCacheHits;
extern const Event ParquetMetaDataCacheMisses;
}

namespace CurrentMetrics
Expand All @@ -51,6 +57,16 @@ namespace CurrentMetrics
namespace DB
{

namespace Setting
{
extern const SettingsBool input_format_parquet_use_metadata_cache;
}

namespace ServerSetting
{
extern const ServerSettingsUInt64 input_format_parquet_metadata_cache_max_size;
}

namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
Expand Down Expand Up @@ -510,6 +526,49 @@ static std::vector<Range> getHyperrectangleForRowGroup(const parquet::FileMetaDa
return hyperrectangle;
}

/// Reads the parquet footer metadata directly from the underlying file, bypassing the
/// in-memory metadata cache. Lazily creates the arrow file adapter on first use.
std::shared_ptr<parquet::FileMetaData> ParquetBlockInputFormat::readMetadataFromFile()
{
    createArrowFileIfNotCreated();
    return parquet::ReadMetaData(arrow_file);
}

/// Returns the parquet footer metadata for this file, consulting the process-wide
/// ParquetFileMetaDataCache when both the user enabled it and a storage-unique cache
/// key was provided. Falls back to reading the footer from the file otherwise.
std::shared_ptr<parquet::FileMetaData> ParquetBlockInputFormat::getFileMetaData()
{
    /// The in-memory cache is only populated for remote files; for local file
    /// operations the key is never set even if the user enabled
    /// `input_format_parquet_use_metadata_cache`, so both conditions are required.
    const bool can_use_cache = metadata_cache.use_cache && !metadata_cache.key.empty();
    if (!can_use_cache)
        return readMetadataFromFile();

    auto load_metadata = [this] { return readMetadataFromFile(); };
    auto [file_metadata, was_loaded] = ParquetFileMetaDataCache::instance()->getOrSet(metadata_cache.key, load_metadata);

    /// `was_loaded == true` means the loader ran, i.e. the key was absent (a miss).
    ProfileEvents::increment(
        was_loaded ? ProfileEvents::ParquetMetaDataCacheMisses : ProfileEvents::ParquetMetaDataCacheHits);

    return file_metadata;
}

/// Idempotently creates the arrow file adapter over the input read buffer;
/// subsequent calls are no-ops once `arrow_file` is set.
void ParquetBlockInputFormat::createArrowFileIfNotCreated()
{
    if (!arrow_file)
    {
        /// Create arrow file adapter.
        /// TODO: Make the adapter do prefetching on IO threads, based on the full set of ranges that
        /// we'll need to read (which we know in advance). Use max_download_threads for that.
        arrow_file = asArrowFile(*in, format_settings, is_stopped, "Parquet", PARQUET_MAGIC_BYTES, /* avoid_buffering */ true);
    }
}

std::unordered_set<std::size_t> getBloomFilterFilteringColumnKeys(const KeyCondition::RPN & rpn)
{
std::unordered_set<std::size_t> column_keys;
Expand Down Expand Up @@ -609,7 +668,7 @@ void ParquetBlockInputFormat::initializeIfNeeded()
if (is_stopped)
return;

metadata = parquet::ReadMetaData(arrow_file);
metadata = getFileMetaData();
const bool prefetch_group = supportPrefetch();

std::shared_ptr<arrow::Schema> schema;
Expand Down Expand Up @@ -709,6 +768,8 @@ void ParquetBlockInputFormat::initializeIfNeeded()
}
}

bool has_row_groups_to_read = false;

auto skip_row_group_based_on_filters = [&](int row_group)
{
if (!format_settings.parquet.filter_push_down && !format_settings.parquet.bloom_filter_push_down)
Expand Down Expand Up @@ -758,7 +819,20 @@ void ParquetBlockInputFormat::initializeIfNeeded()
row_group_batches.back().total_bytes_compressed += row_group_size;
auto rows = adaptive_chunk_size(row_group);
row_group_batches.back().adaptive_chunk_size = rows ? rows : format_settings.parquet.max_block_size;

has_row_groups_to_read = true;
}

if (has_row_groups_to_read)
{
createArrowFileIfNotCreated();
}
}

/// Records the storage-specific unique key identifying the underlying file and
/// whether the user enabled the metadata cache; both are later consulted by
/// getFileMetaData() to decide if the ParquetFileMetaDataCache may be used.
void ParquetBlockInputFormat::setStorageRelatedUniqueKey(const Settings & settings, const String & key_)
{
    metadata_cache.use_cache = settings[Setting::input_format_parquet_use_metadata_cache];
    metadata_cache.key = key_;
}

void ParquetBlockInputFormat::initializeRowGroupBatchReader(size_t row_group_batch_idx)
Expand Down
14 changes: 14 additions & 0 deletions src/Processors/Formats/Impl/ParquetBlockInputFormat.h
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,8 @@ class ParquetBlockInputFormat : public IInputFormat

size_t getApproxBytesReadForChunk() const override { return previous_approx_bytes_read_for_chunk; }

void setStorageRelatedUniqueKey(const Settings & settings, const String & key_) override;

private:
Chunk read() override;

Expand All @@ -90,6 +92,11 @@ class ParquetBlockInputFormat : public IInputFormat

void threadFunction(size_t row_group_batch_idx);

void createArrowFileIfNotCreated();
std::shared_ptr<parquet::FileMetaData> readMetadataFromFile();

std::shared_ptr<parquet::FileMetaData> getFileMetaData();

inline bool supportPrefetch() const;

// Data layout in the file:
Expand Down Expand Up @@ -338,6 +345,13 @@ class ParquetBlockInputFormat : public IInputFormat
std::exception_ptr background_exception = nullptr;
std::atomic<int> is_stopped{0};
bool is_initialized = false;
struct Cache
{
String key;
bool use_cache = false;
};

Cache metadata_cache;
};

class ParquetSchemaReader : public ISchemaReader
Expand Down
20 changes: 20 additions & 0 deletions src/Processors/Formats/Impl/ParquetFileMetaDataCache.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
#include <Processors/Formats/Impl/ParquetFileMetaDataCache.h>

/// Must be `#if`, not `#ifdef`: USE_PARQUET is a 0/1 config macro (cmakedefine01),
/// so it is defined even when parquet support is disabled. With `#ifdef` this
/// translation unit would be compiled while the header's `#if USE_PARQUET` guard
/// hides the class declaration, breaking builds without parquet. This also matches
/// every other use site in this change (Server.cpp, InterpreterSystemQuery.cpp).
#if USE_PARQUET

namespace DB
{

/// Construct with size 0; the effective limit is applied later at server startup
/// via setMaxSizeInBytes() from the `input_format_parquet_metadata_cache_max_size`
/// server setting (see Server.cpp).
ParquetFileMetaDataCache::ParquetFileMetaDataCache()
    : CacheBase<String, parquet::FileMetaData>(0)
{}

/// Meyers singleton: initialization of the function-local static is thread-safe.
ParquetFileMetaDataCache * ParquetFileMetaDataCache::instance()
{
    static ParquetFileMetaDataCache instance;
    return &instance;
}

}

#endif
30 changes: 30 additions & 0 deletions src/Processors/Formats/Impl/ParquetFileMetaDataCache.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
#pragma once

#include "config.h"

#if USE_PARQUET

namespace parquet
{

/// Forward declaration; the full definition comes from the Apache Arrow parquet library.
class FileMetaData;

}

#include <Common/CacheBase.h>

namespace DB
{

/// Process-wide singleton cache mapping a storage-related unique key (supplied via
/// IInputFormat::setStorageRelatedUniqueKey) to parsed parquet footer metadata, so
/// repeated reads of the same file can skip re-reading and re-parsing the footer.
/// NOTE(review): populated only for remote files per the check in
/// ParquetBlockInputFormat::getFileMetaData — local reads bypass it.
class ParquetFileMetaDataCache : public CacheBase<String, parquet::FileMetaData>
{
public:
    static ParquetFileMetaDataCache * instance();

private:
    /// Private: obtain through instance(). The size limit is applied at server
    /// startup via setMaxSizeInBytes() (see Server.cpp).
    ParquetFileMetaDataCache();
};

}

#endif
Loading
Loading