diff --git a/ci/licenses_golden/excluded_files b/ci/licenses_golden/excluded_files
index 9c022b4adaf83..89c7c9da8dc63 100644
--- a/ci/licenses_golden/excluded_files
+++ b/ci/licenses_golden/excluded_files
@@ -172,6 +172,7 @@
 ../../../flutter/impeller/renderer/backend/vulkan/command_encoder_vk_unittests.cc
 ../../../flutter/impeller/renderer/backend/vulkan/command_pool_vk_unittests.cc
 ../../../flutter/impeller/renderer/backend/vulkan/context_vk_unittests.cc
+../../../flutter/impeller/renderer/backend/vulkan/descriptor_pool_vk_unittests.cc
 ../../../flutter/impeller/renderer/backend/vulkan/fence_waiter_vk_unittests.cc
 ../../../flutter/impeller/renderer/backend/vulkan/pass_bindings_cache_unittests.cc
 ../../../flutter/impeller/renderer/backend/vulkan/resource_manager_vk_unittests.cc
diff --git a/impeller/renderer/backend/vulkan/BUILD.gn b/impeller/renderer/backend/vulkan/BUILD.gn
index daec5d21bfe09..be72d061fb56c 100644
--- a/impeller/renderer/backend/vulkan/BUILD.gn
+++ b/impeller/renderer/backend/vulkan/BUILD.gn
@@ -12,6 +12,7 @@ impeller_component("vulkan_unittests") {
     "command_encoder_vk_unittests.cc",
     "command_pool_vk_unittests.cc",
     "context_vk_unittests.cc",
+    "descriptor_pool_vk_unittests.cc",
     "fence_waiter_vk_unittests.cc",
     "pass_bindings_cache_unittests.cc",
     "resource_manager_vk_unittests.cc",
diff --git a/impeller/renderer/backend/vulkan/command_encoder_vk.cc b/impeller/renderer/backend/vulkan/command_encoder_vk.cc
index 52e4c6c74a7b1..4aeedf67d74ee 100644
--- a/impeller/renderer/backend/vulkan/command_encoder_vk.cc
+++ b/impeller/renderer/backend/vulkan/command_encoder_vk.cc
@@ -16,11 +16,10 @@ namespace impeller {
 
 class TrackedObjectsVK {
  public:
-  explicit TrackedObjectsVK(
-      const std::weak_ptr<const DeviceHolder>& device_holder,
-      const std::shared_ptr<CommandPoolVK>& pool,
-      std::unique_ptr<GPUProbe> probe)
-      : desc_pool_(device_holder), probe_(std::move(probe)) {
+  explicit TrackedObjectsVK(const std::weak_ptr<const ContextVK>& context,
+                            const std::shared_ptr<CommandPoolVK>& pool,
+                            std::unique_ptr<GPUProbe> probe)
+      : desc_pool_(context), probe_(std::move(probe)) {
     if (!pool) {
       return;
     }
@@ -112,8 +111,7 @@ std::shared_ptr<CommandEncoderVK> CommandEncoderFactoryVK::Create() {
   if (!context) {
     return nullptr;
   }
-  auto& context_vk = ContextVK::Cast(*context);
-  auto recycler = context_vk.GetCommandPoolRecycler();
+  auto recycler = context->GetCommandPoolRecycler();
   if (!recycler) {
     return nullptr;
   }
@@ -123,9 +121,8 @@ std::shared_ptr<CommandEncoderVK> CommandEncoderFactoryVK::Create() {
   }
 
   auto tracked_objects = std::make_shared<TrackedObjectsVK>(
-      context_vk.GetDeviceHolder(), tls_pool,
-      context->GetGPUTracer()->CreateGPUProbe());
-  auto queue = context_vk.GetGraphicsQueue();
+      context, tls_pool, context->GetGPUTracer()->CreateGPUProbe());
+  auto queue = context->GetGraphicsQueue();
 
   if (!tracked_objects || !tracked_objects->IsValid() || !queue) {
     return nullptr;
@@ -140,15 +137,14 @@ std::shared_ptr<CommandEncoderVK> CommandEncoderFactoryVK::Create() {
   }
 
   if (label_.has_value()) {
-    context_vk.SetDebugName(tracked_objects->GetCommandBuffer(),
-                            label_.value());
+    context->SetDebugName(tracked_objects->GetCommandBuffer(), label_.value());
   }
   tracked_objects->GetGPUProbe().RecordCmdBufferStart(
       tracked_objects->GetCommandBuffer());
 
-  return std::make_shared<CommandEncoderVK>(context_vk.GetDeviceHolder(),
+  return std::make_shared<CommandEncoderVK>(context->GetDeviceHolder(),
                                             tracked_objects, queue,
-                                            context_vk.GetFenceWaiter());
+                                            context->GetFenceWaiter());
 }
 
 CommandEncoderVK::CommandEncoderVK(
diff --git a/impeller/renderer/backend/vulkan/command_encoder_vk.h b/impeller/renderer/backend/vulkan/command_encoder_vk.h
index ea8eb08328a6a..6b4cd076cc7d6 100644
--- a/impeller/renderer/backend/vulkan/command_encoder_vk.h
+++ b/impeller/renderer/backend/vulkan/command_encoder_vk.h
@@ -8,7 +8,6 @@
 #include <memory>
 #include <optional>
 
-#include "flutter/fml/macros.h"
 #include "impeller/renderer/backend/vulkan/command_pool_vk.h"
 #include "impeller/renderer/backend/vulkan/context_vk.h"
 #include "impeller/renderer/backend/vulkan/descriptor_pool_vk.h"
diff --git a/impeller/renderer/backend/vulkan/command_pool_vk.cc b/impeller/renderer/backend/vulkan/command_pool_vk.cc
index 53299c0facc18..731cc0c774fa5 100644
--- a/impeller/renderer/backend/vulkan/command_pool_vk.cc
+++ b/impeller/renderer/backend/vulkan/command_pool_vk.cc
@@ -8,7 +8,6 @@
 #include <memory>
 #include <utility>
 
-#include "fml/macros.h"
 #include "fml/thread_local.h"
 #include "fml/trace_event.h"
 #include "impeller/renderer/backend/vulkan/resource_manager_vk.h"
diff --git a/impeller/renderer/backend/vulkan/context_vk.cc b/impeller/renderer/backend/vulkan/context_vk.cc
index d57018ffdc523..6c1804dd74022 100644
--- a/impeller/renderer/backend/vulkan/context_vk.cc
+++ b/impeller/renderer/backend/vulkan/context_vk.cc
@@ -405,6 +405,13 @@ void ContextVK::Setup(Settings settings) {
     return;
   }
 
+  auto descriptor_pool_recycler =
+      std::make_shared<DescriptorPoolRecyclerVK>(weak_from_this());
+  if (!descriptor_pool_recycler) {
+    VALIDATION_LOG << "Could not create descriptor pool recycler.";
+    return;
+  }
+
   //----------------------------------------------------------------------------
   /// Fetch the queues.
   ///
@@ -436,6 +443,7 @@ void ContextVK::Setup(Settings settings) {
   fence_waiter_ = std::move(fence_waiter);
   resource_manager_ = std::move(resource_manager);
   command_pool_recycler_ = std::move(command_pool_recycler);
+  descriptor_pool_recycler_ = std::move(descriptor_pool_recycler);
   device_name_ = std::string(physical_device_properties.deviceName);
   is_valid_ = true;
 
diff --git a/impeller/renderer/backend/vulkan/context_vk.h b/impeller/renderer/backend/vulkan/context_vk.h
index 0b2911b8824d5..33bfcfa346916 100644
--- a/impeller/renderer/backend/vulkan/context_vk.h
+++ b/impeller/renderer/backend/vulkan/context_vk.h
@@ -34,6 +34,7 @@ class FenceWaiterVK;
 class ResourceManagerVK;
 class SurfaceContextVK;
 class GPUTracerVK;
+class DescriptorPoolRecyclerVK;
 
 class ContextVK final : public Context,
                         public BackendCast<ContextVK, Context>,
@@ -158,6 +159,10 @@ class ContextVK final : public Context,
 
   std::shared_ptr<CommandPoolRecyclerVK> GetCommandPoolRecycler() const;
 
+  std::shared_ptr<DescriptorPoolRecyclerVK> GetDescriptorPoolRecycler() const {
+    return descriptor_pool_recycler_;
+  }
+
   std::shared_ptr<GPUTracerVK> GetGPUTracer() const;
 
   void RecordFrameEndTime() const;
@@ -191,6 +196,7 @@ class ContextVK final : public Context,
   std::shared_ptr<fml::ConcurrentMessageLoop> raster_message_loop_;
   std::unique_ptr<fml::Thread> queue_submit_thread_;
   std::shared_ptr<GPUTracerVK> gpu_tracer_;
+  std::shared_ptr<DescriptorPoolRecyclerVK> descriptor_pool_recycler_;
   bool sync_presentation_ = false;
   const uint64_t hash_;
diff --git a/impeller/renderer/backend/vulkan/descriptor_pool_vk.cc b/impeller/renderer/backend/vulkan/descriptor_pool_vk.cc
index ee82614bfd6e7..347bd5b738f8e 100644
--- a/impeller/renderer/backend/vulkan/descriptor_pool_vk.cc
+++ b/impeller/renderer/backend/vulkan/descriptor_pool_vk.cc
@@ -3,43 +3,79 @@
 // found in the LICENSE file.
 
 #include "impeller/renderer/backend/vulkan/descriptor_pool_vk.h"
+#include <optional>
 
 #include "flutter/fml/trace_event.h"
+#include "impeller/base/allocation.h"
 #include "impeller/base/validation.h"
+#include "impeller/renderer/backend/vulkan/resource_manager_vk.h"
+#include "vulkan/vulkan_handles.hpp"
 
 namespace impeller {
 
+// Holds the descriptor pool in a background thread, recycling it when it is
+// no longer in use.
+class BackgroundDescriptorPoolVK final {
+ public:
+  BackgroundDescriptorPoolVK(BackgroundDescriptorPoolVK&&) = default;
+
+  explicit BackgroundDescriptorPoolVK(
+      vk::UniqueDescriptorPool&& pool,
+      uint32_t allocated_capacity,
+      std::weak_ptr<DescriptorPoolRecyclerVK> recycler)
+      : pool_(std::move(pool)),
+        allocated_capacity_(allocated_capacity),
+        recycler_(std::move(recycler)) {}
+
+  ~BackgroundDescriptorPoolVK() {
+    auto const recycler = recycler_.lock();
+
+    // Not only does this prevent recycling when the context is being
+    // destroyed, but it also prevents the destructor from effectively being
+    // called twice; once for the original BackgroundDescriptorPoolVK() and
+    // once for the moved BackgroundDescriptorPoolVK().
+    if (!recycler) {
+      return;
+    }
+
+    recycler->Reclaim(std::move(pool_), allocated_capacity_);
+  }
+
+ private:
+  BackgroundDescriptorPoolVK(const BackgroundDescriptorPoolVK&) = delete;
+
+  BackgroundDescriptorPoolVK& operator=(const BackgroundDescriptorPoolVK&) =
+      delete;
+
+  vk::UniqueDescriptorPool pool_;
+  uint32_t allocated_capacity_;
+  std::weak_ptr<DescriptorPoolRecyclerVK> recycler_;
+};
+
 DescriptorPoolVK::DescriptorPoolVK(
-    const std::weak_ptr<const DeviceHolder>& device_holder)
-    : device_holder_(device_holder) {
-  FML_DCHECK(device_holder.lock());
+    const std::weak_ptr<const ContextVK>& context)
+    : context_(context) {
+  FML_DCHECK(context.lock());
 }
 
-DescriptorPoolVK::~DescriptorPoolVK() = default;
+DescriptorPoolVK::~DescriptorPoolVK() {
+  if (!pool_) {
+    return;
+  }
 
-static vk::UniqueDescriptorPool CreatePool(const vk::Device& device,
-                                           uint32_t image_count,
-                                           uint32_t buffer_count) {
-  TRACE_EVENT0("impeller", "CreateDescriptorPool");
-  std::vector<vk::DescriptorPoolSize> pools = {};
-  if (image_count > 0) {
-    pools.emplace_back(vk::DescriptorPoolSize{
-        vk::DescriptorType::eCombinedImageSampler, image_count});
+  auto const context = context_.lock();
+  if (!context) {
+    return;
   }
-  if (buffer_count > 0) {
-    pools.emplace_back(vk::DescriptorPoolSize{
-        vk::DescriptorType::eUniformBuffer, buffer_count});
-    pools.emplace_back(vk::DescriptorPoolSize{
-        vk::DescriptorType::eStorageBuffer, buffer_count});
+
+  auto const recycler = context->GetDescriptorPoolRecycler();
+  if (!recycler) {
+    return;
   }
-  vk::DescriptorPoolCreateInfo pool_info;
-  pool_info.setMaxSets(image_count + buffer_count);
-  pool_info.setPoolSizes(pools);
-  auto [result, pool] = device.createDescriptorPoolUnique(pool_info);
-  if (result != vk::Result::eSuccess) {
-    VALIDATION_LOG << "Unable to create a descriptor pool";
-  }
-  return std::move(pool);
+
+  auto reset_pool_when_dropped = BackgroundDescriptorPoolVK(
+      std::move(pool_), allocated_capacity_, recycler);
+
+  UniqueResourceVKT<BackgroundDescriptorPoolVK> pool(
+      context->GetResourceManager(), std::move(reset_pool_when_dropped));
 }
 
 fml::StatusOr<std::vector<vk::DescriptorSet>>
@@ -47,25 +83,26 @@ DescriptorPoolVK::AllocateDescriptorSets(
     uint32_t buffer_count,
     uint32_t sampler_count,
     const std::vector<vk::DescriptorSetLayout>& layouts) {
-  std::shared_ptr<const DeviceHolder> strong_device = device_holder_.lock();
-  if (!strong_device) {
+  std::shared_ptr<const ContextVK> strong_context = context_.lock();
+  if (!strong_context) {
     return fml::Status(fml::StatusCode::kUnknown, "No device");
   }
-
-  auto new_pool =
-      CreatePool(strong_device->GetDevice(), sampler_count, buffer_count);
+  auto minimum_capacity = std::max(sampler_count, buffer_count);
+  auto [new_pool, capacity] =
+      strong_context->GetDescriptorPoolRecycler()->Get(minimum_capacity);
   if (!new_pool) {
     return fml::Status(fml::StatusCode::kUnknown,
                        "Failed to create descriptor pool");
   }
   pool_ = std::move(new_pool);
+  allocated_capacity_ = capacity;
 
   vk::DescriptorSetAllocateInfo set_info;
   set_info.setDescriptorPool(pool_.get());
   set_info.setSetLayouts(layouts);
 
   auto [result, sets] =
-      strong_device->GetDevice().allocateDescriptorSets(set_info);
+      strong_context->GetDevice().allocateDescriptorSets(set_info);
   if (result != vk::Result::eSuccess) {
     VALIDATION_LOG << "Could not allocate descriptor sets: "
                    << vk::to_string(result);
@@ -74,4 +111,114 @@ DescriptorPoolVK::AllocateDescriptorSets(
   return sets;
 }
 
+void DescriptorPoolRecyclerVK::Reclaim(vk::UniqueDescriptorPool&& pool,
+                                       uint32_t allocated_capacity) {
+  TRACE_EVENT0("impeller", "DescriptorPoolRecyclerVK::Reclaim");
+
+  // Reset the pool on a background thread.
+  auto strong_context = context_.lock();
+  if (!strong_context) {
+    return;
+  }
+  auto device = strong_context->GetDevice();
+  device.resetDescriptorPool(pool.get());
+
+  // Move the pool to the recycled list.
+  Lock recycled_lock(recycled_mutex_);
+
+  if (recycled_.size() < kMaxRecycledPools) {
+    recycled_.push_back(std::make_pair(std::move(pool), allocated_capacity));
+    return;
+  }
+
+  // If the recycled list has reached its maximum size of 32, then we need to
+  // remove a pool from the list. If we were to simply drop this pool, there
+  // is a risk that the list of recycled descriptor pools could fill up with
+  // pools that are too small to reuse. This would lead to all subsequent
+  // descriptor allocations no longer being recycled. Instead, we pick the
+  // first descriptor pool with a smaller capacity than the resetting pool to
+  // drop. This may result in us dropping the current pool instead.
+  std::optional<size_t> selected_index = std::nullopt;
+  for (auto i = 0u; i < recycled_.size(); i++) {
+    const auto& [_, capacity] = recycled_[i];
+    if (capacity < allocated_capacity) {
+      selected_index = i;
+      break;
+    }
+  }
+  if (selected_index.has_value()) {
+    recycled_[selected_index.value()] =
+        std::make_pair(std::move(pool), allocated_capacity);
+  }
+  // If selected_index has no value, then no pools had a smaller capacity than
+  // this one and we drop it instead.
+}
+
+DescriptorPoolAndSize DescriptorPoolRecyclerVK::Get(uint32_t minimum_capacity) {
+  // See the note in the DescriptorPoolRecyclerVK doc comment.
+  auto rounded_capacity =
+      std::max(Allocation::NextPowerOfTwoSize(minimum_capacity), 64u);
+
+  // Reuse a pool with a matching minimum capacity if one is available.
+  auto recycled_pool = Reuse(rounded_capacity);
+  if (recycled_pool.has_value()) {
+    return std::move(recycled_pool.value());
+  }
+  return Create(rounded_capacity);
+}
+
+DescriptorPoolAndSize DescriptorPoolRecyclerVK::Create(
+    uint32_t minimum_capacity) {
+  TRACE_EVENT0("impeller", "DescriptorPoolRecyclerVK::Create");
+
+  FML_DCHECK(Allocation::NextPowerOfTwoSize(minimum_capacity) ==
+             minimum_capacity);
+  auto strong_context = context_.lock();
+  if (!strong_context) {
+    VALIDATION_LOG << "Unable to create a descriptor pool";
+    return {};
+  }
+
+  std::vector<vk::DescriptorPoolSize> pools = {
+      vk::DescriptorPoolSize{vk::DescriptorType::eCombinedImageSampler,
+                             minimum_capacity},
+      vk::DescriptorPoolSize{vk::DescriptorType::eUniformBuffer,
+                             minimum_capacity},
+      vk::DescriptorPoolSize{vk::DescriptorType::eStorageBuffer,
+                             minimum_capacity}};
+  vk::DescriptorPoolCreateInfo pool_info;
+  pool_info.setMaxSets(minimum_capacity + minimum_capacity);
+  pool_info.setPoolSizes(pools);
+  auto [result, pool] =
+      strong_context->GetDevice().createDescriptorPoolUnique(pool_info);
+  if (result != vk::Result::eSuccess) {
+    VALIDATION_LOG << "Unable to create a descriptor pool";
+  }
+  return std::make_pair(std::move(pool), minimum_capacity);
+}
+
+std::optional<DescriptorPoolAndSize> DescriptorPoolRecyclerVK::Reuse(
+    uint32_t minimum_capacity) {
+  TRACE_EVENT0("impeller", "DescriptorPoolRecyclerVK::Reuse");
+
+  FML_DCHECK(Allocation::NextPowerOfTwoSize(minimum_capacity) ==
+             minimum_capacity);
+  Lock lock(recycled_mutex_);
+
+  std::optional<size_t> found_index = std::nullopt;
+  for (auto i = 0u; i < recycled_.size(); i++) {
+    const auto& [_, capacity] = recycled_[i];
+    if (capacity >= minimum_capacity) {
+      found_index = i;
+      break;
+    }
+  }
+  if (!found_index.has_value()) {
+    return std::nullopt;
+  }
+  auto pair = std::move(recycled_[found_index.value()]);
+  recycled_.erase(recycled_.begin() + found_index.value());
+  return pair;
+}
+
 }  // namespace impeller
diff --git a/impeller/renderer/backend/vulkan/descriptor_pool_vk.h b/impeller/renderer/backend/vulkan/descriptor_pool_vk.h
index d5e71a10969f6..e8a3e440c4695 100644
--- a/impeller/renderer/backend/vulkan/descriptor_pool_vk.h
+++ b/impeller/renderer/backend/vulkan/descriptor_pool_vk.h
@@ -6,9 +6,9 @@
 
 #include <memory>
 
-#include "flutter/fml/macros.h"
 #include "fml/status_or.h"
-#include "impeller/renderer/backend/vulkan/device_holder.h"
+#include "impeller/renderer/backend/vulkan/context_vk.h"
+#include "vulkan/vulkan_handles.hpp"
 
 namespace impeller {
 
@@ -25,8 +25,7 @@ namespace impeller {
 ///             threading and lifecycle restrictions.
 class DescriptorPoolVK {
  public:
-  explicit DescriptorPoolVK(
-      const std::weak_ptr<const DeviceHolder>& device_holder);
+  explicit DescriptorPoolVK(const std::weak_ptr<const ContextVK>& context);
 
   ~DescriptorPoolVK();
 
@@ -36,12 +35,86 @@ class DescriptorPoolVK {
       const std::vector<vk::DescriptorSetLayout>& layouts);
 
  private:
-  std::weak_ptr<const DeviceHolder> device_holder_;
+  std::weak_ptr<const ContextVK> context_;
   vk::UniqueDescriptorPool pool_ = {};
+  uint32_t allocated_capacity_ = 0;
 
   DescriptorPoolVK(const DescriptorPoolVK&) = delete;
 
   DescriptorPoolVK& operator=(const DescriptorPoolVK&) = delete;
 };
 
+// A descriptor pool and its allocated buffer/sampler capacity.
+using DescriptorPoolAndSize = std::pair<vk::UniqueDescriptorPool, uint32_t>;
+
+//------------------------------------------------------------------------------
+/// @brief      Creates and manages the lifecycle of |vk::DescriptorPool|
+///             objects.
+///
+/// To make descriptor pool recycling more effective, the number of requested
+/// descriptor slots is rounded up to the nearest power of two. This also
+/// makes determining whether a recycled pool has sufficient slots easier, as
+/// only a single number comparison is required.
+///
+/// We round up to a minimum of 64 as the smallest power of two to reduce the
+/// range of potential allocations to approximately: 64, 128, 256, 512, 1024,
+/// 2048, 4096. Beyond this size applications will have far too many drawing
+/// commands to render correctly. We also limit the number of cached
+/// descriptor pools to 32, which is somewhat arbitrarily chosen; but given
+/// 2-ish frames in flight, that is about 16 descriptor pools per frame, which
+/// is extremely generous.
+class DescriptorPoolRecyclerVK final
+    : public std::enable_shared_from_this<DescriptorPoolRecyclerVK> {
+ public:
+  ~DescriptorPoolRecyclerVK() = default;
+
+  /// The maximum number of descriptor pools this recycler will hold onto.
+  static constexpr size_t kMaxRecycledPools = 32u;
+
+  /// @brief      Creates a recycler for the given |ContextVK|.
+  ///
+  /// @param[in]  context  The context to create the recycler for.
+  explicit DescriptorPoolRecyclerVK(std::weak_ptr<ContextVK> context)
+      : context_(std::move(context)) {}
+
+  /// @brief      Gets a descriptor pool with at least [minimum_capacity]
+  ///             sampler and buffer slots.
+  ///
+  ///             This may create a new descriptor pool if no existing pool
+  ///             had the necessary capacity.
+  DescriptorPoolAndSize Get(uint32_t minimum_capacity);
+
+  /// @brief      Returns the descriptor pool to be reset on a background
+  ///             thread.
+  ///
+  /// @param[in]  pool  The pool to recycle.
+  void Reclaim(vk::UniqueDescriptorPool&& pool, uint32_t allocated_capacity);
+
+ private:
+  std::weak_ptr<ContextVK> context_;
+
+  Mutex recycled_mutex_;
+  std::vector<DescriptorPoolAndSize> recycled_ IPLR_GUARDED_BY(recycled_mutex_);
+
+  /// @brief      Creates a new |vk::DescriptorPool|.
+  ///
+  ///             The descriptor pool will have at least [minimum_capacity]
+  ///             buffer and texture slots.
+  ///
+  /// @returns    A pool and capacity pair; the pool is null if it could not
+  ///             be created.
+  DescriptorPoolAndSize Create(uint32_t minimum_capacity);
+
+  /// @brief      Reuses a recycled |vk::DescriptorPool|, if available.
+  ///
+  ///             The descriptor pool will have at least [minimum_capacity]
+  ///             buffer and texture slots. [minimum_capacity] should be
+  ///             rounded up to the next power of two for more efficient cache
+  ///             reuse.
+  ///
+  /// @returns    Returns |std::nullopt| if no recycled pool was available.
+  std::optional<DescriptorPoolAndSize> Reuse(uint32_t minimum_capacity);
+
+  DescriptorPoolRecyclerVK(const DescriptorPoolRecyclerVK&) = delete;
+
+  DescriptorPoolRecyclerVK& operator=(const DescriptorPoolRecyclerVK&) = delete;
+};
+
 }  // namespace impeller
diff --git a/impeller/renderer/backend/vulkan/descriptor_pool_vk_unittests.cc b/impeller/renderer/backend/vulkan/descriptor_pool_vk_unittests.cc
new file mode 100644
index 0000000000000..99756e1325e95
--- /dev/null
+++ b/impeller/renderer/backend/vulkan/descriptor_pool_vk_unittests.cc
@@ -0,0 +1,94 @@
+// Copyright 2013 The Flutter Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "flutter/testing/testing.h"  // IWYU pragma: keep.
+#include "fml/synchronization/waitable_event.h" +#include "impeller/renderer/backend/vulkan/descriptor_pool_vk.h" +#include "impeller/renderer/backend/vulkan/resource_manager_vk.h" +#include "impeller/renderer/backend/vulkan/test/mock_vulkan.h" + +namespace impeller { +namespace testing { + +TEST(DescriptorPoolRecyclerVKTest, GetDescriptorPoolRecyclerCreatesNewPools) { + auto const context = MockVulkanContextBuilder().Build(); + + auto const [pool1, _] = context->GetDescriptorPoolRecycler()->Get(1024); + auto const [pool2, __] = context->GetDescriptorPoolRecycler()->Get(1024); + + // The two descriptor pools should be different. + EXPECT_NE(pool1.get(), pool2.get()); + + context->Shutdown(); +} + +TEST(DescriptorPoolRecyclerVKTest, DescriptorPoolCapacityIsRoundedUp) { + auto const context = MockVulkanContextBuilder().Build(); + auto const [pool1, capacity] = context->GetDescriptorPoolRecycler()->Get(1); + + // Rounds up to a minimum of 64. + EXPECT_EQ(capacity, 64u); + + auto const [pool2, capacity_2] = + context->GetDescriptorPoolRecycler()->Get(1023); + + // Rounds up to the next power of two. + EXPECT_EQ(capacity_2, 1024u); + + context->Shutdown(); +} + +namespace { + +// Invokes the provided callback when the destructor is called. +// +// Can be moved, but not copied. +class DeathRattle final { + public: + explicit DeathRattle(std::function callback) + : callback_(std::move(callback)) {} + + DeathRattle(DeathRattle&&) = default; + DeathRattle& operator=(DeathRattle&&) = default; + + ~DeathRattle() { callback_(); } + + private: + std::function callback_; +}; + +} // namespace + +TEST(DescriptorPoolRecyclerVKTest, ReclaimMakesDescriptorPoolAvailable) { + auto const context = MockVulkanContextBuilder().Build(); + + { + // Fetch a pool (which will be created). + auto pool = DescriptorPoolVK(context); + pool.AllocateDescriptorSets(1024, 1024, {}); + } + + // Add something to the resource manager and have it notify us when it's + // destroyed. That should give us a non-flaky signal that the pool has been + // reclaimed as well. + auto waiter = fml::AutoResetWaitableEvent(); + auto rattle = DeathRattle([&waiter]() { waiter.Signal(); }); + { + UniqueResourceVKT resource(context->GetResourceManager(), + std::move(rattle)); + } + waiter.Wait(); + + auto const [pool, _] = context->GetDescriptorPoolRecycler()->Get(1024); + + // Now check that we only ever created one pool. 
+  auto const called = GetMockVulkanFunctions(context->GetDevice());
+  EXPECT_EQ(
+      std::count(called->begin(), called->end(), "vkCreateDescriptorPool"),
+      1u);
+
+  context->Shutdown();
+}
+
+}  // namespace testing
+}  // namespace impeller
diff --git a/impeller/renderer/backend/vulkan/test/mock_vulkan.cc b/impeller/renderer/backend/vulkan/test/mock_vulkan.cc
index 6a11682647525..017f1fbcbcd7c 100644
--- a/impeller/renderer/backend/vulkan/test/mock_vulkan.cc
+++ b/impeller/renderer/backend/vulkan/test/mock_vulkan.cc
@@ -32,6 +32,8 @@ struct MockQueryPool {};
 
 struct MockCommandPool {};
 
+struct MockDescriptorPool {};
+
 class MockDevice final {
  public:
   explicit MockDevice() : called_functions_(new std::vector<std::string>()) {}
@@ -539,6 +541,34 @@ VkResult vkGetQueryPoolResults(VkDevice device,
   return VK_SUCCESS;
 }
 
+VkResult vkCreateDescriptorPool(VkDevice device,
+                                const VkDescriptorPoolCreateInfo* pCreateInfo,
+                                const VkAllocationCallbacks* pAllocator,
+                                VkDescriptorPool* pDescriptorPool) {
+  MockDevice* mock_device = reinterpret_cast<MockDevice*>(device);
+  *pDescriptorPool =
+      reinterpret_cast<VkDescriptorPool>(new MockDescriptorPool());
+  mock_device->AddCalledFunction("vkCreateDescriptorPool");
+  return VK_SUCCESS;
+}
+
+VkResult vkResetDescriptorPool(VkDevice device,
+                               VkDescriptorPool descriptorPool,
+                               VkDescriptorPoolResetFlags flags) {
+  MockDevice* mock_device = reinterpret_cast<MockDevice*>(device);
+  mock_device->AddCalledFunction("vkResetDescriptorPool");
+  return VK_SUCCESS;
+}
+
+VkResult vkAllocateDescriptorSets(
+    VkDevice device,
+    const VkDescriptorSetAllocateInfo* pAllocateInfo,
+    VkDescriptorSet* pDescriptorSets) {
+  MockDevice* mock_device = reinterpret_cast<MockDevice*>(device);
+  mock_device->AddCalledFunction("vkAllocateDescriptorSets");
+  return VK_SUCCESS;
+}
+
 PFN_vkVoidFunction GetMockVulkanProcAddress(VkInstance instance,
                                             const char* pName) {
   if (strcmp("vkEnumerateInstanceExtensionProperties", pName) == 0) {
@@ -643,6 +673,12 @@ PFN_vkVoidFunction GetMockVulkanProcAddress(VkInstance instance,
     return (PFN_vkVoidFunction)vkCreateQueryPool;
   } else if (strcmp("vkGetQueryPoolResults", pName) == 0) {
     return (PFN_vkVoidFunction)vkGetQueryPoolResults;
+  } else if (strcmp("vkCreateDescriptorPool", pName) == 0) {
+    return (PFN_vkVoidFunction)vkCreateDescriptorPool;
+  } else if (strcmp("vkResetDescriptorPool", pName) == 0) {
+    return (PFN_vkVoidFunction)vkResetDescriptorPool;
+  } else if (strcmp("vkAllocateDescriptorSets", pName) == 0) {
+    return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
   }
   return noop;
 }
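
A note on the design, for review context: the heart of the recycling scheme is the capacity rounding performed by DescriptorPoolRecyclerVK::Get. Below is a minimal standalone C++ sketch of that rule, under the assumption that Allocation::NextPowerOfTwoSize behaves like a conventional bit-ceil; the NextPowerOfTwoSize stand-in here is illustrative only and is not the real helper from impeller/base/allocation.h.

// Standalone sketch (not part of the patch above).
#include <algorithm>
#include <cstdint>
#include <iostream>

// Stand-in for Allocation::NextPowerOfTwoSize: smallest power of two >= x.
static uint32_t NextPowerOfTwoSize(uint32_t x) {
  uint32_t result = 1;
  while (result < x) {
    result <<= 1;
  }
  return result;
}

// Mirrors the rule in DescriptorPoolRecyclerVK::Get: requested slot counts
// are rounded up to a power of two with a floor of 64, so deciding whether a
// recycled pool is large enough takes a single comparison in Reuse().
static uint32_t RoundedCapacity(uint32_t minimum_capacity) {
  return std::max(NextPowerOfTwoSize(minimum_capacity), 64u);
}

int main() {
  // 1 and 63 land on the 64 floor; 65 rounds to 128; 1023 rounds to 1024.
  for (uint32_t request : {1u, 63u, 64u, 65u, 1023u, 1024u}) {
    std::cout << request << " -> " << RoundedCapacity(request) << "\n";
  }
  return 0;
}

This is exactly what the DescriptorPoolCapacityIsRoundedUp test asserts (Get(1) yields a capacity of 64, Get(1023) yields 1024), and it is why ReclaimMakesDescriptorPoolAvailable observes a single vkCreateDescriptorPool call: the second Get(1024) is satisfied from the recycled list with one capacity comparison rather than a fresh allocation.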