1 change: 1 addition & 0 deletions ci/licenses_golden/excluded_files
@@ -172,6 +172,7 @@
../../../flutter/impeller/renderer/backend/vulkan/command_encoder_vk_unittests.cc
../../../flutter/impeller/renderer/backend/vulkan/command_pool_vk_unittests.cc
../../../flutter/impeller/renderer/backend/vulkan/context_vk_unittests.cc
../../../flutter/impeller/renderer/backend/vulkan/descriptor_pool_vk_unittests.cc
../../../flutter/impeller/renderer/backend/vulkan/fence_waiter_vk_unittests.cc
../../../flutter/impeller/renderer/backend/vulkan/pass_bindings_cache_unittests.cc
../../../flutter/impeller/renderer/backend/vulkan/resource_manager_vk_unittests.cc
1 change: 1 addition & 0 deletions impeller/renderer/backend/vulkan/BUILD.gn
@@ -12,6 +12,7 @@ impeller_component("vulkan_unittests") {
"command_encoder_vk_unittests.cc",
"command_pool_vk_unittests.cc",
"context_vk_unittests.cc",
"descriptor_pool_vk_unittests.cc",
"fence_waiter_vk_unittests.cc",
"pass_bindings_cache_unittests.cc",
"resource_manager_vk_unittests.cc",
24 changes: 10 additions & 14 deletions impeller/renderer/backend/vulkan/command_encoder_vk.cc
@@ -16,11 +16,10 @@ namespace impeller {

class TrackedObjectsVK {
public:
explicit TrackedObjectsVK(
const std::weak_ptr<const DeviceHolder>& device_holder,
const std::shared_ptr<CommandPoolVK>& pool,
std::unique_ptr<GPUProbe> probe)
: desc_pool_(device_holder), probe_(std::move(probe)) {
explicit TrackedObjectsVK(const std::weak_ptr<const ContextVK>& context,
const std::shared_ptr<CommandPoolVK>& pool,
std::unique_ptr<GPUProbe> probe)
: desc_pool_(context), probe_(std::move(probe)) {
if (!pool) {
return;
}
@@ -112,8 +111,7 @@ std::shared_ptr<CommandEncoderVK> CommandEncoderFactoryVK::Create() {
if (!context) {
return nullptr;
}
auto& context_vk = ContextVK::Cast(*context);
auto recycler = context_vk.GetCommandPoolRecycler();
auto recycler = context->GetCommandPoolRecycler();
if (!recycler) {
return nullptr;
}
@@ -123,9 +121,8 @@ std::shared_ptr<CommandEncoderVK> CommandEncoderFactoryVK::Create() {
}

auto tracked_objects = std::make_shared<TrackedObjectsVK>(
context_vk.GetDeviceHolder(), tls_pool,
context->GetGPUTracer()->CreateGPUProbe());
auto queue = context_vk.GetGraphicsQueue();
context, tls_pool, context->GetGPUTracer()->CreateGPUProbe());
auto queue = context->GetGraphicsQueue();

if (!tracked_objects || !tracked_objects->IsValid() || !queue) {
return nullptr;
@@ -140,15 +137,14 @@ std::shared_ptr<CommandEncoderVK> CommandEncoderFactoryVK::Create() {
}

if (label_.has_value()) {
context_vk.SetDebugName(tracked_objects->GetCommandBuffer(),
label_.value());
context->SetDebugName(tracked_objects->GetCommandBuffer(), label_.value());
}
tracked_objects->GetGPUProbe().RecordCmdBufferStart(
tracked_objects->GetCommandBuffer());

return std::make_shared<CommandEncoderVK>(context_vk.GetDeviceHolder(),
return std::make_shared<CommandEncoderVK>(context->GetDeviceHolder(),
tracked_objects, queue,
context_vk.GetFenceWaiter());
context->GetFenceWaiter());
}

CommandEncoderVK::CommandEncoderVK(
1 change: 0 additions & 1 deletion impeller/renderer/backend/vulkan/command_encoder_vk.h
@@ -8,7 +8,6 @@
#include <functional>
#include <optional>

#include "flutter/fml/macros.h"
#include "impeller/renderer/backend/vulkan/command_pool_vk.h"
#include "impeller/renderer/backend/vulkan/context_vk.h"
#include "impeller/renderer/backend/vulkan/descriptor_pool_vk.h"
1 change: 0 additions & 1 deletion impeller/renderer/backend/vulkan/command_pool_vk.cc
@@ -8,7 +8,6 @@
#include <optional>
#include <utility>

#include "fml/macros.h"
#include "fml/thread_local.h"
#include "fml/trace_event.h"
#include "impeller/renderer/backend/vulkan/resource_manager_vk.h"
8 changes: 8 additions & 0 deletions impeller/renderer/backend/vulkan/context_vk.cc
@@ -405,6 +405,13 @@ void ContextVK::Setup(Settings settings) {
return;
}

auto descriptor_pool_recycler =
std::make_shared<DescriptorPoolRecyclerVK>(weak_from_this());
if (!descriptor_pool_recycler) {
VALIDATION_LOG << "Could not create descriptor pool recycler.";
return;
}

//----------------------------------------------------------------------------
/// Fetch the queues.
///
@@ -436,6 +443,7 @@ void ContextVK::Setup(Settings settings) {
fence_waiter_ = std::move(fence_waiter);
resource_manager_ = std::move(resource_manager);
command_pool_recycler_ = std::move(command_pool_recycler);
descriptor_pool_recycler_ = std::move(descriptor_pool_recycler);
device_name_ = std::string(physical_device_properties.deviceName);
is_valid_ = true;

6 changes: 6 additions & 0 deletions impeller/renderer/backend/vulkan/context_vk.h
@@ -34,6 +34,7 @@ class FenceWaiterVK;
class ResourceManagerVK;
class SurfaceContextVK;
class GPUTracerVK;
class DescriptorPoolRecyclerVK;

class ContextVK final : public Context,
public BackendCast<ContextVK, Context>,
@@ -158,6 +159,10 @@ class ContextVK final : public Context,

std::shared_ptr<CommandPoolRecyclerVK> GetCommandPoolRecycler() const;

std::shared_ptr<DescriptorPoolRecyclerVK> GetDescriptorPoolRecycler() const {
return descriptor_pool_recycler_;
}

std::shared_ptr<GPUTracerVK> GetGPUTracer() const;

void RecordFrameEndTime() const;
@@ -191,6 +196,7 @@ class ContextVK final : public Context,
std::shared_ptr<fml::ConcurrentMessageLoop> raster_message_loop_;
std::unique_ptr<fml::Thread> queue_submit_thread_;
std::shared_ptr<GPUTracerVK> gpu_tracer_;
std::shared_ptr<DescriptorPoolRecyclerVK> descriptor_pool_recycler_;

bool sync_presentation_ = false;
const uint64_t hash_;
209 changes: 178 additions & 31 deletions impeller/renderer/backend/vulkan/descriptor_pool_vk.cc
@@ -3,69 +3,106 @@
// found in the LICENSE file.

#include "impeller/renderer/backend/vulkan/descriptor_pool_vk.h"
#include <optional>

#include "flutter/fml/trace_event.h"
#include "impeller/base/allocation.h"
#include "impeller/base/validation.h"
#include "impeller/renderer/backend/vulkan/resource_manager_vk.h"
#include "vulkan/vulkan_handles.hpp"

namespace impeller {

// Holds the descriptor pool in a background thread, recycling it when not in
// use.
class BackgroundDescriptorPoolVK final {
public:
BackgroundDescriptorPoolVK(BackgroundDescriptorPoolVK&&) = default;

explicit BackgroundDescriptorPoolVK(
vk::UniqueDescriptorPool&& pool,
uint32_t allocated_capacity,
std::weak_ptr<DescriptorPoolRecyclerVK> recycler)
: pool_(std::move(pool)),
allocated_capacity_(allocated_capacity),
recycler_(std::move(recycler)) {}

~BackgroundDescriptorPoolVK() {
auto const recycler = recycler_.lock();

// Not only does this prevent recycling when the context is being destroyed,
// but it also prevents the destructor from effectively being called twice;
// once for the original BackgroundDescriptorPoolVK() and once for the moved
// BackgroundDescriptorPoolVK().
if (!recycler) {
return;
}

recycler->Reclaim(std::move(pool_), allocated_capacity_);
}

private:
BackgroundDescriptorPoolVK(const BackgroundDescriptorPoolVK&) = delete;

BackgroundDescriptorPoolVK& operator=(const BackgroundDescriptorPoolVK&) =
delete;

vk::UniqueDescriptorPool pool_;
uint32_t allocated_capacity_;
std::weak_ptr<DescriptorPoolRecyclerVK> recycler_;
};

DescriptorPoolVK::DescriptorPoolVK(
const std::weak_ptr<const DeviceHolder>& device_holder)
: device_holder_(device_holder) {
FML_DCHECK(device_holder.lock());
const std::weak_ptr<const ContextVK>& context)
: context_(context) {
FML_DCHECK(context.lock());
}

DescriptorPoolVK::~DescriptorPoolVK() = default;
DescriptorPoolVK::~DescriptorPoolVK() {
if (!pool_) {
return;
}

static vk::UniqueDescriptorPool CreatePool(const vk::Device& device,
uint32_t image_count,
uint32_t buffer_count) {
TRACE_EVENT0("impeller", "CreateDescriptorPool");
std::vector<vk::DescriptorPoolSize> pools = {};
if (image_count > 0) {
pools.emplace_back(vk::DescriptorPoolSize{
vk::DescriptorType::eCombinedImageSampler, image_count});
auto const context = context_.lock();
if (!context) {
return;
}
if (buffer_count > 0) {
pools.emplace_back(vk::DescriptorPoolSize{
vk::DescriptorType::eUniformBuffer, buffer_count});
pools.emplace_back(vk::DescriptorPoolSize{
vk::DescriptorType::eStorageBuffer, buffer_count});
auto const recycler = context->GetDescriptorPoolRecycler();
if (!recycler) {
return;
}
vk::DescriptorPoolCreateInfo pool_info;
pool_info.setMaxSets(image_count + buffer_count);
pool_info.setPoolSizes(pools);
auto [result, pool] = device.createDescriptorPoolUnique(pool_info);
if (result != vk::Result::eSuccess) {
VALIDATION_LOG << "Unable to create a descriptor pool";
}
return std::move(pool);

auto reset_pool_when_dropped = BackgroundDescriptorPoolVK(
std::move(pool_), allocated_capacity_, recycler);

UniqueResourceVKT<BackgroundDescriptorPoolVK> pool(
context->GetResourceManager(), std::move(reset_pool_when_dropped));
}

fml::StatusOr<std::vector<vk::DescriptorSet>>
DescriptorPoolVK::AllocateDescriptorSets(
uint32_t buffer_count,
uint32_t sampler_count,
const std::vector<vk::DescriptorSetLayout>& layouts) {
std::shared_ptr<const DeviceHolder> strong_device = device_holder_.lock();
if (!strong_device) {
std::shared_ptr<const ContextVK> strong_context = context_.lock();
if (!strong_context) {
return fml::Status(fml::StatusCode::kUnknown, "No device");
}

auto new_pool =
CreatePool(strong_device->GetDevice(), sampler_count, buffer_count);
auto minimum_capacity = std::max(sampler_count, buffer_count);
auto [new_pool, capacity] =
strong_context->GetDescriptorPoolRecycler()->Get(minimum_capacity);
if (!new_pool) {
return fml::Status(fml::StatusCode::kUnknown,
"Failed to create descriptor pool");
}
pool_ = std::move(new_pool);
allocated_capacity_ = capacity;

vk::DescriptorSetAllocateInfo set_info;
set_info.setDescriptorPool(pool_.get());
set_info.setSetLayouts(layouts);

auto [result, sets] =
strong_device->GetDevice().allocateDescriptorSets(set_info);
strong_context->GetDevice().allocateDescriptorSets(set_info);
if (result != vk::Result::eSuccess) {
VALIDATION_LOG << "Could not allocate descriptor sets: "
<< vk::to_string(result);
@@ -74,4 +111,114 @@ DescriptorPoolVK::AllocateDescriptorSets(
return sets;
}

void DescriptorPoolRecyclerVK::Reclaim(vk::UniqueDescriptorPool&& pool,
uint32_t allocated_capacity) {
TRACE_EVENT0("impeller", "DescriptorPoolRecyclerVK::Reclaim");

// Reset the pool on a background thread.
auto strong_context = context_.lock();
if (!strong_context) {
return;
}
auto device = strong_context->GetDevice();
device.resetDescriptorPool(pool.get());

// Move the pool to the recycled list.
Lock recycled_lock(recycled_mutex_);

if (recycled_.size() < kMaxRecycledPools) {
recycled_.push_back(std::make_pair(std::move(pool), allocated_capacity));
return;
}

// If the recycled list has already reached its maximum size of 32 pools,
// one pool has to be dropped. If we always dropped the incoming pool, the
// list could fill up with pools whose capacity is too small to reuse, and
// all subsequent descriptor allocations would stop being recycled. Instead,
// we replace the first recycled pool whose capacity is smaller than that of
// the pool being reclaimed; if no such pool exists, the incoming pool is
// dropped instead.
std::optional<size_t> selected_index = std::nullopt;
for (auto i = 0u; i < recycled_.size(); i++) {
const auto& [_, capacity] = recycled_[i];
if (capacity < allocated_capacity) {
selected_index = i;
break;
}
}
if (selected_index.has_value()) {
recycled_[selected_index.value()] =
std::make_pair(std::move(pool), allocated_capacity);
}
// If selected index has no value, then no pools had a smaller capacity than
// this one and we drop it instead.
}

DescriptorPoolAndSize DescriptorPoolRecyclerVK::Get(uint32_t minimum_capacity) {
// See note on DescriptorPoolRecyclerVK doc comment.
auto rounded_capacity =
std::max(Allocation::NextPowerOfTwoSize(minimum_capacity), 64u);

// Reuse a recycled pool with a matching minimum capacity if one is available.
auto recycled_pool = Reuse(rounded_capacity);
if (recycled_pool.has_value()) {
return std::move(recycled_pool.value());
}
return Create(rounded_capacity);
}

DescriptorPoolAndSize DescriptorPoolRecyclerVK::Create(
uint32_t minimum_capacity) {
TRACE_EVENT0("impeller", "DescriptorPoolRecyclerVK::Create");

FML_DCHECK(Allocation::NextPowerOfTwoSize(minimum_capacity) ==
minimum_capacity);
auto strong_context = context_.lock();
if (!strong_context) {
VALIDATION_LOG << "Unable to create a descriptor pool";
return {};
}

std::vector<vk::DescriptorPoolSize> pools = {
vk::DescriptorPoolSize{vk::DescriptorType::eCombinedImageSampler,
minimum_capacity},
vk::DescriptorPoolSize{vk::DescriptorType::eUniformBuffer,
minimum_capacity},
vk::DescriptorPoolSize{vk::DescriptorType::eStorageBuffer,
minimum_capacity}};
vk::DescriptorPoolCreateInfo pool_info;
pool_info.setMaxSets(minimum_capacity + minimum_capacity);
pool_info.setPoolSizes(pools);
auto [result, pool] =
strong_context->GetDevice().createDescriptorPoolUnique(pool_info);
if (result != vk::Result::eSuccess) {
VALIDATION_LOG << "Unable to create a descriptor pool";
}
return std::make_pair(std::move(pool), minimum_capacity);
}

std::optional<DescriptorPoolAndSize> DescriptorPoolRecyclerVK::Reuse(
uint32_t minimum_capacity) {
TRACE_EVENT0("impeller", "DescriptorPoolRecyclerVK::Reuse");

FML_DCHECK(Allocation::NextPowerOfTwoSize(minimum_capacity) ==
minimum_capacity);
Lock lock(recycled_mutex_);

std::optional<size_t> found_index = std::nullopt;
for (auto i = 0u; i < recycled_.size(); i++) {
const auto& [_, capacity] = recycled_[i];
if (capacity >= minimum_capacity) {
found_index = i;
break;
}
}
if (!found_index.has_value()) {
return std::nullopt;
}
auto pair = std::move(recycled_[found_index.value()]);
recycled_.erase(recycled_.begin() + found_index.value());
return pair;
}

} // namespace impeller
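
To make the new recycler's sizing and eviction policy easier to follow, here is a minimal, Vulkan-free sketch of the same reuse/reclaim logic. The names SimpleRecycler and FakePool are hypothetical stand-ins; the real DescriptorPoolRecyclerVK operates on vk::UniqueDescriptorPool, resets each pool on the device before caching it, and guards the recycled list with a mutex.

// Sketch only: capacity-based pool reuse and eviction, mirroring the
// DescriptorPoolRecyclerVK::Get/Reclaim logic above with fake types.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct FakePool {
  uint32_t id = 0;
};

class SimpleRecycler {
 public:
  static constexpr std::size_t kMaxRecycledPools = 32;

  // Round the requested capacity up to a power of two (at least 64), then
  // hand back a recycled pool that is large enough, or a brand new one.
  std::pair<FakePool, uint32_t> Get(uint32_t minimum_capacity) {
    const uint32_t rounded = std::max(NextPowerOfTwo(minimum_capacity), 64u);
    for (std::size_t i = 0; i < recycled_.size(); i++) {
      if (recycled_[i].second >= rounded) {
        auto entry = recycled_[i];
        recycled_.erase(recycled_.begin() + i);
        return entry;
      }
    }
    return {FakePool{next_id_++}, rounded};
  }

  // Keep at most kMaxRecycledPools. When the list is full, replace the first
  // pool smaller than the returning one so that small pools cannot crowd out
  // larger, more reusable ones; otherwise the returning pool is dropped.
  void Reclaim(FakePool pool, uint32_t capacity) {
    if (recycled_.size() < kMaxRecycledPools) {
      recycled_.emplace_back(pool, capacity);
      return;
    }
    for (auto& entry : recycled_) {
      if (entry.second < capacity) {
        entry = {pool, capacity};
        return;
      }
    }
  }

 private:
  static uint32_t NextPowerOfTwo(uint32_t x) {
    uint32_t p = 1;
    while (p < x) {
      p <<= 1;
    }
    return p;
  }

  std::vector<std::pair<FakePool, uint32_t>> recycled_;
  uint32_t next_id_ = 1;
};

Each Get() is expected to be paired with a later Reclaim() at the capacity that was handed out, which is what DescriptorPoolVK arranges via BackgroundDescriptorPoolVK when it is destroyed.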