#include <chrono>
#include <deque>
#include <mutex>
#include <utility>

#include "flutter/fml/memory/ref_counted.h"
#include "flutter/fml/memory/weak_ptr.h"
#include "flutter/fml/task_runner.h"
#include "flutter/fml/trace_event.h"

#include "third_party/skia/include/core/SkRefCnt.h"
#include "third_party/skia/include/gpu/GrDirectContext.h"
1617
1718namespace flutter {
1819
1920// A queue that holds Skia objects that must be destructed on the given task
2021// runner.
21- class SkiaUnrefQueue : public fml ::RefCountedThreadSafe<SkiaUnrefQueue> {
22+ template <class T >
23+ class UnrefQueue : public fml ::RefCountedThreadSafe<UnrefQueue<T>> {
2224 public:
23- void Unref (SkRefCnt* object);
25+ using ResourceContext = T;
26+
27+ void Unref (SkRefCnt* object) {
28+ std::scoped_lock lock (mutex_);
29+ objects_.push_back (object);
30+ if (!drain_pending_) {
31+ drain_pending_ = true ;
32+ task_runner_->PostDelayedTask (
33+ [strong = fml::Ref (this )]() { strong->Drain (); }, drain_delay_);
34+ }
35+ }
2436
2537 // Usually, the drain is called automatically. However, during IO manager
2638 // shutdown (when the platform side reference to the OpenGL context is about
2739 // to go away), we may need to pre-emptively drain the unref queue. It is the
2840 // responsibility of the caller to ensure that no further unrefs are queued
2941 // after this call.
30- void Drain ();
42+ void Drain () {
43+ TRACE_EVENT0 (" flutter" , " SkiaUnrefQueue::Drain" );
44+ std::deque<SkRefCnt*> skia_objects;
45+ {
46+ std::scoped_lock lock (mutex_);
47+ objects_.swap (skia_objects);
48+ drain_pending_ = false ;
49+ }
50+ DoDrain (skia_objects, context_);
51+ }
3152
32- void UpdateResourceContext (sk_sp<GrDirectContext > context) {
53+ void UpdateResourceContext (sk_sp<ResourceContext > context) {
3354 context_ = context;
3455 }
3556
@@ -39,25 +60,47 @@ class SkiaUnrefQueue : public fml::RefCountedThreadSafe<SkiaUnrefQueue> {
3960 std::mutex mutex_;
4061 std::deque<SkRefCnt*> objects_;
4162 bool drain_pending_;
42- sk_sp<GrDirectContext > context_;
63+ sk_sp<ResourceContext > context_;
4364
4465 // The `GrDirectContext* context` is only used for signaling Skia to
4566 // performDeferredCleanup. It can be nullptr when such signaling is not needed
4667 // (e.g., in unit tests).
47- SkiaUnrefQueue (fml::RefPtr<fml::TaskRunner> task_runner,
48- fml::TimeDelta delay,
49- sk_sp<GrDirectContext> context = nullptr );
50-
51- ~SkiaUnrefQueue ();
68+ UnrefQueue (fml::RefPtr<fml::TaskRunner> task_runner,
69+ fml::TimeDelta delay,
70+ sk_sp<ResourceContext> context = nullptr )
71+ : task_runner_(std::move(task_runner)),
72+ drain_delay_ (delay),
73+ drain_pending_(false ),
74+ context_(context) {}
75+
76+ ~UnrefQueue () {
77+ fml::TaskRunner::RunNowOrPostTask (
78+ task_runner_, [objects = std::move (objects_),
79+ context = std::move (context_)]() mutable {
80+ DoDrain (objects, context);
81+ context.reset ();
82+ });
83+ }
5284
85+ // static
5386 static void DoDrain (const std::deque<SkRefCnt*>& skia_objects,
54- sk_sp<GrDirectContext> context);
87+ sk_sp<ResourceContext> context) {
88+ for (SkRefCnt* skia_object : skia_objects) {
89+ skia_object->unref ();
90+ }
91+
92+ if (context && skia_objects.size () > 0 ) {
93+ context->performDeferredCleanup (std::chrono::milliseconds (0 ));
94+ }
95+ }
5596
56- FML_FRIEND_REF_COUNTED_THREAD_SAFE (SkiaUnrefQueue );
57- FML_FRIEND_MAKE_REF_COUNTED (SkiaUnrefQueue );
58- FML_DISALLOW_COPY_AND_ASSIGN (SkiaUnrefQueue );
97+ FML_FRIEND_REF_COUNTED_THREAD_SAFE (UnrefQueue );
98+ FML_FRIEND_MAKE_REF_COUNTED (UnrefQueue );
99+ FML_DISALLOW_COPY_AND_ASSIGN (UnrefQueue );
59100};
60101
102+ using SkiaUnrefQueue = UnrefQueue<GrDirectContext>;
103+
/// An object whose deallocation needs to be performed on a specific unref
/// queue. The template argument U needs to have a call operator that returns
/// that unref queue.
0 commit comments