@@ -26,7 +26,7 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted
		class DeferredFreeFunctor
		{
			public:
-				inline DeferredFreeFunctor(SubAllocatedDescriptorSet* composed, uint32_t binding, size_type count, value_type* addresses)
+				inline DeferredFreeFunctor(SubAllocatedDescriptorSet* composed, uint32_t binding, size_type count, const value_type* addresses)
					: m_addresses(addresses, addresses + count), m_binding(binding), m_composed(composed)
				{
				}
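
Taking the addresses as a const pointer and copying them into m_addresses means the functor keeps its own snapshot of the allocation list, so the caller's array only has to outlive the constructor call. A minimal sketch of that property, assuming the nested class is publicly accessible; the set pointer, binding and address values are illustrative:

	// the source array can be transient because the constructor copies it
	SubAllocatedDescriptorSet::value_type transient[2] = { 42u, 43u };
	SubAllocatedDescriptorSet::DeferredFreeFunctor functor(subAllocDS, binding, 2u, transient);
	// `transient` may go out of scope here; the functor holds its own copy
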
@@ -38,21 +38,25 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted
#ifdef _NBL_DEBUG
					assert(m_composed);
#endif // _NBL_DEBUG
-					m_composed->multi_deallocate(m_binding, m_addresses.size(), &m_addresses[0]);
+					m_composed->multi_deallocate(m_binding, m_addresses.size(), m_addresses.data());
				}

				// Takes count of allocations we want to free up as reference, true is returned if
				// the amount of allocations freed was >= allocationsToFreeUp
				// False is returned if there are more allocations to free up
-				inline bool operator()(size_type allocationsToFreeUp)
+				inline bool operator()(size_type& allocationsToFreeUp)
				{
					auto prevCount = m_addresses.size();
					operator()();
					auto totalFreed = m_addresses.size() - prevCount;

					// This does the same logic as bool operator()(size_type&) on
					// CAsyncSingleBufferSubAllocator
-					return totalFreed >= allocationsToFreeUp;
+					bool freedEverything = totalFreed >= allocationsToFreeUp;
+
+					if (freedEverything) allocationsToFreeUp = 0u;
+					else allocationsToFreeUp -= totalFreed;
+					return freedEverything;
				}
			protected:
				SubAllocatedDescriptorSet* m_composed;
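
The in-out counter makes the functor composable with a draining loop: each invocation subtracts what it freed from the remaining quota and reports whether the quota was met. A sketch of the calling pattern; `latchedFunctors` and the quota value are illustrative stand-ins for what the event handler does internally:

	size_type allocationsToFreeUp = 128u;
	for (auto& functor : latchedFunctors)
	{
		// each call shrinks the quota by the number of allocations it freed
		// and returns true once the quota has been fully satisfied
		if (functor(allocationsToFreeUp))
			break;
	}
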
@@ -61,11 +65,19 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted
		};
	protected:
		struct SubAllocDescriptorSetRange {
-			std::shared_ptr<AddressAllocator> addressAllocator;
-			std::shared_ptr<ReservedAllocator> reservedAllocator;
+			MultiTimelineEventHandlerST<DeferredFreeFunctor> eventHandler;
+			std::unique_ptr<AddressAllocator> addressAllocator;
+			std::unique_ptr<ReservedAllocator> reservedAllocator;
			size_t reservedSize;
+
+			SubAllocDescriptorSetRange(
+				std::unique_ptr<AddressAllocator>&& inAddressAllocator,
+				std::unique_ptr<ReservedAllocator>&& inReservedAllocator,
+				size_t inReservedSize) :
+				eventHandler({}), addressAllocator(std::move(inAddressAllocator)),
+				reservedAllocator(std::move(inReservedAllocator)), reservedSize(inReservedSize) {}
+
		};
-		MultiTimelineEventHandlerST<DeferredFreeFunctor> eventHandler;
		std::map<uint32_t, SubAllocDescriptorSetRange> m_allocatableRanges = {};
		core::smart_refctd_ptr<video::IGPUDescriptorSet> m_descriptorSet;
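
The structural change in this hunk is that every SubAllocDescriptorSetRange now owns its own MultiTimelineEventHandlerST, replacing the single class-wide handler on the removed line, and the allocators become unique_ptr since a range is their sole owner. A sketch of the per-binding lookup this enables, mirroring the accessors further down in the diff:

	// deferred frees are now tracked per binding, so polling one binding's
	// handler cannot disturb another binding's latched frees
	auto found = m_allocatableRanges.find(binding);
	if (found != m_allocatableRanges.end())
		found->second.eventHandler.poll();
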
@@ -79,8 +91,7 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted
	public:

		// constructors
-		template<typename... Args>
-		inline SubAllocatedDescriptorSet(video::IGPUDescriptorSet* descriptorSet)
+		inline SubAllocatedDescriptorSet(core::smart_refctd_ptr<video::IGPUDescriptorSet>&& descriptorSet)
		{
			auto layout = descriptorSet->getLayout();
			for (uint32_t descriptorType = 0; descriptorType < static_cast<uint32_t>(asset::IDescriptor::E_TYPE::ET_COUNT); descriptorType++)
@@ -101,19 +112,18 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted
						| IGPUDescriptorSetLayout::SBinding::E_CREATE_FLAGS::ECF_UPDATE_UNUSED_WHILE_PENDING_BIT
						| IGPUDescriptorSetLayout::SBinding::E_CREATE_FLAGS::ECF_PARTIALLY_BOUND_BIT))
					{
-						SubAllocDescriptorSetRange range;
-						range.reservedSize = AddressAllocator::reserved_size(MaxDescriptorSetAllocationAlignment, static_cast<size_type>(count), MinDescriptorSetAllocationSize);
-						range.reservedAllocator = std::shared_ptr<ReservedAllocator>(new ReservedAllocator());
-						range.addressAllocator = std::shared_ptr<AddressAllocator>(new AddressAllocator(
-							range.reservedAllocator->allocate(range.reservedSize, _NBL_SIMD_ALIGNMENT),
+						auto reservedSize = AddressAllocator::reserved_size(MaxDescriptorSetAllocationAlignment, static_cast<size_type>(count), MinDescriptorSetAllocationSize);
+						auto reservedAllocator = std::unique_ptr<ReservedAllocator>(new ReservedAllocator());
+						auto addressAllocator = std::unique_ptr<AddressAllocator>(new AddressAllocator(
+							reservedAllocator->allocate(reservedSize, _NBL_SIMD_ALIGNMENT),
							static_cast<size_type>(0), 0u, MaxDescriptorSetAllocationAlignment, static_cast<size_type>(count),
							MinDescriptorSetAllocationSize
						));
-						m_allocatableRanges.emplace(binding.data, range);
+						m_allocatableRanges.emplace(binding.data, SubAllocDescriptorSetRange(std::move(addressAllocator), std::move(reservedAllocator), reservedSize));
					}
				}
			}
-			m_descriptorSet = core::smart_refctd_ptr(descriptorSet);
+			m_descriptorSet = std::move(descriptorSet);
		}

		~SubAllocatedDescriptorSet()
@@ -135,7 +145,9 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted
		AddressAllocator* getBindingAllocator(uint32_t binding)
		{
			auto range = m_allocatableRanges.find(binding);
-			assert(range != m_allocatableRanges.end()); // Check if this binding has an allocator
+			// Check if this binding has an allocator
+			if (range == m_allocatableRanges.end())
+				return nullptr;
			return range->second.addressAllocator.get();
		}
@@ -182,25 +194,36 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted
			}
		}
		//!
-		inline void multi_deallocate(const ISemaphore::SWaitInfo& futureWait, DeferredFreeFunctor&& functor) noexcept
+		inline void multi_deallocate(uint32_t binding, const ISemaphore::SWaitInfo& futureWait, DeferredFreeFunctor&& functor) noexcept
		{
+			auto range = m_allocatableRanges.find(binding);
+			// Check if this binding has an allocator
+			if (range == m_allocatableRanges.end())
+				return;
+
+			auto& eventHandler = range->second.eventHandler;
			auto debugGuard = stAccessVerifyDebugGuard();
			eventHandler.latch(futureWait, std::move(functor));
		}
		// TODO: improve signature of this function in the future
-		template<typename T=core::IReferenceCounted>
-		inline void multi_deallocate(uint32_t binding, uint32_t count, const value_type* addr, const ISemaphore::SWaitInfo& futureWait) noexcept
+		inline void multi_deallocate(uint32_t binding, size_type count, const value_type* addr, const ISemaphore::SWaitInfo& futureWait) noexcept
		{
			if (futureWait.semaphore)
-				multi_deallocate(futureWait, DeferredFreeFunctor(&this, binding, count, addr));
+				multi_deallocate(binding, futureWait, DeferredFreeFunctor(this, binding, count, addr));
			else
				multi_deallocate(binding, count, addr);
		}
		//! Returns free events still outstanding
		inline uint32_t cull_frees() noexcept
		{
			auto debugGuard = stAccessVerifyDebugGuard();
-			return eventHandler.poll().eventsLeft;
+			uint32_t frees = 0;
+			for (auto& range : m_allocatableRanges)
+			{
+				auto& eventHandler = range.second.eventHandler;
+				frees += eventHandler.poll().eventsLeft;
+			}
+			return frees;
		}
};
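
Put together, a caller of the reworked interface might defer a free on a timeline semaphore like this; the allocate call, binding index, semaphore and counts below are assumptions for illustration, not part of the commit:

	SubAllocatedDescriptorSet::value_type addr = SubAllocatedDescriptorSet::AddressAllocator::invalid_address;
	sads->multi_allocate(binding, 1u, &addr); // assumed allocating counterpart
	// ... submit GPU work that reads the descriptor at `addr` ...
	const ISemaphore::SWaitInfo futureWait = { semaphore.get(), signalValue };
	// the slot is only recycled once `futureWait` is signalled
	sads->multi_deallocate(binding, 1u, &addr, futureWait);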