@@ -29,13 +29,19 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted
 			std::shared_ptr<ReservedAllocator> reservedAllocator;
 			size_t reservedSize;
 		};
-		std::unordered_map<uint32_t,SubAllocDescriptorSetRange> m_allocatableRanges = {};
+		std::map<uint32_t,SubAllocDescriptorSetRange> m_allocatableRanges = {};
 
+#ifdef _NBL_DEBUG
+		std::recursive_mutex stAccessVerfier;
+#endif // _NBL_DEBUG
+
+		constexpr static inline uint32_t MaxDescriptorSetAllocationAlignment = 64u*1024u; // if you need larger alignments then you're not right in the head
+		constexpr static inline uint32_t MinDescriptorSetAllocationSize = 1u;
 
	public:
 		// constructors
 		template<typename... Args>
-		inline SubAllocatedDescriptorSet(video::IGPUDescriptorSetLayout* layout, const value_type maxAllocatableAlignment, Args&&... args)
+		inline SubAllocatedDescriptorSet(video::IGPUDescriptorSetLayout* layout)
 		{
 			for (uint32_t descriptorType = 0; descriptorType < static_cast<uint32_t>(asset::IDescriptor::E_TYPE::ET_COUNT); descriptorType++)
 			{
@@ -56,11 +62,12 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted
 						| IGPUDescriptorSetLayout::SBinding::E_CREATE_FLAGS::ECF_PARTIALLY_BOUND_BIT))
 					{
 						SubAllocDescriptorSetRange range;
-						range.reservedSize = AddressAllocator::reserved_size(maxAllocatableAlignment, static_cast<size_type>(count), args...);
+						range.reservedSize = AddressAllocator::reserved_size(MaxDescriptorSetAllocationAlignment, static_cast<size_type>(count), MinDescriptorSetAllocationSize);
 						range.reservedAllocator = std::shared_ptr<ReservedAllocator>(new ReservedAllocator());
 						range.addressAllocator = std::shared_ptr<AddressAllocator>(new AddressAllocator(
 							range.reservedAllocator->allocate(range.reservedSize, _NBL_SIMD_ALIGNMENT),
-							static_cast<size_type>(0), 0u, maxAllocatableAlignment, static_cast<size_type>(count), std::forward<Args>(args)...
+							static_cast<size_type>(0), 0u, MaxDescriptorSetAllocationAlignment, static_cast<size_type>(count),
+							MinDescriptorSetAllocationSize
 						));
 						m_allocatableRanges.emplace(binding.data, range);
 					}
@@ -75,51 +82,55 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted
 				auto& range = m_allocatableRanges[i];
 				if (range.reservedSize == 0)
 					continue;
-
 				auto ptr = reinterpret_cast<const uint8_t*>(core::address_allocator_traits<AddressAllocator>::getReservedSpacePtr(*range.addressAllocator));
+				range.addressAllocator->~PoolAddressAllocator();
 				range.reservedAllocator->deallocate(const_cast<uint8_t*>(ptr), range.reservedSize);
 			}
 		}
 
-		// amount of bindings in the descriptor set layout used
-		uint32_t getLayoutBindingCount() { return m_allocatableRanges.size(); }
-
 		// whether that binding index can be sub-allocated
-		bool isBindingAllocatable(uint32_t binding)
-		{
-			return m_allocatableRanges.find(binding) != m_allocatableRanges.end();
-		}
+		bool isBindingAllocatable(uint32_t binding) { return m_allocatableRanges.find(binding) != m_allocatableRanges.end(); }
 
-		AddressAllocator& getBindingAllocator(uint32_t binding)
+		AddressAllocator* getBindingAllocator(uint32_t binding)
 		{
 			auto range = m_allocatableRanges.find(binding);
 			assert(range != m_allocatableRanges.end()); // Check if this binding has an allocator
-			return *range->second.addressAllocator;
+			return range->second.addressAllocator.get();
 		}
 
 		// main methods
 
 		//! Warning `outAddresses` needs to be primed with `invalid_value` values, otherwise no allocation happens for elements not equal to `invalid_value`
 		inline void multi_allocate(uint32_t binding, uint32_t count, value_type* outAddresses)
 		{
-			auto& allocator = getBindingAllocator(binding);
+#ifdef _NBL_DEBUG
+			std::unique_lock<std::recursive_mutex> tLock(stAccessVerfier,std::try_to_lock_t());
+			assert(tLock.owns_lock());
+#endif // _NBL_DEBUG
+
+			auto allocator = getBindingAllocator(binding);
 			for (uint32_t i=0; i<count; i++)
 			{
 				if (outAddresses[i]!=AddressAllocator::invalid_address)
 					continue;
 
-				outAddresses[i] = allocator.alloc_addr(1,1);
+				outAddresses[i] = allocator->alloc_addr(1,1);
 			}
 		}
 		inline void multi_deallocate(uint32_t binding, uint32_t count, const size_type* addr)
 		{
-			auto& allocator = getBindingAllocator(binding);
+#ifdef _NBL_DEBUG
+			std::unique_lock<std::recursive_mutex> tLock(stAccessVerfier,std::try_to_lock_t());
+			assert(tLock.owns_lock());
+#endif // _NBL_DEBUG
+
+			auto allocator = getBindingAllocator(binding);
 			for (uint32_t i=0; i<count; i++)
 			{
 				if (addr[i]==AddressAllocator::invalid_address)
 					continue;
 
-				allocator.free_addr(addr[i],1);
+				allocator->free_addr(addr[i],1);
			}
 		}
 };
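
A minimal usage sketch of the API after this change, for illustration only: the `layout` pointer, the binding index, and the element type of `addresses` are assumptions (not part of the PR), and it presumes the binding was created with `ECF_UPDATE_AFTER_BIND_BIT` or `ECF_PARTIALLY_BOUND_BIT` so that the constructor gave it an allocator.

```cpp
// Hypothetical usage sketch, not part of the diff above.
// Assumes `layout` is a valid video::IGPUDescriptorSetLayout* whose binding 0
// has one of the allocatable create flags, and that value_type is uint32_t.
auto subAllocDS = core::make_smart_refctd_ptr<SubAllocatedDescriptorSet>(layout);

const uint32_t binding = 0u;
constexpr uint32_t count = 4u;
uint32_t addresses[count];

// prime every output slot with invalid_address, otherwise multi_allocate skips it
for (uint32_t i = 0u; i < count; i++)
	addresses[i] = SubAllocatedDescriptorSet::AddressAllocator::invalid_address;

subAllocDS->multi_allocate(binding, count, addresses);
// ... write descriptors into the allocated slots and use them ...
subAllocDS->multi_deallocate(binding, count, addresses);
```

Note that in `_NBL_DEBUG` builds both methods only `try_to_lock` the recursive mutex and assert ownership, so the lock acts as a single-threaded-access verifier rather than real synchronization; concurrent callers still have to serialize access externally.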