Add sub-allocated descriptor sets #657

Merged · 30 commits · Mar 13, 2024
Commits (30)
cc54740
Add sub-allocated descriptor set header
deprilula28 Feb 19, 2024
d10059f
Add some reusable binding API
deprilula28 Feb 19, 2024
347ff63
Work on using descriptor set layout directly
deprilula28 Feb 20, 2024
e1282c7
Remove out old bindings
deprilula28 Feb 20, 2024
28611be
Use pool address allocator
deprilula28 Feb 21, 2024
6c6046c
Use map
deprilula28 Feb 21, 2024
190067a
PR reviews
deprilula28 Feb 21, 2024
289e424
PR reviews
deprilula28 Feb 21, 2024
e0e91ff
Work on deferred freeing the descriptors
deprilula28 Feb 26, 2024
68582ea
Work on having descriptor set match with its sub allocator
deprilula28 Feb 26, 2024
d716848
PR reviews
deprilula28 Feb 27, 2024
4fd4b8f
Fix example
deprilula28 Feb 27, 2024
58c4e90
Add writing of descriptors on the allocate method
deprilula28 Feb 27, 2024
41b9a5b
Work on try allocate and timings
deprilula28 Feb 27, 2024
d608e78
Add PR comments
deprilula28 Feb 28, 2024
b326ca7
Work on nullifying descriptors
deprilula28 Feb 29, 2024
894a47c
Keep descriptor writes outside the allocate function
deprilula28 Mar 4, 2024
59c65ae
Include exporting of allocate descriptor writes instead of using them
deprilula28 Mar 4, 2024
1228f5f
Merge branch 'vulkan_1_3' into suballocdescriptorset
deprilula28 Mar 4, 2024
54250a6
Update examples submodule
deprilula28 Mar 4, 2024
2c35289
Update SubAllocatedDescriptorSet.h
devshgraphicsprogramming Mar 5, 2024
a2e2be4
Forgot that the nullification needs to be done between event-wait and…
devshgraphicsprogramming Mar 5, 2024
bc5b22d
PR review and fix compilation errors
deprilula28 Mar 8, 2024
912ed7a
PR reviews & nullifying descriptors
deprilula28 Mar 11, 2024
89e6440
Fix multi timeline functionality
deprilula28 Mar 12, 2024
ede586f
Update example
deprilula28 Mar 12, 2024
5531903
Implement depletion of sub alloc descriptor set
deprilula28 Mar 12, 2024
79c3a23
Fix API for nullify
deprilula28 Mar 13, 2024
6b5630d
Fix tabs & spaces
deprilula28 Mar 13, 2024
aca4c74
More PR reviews
deprilula28 Mar 13, 2024
Files changed: include/nbl/video/alloc/SubAllocatedDescriptorSet.h (97 changes: 83 additions, 14 deletions; showing changes from 3 commits)
@@ -23,26 +23,66 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted
using value_type = typename AddressAllocator::size_type;
static constexpr value_type invalid_value = AddressAllocator::invalid_address;

class DeferredFreeFunctor
{
public:
inline DeferredFreeFunctor(SubAllocatedDescriptorSet* composed, uint32_t binding, size_type count, value_type* addresses)
: m_addresses(addresses, addresses + count), m_binding(binding), m_composed(composed)
{
}

// Just does the de-allocation
inline void operator()()
{
// isn't assert already debug-only?
#ifdef _NBL_DEBUG
assert(m_composed);
#endif // _NBL_DEBUG
m_composed->multi_deallocate(m_binding, m_addresses.size(), &m_addresses[0]);
}

// Takes count of allocations we want to free up as reference, true is returned if
// the amount of allocations freed was >= allocationsToFreeUp
// False is returned if there are more allocations to free up
inline bool operator()(size_type allocationsToFreeUp)
{
auto prevCount = m_addresses.size();
operator()();
auto totalFreed = m_addresses.size() - prevCount;

// This does the same logic as bool operator()(size_type&) on
// CAsyncSingleBufferSubAllocator
return totalFreed >= allocationsToFreeUp;
}
protected:
SubAllocatedDescriptorSet* m_composed;
uint32_t m_binding;
std::vector<value_type> m_addresses;
};
protected:
struct SubAllocDescriptorSetRange {
std::shared_ptr<AddressAllocator> addressAllocator;
std::shared_ptr<ReservedAllocator> reservedAllocator;
size_t reservedSize;
};
MultiTimelineEventHandlerST<DeferredFreeFunctor> eventHandler;
std::map<uint32_t, SubAllocDescriptorSetRange> m_allocatableRanges = {};
core::smart_refctd_ptr<video::IGPUDescriptorSet> m_descriptorSet;

#ifdef _NBL_DEBUG
std::recursive_mutex stAccessVerfier;
#endif // _NBL_DEBUG

constexpr static inline uint32_t MaxDescriptorSetAllocationAlignment = 64u*1024u; // if you need larger alignments then you're not right in the head
constexpr static inline uint32_t MaxDescriptorSetAllocationAlignment = 1u;
constexpr static inline uint32_t MinDescriptorSetAllocationSize = 1u;

public:

// constructors
template<typename... Args>
inline SubAllocatedDescriptorSet(video::IGPUDescriptorSetLayout* layout)
inline SubAllocatedDescriptorSet(video::IGPUDescriptorSet* descriptorSet)
{
auto layout = descriptorSet->getLayout();
for (uint32_t descriptorType = 0; descriptorType < static_cast<uint32_t>(asset::IDescriptor::E_TYPE::ET_COUNT); descriptorType++)
{
auto descType = static_cast<asset::IDescriptor::E_TYPE>(descriptorType);
@@ -73,6 +113,7 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted
}
}
}
m_descriptorSet = core::smart_refctd_ptr(descriptorSet);
}

~SubAllocatedDescriptorSet()
@@ -83,7 +124,7 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted
if (range.reservedSize == 0)
continue;
auto ptr = reinterpret_cast<const uint8_t*>(core::address_allocator_traits<AddressAllocator>::getReservedSpacePtr(*range.addressAllocator));
range.addressAllocator->~PoolAddressAllocator();
range.addressAllocator = nullptr;
range.reservedAllocator->deallocate(const_cast<uint8_t*>(ptr), range.reservedSize);
}
}
@@ -100,39 +141,67 @@ class SubAllocatedDescriptorSet : public core::IReferenceCounted

// main methods

//! Warning `outAddresses` needs to be primed with `invalid_value` values, otherwise no allocation happens for elements not equal to `invalid_value`
inline void multi_allocate(uint32_t binding, uint32_t count, value_type* outAddresses)
#ifdef _NBL_DEBUG
std::unique_lock<std::recursive_mutex> stAccessVerifyDebugGuard()
{
#ifdef _NBL_DEBUG
std::unique_lock<std::recursive_mutex> tLock(stAccessVerfier,std::try_to_lock_t());
assert(tLock.owns_lock());
#endif // _NBL_DEBUG
return tLock;
}
#else
bool stAccessVerifyDebugGuard() { return false; }
#endif

//! Warning `outAddresses` needs to be primed with `invalid_value` values, otherwise no allocation happens for elements not equal to `invalid_value`
inline void multi_allocate(uint32_t binding, size_type count, value_type* outAddresses)
{
auto debugGuard = stAccessVerifyDebugGuard();

auto allocator = getBindingAllocator(binding);
for (uint32_t i=0; i<count; i++)
for (size_type i=0; i<count; i++)
{
if (outAddresses[i]!=AddressAllocator::invalid_address)
continue;

outAddresses[i] = allocator->alloc_addr(1,1);
// TODO: should also write something to the descriptor set (or probably leave that to the caller?)
}
}
inline void multi_deallocate(uint32_t binding, uint32_t count, const size_type* addr)
inline void multi_deallocate(uint32_t binding, size_type count, const size_type* addr)
{
#ifdef _NBL_DEBUG
std::unique_lock<std::recursive_mutex> tLock(stAccessVerfier,std::try_to_lock_t());
assert(tLock.owns_lock());
#endif // _NBL_DEBUG
auto debugGuard = stAccessVerifyDebugGuard();

auto allocator = getBindingAllocator(binding);
for (uint32_t i=0; i<count; i++)
for (size_type i=0; i<count; i++)
{
if (addr[i]==AddressAllocator::invalid_address)
continue;

allocator->free_addr(addr[i],1);
// TODO: should also write something to the descriptor sets
}
}
//!
inline void multi_deallocate(const ISemaphore::SWaitInfo& futureWait, DeferredFreeFunctor&& functor) noexcept
{
auto debugGuard = stAccessVerifyDebugGuard();
eventHandler.latch(futureWait,std::move(functor));
}
// TODO: improve signature of this function in the future
template<typename T=core::IReferenceCounted>
inline void multi_deallocate(uint32_t binding, uint32_t count, const value_type* addr, const ISemaphore::SWaitInfo& futureWait) noexcept
{
if (futureWait.semaphore)
multi_deallocate(futureWait, DeferredFreeFunctor(&this, binding, count, addr));
else
multi_deallocate(binding, count, addr);
}
//! Returns free events still outstanding
inline uint32_t cull_frees() noexcept
{
auto debugGuard = stAccessVerifyDebugGuard();
return eventHandler.poll().eventsLeft;
}
};

}
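For context, a minimal usage sketch of the API as it appears in this revision of the diff. This is not code from the PR: the function name exampleSubAllocation, the parameters set, semaphore, and signalValue, and the binding index are placeholders, and the sketch assumes the parts of the header outside the shown hunks (class declaration, includes) plus the usual Nabla helpers (core::make_smart_refctd_ptr, the semaphore/value layout of ISemaphore::SWaitInfo) behave as elsewhere in the codebase. Later commits in this PR change parts of this API again.

    #include "nbl/video/alloc/SubAllocatedDescriptorSet.h"

    using namespace nbl;

    // Sketch only: `set` and `semaphore` are assumed to have been created elsewhere
    // through the usual Nabla video API; `signalValue` is the timeline value that the
    // GPU work using these descriptors signals when it finishes.
    uint32_t exampleSubAllocation(core::smart_refctd_ptr<video::IGPUDescriptorSet> set,
                                  core::smart_refctd_ptr<video::ISemaphore> semaphore,
                                  const uint64_t signalValue)
    {
        using SubAllocDS = video::SubAllocatedDescriptorSet;

        // In this revision the constructor takes the descriptor set itself, not just its layout
        auto subAllocDS = core::make_smart_refctd_ptr<SubAllocDS>(set.get());

        // Per the header's warning, `outAddresses` must be primed with `invalid_value`;
        // multi_allocate skips entries that already hold a valid address.
        constexpr uint32_t binding = 0u;
        SubAllocDS::value_type addresses[3] = {
            SubAllocDS::invalid_value, SubAllocDS::invalid_value, SubAllocDS::invalid_value
        };
        subAllocDS->multi_allocate(binding, 3u, addresses);

        // ... write the actual descriptors into `set` at these offsets and submit GPU work
        //     that signals `semaphore` with `signalValue` once it no longer needs them ...

        // Deferred free: the offsets are only recycled after the semaphore reaches the wait
        // value, via the DeferredFreeFunctor latched on the event handler.
        const video::ISemaphore::SWaitInfo waitInfo = { semaphore.get(), signalValue };
        subAllocDS->multi_deallocate(binding, 3u, addresses, waitInfo);

        // Poll outstanding deferred frees periodically (e.g. once per frame);
        // returns how many latched free events are still pending.
        return subAllocDS->cull_frees();
    }

As the null check on futureWait.semaphore in the diff shows, the same multi_deallocate call falls back to an immediate multi_deallocate(binding, count, addr) when no semaphore wait is supplied.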