Skip to content

Commit ae39703

Browse files
Added internal class VmaDefragmentationAlgorithm_Fast::FreeSpaceDatabase.
Defragmentation algorithm is pretty much finished now!
1 parent 2af57d7 commit ae39703

1 file changed

Lines changed: 250 additions & 54 deletions

File tree

src/vk_mem_alloc.h

Lines changed: 250 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -5935,6 +5935,111 @@ class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
59355935
size_t origBlockIndex;
59365936
};
59375937

5938+
class FreeSpaceDatabase
5939+
{
5940+
public:
5941+
FreeSpaceDatabase()
5942+
{
5943+
FreeSpace s = {};
5944+
s.blockInfoIndex = SIZE_MAX;
5945+
for(size_t i = 0; i < MAX_COUNT; ++i)
5946+
{
5947+
m_FreeSpaces[i] = s;
5948+
}
5949+
}
5950+
5951+
void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
5952+
{
5953+
if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5954+
{
5955+
return;
5956+
}
5957+
5958+
// Find first invalid or the smallest structure.
5959+
size_t bestIndex = SIZE_MAX;
5960+
for(size_t i = 0; i < MAX_COUNT; ++i)
5961+
{
5962+
// Empty structure.
5963+
if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
5964+
{
5965+
bestIndex = i;
5966+
break;
5967+
}
5968+
if(m_FreeSpaces[i].size < size &&
5969+
(bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
5970+
{
5971+
bestIndex = i;
5972+
}
5973+
}
5974+
5975+
if(bestIndex != SIZE_MAX)
5976+
{
5977+
m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
5978+
m_FreeSpaces[bestIndex].offset = offset;
5979+
m_FreeSpaces[bestIndex].size = size;
5980+
}
5981+
}
5982+
5983+
bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
5984+
size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
5985+
{
5986+
size_t bestIndex = SIZE_MAX;
5987+
VkDeviceSize bestFreeSpaceAfter = 0;
5988+
for(size_t i = 0; i < MAX_COUNT; ++i)
5989+
{
5990+
// Structure is valid.
5991+
if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
5992+
{
5993+
const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
5994+
// Allocation fits into this structure.
5995+
if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
5996+
{
5997+
const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
5998+
(dstOffset + size);
5999+
if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6000+
{
6001+
bestIndex = i;
6002+
bestFreeSpaceAfter = freeSpaceAfter;
6003+
}
6004+
}
6005+
}
6006+
}
6007+
6008+
if(bestIndex != SIZE_MAX)
6009+
{
6010+
outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6011+
outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6012+
6013+
if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6014+
{
6015+
// Leave this structure for remaining empty space.
6016+
const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6017+
m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6018+
m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6019+
}
6020+
else
6021+
{
6022+
// This structure becomes invalid.
6023+
m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6024+
}
6025+
6026+
return true;
6027+
}
6028+
6029+
return false;
6030+
}
6031+
6032+
private:
6033+
static const size_t MAX_COUNT = 4;
6034+
6035+
struct FreeSpace
6036+
{
6037+
size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6038+
VkDeviceSize offset;
6039+
VkDeviceSize size;
6040+
} m_FreeSpaces[MAX_COUNT];
6041+
};
6042+
59386043
const bool m_OverlappingMoveSupported;
59396044

59406045
uint32_t m_AllocationCount;
@@ -5947,6 +6052,7 @@ class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
59476052

59486053
void PreprocessMetadata();
59496054
void PostprocessMetadata();
6055+
void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
59506056
};
59516057

59526058
struct VmaBlockDefragmentationContext
@@ -12365,6 +12471,8 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
1236512471

1236612472
// THE MAIN ALGORITHM
1236712473

12474+
FreeSpaceDatabase freeSpaceDb;
12475+
1236812476
size_t dstBlockInfoIndex = 0;
1236912477
size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
1237012478
VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
@@ -12382,6 +12490,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
1238212490
!end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
1238312491
{
1238412492
VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12493+
const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
1238512494
const VkDeviceSize srcAllocSize = srcSuballocIt->size;
1238612495
if(m_AllocationsMoved == maxAllocationsToMove ||
1238712496
m_BytesMoved + srcAllocSize > maxBytesToMove)
@@ -12390,84 +12499,157 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
1239012499
break;
1239112500
}
1239212501
const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12393-
VkDeviceSize dstAllocOffset = VmaAlignUp(dstOffset, pAlloc->GetAlignment());
1239412502

12395-
// If the allocation doesn't fit before the end of dstBlock, forward to next block.
12396-
while(dstBlockInfoIndex < srcBlockInfoIndex &&
12397-
dstAllocOffset + srcAllocSize > dstBlockSize)
12503+
// Try to place it in one of free spaces from the database.
12504+
size_t freeSpaceInfoIndex;
12505+
VkDeviceSize dstAllocOffset;
12506+
if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12507+
freeSpaceInfoIndex, dstAllocOffset))
1239812508
{
12399-
++dstBlockInfoIndex;
12400-
dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12401-
pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12402-
pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12403-
dstBlockSize = pDstMetadata->GetSize();
12404-
dstOffset = 0;
12405-
dstAllocOffset = 0;
12406-
}
12509+
size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12510+
VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12511+
VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12512+
VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
1240712513

12408-
// Same block
12409-
if(dstBlockInfoIndex == srcBlockInfoIndex)
12410-
{
12411-
VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12514+
// Same block
12515+
if(freeSpaceInfoIndex == srcBlockInfoIndex)
12516+
{
12517+
VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
1241212518

12413-
const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
12519+
// MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
1241412520

12415-
bool skipOver = overlap;
12416-
if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
12417-
{
12418-
// If destination and source place overlap, skip if it would move it
12419-
// by only < 1/64 of its size.
12420-
skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
12421-
}
12521+
VmaSuballocation suballoc = *srcSuballocIt;
12522+
suballoc.offset = dstAllocOffset;
12523+
suballoc.hAllocation->ChangeOffset(dstAllocOffset);
12524+
m_BytesMoved += srcAllocSize;
12525+
++m_AllocationsMoved;
12526+
12527+
VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12528+
++nextSuballocIt;
12529+
pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12530+
srcSuballocIt = nextSuballocIt;
1242212531

12423-
if(skipOver)
12424-
{
12425-
dstOffset = srcAllocOffset + srcAllocSize;
12426-
++srcSuballocIt;
12532+
InsertSuballoc(pFreeSpaceMetadata, suballoc);
12533+
12534+
VmaDefragmentationMove move = {
12535+
srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12536+
srcAllocOffset, dstAllocOffset,
12537+
srcAllocSize };
12538+
moves.push_back(move);
1242712539
}
12428-
// MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12540+
// Different block
1242912541
else
1243012542
{
12431-
srcSuballocIt->offset = dstAllocOffset;
12432-
srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
12433-
dstOffset = dstAllocOffset + srcAllocSize;
12543+
// MOVE OPTION 2: Move the allocation to a different block.
12544+
12545+
VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
12546+
12547+
VmaSuballocation suballoc = *srcSuballocIt;
12548+
suballoc.offset = dstAllocOffset;
12549+
suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
1243412550
m_BytesMoved += srcAllocSize;
1243512551
++m_AllocationsMoved;
12436-
++srcSuballocIt;
12552+
12553+
VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12554+
++nextSuballocIt;
12555+
pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12556+
srcSuballocIt = nextSuballocIt;
12557+
12558+
InsertSuballoc(pFreeSpaceMetadata, suballoc);
12559+
1243712560
VmaDefragmentationMove move = {
12438-
srcOrigBlockIndex, dstOrigBlockIndex,
12561+
srcOrigBlockIndex, freeSpaceOrigBlockIndex,
1243912562
srcAllocOffset, dstAllocOffset,
1244012563
srcAllocSize };
1244112564
moves.push_back(move);
1244212565
}
1244312566
}
12444-
// Different block
1244512567
else
1244612568
{
12447-
// MOVE OPTION 2: Move the allocation to a different block.
12569+
dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
1244812570

12449-
VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
12450-
VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
12571+
// If the allocation doesn't fit before the end of dstBlock, forward to next block.
12572+
while(dstBlockInfoIndex < srcBlockInfoIndex &&
12573+
dstAllocOffset + srcAllocSize > dstBlockSize)
12574+
{
12575+
// But before that, register remaining free space at the end of dst block.
12576+
freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
12577+
12578+
++dstBlockInfoIndex;
12579+
dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12580+
pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12581+
pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12582+
dstBlockSize = pDstMetadata->GetSize();
12583+
dstOffset = 0;
12584+
dstAllocOffset = 0;
12585+
}
1245112586

12452-
VmaSuballocation suballoc = *srcSuballocIt;
12453-
suballoc.offset = dstAllocOffset;
12454-
suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
12455-
dstOffset = dstAllocOffset + srcAllocSize;
12456-
m_BytesMoved += srcAllocSize;
12457-
++m_AllocationsMoved;
12587+
// Same block
12588+
if(dstBlockInfoIndex == srcBlockInfoIndex)
12589+
{
12590+
VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
1245812591

12459-
VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12460-
++nextSuballocIt;
12461-
pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12462-
srcSuballocIt = nextSuballocIt;
12592+
const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
1246312593

12464-
pDstMetadata->m_Suballocations.push_back(suballoc);
12594+
bool skipOver = overlap;
12595+
if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
12596+
{
12597+
// If destination and source place overlap, skip if it would move it
12598+
// by only < 1/64 of its size.
12599+
skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
12600+
}
1246512601

12466-
VmaDefragmentationMove move = {
12467-
srcOrigBlockIndex, dstOrigBlockIndex,
12468-
srcAllocOffset, dstAllocOffset,
12469-
srcAllocSize };
12470-
moves.push_back(move);
12602+
if(skipOver)
12603+
{
12604+
freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
12605+
12606+
dstOffset = srcAllocOffset + srcAllocSize;
12607+
++srcSuballocIt;
12608+
}
12609+
// MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12610+
else
12611+
{
12612+
srcSuballocIt->offset = dstAllocOffset;
12613+
srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
12614+
dstOffset = dstAllocOffset + srcAllocSize;
12615+
m_BytesMoved += srcAllocSize;
12616+
++m_AllocationsMoved;
12617+
++srcSuballocIt;
12618+
VmaDefragmentationMove move = {
12619+
srcOrigBlockIndex, dstOrigBlockIndex,
12620+
srcAllocOffset, dstAllocOffset,
12621+
srcAllocSize };
12622+
moves.push_back(move);
12623+
}
12624+
}
12625+
// Different block
12626+
else
12627+
{
12628+
// MOVE OPTION 2: Move the allocation to a different block.
12629+
12630+
VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
12631+
VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
12632+
12633+
VmaSuballocation suballoc = *srcSuballocIt;
12634+
suballoc.offset = dstAllocOffset;
12635+
suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
12636+
dstOffset = dstAllocOffset + srcAllocSize;
12637+
m_BytesMoved += srcAllocSize;
12638+
++m_AllocationsMoved;
12639+
12640+
VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12641+
++nextSuballocIt;
12642+
pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12643+
srcSuballocIt = nextSuballocIt;
12644+
12645+
pDstMetadata->m_Suballocations.push_back(suballoc);
12646+
12647+
VmaDefragmentationMove move = {
12648+
srcOrigBlockIndex, dstOrigBlockIndex,
12649+
srcAllocOffset, dstAllocOffset,
12650+
srcAllocSize };
12651+
moves.push_back(move);
12652+
}
1247112653
}
1247212654
}
1247312655
}
@@ -12590,6 +12772,20 @@ void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
1259012772
}
1259112773
}
1259212774

12775+
// Inserts suballoc into pMetadata->m_Suballocations so that the list stays
// sorted by offset (the list is assumed to already be sorted).
void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
{
    // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    // Advance to the first element whose offset is >= suballoc.offset.
    // BUGFIX: the original loop only incremented `it` when
    // it->offset < suballoc.offset, so it spun forever (infinite loop) as soon
    // as an element with a greater-or-equal offset was reached. Folding the
    // comparison into the loop condition terminates correctly in both cases.
    while(it != pMetadata->m_Suballocations.end() &&
        it->offset < suballoc.offset)
    {
        ++it;
    }
    pMetadata->m_Suballocations.insert(it, suballoc);
}
12788+
1259312789
////////////////////////////////////////////////////////////////////////////////
1259412790
// VmaBlockVectorDefragmentationContext
1259512791

0 commit comments

Comments
 (0)