diff --git a/xtoskrnl/includes/mm/alloc.hh b/xtoskrnl/includes/mm/alloc.hh index aaac9cd..16f8142 100644 --- a/xtoskrnl/includes/mm/alloc.hh +++ b/xtoskrnl/includes/mm/alloc.hh @@ -18,6 +18,13 @@ namespace MM { class Allocator final : private Pool { + private: + STATIC ULONG BigAllocationsInUse; + STATIC PPOOL_TRACKER_BIG_ALLOCATIONS BigAllocationsTable; + STATIC SIZE_T BigAllocationsTableHash; + STATIC KSPIN_LOCK BigAllocationsTableLock; + STATIC SIZE_T BigAllocationsTableSize; + public: STATIC XTAPI XTSTATUS AllocatePages(IN MMPOOL_TYPE PoolType, IN SIZE_T Bytes, @@ -35,16 +42,26 @@ namespace MM STATIC XTAPI XTSTATUS FreePool(IN PVOID VirtualAddress); STATIC XTAPI XTSTATUS FreePool(IN PVOID VirtualAddress, IN ULONG Tag); + STATIC XTAPI VOID InitializeBigAllocationsTable(VOID); private: STATIC XTAPI XTSTATUS AllocateNonPagedPoolPages(IN PFN_COUNT Pages, OUT PVOID *Memory); STATIC XTAPI XTSTATUS AllocatePagedPoolPages(IN PFN_COUNT Pages, OUT PVOID *Memory); + STATIC XTINLINE ULONG ComputeHash(IN PVOID VirtualAddress); + STATIC XTAPI BOOLEAN ExpandBigAllocationsTable(VOID); STATIC XTAPI XTSTATUS FreeNonPagedPoolPages(IN PVOID VirtualAddress, OUT PPFN_NUMBER PagesFreed); STATIC XTAPI XTSTATUS FreePagedPoolPages(IN PVOID VirtualAddress, OUT PPFN_NUMBER PagesFreed); + STATIC XTAPI BOOLEAN RegisterBigAllocationTag(IN PVOID VirtualAddress, + IN ULONG Key, + IN ULONG NumberOfPages, + IN MMPOOL_TYPE PoolType); + STATIC XTAPI ULONG UnregisterBigAllocationTag(IN PVOID VirtualAddress, + OUT PULONG_PTR NumberOfPages, + IN MMPOOL_TYPE PoolType); }; } diff --git a/xtoskrnl/includes/mm/pool.hh b/xtoskrnl/includes/mm/pool.hh index 988c73a..2f96cfa 100644 --- a/xtoskrnl/includes/mm/pool.hh +++ b/xtoskrnl/includes/mm/pool.hh @@ -18,20 +18,49 @@ namespace MM class Pool { protected: + STATIC POOL_DESCRIPTOR NonPagedPoolDescriptor; STATIC PFN_NUMBER NonPagedPoolFrameEnd; STATIC PFN_NUMBER NonPagedPoolFrameStart; STATIC LIST_ENTRY NonPagedPoolFreeList[MM_MAX_FREE_PAGE_LIST_HEADS]; + 
STATIC ULONG PoolSecureCookie; + STATIC PPOOL_DESCRIPTOR PoolVector[2]; public: + STATIC XTAPI MMPOOL_TYPE DeterminePoolType(IN PVOID VirtualAddress); STATIC XTAPI VOID InitializeNonPagedPool(VOID); STATIC XTAPI VOID InitializePagedPool(VOID); + STATIC XTAPI VOID InitializePoolSecurity(VOID); protected: + STATIC XTAPI PLIST_ENTRY DecodePoolLink(IN PLIST_ENTRY PoolLink); + STATIC XTAPI PLIST_ENTRY EncodePoolLink(IN PLIST_ENTRY PoolLink); + STATIC XTAPI PPOOL_HEADER GetPoolBlock(IN PPOOL_HEADER Header, IN SSIZE_T Index); + STATIC XTAPI PPOOL_HEADER GetPoolEntry(IN PVOID Payload); + STATIC XTAPI PLIST_ENTRY GetPoolFreeBlock(IN PPOOL_HEADER Header); + STATIC XTAPI PPOOL_HEADER GetPoolNextBlock(IN PPOOL_HEADER Header); + STATIC XTAPI PPOOL_HEADER GetPoolPreviousBlock(IN PPOOL_HEADER Header); + STATIC XTAPI VOID InsertPoolHeadList(IN PLIST_ENTRY ListHead, + IN PLIST_ENTRY Entry); + STATIC XTAPI VOID InsertPoolTailList(IN PLIST_ENTRY ListHead, + IN PLIST_ENTRY Entry); + STATIC XTAPI BOOLEAN PoolListEmpty(IN PLIST_ENTRY ListHead); + STATIC XTAPI VOID RemovePoolEntryList(IN PLIST_ENTRY Entry); + STATIC XTAPI PLIST_ENTRY RemovePoolHeadList(IN PLIST_ENTRY ListHead); + STATIC XTAPI PLIST_ENTRY RemovePoolTailList(IN PLIST_ENTRY ListHead); + STATIC XTAPI VOID VerifyPoolBlocks(IN PVOID Block); + STATIC XTAPI VOID VerifyPoolHeader(IN PPOOL_HEADER Entry); + STATIC XTAPI VOID VerifyPoolLinks(IN PLIST_ENTRY ListHead); STATIC XTAPI VOID VerifyRunLevel(IN MMPOOL_TYPE PoolType, IN SIZE_T Bytes, IN PVOID Entry); private: + STATIC XTAPI VOID InitializePoolDescriptor(IN PPOOL_DESCRIPTOR Descriptor, + IN MMPOOL_TYPE PoolType, + IN ULONG Index, + IN ULONG Threshold, + IN PVOID LockAddress); + STATIC XTAPI VOID InitializePoolListHead(IN PLIST_ENTRY ListHead); STATIC XTAPI VOID MapNonPagedPool(VOID); }; } diff --git a/xtoskrnl/mm/alloc.cc b/xtoskrnl/mm/alloc.cc index d6a59f9..a6fe08e 100644 --- a/xtoskrnl/mm/alloc.cc +++ b/xtoskrnl/mm/alloc.cc @@ -44,7 +44,7 @@ 
MM::Allocator::AllocateNonPagedPoolPages(IN PFN_COUNT Pages, /* Start a guarded code block */ { - /* Acquire the Non-Paged pool lock and raise runlevel to DISPATCH_LEVEL */ + /* Acquire the Non-Paged pool lock and raise runlevel to DISPATCH level */ KE::RaiseRunLevel RunLevel(DISPATCH_LEVEL); KE::QueuedSpinLockGuard NonPagedPoolSpinLock(NonPagedPoolLock); @@ -80,10 +80,10 @@ MM::Allocator::AllocateNonPagedPoolPages(IN PFN_COUNT Pages, RTL::LinkedList::InsertTailList(&NonPagedPoolFreeList[Index], &FreePage->List); } - /* Get the Page Table Entry (PTE) for the allocated address */ + /* Get the PTE for the allocated address */ PointerPte = MM::Paging::GetPteAddress(BaseAddress); - /* Get the Page Frame Number (PFN) database entry for the corresponding physical page */ + /* Get the PFN database entry for the corresponding physical page */ Pfn = MM::Pfn::GetPfnEntry(MM::Paging::GetPageFrameNumber(PointerPte)); /* Denote allocation boundaries */ @@ -122,7 +122,7 @@ MM::Allocator::AllocateNonPagedPoolPages(IN PFN_COUNT Pages, return STATUS_INSUFFICIENT_RESOURCES; } - /* Acquire the Non-Paged pool lock and raise runlevel to DISPATCH_LEVEL */ + /* Acquire the Non-Paged pool lock and raise runlevel to DISPATCH level */ KE::RaiseRunLevel RunLevel(DISPATCH_LEVEL); KE::QueuedSpinLockGuard NonPagedPoolSpinLock(NonPagedPoolLock); @@ -310,7 +310,11 @@ MM::Allocator::AllocatePool(IN MMPOOL_TYPE PoolType, OUT PVOID *Memory, IN ULONG Tag) { - UNIMPLEMENTED; + PPOOL_HEADER PoolEntry, NextPoolEntry, PoolRemainder; + PPOOL_DESCRIPTOR PoolDescriptor; + USHORT BlockSize, Index; + PLIST_ENTRY ListHead; + XTSTATUS Status; /* Verify run level for the specified pool */ VerifyRunLevel(PoolType, Bytes, NULLPTR); @@ -322,8 +326,364 @@ MM::Allocator::AllocatePool(IN MMPOOL_TYPE PoolType, Bytes = 1; } - /* Allocate pages */ - return AllocatePages(PoolType, Bytes, Memory); + /* Retrieve the specific pool descriptor based on the masked pool type */ + PoolDescriptor = PoolVector[PoolType & 
MM_POOL_TYPE_MASK]; + + /* Determine if the requested size exceeds the maximum standard pool block capacity */ + if(Bytes > (MM_PAGE_SIZE - (sizeof(POOL_HEADER) + MM_POOL_BLOCK_SIZE))) + { + /* Allocate new, raw pages directly to satisfy the large allocation request */ + Status = AllocatePages(PoolType, Bytes, (PVOID*)&PoolEntry); + if(Status != STATUS_SUCCESS || !PoolEntry) + { + /* Allocation failed, clear the output pointer and return the error status */ + *Memory = NULLPTR; + return Status; + } + + /* Update the pool descriptor statistical counters */ + RTL::Atomic::ExchangeAdd32((PLONG)&PoolDescriptor->TotalBigAllocations, (LONG)SIZE_TO_PAGES(Bytes)); + RTL::Atomic::ExchangeAdd64((PLONG_PTR)&PoolDescriptor->TotalBytes, (LONG_PTR)Bytes); + RTL::Atomic::Increment32((PLONG)&PoolDescriptor->RunningAllocations); + + /* Attempt to register the big allocation within the tracking table */ + if(!RegisterBigAllocationTag(PoolEntry, Tag, (ULONG)SIZE_TO_PAGES(Bytes), PoolType)) + { + /* Fallback to a default tag */ + Tag = SIGNATURE32('B', 'i', 'g', 'A'); + } + + /* Supply the allocated address and return success */ + *Memory = PoolEntry; + return STATUS_SUCCESS; + } + + /* Calculate the required block index */ + Index = (USHORT)((Bytes + sizeof(POOL_HEADER) + (MM_POOL_BLOCK_SIZE - 1)) / MM_POOL_BLOCK_SIZE); + + /* Resolve the appropriate list head for the calculated block index */ + ListHead = &PoolDescriptor->ListHeads[Index]; + while(ListHead != &PoolDescriptor->ListHeads[MM_POOL_LISTS_PER_PAGE]) + { + /* Check whether the target free list contains available blocks */ + if(!PoolListEmpty(ListHead)) + { + /* Start a guarded code block */ + { + /* Acquire the pool lock */ + PoolLockGuard PoolLock((MMPOOL_TYPE)(PoolDescriptor->PoolType & MM_POOL_TYPE_MASK)); + + /* Re-evaluate the list emptiness to prevent race conditions */ + if(PoolListEmpty(ListHead)) + { + /* Proceed to evaluate the next list head */ + continue; + } + + /* Validate the structural integrity of the pool 
list */ + VerifyPoolLinks(ListHead); + + /* Extract the first available free block from the list and resolve its header */ + PoolEntry = GetPoolEntry(RemovePoolHeadList(ListHead)); + + /* Re-validate the pool list and verify integrity of the extracted block */ + VerifyPoolLinks(ListHead); + VerifyPoolBlocks(PoolEntry); + + /* Check whether the extracted block requires splitting */ + if(PoolEntry->BlockSize != Index) + { + /* Check if the block is located at the absolute beginning of a page */ + if(PoolEntry->PreviousSize == 0) + { + /* Split the block and initialize the remainder */ + PoolRemainder = GetPoolBlock(PoolEntry, Index); + PoolRemainder->BlockSize = PoolEntry->BlockSize - Index; + PoolRemainder->PreviousSize = Index; + + /* Resolve the subsequent block and update its previous size field */ + NextPoolEntry = GetPoolNextBlock(PoolRemainder); + if(PAGE_ALIGN(NextPoolEntry) != NextPoolEntry) + { + /* Adjust the adjacent block to reflect the new size of the remainder */ + NextPoolEntry->PreviousSize = PoolRemainder->BlockSize; + } + } + else + { + /* Split the extracted block */ + PoolRemainder = PoolEntry; + PoolEntry->BlockSize -= Index; + + /* Advance the pointer to the new block and update its previous size */ + PoolEntry = GetPoolNextBlock(PoolEntry); + PoolEntry->PreviousSize = PoolRemainder->BlockSize; + + /* Resolve the adjacent next block and adjust its previous size */ + NextPoolEntry = GetPoolBlock(PoolEntry, Index); + if(PAGE_ALIGN(NextPoolEntry) != NextPoolEntry) + { + /* Adjust the adjacent block */ + NextPoolEntry->PreviousSize = Index; + } + } + + /* Finalize the structural sizing fields */ + BlockSize = PoolRemainder->BlockSize; + PoolEntry->BlockSize = Index; + PoolRemainder->PoolType = 0; + + /* Validate the target free list */ + VerifyPoolLinks(&PoolDescriptor->ListHeads[BlockSize - 1]); + + /* Ensure the remainder block is large enough to contain valid list */ + if(BlockSize != 1) + { + /* Insert the new remainder block into the 
appropriate free list and verify links */ + InsertPoolTailList(&PoolDescriptor->ListHeads[BlockSize - 1], GetPoolFreeBlock(PoolRemainder)); + VerifyPoolLinks(GetPoolFreeBlock(PoolRemainder)); + } + } + + /* Update the active pool type and verify structural invariants */ + PoolEntry->PoolType = PoolType + 1; + VerifyPoolBlocks(PoolEntry); + } + + /* Update the pool descriptor statistical counters */ + RTL::Atomic::ExchangeAdd64((PLONG_PTR)&PoolDescriptor->TotalBytes, (LONG_PTR)(PoolEntry->BlockSize * MM_POOL_BLOCK_SIZE)); + RTL::Atomic::Increment32((PLONG)&PoolDescriptor->RunningAllocations); + + /* Assign the specified identification tag */ + PoolEntry->PoolTag = Tag; + + /* Clear the internal list links */ + (GetPoolFreeBlock(PoolEntry))->Flink = NULLPTR; + (GetPoolFreeBlock(PoolEntry))->Blink = NULLPTR; + + /* Supply the allocated address and return success */ + *Memory = GetPoolFreeBlock(PoolEntry); + return STATUS_SUCCESS; + } + + /* Advance to the next list head */ + ListHead++; + } + + /* Allocate a new page to fulfill the request */ + Status = AllocatePages(PoolType, MM_PAGE_SIZE, (PVOID *)&PoolEntry); + if(Status != STATUS_SUCCESS || !PoolEntry) + { + /* Allocation failed, clear the output pointer and return the error status */ + *Memory = NULLPTR; + return Status; + } + + /* Initialize the structural header */ + PoolEntry->Long = 0; + PoolEntry->BlockSize = Index; + PoolEntry->PoolType = PoolType + 1; + + /* Calculate the block size of the remaining unused space */ + BlockSize = (MM_PAGE_SIZE / MM_POOL_BLOCK_SIZE) - Index; + + /* Initialize the remainder entry representing the free space */ + PoolRemainder = GetPoolBlock(PoolEntry, Index); + PoolRemainder->Long = 0; + PoolRemainder->BlockSize = BlockSize; + PoolRemainder->PreviousSize = Index; + + /* Update the pool descriptor statistical counters */ + RTL::Atomic::Increment32((PLONG)&PoolDescriptor->TotalPages); + RTL::Atomic::ExchangeAdd64((PLONG_PTR)&PoolDescriptor->TotalBytes, 
(LONG_PTR)(PoolEntry->BlockSize * MM_POOL_BLOCK_SIZE)); + + /* Check if the remainder block is large enough */ + if(PoolRemainder->BlockSize != 1) + { + /* Acquire the pool lock */ + PoolLockGuard PoolLock((MMPOOL_TYPE)(PoolDescriptor->PoolType & MM_POOL_TYPE_MASK)); + + /* Validate the target free list structure */ + VerifyPoolLinks(&PoolDescriptor->ListHeads[BlockSize - 1]); + + /* Insert the remainder block into the free list */ + InsertPoolTailList(&PoolDescriptor->ListHeads[BlockSize - 1], GetPoolFreeBlock(PoolRemainder)); + + /* Verify the structural integrity of the remainder and the allocated blocks */ + VerifyPoolLinks(GetPoolFreeBlock(PoolRemainder)); + VerifyPoolBlocks(PoolEntry); + } + else + { + /* Verify the allocated block invariants */ + VerifyPoolBlocks(PoolEntry); + } + + /* Increment the running allocation counter for the pool descriptor */ + RTL::Atomic::Increment32((PLONG)&PoolDescriptor->RunningAllocations); + + /* Perform a final structural validation of the pool block */ + VerifyPoolBlocks(PoolEntry); + + /* Apply the requested identification tag */ + PoolEntry->PoolTag = Tag; + + /* Supply the allocated address and return success */ + *Memory = GetPoolFreeBlock(PoolEntry); + return STATUS_SUCCESS; +} + +/** + * Computes a hash for a given virtual address to be used in the big allocation tracker. + * + * @param VirtualAddress + * Supplies the base virtual address to be hashed. + * + * @return This routine returns the computed partial hash value. 
+ * + * @since XT 1.0 + */ +XTINLINE +ULONG +MM::Allocator::ComputeHash(IN PVOID VirtualAddress) +{ + ULONG Result; + + /* Transform the virtual address into a page frame number representation */ + Result = (ULONG)((ULONG_PTR)VirtualAddress >> MM_PAGE_SHIFT); + + /* Fold the page number bits using XOR to distribute the entropy across the lower bits */ + return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result; +} + +/** + * Expands the big allocation tracking table to accommodate additional large allocations. + * + * @return This routine returns TRUE if the table was successfully expanded, FALSE otherwise. + * + * @since XT 1.0 + */ +XTAPI +BOOLEAN +MM::Allocator::ExpandBigAllocationsTable(VOID) +{ + PPOOL_TRACKER_BIG_ALLOCATIONS NewTable, OldTable; + SIZE_T AllocationBytes, OldSize, NewSize; + ULONG Hash, HashMask, Index; + XTSTATUS Status; + BOOLEAN Abort; + + /* Initialize the abort flag and snapshot current table capacity */ + Abort = FALSE; + OldSize = BigAllocationsTableSize; + + /* Check if doubling the size would cause an integer overflow */ + if(OldSize > ((~(SIZE_T)0) / 2)) + { + /* Abort expansion to prevent integer wrap-around */ + return FALSE; + } + + /* Calculate the target capacity by safely doubling table capacity */ + NewSize = OldSize * 2; + + /* Ensure the new capacity does not result in fractional memory pages */ + NewSize = ROUND_DOWN(NewSize, MM_PAGE_SIZE / sizeof(POOL_TRACKER_BIG_ALLOCATIONS)); + + /* Check if calculating the total byte size would cause an integer overflow */ + if(NewSize > ((~(SIZE_T)0) / sizeof(POOL_TRACKER_BIG_ALLOCATIONS))) + { + /* Abort expansion to prevent allocating a truncated memory block */ + return FALSE; + } + + /* Compute the size required for the newly expanded tracking table */ + AllocationBytes = NewSize * sizeof(POOL_TRACKER_BIG_ALLOCATIONS); + + /* Allocate the required memory */ + Status = AllocatePages(NonPagedPool, AllocationBytes, (PVOID*)&NewTable); + if(Status != STATUS_SUCCESS || !NewTable) + 
{ + /* Memory allocation failed, abort the table expansion */ + return FALSE; + } + + /* Zero the newly allocated table */ + RTL::Memory::ZeroMemory(NewTable, AllocationBytes); + + /* Iterate through the allocated memory block */ + for(Index = 0; Index < NewSize; Index++) + { + /* Mark the tracking entry as free and available */ + NewTable[Index].VirtualAddress = (PVOID)MM_POOL_BIG_ALLOCATIONS_ENTRY_FREE; + } + + /* Start a guarded code block */ + { + /* Acquire the table lock and raise runlevel to DISPATCH level */ + KE::RaiseRunLevel RunLevel(DISPATCH_LEVEL); + KE::SpinLockGuard BigAllocationsLock(&BigAllocationsTableLock); + + /* Verify if another thread has already expanded the table concurrently */ + if(BigAllocationsTableSize >= NewSize) + { + /* Another thread has already expanded the table, discard changes */ + Abort = TRUE; + } + else + { + /* Cache the legacy table pointer and calculate new hash mask */ + HashMask = NewSize - 1; + OldTable = BigAllocationsTable; + + /* Rehash and migrate all active entries from the old table */ + for(Index = 0; Index < OldSize; Index++) + { + /* Bypass unallocated entries in the legacy table */ + if((ULONG_PTR)OldTable[Index].VirtualAddress & MM_POOL_BIG_ALLOCATIONS_ENTRY_FREE) + { + /* Skip to the next entry */ + continue; + } + + /* Compute the updated hash index */ + Hash = ComputeHash(OldTable[Index].VirtualAddress) & HashMask; + + /* Resolve hash collisions using linear probing */ + while(!((ULONG_PTR)NewTable[Hash].VirtualAddress & MM_POOL_BIG_ALLOCATIONS_ENTRY_FREE)) + { + /* Advance the bucket index and check for table boundary overflow */ + if(++Hash == NewSize) + { + /* Wrap the probing index back to the beginning */ + Hash = 0; + } + } + + /* Migrate the active entry to its new hash bucket */ + NewTable[Hash] = OldTable[Index]; + } + + /* Activate the newly populated table globally */ + BigAllocationsTable = NewTable; + BigAllocationsTableHash = NewSize - 1; + BigAllocationsTableSize = NewSize; + } + } + + /* 
Check if another thread has already expanded the table concurrently */ + if(Abort) + { + /* Free memory allocated for the new table and return */ + FreePages(NewTable); + return TRUE; + } + + /* Free memory allocated for the legacy table */ + FreePages(OldTable); + + /* Return success */ + return TRUE; } /** @@ -383,7 +743,7 @@ MM::Allocator::FreeNonPagedPoolPages(IN PVOID VirtualAddress, /* Save the total free page count */ FreePages = Pages; - /* Acquire the Non-Paged pool lock and raise runlevel to DISPATCH_LEVEL */ + /* Acquire the Non-Paged pool lock and raise runlevel to DISPATCH level */ KE::RaiseRunLevel RunLevel(DISPATCH_LEVEL); KE::QueuedSpinLockGuard NonPagedPoolSpinLock(NonPagedPoolLock); @@ -633,8 +993,505 @@ XTSTATUS MM::Allocator::FreePool(IN PVOID VirtualAddress, IN ULONG Tag) { + PPOOL_HEADER PoolEntry, NextPoolEntry; + PFN_NUMBER PageCount, RealPageCount; + PPOOL_DESCRIPTOR PoolDescriptor; + MMPOOL_TYPE PoolType; + USHORT BlockSize; + BOOLEAN Combined; + XTSTATUS Status; + + /* Determine if the allocation is page-aligned */ + if(PAGE_ALIGN(VirtualAddress) == VirtualAddress) + { + /* Determine and the memory pool type from the VA mapping */ + PoolType = DeterminePoolType(VirtualAddress); + + /* Verify run level for the specified pool */ + VerifyRunLevel(PoolType, 0, VirtualAddress); + + /* Retrieve original metadata while removing the allocation from the tracking table */ + Tag = UnregisterBigAllocationTag(VirtualAddress, &PageCount, PoolType); + if(!Tag) + { + /* Fallback to a default tag */ + Tag = SIGNATURE32('B', 'i', 'g', 'A'); + PageCount = 1; + } + + /* Retrieve the specific pool descriptor based on the masked pool type */ + PoolDescriptor = PoolVector[PoolType]; + + /* Update the pool descriptor statistical counters */ + RTL::Atomic::Increment32((PLONG)&PoolDescriptor->RunningFrees); + RTL::Atomic::ExchangeAdd64((PLONG_PTR)&PoolDescriptor->TotalBytes, -(LONG_PTR)(PageCount << MM_PAGE_SHIFT)); + + /* Release the underlying physical pages */ 
+ Status = FreePages(VirtualAddress, &RealPageCount); + if(Status == STATUS_SUCCESS) + { + /* Adjust the big allocation counter */ + RTL::Atomic::ExchangeAdd32((PLONG)&PoolDescriptor->TotalBigAllocations, -(LONG)RealPageCount); + } + + /* Return status code */ + return Status; + } + + /* Resolve the pool header */ + PoolEntry = (PPOOL_HEADER)VirtualAddress; + PoolEntry--; + + /* Extract the structural block size from the pool header */ + BlockSize = PoolEntry->BlockSize; + + /* Determine the underlying pool type and resolve its corresponding pool descriptor */ + PoolType = (MMPOOL_TYPE)((PoolEntry->PoolType - 1) & MM_POOL_TYPE_MASK); + PoolDescriptor = PoolVector[PoolType]; + + /* Verify run level for the specified pool */ + VerifyRunLevel(PoolType, 0, VirtualAddress); + + /* Extract the allocation identifying tag and initialize the consolidation flag */ + Tag = PoolEntry->PoolTag; + Combined = FALSE; + + /* Locate the adjacent forward pool block */ + NextPoolEntry = GetPoolBlock(PoolEntry, BlockSize); + + /* Update the pool descriptor statistical counters */ + RTL::Atomic::Increment32((PLONG)&PoolDescriptor->RunningFrees); + RTL::Atomic::ExchangeAdd64((PLONG_PTR)&PoolDescriptor->TotalBytes, (LONG_PTR)(-BlockSize * MM_POOL_BLOCK_SIZE)); + + /* Acquire the pool lock */ + PoolLockGuard PoolLock((MMPOOL_TYPE)(PoolDescriptor->PoolType & MM_POOL_TYPE_MASK)); + + /* Validate the structural integrity of the base block */ + VerifyPoolBlocks(PoolEntry); + + /* Ensure the adjacent forward block does not cross a page boundary */ + if(PAGE_ALIGN(NextPoolEntry) != NextPoolEntry) + { + /* Check if the adjacent forward block is currently marked as free */ + if(NextPoolEntry->PoolType == 0) + { + /* Flag the deallocation as having triggered a forward block merge */ + Combined = TRUE; + + /* Check if the forward block is large enough */ + if(NextPoolEntry->BlockSize != 1) + { + /* Validate the list links */ + VerifyPoolLinks(GetPoolFreeBlock(NextPoolEntry)); + + /* Unlink the 
forward block from its respective free list */ + RemovePoolEntryList(GetPoolFreeBlock(NextPoolEntry)); + + /* Re-validate the surrounding list links */ + VerifyPoolLinks(DecodePoolLink((GetPoolFreeBlock(NextPoolEntry))->Flink)); + VerifyPoolLinks(DecodePoolLink((GetPoolFreeBlock(NextPoolEntry))->Blink)); + } + + /* Expand the size of the current block to include the forward free block */ + PoolEntry->BlockSize += NextPoolEntry->BlockSize; + } + } + + /* Check if a valid adjacent backward block exists */ + if(PoolEntry->PreviousSize) + { + /* Resolve the adjacent backward block and check if it is free */ + NextPoolEntry = GetPoolPreviousBlock(PoolEntry); + if(NextPoolEntry->PoolType == 0) + { + /* Flag the deallocation as having triggered a backward block merge */ + Combined = TRUE; + + /* Check if the backward free block contains embedded list links */ + if(NextPoolEntry->BlockSize != 1) + { + /* Validate the backward block list links */ + VerifyPoolLinks(GetPoolFreeBlock(NextPoolEntry)); + + /* Extract the backward block from the free list */ + RemovePoolEntryList(GetPoolFreeBlock(NextPoolEntry)); + + /* Re-validate the adjacent free list */ + VerifyPoolLinks(DecodePoolLink((GetPoolFreeBlock(NextPoolEntry))->Flink)); + VerifyPoolLinks(DecodePoolLink((GetPoolFreeBlock(NextPoolEntry))->Blink)); + } + + /* Expand the backward block to include the freed base block */ + NextPoolEntry->BlockSize += PoolEntry->BlockSize; + + /* Shift the base entry pointer */ + PoolEntry = NextPoolEntry; + } + } + + /* Check whether the consolidated block spans an entire page */ + if((PAGE_ALIGN(PoolEntry) == PoolEntry) && + (PAGE_ALIGN(GetPoolNextBlock(PoolEntry)) == GetPoolNextBlock(PoolEntry))) + { + /* Release the pool lock */ + PoolLock.Release(); + + /* Decrement the total page count and return the entire page back */ + RTL::Atomic::ExchangeAdd32((PLONG)&PoolDescriptor->TotalPages, -1); + return FreePages(PoolEntry); + } + + /* Finalize the consolidated block size and mark the 
primary header as free */ + BlockSize = PoolEntry->BlockSize; + PoolEntry->PoolType = 0; + + /* Check if any coalescing occurred */ + if(Combined) + { + /* Resolve the new adjacent forward block and verify it resides on the same page */ + NextPoolEntry = GetPoolNextBlock(PoolEntry); + if(PAGE_ALIGN(NextPoolEntry) != NextPoolEntry) + { + /* Adjust the backward reference of the forward block */ + NextPoolEntry->PreviousSize = BlockSize; + } + } + + /* Insert the freed and consolidated block into the pool free list */ + InsertPoolHeadList(&PoolDescriptor->ListHeads[BlockSize - 1], GetPoolFreeBlock(PoolEntry)); + + /* Perform a final linkvalidation and return success */ + VerifyPoolLinks(GetPoolFreeBlock(PoolEntry)); + return STATUS_SUCCESS; +} + +/** + * Initializes the big allocations tracking table during early system boot. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MM::Allocator::InitializeBigAllocationsTable(VOID) +{ + SIZE_T TableSize; + ULONG Index; + XTSTATUS Status; + PMMMEMORY_LAYOUT MemoryLayout; + + /* Not fully implemented yet, HIVE support needed */ UNIMPLEMENTED; - /* Free pages */ - return FreePages(VirtualAddress); + /* Retrieve memory layout */ + MemoryLayout = MM::Manager::GetMemoryLayout(); + + /* TODO: Retrieve initial big allocation table size from the HIVE */ + BigAllocationsTableSize = 0; + + /* Calculate the target table size */ + TableSize = MIN(BigAllocationsTableSize, (MemoryLayout->NonPagedPoolSize * MM_PAGE_SIZE) >> 12); + + /* Perform a bit-scan to determine the highest set bit */ + for(Index = 0; Index < 32; Index++) + { + /* Check if the lowest bit is currently set */ + if(TableSize & 1) + { + /* Verify if this is the only remaining set bit */ + if(!(TableSize & ~1)) + { + /* Exit the loop as the highest bit has been found */ + break; + } + } + + /* Shift the size down by one bit to evaluate higher bits */ + TableSize >>= 1; + } + + /* Check if the bit-scan completed without finding 
any set bits */ + if(Index == 32) + { + /* Apply the default size of 4096 entries */ + BigAllocationsTableSize = 4096; + } + else + { + /* Calculate the aligned power of two size, enforcing a minimum of 64 entries */ + BigAllocationsTableSize = MAX(1 << Index, 64); + } + + /* Iteratively attempt to allocate the tracking table */ + while(TRUE) + { + /* Prevent integer overflow when calculating the required byte size for the table */ + if((BigAllocationsTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_ALLOCATIONS))) + { + /* Halve the requested entry count and restart the evaluation */ + BigAllocationsTableSize >>= 1; + continue; + } + + /* Attempt to allocate physical memory for the table */ + Status = AllocatePages(NonPagedPool, + BigAllocationsTableSize * sizeof(POOL_TRACKER_BIG_ALLOCATIONS), + (PVOID*)&BigAllocationsTable); + + /* Check if the allocation succeeded */ + if(Status != STATUS_SUCCESS || !BigAllocationsTable) + { + /* Check if the allocation failed even for a single entry */ + if(BigAllocationsTableSize == 1) + { + /* Failed to initialize the pool tracker, kernel panic */ + KE::Crash::Panic(0x41, TableSize, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); + } + + /* Halve the requested entry count */ + BigAllocationsTableSize >>= 1; + } + else + { + /* Allocation succeeded */ + break; + } + } + + /* Zero the entire memory used by the table */ + RTL::Memory::ZeroMemory(BigAllocationsTable, BigAllocationsTableSize * sizeof(POOL_TRACKER_BIG_ALLOCATIONS)); + + /* Iterate through the newly allocated table */ + for(Index = 0; Index < BigAllocationsTableSize; Index++) + { + /* Mark the individual pool tracker entry as free and available */ + BigAllocationsTable[Index].VirtualAddress = (PVOID)MM_POOL_BIG_ALLOCATIONS_ENTRY_FREE; + } + + /* Calculate and store the hash mask */ + BigAllocationsTableHash = BigAllocationsTableSize - 1; + + /* Initialize the spinlock used to synchronize concurrent modifications to the tracking table */ + 
KE::SpinLock::InitializeSpinLock(&BigAllocationsTableLock); +} + +/** + * Registers a big allocation within the tracking table. + * + * @param VirtualAddress + * Supplies the virtual address of the big allocation. + * + * @param Key + * Supplies the key used to identify the allocation. + * + * @param NumberOfPages + * Supplies the number of physical pages backing the allocation. + * + * @param PoolType + * Specifies the type of pool from which the memory was allocated. + * + * @return This routine returns TRUE on successful insertion, FALSE otherwise. + * + * @since XT 1.0 + */ +BOOLEAN +XTAPI +MM::Allocator::RegisterBigAllocationTag(IN PVOID VirtualAddress, + IN ULONG Key, + IN ULONG NumberOfPages, + IN MMPOOL_TYPE PoolType) +{ + PPOOL_TRACKER_BIG_ALLOCATIONS Entry; + BOOLEAN Inserted, RequiresExpansion; + ULONG Hash, StartHash; + + /* Wrap the insertion logic in a retry loop */ + while(TRUE) + { + /* Initialize local variables */ + Inserted = FALSE; + RequiresExpansion = FALSE; + + /* Calculate the initial hash bucket index */ + Hash = ComputeHash(VirtualAddress); + + /* Start a guarded code block */ + { + /* Acquire the table lock and raise runlevel to DISPATCH level */ + KE::RaiseRunLevel RunLevel(DISPATCH_LEVEL); + KE::SpinLockGuard BigAllocationsLock(&BigAllocationsTableLock); + + /* Retrieve the tracker entry */ + Hash &= BigAllocationsTableHash; + StartHash = Hash; + + /* Traverse the hash table */ + do + { + /* Retrieve the tracker entry */ + Entry = &BigAllocationsTable[Hash]; + + /* Check if the current bucket is marked as free */ + if((ULONG_PTR)Entry->VirtualAddress & MM_POOL_BIG_ALLOCATIONS_ENTRY_FREE) + { + /* Populate the available bucket with the allocation metadata */ + Entry->Key = Key; + Entry->NumberOfPages = NumberOfPages; + Entry->VirtualAddress = VirtualAddress; + + /* Increment the global usage counter */ + BigAllocationsInUse++; + + /* Determine if the table capacity has reached the critical 75% threshold */ + if(BigAllocationsInUse > 
(BigAllocationsTableSize * 3 / 4)) + { + /* Flag the table for expansion */ + RequiresExpansion = TRUE; + } + + /* Mark insertion as successful and break out of the probing loop */ + Inserted = TRUE; + break; + } + + /* Advance to the next bucket */ + if(++Hash >= BigAllocationsTableSize) + { + /* Wrap the index back to the beginning of the table */ + Hash = 0; + } + + /* If the traversal has wrapped entirely back to the starting index, the table is saturated */ + if(Hash == StartHash) + { + /* Break out of the probing loop */ + break; + } + } + while(Hash != StartHash); + } + + /* Check if the insertion succeeded */ + if(Inserted) + { + /* Check if a table expansion is required */ + if(RequiresExpansion) + { + /* Trigger a table expansion asynchronously */ + ExpandBigAllocationsTable(); + } + + /* Return success */ + return TRUE; + } + + /* The table is completely saturated, attempt to expand the table */ + if(ExpandBigAllocationsTable()) + { + /* The table was successfully expanded, retry the insertion */ + continue; + } + + /* Table expansion failed, break out of the retry loop */ + break; + } + + /* Return failure */ + return FALSE; +} + +/** + * Unregisters a big allocation from the tracking table and retrieves its metadata. + * + * @param VirtualAddress + * Supplies the virtual address of the big allocation to be removed. + * + * @param NumberOfPages + * Supplies the number of physical pages backing the allocation. + * + * @param PoolType + * Specifies the pool type of the allocation. + * + * @return This routine returns the allocation pool tag if found, or a default signature otherwise. 
+ * + * @since XT 1.0 + */ +XTAPI +ULONG +MM::Allocator::UnregisterBigAllocationTag(IN PVOID VirtualAddress, + OUT PULONG_PTR NumberOfPages, + IN MMPOOL_TYPE PoolType) +{ + ULONG Hash, StartHash; + ULONG PoolTag; + BOOLEAN Found; + PPOOL_TRACKER_BIG_ALLOCATIONS Entry; + + /* Initialize default state */ + Found = FALSE; + + /* Calculate the initial hash bucket index */ + Hash = ComputeHash(VirtualAddress); + + /* Start a guarded code block */ + { + /* Acquire the table lock and raise runlevel to DISPATCH level */ + KE::RaiseRunLevel RunLevel(DISPATCH_LEVEL); + KE::SpinLockGuard BigAllocationsLock(&BigAllocationsTableLock); + + /* Mask the computed hash and record the starting bucket */ + Hash &= BigAllocationsTableHash; + StartHash = Hash; + + /* Traverse the hash table using linear probing to pinpoint the exact allocation address */ + while(TRUE) + { + /* Retrieve the tracker entry */ + Entry = &BigAllocationsTable[Hash]; + + /* Check if the bucket contains the target virtual address */ + if(Entry->VirtualAddress == VirtualAddress) + { + /* Capture the allocation metadata */ + *NumberOfPages = Entry->NumberOfPages; + PoolTag = Entry->Key; + + /* Invalidate the entry */ + Entry->VirtualAddress = (PVOID)MM_POOL_BIG_ALLOCATIONS_ENTRY_FREE; + + /* Decrement the global usage counter */ + BigAllocationsInUse--; + + /* Update the found flag and break out of the probing loop */ + Found = TRUE; + break; + } + + /* Advance to the next bucket */ + if(++Hash >= BigAllocationsTableSize) + { + /* Wrap the hash index back to zero */ + Hash = 0; + } + + /* Check if the traversal has wrapped entirely back to the starting index */ + if(Hash == StartHash) + { + /* Abort the search */ + break; + } + } + } + + /* Evaluate the result of the table traversal */ + if(Found) + { + /* Return the original tag captured from the tracker */ + return PoolTag; + } + + /* Return an empty page count and a fallback tag */ + *NumberOfPages = 0; + return SIGNATURE32('B', 'i', 'g', 'A'); } diff --git 
a/xtoskrnl/mm/data.cc b/xtoskrnl/mm/data.cc index fea9782..d686660 100644 --- a/xtoskrnl/mm/data.cc +++ b/xtoskrnl/mm/data.cc @@ -9,6 +9,21 @@ #include +/* Active number of big allocations to trigger table expansion */ +ULONG MM::Allocator::BigAllocationsInUse; + +/* Pointer to the hash table for tracking page-aligned memory */ +PPOOL_TRACKER_BIG_ALLOCATIONS MM::Allocator::BigAllocationsTable; + +/* Bitmask used for fast modulo arithmetic during hash bucket lookups */ +SIZE_T MM::Allocator::BigAllocationsTableHash; + +/* Spinlock protecting the big allocations table */ +KSPIN_LOCK MM::Allocator::BigAllocationsTableLock; + +/* Maximum capacity of the tracking hash table */ +SIZE_T MM::Allocator::BigAllocationsTableSize; + /* Array of free page lists segregated by cache color */ PMMCOLOR_TABLES MM::Colors::FreePages[FreePageList + 1]; @@ -97,6 +112,9 @@ MMPFNLIST MM::Pfn::StandbyPagesList = {0, StandbyPageList, MAXULONG_PTR, MAXULON /* List containing free physical pages that have been zeroed out */ MMPFNLIST MM::Pfn::ZeroedPagesList = {0, ZeroedPageList, MAXULONG_PTR, MAXULONG_PTR}; +/* Non-paged pool descriptor */ +POOL_DESCRIPTOR MM::Pool::NonPagedPoolDescriptor; + /* PFN marking the initial non-paged pool end boundary */ PFN_NUMBER MM::Pool::NonPagedPoolFrameEnd; @@ -106,6 +124,12 @@ PFN_NUMBER MM::Pool::NonPagedPoolFrameStart; /* Array of non-paged pool free list heads */ LIST_ENTRY MM::Pool::NonPagedPoolFreeList[MM_MAX_FREE_PAGE_LIST_HEADS]; +/* Random cookie used to obfuscate pool links */ +ULONG MM::Pool::PoolSecureCookie; + +/* Array of pool descriptors */ +PPOOL_DESCRIPTOR MM::Pool::PoolVector[2]; + /* Array of lists for available System PTEs, separated by pool type */ MMPTE MM::Pte::FirstSystemFreePte[MaximumPtePoolTypes]; diff --git a/xtoskrnl/mm/mmgr.cc b/xtoskrnl/mm/mmgr.cc index c3c54b1..eddccf4 100644 --- a/xtoskrnl/mm/mmgr.cc +++ b/xtoskrnl/mm/mmgr.cc @@ -259,12 +259,18 @@ MM::Manager::InitializeMemoryManager(VOID) /* Initialize system PTE space */ 
MM::Pte::InitializeSystemPteSpace(); + /* Initialize memory pool security */ + MM::Pool::InitializePoolSecurity(); + /* Initialize non-paged pool */ MM::Pool::InitializeNonPagedPool(); /* Initialize PFN database */ MM::Pfn::InitializePfnDatabase(); + /* Initialize big allocations table */ + MM::Allocator::InitializeBigAllocationsTable(); + /* Initialize PFN bitmap */ MM::Pfn::InitializePfnBitmap(); diff --git a/xtoskrnl/mm/pool.cc b/xtoskrnl/mm/pool.cc index 48f0abc..d231dcf 100644 --- a/xtoskrnl/mm/pool.cc +++ b/xtoskrnl/mm/pool.cc @@ -9,6 +9,181 @@ #include +/** + * Decodes an obfuscated doubly-linked pool list pointer. + * + * @param Link + * Supplies the encoded list entry pointer to be decoded. + * + * @return This routine returns the valid, properly aligned list entry pointer. + * + * @since XT 1.0 + */ +XTAPI +PLIST_ENTRY +MM::Pool::DecodePoolLink(IN PLIST_ENTRY PoolLink) +{ + /* XOR the obfuscated pointer with the global pool cookie to reveal the true address */ + return (PLIST_ENTRY)((ULONG_PTR)PoolLink ^ PoolSecureCookie); +} + +/** + * Determines the pool type for a given memory address. + * + * @param VirtualAddress + * Supplies a virtual address to determine the pool type for. + * + * @return This routine returns the determined pool type for the specified address. 
+ * + * @since XT 1.0 + */ +XTAPI +MMPOOL_TYPE +MM::Pool::DeterminePoolType(IN PVOID VirtualAddress) +{ + PMMMEMORY_LAYOUT MemoryLayout; + + /* Retrieve the memory layout */ + MemoryLayout = MM::Manager::GetMemoryLayout(); + + /* Evaluate the virtual address against known pool boundaries */ + if((VirtualAddress >= MemoryLayout->NonPagedPoolStart) && + (VirtualAddress <= MemoryLayout->NonPagedPoolEnd)) + { + /* Address belongs to the non-paged pool */ + return NonPagedPool; + } + else if((VirtualAddress >= MemoryLayout->NonPagedExpansionPoolStart) && + (VirtualAddress <= MemoryLayout->NonPagedExpansionPoolEnd)) + { + /* Address belongs to the non-paged expansion pool */ + return NonPagedPool; + } + else if((VirtualAddress >= MemoryLayout->PagedPoolStart) && + (VirtualAddress <= MemoryLayout->PagedPoolEnd)) + { + /* Address belongs to the paged pool */ + return PagedPool; + } + + /* Address does not belong to any known pool, kernel panic */ + KE::Crash::Panic(0xC2, 0x42, (ULONG_PTR)VirtualAddress, 0, 0); + + /* Return an invalid pool type to satisfy the compiler */ + return (MMPOOL_TYPE)-1; +} + +/** + * Encodes a doubly-linked pool list pointer to mitigate pool corruption. + * + * @param Link + * Supplies the raw list entry pointer to be encoded. + * + * @return This routine returns the obfuscated list entry pointer. + * + * @since XT 1.0 + */ +XTAPI +PLIST_ENTRY +MM::Pool::EncodePoolLink(IN PLIST_ENTRY PoolLink) +{ + /* XOR the raw pointer with the global pool cookie to securely obfuscate it */ + return (PLIST_ENTRY)((ULONG_PTR)PoolLink ^ PoolSecureCookie); +} + +/** + * Calculates the address of a pool block at a specific relative index. + * + * @param Header + * Supplies a pointer to the base pool header. + * + * @param Index + * Supplies the block index offset. This value can be negative to traverse backwards. + * + * @return This routine returns a pointer to the calculated pool header. 
+ * + * @since XT 1.0 + */ +XTAPI +PPOOL_HEADER +MM::Pool::GetPoolBlock(IN PPOOL_HEADER Header, IN SSIZE_T Index) +{ + /* The destination block is located by advancing the base address by the specified index */ + return (PPOOL_HEADER)((ULONG_PTR)Header + (Index * MM_POOL_BLOCK_SIZE)); +} + +/** + * Retrieves the pool header associated with a given pool memory address. + * + * @param Memory + * Supplies a pointer to the allocated memory region of a pool block. + * + * @return This routine returns a pointer to the originating pool header. + * + * @since XT 1.0 + */ +XTAPI +PPOOL_HEADER +MM::Pool::GetPoolEntry(IN PVOID Memory) +{ + /* The structural header logically precedes the allocated memory region */ + return (PPOOL_HEADER)((ULONG_PTR)Memory - sizeof(POOL_HEADER)); +} + +/** + * Resolves the list entry structure embedded within a free pool block. + * + * @param Header + * Supplies a pointer to the pool header. + * + * @return This routine returns a pointer to the list entry directly following the header. + * + * @since XT 1.0 + */ +XTAPI +PLIST_ENTRY +MM::Pool::GetPoolFreeBlock(IN PPOOL_HEADER Header) +{ + /* Return the list entry pointer */ + return (PLIST_ENTRY)((ULONG_PTR)Header + sizeof(POOL_HEADER)); +} + +/** + * Retrieves a pointer to the adjacent contiguous pool block following the specified header. + * + * @param Header + * Supplies a pointer to the current pool header. + * + * @return This routine returns a pointer to the next pool header in memory. + * + * @since XT 1.0 + */ +XTAPI +PPOOL_HEADER +MM::Pool::GetPoolNextBlock(IN PPOOL_HEADER Header) +{ + /* The adjacent forward header is located exactly 'BlockSize' units ahead of the current block */ + return (PPOOL_HEADER)((ULONG_PTR)Header + (Header->BlockSize * MM_POOL_BLOCK_SIZE)); +} + +/** + * Retrieves a pointer to the adjacent contiguous pool block preceding the specified header. + * + * @param Header + * Supplies a pointer to the current pool header. 
+ * + * @return This routine returns a pointer to the previous pool header in memory. + * + * @since XT 1.0 + */ +XTAPI +PPOOL_HEADER +MM::Pool::GetPoolPreviousBlock(IN PPOOL_HEADER Header) +{ + /* The adjacent backward header is located exactly 'PreviousSize' units behind the current block */ + return (PPOOL_HEADER)((ULONG_PTR)Header - (Header->PreviousSize * MM_POOL_BLOCK_SIZE)); +} + /** * Initializes the non-paged pool for memory allocator. * @@ -69,6 +244,10 @@ MM::Pool::InitializeNonPagedPool(VOID) Pte::InitializeSystemPtePool(Paging::GetNextPte(Paging::GetPteAddress(MemoryLayout->NonPagedExpansionPoolStart)), MemoryLayout->NonPagedExpansionPoolSize - 2, NonPagedPoolExpansion); + + /* Store non-paged pool descriptor in the pool vector and initialize it */ + PoolVector[NonPagedPool] = &NonPagedPoolDescriptor; + InitializePoolDescriptor(PoolVector[NonPagedPool], NonPagedPool, 0, 0, NULLPTR); } /** @@ -85,6 +264,422 @@ MM::Pool::InitializePagedPool(VOID) UNIMPLEMENTED; } +/** + * Initializes a pool descriptor used by the memory manager. + * + * @param Descriptor + * Supplies a pointer to the pool descriptor structure to be initialized. + * + * @param PoolType + * Specifies the type of memory pool that will be managed by the descriptor. + * + * @param Index + * Supplies the zero-based index of the descriptor within the pool vector. + * + * @param Threshold + * Specifies the allocation threshold that dictates when the pool should expand. + * + * @param LockAddress + * Supplies a pointer to the synchronization primitive that will serialize access to this descriptor. * + * + * @return This routine does not return any value. 
+ * + * @since XT 1.0 + */ +XTAPI +VOID +MM::Pool::InitializePoolDescriptor(IN PPOOL_DESCRIPTOR Descriptor, + IN MMPOOL_TYPE PoolType, + IN ULONG Index, + IN ULONG Threshold, + IN PVOID LockAddress) +{ + PLIST_ENTRY LastEntry, ListEntry; + + /* Populate the core attributes of the descriptor */ + Descriptor->LockAddress = LockAddress; + Descriptor->PoolIndex = Index; + Descriptor->PoolType = PoolType; + Descriptor->Threshold = Threshold; + + /* Clear the deferred free list */ + Descriptor->PendingFrees = NULLPTR; + Descriptor->PendingFreeDepth = 0; + + /* Zero out the runtime accounting and statistical tracking counters */ + Descriptor->RunningAllocations = 0; + Descriptor->RunningFrees = 0; + Descriptor->TotalBigAllocations = 0; + Descriptor->TotalBytes = 0; + Descriptor->TotalPages = 0; + + /* Establish the iteration boundaries */ + ListEntry = Descriptor->ListHeads; + LastEntry = ListEntry + MM_POOL_LISTS_PER_PAGE; + + /* Traverse and initialize all block list heads */ + while(ListEntry < LastEntry) + { + /* Initialize the empty list head */ + InitializePoolListHead(ListEntry); + ListEntry++; + } +} + +/** + * Initializes a doubly-linked pool list head with encoded pointers. + * + * @param ListHead + * Supplies a pointer to the pool list head that is to be initialized. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MM::Pool::InitializePoolListHead(IN PLIST_ENTRY ListHead) +{ + PLIST_ENTRY ListEntry; + + /* Obfuscate the list head address and establish the empty circular linkage */ + ListEntry = EncodePoolLink(ListHead); + ListHead->Flink = ListEntry; + ListHead->Blink = ListEntry; +} + +/** + * Initializes the memory pool security mechanisms. + * + * @return This routine does not return any value. 
+ * + * @since XT 1.0 + */ +XTAPI +VOID +MM::Pool::InitializePoolSecurity(VOID) +{ + UNIMPLEMENTED; + + /* Initialize the global pool cookie using a hard-coded value */ + PoolSecureCookie = 0xDEADC0DE; +} + +/** + * Inserts a pool entry at the head of a doubly-linked pool list. + * + * @param ListHead + * Supplies a pointer to the head of the pool list. + * + * @param Entry + * Supplies a pointer to the pool list entry to be inserted. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MM::Pool::InsertPoolHeadList(IN PLIST_ENTRY ListHead, + IN PLIST_ENTRY Entry) +{ + PLIST_ENTRY Flink; + + /* Validate the pool list structure */ + VerifyPoolLinks(ListHead); + + /* Resolve the current forward link of the list head */ + Flink = DecodePoolLink(ListHead->Flink); + + /* Securely insert the new entry at the beginning of the pool list */ + Entry->Blink = EncodePoolLink(ListHead); + Entry->Flink = EncodePoolLink(Flink); + Flink->Blink = EncodePoolLink(Entry); + ListHead->Flink = EncodePoolLink(Entry); + + /* Re-validate the pool list structure */ + VerifyPoolLinks(ListHead); +} + +/** + * Inserts a pool entry at the tail of a doubly-linked pool list. + * + * @param ListHead + * Supplies a pointer to the head of the pool list. + * + * @param Entry + * Supplies a pointer to the pool list entry to be inserted. + * + * @return This routine does not return any value. 
+ * + * @since XT 1.0 + */ +XTAPI +VOID +MM::Pool::InsertPoolTailList(IN PLIST_ENTRY ListHead, + IN PLIST_ENTRY Entry) +{ + PLIST_ENTRY Blink; + + /* Validate the pool list structure */ + VerifyPoolLinks(ListHead); + + /* Securely link the new entry at the end of the pool list */ + Blink = DecodePoolLink(ListHead->Blink); + Blink->Flink = EncodePoolLink(Entry); + Entry->Blink = EncodePoolLink(Blink); + Entry->Flink = EncodePoolLink(ListHead); + ListHead->Blink = EncodePoolLink(Entry); + + /* Re-validate the pool list structure */ + VerifyPoolLinks(ListHead); +} + +/** + * Determines whether a given doubly-linked pool list is empty. + * + * @param ListHead + * Supplies a pointer to the head of the pool list to be evaluated. + * + * @return This routine returns TRUE if the pool list is empty, or FALSE otherwise. + * + * @since XT 1.0 + */ +XTAPI +BOOLEAN +MM::Pool::PoolListEmpty(IN PLIST_ENTRY ListHead) +{ + /* Evaluate whether the pool list contains no valid entries */ + return (DecodePoolLink(ListHead->Flink) == ListHead); +} + +/** + * Removes a specific pool entry from a doubly-linked pool list. + * + * @param Entry + * Supplies a pointer to the pool list entry to be removed. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MM::Pool::RemovePoolEntryList(IN PLIST_ENTRY Entry) +{ + PLIST_ENTRY Blink, Flink; + + /* Resolve the adjacent forward and backward links */ + Blink = DecodePoolLink(Entry->Blink); + Flink = DecodePoolLink(Entry->Flink); + + /* Securely link the adjacent nodes together */ + Blink->Flink = EncodePoolLink(Flink); + Flink->Blink = EncodePoolLink(Blink); +} + +/** + * Removes the first entry from a doubly-linked pool list. + * + * @param ListHead + * Supplies a pointer to the head of the pool list. + * + * @return This routine returns a pointer to the removed pool list entry. 
+ * + * @since XT 1.0 + */ +XTAPI +PLIST_ENTRY +MM::Pool::RemovePoolHeadList(IN PLIST_ENTRY ListHead) +{ + PLIST_ENTRY Entry, Flink; + + /* Securely unlink the first entry from the pool list */ + Entry = DecodePoolLink(ListHead->Flink); + Flink = DecodePoolLink(Entry->Flink); + Flink->Blink = EncodePoolLink(ListHead); + ListHead->Flink = EncodePoolLink(Flink); + + /* Return the removed pool list entry */ + return Entry; +} + +/** + * Removes the last entry from a doubly-linked pool list. + * + * @param ListHead + * Supplies a pointer to the head of the pool list. + * + * @return This routine returns a pointer to the removed pool list entry. + * + * @since XT 1.0 + */ +PLIST_ENTRY +XTAPI +MM::Pool::RemovePoolTailList(IN PLIST_ENTRY ListHead) +{ + PLIST_ENTRY Blink, Entry; + + /* Securely unlink the last entry from the pool list */ + Entry = DecodePoolLink(ListHead->Blink); + Blink = DecodePoolLink(Entry->Blink); + Blink->Flink = EncodePoolLink(ListHead); + ListHead->Blink = EncodePoolLink(Blink); + + /* Return the removed pool list entry */ + return Entry; +} + +/** + * Verifies the structural integrity of all pool blocks residing on a specific page. + * + * @param Block + * Supplies a pointer to the specific pool block. + * + * @return This routine does not return any value. 
+ * + * @since XT 1.0 + */ +XTAPI +VOID +MM::Pool::VerifyPoolBlocks(IN PVOID Block) +{ + PPOOL_HEADER Entry; + BOOLEAN FoundBlock; + SIZE_T Size; + + /* Initialize tracking variables */ + FoundBlock = FALSE; + Size = 0; + + /* Resolve the first pool header */ + Entry = (PPOOL_HEADER)PAGE_ALIGN(Block); + + /* Iterate through all contiguous pool allocations */ + do + { + /* Validate the current pool header */ + VerifyPoolHeader(Entry); + + /* Check if the current header corresponds to the target block */ + if(Entry == Block) + { + /* Mark the block as found */ + FoundBlock = TRUE; + } + + /* Accumulate the total block size and advance to the next entry */ + Size += Entry->BlockSize; + Entry = GetPoolNextBlock(Entry); + } + while((Size < (MM_PAGE_SIZE / MM_POOL_BLOCK_SIZE)) && (PAGE_ALIGN(Entry) != Entry)); + + /* Ensure the block was found and the total size is aligned with the page */ + if(!FoundBlock || (PAGE_ALIGN(Entry) != Entry) || (Size != (MM_PAGE_SIZE / MM_POOL_BLOCK_SIZE))) + { + /* Pool blocks corruption detected, kernel panic */ + KE::Crash::Panic(0x19, 10, (ULONG_PTR)Block, (ULONG_PTR)Entry, FoundBlock); + } +} + +/** + * Verifies the structural and spatial invariants of a specific pool header. + * + * @param Entry + * Supplies a pointer to the pool header to be verified. + * + * @return This routine does not return any value. 
+ * + * @since XT 1.0 + */ +XTAPI +VOID +MM::Pool::VerifyPoolHeader(IN PPOOL_HEADER Entry) +{ + PPOOL_HEADER PreviousEntry, NextEntry; + + /* Verify that the current block header is valid */ + if(!Entry->BlockSize) + { + /* Invalid block header size, kernel panic */ + KE::Crash::Panic(0x19, 8, (ULONG_PTR)Entry->PreviousSize, Entry->BlockSize, (ULONG_PTR)Entry); + } + + /* Verify that the previous block header is valid */ + if(Entry->PreviousSize) + { + /* Resolve the previous block header */ + PreviousEntry = GetPoolPreviousBlock(Entry); + + /* Check if both adjacent blocks are within the same memory page */ + if(PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry)) + { + /* Adjacent blocks are not on the same page, kernel panic */ + KE::Crash::Panic(0x19, 6, (ULONG_PTR)PreviousEntry, (ULONG_PTR)PAGE_ALIGN(Entry), (ULONG_PTR)Entry); + } + + /* Check the actual size of the previous block */ + if(PreviousEntry->BlockSize != Entry->PreviousSize) + { + /* Block size mismatch, kernel panic */ + KE::Crash::Panic(0x19, 5, (ULONG_PTR)PreviousEntry, (ULONG_PTR)Entry->PreviousSize, (ULONG_PTR)Entry); + } + } + else if(PAGE_ALIGN(Entry) != Entry) + { + /* Not aligned to a page boundary, kernel panic */ + KE::Crash::Panic(0x19, 7, 0, (ULONG_PTR)PAGE_ALIGN(Entry), (ULONG_PTR)Entry); + } + + /* Resolve the next block header */ + NextEntry = GetPoolNextBlock(Entry); + + /* Verify the next block header */ + if(PAGE_ALIGN(NextEntry) != NextEntry) + { + /* Check if both adjacent blocks are within the same memory page */ + if(PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry)) + { + /* Adjacent blocks are not on the same page, kernel panic */ + KE::Crash::Panic(0x19, 9, (ULONG_PTR)NextEntry, (ULONG_PTR)PAGE_ALIGN(Entry), (ULONG_PTR)Entry); + } + + /* Check the previous block size */ + if(NextEntry->PreviousSize != Entry->BlockSize) + { + /* Block size mismatch, kernel panic */ + KE::Crash::Panic(0x19, 5, (ULONG_PTR)NextEntry, NextEntry->PreviousSize, (ULONG_PTR)Entry); + } + } +} + +/** + * 
Validates the structural integrity of a doubly-linked pool list. + * + * @param ListHead + * Supplies a pointer to the pool list head that is to be validated. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MM::Pool::VerifyPoolLinks(IN PLIST_ENTRY ListHead) +{ + /* Validate the doubly-linked list invariants */ + if((DecodePoolLink(DecodePoolLink(ListHead->Blink)->Flink) != ListHead) || + (DecodePoolLink(DecodePoolLink(ListHead->Flink)->Blink) != ListHead)) + { + /* Pool corruption detected, raise kernel panic */ + KE::Crash::Panic(0x19, + 3, + (ULONG_PTR)ListHead, + (ULONG_PTR)DecodePoolLink(DecodePoolLink(ListHead->Blink)->Flink), + (ULONG_PTR)DecodePoolLink(DecodePoolLink(ListHead->Flink)->Blink)); + } +} + /** * Validates the run level for the specified pool. If the run level is invalid, the kernel panics. * @@ -132,7 +727,7 @@ MM::Pool::VerifyRunLevel(IN MMPOOL_TYPE PoolType, } } - /* Invalid run level for specified pool, kernel panic */ + /* Invalid run level for specified pool, raise kernel panic */ KE::Crash::Panic(0xC2, (Entry ? MM_POOL_INVALID_FREE_RUNLEVEL : MM_POOL_INVALID_ALLOC_RUNLEVEL), RunLevel,