From 944d5b5c0ae1a9f41127971483ff373356613872 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Mon, 23 Mar 2026 18:54:18 +0100 Subject: [PATCH] Implement pool allocations and frees tracking --- sdk/xtdk/amd64/mmtypes.h | 3 + sdk/xtdk/i686/mmtypes.h | 3 + sdk/xtdk/mmtypes.h | 12 + sdk/xtdk/xtstruct.h | 1 + xtoskrnl/includes/mm/alloc.hh | 30 ++- xtoskrnl/mm/alloc.cc | 430 ++++++++++++++++++++++++++++++---- xtoskrnl/mm/data.cc | 23 +- xtoskrnl/mm/mmgr.cc | 5 +- 8 files changed, 448 insertions(+), 59 deletions(-) diff --git a/sdk/xtdk/amd64/mmtypes.h b/sdk/xtdk/amd64/mmtypes.h index 0ce6129..8562a36 100644 --- a/sdk/xtdk/amd64/mmtypes.h +++ b/sdk/xtdk/amd64/mmtypes.h @@ -116,6 +116,9 @@ /* Number of pool lists per page */ #define MM_POOL_LISTS_PER_PAGE (MM_PAGE_SIZE / MM_POOL_BLOCK_SIZE) +/* Number of pool tracking tables */ +#define MM_POOL_TRACKING_TABLES 64 + /* Page size enumeration list */ typedef enum _PAGE_SIZE { diff --git a/sdk/xtdk/i686/mmtypes.h b/sdk/xtdk/i686/mmtypes.h index c539497..8adbe2d 100644 --- a/sdk/xtdk/i686/mmtypes.h +++ b/sdk/xtdk/i686/mmtypes.h @@ -114,6 +114,9 @@ /* Number of pool lists per page */ #define MM_POOL_LISTS_PER_PAGE (MM_PAGE_SIZE / MM_POOL_BLOCK_SIZE) +/* Number of pool tracking tables */ +#define MM_POOL_TRACKING_TABLES 32 + /* Page size enumeration list */ typedef enum _PAGE_SIZE { diff --git a/sdk/xtdk/mmtypes.h b/sdk/xtdk/mmtypes.h index ceddf22..786d9b3 100644 --- a/sdk/xtdk/mmtypes.h +++ b/sdk/xtdk/mmtypes.h @@ -232,4 +232,16 @@ typedef struct _POOL_TRACKER_BIG_ALLOCATIONS PVOID VirtualAddress; } POOL_TRACKER_BIG_ALLOCATIONS, *PPOOL_TRACKER_BIG_ALLOCATIONS; +/* Pool tracking table structure definition */ +typedef struct _POOL_TRACKING_TABLE +{ + LONG NonPagedAllocations; + SIZE_T NonPagedBytes; + LONG NonPagedFrees; + LONG PagedAllocations; + SIZE_T PagedBytes; + LONG PagedFrees; + ULONG Tag; +} POOL_TRACKING_TABLE, *PPOOL_TRACKING_TABLE; + #endif /* __XTDK_MMTYPES_H */ diff --git a/sdk/xtdk/xtstruct.h 
b/sdk/xtdk/xtstruct.h index 9c8f999..dd4902d 100644 --- a/sdk/xtdk/xtstruct.h +++ b/sdk/xtdk/xtstruct.h @@ -314,6 +314,7 @@ typedef struct _PHYSICAL_MEMORY_DESCRIPTOR PHYSICAL_MEMORY_DESCRIPTOR, *PPHYSICA typedef struct _PHYSICAL_MEMORY_RUN PHYSICAL_MEMORY_RUN, *PPHYSICAL_MEMORY_RUN; typedef struct _POOL_HEADER POOL_HEADER, *PPOOL_HEADER; typedef struct _POOL_TRACKER_BIG_ALLOCATIONS POOL_TRACKER_BIG_ALLOCATIONS, *PPOOL_TRACKER_BIG_ALLOCATIONS; +typedef struct _POOL_TRACKING_TABLE POOL_TRACKING_TABLE, *PPOOL_TRACKING_TABLE; typedef struct _PROCESSOR_IDENTITY PROCESSOR_IDENTITY, *PPROCESSOR_IDENTITY; typedef struct _PROCESSOR_POWER_STATE PROCESSOR_POWER_STATE, *PPROCESSOR_POWER_STATE; typedef struct _RTL_BITMAP RTL_BITMAP, *PRTL_BITMAP; diff --git a/xtoskrnl/includes/mm/alloc.hh b/xtoskrnl/includes/mm/alloc.hh index 16f8142..e3d24d5 100644 --- a/xtoskrnl/includes/mm/alloc.hh +++ b/xtoskrnl/includes/mm/alloc.hh @@ -19,11 +19,16 @@ namespace MM class Allocator final : private Pool { private: + STATIC PPOOL_TRACKING_TABLE AllocationsTrackingTable; + STATIC KSPIN_LOCK AllocationsTrackingTableLock; + STATIC SIZE_T AllocationsTrackingTableMask; + STATIC SIZE_T AllocationsTrackingTableSize; STATIC ULONG BigAllocationsInUse; - STATIC PPOOL_TRACKER_BIG_ALLOCATIONS BigAllocationsTable; - STATIC SIZE_T BigAllocationsTableHash; - STATIC KSPIN_LOCK BigAllocationsTableLock; - STATIC SIZE_T BigAllocationsTableSize; + STATIC PPOOL_TRACKER_BIG_ALLOCATIONS BigAllocationsTrackingTable; + STATIC SIZE_T BigAllocationsTrackingTableHash; + STATIC KSPIN_LOCK BigAllocationsTrackingTableLock; + STATIC SIZE_T BigAllocationsTrackingTableSize; + STATIC PPOOL_TRACKING_TABLE TagTables[MM_POOL_TRACKING_TABLES]; public: STATIC XTAPI XTSTATUS AllocatePages(IN MMPOOL_TYPE PoolType, @@ -42,7 +47,8 @@ namespace MM STATIC XTAPI XTSTATUS FreePool(IN PVOID VirtualAddress); STATIC XTAPI XTSTATUS FreePool(IN PVOID VirtualAddress, IN ULONG Tag); - STATIC XTAPI VOID InitializeBigAllocationsTable(VOID); + 
STATIC XTAPI VOID InitializeAllocationsTracking(VOID); + STATIC XTAPI VOID InitializeBigAllocationsTracking(VOID); private: STATIC XTAPI XTSTATUS AllocateNonPagedPoolPages(IN PFN_COUNT Pages, @@ -50,17 +56,25 @@ namespace MM STATIC XTAPI XTSTATUS AllocatePagedPoolPages(IN PFN_COUNT Pages, OUT PVOID *Memory); STATIC XTINLINE ULONG ComputeHash(IN PVOID VirtualAddress); + STATIC XTINLINE ULONG ComputeHash(IN ULONG Tag, + IN ULONG TableMask); STATIC XTAPI BOOLEAN ExpandBigAllocationsTable(VOID); STATIC XTAPI XTSTATUS FreeNonPagedPoolPages(IN PVOID VirtualAddress, OUT PPFN_NUMBER PagesFreed); STATIC XTAPI XTSTATUS FreePagedPoolPages(IN PVOID VirtualAddress, OUT PPFN_NUMBER PagesFreed); + STATIC XTAPI VOID RegisterAllocationTag(IN ULONG Tag, + IN SIZE_T Bytes, + IN MMPOOL_TYPE PoolType); STATIC XTAPI BOOLEAN RegisterBigAllocationTag(IN PVOID VirtualAddress, - IN ULONG Key, - IN ULONG NumberOfPages, + IN ULONG Tag, + IN ULONG Pages, IN MMPOOL_TYPE PoolType); + STATIC XTAPI VOID UnregisterAllocationTag(IN ULONG Tag, + IN SIZE_T Bytes, + IN MMPOOL_TYPE PoolType); STATIC XTAPI ULONG UnregisterBigAllocationTag(IN PVOID VirtualAddress, - OUT PULONG_PTR NumberOfPages, + OUT PULONG_PTR Pages, IN MMPOOL_TYPE PoolType); }; } diff --git a/xtoskrnl/mm/alloc.cc b/xtoskrnl/mm/alloc.cc index c2cefe1..4590327 100644 --- a/xtoskrnl/mm/alloc.cc +++ b/xtoskrnl/mm/alloc.cc @@ -353,6 +353,9 @@ MM::Allocator::AllocatePool(IN MMPOOL_TYPE PoolType, Tag = SIGNATURE32('B', 'i', 'g', 'A'); } + /* Register the allocation in the tracking table */ + RegisterAllocationTag(Tag, SIZE_TO_PAGES(Bytes) << MM_PAGE_SHIFT, PoolType); + /* Supply the allocated address and return success */ *Memory = PoolEntry; return STATUS_SUCCESS; @@ -454,6 +457,9 @@ MM::Allocator::AllocatePool(IN MMPOOL_TYPE PoolType, RTL::Atomic::ExchangeAdd64((PLONG_PTR)&PoolDescriptor->TotalBytes, (LONG_PTR)(PoolEntry->BlockSize * MM_POOL_BLOCK_SIZE)); RTL::Atomic::Increment32((PLONG)&PoolDescriptor->RunningAllocations); + /* Register the allocation in
the tracking table */ + RegisterAllocationTag(Tag, PoolEntry->BlockSize * MM_POOL_BLOCK_SIZE, PoolType); + /* Assign the specified identification tag */ PoolEntry->PoolTag = Tag; @@ -522,6 +528,9 @@ MM::Allocator::AllocatePool(IN MMPOOL_TYPE PoolType, /* Increment the running allocation counter for the pool descriptor */ RTL::Atomic::Increment32((PLONG)&PoolDescriptor->RunningAllocations); + /* Register the allocation in the tracking table */ + RegisterAllocationTag(Tag, PoolEntry->BlockSize * MM_POOL_BLOCK_SIZE, PoolType); + /* Perform a final structural validation of the pool block */ VerifyPoolBlocks(PoolEntry); @@ -539,7 +548,7 @@ MM::Allocator::AllocatePool(IN MMPOOL_TYPE PoolType, * @param VirtualAddress * Supplies the base virtual address to be hashed. * - * @return This routine returns the computed partial hash value. + * @return This routine returns the computed hash value. * * @since XT 1.0 */ @@ -556,6 +565,33 @@ MM::Allocator::ComputeHash(IN PVOID VirtualAddress) return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result; } +/** + * Computes a hash for a given pool tag to be used in the allocation tracker. + * + * @param Tag + * Supplies the 32-bit pool tag to be hashed. + * + * @param TableMask + * Supplies the bitmask used to bound the resulting hash index to the table size. + * + * @return This routine returns the computed hash value. + * + * @since XT 1.0 + */ +XTINLINE +ULONG +MM::Allocator::ComputeHash(IN ULONG Tag, + IN ULONG TableMask) +{ + ULONG Result; + + /* Fold the bytes using arithmetic shifts and XORs */ + Result = ((((((Tag & 0xFF) << 2) ^ ((Tag >> 8) & 0xFF)) << 2) ^ ((Tag >> 16) & 0xFF)) << 2) ^ ((Tag >> 24) & 0xFF); + + /* Multiply by the NT magic prime-like constant and shift down */ + return ((40543 * Result) >> 2) & TableMask; +} + /** * Expands the big allocation tracking table to accommodate additional large allocations. 
* @@ -575,7 +611,7 @@ MM::Allocator::ExpandBigAllocationsTable(VOID) /* Initialize the abort flag and snapshot current table capacity */ Abort = FALSE; - OldSize = BigAllocationsTableSize; + OldSize = BigAllocationsTrackingTableSize; /* Check if doubling the size would cause an integer overflow */ if(OldSize > ((~(SIZE_T)0) / 2)) @@ -620,12 +656,12 @@ MM::Allocator::ExpandBigAllocationsTable(VOID) /* Start a guarded code block */ { - /* Acquire the table lock and raise runlevel to DISPATCH level */ + /* Acquire the tracking table lock and raise runlevel to DISPATCH level */ KE::RaiseRunLevel RunLevel(DISPATCH_LEVEL); - KE::SpinLockGuard BigAllocationsLock(&BigAllocationsTableLock); + KE::SpinLockGuard TrackingTableLock(&BigAllocationsTrackingTableLock); /* Verify if another thread has already expanded the table concurrently */ - if(BigAllocationsTableSize >= NewSize) + if(BigAllocationsTrackingTableSize >= NewSize) { /* Another thread has already expanded the table, discard changes */ Abort = TRUE; @@ -634,7 +670,7 @@ MM::Allocator::ExpandBigAllocationsTable(VOID) { /* Cache the legacy table pointer and calculate new hash mask */ HashMask = NewSize - 1; - OldTable = BigAllocationsTable; + OldTable = BigAllocationsTrackingTable; /* Rehash and migrate all active entries from the old table */ for(Index = 0; Index < OldSize; Index++) @@ -665,9 +701,9 @@ MM::Allocator::ExpandBigAllocationsTable(VOID) } /* Activate the newly populated table globally */ - BigAllocationsTable = NewTable; - BigAllocationsTableHash = NewSize - 1; - BigAllocationsTableSize = NewSize; + BigAllocationsTrackingTable = NewTable; + BigAllocationsTrackingTableHash = NewSize - 1; + BigAllocationsTrackingTableSize = NewSize; } } @@ -1019,6 +1055,9 @@ MM::Allocator::FreePool(IN PVOID VirtualAddress, PageCount = 1; } + /* Remove the allocation from the tracking table */ + UnregisterAllocationTag(Tag, PageCount << MM_PAGE_SHIFT, PoolType); + /* Retrieve the specific pool descriptor based on the masked 
pool type */ PoolDescriptor = PoolVector[PoolType]; @@ -1056,6 +1095,9 @@ MM::Allocator::FreePool(IN PVOID VirtualAddress, Tag = PoolEntry->PoolTag; Combined = FALSE; + /* Remove the allocation from the tracking table */ + UnregisterAllocationTag(Tag, BlockSize * MM_POOL_BLOCK_SIZE, (MMPOOL_TYPE)(PoolEntry->PoolType - 1)); + /* Locate the adjacent forward pool block */ NextPoolEntry = GetPoolBlock(PoolEntry, BlockSize); @@ -1165,6 +1207,116 @@ MM::Allocator::FreePool(IN PVOID VirtualAddress, return STATUS_SUCCESS; } +/** + * Initializes the allocations tracking table during early system boot. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MM::Allocator::InitializeAllocationsTracking(VOID) +{ + SIZE_T TableSize; + ULONG Index; + XTSTATUS Status; + PMMMEMORY_LAYOUT MemoryLayout; + + /* Not fully implemented yet, HIVE support needed */ + UNIMPLEMENTED; + + /* Retrieve memory layout */ + MemoryLayout = MM::Manager::GetMemoryLayout(); + + /* TODO: Retrieve tracking table size from the HIVE */ + AllocationsTrackingTableSize = 0; + + /* Calculate the target table size */ + TableSize = MIN(AllocationsTrackingTableSize, (MemoryLayout->NonPagedPoolSize * MM_PAGE_SIZE) >> 8); + + /* Perform a bit-scan to determine the highest set bit */ + for(Index = 0; Index < 32; Index++) + { + /* Check if the lowest bit is currently set */ + if(TableSize & 1) + { + /* Verify if this is the only remaining set bit */ + if(!(TableSize & ~1)) + { + /* Exit the loop as the highest bit has been found */ + break; + } + } + + /* Shift the size down by one bit to evaluate higher bits */ + TableSize >>= 1; + } + + /* Check if the bit-scan completed without finding any set bits */ + if(Index == 32) + { + /* Apply the default size of 1024 entries */ + AllocationsTrackingTableSize = 1024; + } + else + { + /* Calculate the aligned power of two size, enforcing a minimum of 64 entries */ + AllocationsTrackingTableSize = MAX(1 << Index, 64); + } + + /* 
Iteratively attempt to allocate the tracking table */ + while(TRUE) + { + /* Prevent integer overflow when calculating the required byte size for the table */ + if(AllocationsTrackingTableSize + 1 > (MAXULONG_PTR / sizeof(POOL_TRACKING_TABLE))) + { + /* Halve the requested entry count and restart the evaluation */ + AllocationsTrackingTableSize >>= 1; + continue; + } + + /* Attempt to allocate physical memory for the table */ + Status = MM::Allocator::AllocatePages(NonPagedPool, + (AllocationsTrackingTableSize + 1) * + sizeof(POOL_TRACKING_TABLE), (PVOID *)&AllocationsTrackingTable); + + /* Check if the allocation succeeded */ + if(Status != STATUS_SUCCESS || !AllocationsTrackingTable) + { + /* Check if the allocation failed even for a single entry */ + if(AllocationsTrackingTableSize == 1) + { + /* Failed to initialize the pool tracker, kernel panic */ + KE::Crash::Panic(0x41, TableSize, (ULONG_PTR)~0, (ULONG_PTR)~0, (ULONG_PTR)~0); + } + + /* Halve the requested entry count */ + AllocationsTrackingTableSize >>= 1; + } + else + { + /* Allocation succeeded */ + break; + } + } + + /* Increment the table size to account for the overflow bucket entry */ + AllocationsTrackingTableSize += 1; + + /* Zero the entire memory used by the table */ + RtlZeroMemory(AllocationsTrackingTable, AllocationsTrackingTableSize * sizeof(POOL_TRACKING_TABLE)); + + /* Assign the global tracking table as the local table for the bootstrap processor */ + TagTables[0] = AllocationsTrackingTable; + + /* Calculate and store the hash mask */ + AllocationsTrackingTableMask = AllocationsTrackingTableSize - 2; + + /* Initialize the spinlock used to synchronize concurrent modifications to the tracking table */ + KE::SpinLock::InitializeSpinLock(&AllocationsTrackingTableLock); +} + /** * Initializes the big allocations tracking table during early system boot.
* @@ -1174,7 +1326,7 @@ MM::Allocator::FreePool(IN PVOID VirtualAddress, */ XTAPI VOID -MM::Allocator::InitializeBigAllocationsTable(VOID) +MM::Allocator::InitializeBigAllocationsTracking(VOID) { SIZE_T TableSize; ULONG Index; @@ -1188,10 +1340,10 @@ MM::Allocator::InitializeBigAllocationsTable(VOID) MemoryLayout = MM::Manager::GetMemoryLayout(); /* TODO: Retrieve initial big allocation table size from the HIVE */ - BigAllocationsTableSize = 0; + BigAllocationsTrackingTableSize = 0; /* Calculate the target table size */ - TableSize = MIN(BigAllocationsTableSize, (MemoryLayout->NonPagedPoolSize * MM_PAGE_SIZE) >> 12); + TableSize = MIN(BigAllocationsTrackingTableSize, (MemoryLayout->NonPagedPoolSize * MM_PAGE_SIZE) >> 12); /* Perform a bit-scan to determine the highest set bit */ for(Index = 0; Index < 32; Index++) @@ -1215,42 +1367,42 @@ MM::Allocator::InitializeBigAllocationsTable(VOID) if(Index == 32) { /* Apply the default size of 4096 entries */ - BigAllocationsTableSize = 4096; + BigAllocationsTrackingTableSize = 4096; } else { /* Calculate the aligned power of two size, enforcing a minimum of 64 entries */ - BigAllocationsTableSize = MAX(1 << Index, 64); + BigAllocationsTrackingTableSize = MAX(1 << Index, 64); } /* Iteratively attempt to allocate the tracking table */ while(TRUE) { /* Prevent integer overflow when calculating the required byte size for the table */ - if((BigAllocationsTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_ALLOCATIONS))) + if((BigAllocationsTrackingTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_BIG_ALLOCATIONS))) { /* Halve the requested entry count and restart the evaluation */ - BigAllocationsTableSize >>= 1; + BigAllocationsTrackingTableSize >>= 1; continue; } /* Attempt to allocate physical memory for the table */ Status = AllocatePages(NonPagedPool, - BigAllocationsTableSize * sizeof(POOL_TRACKER_BIG_ALLOCATIONS), - (PVOID*)&BigAllocationsTable); + BigAllocationsTrackingTableSize * 
sizeof(POOL_TRACKER_BIG_ALLOCATIONS), + (PVOID*)&BigAllocationsTrackingTable); /* Check if the allocation succeeded */ - if(Status != STATUS_SUCCESS || !BigAllocationsTable) + if(Status != STATUS_SUCCESS || !BigAllocationsTrackingTable) { /* Check if the allocation failed duefor a single entry */ - if(BigAllocationsTableSize == 1) + if(BigAllocationsTrackingTableSize == 1) { /* Failed to initialize the pool tracker, kernel panic */ - KE::Crash::Panic(0x41, TableSize, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); + KE::Crash::Panic(0x41, TableSize, (ULONG_PTR)~0, (ULONG_PTR)~0, (ULONG_PTR)~0); } /* Halve the requested entry count */ - BigAllocationsTableSize >>= 1; + BigAllocationsTrackingTableSize >>= 1; } else { @@ -1260,20 +1412,126 @@ MM::Allocator::InitializeBigAllocationsTable(VOID) } /* Zero the entire memory used by the table */ - RtlZeroMemory(BigAllocationsTable, BigAllocationsTableSize * sizeof(POOL_TRACKER_BIG_ALLOCATIONS)); + RtlZeroMemory(BigAllocationsTrackingTable, BigAllocationsTrackingTableSize * sizeof(POOL_TRACKER_BIG_ALLOCATIONS)); /* Iterate through the newly allocated table */ - for(Index = 0; Index < BigAllocationsTableSize; Index++) + for(Index = 0; Index < BigAllocationsTrackingTableSize; Index++) { /* Mark the individual pool tracker entry as free and available */ - BigAllocationsTable[Index].VirtualAddress = (PVOID)MM_POOL_BIG_ALLOCATIONS_ENTRY_FREE; + BigAllocationsTrackingTable[Index].VirtualAddress = (PVOID)MM_POOL_BIG_ALLOCATIONS_ENTRY_FREE; } /* Calculate and store the hash mask */ - BigAllocationsTableHash = BigAllocationsTableSize - 1; + BigAllocationsTrackingTableHash = BigAllocationsTrackingTableSize - 1; /* Initialize the spinlock used to synchronize concurrent modifications to the tracking table */ - KE::SpinLock::InitializeSpinLock(&BigAllocationsTableLock); + KE::SpinLock::InitializeSpinLock(&BigAllocationsTrackingTableLock); + + /* Register the allocation in the tracking table */ + RegisterAllocationTag(SIGNATURE32('M', 'M', 'g', 
'r'), + SIZE_TO_PAGES(BigAllocationsTrackingTableSize * sizeof(POOL_TRACKER_BIG_ALLOCATIONS)) << MM_PAGE_SHIFT, + NonPagedPool); +} + +/** + * Registers a pool memory allocation in the tracking table. + * + * @param Tag + * Supplies the tag used to identify the allocation. + * + * @param Bytes + * Supplies the size of the allocation. + * + * @param PoolType + * Specifies the type of pool from which the memory was allocated. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MM::Allocator::RegisterAllocationTag(IN ULONG Tag, + IN SIZE_T Bytes, + IN MMPOOL_TYPE PoolType) +{ + PPOOL_TRACKING_TABLE CpuTable, TableEntry; + ULONG Hash, Index, Processor; + + /* Retrieve the local tracking table for the current processor */ + Processor = KE::Processor::GetCurrentProcessorNumber(); + CpuTable = TagTables[Processor]; + + /* Compute the initial hash index */ + Hash = ComputeHash(Tag, AllocationsTrackingTableMask); + Index = Hash; + + /* Probe the tracking table until a match or an empty slot is found */ + do + { + /* Fetch the tracker entry from the CPU table */ + TableEntry = &CpuTable[Hash]; + + /* Check if the current entry tracks the requested pool tag */ + if(TableEntry->Tag == Tag) + { + /* Update the appropriate statistics based on the pool type */ + if((PoolType & MM_POOL_TYPE_MASK) == NonPagedPool) + { + /* Update the non-paged allocation statistics */ + RTL::Atomic::Increment32(&TableEntry->NonPagedAllocations); + RTL::Atomic::ExchangeAdd64((PLONG_PTR)&TableEntry->NonPagedBytes, Bytes); + } + else + { + /* Update the paged allocation statistics */ + RTL::Atomic::Increment32(&TableEntry->PagedAllocations); + RTL::Atomic::ExchangeAdd64((PLONG_PTR)&TableEntry->PagedBytes, Bytes); + } + + /* The allocation has been successfully tracked, return */ + return; + } + + /* Check if the CPU table entry is empty */ + if(TableEntry->Tag == 0) + { + /* Check if another processor has claimed this slot in the global table */ + 
if(AllocationsTrackingTable[Hash].Tag != 0) + { + /* Synchronize the local table with the global table */ + TableEntry->Tag = AllocationsTrackingTable[Hash].Tag; + + /* Restart the loop for re-evaluation */ + continue; + } + + /* Check if this is not the designated overflow bucket */ + if(Hash != (AllocationsTrackingTableSize - 1)) + { + /* Start a guarded code block */ + { + /* Acquire the tracking table lock */ + KE::SpinLockGuard TrackingTableLock(&AllocationsTrackingTableLock); + + /* Perform a double-checked lock */ + if(AllocationsTrackingTable[Hash].Tag == 0) + { + /* Claim the slot in both the local and global tracking tables */ + AllocationsTrackingTable[Hash].Tag = Tag; + TableEntry->Tag = Tag; + } + } + + /* Restart the loop */ + continue; + } + } + + /* Advance to the next index as hash collision occurred */ + Hash = (Hash + 1) & AllocationsTrackingTableMask; + } + while(Hash != Index); } /** @@ -1285,7 +1543,7 @@ MM::Allocator::InitializeBigAllocationsTable(VOID) * @param Tag * Supplies the tag used to identify the allocation. * - * @param NumberOfPages + * @param Pages * Supplies the number of physical pages backing the allocation.
* * @param PoolType @@ -1299,7 +1557,7 @@ BOOLEAN XTAPI MM::Allocator::RegisterBigAllocationTag(IN PVOID VirtualAddress, IN ULONG Tag, - IN ULONG NumberOfPages, + IN ULONG Pages, IN MMPOOL_TYPE PoolType) { PPOOL_TRACKER_BIG_ALLOCATIONS Entry; @@ -1318,25 +1576,25 @@ MM::Allocator::RegisterBigAllocationTag(IN PVOID VirtualAddress, /* Start a guarded code block */ { - /* Acquire the table lock and raise runlevel to DISPATCH level */ + /* Acquire the tracking table lock and raise runlevel to DISPATCH level */ KE::RaiseRunLevel RunLevel(DISPATCH_LEVEL); - KE::SpinLockGuard BigAllocationsLock(&BigAllocationsTableLock); + KE::SpinLockGuard TrackingTableLock(&BigAllocationsTrackingTableLock); /* Retrieve the tracker entry */ - Hash &= BigAllocationsTableHash; + Hash &= BigAllocationsTrackingTableHash; StartHash = Hash; /* Traverse the hash table */ do { /* Retrieve the tracker entry */ - Entry = &BigAllocationsTable[Hash]; + Entry = &BigAllocationsTrackingTable[Hash]; /* Check if the current bucket is marked as free */ if((ULONG_PTR)Entry->VirtualAddress & MM_POOL_BIG_ALLOCATIONS_ENTRY_FREE) { /* Populate the available bucket with the allocation metadata */ - Entry->NumberOfPages = NumberOfPages; + Entry->NumberOfPages = Pages; Entry->Tag = Tag; Entry->VirtualAddress = VirtualAddress; @@ -1344,7 +1602,7 @@ MM::Allocator::RegisterBigAllocationTag(IN PVOID VirtualAddress, BigAllocationsInUse++; /* Determine if the table capacity has reached the critical 75% threshold */ - if(BigAllocationsInUse > (BigAllocationsTableSize * 3 / 4)) + if(BigAllocationsInUse > (BigAllocationsTrackingTableSize * 3 / 4)) { /* Flag the table for expansion */ RequiresExpansion = TRUE; @@ -1356,7 +1614,7 @@ MM::Allocator::RegisterBigAllocationTag(IN PVOID VirtualAddress, } /* Advance to the next bucket */ - if(++Hash >= BigAllocationsTableSize) + if(++Hash >= BigAllocationsTrackingTableSize) { /* Wrap the index back to the beginning of the table */ Hash = 0; @@ -1401,13 +1659,95 @@ 
MM::Allocator::RegisterBigAllocationTag(IN PVOID VirtualAddress, return FALSE; } +/** + * Unregisters a pool memory allocation in the tracking table. + * + * @param Tag + * Supplies the tag used to identify the allocation. + * + * @param Bytes + * Supplies the size of the allocation. + * + * @param PoolType + * Specifies the type of pool from which the memory was allocated. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MM::Allocator::UnregisterAllocationTag(IN ULONG Tag, + IN SIZE_T Bytes, + IN MMPOOL_TYPE PoolType) +{ + ULONG Hash, Index; + PPOOL_TRACKING_TABLE CpuTable; + PPOOL_TRACKING_TABLE TableEntry; + ULONG Processor; + + /* Retrieve the local tracking table for the current processor */ + Processor = KE::Processor::GetCurrentProcessorNumber(); + CpuTable = TagTables[Processor]; + + /* Compute the initial hash index */ + Hash = ComputeHash(Tag, AllocationsTrackingTableMask); + Index = Hash; + + /* Probe the tracking table until a match or an empty slot is found */ + do + { + /* Fetch the tracker entry from the CPU table */ + TableEntry = &CpuTable[Hash]; + + /* Check if the current entry tracks the requested pool tag */ + if(TableEntry->Tag == Tag) + { + /* Update the appropriate statistics based on the pool type */ + if((PoolType & MM_POOL_TYPE_MASK) == NonPagedPool) + { + /* Update the non-paged allocation statistics */ + RTL::Atomic::Increment32(&TableEntry->NonPagedFrees); + RTL::Atomic::ExchangeAdd64((PLONG_PTR)&TableEntry->NonPagedBytes, 0 - Bytes); + } + else + { + /* Update the paged allocation statistics */ + RTL::Atomic::Increment32(&TableEntry->PagedFrees); + RTL::Atomic::ExchangeAdd64((PLONG_PTR)&TableEntry->PagedBytes, 0 - Bytes); + } + + /* The allocation has been successfully tracked, return */ + return; + } + + /* Check if the CPU table is entirely empty */ + if(TableEntry->Tag == 0) + { + /* Check if another processor has claimed this slot in the global table */ + 
if(AllocationsTrackingTable[Hash].Tag != 0) + { + /* Synchronize the local table with the global table */ + TableEntry->Tag = AllocationsTrackingTable[Hash].Tag; + + /* Restart the loop for re-evaluation */ + continue; + } + } + + /* Advance to the next index as hash collision occurred */ + Hash = (Hash + 1) & AllocationsTrackingTableMask; + } + while(Hash != Index); +} + /** * Unregisters a big allocation from the tracking table and retrieves its metadata. * * @param VirtualAddress * Supplies the virtual address of the big allocation to be removed. * - * @param NumberOfPages + * @param Pages * Supplies the number of physical pages backing the allocation. * * @param PoolType @@ -1420,7 +1760,7 @@ MM::Allocator::RegisterBigAllocationTag(IN PVOID VirtualAddress, XTAPI ULONG MM::Allocator::UnregisterBigAllocationTag(IN PVOID VirtualAddress, - OUT PULONG_PTR NumberOfPages, + OUT PULONG_PTR Pages, IN MMPOOL_TYPE PoolType) { ULONG Hash, StartHash; @@ -1436,25 +1776,25 @@ MM::Allocator::UnregisterBigAllocationTag(IN PVOID VirtualAddress, /* Start a guarded code block */ { - /* Acquire the table lock and raise runlevel to DISPATCH level */ + /* Acquire the tracking table lock and raise runlevel to DISPATCH level */ KE::RaiseRunLevel RunLevel(DISPATCH_LEVEL); - KE::SpinLockGuard BigAllocationsLock(&BigAllocationsTableLock); + KE::SpinLockGuard TrackingTableLock(&BigAllocationsTrackingTableLock); /* Mask the computed hash and record the starting bucket */ - Hash &= BigAllocationsTableHash; + Hash &= BigAllocationsTrackingTableHash; StartHash = Hash; /* Traverse the hash table using linear probing to pinpoint the exact allocation address */ while(TRUE) { /* Retrieve the tracker entry */ - Entry = &BigAllocationsTable[Hash]; + Entry = &BigAllocationsTrackingTable[Hash]; /* Check if the bucket contains the target virtual address */ if(Entry->VirtualAddress == VirtualAddress) { /* Capture the allocation metadata */ - *NumberOfPages = Entry->NumberOfPages; + *Pages = 
Entry->NumberOfPages; PoolTag = Entry->Tag; /* Invalidate the entry */ @@ -1469,7 +1809,7 @@ MM::Allocator::UnregisterBigAllocationTag(IN PVOID VirtualAddress, } /* Advance to the next bucket */ - if(++Hash >= BigAllocationsTableSize) + if(++Hash >= BigAllocationsTrackingTableSize) { /* Wrap the hash index back to zero */ Hash = 0; @@ -1492,6 +1832,6 @@ MM::Allocator::UnregisterBigAllocationTag(IN PVOID VirtualAddress, } /* Return an empty page count and a fallback tag */ - *NumberOfPages = 0; + *Pages = 0; return SIGNATURE32('B', 'i', 'g', 'A'); } diff --git a/xtoskrnl/mm/data.cc b/xtoskrnl/mm/data.cc index 0895c0f..5cb3079 100644 --- a/xtoskrnl/mm/data.cc +++ b/xtoskrnl/mm/data.cc @@ -9,20 +9,35 @@ #include +/* Global table used to track pool memory allocations */ +PPOOL_TRACKING_TABLE MM::Allocator::AllocationsTrackingTable; + +/* Spinlock protecting the allocations table */ +KSPIN_LOCK MM::Allocator::AllocationsTrackingTableLock; + +/* Bitmask used during the hashing process */ +SIZE_T MM::Allocator::AllocationsTrackingTableMask; + +/* Total number of entries in the global allocations tracking table */ +SIZE_T MM::Allocator::AllocationsTrackingTableSize; + /* Active number of big allocations to trigger table expansion */ ULONG MM::Allocator::BigAllocationsInUse; /* Pointer to the hash table for tracking page-aligned memory */ -PPOOL_TRACKER_BIG_ALLOCATIONS MM::Allocator::BigAllocationsTable; +PPOOL_TRACKER_BIG_ALLOCATIONS MM::Allocator::BigAllocationsTrackingTable; /* Bitmask used for fast modulo arithmetic during hash bucket lookups */ -SIZE_T MM::Allocator::BigAllocationsTableHash; +SIZE_T MM::Allocator::BigAllocationsTrackingTableHash; /* Spinlock protecting the big allocations table */ -KSPIN_LOCK MM::Allocator::BigAllocationsTableLock; +KSPIN_LOCK MM::Allocator::BigAllocationsTrackingTableLock; /* Maximum capacity of the tracking hash table */ -SIZE_T MM::Allocator::BigAllocationsTableSize; +SIZE_T MM::Allocator::BigAllocationsTrackingTableSize; + +/* 
Array of CPU-local tracking tables */ +PPOOL_TRACKING_TABLE MM::Allocator::TagTables[MM_POOL_TRACKING_TABLES]; /* Array of free page lists segregated by cache color */ PMMCOLOR_TABLES MM::Colors::FreePages[FreePageList + 1]; diff --git a/xtoskrnl/mm/mmgr.cc b/xtoskrnl/mm/mmgr.cc index eddccf4..b1bf9eb 100644 --- a/xtoskrnl/mm/mmgr.cc +++ b/xtoskrnl/mm/mmgr.cc @@ -268,8 +268,9 @@ MM::Manager::InitializeMemoryManager(VOID) /* Initialize PFN database */ MM::Pfn::InitializePfnDatabase(); - /* Initialize big allocations table */ - MM::Allocator::InitializeBigAllocationsTable(); + /* Initialize allocations tracking tables */ + MM::Allocator::InitializeAllocationsTracking(); + MM::Allocator::InitializeBigAllocationsTracking(); /* Initialize PFN bitmap */ MM::Pfn::InitializePfnBitmap();