alcyone/NTOSKRNL/KE/mutex.cpp
Dibyamartanda Samanta 58ba3f5db0 Nuke Old Mutex Implementation
Too many changes to make, so I just updated it with my recent iteration.

Signed-off-by: CodingWorkshop Signing Team <signing@codingworkshop.eu.org>
2025-09-25 14:57:10 +02:00


/*
* PROJECT: Alcyone System Kernel
* LICENSE: BSD 3-Clause
* PURPOSE: Mutexes
* NT KERNEL: 5.11.9360
* COPYRIGHT: 2023-2029 Dibymartanda Samanta <dibya.samanta@neverseen.de>
*/
#include <ntoskrnl.h>
#define NTDEBUG
extern "C"
/* Mutex Count:
0 => free, can be acquired
1 => acquired by a thread
negative => the magnitude is the number of waiting threads */
constexpr ULONG MUTEX_READY_TO_BE_ACQUIRED = 0;
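/*
 * Illustrative walk-through of the count convention above (a sketch of the
 * intended state transitions, not executable code):
 *
 *   Count =  0   free; an acquirer CASes 0 -> 1 and becomes the owner
 *   Count =  1   owned, no waiters
 *   Count = -1   owned, one thread is blocked on the wait event
 *   Count = -2   owned, two threads are blocked, and so on
 *
 * A release exchanges the count back to 0 and, if the previous value was
 * negative, signals the event so one waiter can retry the acquire.
 */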
/* Internal Fast Mutex definitions */
#define FM_LOCK_BIT 0x1
#define FM_LOCK_WAITER_WOKEN 0x2
#define FM_LOCK_WAITER_INC 0x4
#define FM_RECURSIVE_BIT 0x8
typedef struct _FAST_MUTEX {
LONG Count; // 0x0: 0 = free, 1 = owned, negative = waiters
PVOID Owner; // 0x4: Owning thread
ULONG Contention; // 0x8: Contention count
KEVENT Event; // 0xC: Wait event
ULONG OldIrql; // 0x1C: Saved IRQL
LONG RecursionDepth; // 0x20: For recursive mutexes
} FAST_MUTEX, *PFAST_MUTEX; // 0x24 bytes (sizeof); offsets assume a 32-bit build
typedef PFAST_MUTEX PKGUARDED_MUTEX;
/*Internal Function*/
VOID
FASTCALL
KiAcquireFastMutex(
_Inout_ PFAST_MUTEX Mutex
)
{
LONG OldCount;
LONG NewCount;
PAGED_CODE();
/* Increment contention count */
InterlockedIncrement(&Mutex->Contention);
while (TRUE)
{
/* Read current count */
OldCount = ReadForWriteAccess(&Mutex->Count);
/* Check if mutex is free (count == 0) */
if (OldCount == 0)
{
/* Attempt to acquire by setting count to 1 */
if (InterlockedCompareExchange(&Mutex->Count, 1, 0) == 0)
{
/* Mutex acquired successfully */
break;
}
}
else
{
/* Mutex is owned: register as a waiter. The first waiter takes the
count from 1 to -1; each additional waiter makes it one more negative */
NewCount = (OldCount == 1) ? -1 : OldCount - 1;
if (InterlockedCompareExchange(&Mutex->Count, NewCount, OldCount) == OldCount)
{
/* Wait for the mutex event */
KeWaitForSingleObject(&Mutex->Event, WrFastMutex, KernelMode, FALSE, NULL);
/* Continue loop to try acquiring again */
}
}
}
}
VOID
FASTCALL
KeReleaseFastMutexContended(
IN PFAST_MUTEX FastMutex,
IN LONG OldValue)
{
/* The caller has already exchanged the count back to zero, so a compare-
exchange against the old value can no longer succeed here. If the count
was negative before the release, at least one thread is blocked on the
event, so wake one waiter up. */
if (OldValue < 0)
{
/* Wake up a waiter */
KeSetEvent(&FastMutex->Event, IO_NO_INCREMENT, FALSE);
}
}
/* Exported Function */
VOID
NTAPI
KeInitializeFastMutex(
_Out_ PFAST_MUTEX Mutex
)
{
/* Initialize the mutex structure */
RtlZeroMemory(Mutex, sizeof(FAST_MUTEX));
/* Set initial values - 0 means free/available */
Mutex->Owner = NULL;
Mutex->Contention = 0;
Mutex->Count = 0; // 0 = free
Mutex->RecursionDepth = 0;
/* Initialize the Mutex Gate */
KeInitializeEvent(&Mutex->Event, SynchronizationEvent, FALSE);
}
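/*
 * Usage sketch (illustrative only, not part of this translation unit): a
 * FAST_MUTEX must live in nonpaged storage and be initialized once before
 * first use. The device-extension type and field names below are hypothetical.
 *
 *   typedef struct _EXAMPLE_DEVICE_EXTENSION {
 *       FAST_MUTEX StateLock;   // nonpaged by virtue of the device extension
 *       ULONG      State;
 *   } EXAMPLE_DEVICE_EXTENSION, *PEXAMPLE_DEVICE_EXTENSION;
 *
 *   VOID ExampleInit(PEXAMPLE_DEVICE_EXTENSION Ext)
 *   {
 *       KeInitializeFastMutex(&Ext->StateLock);   // Count = 0, event not signaled
 *       Ext->State = 0;
 *   }
 */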
BOOLEAN
VECTORCALL
KeTryToAcquireFastMutex(
_Inout_ PFAST_MUTEX Mutex)
{
KIRQL OldIrql;
/* Raise IRQL to APC_LEVEL */
KeRaiseIrql(APC_LEVEL, &OldIrql);
/* Try to acquire the mutex atomically */
if (InterlockedCompareExchange(&Mutex->Count, 1, 0) == 0)
{
/* Successfully acquired */
Mutex->Owner = (PVOID)KeGetCurrentThread();
Mutex->OldIrql = OldIrql;
return TRUE;
}
else
{
/* Failed to acquire the mutex */
KeLowerIrql(OldIrql);
return FALSE;
}
}
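/*
 * Usage sketch for the try-acquire path (illustrative; Ext and
 * ExampleFlushCache are hypothetical names). On success the mutex is held
 * with IRQL raised and must be released with KeReleaseFastMutex; on failure
 * the IRQL has already been restored and no release must be issued.
 *
 *   if (KeTryToAcquireFastMutex(&Ext->StateLock))
 *   {
 *       ExampleFlushCache(Ext);               // lock held
 *       KeReleaseFastMutex(&Ext->StateLock);  // drops lock, restores IRQL
 *   }
 *   else
 *   {
 *       // Lock is busy; defer the work instead of blocking
 *   }
 */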
VOID
NTAPI
KeEnterCriticalRegionAndAcquireFastMutexUnsafe(
_In_ PFAST_MUTEX FastMutex)
{
PKTHREAD OwnerThread;
KeEnterCriticalRegion();
/* Get the current thread */
OwnerThread = KeGetCurrentThread();
/* Try to acquire the FastMutex */
if (InterlockedCompareExchange(&FastMutex->Count, 1, 0) != 0)
{
/* FastMutex was locked, we need to wait */
KiAcquireFastMutex(FastMutex);
}
FastMutex->Owner = OwnerThread;
}
VOID
FASTCALL
KeReleaseFastMutexUnsafeAndLeaveCriticalRegion(
_In_ PFAST_MUTEX FastMutex)
{
LONG OldValue;
/* Clear the owner */
FastMutex->Owner = NULL;
/* Release the FastMutex */
OldValue = InterlockedExchange(&FastMutex->Count, 0);
if (OldValue < 0)
{
/* There were waiters, call the contended release function */
KeReleaseFastMutexContended(FastMutex, OldValue);
}
/* Leave critical region */
KeLeaveCriticalRegion();
}
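/*
 * Usage sketch for the critical-region pairing (illustrative; Ext is a
 * hypothetical device extension). These two routines bracket the lock with
 * KeEnterCriticalRegion/KeLeaveCriticalRegion instead of raising IRQL, so
 * normal kernel APCs are disabled while the mutex is held but special
 * kernel APCs can still be delivered.
 *
 *   KeEnterCriticalRegionAndAcquireFastMutexUnsafe(&Ext->StateLock);
 *   Ext->State |= 0x1;   // protected update
 *   KeReleaseFastMutexUnsafeAndLeaveCriticalRegion(&Ext->StateLock);
 */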
VOID
NTAPI
KeAcquireFastMutex(
_In_ PFAST_MUTEX FastMutex)
{
KIRQL OldIrql;
/* Raise IRQL to APC_LEVEL */
KeRaiseIrql(APC_LEVEL, &OldIrql);
/* Try to acquire the FastMutex */
if (InterlockedCompareExchange(&FastMutex->Count, 1, 0) != 0)
{
/* We didn't acquire it, we'll have to wait */
KiAcquireFastMutex(FastMutex);
}
/* Set the owner thread and save the original IRQL */
FastMutex->Owner = KeGetCurrentThread();
FastMutex->OldIrql = OldIrql;
}
VOID
NTAPI
KeAcquireFastMutexUnsafe(
_In_ PFAST_MUTEX FastMutex)
{
PKTHREAD CurrentThread;
/* Get the current thread */
CurrentThread = KeGetCurrentThread();
/* Try to acquire the FastMutex */
if (InterlockedCompareExchange(&FastMutex->Count, 1, 0) != 0)
{
/* FastMutex was locked, we need to wait */
KiAcquireFastMutex(FastMutex);
}
/* Set the owner */
FastMutex->Owner = CurrentThread;
}
VOID
NTAPI
KeReleaseFastMutex(
_Inout_ PFAST_MUTEX FastMutex
)
{
KIRQL OldIrql;
LONG OldCount;
/* Clear owner and get saved IRQL */
FastMutex->Owner = NULL;
OldIrql = FastMutex->OldIrql;
/* Release the mutex */
OldCount = InterlockedExchange(&FastMutex->Count, 0);
/* Check if there were waiters */
if (OldCount < 0)
{
/* Wake up waiters */
KeSetEvent(&FastMutex->Event, IO_NO_INCREMENT, FALSE);
}
/* Restore IRQL */
KeLowerIrql(OldIrql);
}
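/*
 * Usage sketch for the standard acquire/release pair (illustrative; Ext and
 * ExampleUpdateState are hypothetical names). Callers are expected to be
 * running at IRQL <= APC_LEVEL; the acquire raises IRQL and the release
 * restores the IRQL saved in the mutex.
 *
 *   KeAcquireFastMutex(&Ext->StateLock);
 *   ExampleUpdateState(Ext);                // serialized against other acquirers
 *   KeReleaseFastMutex(&Ext->StateLock);
 */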
VOID
NTAPI
KeReleaseFastMutexUnsafe(
_In_ PFAST_MUTEX FastMutex)
{
LONG OldValue;
/* Clear the owner */
FastMutex->Owner = NULL;
/* Release the lock and get the old value */
OldValue = InterlockedExchange(&FastMutex->Count, 0);
/* Check if there were waiters */
if (OldValue < 0)
{
/* Wake up waiters */
KeSetEvent(&FastMutex->Event, IO_NO_INCREMENT, FALSE);
}
}
/* Guarded mutexes in modern NT behave just like fast mutexes, with the added protection of a guarded region (all APC delivery disabled) */
VOID
NTAPI
KeInitializeGuardedMutex(_Out_ PKGUARDED_MUTEX GuardedMutex)
{
/* Initialize the GuardedMutex */
GuardedMutex->Count = 0; // 0 = free
GuardedMutex->Owner = NULL;
GuardedMutex->Contention = 0;
GuardedMutex->RecursionDepth = 0;
/* Initialize the Mutex Gate */
KeInitializeEvent(&GuardedMutex->Event, SynchronizationEvent, FALSE);
}
VOID
NTAPI
KeAcquireGuardedMutex(_Inout_ PKGUARDED_MUTEX Mutex)
{
PKTHREAD OwnerThread = KeGetCurrentThread();
KIRQL OldIrql;
/* Raise IRQL and enter guarded region */
KeRaiseIrql(APC_LEVEL, &OldIrql);
KeEnterGuardedRegion();
if (InterlockedCompareExchange(&Mutex->Count, 1, 0) != 0)
{
KiAcquireFastMutex(Mutex);
}
Mutex->Owner = OwnerThread;
Mutex->OldIrql = OldIrql;
}
VOID
NTAPI
KeAcquireGuardedMutexUnsafe(
_Inout_ PKGUARDED_MUTEX FastMutex
)
{
PKTHREAD CurrentThread;
KeEnterGuardedRegion();
CurrentThread = KeGetCurrentThread();
if (InterlockedCompareExchange(&FastMutex->Count, 1, 0) != 0)
{
KiAcquireFastMutex(FastMutex);
}
FastMutex->Owner = CurrentThread;
}
VOID
NTAPI
KeReleaseGuardedMutexUnsafe(
_Inout_ PKGUARDED_MUTEX FastMutex
)
{
LONG OldCount;
FastMutex->Owner = NULL;
OldCount = InterlockedExchange(&FastMutex->Count, 0);
if (OldCount < 0)
{
KeSetEvent(&FastMutex->Event, IO_NO_INCREMENT, FALSE);
}
KeLeaveGuardedRegion();
}
VOID
NTAPI
KeReleaseGuardedMutex(
_In_ PKGUARDED_MUTEX FastMutex)
{
KIRQL OldIrql;
LONG OldValue;
/* Save the old IRQL and clear the owner */
OldIrql = FastMutex->OldIrql;
FastMutex->Owner = NULL;
/* Try to release the FastMutex */
OldValue = InterlockedExchange(&FastMutex->Count, 0);
if (OldValue < 0)
{
KeSetEvent(&FastMutex->Event, IO_NO_INCREMENT, FALSE);
}
/* Lower IRQL and leave guarded region */
KeLowerIrql(OldIrql);
KeLeaveGuardedRegion();
}
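/*
 * Usage sketch for the guarded-mutex API (illustrative; GuardedLock is a
 * hypothetical name). As implemented above, acquisition raises IRQL and
 * enters a guarded region (all APC delivery disabled); the release lowers
 * IRQL and leaves the region.
 *
 *   FAST_MUTEX GuardedLock;                 // PKGUARDED_MUTEX aliases PFAST_MUTEX here
 *   KeInitializeGuardedMutex(&GuardedLock);
 *   ...
 *   KeAcquireGuardedMutex(&GuardedLock);
 *   // APC-free, serialized section
 *   KeReleaseGuardedMutex(&GuardedLock);
 */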
/* Specific to Alcyone; not found in Windows NT */
VOID
NTAPI
KeInitializeRecursiveFastMutex(_Out_ PFAST_MUTEX Mutex)
{
KeInitializeFastMutex(Mutex);
/* Mark as recursion-capable: a RecursionDepth of -1 means the mutex
supports recursion but is currently unowned */
Mutex->RecursionDepth = -1;
}
NTSTATUS
NTAPI
KeAcquireFastMutexTimeout(_Inout_ PFAST_MUTEX Mutex, _In_ PLARGE_INTEGER Timeout)
{
KIRQL OldIrql;
NTSTATUS Status;
/* Raise IRQL to APC_LEVEL */
KeRaiseIrql(APC_LEVEL, &OldIrql);
/* Try to acquire immediately */
if (InterlockedCompareExchange(&Mutex->Count, 1, 0) == 0)
{
Mutex->Owner = KeGetCurrentThread();
Mutex->OldIrql = OldIrql;
return STATUS_SUCCESS;
}
/* Increment waiter count */
InterlockedDecrement(&Mutex->Count);
/* Wait with timeout */
Status = KeWaitForSingleObject(&Mutex->Event, WrFastMutex, KernelMode, FALSE, Timeout);
if (Status == STATUS_SUCCESS)
{
/* Try to acquire after being woken up */
if (InterlockedCompareExchange(&Mutex->Count, 1, 0) == 0)
{
Mutex->Owner = KeGetCurrentThread();
Mutex->OldIrql = OldIrql;
}
else
{
/* Another thread won the race for the mutex; report it as a timeout */
KeLowerIrql(OldIrql);
Status = STATUS_TIMEOUT;
}
}
else
{
/* Timeout or other error - remove ourselves from waiter count */
InterlockedIncrement(&Mutex->Count);
KeLowerIrql(OldIrql);
}
return Status;
}
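/*
 * Usage sketch for the timed acquire (illustrative; Ext and
 * ExampleUpdateState are hypothetical names). A negative LARGE_INTEGER is a
 * relative timeout in 100-nanosecond units. Release only on STATUS_SUCCESS:
 * on STATUS_TIMEOUT the mutex was never acquired and the IRQL has already
 * been restored.
 *
 *   LARGE_INTEGER Timeout;
 *   Timeout.QuadPart = -10 * 1000 * 50;     // 50 ms, relative
 *   if (KeAcquireFastMutexTimeout(&Ext->StateLock, &Timeout) == STATUS_SUCCESS)
 *   {
 *       ExampleUpdateState(Ext);
 *       KeReleaseFastMutex(&Ext->StateLock);
 *   }
 */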
BOOLEAN
NTAPI
KeIsMutexOwned(_In_ PFAST_MUTEX Mutex)
{
/* Note: reports whether the mutex is owned by the calling thread,
not merely whether it is owned at all */
return (Mutex->Owner == KeGetCurrentThread());
}
NTSTATUS
NTAPI
KeAcquireGuardedMutexTimeout(_Inout_ PKGUARDED_MUTEX Mutex, _In_ PLARGE_INTEGER Timeout)
{
KIRQL OldIrql;
NTSTATUS Status;
/* Raise IRQL and enter guarded region */
KeRaiseIrql(APC_LEVEL, &OldIrql);
KeEnterGuardedRegion();
/* Try to acquire immediately */
if (InterlockedCompareExchange(&Mutex->Count, 1, 0) == 0)
{
Mutex->Owner = KeGetCurrentThread();
Mutex->OldIrql = OldIrql;
return STATUS_SUCCESS;
}
/* Increment waiter count */
InterlockedDecrement(&Mutex->Count);
/* Wait with timeout */
Status = KeWaitForSingleObject(&Mutex->Event, WrGuardedMutex, KernelMode, FALSE, Timeout);
if (Status == STATUS_SUCCESS)
{
/* Try to acquire after being woken up */
if (InterlockedCompareExchange(&Mutex->Count, 1, 0) == 0)
{
Mutex->Owner = KeGetCurrentThread();
Mutex->OldIrql = OldIrql;
}
else
{
/* Another thread won the race for the mutex; report it as a timeout */
KeLowerIrql(OldIrql);
KeLeaveGuardedRegion();
Status = STATUS_TIMEOUT;
}
}
else
{
/* Timeout or other error - remove ourselves from waiter count */
InterlockedIncrement(&Mutex->Count);
KeLowerIrql(OldIrql);
KeLeaveGuardedRegion();
}
return Status;
}
/* Additional Alcyone-specific recursive mutex functions */
VOID
NTAPI
KeAcquireRecursiveFastMutex(_Inout_ PFAST_MUTEX Mutex)
{
PKTHREAD CurrentThread = KeGetCurrentThread();
KIRQL OldIrql;
/* Check for a recursive re-acquire: the mutex is recursion-capable
(RecursionDepth != 0) and is already owned by the calling thread */
if (Mutex->RecursionDepth != 0 && Mutex->Owner == CurrentThread)
{
/* Increment recursion depth */
InterlockedIncrement(&Mutex->RecursionDepth);
return;
}
/* Not recursive or not owned by current thread - acquire normally */
KeRaiseIrql(APC_LEVEL, &OldIrql);
if (InterlockedCompareExchange(&Mutex->Count, 1, 0) != 0)
{
KiAcquireFastMutex(Mutex);
}
Mutex->Owner = CurrentThread;
Mutex->OldIrql = OldIrql;
/* Set recursion depth to 1 for recursive mutexes */
if (Mutex->RecursionDepth == -1)
{
Mutex->RecursionDepth = 1;
}
}
VOID
NTAPI
KeReleaseRecursiveFastMutex(_Inout_ PFAST_MUTEX Mutex)
{
PKTHREAD CurrentThread = KeGetCurrentThread();
KIRQL OldIrql;
LONG OldCount;
/* Verify ownership */
if (Mutex->Owner != CurrentThread)
{
KeBugCheckEx(MUTEX_LEVEL_NUMBER_VIOLATION,
(ULONG_PTR)Mutex,
(ULONG_PTR)CurrentThread,
(ULONG_PTR)Mutex->Owner,
0);
return;
}
/* Handle recursive case */
if (Mutex->RecursionDepth > 1)
{
InterlockedDecrement(&Mutex->RecursionDepth);
return;
}
/* Clear owner and get saved IRQL */
Mutex->Owner = NULL;
OldIrql = Mutex->OldIrql;
/* Reset recursion depth for recursive mutexes */
if (Mutex->RecursionDepth != 0)
{
Mutex->RecursionDepth = -1; // Mark as recursive but not owned
}
/* Release the mutex */
OldCount = InterlockedExchange(&Mutex->Count, 0);
/* Check if there were waiters */
if (OldCount < 0)
{
KeSetEvent(&Mutex->Event, IO_NO_INCREMENT, FALSE);
}
/* Restore IRQL */
KeLowerIrql(OldIrql);
}
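/*
 * Usage sketch for the Alcyone recursive fast mutex (illustrative;
 * RecursiveLock is a hypothetical name). The same thread may re-acquire the
 * mutex; every acquire must be balanced by a release, and only the final
 * release actually drops the lock and restores IRQL.
 *
 *   FAST_MUTEX RecursiveLock;
 *   KeInitializeRecursiveFastMutex(&RecursiveLock);
 *   ...
 *   KeAcquireRecursiveFastMutex(&RecursiveLock);     // depth = 1
 *   KeAcquireRecursiveFastMutex(&RecursiveLock);     // depth = 2, no wait
 *   KeReleaseRecursiveFastMutex(&RecursiveLock);     // depth back to 1
 *   KeReleaseRecursiveFastMutex(&RecursiveLock);     // lock released
 */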
/* Utility functions for debugging and monitoring */
ULONG
NTAPI
KeGetMutexContentionCount(_In_ PFAST_MUTEX Mutex)
{
return Mutex->Contention;
}
PKTHREAD
NTAPI
KeGetMutexOwner(_In_ PFAST_MUTEX Mutex)
{
return (PKTHREAD)Mutex->Owner;
}
LONG
NTAPI
KeGetMutexRecursionDepth(_In_ PFAST_MUTEX Mutex)
{
return Mutex->RecursionDepth;
}
BOOLEAN
NTAPI
KeIsMutexRecursive(_In_ PFAST_MUTEX Mutex)
{
return (Mutex->RecursionDepth == -1 || Mutex->RecursionDepth > 0);
}
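/*
 * Usage sketch for the monitoring helpers (illustrative; Ext is a
 * hypothetical device extension and the DbgPrint format is an example only).
 * Handy when chasing lock contention from a debug path or tracing callback.
 *
 *   DbgPrint("StateLock: owner=%p contention=%lu depth=%ld recursive=%u\n",
 *            KeGetMutexOwner(&Ext->StateLock),
 *            KeGetMutexContentionCount(&Ext->StateLock),
 *            KeGetMutexRecursionDepth(&Ext->StateLock),
 *            KeIsMutexRecursive(&Ext->StateLock));
 */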
/* Enhanced try-acquire with recursion support */
BOOLEAN
NTAPI
KeTryToAcquireRecursiveFastMutex(_Inout_ PFAST_MUTEX Mutex)
{
PKTHREAD CurrentThread = KeGetCurrentThread();
KIRQL OldIrql;
/* Check if this is a recursive mutex and current thread owns it */
if (Mutex->RecursionDepth != 0 && Mutex->Owner == CurrentThread)
{
/* Increment recursion depth */
InterlockedIncrement(&Mutex->RecursionDepth);
return TRUE;
}
/* Try normal acquisition */
KeRaiseIrql(APC_LEVEL, &OldIrql);
if (InterlockedCompareExchange(&Mutex->Count, 1, 0) == 0)
{
Mutex->Owner = CurrentThread;
Mutex->OldIrql = OldIrql;
/* Set recursion depth to 1 for recursive mutexes */
if (Mutex->RecursionDepth == -1)
{
Mutex->RecursionDepth = 1;
}
return TRUE;
}
else
{
KeLowerIrql(OldIrql);
return FALSE;
}
}
} /* extern "C" */