/*
 * PROJECT: Alcyone System Kernel
 * LICENSE: BSD 3-Clause
 * PURPOSE: Cache Controller:: Lazy Writer
 * NT KERNEL: 5.11.9360
 * COPYRIGHT:  2023-2029 Dibymartanda Samanta <>
 */ 
#include <ntoskrnl.h>
#define NTDEBUG
#include <debug.h>
#include "ccinternal.hpp"


extern "C"

/*Internal Function*/

VOID 
VECTORCALL
CcPostWorkQueue(IN PWORK_QUEUE_ENTRY WorkItem, 
                IN PLIST_ENTRY WorkQueue)
{
    PWORK_QUEUE_ITEM ThreadToSpawn = nullptr;
    PLIST_ENTRY ListEntry = nullptr;
    KIRQL CurrentIrql = PASSIVE_LEVEL;

    /* Acquire the spin lock and insert the work item into the queue */
    CurrentIrql = KeAcquireQueuedSpinLock(LockQueueWorkQueueLock);
    InsertTailList(WorkQueue, &WorkItem->WorkQueueLinks);

    /* Check whether a new worker thread needs to be spawned */
    if (!CcQueueThrottle && !IsListEmpty(&CcIdleWorkerThreadList))
    {

        ListEntry = RemoveHeadList(&CcIdleWorkerThreadList);
        ThreadToSpawn = CONTAINING_RECORD(ListEntry, WORK_QUEUE_ITEM, List);

        /* Update the number of Active Worker Thread */
        _InterlockedIncrement(&CcNumberActiveWorkerThreads);
    }

    KeReleaseQueuedSpinLock(LockQueueWorkQueueLock, CurrentIrql);

    /* If an idle worker was dequeued, queue it now that the spin lock has been released */
    if (ThreadToSpawn)
    {
        DBGPRINT(" CcPostWorkQueue: Thread Left to be spwaned even after release of Spinlock!\n");
        ThreadToSpawn->List.Flink = nullptr;
        ExQueueWorkItem(ThreadToSpawn, CriticalWorkQueue);
    }
}
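
/*
 * CcScheduleLazyWriteScanEx
 *
 * Arms the lazy writer scan timer. When no delay is requested the scan is
 * started immediately (optionally recording a pending teardown); otherwise
 * the timer is set to the first-time or idle delay depending on whether a
 * scan is already active.
 */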
VOID 
NTAPI 
CcScheduleLazyWriteScanEx(IN BOOLEAN NoDelay, IN BOOLEAN PendingTeardown)
{
    /* If no delay is requested, start the lazy writer scan immediately */
    if (NoDelay)
    {
        LazyWriter.ScanActive = TRUE;
        /* Record a pending teardown if the caller asked for one */
        if (PendingTeardown)
        {
            LazyWriter.PendingTeardown = TRUE;
        }
        KeSetTimer(&LazyWriter.ScanTimer, CcNoDelay, &LazyWriter.ScanDpc);
    }
    /* If the scan is not running yet, start it after the first-time delay */
    else if (!LazyWriter.ScanActive)
    {
        LazyWriter.ScanActive = TRUE;
        KeSetTimer(&LazyWriter.ScanTimer, CcFirstDelay, &LazyWriter.ScanDpc);
    }
    /* It is already running, so queue the next scan after the idle delay */
    else
    {
        KeSetTimer(&LazyWriter.ScanTimer, CcIdleDelay, &LazyWriter.ScanDpc);
    }
}

VOID 
NTAPI 
CcScheduleLazyWriteScan(IN BOOLEAN NoDelay)
{
    CcScheduleLazyWriteScanEx(NoDelay, FALSE);
}
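
/*
 * CcScanDpc
 *
 * DPC fired by the lazy writer scan timer. It allocates a work queue entry
 * from the per-processor (or system) lookaside list and posts a
 * LazyWriteScan work item; if no entry can be allocated the scan is simply
 * marked inactive.
 */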
VOID VECTORCALL CcScanDpc(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SysArg0, IN PVOID SysArg1)
{

    PLIST_ENTRY WorkQueue;
    PGENERAL_LOOKASIDE LookasideList;
    KIRQL CurrentIrql;

    /* Get the current PRCB and try the per-processor lookaside list first */
    PKPRCB Prcb = KeGetCurrentPrcb();
    LookasideList = Prcb->PPLookasideList[5].P;
    InterlockedIncrement(&LookasideList->TotalAllocates);
    PWORK_QUEUE_ENTRY WorkItem = reinterpret_cast<PWORK_QUEUE_ENTRY>(InterlockedPopEntrySList(&LookasideList->ListHead));

    if (!WorkItem)
    {
        /* The per-processor list was empty, fall back to the system-wide list */
        InterlockedIncrement(&LookasideList->AllocateMisses);
        LookasideList = Prcb->PPLookasideList[5].L;
        InterlockedIncrement(&LookasideList->TotalAllocates);
        WorkItem = reinterpret_cast<PWORK_QUEUE_ENTRY>(InterlockedPopEntrySList(&LookasideList->ListHead));

        if (!WorkItem)
        {
            /* Both lookaside lists are empty, allocate directly */
            DBGPRINT("CcScanDpc: lookaside lists empty, allocating a work item directly\n");
            InterlockedIncrement(&LookasideList->AllocateMisses);
            WorkItem = static_cast<PWORK_QUEUE_ENTRY>(
                LookasideList->Allocate(LookasideList->Type, LookasideList->Size, LookasideList->Tag));
        }
    }

    /* If no work item could be allocated, mark the scan as inactive and bail out */
    if (!WorkItem)
    {
        DBGPRINT("CcScanDpc: failed to allocate a work queue entry!\n");
        CurrentIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        /* Set the lazy writer scan to inactive */
        LazyWriter.ScanActive = FALSE;
        DBGPRINT("CcScanDpc: lazy writer scan is disabled!\n");
        KeReleaseQueuedSpinLock(LockQueueMasterLock, CurrentIrql);
        return;
    }


    WorkItem->Function = LazyWriteScan;
    CurrentIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Check the lazy writer teardown status */
    if (LazyWriter.PendingTeardown)
    {
        /* Route the work item to the fast teardown queue and clear the pending flag */
        WorkQueue = &CcFastTeardownWorkQueue;
        LazyWriter.PendingTeardown = FALSE;
    }
    else
    {
        WorkQueue = &CcRegularWorkQueue;
    }

    /* Release the spin lock and post the lazy write scan */
    KeReleaseQueuedSpinLock(LockQueueMasterLock, CurrentIrql);
    CcPostWorkQueue(WorkItem, WorkQueue);
}

LONG 
VECTORCALL 
CcExceptionFilter(IN NTSTATUS Status)
{
    LONG Result;

    /* Execute the handler only for expected status values */
    if (FsRtlIsNtstatusExpected(Status))
        Result = EXCEPTION_EXECUTE_HANDLER;
    else
        Result = EXCEPTION_CONTINUE_SEARCH;

    DBGPRINT("CcExceptionFilter: Status %X\n", Status);
    return Result;
}
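
/*
 * CcPerformReadAhead
 *
 * Read-ahead worker routine: it captures the two read-ahead ranges recorded
 * in the file object's private cache map, maps them through cache VACBs and
 * touches every page so it gets faulted in, then updates the read-ahead
 * bookkeeping and drops the references taken when the work item was queued.
 */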
VOID 
VECTORCALL 
CcPerformReadAhead(IN PFILE_OBJECT FileObject)
{
    PETHREAD CurrentThread = PsGetCurrentThread();
    PSHARED_CACHE_MAP SharedCachMap;
    PPRIVATE_CACHE_MAP PrivateCachMap;
    PVACB Vacb = nullptr;
    BOOLEAN LengthIsZero = FALSE;
    BOOLEAN ReadAheadPerformed = FALSE;
    BOOLEAN ReadAheadStatus = FALSE;
    BOOLEAN ReadAheadFinishStatus = FALSE;
    PVOID VirtualAddress = nullptr;
    LARGE_INTEGER FileOffset = {0};
    LARGE_INTEGER readAheadOffset[2];
    ULONG readAheadLength[2];
    ULONG IsPageNotResident = 0;
    ULONG ReceivedLength = 0;
    ULONG NumberOfPages = 0;
    ULONG Length = 0;
    ULONG OldReadClusterSize = 0;
    UCHAR OldForwardClusterOnly = 0;
    KIRQL LastIRQL = PASSIVE_LEVEL;

    /* GET Cluster History */
    OldForwardClusterOnly = CurrentThread->ForwardClusterOnly;
    OldReadClusterSize = CurrentThread->ReadClusterSize;

    SharedCachMap = FileObject->SectionObjectPointer->SharedCacheMap;

    _TRY
    {
        while (TRUE)
        {
            LastIRQL = KeAcquireQueuedSpinLock(LockQueueMasterLock);

            PrivateCachMap = FileObject->PrivateCacheMap;

            /* If the private cache map is valid, capture and reset its read-ahead ranges */
            if (PrivateCachMap != nullptr)
            {
                KeAcquireSpinLockAtDpcLevel(&PrivateCachMap->ReadAheadSpinLock);

                LengthIsZero = (!(PrivateCachMap->ReadAheadLength[0] | PrivateCachMap->ReadAheadLength[1]));

                readAheadOffset[0].QuadPart = PrivateCachMap->ReadAheadOffset[0].QuadPart;
                readAheadOffset[1].QuadPart = PrivateCachMap->ReadAheadOffset[1].QuadPart;

                readAheadLength[0] = PrivateCachMap->ReadAheadLength[0];
                readAheadLength[1] = PrivateCachMap->ReadAheadLength[1];

                PrivateCachMap->ReadAheadLength[0] = 0;
                PrivateCachMap->ReadAheadLength[1] = 0;

                KeReleaseSpinLockFromDpcLevel(&PrivateCachMap->ReadAheadSpinLock);
            }

            KeReleaseQueuedSpinLock(LockQueueMasterLock, LastIRQL);

            ReadAheadStatus = (*SharedCachMap->Callbacks->AcquireForReadAhead)(SharedCachMap->LazyWriteContext, TRUE);

            if (!PrivateCachMap || LengthIsZero || !ReadAheadStatus)
                break;

            for (auto i = 0; i <= 1; i++)
            {
                FileOffset = readAheadOffset[i];
                Length = readAheadLength[i];
                /* Check if Cache Can be read Ahead */
                if (Length && FileOffset.QuadPart <= SharedCachMap->FileSize.QuadPart)
                {
                    ReadAheadPerformed = TRUE;

                    if (SharedCachMap->FileSize.QuadPart <= (FileOffset.QuadPart + (LONGLONG)Length))
                    {
                        Length = (SharedCachMap->FileSize.QuadPart - FileOffset.QuadPart);
                        ReadAheadFinishStatus = TRUE;
                    }

                    if (Length > 0x800000)
                        Length = 0x800000; // Make a Define and move to header

                    while (Length != 0)
                    {
                        VirtualAddress = CcGetVirtualAddress(SharedCachMap, FileOffset, &Vacb, &ReceivedLength);

                        if (ReceivedLength > Length)
                            ReceivedLength = Length;

                        for (NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(VirtualAddress, ReceivedLength);
                             NumberOfPages; NumberOfPages--)
                        {
                            CurrentThread->ForwardClusterOnly = 1;

                            if (NumberOfPages > 0x10)
                                CurrentThread->ReadClusterSize = 0xF;
                            else
                                CurrentThread->ReadClusterSize = (NumberOfPages - 1);

                            IsPageNotResident |= !MmCheckCachedPageState(VirtualAddress, FALSE);

                            VirtualAddress = static_cast<PUCHAR>(VirtualAddress) + PAGE_SIZE;
                        }
                        /*Update History */
                        FileOffset.QuadPart += ReceivedLength;
                        Length -= ReceivedLength;

                        CcFreeVirtualAddress(Vacb);
                        Vacb = nullptr;
                    }
                }
            }

            (*SharedCachMap->Callbacks->ReleaseFromReadAhead)(SharedCachMap->LazyWriteContext);

            ReadAheadStatus = FALSE;
        }
    }
    _FINALLY
    {

        /* Restore cluster variables */
        CurrentThread->ForwardClusterOnly = OldForwardClusterOnly;
        CurrentThread->ReadClusterSize = OldReadClusterSize;

        if (Vacb)
            CcFreeVirtualAddress(Vacb);

        /* If the read-ahead callback is still acquired, release it */
        if (ReadAheadStatus)
            (*SharedCachMap->Callbacks->ReleaseFromReadAhead)(SharedCachMap->LazyWriteContext);

        LastIRQL = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        PrivateCachMap = FileObject->PrivateCacheMap;
        if (PrivateCachMap)
        {
            KeAcquireSpinLockAtDpcLevel(&PrivateCachMap->ReadAheadSpinLock);

            RtlInterlockedAndBits(&PrivateCachMap->UlongFlags, ~SHARE_FL_WAITING_TEARDOWN);

            if (ReadAheadFinishStatus && (FileObject->Flags & FO_SEQUENTIAL_ONLY))
                PrivateCachMap->ReadAheadOffset[1].QuadPart = 0;

            if (ReadAheadPerformed && !IsPageNotResident)
                RtlInterlockedAndBits(&PrivateCachMap->UlongFlags, ~0x20000);

            KeReleaseSpinLockFromDpcLevel(&PrivateCachMap->ReadAheadSpinLock);
        }

        KeReleaseQueuedSpinLock(LockQueueMasterLock, LastIRQL);
        ObDereferenceObject(FileObject);
        LastIRQL = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        _InterlockedDecrement(&SharedCachMap->OpenCount);
        SharedCachMap->Flags &= ~SHARE_FL_READ_AHEAD;

        /* If the cache map is unreferenced, has no queued write and no dirty pages, hand it to the lazy writer for teardown */
        if (!SharedCachMap->OpenCount && !(SharedCachMap->Flags & SHARE_FL_WRITE_QUEUED) && !SharedCachMap->DirtyPages)
        {
            RemoveEntryList(&SharedCachMap->SharedCacheMapLinks);
            InsertTailList(&CcDirtySharedCacheMapList.SharedCacheMapLinks, &SharedCachMap->SharedCacheMapLinks);

            LazyWriter.OtherWork = TRUE;

            if (!LazyWriter.ScanActive)
                CcScheduleLazyWriteScan(FALSE);
        }

        KeReleaseQueuedSpinLock(LockQueueMasterLock, LastIRQL);
    }
}
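
/*
 * CcPostDeferredWrites
 *
 * Walks the deferred write list and, for every entry that can now be
 * written according to CcCanIWrite, either signals the waiter's event or
 * runs the entry's post routine and frees it. The scan is repeated until
 * no further entry can be posted.
 */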
VOID 
NTAPI 
CcPostDeferredWrites(VOID)
{
    PDEFERRED_WRITE DeferredWrites = nullptr;
    PLIST_ENTRY Entry = nullptr;
    ULONG BytesToWrite = {0};
    KIRQL OldIrql = {0};

    while (TRUE)
    {
        DeferredWrites = nullptr;

        /* Acquire the spin lock protecting the deferred write list */
        KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);

        /* Scan the list for an entry that can be posted now */
        for (Entry = CcDeferredWrites.Flink; Entry != &CcDeferredWrites; Entry = Entry->Flink)
        {
            DeferredWrites = CONTAINING_RECORD(Entry, DEFERRED_WRITE, DeferredWriteLinks);

            /* Accumulate the total bytes to write */
            BytesToWrite += DeferredWrites->BytesToWrite;

            /* BytesToWrite must not overflow the deferred write limits */
            if (BytesToWrite < DeferredWrites->BytesToWrite)
            {
                DeferredWrites = nullptr;
                break;
            }

            /* If writing to the file object is possible, remove the entry and post it */
            if (CcCanIWrite(DeferredWrites->FileObject, BytesToWrite, FALSE, 0xFE))
            {
                RemoveEntryList(&DeferredWrites->DeferredWriteLinks);
                break;
            }

            /* If the entry does not limit modified pages, stop scanning */
            if (!DeferredWrites->LimitModifiedPages)
            {
                DeferredWrites = nullptr;
                break;
            }

            /* This entry cannot be written yet, back its bytes out and keep scanning */
            BytesToWrite -= DeferredWrites->BytesToWrite;
            DeferredWrites = nullptr;
        }

        /* Release the spin lock */
        KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);

        /* Nothing could be posted, get out of the loop */
        if (!DeferredWrites)
            break;

        /* If the entry has an event, signal the waiter */
        if (DeferredWrites->Event)
        {
            KeSetEvent(DeferredWrites->Event, IO_NO_INCREMENT, FALSE);
            continue;
        }

        /* Otherwise execute the post routine and free the entry */
        DeferredWrites->PostRoutine(DeferredWrites->Context1, DeferredWrites->Context2);
        ExFreePoolWithTag(DeferredWrites, 0);
    }
}

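/*
 * CcSetValidData
 *
 * Advances the on-disk valid data length of a file by sending a synchronous
 * IRP_MJ_SET_INFORMATION (FileEndOfFileInformation, AdvanceOnly) paging
 * request to the file system driver and waiting for it to complete.
 */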
NTSTATUS 
NTAPI 
CcSetValidData(IN PFILE_OBJECT FileObject, 
               IN PLARGE_INTEGER FileSize)
{
        FILE_END_OF_FILE_INFORMATION FileInfo = {0};
        PDEVICE_OBJECT DeviceObject = nullptr;           
        PIO_STACK_LOCATION IoStack = nullptr;            
        IO_STATUS_BLOCK IoStatus = {0};
        KEVENT Event;                          
        PIRP Irp;                              
        NTSTATUS Status;                     

        /*Initialize a kernel event object for synchronization */
        KeInitializeEvent(&Event, NotificationEvent, FALSE);

        /* Retrieve the device object associated with the file*/
        DeviceObject = IoGetRelatedDeviceObject(FileObject);

        /* Initialize FileInfo structure with the new file size */
        FileInfo.EndOfFile.QuadPart = FileSize->QuadPart;

        /* Allocate an I/O request packet (IRP) */
        Irp = IoAllocateIrp(DeviceObject->StackSize, 0);
        if (!Irp)
        {
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Set IRP parameters */
        Irp->AssociatedIrp.SystemBuffer = &FileInfo;
        Irp->UserIosb = &IoStatus;
        Irp->UserEvent = &Event;
        Irp->Flags = (IRP_SYNCHRONOUS_PAGING_IO | IRP_PAGING_IO);
        Irp->RequestorMode = KernelMode;
        Irp->Tail.Overlay.Thread = PsGetCurrentThread();
        Irp->Tail.Overlay.OriginalFileObject = FileObject;

        /* Get the next IRP stack location and set up IRP parameters */
        IoStack = IoGetNextIrpStackLocation(Irp);
        IoStack->MajorFunction = IRP_MJ_SET_INFORMATION;
        IoStack->FileObject = FileObject;
        IoStack->DeviceObject = DeviceObject;
        IoStack->Parameters.SetFile.Length = sizeof(FILE_END_OF_FILE_INFORMATION);
        IoStack->Parameters.SetFile.FileInformationClass = FileEndOfFileInformation;
        IoStack->Parameters.SetFile.FileObject = nullptr;
        IoStack->Parameters.SetFile.AdvanceOnly = 1;

        /* Call the device driver to handle the IRP */
        Status = IoCallDriver(DeviceObject, Irp);

        /*If the operation is pending, wait for it to complete*/
        if (Status == STATUS_PENDING)
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, nullptr);

        /* Handle completion and error handling */
        if (!NT_SUCCESS(Status))
        {
            IoStatus.Status = Status;
        }

        /* Return the IOstatus of the operation */
        return IoStatus.Status;
}
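
/*
 * CcWriteBehind
 *
 * Lazy writer work routine for a single shared cache map: acquires the file
 * system's lazy write callback, flushes the dirty pages with CcFlushCache,
 * pushes the flushed valid data length down to the file system when needed,
 * and tears the cache map down once it is no longer referenced.
 */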
VOID 
VECTORCALL 
CcWriteBehind(IN PSHARED_CACHE_MAP SharedMap, 
              IN IO_STATUS_BLOCK * OutIoStatus)
{
        KLOCK_QUEUE_HANDLE LockHandle; 
        LARGE_INTEGER validDataLength = {0}; 
        ULONG ActivePage = {0};              
        ULONG TargetPages = {0} ;             
        KIRQL OldIrql = {0};                 
        BOOLEAN IsVacbLocked = false;  
        BOOLEAN IsCancelWait = false;  
        NTSTATUS Status;               

        PVACB ActiveVacb = nullptr; 
        PUNICODE_STRING FileName = nullptr;   

        /*Acquire lazy writer's lock for lazy write*/
        if (!(*SharedMap->Callbacks->AcquireForLazyWrite)(SharedMap->LazyWriteContext, TRUE))
        {
            /* If acquisition fails, release LockQueueMasterLock and set STATUS_FILE_LOCK_CONFLICT */
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            SharedMap->Flags &= ~0x20; // Clear the flag indicating wait for Lazy Writer
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

            OutIoStatus->Status = STATUS_FILE_LOCK_CONFLICT;
            return;
        }

        /* Acquire BcbSpinLock and LockQueueMasterLock */
        KeAcquireInStackQueuedSpinLock(&SharedMap->BcbSpinLock, &LockHandle);
        KeAcquireQueuedSpinLockAtDpcLevel(&KeGetCurrentPrcb()->LockQueue[LockQueueMasterLock]);

        /*Determine if there are few dirty pages or no open references */
        if (SharedMap->DirtyPages <= 1 || !SharedMap->OpenCount)
        {
            /*Acquire ActiveVacbSpinLock to handle ActiveVacb*/
            KeAcquireSpinLockAtDpcLevel(&SharedMap->ActiveVacbSpinLock);

            /*Retrieve ActiveVacb and related information if it exists*/
            ActiveVacb = SharedMap->ActiveVacb;
            if (ActiveVacb)
            {
                ActivePage = SharedMap->ActivePage;
                SharedMap->ActiveVacb = 0;
                IsVacbLocked = ((SharedMap->Flags & SHARE_FL_VACB_LOCKED) != 0);
            }

            /*Release ActiveVacbSpinLock*/
            KeReleaseSpinLockFromDpcLevel(&SharedMap->ActiveVacbSpinLock);
        }

        /*Increment OpenCount*/
        _InterlockedIncrement(&SharedMap->OpenCount);

        /* Update Mbcb information if it exists*/
        if (SharedMap->Mbcb)
        {
            TargetPages = SharedMap->Mbcb->DirtyPages;

            /* The active Vacb accounts for one more dirty page */
            if (ActiveVacb)
                TargetPages++;

            /*Determine PagesToWrite based on CcPagesYetToWrite and TargetPages*/
            if (TargetPages > CcPagesYetToWrite)
                SharedMap->Mbcb->PagesToWrite = CcPagesYetToWrite;
            else
                SharedMap->Mbcb->PagesToWrite = TargetPages;
        }

        /*Release LockQueueMasterLock and BcbSpinLock*/
        KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->LockQueue[LockQueueMasterLock]);
        KeReleaseInStackQueuedSpinLock(&LockHandle);

        /* Free ActiveVacb if it exists*/
        if (ActiveVacb)
            CcFreeActiveVacb(SharedMap, ActiveVacb, ActivePage, IsVacbLocked);

        /* Flush cache to disk*/
        CcFlushCache(SharedMap->FileObject->SectionObjectPointer, &CcNoDelay, 1, OutIoStatus);

        /*Release lazy writer's lock*/
        (*SharedMap->Callbacks->ReleaseFromLazyWrite)(SharedMap->LazyWriteContext);

        /* Check if Status Verification Failed */
        if (!NT_SUCCESS(OutIoStatus->Status) && OutIoStatus->Status != STATUS_VERIFY_REQUIRED)
            IsCancelWait = TRUE;

        if (!NT_SUCCESS(OutIoStatus->Status) && OutIoStatus->Status != STATUS_VERIFY_REQUIRED &&
            OutIoStatus->Status != STATUS_FILE_LOCK_CONFLICT &&
            OutIoStatus->Status != STATUS_ENCOUNTERED_WRITE_IN_PROGRESS)
        {
            /* Prepare for error notification and log*/
            POBJECT_NAME_INFORMATION FileNameInfo = nullptr;
            NTSTATUS status;

            /* Query the DOS device name of the file for the error pop-up */
            status = IoQueryFileDosDeviceName(SharedMap->FileObject, &FileNameInfo);

            if (NT_SUCCESS(status))
            {
                IoRaiseInformationalHardError(STATUS_LOST_WRITEBEHIND_DATA, &FileNameInfo->Name, nullptr);
            }
            else
            {
                FileName = &SharedMap->FileObject->FileName;

                if (FileName->Length && FileName->MaximumLength && FileName->Buffer)
                    IoRaiseInformationalHardError(STATUS_LOST_WRITEBEHIND_DATA, FileName, nullptr);
            }

            if (FileNameInfo)
                ExFreePoolWithTag(FileNameInfo, 0);
        }
        else if (!IsListEmpty(&CcDeferredWrites))
        {
            /*If deferred writes exist, process them */
            CcPostDeferredWrites();
        }
       
        /*Acquire BcbSpinLock */
        KeAcquireInStackQueuedSpinLock(&SharedMap->BcbSpinLock, &LockHandle);

        /* Update ValidDataLength if necessary*/
        Status = STATUS_SUCCESS;

        if (SharedMap->Flags & (0x400 | 0x8000))
        {
            if (SharedMap->ValidDataLength.QuadPart <= SharedMap->ValidDataGoal.QuadPart &&
                SharedMap->ValidDataLength.QuadPart != 0x7FFFFFFFFFFFFFFF && SharedMap->FileSize.QuadPart)
            {
                /* Get flushed valid data and set ValidDataLength if necessary */
                validDataLength = CcGetFlushedValidData(SharedMap->FileObject->SectionObjectPointer, TRUE);

                if (validDataLength.QuadPart >= SharedMap->ValidDataLength.QuadPart)
                {
                    KeReleaseInStackQueuedSpinLock(&LockHandle);
                    Status = CcSetValidData(SharedMap->FileObject, &validDataLength);
                    KeAcquireInStackQueuedSpinLock(&SharedMap->BcbSpinLock, &LockHandle);

                    if (NT_SUCCESS(Status))
                        SharedMap->ValidDataLength = validDataLength;
                }
            }
        }

        /* Release BcbSpinLock*/
        KeReleaseInStackQueuedSpinLock(&LockHandle);

        /* Perform cleanup tasks*/
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        InterlockedDecrement(&SharedMap->OpenCount);
        LockHandle.OldIrql = OldIrql;

        /* Check if there are any remaining open references*/
        if (SharedMap->OpenCount)
            goto Exit;

        /*Cleanup if no remaining open references*/
        if (NT_SUCCESS(Status) ||
            (Status != STATUS_INSUFFICIENT_RESOURCES && Status != STATUS_VERIFY_REQUIRED &&
             Status != STATUS_FILE_LOCK_CONFLICT && Status != STATUS_ENCOUNTERED_WRITE_IN_PROGRESS))
        {
            /* Release LockQueueMasterLock, acquire file object lock, and recheck open count*/
            KeReleaseQueuedSpinLock(LockQueueMasterLock, LockHandle.OldIrql);
            FsRtlAcquireFileExclusive(SharedMap->FileObject);

            LockHandle.OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

            /*Delete SharedCacheMap if no open references remain*/
            if (!SharedMap->OpenCount)
            {
                if (!SharedMap->DirtyPages ||
                    (!SharedMap->FileSize.QuadPart && !(SharedMap->Flags & SHARE_FL_PIN_ACCESS)))
                {
                    CcDeleteSharedCacheMap(SharedMap, LockHandle.OldIrql, TRUE);
                    OutIoStatus->Information = 0;
                    return;
                }
            }

            /*Release LockQueueMasterLock and file object lock */
            KeReleaseQueuedSpinLock(LockQueueMasterLock, LockHandle.OldIrql);
            FsRtlReleaseFile(SharedMap->FileObject);
            LockHandle.OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        }

    Exit:

        /*Clear SHARE_FL_WAITING_FLUSH flag if necessary*/
        if (OutIoStatus->Information != 0x8A5E)
            SharedMap->Flags &= ~0x20;

        /* Release LockQueueMasterLock*/
        KeReleaseQueuedSpinLock(LockQueueMasterLock, LockHandle.OldIrql);
}
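
/*
 * CcLazyWriteScan
 *
 * The periodic lazy writer scan: computes how many dirty pages should be
 * written this pass, walks the shared cache map list starting at the lazy
 * writer cursor, posts a write-behind work item for every map that has
 * pages to write, and reschedules itself while work is still pending.
 */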
VOID 
NTAPI CcLazyWriteScan()
{
        PSHARED_CACHE_MAP FirstMap = nullptr;
        PSHARED_CACHE_MAP SharedMap = nullptr;
        PGENERAL_LOOKASIDE LookasideList = nullptr;
        PWORK_QUEUE_ENTRY WorkItem = nullptr;
        PLIST_ENTRY ListEntry = nullptr;
        PLIST_ENTRY MapLinks = nullptr;
        PKPRCB Prcb = nullptr;
        LIST_ENTRY PostWorkList ={0};
        ULONG TargetPages = {0};
        ULONG NextTargetPages;
        ULONG DirtyPages;
        /* Placeholder delay until CcComputeNextScanTime is enabled with the thread pool */
        LARGE_INTEGER NextScanDelay = CcIdleDelay;
        ULONG counter = 0;
        BOOLEAN IsNoPagesToWrite = FALSE;
        BOOLEAN IsDoubleScan = FALSE;
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        // Check if there is no dirty page and no other work pending
        if (!CcTotalDirtyPages && !LazyWriter.OtherWork)
        {
            // Check if there are deferred writes pending
            if (!IsListEmpty(&CcDeferredWrites))
            {
                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
                CcPostDeferredWrites();
                CcScheduleLazyWriteScan(FALSE);
            }
            else
            {
                // No deferred writes pending, mark lazy write scan as inactive
                LazyWriter.ScanActive = 0;
                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            }

            return;
        }

        InitializeListHead(&PostWorkList);

        // Move items from tick work queue to post work list
        while (!IsListEmpty(&CcPostTickWorkQueue))
        {
            ListEntry = RemoveHeadList(&CcPostTickWorkQueue);
            InsertTailList(&PostWorkList, ListEntry);
        }

        LazyWriter.OtherWork = FALSE;

        // Calculate target pages based on dirty pages count
        if (CcTotalDirtyPages > 8)
            TargetPages = (CcTotalDirtyPages / 8);
        else
            TargetPages = CcTotalDirtyPages;

        DirtyPages = (CcTotalDirtyPages + CcPagesWrittenLastTime);

        // Calculate next target pages for scanning
        if (CcDirtyPagesLastScan < DirtyPages)
            NextTargetPages = (DirtyPages - CcDirtyPagesLastScan);
        else
            NextTargetPages = 0;

        NextTargetPages += (CcTotalDirtyPages - TargetPages);

        if (NextTargetPages > CcDirtyPageTarget)
            TargetPages += (NextTargetPages - CcDirtyPageTarget);

        CcDirtyPagesLastScan = CcTotalDirtyPages;
        CcPagesWrittenLastTime = TargetPages;
        CcPagesYetToWrite = TargetPages;

        // Get the first shared cache map to start the scan
        SharedMap =
            CONTAINING_RECORD(CcLazyWriterCursor.SharedCacheMapLinks.Flink, SHARED_CACHE_MAP, SharedCacheMapLinks);

        while (SharedMap != FirstMap)
        {
            MapLinks = &SharedMap->SharedCacheMapLinks;

            if (MapLinks == &CcLazyWriterCursor.SharedCacheMapLinks)
                break;

            if (!FirstMap)
                FirstMap = SharedMap;

            // Check if we need to skip to the next map
            if (IsGoToNextMap(SharedMap, TargetPages))
            {
                counter++;
                if (counter >= 20 && !(SharedMap->Flags & (0x20 | 0x800)))
                {
                    SharedMap->DirtyPages++;
                    SharedMap->Flags |= 0x20;
                    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                    counter = 0;

                    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                    SharedMap->Flags &= ~0x20;
                    SharedMap->DirtyPages--;
                }

                goto NextMap;
            }

            SharedMap->PagesToWrite = SharedMap->DirtyPages;

            // Adjust pages to write based on certain conditions
            if ((SharedMap->Flags & SHARE_FL_MODIFIED_NO_WRITE) && SharedMap->DirtyPages >= 0x40 &&
                CcCapturedSystemSize != MmSmallSystem)
            {
                SharedMap->PagesToWrite /= 8;
            }

            if (!IsNoPagesToWrite)
            {
                if (TargetPages > SharedMap->PagesToWrite)
                {
                    TargetPages -= SharedMap->PagesToWrite;
                }
                else if ((SharedMap->Flags & SHARE_FL_MODIFIED_NO_WRITE) ||
                         (FirstMap == SharedMap && !(SharedMap->LazyWritePassCount & 0xF)))
                {
                    TargetPages = 0;
                    IsNoPagesToWrite = TRUE;

                    IsDoubleScan = TRUE;
                }
                else
                {
                    RemoveEntryList(&CcLazyWriterCursor.SharedCacheMapLinks);
                    InsertTailList(MapLinks, &CcLazyWriterCursor.SharedCacheMapLinks);

                    TargetPages = 0;
                    IsNoPagesToWrite = TRUE;
                }
            }

            SharedMap->Flags |= 0x20;
            SharedMap->DirtyPages++;

            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

            // Acquire a work item for writing behind
            Prcb = KeGetCurrentPrcb();
            LookasideList = Prcb->PPLookasideList[5].P;
            LookasideList->TotalAllocates++;
            WorkItem = (PWORK_QUEUE_ENTRY)InterlockedPopEntrySList(&LookasideList->ListHead);

            if (!WorkItem)
            {
                LookasideList->AllocateMisses++;
                LookasideList = Prcb->PPLookasideList[5].L;
                LookasideList->TotalAllocates++;
                WorkItem = (PWORK_QUEUE_ENTRY)InterlockedPopEntrySList(&LookasideList->ListHead);

                if (!WorkItem)
                {
                    LookasideList->AllocateMisses++;
                    WorkItem = (PWORK_QUEUE_ENTRY)LookasideList->Allocate(LookasideList->Type, LookasideList->Size, LookasideList->Tag);
                }
            }

            // Check if work item is available
            if (!WorkItem)
            {
                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                SharedMap->Flags &= ~0x20;
                SharedMap->DirtyPages--;
                break;
            }

            // Initialize work item for write behind function
            WorkItem->Function = WriteBehind;
            WorkItem->Parameters.Write.SharedCacheMap = SharedMap;

            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

            SharedMap->DirtyPages--;

            // Post work item to appropriate queue
            if (SharedMap->Flags & SHARE_FL_WAITING_TEARDOWN)
            {
                SharedMap->WriteBehindWorkQueueEntry = (PVOID)((ULONG_PTR)WorkItem | 1);
                CcPostWorkQueue(WorkItem, &CcFastTeardownWorkQueue);
            }
            else
            {
                SharedMap->WriteBehindWorkQueueEntry = WorkItem;
                CcPostWorkQueue(WorkItem, &CcRegularWorkQueue);
            }

            counter = 0;

        NextMap:

            SharedMap = CONTAINING_RECORD(MapLinks->Flink, SHARED_CACHE_MAP, SharedCacheMapLinks);

            if (IsDoubleScan)
            {
                RemoveEntryList(&CcLazyWriterCursor.SharedCacheMapLinks);
                InsertHeadList(MapLinks, &CcLazyWriterCursor.SharedCacheMapLinks);

                IsDoubleScan = FALSE;
            }
        }

        // Post remaining work items to regular work queue
        while (!IsListEmpty(&PostWorkList))
        {
            PWORK_QUEUE_ENTRY workItem;
            PLIST_ENTRY entry;

            entry = RemoveHeadList(&PostWorkList);
            workItem = CONTAINING_RECORD(entry, WORK_QUEUE_ENTRY, WorkQueueLinks);

            CcPostWorkQueue(workItem, &CcRegularWorkQueue);
        }
        //   CcComputeNextScanTime(&OldestLWSTimeStamp, &NextScanDelay);  Enable When Threadpool is finished 

       // if (!IsListEmpty(&PostWorkList) || !IsListEmpty(&CcDeferredWrites) || MmRegistryStatus.ProductStatus ||NextScanDelay.QuadPart != 0x7FFFFFFFFFFFFFFF))
        if (!IsListEmpty(&PostWorkList) || !IsListEmpty(&CcDeferredWrites) || MmRegistryStatus.ProductStatus)
        {
            /* Schedule a lazy write scan */
            CcRescheduleLazyWriteScan(&NextScanDelay);
            /* If forced disable is set, clear it */
            if (CcForcedDisableLazywriteScan)
            {
                CcForcedDisableLazywriteScan = FALSE;
            }
        }
        else
        {
            /* Set forced disable and deactivate the scan */
            CcForcedDisableLazywriteScan = TRUE;
            LazyWriter.ScanActive = FALSE;
        }

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        /* Check if there are deferred writes pending */
        if (!IsListEmpty(&CcDeferredWrites))
        {
            CcPostDeferredWrites();
            CcScheduleLazyWriteScan(FALSE);
        }
}
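
/*
 * CcReEngageWorkerThreads
 *
 * Re-activates worker threads that were parked on the idle lists while the
 * queue throttle was raised: the requested number of normal and extra
 * write-behind threads are unlinked from their idle lists and queued back
 * to the critical work queue.
 */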
VOID VECTORCALL CcReEngageWorkerThreads(
    ULONG NormalThreadsToActivate, 
    ULONG ExtraWriteBehindThreadsToActivate
)
{
    ULONG i;
    PLIST_ENTRY currentThreadEntry;
    PLIST_ENTRY nextThreadEntry;
	ULONG j;
    PLIST_ENTRY currentExtraThreadEntry;
    PLIST_ENTRY nextExtraThreadEntry;

    /*Activate the required number of normal worker threads*/
    for (i = 0; i < NormalThreadsToActivate; ++i)
    {
        /* Stop once the idle worker list is exhausted */
        if (IsListEmpty(&CcIdleWorkerThreadList))
            break;

        currentThreadEntry = CcIdleWorkerThreadList.Flink;
        nextThreadEntry = currentThreadEntry->Flink;

        /* Check if the list is corrupt */
        if (currentThreadEntry->Blink != &CcIdleWorkerThreadList || 
            nextThreadEntry->Blink != currentThreadEntry)
        {
            __fastfail(3u);
        }

        /* Unlink the first idle thread from the list */
        ++CcNumberActiveWorkerThreads;
        CcIdleWorkerThreadList.Flink = nextThreadEntry;
        nextThreadEntry->Blink = &CcIdleWorkerThreadList;

        /* Detach the current thread from the list and schedule it for work */
        currentThreadEntry->Flink = NULL;
        ExQueueWorkItem(CONTAINING_RECORD(currentThreadEntry, WORK_QUEUE_ITEM, List), CriticalWorkQueue);
    }

    

    for (j = 0; j < ExtraWriteBehindThreadsToActivate; ++j)
    {
        /* Stop once the idle extra write-behind list is exhausted */
        if (IsListEmpty(&CcIdleExtraWriteBehindThreadList))
            break;

        /* First thread in the idle extra write-behind list */
        currentExtraThreadEntry = CcIdleExtraWriteBehindThreadList.Flink;
        nextExtraThreadEntry = currentExtraThreadEntry->Flink;

        /* Consistency check to prevent corrupt list operations */
        if (currentExtraThreadEntry->Blink != &CcIdleExtraWriteBehindThreadList || 
            nextExtraThreadEntry->Blink != currentExtraThreadEntry)
        {
            __fastfail(3u);
        }

        /* Unlink the first idle extra thread from the list */
        ++CcActiveExtraWriteBehindThreads;
        CcIdleExtraWriteBehindThreadList.Flink = nextExtraThreadEntry;
        nextExtraThreadEntry->Blink = &CcIdleExtraWriteBehindThreadList;

        /* Detach the current extra thread from the list and schedule it for work */
        currentExtraThreadEntry->Flink = NULL;
        ExQueueWorkItem(CONTAINING_RECORD(currentExtraThreadEntry, WORK_QUEUE_ITEM, List), CriticalWorkQueue);
    }
}
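
/*
 * CcWorkerThread
 *
 * Main loop of a cache manager worker thread: drains the fast teardown,
 * express and regular work queues, dispatching read-ahead, write-behind,
 * lazy write scan and notification (SetDone) work items, and returns the
 * thread to the idle list once the queues are empty.
 */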
VOID 
NTAPI 
CcWorkerThread(PVOID Parameter)
{
    PWORK_QUEUE_ITEM WorkItem = static_cast<PWORK_QUEUE_ITEM>(Parameter);
    PGENERAL_LOOKASIDE LookasideList = nullptr;
    PSHARED_CACHE_MAP SharedMap = nullptr;
    PWORK_QUEUE_ENTRY WorkEntry = nullptr;
    PLIST_ENTRY Entry = nullptr;
    PKPRCB Prcb = nullptr;
    IO_STATUS_BLOCK IoStatus = {0};
    KIRQL OldIrql = PASSIVE_LEVEL;
    BOOLEAN DropThrottle = FALSE;
    BOOLEAN WritePerformed = FALSE;

    DBGPRINT("CcWorkerThread: WorkItem\n");

    

    /* Loop till we have jobs */
    while (TRUE)
    {
        /* Lock queues */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueWorkQueueLock);

        /* If we have to touch throttle, reset it now! */
        if (DropThrottle)
        {
            CcQueueThrottle = FALSE;
            DropThrottle = FALSE;
           //  CcReEngageWorkerThreads(CcThreadsActiveBeforeThrottle, CcExtraThreadsActiveBeforeThrottle);  Enable When Threadpool is ready 
        }

        if (IoStatus.Information == 0x8A5E)
        {
            ASSERT(Entry);

            if (WorkEntry->Function == WriteBehind)
            {
                SharedMap = WorkEntry->Parameters.Write.SharedCacheMap;
                ASSERT(Entry != &CcFastTeardownWorkQueue);
                SharedMap->WriteBehindWorkQueueEntry = WorkEntry;
            }

            InsertTailList(Entry, &WorkEntry->WorkQueueLinks);
            IoStatus.Information = 0;
        }

        /* Check if we have write to do */
        if (!IsListEmpty(&CcFastTeardownWorkQueue))
        {
            Entry = &CcFastTeardownWorkQueue;
            WorkEntry = CONTAINING_RECORD(Entry->Flink, WORK_QUEUE_ENTRY, WorkQueueLinks);

            ASSERT((WorkEntry->Function == LazyWriteScan) || (WorkEntry->Function == WriteBehind));
        }
        /* If not, check read queues */
        else if (!IsListEmpty(&CcExpressWorkQueue))
        {
            Entry = &CcExpressWorkQueue;
        }
        else if (!IsListEmpty(&CcRegularWorkQueue))
        {
            Entry = &CcRegularWorkQueue;
        }
        else
        {
            break;
        }

        
        WorkEntry = CONTAINING_RECORD(Entry->Flink, WORK_QUEUE_ENTRY, WorkQueueLinks);

        if (WorkEntry->Function == SetDone && CcNumberActiveWorkerThreads > 1)
        {
            CcQueueThrottle = TRUE;
            break;
        }

        if (WorkEntry->Function == WriteBehind)
            WorkEntry->Parameters.Write.SharedCacheMap->WriteBehindWorkQueueEntry = NULL;

        /* Remove current entry */
        RemoveHeadList(Entry);

        KeReleaseQueuedSpinLock(LockQueueWorkQueueLock, OldIrql);

        __try
        {
            switch (WorkEntry->Function)
            {
            case ReadAhead: {
                CcPerformReadAhead(WorkEntry->Parameters.Read.FileObject);
                break;
            }
            case WriteBehind: {
                WritePerformed = TRUE;
                PsGetCurrentThread()->MemoryMaker = 1;

                CcWriteBehind(WorkEntry->Parameters.Write.SharedCacheMap, &IoStatus);

                if (!NT_SUCCESS(IoStatus.Status))
                    WritePerformed = FALSE;

                PsGetCurrentThread()->MemoryMaker = 0;
                break;
            }
            case LazyWriteScan: {
                CcLazyWriteScan();
                break;
            }
            case SetDone: {
                KeSetEvent(WorkEntry->Parameters.Event.Event, IO_NO_INCREMENT, FALSE);
                DropThrottle = TRUE;
                break;
            }
            }
        }
        __except (CcExceptionFilter(GetExceptionCode()))
        {
            if (WorkEntry->Function == WriteBehind)
                PsGetCurrentThread()->MemoryMaker = 0;
        }

        if (IoStatus.Information == 0x8A5E)
            continue;

        /* Free or recycle the work entry through the lookaside lists */
        Prcb = KeGetCurrentPrcb();
        LookasideList = Prcb->PPLookasideList[5].P;
        InterlockedIncrement(&LookasideList->TotalFrees); // Use interlocked increment

        /* Return the entry to the per-processor lookaside list if it has room */
        if (LookasideList->ListHead.Depth < LookasideList->Depth)
        {
            InterlockedPushEntrySList(&LookasideList->ListHead, (PSINGLE_LIST_ENTRY)WorkEntry);
            continue;
        }

        InterlockedIncrement(&LookasideList->FreeMisses);

        LookasideList = Prcb->PPLookasideList[5].L;
        InterlockedIncrement(&LookasideList->TotalFrees);

        if (LookasideList->ListHead.Depth < LookasideList->Depth)
        {
            InterlockedPushEntrySList(&LookasideList->ListHead, (PSINGLE_LIST_ENTRY)WorkEntry);
            continue;
        }

        InterlockedIncrement(&LookasideList->FreeMisses);
        LookasideList->Free(WorkEntry);
    }
    /* No more work: return this thread to the idle list (the work queue lock is still held) */
    InsertTailList(&CcIdleWorkerThreadList, &WorkItem->List);

    CcNumberActiveWorkerThreads--;

    /* Unlock queues */
    KeReleaseQueuedSpinLock(LockQueueWorkQueueLock, OldIrql);

    
    if (!IsListEmpty(&CcDeferredWrites) && CcTotalDirtyPages >= 20)
    {
        
        if (WritePerformed)
            CcLazyWriteScan();
    }
}

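/*
 * CcAllocateWorkQueueEntry
 *
 * Allocates a WORK_QUEUE_ENTRY, first from the per-processor lookaside
 * list, then from the system-wide list, and finally from the list's
 * allocator; returns STATUS_INSUFFICIENT_RESOURCES if no entry could be
 * obtained.
 */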
NTSTATUS 
VECTORCALL 
CcAllocateWorkQueueEntry(OUT PWORK_QUEUE_ENTRY *workQueueEntry)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    PGENERAL_LOOKASIDE LookasideList = nullptr;
    PWORK_QUEUE_ENTRY WorkItem = nullptr;

    /* Allocate a work item */
    LookasideList = Prcb->PPLookasideList[5].P;
    _InterlockedIncrement(&LookasideList->TotalAllocates);

    WorkItem = (PWORK_QUEUE_ENTRY)InterlockedPopEntrySList(&LookasideList->ListHead);
    if (!WorkItem)
    {
        LookasideList->AllocateMisses++;
        LookasideList = Prcb->PPLookasideList[5].L;
        LookasideList->TotalAllocates++;

        WorkItem = (PWORK_QUEUE_ENTRY)InterlockedPopEntrySList(&LookasideList->ListHead);
        if (!WorkItem)
        {
            LookasideList->AllocateMisses++;
            WorkItem = (PWORK_QUEUE_ENTRY)LookasideList->Allocate(LookasideList->Type, LookasideList->Size,
                                                                  LookasideList->Tag);
            /* Tag the freshly allocated entry with the current processor number */
            if (WorkItem)
                WorkItem->WorkQueueLinks.Flink = (PLIST_ENTRY)(ULONG_PTR)Prcb->Number;
        }
    }

    if (!WorkItem)
    {
        DBGPRINT("CcAllocateWorkQueueEntry: STATUS_INSUFFICIENT_RESOURCES\n");
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    *workQueueEntry = WorkItem;
    return STATUS_SUCCESS;
}
/* Exported Function */

NTSTATUS CcWaitForCurrentLazyWriterActivity()
{
    NTSTATUS status;                 
    PWORK_QUEUE_ENTRY workQueueEntry = nullptr; 
    PLIST_ENTRY blink = nullptr;
    KIRQL irql = PASSIVE_LEVEL;
    KEVENT event = {0};

    /* Allocate a work queue entry*/
    status = CcAllocateWorkQueueEntry(&workQueueEntry);
    if (NT_SUCCESS(status)) // Check if the status is a success
    {
        /* Set the function of the work queue entry*/
        workQueueEntry->Function = SetDone;
        /* Initialize the event object*/
        KeInitializeEvent(&event, NotificationEvent, 0);
        /*Set the reason for the notification*/
        workQueueEntry->Parameters.Notification.Reason = (ULONG_PTR)&event;

        /* Log the enqueue if the cache manager perf group is being traced */
        if ((PerfGlobalGroupMask.Masks[4] & 0x20000) != 0)
            CcPerfLogWorkItemEnqueue(&CcPostTickWorkQueue, workQueueEntry, 0, 0);

        /* Acquire the queued spin lock */
        irql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        blink = CcPostTickWorkQueue.Blink;

        /*Enqueue the work item*/
        workQueueEntry->WorkQueueLinks.Flink = &CcPostTickWorkQueue;
        workQueueEntry->WorkQueueLinks.Blink = blink;
        blink->Flink = &workQueueEntry->WorkQueueLinks;
        CcPostTickWorkQueue.Blink = &workQueueEntry->WorkQueueLinks;

        /*Set the other work flag*/
        LazyWriter.OtherWork = 1;
        /*Increment the work item count*/
        _InterlockedIncrement(&CcPostTickWorkItemCount);
        /* Schedule Lazy Write Scan */
        CcScheduleLazyWriteScan(TRUE);
        /*Release the queued spin lock*/
        KeReleaseQueuedSpinLock(LockQueueMasterLock, irql);

        /*Wait for the single object*/
        status = KeWaitForSingleObject(&event, Executive, KernelMode, FALSE, nullptr);
        // Decrement the work item count
        _InterlockedDecrement(&CcPostTickWorkItemCount);
    }

    /*Return the status of the operation*/
    return status;
}