[NTOSKRNL:CC] Implementation of Lazy Writer

Implemented the following functions:
* CcPostWorkQueue
* CcScheduleLazyWriteScanEx
* CcScheduleLazyWriteScan
* CcExceptionFilter
* CcPerformReadAhead
* CcPostDeferredWrites
* CcSetValidData
* CcWriteBehind
* CcLazyWriteScan
Dibyamartanda Samanta 2024-05-21 11:38:04 +02:00
parent 189cf42f74
commit 3ff9824c8b


@@ -0,0 +1,862 @@
/*
* PROJECT: Alcyone System Kernel
* LICENSE: BSD-3-Clause
* PURPOSE: Cache Controller: Lazy Writer
* NT KERNEL: 5.11.9360
* COPYRIGHT: 2023-2029 Dibymartanda Samanta <>
*/
#include <ntoskrnl.h>
#define NTDEBUG
#include <debug.h>
#include "ccinternal.hpp"
#include "cclazywriter.hpp"
extern "C"
/*Internal Function*/
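/* CcPostWorkQueue
* Inserts a work item into the given cache-manager work queue and, if the
* queue is not throttled and an idle worker thread is available, dequeues
* that worker and activates it through the executive critical work queue.
*/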
VOID
VECTORCALL
CcPostWorkQueue(IN PWORK_QUEUE_ENTRY WorkItem,
IN PLIST_ENTRY WorkQueue)
{
PWORK_QUEUE_ITEM ThreadToSpawn = nullptr;
PLIST_ENTRY ListEntry = nullptr;
KIRQL CurrentIrql;
/* Acquire the spin lock and insert the work item into the queue */
CurrentIrql = KeAcquireQueuedSpinLock(LockQueueWorkQueueLock);
InsertTailList(WorkQueue, &WorkItem->WorkQueueLinks);
/* Check whether a new worker thread should be spawned */
if (!CcQueueThrottle && !IsListEmpty(&CcIdleWorkerThreadList))
{
ListEntry = RemoveHeadList(&CcIdleWorkerThreadList);
ThreadToSpawn = CONTAINING_RECORD(ListEntry, WORK_QUEUE_ITEM, List);
/* Update the number of Active Worker Thread */
_InterlockedIncrement(&CcNumberActiveWorkerThreads);
}
KeReleaseQueuedSpinLock(LockQueueWorkQueueLock, CurrentIrql);
/* If an idle worker was dequeued, activate it now that the spin lock is released */
if (ThreadToSpawn)
{
DBGPRINT("CcPostWorkQueue: Spawning worker thread after spin lock release\n");
ThreadToSpawn->List.Flink = nullptr;
ExQueueWorkItem(ThreadToSpawn, CriticalWorkQueue);
}
}
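/* CcScheduleLazyWriteScanEx
* Arms the lazy writer scan timer: with no delay when NoDelay is set, with
* the idle delay when a scan is already active, and with the first delay
* when the scan has to be started fresh.
*/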
VOID
NTAPI
CcScheduleLazyWriteScanEx(IN BOOLEAN NoDelay, IN BOOLEAN PendingTeardown)
{
/* If no delay was requested, start the lazy writer scan immediately */
if (NoDelay)
{
LazyWriter.ScanActive = TRUE;
/* Remember that a teardown is pending */
if (PendingTeardown)
{
LazyWriter.PendingTeardown = TRUE;
}
KeSetTimer(&LazyWriter.ScanTimer, CcNoDelay, &LazyWriter.ScanDpc);
}
/* If the scan is already active, re-arm the timer with the idle delay */
else if (LazyWriter.ScanActive)
{
KeSetTimer(&LazyWriter.ScanTimer, CcIdleDelay, &LazyWriter.ScanDpc);
}
/* Otherwise the scan is not running yet: start it after the first delay */
else
{
LazyWriter.ScanActive = TRUE;
KeSetTimer(&LazyWriter.ScanTimer, CcFirstDelay, &LazyWriter.ScanDpc);
}
}
VOID
NTAPI
CcScheduleLazyWriteScan(IN BOOLEAN NoDelay)
{
CcScheduleLazyWriteScanEx(NoDelay, FALSE);
}
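/* CcExceptionFilter
* Structured exception filter for cache-manager worker routines: expected
* NTSTATUS values are handled in place, anything else continues the unwind.
*/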
LONG
VECTORCALL
CcExceptionFilter(IN NTSTATUS Status)
{
LONG Result;
/* Expected status codes are absorbed by the handler; anything else continues the unwind */
if (FsRtlIsNtstatusExpected(Status))
Result = EXCEPTION_EXECUTE_HANDLER;
else
Result = EXCEPTION_CONTINUE_SEARCH;
DBGPRINT("CcExceptionFilter: Status %X\n", Status);
return Result;
}
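/* CcPerformReadAhead
* Read-ahead worker: drains the pending read-ahead ranges from the file's
* private cache map, maps each range through VACBs and touches every page to
* fault it in, then drops the references taken when the read-ahead was queued
* and, if the shared cache map became idle, hands it to the lazy writer for
* teardown.
*/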
VOID
VECTORCALL
CcPerformReadAhead(IN PFILE_OBJECT FileObject)
{
PETHREAD CurrentThread = PsGetCurrentThread();
PSHARED_CACHE_MAP SharedCacheMap;
PPRIVATE_CACHE_MAP PrivateCacheMap;
PVACB Vacb = nullptr;
BOOLEAN LengthIsZero = FALSE;
BOOLEAN PerformedReadAhead = FALSE;
BOOLEAN ReadAheadLockHeld = FALSE;
BOOLEAN ReadAheadFinished = FALSE;
PVOID VirtualAddress = nullptr;
LARGE_INTEGER FileOffset = {0};
LARGE_INTEGER readAheadOffset[2];
ULONG readAheadLength[2];
ULONG IsPageNotResident = 0;
ULONG ReceivedLength = 0;
ULONG NumberOfPages = 0;
ULONG Length = 0;
ULONG OldReadClusterSize = 0;
UCHAR OldForwardClusterOnly = 0;
KIRQL LastIrql = 0;
/* Save the thread's read-cluster settings so they can be restored on exit */
OldForwardClusterOnly = CurrentThread->ForwardClusterOnly;
OldReadClusterSize = CurrentThread->ReadClusterSize;
SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
_TRY
{
while (TRUE)
{
LastIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
PrivateCacheMap = FileObject->PrivateCacheMap;
/* If the private cache map is still valid, capture the pending read-ahead ranges and clear them */
if (PrivateCacheMap != nullptr)
{
KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
LengthIsZero = !(PrivateCacheMap->ReadAheadLength[0] | PrivateCacheMap->ReadAheadLength[1]);
readAheadOffset[0].QuadPart = PrivateCacheMap->ReadAheadOffset[0].QuadPart;
readAheadOffset[1].QuadPart = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
readAheadLength[0] = PrivateCacheMap->ReadAheadLength[0];
readAheadLength[1] = PrivateCacheMap->ReadAheadLength[1];
PrivateCacheMap->ReadAheadLength[0] = 0;
PrivateCacheMap->ReadAheadLength[1] = 0;
KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
}
KeReleaseQueuedSpinLock(LockQueueMasterLock, LastIrql);
ReadAheadLockHeld = (*SharedCacheMap->Callbacks->AcquireForReadAhead)(SharedCacheMap->LazyWriteContext, TRUE);
if (!PrivateCacheMap || LengthIsZero || !ReadAheadLockHeld)
break;
for (auto i = 0; i <= 1; i++)
{
FileOffset = readAheadOffset[i];
Length = readAheadLength[i];
/* Only read ahead if the range starts within the file */
if (Length && FileOffset.QuadPart <= SharedCacheMap->FileSize.QuadPart)
{
PerformedReadAhead = TRUE;
/* Clip the range to the end of the file */
if (SharedCacheMap->FileSize.QuadPart <= (FileOffset.QuadPart + (LONGLONG)Length))
{
Length = (ULONG)(SharedCacheMap->FileSize.QuadPart - FileOffset.QuadPart);
ReadAheadFinished = TRUE;
}
if (Length > 0x800000)
Length = 0x800000; // TODO: Make a define and move to header
while (Length != 0)
{
VirtualAddress = CcGetVirtualAddress(SharedCacheMap, FileOffset, &Vacb, &ReceivedLength);
if (ReceivedLength > Length)
ReceivedLength = Length;
/* Touch every page of the mapped range so it gets faulted in, clustering forward */
for (NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(VirtualAddress, ReceivedLength);
NumberOfPages; NumberOfPages--)
{
CurrentThread->ForwardClusterOnly = 1;
if (NumberOfPages > 0x10)
CurrentThread->ReadClusterSize = 0xF;
else
CurrentThread->ReadClusterSize = (NumberOfPages - 1);
IsPageNotResident |= !MmCheckCachedPageState(VirtualAddress, FALSE);
VirtualAddress = static_cast<PUCHAR>(VirtualAddress) + PAGE_SIZE;
}
/* Advance past the pages just brought in */
FileOffset.QuadPart += ReceivedLength;
Length -= ReceivedLength;
CcFreeVirtualAddress(Vacb);
Vacb = nullptr;
}
}
}
(*SharedCacheMap->Callbacks->ReleaseFromReadAhead)(SharedCacheMap->LazyWriteContext);
ReadAheadLockHeld = FALSE;
}
}
_FINALLY
{
/* Restore the thread's read-cluster settings */
CurrentThread->ForwardClusterOnly = OldForwardClusterOnly;
CurrentThread->ReadClusterSize = OldReadClusterSize;
if (Vacb)
CcFreeVirtualAddress(Vacb);
/* If the read-ahead callback lock is still held, release it */
if (ReadAheadLockHeld)
(*SharedCacheMap->Callbacks->ReleaseFromReadAhead)(SharedCacheMap->LazyWriteContext);
LastIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
PrivateCacheMap = FileObject->PrivateCacheMap;
if (PrivateCacheMap)
{
KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
RtlInterlockedAndBits(&PrivateCacheMap->UlongFlags, ~SHARE_FL_WAITING_TEARDOWN);
/* For sequential-only files, rewind the trailing read-ahead offset once EOF was reached */
if (ReadAheadFinished && (FileObject->Flags & FO_SEQUENTIAL_ONLY))
PrivateCacheMap->ReadAheadOffset[1].QuadPart = 0;
if (PerformedReadAhead && !IsPageNotResident)
RtlInterlockedAndBits(&PrivateCacheMap->UlongFlags, ~0x20000);
KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
}
KeReleaseQueuedSpinLock(LockQueueMasterLock, LastIrql);
ObDereferenceObject(FileObject);
LastIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
_InterlockedDecrement(&SharedCacheMap->OpenCount);
SharedCacheMap->Flags &= ~SHARE_FL_READ_AHEAD;
/* If the cache map is no longer referenced and clean, hand it to the lazy writer for teardown */
if (!SharedCacheMap->OpenCount && !(SharedCacheMap->Flags & SHARE_FL_WRITE_QUEUED) && !SharedCacheMap->DirtyPages)
{
RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
InsertTailList(&CcDirtySharedCacheMapList.SharedCacheMapLinks, &SharedCacheMap->SharedCacheMapLinks);
LazyWriter.OtherWork = TRUE;
if (!LazyWriter.ScanActive)
CcScheduleLazyWriteScan(FALSE);
}
KeReleaseQueuedSpinLock(LockQueueMasterLock, LastIrql);
}
}
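/* CcPostDeferredWrites
* Walks the deferred-write list and, for each entry whose bytes can now be
* written (per CcCanIWrite), either signals the waiter's event or runs the
* entry's post routine; stops as soon as no further entry can be posted.
*/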
VOID
NTAPI
CcPostDeferredWrites(VOID)
{
PDEFERRED_WRITE DeferredWrites = nullptr;
PLIST_ENTRY Entry = nullptr;
ULONG BytesToWrite = {0};
KIRQL OldIrql = {0};
while (TRUE)
{
/* Acquire the spin lock protecting the deferred write list */
KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);
/* Nothing left to post: release the lock and leave */
if (IsListEmpty(&CcDeferredWrites))
{
KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);
break;
}
DeferredWrites = nullptr;
Entry = CcDeferredWrites.Flink;
/* Walk the list until an entry can be posted or the head is reached again */
while (Entry != &CcDeferredWrites)
{
DeferredWrites = CONTAINING_RECORD(Entry, DEFERRED_WRITE, DeferredWriteLinks);
/* Accumulate the total bytes to write */
BytesToWrite += DeferredWrites->BytesToWrite;
/* The accumulated byte count must not overflow */
if (BytesToWrite < DeferredWrites->BytesToWrite)
{
DeferredWrites = nullptr;
break;
}
/* If writing to the file object is now possible, pop the entry from the list */
if (CcCanIWrite(DeferredWrites->FileObject, BytesToWrite, FALSE, 0xFE))
{
RemoveEntryList(&DeferredWrites->DeferredWriteLinks);
break;
}
/* Entries that do not limit modified pages must be posted in order: stop here */
if (!DeferredWrites->LimitModifiedPages)
{
DeferredWrites = nullptr;
break;
}
/* Back out this entry's bytes and move to the next entry in the list */
BytesToWrite -= DeferredWrites->BytesToWrite;
DeferredWrites = nullptr;
Entry = Entry->Flink;
}
/* Release the spin lock */
KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);
/* If no entry could be posted, get out of the loop */
if (!DeferredWrites)
break;
/* Wake a waiting writer through its event... */
if (DeferredWrites->Event)
{
KeSetEvent(DeferredWrites->Event, IO_NO_INCREMENT, FALSE);
continue;
}
/* ...or run the post routine and free the entry */
DeferredWrites->PostRoutine(DeferredWrites->Context1, DeferredWrites->Context2);
ExFreePoolWithTag(DeferredWrites, 0);
}
}
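/* CcSetValidData
* Issues a synchronous IRP_MJ_SET_INFORMATION paging request
* (FileEndOfFileInformation with AdvanceOnly set) so the file system
* advances the file's valid data length on disk.
*/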
NTSTATUS
NTAPI
CcSetValidData(IN PFILE_OBJECT FileObject,
IN PLARGE_INTEGER FileSize)
{
FILE_END_OF_FILE_INFORMATION FileInfo; // Structure to hold end-of-file information
PDEVICE_OBJECT DeviceObject; // Pointer to the device object associated with the file
PIO_STACK_LOCATION IoStack; // Pointer to an I/O stack location structure
IO_STATUS_BLOCK IoStatus; // I/O status block structure
KEVENT Event; // Kernel event object for synchronization
PIRP Irp; // Pointer to an I/O request packet structure
NTSTATUS Status; // Status of the operation
/*Initialize a kernel event object for synchronization */
KeInitializeEvent(&Event, NotificationEvent, FALSE);
/* Retrieve the device object associated with the file*/
DeviceObject = IoGetRelatedDeviceObject(FileObject);
/* Initialize FileInfo structure with the new file size */
FileInfo.EndOfFile.QuadPart = FileSize->QuadPart;
/* Allocate an I/O request packet (IRP) */
Irp = IoAllocateIrp(DeviceObject->StackSize, 0);
if (!Irp)
{
return STATUS_INSUFFICIENT_RESOURCES;
}
/* Set IRP parameters */
Irp->AssociatedIrp.SystemBuffer = &FileInfo;
Irp->UserIosb = &IoStatus;
Irp->UserEvent = &Event;
Irp->Flags = (IRP_SYNCHRONOUS_PAGING_IO | IRP_PAGING_IO);
Irp->RequestorMode = KernelMode;
Irp->Tail.Overlay.Thread = PsGetCurrentThread();
Irp->Tail.Overlay.OriginalFileObject = FileObject;
/* Get the next IRP stack location and set up IRP parameters */
IoStack = IoGetNextIrpStackLocation(Irp);
IoStack->MajorFunction = IRP_MJ_SET_INFORMATION;
IoStack->FileObject = FileObject;
IoStack->DeviceObject = DeviceObject;
IoStack->Parameters.SetFile.Length = sizeof(FILE_END_OF_FILE_INFORMATION);
IoStack->Parameters.SetFile.FileInformationClass = FileEndOfFileInformation;
IoStack->Parameters.SetFile.FileObject = nullptr;
IoStack->Parameters.SetFile.AdvanceOnly = 1;
/* Call the device driver to handle the IRP */
Status = IoCallDriver(DeviceObject, Irp);
/*If the operation is pending, wait for it to complete*/
if (Status == STATUS_PENDING)
KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, nullptr);
/* If the driver failed the request synchronously, propagate that status */
if (!NT_SUCCESS(Status))
{
IoStatus.Status = Status;
}
/* Return the final I/O status of the operation */
return IoStatus.Status;
}
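/* CcWriteBehind
* Lazy writer work routine for a single shared cache map: flushes dirty data
* through CcFlushCache, advances ValidDataLength when needed, raises a
* "lost write-behind data" popup on unexpected failures, and tears the map
* down once the last reference is gone.
*/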
VOID
VECTORCALL
CcWriteBehind(IN PSHARED_CACHE_MAP SharedMap,
IN IO_STATUS_BLOCK * OutIoStatus)
{
KLOCK_QUEUE_HANDLE LockHandle;
LARGE_INTEGER validDataLength = {0};
ULONG ActivePage = {0};
ULONG TargetPages = {0};
KIRQL OldIrql = {0};
BOOLEAN IsVacbLocked = FALSE;
NTSTATUS Status;
PVACB ActiveVacb = nullptr;
PUNICODE_STRING FileName = nullptr;
/*Acquire lazy writer's lock for lazy write*/
if (!(*SharedMap->Callbacks->AcquireForLazyWrite)(SharedMap->LazyWriteContext, TRUE))
{
/* If acquisition fails, release LockQueueMasterLock and set STATUS_FILE_LOCK_CONFLICT */
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
SharedMap->Flags &= ~0x20; // Clear the flag indicating wait for Lazy Writer
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
OutIoStatus->Status = STATUS_FILE_LOCK_CONFLICT;
return;
}
/* Acquire BcbSpinLock and LockQueueMasterLock */
KeAcquireInStackQueuedSpinLock(&SharedMap->BcbSpinLock, &LockHandle);
KeAcquireQueuedSpinLockAtDpcLevel(&KeGetCurrentPrcb()->LockQueue[LockQueueMasterLock]);
/*Determine if there are few dirty pages or no open references */
if (SharedMap->DirtyPages <= 1 || !SharedMap->OpenCount)
{
/*Acquire ActiveVacbSpinLock to handle ActiveVacb*/
KeAcquireSpinLockAtDpcLevel(&SharedMap->ActiveVacbSpinLock);
/*Retrieve ActiveVacb and related information if it exists*/
ActiveVacb = SharedMap->ActiveVacb;
if (ActiveVacb)
{
ActivePage = SharedMap->ActivePage;
SharedMap->ActiveVacb = nullptr;
IsVacbLocked = ((SharedMap->Flags & SHARE_FL_VACB_LOCKED) != 0);
}
/*Release ActiveVacbSpinLock*/
KeReleaseSpinLockFromDpcLevel(&SharedMap->ActiveVacbSpinLock);
}
/* Take an extra reference on the cache map while writing behind */
_InterlockedIncrement(&SharedMap->OpenCount);
/* Update Mbcb page counts if it exists */
if (SharedMap->Mbcb)
{
TargetPages = SharedMap->Mbcb->DirtyPages;
/* The active VACB accounts for one more dirty page */
if (ActiveVacb)
TargetPages++;
/*Determine PagesToWrite based on CcPagesYetToWrite and TargetPages*/
if (TargetPages > CcPagesYetToWrite)
SharedMap->Mbcb->PagesToWrite = CcPagesYetToWrite;
else
SharedMap->Mbcb->PagesToWrite = TargetPages;
}
/*Release LockQueueMasterLock and BcbSpinLock*/
KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->LockQueue[LockQueueMasterLock]);
KeReleaseInStackQueuedSpinLock(&LockHandle);
/* Free ActiveVacb if it exists*/
if (ActiveVacb)
CcFreeActiveVacb(SharedMap, ActiveVacb, ActivePage, IsVacbLocked);
/* Flush cache to disk*/
CcFlushCache(SharedMap->FileObject->SectionObjectPointer, &CcNoDelay, 1, OutIoStatus);
/*Release lazy writer's lock*/
(*SharedMap->Callbacks->ReleaseFromLazyWrite)(SharedMap->LazyWriteContext);
/* On unexpected failure, raise a popup naming the file whose write-behind data was lost */
if (!NT_SUCCESS(OutIoStatus->Status) && OutIoStatus->Status != STATUS_VERIFY_REQUIRED &&
OutIoStatus->Status != STATUS_FILE_LOCK_CONFLICT &&
OutIoStatus->Status != STATUS_ENCOUNTERED_WRITE_IN_PROGRESS)
{
/* Prepare for error notification and log*/
POBJECT_NAME_INFORMATION FileNameInfo = nullptr;
NTSTATUS status;
/* Query the file's DOS device name for the error popup */
status = IoQueryFileDosDeviceName(SharedMap->FileObject, &FileNameInfo);
if (status == STATUS_SUCCESS)
{
IoRaiseInformationalHardError(STATUS_LOST_WRITEBEHIND_DATA, &FileNameInfo->Name, nullptr);
}
else
{
FileName = &SharedMap->FileObject->FileName;
if (FileName->Length && FileName->MaximumLength && FileName->Buffer)
IoRaiseInformationalHardError(STATUS_LOST_WRITEBEHIND_DATA, FileName, nullptr);
}
if (FileNameInfo)
ExFreePoolWithTag(FileNameInfo, 0);
}
else if (!IsListEmpty(&CcDeferredWrites))
{
/*If deferred writes exist, process them */
CcPostDeferredWrites();
}
/*Acquire BcbSpinLock */
KeAcquireInStackQueuedSpinLock(&SharedMap->BcbSpinLock, &LockHandle);
/* Update ValidDataLength if necessary*/
Status = STATUS_SUCCESS;
if (SharedMap->Flags & (0x400 | 0x8000))
{
if (SharedMap->ValidDataLength.QuadPart <= SharedMap->ValidDataGoal.QuadPart &&
SharedMap->ValidDataLength.QuadPart != 0x7FFFFFFFFFFFFFFF && SharedMap->FileSize.QuadPart)
{
/* Get flushed valid data and set ValidDataLength if necessary */
validDataLength = CcGetFlushedValidData(SharedMap->FileObject->SectionObjectPointer, TRUE);
if (validDataLength.QuadPart >= SharedMap->ValidDataLength.QuadPart)
{
KeReleaseInStackQueuedSpinLock(&LockHandle);
Status = CcSetValidData(SharedMap->FileObject, &validDataLength);
KeAcquireInStackQueuedSpinLock(&SharedMap->BcbSpinLock, &LockHandle);
if (NT_SUCCESS(Status))
SharedMap->ValidDataLength = validDataLength;
}
}
}
/* Release BcbSpinLock*/
KeReleaseInStackQueuedSpinLock(&LockHandle);
/* Perform cleanup tasks*/
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
InterlockedDecrement(&SharedMap->OpenCount);
LockHandle.OldIrql = OldIrql;
/* Check if there are any remaining open references*/
if (SharedMap->OpenCount)
goto Exit;
/*Cleanup if no remaining open references*/
if (NT_SUCCESS(Status) ||
(Status != STATUS_INSUFFICIENT_RESOURCES && Status != STATUS_VERIFY_REQUIRED &&
Status != STATUS_FILE_LOCK_CONFLICT && Status != STATUS_ENCOUNTERED_WRITE_IN_PROGRESS))
{
/* Release LockQueueMasterLock, acquire file object lock, and recheck open count*/
KeReleaseQueuedSpinLock(LockQueueMasterLock, LockHandle.OldIrql);
FsRtlAcquireFileExclusive(SharedMap->FileObject);
LockHandle.OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
/*Delete SharedCacheMap if no open references remain*/
if (!SharedMap->OpenCount)
{
if (!SharedMap->DirtyPages ||
(!SharedMap->FileSize.QuadPart && !(SharedMap->Flags & SHARE_FL_PIN_ACCESS)))
{
CcDeleteSharedCacheMap(SharedMap, LockHandle.OldIrql, TRUE);
OutIoStatus->Information = 0;
return;
}
}
/*Release LockQueueMasterLock and file object lock */
KeReleaseQueuedSpinLock(LockQueueMasterLock, LockHandle.OldIrql);
FsRtlReleaseFile(SharedMap->FileObject);
LockHandle.OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
}
Exit:
/* Clear the waiting-flush flag unless the write was requeued (0x8A5E, the CC_REQUEUE marker) */
if (OutIoStatus->Information != 0x8A5E)
SharedMap->Flags &= ~0x20;
/* Release LockQueueMasterLock*/
KeReleaseQueuedSpinLock(LockQueueMasterLock, LockHandle.OldIrql);
}
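/* CcLazyWriteScan
* Periodic lazy writer scan: computes how many dirty pages to write on this
* pass, walks the shared cache map list from the lazy writer cursor, queues
* a write-behind work item for every map with work to do, and finally
* reschedules or deactivates itself depending on the remaining work.
*/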
VOID
NTAPI
CcLazyWriteScan(VOID)
{
PSHARED_CACHE_MAP FirstMap = nullptr;
PSHARED_CACHE_MAP SharedMap = nullptr;
PGENERAL_LOOKASIDE LookasideList = nullptr;
PWORK_QUEUE_ENTRY WorkItem = nullptr;
PLIST_ENTRY ListEntry = nullptr;
PLIST_ENTRY MapLinks = nullptr;
PKPRCB Prcb = nullptr;
LIST_ENTRY PostWorkList = {0};
/* Scan scheduling state consumed by CcComputeNextScanTime below */
LARGE_INTEGER OldestLWSTimeStamp = {0};
LARGE_INTEGER NextScanDelay = {0};
ULONG TargetPages = {0};
ULONG NextTargetPages;
ULONG DirtyPages;
ULONG counter = 0;
BOOLEAN IsNoPagesToWrite = FALSE;
BOOLEAN IsDoubleScan = FALSE;
KIRQL OldIrql;
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
// Check if there is no dirty page and no other work pending
if (!CcTotalDirtyPages && !LazyWriter.OtherWork)
{
// Check if there are deferred writes pending
if (!IsListEmpty(&CcDeferredWrites))
{
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
CcPostDeferredWrites();
CcScheduleLazyWriteScan(FALSE);
}
else
{
// No deferred writes pending, mark lazy write scan as inactive
LazyWriter.ScanActive = FALSE;
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}
return;
}
InitializeListHead(&PostWorkList);
// Move items from tick work queue to post work list
while (!IsListEmpty(&CcPostTickWorkQueue))
{
ListEntry = RemoveHeadList(&CcPostTickWorkQueue);
InsertTailList(&PostWorkList, ListEntry);
}
LazyWriter.OtherWork = FALSE;
// Calculate target pages based on dirty pages count
if (CcTotalDirtyPages > 8)
TargetPages = (CcTotalDirtyPages / 8);
else
TargetPages = CcTotalDirtyPages;
DirtyPages = (CcTotalDirtyPages + CcPagesWrittenLastTime);
// Calculate next target pages for scanning
if (CcDirtyPagesLastScan < DirtyPages)
NextTargetPages = (DirtyPages - CcDirtyPagesLastScan);
else
NextTargetPages = 0;
NextTargetPages += (CcTotalDirtyPages - TargetPages);
if (NextTargetPages > CcDirtyPageTarget)
TargetPages += (NextTargetPages - CcDirtyPageTarget);
CcDirtyPagesLastScan = CcTotalDirtyPages;
CcPagesWrittenLastTime = TargetPages;
CcPagesYetToWrite = TargetPages;
// Get the first shared cache map to start the scan
SharedMap =
CONTAINING_RECORD(CcLazyWriterCursor.SharedCacheMapLinks.Flink, SHARED_CACHE_MAP, SharedCacheMapLinks);
while (SharedMap != FirstMap)
{
MapLinks = &SharedMap->SharedCacheMapLinks;
if (MapLinks == &CcLazyWriterCursor.SharedCacheMapLinks)
break;
if (!FirstMap)
FirstMap = SharedMap;
// Check if we need to skip to the next map
if (IsGoToNextMap(SharedMap, TargetPages))
{
counter++;
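/* Every 20 skipped maps, briefly drop and reacquire the master lock so other
processors can get in; the extra dirty-page reference and the 0x20 flag pin
the map while the lock is dropped */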
if (counter >= 20 && !(SharedMap->Flags & (0x20 | 0x800)))
{
SharedMap->DirtyPages++;
SharedMap->Flags |= 0x20;
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
counter = 0;
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
SharedMap->Flags &= ~0x20;
SharedMap->DirtyPages--;
}
goto NextMap;
}
SharedMap->PagesToWrite = SharedMap->DirtyPages;
// Adjust pages to write based on certain conditions
if ((SharedMap->Flags & SHARE_FL_MODIFIED_NO_WRITE) && SharedMap->DirtyPages >= 0x40 &&
CcCapturedSystemSize != MmSmallSystem)
{
SharedMap->PagesToWrite /= 8;
}
if (!IsNoPagesToWrite)
{
if (TargetPages > SharedMap->PagesToWrite)
{
TargetPages -= SharedMap->PagesToWrite;
}
else if ((SharedMap->Flags & SHARE_FL_MODIFIED_NO_WRITE) ||
(FirstMap == SharedMap && !(SharedMap->LazyWritePassCount & 0xF)))
{
TargetPages = 0;
IsNoPagesToWrite = TRUE;
IsDoubleScan = TRUE;
}
else
{
RemoveEntryList(&CcLazyWriterCursor.SharedCacheMapLinks);
InsertTailList(MapLinks, &CcLazyWriterCursor.SharedCacheMapLinks);
TargetPages = 0;
IsNoPagesToWrite = TRUE;
}
}
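// Mark the map write-queued (0x20) and pin it with a dirty-page reference before dropping the lock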
SharedMap->Flags |= 0x20;
SharedMap->DirtyPages++;
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
// Acquire a work item for writing behind
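// Try the per-processor lookaside (P) first, then the system-wide list (L), then the list's allocator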
Prcb = KeGetCurrentPrcb();
LookasideList = Prcb->PPLookasideList[5].P;
LookasideList->TotalAllocates++;
WorkItem = (PWORK_QUEUE_ENTRY)InterlockedPopEntrySList(&LookasideList->ListHead);
if (!WorkItem)
{
LookasideList->AllocateMisses++;
LookasideList = Prcb->PPLookasideList[5].L;
LookasideList->TotalAllocates++;
WorkItem = (PWORK_QUEUE_ENTRY)InterlockedPopEntrySList(&LookasideList->ListHead);
if (!WorkItem)
{
LookasideList->AllocateMisses++;
WorkItem = LookasideList->Allocate(LookasideList->Type, LookasideList->Size, LookasideList->Tag);
}
}
// Check if work item is available
if (!WorkItem)
{
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
SharedMap->Flags &= ~0x20;
SharedMap->DirtyPages--;
break;
}
// Initialize work item for write behind function
WorkItem->Function = WriteBehind;
WorkItem->Parameters.Write.SharedCacheMap = SharedMap;
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
SharedMap->DirtyPages--;
// Post work item to appropriate queue
if (SharedMap->Flags & SHARE_FL_WAITING_TEARDOWN)
{
SharedMap->WriteBehindWorkQueueEntry = (PVOID)((ULONG_PTR)WorkItem | 1);
CcPostWorkQueue(WorkItem, &CcFastTeardownWorkQueue);
}
else
{
SharedMap->WriteBehindWorkQueueEntry = WorkItem;
CcPostWorkQueue(WorkItem, &CcRegularWorkQueue);
}
counter = 0;
NextMap:
SharedMap = CONTAINING_RECORD(MapLinks->Flink, SHARED_CACHE_MAP, SharedCacheMapLinks);
if (IsDoubleScan)
{
RemoveEntryList(&CcLazyWriterCursor.SharedCacheMapLinks);
InsertHeadList(MapLinks, &CcLazyWriterCursor.SharedCacheMapLinks);
IsDoubleScan = FALSE;
}
}
// Post remaining work items to regular work queue
while (!IsListEmpty(&PostWorkList))
{
PWORK_QUEUE_ENTRY workItem;
PLIST_ENTRY entry;
entry = RemoveHeadList(&PostWorkList);
workItem = CONTAINING_RECORD(entry, WORK_QUEUE_ENTRY, WorkQueueLinks);
CcPostWorkQueue(workItem, &CcRegularWorkQueue);
}
CcComputeNextScanTime(&OldestLWSTimeStamp, &NextScanDelay);
if (!IsListEmpty(&PostWorkList) || !IsListEmpty(&CcDeferredWrites) || MmRegistryStatus.ProductStatus ||
NextScanDelay.QuadPart != 0x7FFFFFFFFFFFFFFF)
{
/* Schedule the next lazy write scan */
CcRescheduleLazyWriteScan(&NextScanDelay);
/* If the forced-disable flag was set, clear it */
if (CcForcedDisableLazywriteScan)
{
CcForcedDisableLazywriteScan = FALSE;
}
}
else
{
/* No work left: force-disable and deactivate the scan */
CcForcedDisableLazywriteScan = TRUE;
LazyWriter.ScanActive = FALSE;
}
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
// Check if there are deferred writes pending
if (!IsListEmpty(&CcDeferredWrites))
{
CcPostDeferredWrites();
CcScheduleLazyWriteScan(FALSE);
}
}
} /* extern "C" */