[NTOSKRNL:CC] Pin Support::Implemented CcSetDirtyPinnedData

Implemented CcSetDirtyPinnedData
Note: In the future, implement a specialized iterator for the BCB array and introduce
bounds checking similar to GSL span.
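
A possible shape for that follow-up (a sketch only, not part of this commit): a small
bounds-checked cursor over the OBCB's BCB array, in the spirit of gsl::span. The
BCB_SPAN type and BcbSpanNext helper are hypothetical names; only PBCB comes from the
existing code.

    /* Hypothetical bounds-checked iterator over the BCBs of an OBCB */
    typedef struct _BCB_SPAN {
        PBCB *Data;     /* first entry of the OBCB's Bcbs array */
        ULONG Count;    /* number of valid entries */
        ULONG Index;    /* current position */
    } BCB_SPAN, *PBCB_SPAN;

    static PBCB BcbSpanNext(PBCB_SPAN Span)
    {
        /* Stop at the declared bound instead of walking until a NULL terminator */
        if (Span->Index >= Span->Count)
        {
            return NULL;
        }
        return Span->Data[Span->Index++];
    }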
This commit is contained in:
Dibyamartanda Samanta 2024-08-06 13:59:37 +02:00 committed by CodingWorkshop Signing Team
parent a40c3d4200
commit 4370544c97
Signed by: CodingWorkshop Signing Team
GPG Key ID: 6DC88369C82795D2


@@ -1,7 +1,7 @@
 /*
  * PROJECT: Alcyone System Kernel
  * LICENSE: BSD Clause 3
- * PURPOSE: Cache Controller:: Lazy Writer
+ * PURPOSE: Cache Controller:: Pin Support
  * NT KERNEL: 5.11.9360
  * COPYRIGHT: 2023-2029 Dibymartanda Samanta <>
  */
@@ -60,6 +60,117 @@ CcAllocateObcb(
    return NewObcb;
}
VOID
NTAPI
CcSetDirtyPinnedData(
    IN PBCB BcbVoid,
    IN PLARGE_INTEGER Lsn OPTIONAL)
{
    PSHARED_CACHE_MAP SharedCacheMap;
    PBCB Bcb;
    PBCB* BcbArray;
    PBCB SingleBcb[2];
    KLOCK_QUEUE_HANDLE LockHandle;
    ULONG PagesToWrite;

    /* An OBCB (node type 762 == 0x2FA) carries a NULL-terminated array of BCBs;
       wrap a plain BCB in a local two-entry array so one loop handles both cases */
    if (BcbVoid->NodeTypeCode == 762)
    {
        BcbArray = &((POBCB)BcbVoid)->Bcbs[0];
    }
    else
    {
        SingleBcb[0] = BcbVoid;
        SingleBcb[1] = NULL;
        BcbArray = &SingleBcb[0];
    }

    SharedCacheMap = (*BcbArray)->SharedCacheMap;

    /* Handle logging if necessary */
    if ((SharedCacheMap->Flags & 0x1000000) != 0 &&
        SharedCacheMap->VolumeCacheMap->LogHandleContext.LastLWTimeStamp.QuadPart == -1)
    {
        KeQuerySystemTime(&SharedCacheMap->VolumeCacheMap->LogHandleContext.LastLWTimeStamp);
    }

    if (!(SharedCacheMap->Flags & 0x200))
    {
        CcSetDirtyInMask(SharedCacheMap, &BcbVoid->FileOffset, BcbVoid->PagesToWrite, NULL);
        return;
    }

    Bcb = *BcbArray;
    while (Bcb)
    {
        if ((ULONG_PTR)Bcb & 1)
        {
            KeBugCheckEx(CACHE_MANAGER, 0xE94, 0xC0000420, 0, 0);
        }

        SharedCacheMap = Bcb->SharedCacheMap;

        /* Acquire the per-map BCB lock, then the master lock at DPC level */
        KeAcquireInStackQueuedSpinLock(&SharedCacheMap->BcbLock, &LockHandle);
        KeAcquireQueuedSpinLockAtDpcLevel(&KeGetCurrentPrcb()->LockQueue[LockQueueMasterLock]);

        if (!Bcb->Dirty)
        {
            PagesToWrite = Bcb->PagesToWrite >> PAGE_SHIFT;
            Bcb->Dirty = TRUE;

            if (Lsn)
            {
                Bcb->OldestLsn = *Lsn;
                Bcb->NewestLsn = *Lsn;
            }

            /* Update dirty page statistics */
            if (!SharedCacheMap->DirtyPages && !(SharedCacheMap->Flags & PRIVATE_CACHE_MAP_FLAGS_WRITE_THROUGH))
            {
                CcScheduleLazyWriteScan(FALSE, FALSE);
                CcInsertIntoDirtySharedCacheMapList(SharedCacheMap);
            }

            CcChargeDirtyPages(SharedCacheMap, 0, 0, PagesToWrite);

            /* Handle process disk counters if necessary */
            if (SharedCacheMap->Flags & PRIVATE_CACHE_MAP_FLAGS_TRACK_DIRTY_PAGES)
            {
                PsUpdateDiskCounters(PsGetCurrentThread()->ThreadsProcess, 0, PagesToWrite << PAGE_SHIFT, 0, 1, 0);
            }
        }

        KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->LockQueue[LockQueueMasterLock]);

        /* Update LSN information */
        if (Lsn)
        {
            if (!Bcb->OldestLsn.QuadPart || Lsn->QuadPart < Bcb->OldestLsn.QuadPart)
            {
                Bcb->OldestLsn = *Lsn;
            }

            if (!Bcb->NewestLsn.QuadPart || Lsn->QuadPart > Bcb->NewestLsn.QuadPart)
            {
                Bcb->NewestLsn = *Lsn;
            }

            if (Lsn->QuadPart > SharedCacheMap->LargestLSN.QuadPart)
            {
                SharedCacheMap->LargestLSN = *Lsn;
            }
        }

        /* Update ValidDataGoal if necessary */
        if (Bcb->ResumeWritePage > SharedCacheMap->ValidDataGoal.QuadPart)
        {
            SharedCacheMap->ValidDataGoal.QuadPart = Bcb->ResumeWritePage;
        }

        /* Release the BCB lock */
        KeReleaseInStackQueuedSpinLock(&LockHandle);

        /* Move to the next BCB */
        BcbArray++;
        Bcb = *BcbArray;
    }
}
BOOLEAN
NTAPI
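
For context, the typical caller-side sequence that ends in CcSetDirtyPinnedData looks
like the sketch below. CcPinRead, CcUnpinData, and PIN_WAIT are the standard Cache
Manager pin APIs; FileObject, Offset, Length, and NewData are assumed caller state, and
error handling is omitted. This is illustrative only, not part of this commit.

    /* Hypothetical caller: rewrite 'Length' bytes of pinned metadata at 'Offset' */
    PVOID Bcb;
    PVOID Buffer;

    if (CcPinRead(FileObject, &Offset, Length, PIN_WAIT, &Bcb, &Buffer))
    {
        RtlCopyMemory(Buffer, NewData, Length);   /* modify the pinned, mapped view */
        CcSetDirtyPinnedData(Bcb, NULL);          /* Lsn is optional; log-based file systems pass the record LSN */
        CcUnpinData(Bcb);                         /* dirty data is flushed later by the lazy writer */
    }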