/*
 * PROJECT:     Alcyone Kernel
 * LICENSE:     BSD-3-Clause
 * PURPOSE:     Cache Controller: Log Handler
 * NT KERNEL:   5.11.9360
 * COPYRIGHT:   2023-2029 Dibymartanda Samanta <>
 */

#include <ntoskrnl.h>
#define NTDEBUG
#include <debug.h>

#include "ccinternal.hpp"
#include "ccloghandler.hpp"

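/*
 * CcGetDirtyPages
 *
 * Walks the dirty logged stream list and, for every shared cache map that
 * is associated with the given LogHandle, invokes DirtyPageRoutine for each
 * dirty BCB of that stream. Returns the oldest (smallest non-zero) LSN
 * reported across all callbacks, or zero if none was found.
 */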
extern "C"
|
|
|
|
LARGE_INTEGER
|
|
NTAPI
|
|
CcGetDirtyPages(IN PVOID LogHandle,
|
|
IN PDIRTY_PAGE_ROUTINE DirtyPageRoutine,
|
|
IN PVOID Context1,
|
|
IN PVOID Context2)
|
|
{
|
|
__try
|
|
{
|
|
KIRQL OldIrql = {0};
|
|
PSHARED_CACHE_MAP SharedCacheMap = nullptr;
|
|
PFILE_OBJECT *FileObject = nullptr;
|
|
PCC_BCB Bcb = nullptr,
|
|
PCC_BCB BcbToUnpin = nullptr;
|
|
PLIST_ENTRY Entry = &CcDirtyLoggedStreamLinks;
|
|
LARGE_INTEGER OldestLsn = {0};
|
|
LARGE_INTEGER Result = {0};
|
|
BOOLEAN FirstIteration = TRUE;
|
|
|
|
// Acquire Master Lock
|
|
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
|
|
|
|
// Iterate through all Dirty Shared Cache Maps
|
|
for (auto it = ListEntryIterator::begin(Entry); it != ListEntryIterator::end(Entry); ++it)
|
|
{
|
|
SharedCacheMap = CONTAINING_RECORD(*it, SHARED_CACHE_MAP, LoggedStreamLinks);
|
|
|
|
// Check flags and validate LogHandle
|
|
if ((SharedCacheMap->Flags & 0x800) != 0)
|
|
{
|
|
DBGPRINT("CcGetDirtyPages::Invalid Flag \n")
|
|
return (LARGE_INTEGER){0};
|
|
}
|
|
|
|
if ((SharedCacheMap->Flags & 0x2000000) != 0 && SharedCacheMap->LogHandle == LogHandle)
|
|
{
|
|
SharedCacheMap->OpenCount++;
|
|
SharedCacheMap->DirtyPages++;
|
|
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
|
|
|
|
/* Reference the shared cache map file object*/
|
|
FileObject = CcReferenceSharedCacheMapFileObject(SharedCacheMap);
|
|
|
|
/* Acquire the BCB Lock */
|
|
KLOCK_QUEUE_HANDLE LockHandle;
|
|
KeAcquireInStackQueuedSpinLock(&SharedCacheMap->BcbLock, &LockHandle);
|
|
|
|
/* Iterate over BCB List */
|
|
for (auto it = ListEntryIterator::begin(&SharedCacheMap->BcbList); it != ListEntryIterator::end(&SharedCacheMap->BcbList); ++it)
|
|
{
|
|
Bcb = CONTAINING_RECORD(*it, CC_BCB, BcbLinks);
|
|
|
|
/* Ensure the BCB is marked as dirty */
|
|
if (Bcb->NodeTypeCode == NODE_TYPE_DIRTY_BCB && Bcb->NodeByteCode)
|
|
{ LocalFileOffset = Bcb->BaseData;
|
|
LocalOldestLsn = Bcb->OldestLsn;
|
|
LocalNewestLsn = Bcb->NewestLsn;
|
|
|
|
/*Increment the PinCount to keep the BCB pinned*/
|
|
Bcb->PinCount++;
|
|
|
|
/*Release the BCB lock temporarily for the callback*/
|
|
KeReleaseInStackQueuedSpinLock(&LockHandle);
|
|
|
|
/*Unpin any previously pinned BCB*/
|
|
if (BcbToUnpin)
|
|
{
|
|
CcUnpinFileDataEx(BcbToUnpin, 1, 0);
|
|
BcbToUnpin = NULL;
|
|
}
|
|
|
|
/* Invoke the provided dirty page routine*/
|
|
DirtyPageRoutine(FileObject,
|
|
&LocalFileOffset,
|
|
SharedCacheMap,
|
|
&LocalOldestLsn,
|
|
&LocalNewestLsn,
|
|
Context1,
|
|
Context2);
|
|
if (LocalOldestLsn.QuadPart && OldestLsn.QuadPart || LocalOldestLsn.QuadPart < OldestLsn.QuadPart)
|
|
{
|
|
OldestLsn = LocalOldestLsn;
|
|
}
|
|
}
|
|
|
|
// Reacquire the BCB lock
|
|
KeAcquireInStackQueuedSpinLock(&SharedCacheMap->BcbLock, &LockHandle);
|
|
// Decrement the PinCount or mark for unpinning
|
|
if (Bcb->PinCount > 1)
|
|
{
|
|
Bcb->PinCount--;
|
|
BcbToUnpin = NULL;
|
|
}
|
|
else
|
|
{
|
|
BcbToUnpin = Bcb;
|
|
}
|
|
/*Release the BCB lock*/
|
|
KeReleaseInStackQueuedSpinLock(&LockHandle);
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Dereference the shared cache map file object*/
|
|
CcDereferenceSharedCacheMapFileObject(SharedCacheMap, FileObject);
|
|
|
|
/*Unpin any remaining BCB*/
|
|
if (BcbToUnpin)
|
|
{
|
|
CcUnpinFileDataEx(BcbToUnpin, 1,0);
|
|
BcbToUnpin = NULL;
|
|
}
|
|
|
|
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
|
|
|
|
/*Decrement the OpenCount and DirtyPages*/
|
|
SharedCacheMap->OpenCount--;
|
|
SharedCacheMap->DirtyPages--;
|
|
KeReleaseQueuedSpinLock(LockQueueMasterLock,OldIrql);
|
|
}
|
|
}
|
|
return OldestLsn ;
|
|
|
|
}
|
|
|
|
|
|
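/*
 * CcGetLsnForFileObject
 *
 * Scans the BCB list of the file's shared cache map for dirty ranges.
 * Optionally returns the oldest (smallest non-zero) MostRecentlyDirtiedPage
 * value through OldestLsn, and returns the highest base page found among
 * the bitmap ranges.
 */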
LARGE_INTEGER
NTAPI
CcGetLsnForFileObject(IN PFILE_OBJECT FileObject,
                      OUT OPTIONAL PLARGE_INTEGER OldestLsn)
{
    PSHARED_CACHE_MAP SharedCacheMap = nullptr;
    PMBCB Bcb = nullptr;
    KLOCK_QUEUE_HANDLE LockHandle;
    LARGE_INTEGER OldestLsnValue = {0};
    LARGE_INTEGER Result = {0};
    BITMAP_RANGE OldRangeBasePage = {0};

    if (OldestLsn)
    {
        OldestLsn->QuadPart = 0;
    }

    SharedCacheMap = (PSHARED_CACHE_MAP)FileObject->SectionObjectPointer->SharedCacheMap;
    if (!SharedCacheMap || !SharedCacheMap->BcbList.Flink)
    {
        return Result;
    }

    /* Acquire the BCB spin lock */
    KeAcquireInStackQueuedSpinLock(&SharedCacheMap->BcbLock, &LockHandle);

    for (auto it = ListEntryIterator::begin(&SharedCacheMap->BcbList); it != ListEntryIterator::end(&SharedCacheMap->BcbList); ++it)
    {
        Bcb = CONTAINING_RECORD(*it, _MBCB, BitmapRange1);

        if (Bcb->NodeTypeCode == NODE_TYPE_CODE_BCB && Bcb->NodeIsInZone)
        {
            auto MostRecentlyDirtiedPage = Bcb->MostRecentlyDirtiedPage.QuadPart;

            /* Track the oldest (smallest non-zero) value */
            if (MostRecentlyDirtiedPage && (!OldestLsnValue.QuadPart || MostRecentlyDirtiedPage < OldestLsnValue.QuadPart))
            {
                OldestLsnValue.QuadPart = MostRecentlyDirtiedPage;
            }

            /* Track the highest base page seen across the bitmap ranges */
            if (Bcb->BitmapRange1.BasePage > OldRangeBasePage.BasePage)
            {
                OldRangeBasePage = Bcb->BitmapRange1;
            }
        }
    }

    KeReleaseInStackQueuedSpinLock(&LockHandle);

    /* Set the OldestLsn value if requested */
    if (OldestLsn)
    {
        OldestLsn->QuadPart = OldestLsnValue.QuadPart;
    }

    Result.QuadPart = OldRangeBasePage.BasePage;

    return Result;
}

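/*
 * CcSetAdditionalCacheAttributes
 *
 * Disables or re-enables read-ahead and write-behind for a cached file by
 * setting or clearing the corresponding flags of its shared cache map under
 * the master lock.
 */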
VOID
NTAPI
CcSetAdditionalCacheAttributes(IN PFILE_OBJECT FileObject,
                               IN BOOLEAN DisableReadAhead,
                               IN BOOLEAN DisableWriteBehind)
{
    PSHARED_CACHE_MAP SharedCacheMap = (PSHARED_CACHE_MAP)FileObject->SectionObjectPointer->SharedCacheMap;
    KIRQL OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (DisableReadAhead)
        SharedCacheMap->Flags |= READAHEAD_FLAG;
    else
        SharedCacheMap->Flags &= ~READAHEAD_FLAG;

    if (DisableWriteBehind)
        SharedCacheMap->Flags |= DISABLE_WRITE_FLAG;
    else
        SharedCacheMap->Flags &= ~DISABLE_WRITE_FLAG;

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}

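/*
 * CcSetAdditionalCacheAttributesEx
 *
 * Extended attribute control: translates the Flags bit mask into the
 * read-ahead/write-behind attributes and the write-behind, lazy-writer,
 * lazy-writer-scan and metadata-flush disable flags of the shared cache map.
 */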
VOID
NTAPI
CcSetAdditionalCacheAttributesEx(PFILE_OBJECT FileObject,
                                 ULONG Flags)
{
    PSHARED_CACHE_MAP sharedCacheMap = (PSHARED_CACHE_MAP)FileObject->SectionObjectPointer->SharedCacheMap;
    KIRQL OldIrql;
    KLOCK_QUEUE_HANDLE lockQueueHandle = {0};
    BOOLEAN disableReadAhead = (Flags & 2) != 0;

    if (sharedCacheMap == nullptr || !sharedCacheMap->OpenCount)
    {
        DBGPRINT("Invalid Shared Cache Map or Open Count\n");
        return;
    }

    if ((sharedCacheMap->Flags & 0x2000) == 0)
        CcSetAdditionalCacheAttributes(FileObject, disableReadAhead, (Flags & 4) != 0);

    /* Take the master lock (raises to DISPATCH_LEVEL), then the BCB lock at DPC level */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireInStackQueuedSpinLockAtDpcLevel(&sharedCacheMap->BcbLock, &lockQueueHandle);

    if ((Flags & 1) != 0)
        sharedCacheMap->Flags |= FLAG_DISABLE_WRITE_BEHIND;
    else
        sharedCacheMap->Flags &= ~FLAG_DISABLE_WRITE_BEHIND;

    if ((Flags & 8) != 0)
    {
        sharedCacheMap->Flags |= FLAG_DISABLE_LAZY_WRITER;
    }
    else
    {
        /* If the lazy writer had been disabled, mark it active again */
        if ((sharedCacheMap->Flags & FLAG_DISABLE_LAZY_WRITER) != 0)
            sharedCacheMap->Flags |= FLAG_LAZY_WRITER_ACTIVE;

        sharedCacheMap->Flags &= ~FLAG_DISABLE_LAZY_WRITER;
    }

    if ((Flags & 0x10) != 0)
        sharedCacheMap->Flags |= FLAG_DISABLE_LAZY_WRITER_SCAN;
    else
        sharedCacheMap->Flags &= ~FLAG_DISABLE_LAZY_WRITER_SCAN;

    if ((Flags & 0x20) != 0)
        sharedCacheMap->Flags |= FLAG_DISABLE_METADATA_FLUSH;
    else
        sharedCacheMap->Flags &= ~FLAG_DISABLE_METADATA_FLUSH;

    KeReleaseInStackQueuedSpinLockFromDpcLevel(&lockQueueHandle);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}

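/*
 * CcSetLogHandleForFile
 *
 * Associates a log handle and a flush-to-LSN callback with the shared cache
 * map of the given file object.
 */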
VOID
NTAPI
CcSetLogHandleForFile(IN PFILE_OBJECT FileObject,
                      IN PVOID LogHandle,
                      IN PFLUSH_TO_LSN FlushToLsnRoutine)
{
    PSHARED_CACHE_MAP SharedCacheMap = (PSHARED_CACHE_MAP)FileObject->SectionObjectPointer->SharedCacheMap;

    SharedCacheMap->LogHandle = LogHandle;
    SharedCacheMap->FlushToLsnRoutine = FlushToLsnRoutine;
}

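/*
 * CcSetLogHandleForFileEx
 *
 * Extended variant of CcSetLogHandleForFile: moves the shared cache map to
 * the clean or dirty logged stream list, initializes the per-volume log
 * handle context (flush and query-log-usage callbacks, optional related
 * file object) if it is not set up yet, and updates the volume's dirty page
 * statistics.
 */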
VOID
NTAPI
CcSetLogHandleForFileEx(
    IN PFILE_OBJECT FileObject,
    IN PVOID LogHandle,
    IN PFLUSH_TO_LSN FlushToLsnRoutine,
    IN PQUERY_LOG_USAGE QueryLogUsageRoutine,
    _In_opt_ PFILE_OBJECT RelatedLogHandleFileObject)
{
    PSHARED_CACHE_MAP SharedCacheMap = nullptr;
    KIRQL OldIrql;
    LOG_HANDLE_CONTEXT LogHandleContext = {0};
    PLOG_HANDLE_CONTEXT VolumeLogHandleContext = nullptr;
    PLIST_ENTRY ListHead = nullptr;

    /* Retrieve the shared cache map */
    SharedCacheMap = (PSHARED_CACHE_MAP)FileObject->SectionObjectPointer->SharedCacheMap;

    /* Validate the shared cache map */
    if (!SharedCacheMap || SharedCacheMap->OpenCount == 0)
    {
        return;
    }

    if (SharedCacheMap->Flags & FLAG_LOGHANDLE_SET)
    {
        return;
    }

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Remove the existing log handle if present */
    if (SharedCacheMap->LogHandle)
    {
        RemoveEntryList(&SharedCacheMap->LoggedStreamLinks);
    }

    /* Handle the case when a new log handle is not provided */
    if (!LogHandle)
    {
        SharedCacheMap->Flags &= ~FLAG_LOGHANDLE_NOT_SET;
        SharedCacheMap->LogHandle = NULL;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        return;
    }

    /* Determine the appropriate list based on whether there are dirty pages */
    if (SharedCacheMap->DirtyPages)
    {
        ListHead = &CcDirtyLoggedStreamLinks;
    }
    else
    {
        ListHead = &CcCleanLoggedStreamLinks;
    }

    InsertTailList(ListHead, &SharedCacheMap->LoggedStreamLinks);

    /* Initialize the volume log handle context if not already present */
    VolumeLogHandleContext = &SharedCacheMap->VolumeCacheMap->LogHandleContext;

    if (VolumeLogHandleContext->LogHandle == NULL)
    {
        RtlZeroMemory(&LogHandleContext, sizeof(LOG_HANDLE_CONTEXT));
        LogHandleContext.LastLWTimeStamp.QuadPart = -1;
        LogHandleContext.LogHandle = LogHandle;
        LogHandleContext.FlushToLsnRoutine = FlushToLsnRoutine;
        LogHandleContext.QueryLogUsageRoutine = QueryLogUsageRoutine;

        if (RelatedLogHandleFileObject)
        {
            ObfReferenceObjectWithTag(RelatedLogHandleFileObject, 'tlFD');
            LogHandleContext.RelatedFileObject = RelatedLogHandleFileObject;
        }

        RtlCopyMemory(VolumeLogHandleContext, &LogHandleContext, sizeof(LOG_HANDLE_CONTEXT));
    }

    SharedCacheMap->LogHandle = VolumeLogHandleContext;

    /* Update the flags and dirty page statistics */
    if (!(SharedCacheMap->Flags & FLAG_LOGHANDLE_NOT_SET))
    {
        if (SharedCacheMap->DirtyPages)
        {
            VolumeLogHandleContext->DirtyPageStatistics.DirtyPages += SharedCacheMap->DirtyPages;
        }
    }

    SharedCacheMap->Flags |= FLAG_LOGHANDLE_NOT_SET;

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}

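/*
 * CcSetLoggedDataThreshold
 *
 * Looks up the volume cache map identified by LogHandle on the global
 * volume cache map list and updates its logged data threshold to the given
 * number of pages.
 */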
VOID
VECTORCALL
CcSetLoggedDataThreshold(
    _In_ PVOID LogHandle,
    _In_ ULONG NumberOfPages)
{
    KIRQL OldIrql;
    PVOLUME_CACHE_MAP VolumeCacheMap = nullptr;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Check if the volume cache map list is non-empty */
    if (!IsListEmpty(&CcVolumeCacheMapList))
    {
        /* Iterate through the list to find the matching log handle */
        for (auto it = ListEntryIterator::begin(&CcVolumeCacheMapList); it != ListEntryIterator::end(&CcVolumeCacheMapList); ++it)
        {
            VolumeCacheMap = CONTAINING_RECORD(*it, VOLUME_CACHE_MAP, VolumeCacheMapLinks);
            if (&VolumeCacheMap->VolumeCacheMapLinks == LogHandle)
            {
                /* Found the matching log handle, update the data threshold */
                VolumeCacheMap->LoggedDataThreshold = NumberOfPages;
                break;
            }
            VolumeCacheMap = NULL;
        }
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}

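/*
 * CcIsThereDirtyLoggedPages
 *
 * Checks whether the volume cache map for the given device object still has
 * dirty logged pages or logged pages queued to disk. Optionally returns the
 * combined count through NumberOfDirtyPages.
 */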
BOOLEAN
NTAPI
CcIsThereDirtyLoggedPages(
    _In_ PDEVICE_OBJECT DeviceObject,
    _Out_opt_ PULONG NumberOfDirtyPages)
{
    BOOLEAN DirtyPagesExist = FALSE;
    KIRQL OldIrql;
    PVOLUME_CACHE_MAP VolumeCacheMap = nullptr;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueCcVolumeCacheMapLock);

    /* Iterate through the volume cache map list to find the matching device object */
    for (auto it = ListEntryIterator::begin(&CcVolumeCacheMapList); it != ListEntryIterator::end(&CcVolumeCacheMapList); ++it)
    {
        VolumeCacheMap = CONTAINING_RECORD(*it, VOLUME_CACHE_MAP, VolumeCacheMapLinks);
        if (VolumeCacheMap->DeviceObject == DeviceObject)
        {
            break;
        }
        VolumeCacheMap = NULL;
    }

    /* Ensure that the volume cache map was found */
    if (VolumeCacheMap != nullptr)
    {
        /* Check if there are dirty pages or logged pages queued to disk */
        if (VolumeCacheMap->LogHandleContext.DirtyPages || VolumeCacheMap->LoggedPagesQueuedToDisk)
        {
            if (NumberOfDirtyPages)
            {
                *NumberOfDirtyPages = VolumeCacheMap->LogHandleContext.DirtyPages + VolumeCacheMap->LoggedPagesQueuedToDisk;
            }
            DirtyPagesExist = TRUE;
        }
    }

    KeReleaseQueuedSpinLock(LockQueueCcVolumeCacheMapLock, OldIrql);

    return DirtyPagesExist;
}

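/*
 * CcGetCachedDirtyPageCountForFile
 *
 * Returns the number of dirty cached pages for the file described by the
 * given section object pointers, or (ULONG)-1 if the file has no shared
 * cache map.
 */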
ULONG
NTAPI
CcGetCachedDirtyPageCountForFile(PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
    ULONG DirtyPages = (ULONG)-1;
    PSHARED_CACHE_MAP SharedCacheMap;

    if (SectionObjectPointer)
    {
        SharedCacheMap = (PSHARED_CACHE_MAP)SectionObjectPointer->SharedCacheMap;
        if (SharedCacheMap)
            DirtyPages = SharedCacheMap->DirtyPages;
    }

    return DirtyPages;
}