alcyone/NTOSKRNL/CC/ccpinsupport.cpp
Dibyamartanda Samanta 4370544c97
[NTOSKRNL:CC] Pin Support :: Implemented CcSetDirtyPinnedData
Note: In the future, implement a specialized iterator for BCBs and introduce bounds checking
similar to a GSL span (see the sketch after the OBCB definition below).
2024-08-06 13:59:37 +02:00


/*
* PROJECT: Alcyone System Kernel
* LICENSE: BSD Clause 3
* PURPOSE: Cache Controller:: Pin Support
* NT KERNEL: 5.11.9360
* COPYRIGHT: 2023-2029 Dibymartanda Samanta <>
*/
#include <ntoskrnl.h>
#define NTDEBUG
#include <debug.h>
#include "ccinternal.hpp"
extern "C"
/* TODO: Move this typedef to cctypes in the SDK later */
/* NOTE: This structure has been reconstructed by analyzing CcAllocateObcb and may be incomplete compared to the one in the Windows NT kernel */
/* We only need API compatibility, so that does not matter */
typedef struct _OBCB
{
short NodeTypeCode;
short NodeByteSize;
ULONG ByteLength;
LARGE_INTEGER FileOffset;
PCC_BCB Bcbs[1];
long __PADDING__[1];
} OBCB, *POBCB;
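/*
 * Sketch of the bounds-checked BCB iterator mentioned in the commit note above. This is an
 * illustrative draft only: the CcBcbSpan type is hypothetical and not used anywhere yet, and
 * the bugcheck parameters are arbitrary placeholders. It wraps the NULL-terminated Bcbs[]
 * array of an OBCB in a GSL-span-like view so iteration cannot run past the slot count that
 * was computed at allocation time.
 */
#if 0 /* Illustrative only, not compiled */
class CcBcbSpan
{
public:
    CcBcbSpan(PCC_BCB* First, SIZE_T SlotCount) : m_First(First), m_SlotCount(SlotCount) {}

    PCC_BCB* begin() const { return m_First; }
    PCC_BCB* end() const
    {
        /* Stop at the first NULL entry or at the slot limit, whichever comes first */
        SIZE_T Index = 0;
        while (Index < m_SlotCount && m_First[Index] != nullptr)
        {
            ++Index;
        }
        return m_First + Index;
    }

    PCC_BCB at(SIZE_T Index) const
    {
        /* Out-of-bounds access is a kernel bug; fail fast instead of corrupting memory */
        if (Index >= m_SlotCount)
        {
            KeBugCheckEx(CACHE_MANAGER, 0xE95, reinterpret_cast<ULONG_PTR>(m_First), Index, m_SlotCount);
        }
        return m_First[Index];
    }

private:
    PCC_BCB* m_First;
    SIZE_T m_SlotCount;
};
#endif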
/*Internal Function*/
POBCB
NTAPI
CcAllocateObcb(
_In_ PLARGE_INTEGER FileOffset,
_In_ ULONG Length,
_In_ PCC_BCB Bcb)
{
/* Calculate the size needed for the OBCB: one BCB pointer slot per page spanned by the request */
SIZE_T PageCount = ((FileOffset->LowPart & (PAGE_SIZE - 1)) + Length + PAGE_SIZE - 1) >> PAGE_SHIFT;
SIZE_T AllocationSize = sizeof(OBCB) + PageCount * sizeof(PCC_BCB);
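/*
 * Example (hypothetical values): FileOffset->LowPart = 0x1800, Length = 0x3000, PAGE_SIZE = 0x1000:
 * (0x800 + 0x3000 + 0xFFF) >> PAGE_SHIFT = 4 pages, so four BCB pointer slots are reserved in
 * addition to the Bcbs[1]/padding already inside OBCB, which leaves room for a NULL terminator
 * after the zero-initialization below.
 */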
/*Allocate memory for the OBCB*/
POBCB NewObcb = reinterpret_cast<POBCB>(ExAllocatePoolWithTag(NonPagedPool,AllocationSize,'cObC'));
if (NewObcb == nullptr)
{
return nullptr;
}
/* Initialize the OBCB */
RtlZeroMemory(NewObcb, AllocationSize);
NewObcb->NodeByteSize = static_cast<USHORT>(AllocationSize);
NewObcb->NodeTypeCode = 762;
NewObcb->ByteLength = Length;
NewObcb->FileOffset = *FileOffset;
NewObcb->Bcbs[0] = Bcb;
return NewObcb;
}
VOID
NTAPI
CcSetDirtyPinnedData(
IN PBCB BcbVoid,
IN PLARGE_INTEGER Lsn OPTIONAL)
{
PSHARED_CACHE_MAP SharedCacheMap;
PBCB Bcb;
PBCB* BcbArray;
PBCB SingleBcb[2] = { BcbVoid, NULL };
KLOCK_QUEUE_HANDLE LockHandle;
KIRQL OldIrql;
ULONG PagesToWrite;
/* An OBCB (NodeTypeCode 762) carries a NULL-terminated array of BCB pointers; a plain BCB is treated as a one-element array */
if (BcbVoid->NodeTypeCode == 762)
{
BcbArray = reinterpret_cast<PBCB*>(&reinterpret_cast<POBCB>(BcbVoid)->Bcbs[0]);
}
else
{
BcbArray = SingleBcb;
}
SharedCacheMap = (*BcbArray)->SharedCacheMap;
/* Handle logging if necessary */
auto* LogHandleContext = &SharedCacheMap->VolumeCacheMap->LogHandleContext;
if ((SharedCacheMap->Flags & 0x1000000) != 0 && LogHandleContext->LastLWTimeStamp.QuadPart == -1)
{
KeQuerySystemTime(&LogHandleContext->LastLWTimeStamp);
}
if (!(SharedCacheMap->Flags & 0x200))
{
CcSetDirtyInMask(SharedCacheMap, &BcbVoid->FileOffset, BcbVoid->PagesToWrite, NULL);
return;
}
Bcb = *BcbArray;
while (Bcb)
{
if ((ULONG_PTR)Bcb & 1)
{
KeBugCheckEx(CACHE_MANAGER, 0xE94, 0xC0000420, 0, 0);
}
SharedCacheMap = Bcb->SharedCacheMap;
/* Acquire the master lock, then the per-map BCB spin lock at DPC level */
OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
KeAcquireInStackQueuedSpinLockAtDpcLevel(&SharedCacheMap->BcbSpinLock, &LockHandle);
if (!Bcb->Dirty)
{
PagesToWrite = Bcb->PagesToWrite >> PAGE_SHIFT;
Bcb->Dirty = TRUE;
if (Lsn)
{
Bcb->OldestLsn = *Lsn;
Bcb->NewestLsn = *Lsn;
}
/* Update dirty page statistics */
if (!SharedCacheMap->DirtyPages && !(SharedCacheMap->Flags & PRIVATE_CACHE_MAP_FLAGS_WRITE_THROUGH))
{
CcScheduleLazyWriteScan(FALSE, FALSE);
CcInsertIntoDirtySharedCacheMapList(SharedCacheMap);
}
CcChargeDirtyPages(SharedCacheMap, 0, 0, PagesToWrite);
/* Handle process disk counters if necessary */
if (SharedCacheMap->Flags & PRIVATE_CACHE_MAP_FLAGS_TRACK_DIRTY_PAGES)
{
PsUpdateDiskCounters(PsGetCurrentThread()->ThreadsProcess, 0, PagesToWrite << PAGE_SHIFT, 0, 1, 0);
}
}
KeReleaseInStackQueuedSpinLockFromDpcLevel(&LockHandle);
/* Update LSN information */
if (Lsn)
{
if (!Bcb->OldestLsn.QuadPart || Lsn->QuadPart < Bcb->OldestLsn.QuadPart)
{
Bcb->OldestLsn = *Lsn;
}
if (!Bcb->NewestLsn.QuadPart || Lsn->QuadPart > Bcb->NewestLsn.QuadPart)
{
Bcb->NewestLsn = *Lsn;
}
if (Lsn->QuadPart > SharedCacheMap->LargestLSN.QuadPart)
{
SharedCacheMap->LargestLSN = *Lsn;
}
}
/* Update ValidDataGoal if necessary */
if (Bcb->ResumeWritePage > SharedCacheMap->ValidDataGoal.QuadPart)
{
SharedCacheMap->ValidDataGoal.QuadPart = Bcb->ResumeWritePage;
}
/* Release the master lock */
KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
/* Move to the next BCB */
BcbArray++;
Bcb = *BcbArray;
}
}
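/*
 * Typical caller pattern for the routine above, shown as a sketch only. A filesystem pins a
 * range with CcPinRead, modifies the mapped bytes, then calls CcSetDirtyPinnedData so the lazy
 * writer knows the pinned pages are dirty, and finally unpins. The helper name and the LSN
 * value are hypothetical; error handling is reduced to the bare minimum.
 */
#if 0 /* Illustrative only, not compiled */
static BOOLEAN
CcpExampleDirtyRange(
    _In_ PFILE_OBJECT FileObject,
    _In_ PLARGE_INTEGER FileOffset,
    _In_ ULONG Length)
{
    PVOID Bcb = nullptr;
    PVOID Buffer = nullptr;
    LARGE_INTEGER Lsn = {};

    /* Pin the range, blocking until the data is resident */
    if (!CcPinRead(FileObject, FileOffset, Length, PIN_WAIT, &Bcb, &Buffer))
    {
        return FALSE;
    }

    /* Modify the pinned bytes in place */
    RtlFillMemory(Buffer, Length, 0xAB);

    /* Tell the cache controller the pinned pages are now dirty */
    Lsn.QuadPart = 1; /* hypothetical log sequence number */
    CcSetDirtyPinnedData(reinterpret_cast<PBCB>(Bcb), &Lsn);

    /* Drop the pin; the lazy writer will flush the dirty pages later */
    CcUnpinData(Bcb);
    return TRUE;
}
#endif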
BOOLEAN
NTAPI
CcPinFileData(
_In_ PFILE_OBJECT FileObject,
_In_ PLARGE_INTEGER FileOffset,
_In_ ULONG Length,
_In_ BOOLEAN IsNoWrite,
_In_ BOOLEAN Flags,
_In_ ULONG PinFlags,
_Out_ PCC_BCB* Bcb,
_Out_ PVOID* Buffer,
_Out_ LARGE_INTEGER* BeyondLastByte)
{
/* Unimplemented */
return FALSE;
}
BOOLEAN
NTAPI
CcMapDataCommon(
_In_ PFILE_OBJECT FileObject,
_In_ PLARGE_INTEGER FileOffset,
_In_ ULONG Length,
_In_ ULONG Flags,
_Out_ PVOID *Bcb,
_Out_ PVOID *Buffer
)
{
if (Flags & FILE_PIN_SEQUENTIAL_READ)
InterlockedIncrement(&CcSequentialReadCount);
else
InterlockedIncrement(&CcRandomReadCount);
KeGetCurrentThread()->WaitIrql = PASSIVE_LEVEL;
if (Flags & FILE_PIN_SEQUENTIAL_READ)
{
/* Map the range directly through a VACB and hand the VACB back as the BCB */
PSHARED_CACHE_MAP SharedMap = reinterpret_cast<PSHARED_CACHE_MAP>(FileObject->SectionObjectPointer->SharedCacheMap);
ULONG ReceivedLength = 0;
*Buffer = CcGetVirtualAddress(SharedMap, *FileOffset, reinterpret_cast<PVACB*>(Bcb), &ReceivedLength);
return TRUE;
}
else
{
LARGE_INTEGER BeyondLastByte;
if (CcPinFileData(FileObject, FileOffset, Length, TRUE, 0, Flags, Bcb, Buffer, &BeyondLastByte))
{
return TRUE;
}
else
{
InterlockedIncrement(&CcFailedPinReadCount);
return FALSE;
}
}
}
VOID
NTAPI
CcUnpinFileDataEx(
_In_ PCC_BCB Bcb,
_In_ BOOLEAN WriteStatus,
_In_ ULONG UnPinType)
{
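/*
 * UnPinType selects how the BCB is released (values assumed to come from ccinternal.hpp):
 *   UNPIN_BCB     - plain unpin: drop one pin reference.
 *   CLEAN_BCB_PIN - unpin and mark the BCB clean, deducting its dirty pages.
 * Any other value is unexpected and bugchecks below.
 */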
PSHARED_CACHE_MAP SharedCacheMap = nullptr;
KIRQL OldIrql = {0};
ULONG NumberOfPages = {0};
BOOLEAN ReadOnly = false;
/* If it is not a BCB pointer, it must be a VACB pointer; just free the virtual address */
if (Bcb->NodeTypeCode != BCB_NODE_TYPE_CODE)
{
PVACB Vacb = reinterpret_cast<PVACB>(Bcb);
CcFreeVirtualAddress(Vacb);
return;
}
SharedCacheMap = Bcb->SharedCacheMap;
/* Treat the BCB as read-only unless this is a modified-no-write map and more than a plain unpin was requested */
if (!(SharedCacheMap->Flags & SHARED_CACHE_MAP_MODIFIED_NO_WRITE) || UnPinType == UNPIN_BCB)
ReadOnly = TRUE;
OldIrql = KeAcquireSpinLockRaiseToDpc(&SharedCacheMap->BcbSpinLock);
/* Just reduce pin count if BCB is not set to be cleaned else perform cleaning of BCB*/
if (UnPinType < CLEAN_BCB_PIN)
{
ASSERT(Bcb->PinCount > 0);
Bcb->PinCount--;
}
else
{
if (UnPinType != CLEAN_BCB_PIN)
KeBugCheckEx(UNEXPECTED_VALUE, UnPinType, (ULONG_PTR)Bcb, 0, 0);
if (Bcb->IsBCBDirty)
{
NumberOfPages = (Bcb->Length / PAGE_SIZE);
Bcb->IsBCBDirty = FALSE;
Bcb->NewestDirtiedPage = 0;
Bcb->NewestLsn.QuadPart = 0;
CcDeductIsBCBDirtyPages(SharedCacheMap, NumberOfPages);
/*Adjust page count */
if (CcPagesYetToWrite <= NumberOfPages)
CcPagesYetToWrite = 0;
else
CcPagesYetToWrite -= NumberOfPages;
if (!SharedCacheMap->IsBCBDirtyPages && SharedCacheMap->OpenCount)
CcInsertIntoCleanSharedCacheMapList(SharedCacheMap);
}
}
if (Bcb->PinCount)
{ /* If this is not a read-only BCB, release its resource; there is no point holding it once the pin is dropped */
if (!ReadOnly)
ExReleaseResourceLite(&Bcb->BcbResource);
KeReleaseSpinLock(&SharedCacheMap->BcbSpinLock, OldIrql);
}
/* If the BCB is dirty, just free the virtual address and keep the BCB around */
else if (Bcb->IsBCBDirty)
{
if (Bcb->BaseAddress)
{
CcFreeVirtualAddress(Bcb->Vacb);
Bcb->BaseAddress = NULL;
Bcb->Vacb = NULL;
}
if (!ReadOnly)
ExReleaseResourceLite(&Bcb->BcbResource);
KeReleaseSpinLock(&SharedCacheMap->BcbSpinLock, OldIrql);
}
else
{
KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->VacbSpinLock);
RemoveEntryList(&Bcb->Link);
if (SharedCacheMap->SectionSize.QuadPart > CACHE_OVERALL_SIZE &&
(SharedCacheMap->Flags & SHARED_CACHE_MAP_MODIFIED_NO_WRITE))
{
CcAdjustVacbLevelLockCount(SharedCacheMap, &Bcb->FileOffset.QuadPart, -1);
}
KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->VacbSpinLock);
if (Bcb->BaseAddress)
CcFreeVirtualAddress(Bcb->Vacb);
if (!ReadOnly)
ExReleaseResourceLite(&Bcb->BcbResource);
KeReleaseSpinLock(&SharedCacheMap->BcbSpinLock, OldIrql);
CcDeallocateBcb(Bcb);
}
}
BOOLEAN
NTAPI
CcMapData(
_In_ PFILE_OBJECT FileObject,
_In_ PLARGE_INTEGER FileOffset,
_In_ ULONG Length,
_In_ ULONG Flags,
_Out_ PVOID *Bcb,
_Out_ PVOID *Buffer)
{
PVOID LocalBuffer = nullptr;
/* Call CcMapDataCommon to perform the actual mapping */
if (!CcMapDataCommon(FileObject,FileOffset,Length,Flags,Bcb,&LocalBuffer))
{
return false;
}
/* Check if we need to read the data */
if (!(Flags & MAP_NO_READ))
{
/* Read the data */
if (!CcMapAndRead(Length, 0, true, LocalBuffer))
{
return false;
}
}
/* Set Bcb to point after the LocalBuffer */
*Bcb = reinterpret_cast<PVOID>(reinterpret_cast<ULONG_PTR>(LocalBuffer) + sizeof(PVOID));
/* Set the output Buffer */
*Buffer = LocalBuffer;
return true;
}
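/*
 * Sketch of the read-only counterpart: map a range with CcMapData, copy the bytes out, and
 * release the mapping with CcUnpinData. The helper name and destination handling are
 * illustrative; MAP_WAIT follows the public NT cache manager contract.
 */
#if 0 /* Illustrative only, not compiled */
static BOOLEAN
CcpExampleReadRange(
    _In_ PFILE_OBJECT FileObject,
    _In_ PLARGE_INTEGER FileOffset,
    _In_ ULONG Length,
    _Out_writes_bytes_(Length) PVOID Destination)
{
    PVOID Bcb = nullptr;
    PVOID Buffer = nullptr;

    /* Map the cached data; MAP_WAIT blocks until the pages are resident */
    if (!CcMapData(FileObject, FileOffset, Length, MAP_WAIT, &Bcb, &Buffer))
    {
        return FALSE;
    }

    /* The mapping is read-only from the caller's point of view: copy, do not modify */
    RtlCopyMemory(Destination, Buffer, Length);

    /* Release the mapping */
    CcUnpinData(Bcb);
    return TRUE;
}
#endif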
BOOLEAN
NTAPI
CcPinMappedData(
IN PFILE_OBJECT FileObject,
IN PLARGE_INTEGER FileOffset,
IN ULONG Length,
IN ULONG Flags,
IN OUT PCC_BCB Bcb)
{
/* Unimplemented */
return FALSE;
}
BOOLEAN
NTAPI
CcPinRead(
IN PFILE_OBJECT FileObject,
IN PLARGE_INTEGER FileOffset,
IN ULONG Length,
IN ULONG Flags,
OUT PVOID *Bcb,
OUT PVOID *Buffer)
{
/* Unimplemented */
return FALSE;
}
BOOLEAN
NTAPI
CcPreparePinWrite(
_In_ PFILE_OBJECT FileObject,
_In_ PLARGE_INTEGER FileOffset,
_In_ ULONG Length,
_In_ BOOLEAN Zero,
_In_ ULONG Flags,
_Out_ PVOID *Bcb,
_Out_ PVOID *Buffer)
{
LARGE_INTEGER LocalFileOffset = *FileOffset;
LARGE_INTEGER BeyondLastByte ={0};
ULONG RemainingLength = Length;
POBCB localbcb = nullptr;
PCC_BCB *localbcbPtr = reinterpret_cast<PCC_BCB*>(&localbcb);
PVOID LocalBuffer = nullptr;
BOOLEAN Result = false;
if (Flags & PIN_WAIT)
{
return CcMapDataForOverwrite(FileObject, FileOffset, Length, Bcb, Buffer);
}
/*Pinning Loop*/
while(true)
{
if (localbcb)
{
if (localbcbPtr == reinterpret_cast<PCC_BCB*>(&localbcb))
{
/* Promote the single pinned BCB into an OBCB array; bail out (and unpin) on allocation failure */
PCC_BCB FirstBcb = *localbcbPtr;
localbcb = CcAllocateObcb(&LocalFileOffset, RemainingLength, FirstBcb);
if (localbcb == nullptr)
{
CcUnpinData(FirstBcb);
return false;
}
localbcbPtr = &localbcb->Bcbs[0];
*Buffer = LocalBuffer;
}
RemainingLength += LocalFileOffset.QuadPart - BeyondLastByte.QuadPart;
LocalFileOffset.QuadPart = BeyondLastByte.QuadPart;
localbcbPtr++;
}
if (!CcPinFileData(FileObject, &LocalFileOffset,RemainingLength,false,false,Flags,localbcbPtr,&LocalBuffer, &BeyondLastByte))
{
/* Pinning failed */
if (localbcb)
{
CcUnpinData(localbcb);
}
return false;
}
/* Stop once the pinned range covers the whole request */
if (BeyondLastByte.QuadPart >= LocalFileOffset.QuadPart + RemainingLength)
break;
}
if (localbcbPtr == reinterpret_cast<PCC_BCB*>(&localbcb))
{
*Buffer = LocalBuffer;
}
if (Zero)
{
RtlZeroMemory(*Buffer, Length);
}
CcSetDirtyPinnedData(reinterpret_cast<PBCB>(localbcb), nullptr);
*Bcb = localbcb;
return true;
}
VOID
NTAPI
CcSetBcbOwnerPointer(
IN PMBCB Bcb,
IN PVOID OwnerPointer)
{
if (Bcb->NodeTypeCode == 762)
{
/* Iterate through all bitmap ranges */
for (auto it = ListEntryIterator::begin(&Bcb->BitmapRanges); it != ListEntryIterator::end(&Bcb->BitmapRanges); ++it)
{
auto* CurrentBcb = CONTAINING_RECORD(it->Blink, MBCB, BitmapRanges);
ExSetResourceOwnerPointer(&CurrentBcb->DirtyPages, OwnerPointer);
}
}
else
{
/* Single bitmap range */
ExSetResourceOwnerPointer(&Bcb->BitmapRange1.FirstDirtyPage,OwnerPointer);
}
}
VOID
NTAPI
CcUnpinData(
_In_ PVOID BcbPtr)
{
PCC_BCB Bcb = reinterpret_cast<PCC_BCB>(BcbPtr);
BOOLEAN WRITE_FLAG = FALSE;
if (reinterpret_cast<ULONG_PTR>(Bcb) & 1)
{
WRITE_FLAG = TRUE;
Bcb = reinterpret_cast<PCC_BCB>(reinterpret_cast<ULONG_PTR>(Bcb) & ~(1));
CcUnpinFileDataEx(Bcb, WRITE_FLAG, UNPIN_BCB);
return;
}
if (Bcb->NodeTypeCode != 762)
{
WRITE_FLAG = FALSE;
CcUnpinFileDataEx(Bcb, WRITE_FLAG, UNPIN_BCB);
return;
}
/* This is an OBCB: unpin every BCB in its embedded, NULL-terminated pointer array, then free it */
POBCB Obcb = reinterpret_cast<POBCB>(Bcb);
for (PCC_BCB* CurrentBcb = &Obcb->Bcbs[0]; *CurrentBcb != nullptr; ++CurrentBcb)
{
CcUnpinData(*CurrentBcb);
}
ExFreePoolWithTag(Obcb, 'cObC');
}
VOID
NTAPI
CcUnpinDataForThread(IN PVOID BcbPtr,
IN ERESOURCE_THREAD ResourceThreadId)
{
PCC_BCB Bcb = reinterpret_cast<PCC_BCB>(BcbPtr);
BOOLEAN WRITE_FLAG = FALSE;
if (reinterpret_cast<ULONG_PTR>(Bcb) & 1)
{
WRITE_FLAG = TRUE;
Bcb = reinterpret_cast<PCC_BCB>(reinterpret_cast<ULONG_PTR>(Bcb) & ~(1));
CcUnpinFileDataEx(Bcb, WRITE_FLAG, UNPIN_BCB);
return;
}
if (Bcb->NodeTypeCode != 762)
{
WRITE_FLAG = FALSE;
ExReleaseResourceForThreadLite(&Bcb->BcbResource, ResourceThreadId);
CcUnpinFileDataEx(Bcb, WRITE_FLAG, UNPIN_BCB);
return;
}
/* This is an OBCB: release every BCB in its embedded, NULL-terminated pointer array for the given thread, then free it */
POBCB Obcb = reinterpret_cast<POBCB>(Bcb);
for (PCC_BCB* CurrentBcb = &Obcb->Bcbs[0]; *CurrentBcb != nullptr; ++CurrentBcb)
{
CcUnpinDataForThread(*CurrentBcb, ResourceThreadId);
}
ExFreePoolWithTag(Obcb, 'cObC');
}
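/*
 * Sketch of how a pin taken on one thread can be released from another. The pinning thread
 * captures its resource-thread id with ExGetCurrentResourceThread() and hands it to whoever
 * will unpin; CcUnpinDataForThread then releases the BCB resource on behalf of that thread.
 * The context structure and helper name below are hypothetical.
 */
#if 0 /* Illustrative only, not compiled */
typedef struct _EXAMPLE_PIN_CONTEXT
{
    PVOID Bcb;                          /* value returned through CcPinRead/CcMapData */
    ERESOURCE_THREAD PinningThreadId;   /* captured with ExGetCurrentResourceThread() at pin time */
} EXAMPLE_PIN_CONTEXT, *PEXAMPLE_PIN_CONTEXT;

static VOID
CcpExampleUnpinFromWorker(
    _In_ PEXAMPLE_PIN_CONTEXT Context)
{
    /* Release the pin on behalf of the thread that originally acquired it */
    CcUnpinDataForThread(Context->Bcb, Context->PinningThreadId);
}
#endif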