/*
 * PROJECT: Alcyone System Kernel
 * LICENSE: BSD 3-Clause
 * PURPOSE: Cache Controller :: Lazy Writer
 * NT KERNEL: 5.11.9360
 * COPYRIGHT:  2023-2029 Dibymartanda Samanta <>
 */

#include "debug.hpp"
#include <ntoskrnl.hpp>


/* Decides whether the lazy-writer scan should skip this shared cache map on the current pass */
BOOLEAN NTAPI IsGoToNextMap(IN PSHARED_CACHE_MAP SharedMap, IN ULONG TargetPages)
{
    BOOLEAN Skip = FALSE;

    // If the map is marked for teardown or modified without writing, return TRUE
    if (SharedMap->Flags & (SHARE_FL_WAITING_TEARDOWN | SHARE_FL_MODIFIED_NO_WRITE)) {
        return TRUE;
    }

    // Determine if we should skip based on open count and dirty pages
    if ((SharedMap->OpenCount || SharedMap->DirtyPages) && SharedMap->FileSize.QuadPart != 0) {
        Skip = TRUE;
    }

    // An otherwise active map with no dirty pages has nothing for the lazy writer to do
    if (!SharedMap->DirtyPages && Skip) {
        return TRUE;
    }

    // If the map is waiting for teardown, return FALSE
    if (SharedMap->Flags & SHARE_FL_WAITING_TEARDOWN) {
        return FALSE;
    }

    // The page target for this pass is already met; defer this active map
    if (TargetPages == 0 && Skip) {
        return TRUE;
    }

    // Increment the lazy write pass count
    SharedMap->LazyWritePassCount++;

    // On larger systems, defer modified-no-write maps with fewer than 0x40 dirty
    // pages except on every 16th pass
    if ((SharedMap->LazyWritePassCount & 0xF) &&
        (SharedMap->Flags & SHARE_FL_MODIFIED_NO_WRITE) &&
        CcCapturedSystemSize != MmSmallSystem &&
        SharedMap->DirtyPages < 0x40 && Skip)
    {
        return TRUE;
    }

    // Defer open direct-I/O files while the write throttle would still admit a 256 KB write
    if ((SharedMap->FileObject->Flags & FO_DIRECT_IO) &&
        SharedMap->OpenCount != 0 &&
        CcCanIWrite(SharedMap->FileObject, 0x40000, FALSE, 0xFF) && Skip)
    {
        return TRUE;
    }

    return FALSE;
}
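
/*
 * Illustrative sketch, not part of the driver logic: one plausible way a
 * lazy-writer scan loop could consume IsGoToNextMap(). The list head
 * CcLazyWriterCursorSketch, the link field SharedCacheMapLinks and the helper
 * CcpFlushSharedMapSketch() are hypothetical names used only for this
 * example; list locking is omitted for brevity.
 */
#if 0
static VOID CcpLazyWriteScanSketch(IN ULONG TargetPages)
{
    for (PLIST_ENTRY Entry = CcLazyWriterCursorSketch.Flink;
         Entry != &CcLazyWriterCursorSketch;
         Entry = Entry->Flink)
    {
        PSHARED_CACHE_MAP SharedMap = CONTAINING_RECORD(Entry,
                                                        SHARED_CACHE_MAP,
                                                        SharedCacheMapLinks);

        /* Skip maps the heuristic deems not worth writing on this pass */
        if (IsGoToNextMap(SharedMap, TargetPages))
            continue;

        /* Queue the map's dirty pages for writing (hypothetical helper) */
        CcpFlushSharedMapSketch(SharedMap, &TargetPages);
    }
}
#endif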

/* TODO: Move this to the XDK later */
/* RAII guard for a numbered queued spin lock */
class SpinLockGuard
{
public:
    explicit SpinLockGuard(KSPIN_LOCK_QUEUE_NUMBER QueueNumber)
        : m_queueNumber(QueueNumber)
    {
        /* Raises IRQL to DISPATCH_LEVEL and acquires the queued spin lock */
        m_currentIrql = KeAcquireQueuedSpinLock(m_queueNumber);
    }

    ~SpinLockGuard()
    {
        /* Releases the lock and restores the previous IRQL */
        KeReleaseQueuedSpinLock(m_queueNumber, m_currentIrql);
    }

    SpinLockGuard(const SpinLockGuard&) = delete;
    SpinLockGuard& operator=(const SpinLockGuard&) = delete;

private:
    KSPIN_LOCK_QUEUE_NUMBER m_queueNumber;
    KIRQL m_currentIrql;
};
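
/*
 * Usage sketch, illustrative only: the guard takes the queued spin lock in its
 * constructor and releases it when the scope ends, including on early return.
 * LockQueueMasterLock is a standard KSPIN_LOCK_QUEUE_NUMBER value; the
 * function itself is hypothetical.
 */
#if 0
static VOID CcpResetDirtyCountSketch(IN PSHARED_CACHE_MAP SharedMap)
{
    SpinLockGuard Guard(LockQueueMasterLock);   /* IRQL raised, lock held */

    SharedMap->DirtyPages = 0;                  /* protected update */
}                                               /* lock released, IRQL restored */
#endif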

/* RAII guard that holds an EX_SPIN_LOCK in shared (reader) mode */
class SpinLockSharedGuard {
public:
    explicit SpinLockSharedGuard(PEX_SPIN_LOCK SpinLock) : m_SpinLock(SpinLock) {
        /* Raises IRQL to DISPATCH_LEVEL and acquires the lock shared */
        m_OldIrql = ExAcquireSpinLockShared(m_SpinLock);
    }

    ~SpinLockSharedGuard() {
        /* Releases the shared hold and restores the previous IRQL */
        ExReleaseSpinLockShared(m_SpinLock, m_OldIrql);
    }

    SpinLockSharedGuard(const SpinLockSharedGuard&) = delete;
    SpinLockSharedGuard& operator=(const SpinLockSharedGuard&) = delete;

private:
    PEX_SPIN_LOCK m_SpinLock;
    KIRQL m_OldIrql;
};
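
/*
 * Usage sketch, illustrative only: holding an EX_SPIN_LOCK in shared mode for
 * a read-only list walk. The lock variable and the function are hypothetical
 * names for this example.
 */
#if 0
static EX_SPIN_LOCK CcSketchListLock;

static ULONG CcpCountEntriesSketch(IN PLIST_ENTRY ListHead)
{
    SpinLockSharedGuard Guard(&CcSketchListLock);   /* shared (reader) hold */

    ULONG Count = 0;
    for (PLIST_ENTRY Entry = ListHead->Flink; Entry != ListHead; Entry = Entry->Flink)
    {
        ++Count;
    }

    return Count;
}                                                   /* lock dropped on scope exit */
#endif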

/* Non-owning, bounds-checked view over an existing buffer */
template <typename T>
class Array {
public:
    Array(T* data, size_t size) : data_(data), size_(size) {}

    T& operator[](size_t index) {
        checkBounds(index);
        return data_[index];
    }

    const T& operator[](size_t index) const {
        checkBounds(index);
        return data_[index];
    }

    T& at(size_t index) {
        checkBounds(index);
        return data_[index];
    }

    const T& at(size_t index) const {
        checkBounds(index);
        return data_[index];
    }

    size_t size() const {
        return size_;
    }

private:
    T* data_;
    size_t size_;

    /* Raises STATUS_ARRAY_BOUNDS_EXCEEDED if the index is out of range */
    void checkBounds(size_t index) const {
        if (index >= size_) {
            ExRaiseStatus(STATUS_ARRAY_BOUNDS_EXCEEDED);
        }
    }
};
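
/*
 * Usage sketch, illustrative only: Array wraps an existing buffer without
 * owning it, and an out-of-range access raises STATUS_ARRAY_BOUNDS_EXCEEDED,
 * which the caller can trap with structured exception handling. The function
 * name is hypothetical.
 */
#if 0
static VOID CcpArrayUsageSketch(IN PULONG Buffer, IN SIZE_T Count)
{
    Array<ULONG> Pages(Buffer, Count);

    __try
    {
        Pages[0] = 1;           /* in bounds (assuming Count > 0): plain element access */
        Pages.at(Count) = 2;    /* out of bounds: raises into the __except block */
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        /* Out-of-range index detected by checkBounds() */
    }
}
#endif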