From e8771dfc5b47ef8536a0ffdd8bfe14ced213b2c4 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Fri, 15 Aug 2025 00:32:56 +0200 Subject: [PATCH 01/34] Use __asm__ to comply with disabled GNU extensions --- xtoskrnl/ar/amd64/cpufunc.c | 434 +++++++++++++++++------------------ xtoskrnl/ar/i686/cpufunc.c | 408 ++++++++++++++++---------------- xtoskrnl/hl/amd64/ioport.c | 42 ++-- xtoskrnl/hl/i686/ioport.c | 42 ++-- xtoskrnl/ke/amd64/krnlinit.c | 18 +- xtoskrnl/ke/i686/krnlinit.c | 22 +- xtoskrnl/mm/amd64/pages.c | 24 +- xtoskrnl/mm/i686/pages.c | 16 +- 8 files changed, 503 insertions(+), 503 deletions(-) diff --git a/xtoskrnl/ar/amd64/cpufunc.c b/xtoskrnl/ar/amd64/cpufunc.c index 95e621f..d129717 100644 --- a/xtoskrnl/ar/amd64/cpufunc.c +++ b/xtoskrnl/ar/amd64/cpufunc.c @@ -20,7 +20,7 @@ XTCDECL VOID ArClearInterruptFlag(VOID) { - asm volatile("cli"); + __asm__ volatile("cli"); } /** @@ -40,12 +40,12 @@ ArCpuId(IN OUT PCPUID_REGISTERS Registers) UINT32 MaxLeaf; /* Get highest function ID available */ - asm volatile("cpuid" - : "=a" (MaxLeaf) - : "a" (Registers->Leaf & 0x80000000) - : "rbx", - "rcx", - "rdx"); + __asm__ volatile("cpuid" + : "=a" (MaxLeaf) + : "a" (Registers->Leaf & 0x80000000) + : "rbx", + "rcx", + "rdx"); /* Check if CPU supports this command */ if(Registers->Leaf > MaxLeaf) @@ -55,13 +55,13 @@ ArCpuId(IN OUT PCPUID_REGISTERS Registers) } /* Execute CPUID function */ - asm volatile("cpuid" - : "=a" (Registers->Eax), - "=b" (Registers->Ebx), - "=c" (Registers->Ecx), - "=d" (Registers->Edx) - : "a" (Registers->Leaf), - "c" (Registers->SubLeaf)); + __asm__ volatile("cpuid" + : "=a" (Registers->Eax), + "=b" (Registers->Ebx), + "=c" (Registers->Ecx), + "=d" (Registers->Edx) + : "a" (Registers->Leaf), + "c" (Registers->SubLeaf)); /* Return TRUE */ return TRUE; @@ -96,11 +96,11 @@ ArGetCpuFlags(VOID) ULONG_PTR Flags; /* Get RFLAGS register */ - asm volatile("pushf\n" - "pop %0\n" - : "=rm" (Flags) - : - : "memory"); + __asm__ volatile("pushf\n" + "pop %0\n" + : "=rm" (Flags) + : + : "memory"); /* Return flags */ return Flags; @@ -119,11 +119,11 @@ ULONG_PTR ArGetStackPointer(VOID) { /* Get current stack pointer */ - asm volatile("movq %%rsp, %%rax\n" - "retq\n" - : - : - :); + __asm__ volatile("movq %%rsp, %%rax\n" + "retq\n" + : + : + :); } /** @@ -137,7 +137,7 @@ XTCDECL VOID ArHalt(VOID) { - asm volatile("hlt"); + __asm__ volatile("hlt"); } /** @@ -174,10 +174,10 @@ XTCDECL VOID ArInvalidateTlbEntry(IN PVOID Address) { - asm volatile("invlpg (%0)" - : - : "b" (Address) - : "memory"); + __asm__ volatile("invlpg (%0)" + : + : "b" (Address) + : "memory"); } /** @@ -194,10 +194,10 @@ XTCDECL VOID ArLoadGlobalDescriptorTable(IN PVOID Source) { - asm volatile("lgdt %0" - : - : "m" (*(PSHORT)Source) - : "memory"); + __asm__ volatile("lgdt %0" + : + : "m" (*(PSHORT)Source) + : "memory"); } /** @@ -214,10 +214,10 @@ XTCDECL VOID ArLoadInterruptDescriptorTable(IN PVOID Source) { - asm volatile("lidt %0" - : - : "m" (*(PSHORT)Source) - : "memory"); + __asm__ volatile("lidt %0" + : + : "m" (*(PSHORT)Source) + : "memory"); } /** @@ -234,9 +234,9 @@ XTCDECL VOID ArLoadLocalDescriptorTable(IN USHORT Source) { - asm volatile("lldtw %0" - : - : "g" (Source)); + __asm__ volatile("lldtw %0" + : + : "g" (Source)); } /** @@ -253,9 +253,9 @@ XTCDECL VOID ArLoadMxcsrRegister(IN ULONG Source) { - asm volatile("ldmxcsr %0" - : - : "m" (Source)); + __asm__ volatile("ldmxcsr %0" + : + : "m" (Source)); } /** @@ -280,45 +280,45 @@ ArLoadSegment(IN USHORT Segment, { case SEGMENT_CS: /* Load CS Segment */ - asm 
volatile("mov %0, %%rax\n" - "push %%rax\n" - "lea label(%%rip), %%rax\n" - "push %%rax\n" - "lretq\n" - "label:" - : - : "ri" ((ULONGLONG)Source) - : "rax"); + __asm__ volatile("mov %0, %%rax\n" + "push %%rax\n" + "lea label(%%rip), %%rax\n" + "push %%rax\n" + "lretq\n" + "label:" + : + : "ri" ((ULONGLONG)Source) + : "rax"); break; case SEGMENT_DS: /* Load DS Segment */ - asm volatile("movl %0, %%ds" - : - : "r" (Source)); + __asm__ volatile("movl %0, %%ds" + : + : "r" (Source)); break; case SEGMENT_ES: /* Load ES Segment */ - asm volatile("movl %0, %%es" - : - : "r" (Source)); + __asm__ volatile("movl %0, %%es" + : + : "r" (Source)); break; case SEGMENT_FS: /* Load FS Segment */ - asm volatile("movl %0, %%fs" - : - : "r" (Source)); + __asm__ volatile("movl %0, %%fs" + : + : "r" (Source)); break; case SEGMENT_GS: /* Load GS Segment */ - asm volatile("movl %0, %%gs" - : - : "r" (Source)); + __asm__ volatile("movl %0, %%gs" + : + : "r" (Source)); break; /* Load SS Segment */ case SEGMENT_SS: - asm volatile("movl %0, %%ss" - : - : "r" (Source)); + __asm__ volatile("movl %0, %%ss" + : + : "r" (Source)); break; } } @@ -337,9 +337,9 @@ XTCDECL VOID ArLoadTaskRegister(USHORT Source) { - asm volatile("ltr %0" - : - : "rm" (Source)); + __asm__ volatile("ltr %0" + : + : "rm" (Source)); } /** @@ -354,9 +354,9 @@ VOID ArMemoryBarrier(VOID) { LONG Barrier; - asm volatile("lock; orl $0, %0;" - : - : "m"(Barrier)); + __asm__ volatile("lock; orl $0, %0;" + : + : "m"(Barrier)); } /** @@ -380,38 +380,38 @@ ArReadControlRegister(IN USHORT ControlRegister) { case 0: /* Read value from CR0 */ - asm volatile("mov %%cr0, %0" - : "=r" (Value) - : - : "memory"); + __asm__ volatile("mov %%cr0, %0" + : "=r" (Value) + : + : "memory"); break; case 2: /* Read value from CR2 */ - asm volatile("mov %%cr2, %0" - : "=r" (Value) - : - : "memory"); + __asm__ volatile("mov %%cr2, %0" + : "=r" (Value) + : + : "memory"); break; case 3: /* Read value from CR3 */ - asm volatile("mov %%cr3, %0" - : "=r" (Value) - : - : "memory"); + __asm__ volatile("mov %%cr3, %0" + : "=r" (Value) + : + : "memory"); break; case 4: /* Read value from CR4 */ - asm volatile("mov %%cr4, %0" - : "=r" (Value) - : - : "memory"); + __asm__ volatile("mov %%cr4, %0" + : "=r" (Value) + : + : "memory"); break; case 8: /* Read value from CR8 */ - asm volatile("mov %%cr8, %0" - : "=r" (Value) - : - : "memory"); + __asm__ volatile("mov %%cr8, %0" + : "=r" (Value) + : + : "memory"); break; default: /* Invalid control register set */ @@ -444,43 +444,43 @@ ArReadDebugRegister(IN USHORT DebugRegister) { case 0: /* Read value from DR0 */ - asm volatile("mov %%dr0, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr0, %0" + : "=r" (Value)); break; case 1: /* Read value from DR1 */ - asm volatile("mov %%dr1, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr1, %0" + : "=r" (Value)); break; case 2: /* Read value from DR2 */ - asm volatile("mov %%dr2, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr2, %0" + : "=r" (Value)); break; case 3: /* Read value from DR3 */ - asm volatile("mov %%dr3, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr3, %0" + : "=r" (Value)); break; case 4: /* Read value from DR4 */ - asm volatile("mov %%dr4, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr4, %0" + : "=r" (Value)); break; case 5: /* Read value from DR5 */ - asm volatile("mov %%dr5, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr5, %0" + : "=r" (Value)); break; case 6: /* Read value from DR6 */ - asm volatile("mov %%dr6, %0" - : "=r" (Value)); + __asm__ volatile("mov 
%%dr6, %0" + : "=r" (Value)); break; case 7: /* Read value from DR7 */ - asm volatile("mov %%dr7, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr7, %0" + : "=r" (Value)); break; default: /* Invalid debug register set */ @@ -509,9 +509,9 @@ ArReadGSQuadWord(ULONG Offset) ULONGLONG Value; /* Read quadword from GS segment */ - asm volatile("movq %%gs:%a[Offset], %q[Value]" - : [Value] "=r" (Value) - : [Offset] "ir" (Offset)); + __asm__ volatile("movq %%gs:%a[Offset], %q[Value]" + : [Value] "=r" (Value) + : [Offset] "ir" (Offset)); return Value; } @@ -531,10 +531,10 @@ ArReadModelSpecificRegister(IN ULONG Register) { ULONG Low, High; - asm volatile("rdmsr" - : "=a" (Low), - "=d" (High) - : "c" (Register)); + __asm__ volatile("rdmsr" + : "=a" (Low), + "=d" (High) + : "c" (Register)); return ((ULONGLONG)High << 32) | Low; } @@ -566,9 +566,9 @@ ArReadTimeStampCounter(VOID) { ULONGLONG Low, High; - asm volatile("rdtsc" - : "=a" (Low), - "=d" (High)); + __asm__ volatile("rdtsc" + : "=a" (Low), + "=d" (High)); return ((ULONGLONG)High << 32) | Low; } @@ -584,10 +584,10 @@ XTCDECL VOID ArReadWriteBarrier(VOID) { - asm volatile("" - : - : - : "memory"); + __asm__ volatile("" + : + : + : "memory"); } /** @@ -601,7 +601,7 @@ XTCDECL VOID ArSetInterruptFlag(VOID) { - asm volatile("sti"); + __asm__ volatile("sti"); } /** @@ -618,10 +618,10 @@ XTCDECL VOID ArStoreGlobalDescriptorTable(OUT PVOID Destination) { - asm volatile("sgdt %0" - : "=m" (*(PSHORT)Destination) - : - : "memory"); + __asm__ volatile("sgdt %0" + : "=m" (*(PSHORT)Destination) + : + : "memory"); } /** @@ -638,10 +638,10 @@ XTCDECL VOID ArStoreInterruptDescriptorTable(OUT PVOID Destination) { - asm volatile("sidt %0" - : "=m" (*(PSHORT)Destination) - : - : "memory"); + __asm__ volatile("sidt %0" + : "=m" (*(PSHORT)Destination) + : + : "memory"); } /** @@ -658,10 +658,10 @@ XTCDECL VOID ArStoreLocalDescriptorTable(OUT PVOID Destination) { - asm volatile("sldt %0" - : "=m" (*(PSHORT)Destination) - : - : "memory"); + __asm__ volatile("sldt %0" + : "=m" (*(PSHORT)Destination) + : + : "memory"); } /** @@ -685,28 +685,28 @@ ArStoreSegment(IN USHORT Segment, switch(Segment) { case SEGMENT_CS: - asm volatile("movl %%cs, %0" - : "=r" (*(PUINT)Destination)); + __asm__ volatile("movl %%cs, %0" + : "=r" (*(PUINT)Destination)); break; case SEGMENT_DS: - asm volatile("movl %%ds, %0" - : "=r" (*(PUINT)Destination)); + __asm__ volatile("movl %%ds, %0" + : "=r" (*(PUINT)Destination)); break; case SEGMENT_ES: - asm volatile("movl %%es, %0" - : "=r" (*(PUINT)Destination)); + __asm__ volatile("movl %%es, %0" + : "=r" (*(PUINT)Destination)); break; case SEGMENT_FS: - asm volatile("movl %%fs, %0" - : "=r" (*(PUINT)Destination)); + __asm__ volatile("movl %%fs, %0" + : "=r" (*(PUINT)Destination)); break; case SEGMENT_GS: - asm volatile("movl %%gs, %0" - : "=r" (*(PUINT)Destination)); + __asm__ volatile("movl %%gs, %0" + : "=r" (*(PUINT)Destination)); break; case SEGMENT_SS: - asm volatile("movl %%ss, %0" - : "=r" (*(PUINT)Destination)); + __asm__ volatile("movl %%ss, %0" + : "=r" (*(PUINT)Destination)); break; default: Destination = NULL; @@ -728,10 +728,10 @@ XTCDECL VOID ArStoreTaskRegister(OUT PVOID Destination) { - asm volatile("str %0" - : "=m" (*(PULONG)Destination) - : - : "memory"); + __asm__ volatile("str %0" + : "=m" (*(PULONG)Destination) + : + : "memory"); } /** @@ -757,38 +757,38 @@ ArWriteControlRegister(IN USHORT ControlRegister, { case 0: /* Write value to CR0 */ - asm volatile("mov %0, %%cr0" - : - : "r"(Value) - : "memory"); + __asm__ 
volatile("mov %0, %%cr0" + : + : "r"(Value) + : "memory"); break; case 2: /* Write value to CR2 */ - asm volatile("mov %0, %%cr2" - : - : "r"(Value) - : "memory"); + __asm__ volatile("mov %0, %%cr2" + : + : "r"(Value) + : "memory"); break; case 3: /* Write value to CR3 */ - asm volatile("mov %0, %%cr3" - : - : "r"(Value) - : "memory"); + __asm__ volatile("mov %0, %%cr3" + : + : "r"(Value) + : "memory"); break; case 4: /* Write value to CR4 */ - asm volatile("mov %0, %%cr4" - : - : "r"(Value) - : "memory"); + __asm__ volatile("mov %0, %%cr4" + : + : "r"(Value) + : "memory"); break; case 8: /* Write value to CR8 */ - asm volatile("mov %0, %%cr8" - : - : "r"(Value) - : "memory"); + __asm__ volatile("mov %0, %%cr8" + : + : "r"(Value) + : "memory"); break; } } @@ -816,52 +816,52 @@ ArWriteDebugRegister(IN USHORT DebugRegister, { case 0: /* Write value to DR0 */ - asm volatile("mov %0, %%dr0" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr0" + : + : "r" (Value) + : "memory"); case 1: /* Write value to DR1 */ - asm volatile("mov %0, %%dr1" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr1" + : + : "r" (Value) + : "memory"); case 2: /* Write value to DR2 */ - asm volatile("mov %0, %%dr2" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr2" + : + : "r" (Value) + : "memory"); case 3: /* Write value to DR3 */ - asm volatile("mov %0, %%dr3" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr3" + : + : "r" (Value) + : "memory"); case 4: /* Write value to DR4 */ - asm volatile("mov %0, %%dr4" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr4" + : + : "r" (Value) + : "memory"); case 5: /* Write value to DR5 */ - asm volatile("mov %0, %%dr5" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr5" + : + : "r" (Value) + : "memory"); case 6: /* Write value to DR6 */ - asm volatile("mov %0, %%dr6" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr6" + : + : "r" (Value) + : "memory"); case 7: /* Write value to DR7 */ - asm volatile("mov %0, %%dr7" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr7" + : + : "r" (Value) + : "memory"); } } @@ -879,10 +879,10 @@ XTCDECL VOID ArWriteEflagsRegister(IN UINT_PTR Value) { - asm volatile("push %0\n" - "popf" - : - : "rim" (Value)); + __asm__ volatile("push %0\n" + "popf" + : + : "rim" (Value)); } /** @@ -906,11 +906,11 @@ ArWriteModelSpecificRegister(IN ULONG Register, ULONG Low = Value & 0xFFFFFFFF; ULONG High = Value >> 32; - asm volatile("wrmsr" - : - : "c" (Register), - "a" (Low), - "d" (High)); + __asm__ volatile("wrmsr" + : + : "c" (Register), + "a" (Low), + "d" (High)); } /** @@ -924,8 +924,8 @@ XTCDECL VOID ArYieldProcessor(VOID) { - asm volatile("pause" - : - : - : "memory"); + __asm__ volatile("pause" + : + : + : "memory"); } diff --git a/xtoskrnl/ar/i686/cpufunc.c b/xtoskrnl/ar/i686/cpufunc.c index b6fa66b..27b6676 100644 --- a/xtoskrnl/ar/i686/cpufunc.c +++ b/xtoskrnl/ar/i686/cpufunc.c @@ -20,7 +20,7 @@ XTCDECL VOID ArClearInterruptFlag(VOID) { - asm volatile("cli"); + __asm__ volatile("cli"); } /** @@ -40,12 +40,12 @@ ArCpuId(IN OUT PCPUID_REGISTERS Registers) UINT32 MaxLeaf; /* Get highest function ID available */ - asm volatile("cpuid" - : "=a" (MaxLeaf) - : "a" (Registers->Leaf & 0x80000000) - : "rbx", - "rcx", - "rdx"); + __asm__ volatile("cpuid" + : "=a" (MaxLeaf) + : "a" (Registers->Leaf & 0x80000000) + : "rbx", + "rcx", + "rdx"); /* Check if CPU supports this command */ if(Registers->Leaf > MaxLeaf) @@ -55,13 
+55,13 @@ ArCpuId(IN OUT PCPUID_REGISTERS Registers) } /* Execute CPUID function */ - asm volatile("cpuid" - : "=a" (Registers->Eax), - "=b" (Registers->Ebx), - "=c" (Registers->Ecx), - "=d" (Registers->Edx) - : "a" (Registers->Leaf), - "c" (Registers->SubLeaf)); + __asm__ volatile("cpuid" + : "=a" (Registers->Eax), + "=b" (Registers->Ebx), + "=c" (Registers->Ecx), + "=d" (Registers->Edx) + : "a" (Registers->Leaf), + "c" (Registers->SubLeaf)); /* Return TRUE */ return TRUE; @@ -96,11 +96,11 @@ ArGetCpuFlags(VOID) ULONG_PTR Flags; /* Get EFLAGS register */ - asm volatile("pushf\n" - "pop %0\n" - : "=rm" (Flags) - : - : "memory"); + __asm__ volatile("pushf\n" + "pop %0\n" + : "=rm" (Flags) + : + : "memory"); /* Return flags */ return Flags; @@ -119,11 +119,11 @@ ULONG_PTR ArGetStackPointer(VOID) { /* Get current stack pointer */ - asm volatile("mov %%esp, %%eax\n" - "ret\n" - : - : - :); + __asm__ volatile("mov %%esp, %%eax\n" + "ret\n" + : + : + :); } /** @@ -137,7 +137,7 @@ XTCDECL VOID ArHalt(VOID) { - asm volatile("hlt"); + __asm__ volatile("hlt"); } /** @@ -174,10 +174,10 @@ XTCDECL VOID ArInvalidateTlbEntry(PVOID Address) { - asm volatile("invlpg (%0)" - : - : "b" (Address) - : "memory"); + __asm__ volatile("invlpg (%0)" + : + : "b" (Address) + : "memory"); } /** @@ -194,10 +194,10 @@ XTCDECL VOID ArLoadGlobalDescriptorTable(IN PVOID Source) { - asm volatile("lgdt %0" - : - : "m" (*(PSHORT)Source) - : "memory"); + __asm__ volatile("lgdt %0" + : + : "m" (*(PSHORT)Source) + : "memory"); } /** @@ -214,10 +214,10 @@ XTCDECL VOID ArLoadInterruptDescriptorTable(IN PVOID Source) { - asm volatile("lidt %0" - : - : "m" (*(PSHORT)Source) - : "memory"); + __asm__ volatile("lidt %0" + : + : "m" (*(PSHORT)Source) + : "memory"); } /** @@ -234,9 +234,9 @@ XTCDECL VOID ArLoadLocalDescriptorTable(IN USHORT Source) { - asm volatile("lldtw %0" - : - : "g" (Source)); + __asm__ volatile("lldtw %0" + : + : "g" (Source)); } /** @@ -261,45 +261,45 @@ ArLoadSegment(IN USHORT Segment, { case SEGMENT_CS: /* Load CS Segment */ - asm volatile("mov %0, %%eax\n" - "push %%eax\n" - "lea label, %%eax\n" - "push %%eax\n" - "lret\n" - "label:" - : - : "ri" (Source) - : "eax"); + __asm__ volatile("mov %0, %%eax\n" + "push %%eax\n" + "lea label, %%eax\n" + "push %%eax\n" + "lret\n" + "label:" + : + : "ri" (Source) + : "eax"); break; case SEGMENT_DS: /* Load DS Segment */ - asm volatile("movl %0, %%ds" - : - : "r" (Source)); + __asm__ volatile("movl %0, %%ds" + : + : "r" (Source)); break; case SEGMENT_ES: /* Load ES Segment */ - asm volatile("movl %0, %%es" - : - : "r" (Source)); + __asm__ volatile("movl %0, %%es" + : + : "r" (Source)); break; case SEGMENT_FS: /* Load FS Segment */ - asm volatile("movl %0, %%fs" - : - : "r" (Source)); + __asm__ volatile("movl %0, %%fs" + : + : "r" (Source)); break; case SEGMENT_GS: /* Load GS Segment */ - asm volatile("movl %0, %%gs" - : - : "r" (Source)); + __asm__ volatile("movl %0, %%gs" + : + : "r" (Source)); break; case SEGMENT_SS: /* Load SS Segment */ - asm volatile("movl %0, %%ss" - : - : "r" (Source)); + __asm__ volatile("movl %0, %%ss" + : + : "r" (Source)); break; } } @@ -318,9 +318,9 @@ XTCDECL VOID ArLoadTaskRegister(USHORT Source) { - asm volatile("ltr %0" - : - : "rm" (Source)); + __asm__ volatile("ltr %0" + : + : "rm" (Source)); } /** @@ -335,10 +335,10 @@ VOID ArMemoryBarrier(VOID) { LONG Barrier; - asm volatile("xchg %%eax, %0" - : - : "m" (Barrier) - : "%eax"); + __asm__ volatile("xchg %%eax, %0" + : + : "m" (Barrier) + : "%eax"); } /** @@ -362,31 +362,31 @@ 
ArReadControlRegister(IN USHORT ControlRegister) { case 0: /* Read value from CR0 */ - asm volatile("mov %%cr0, %0" - : "=r" (Value) - : - : "memory"); + __asm__ volatile("mov %%cr0, %0" + : "=r" (Value) + : + : "memory"); break; case 2: /* Read value from CR2 */ - asm volatile("mov %%cr2, %0" - : "=r" (Value) - : - : "memory"); + __asm__ volatile("mov %%cr2, %0" + : "=r" (Value) + : + : "memory"); break; case 3: /* Read value from CR3 */ - asm volatile("mov %%cr3, %0" - : "=r" (Value) - : - : "memory"); + __asm__ volatile("mov %%cr3, %0" + : "=r" (Value) + : + : "memory"); break; case 4: /* Read value from CR4 */ - asm volatile("mov %%cr4, %0" - : "=r" (Value) - : - : "memory"); + __asm__ volatile("mov %%cr4, %0" + : "=r" (Value) + : + : "memory"); break; default: /* Invalid control register set */ @@ -419,43 +419,43 @@ ArReadDebugRegister(IN USHORT DebugRegister) { case 0: /* Read value from DR0 */ - asm volatile("mov %%dr0, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr0, %0" + : "=r" (Value)); break; case 1: /* Read value from DR1 */ - asm volatile("mov %%dr1, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr1, %0" + : "=r" (Value)); break; case 2: /* Read value from DR2 */ - asm volatile("mov %%dr2, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr2, %0" + : "=r" (Value)); break; case 3: /* Read value from DR3 */ - asm volatile("mov %%dr3, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr3, %0" + : "=r" (Value)); break; case 4: /* Read value from DR4 */ - asm volatile("mov %%dr4, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr4, %0" + : "=r" (Value)); break; case 5: /* Read value from DR5 */ - asm volatile("mov %%dr5, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr5, %0" + : "=r" (Value)); break; case 6: /* Read value from DR6 */ - asm volatile("mov %%dr6, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr6, %0" + : "=r" (Value)); break; case 7: /* Read value from DR7 */ - asm volatile("mov %%dr7, %0" - : "=r" (Value)); + __asm__ volatile("mov %%dr7, %0" + : "=r" (Value)); break; default: /* Invalid debug register set */ @@ -482,9 +482,9 @@ ULONG ArReadFSDualWord(ULONG Offset) { ULONG Value; - asm volatile("movl %%fs:%a[Offset], %k[Value]" - : [Value] "=r" (Value) - : [Offset] "ir" (Offset)); + __asm__ volatile("movl %%fs:%a[Offset], %k[Value]" + : [Value] "=r" (Value) + : [Offset] "ir" (Offset)); return Value; } @@ -504,9 +504,9 @@ ArReadModelSpecificRegister(IN ULONG Register) { ULONGLONG Value; - asm volatile("rdmsr" - : "=A" (Value) - : "c" (Register)); + __asm__ volatile("rdmsr" + : "=A" (Value) + : "c" (Register)); return Value; } @@ -537,8 +537,8 @@ ArReadTimeStampCounter(VOID) { ULONGLONG Value; - asm volatile("rdtsc" - : "=A" (Value)); + __asm__ volatile("rdtsc" + : "=A" (Value)); return Value; } @@ -554,10 +554,10 @@ XTCDECL VOID ArReadWriteBarrier(VOID) { - asm volatile("" - : - : - : "memory"); + __asm__ volatile("" + : + : + : "memory"); } /** @@ -571,7 +571,7 @@ XTCDECL VOID ArSetInterruptFlag(VOID) { - asm volatile("sti"); + __asm__ volatile("sti"); } /** @@ -588,10 +588,10 @@ XTCDECL VOID ArStoreGlobalDescriptorTable(OUT PVOID Destination) { - asm volatile("sgdt %0" - : "=m" (*(PSHORT)Destination) - : - : "memory"); + __asm__ volatile("sgdt %0" + : "=m" (*(PSHORT)Destination) + : + : "memory"); } /** @@ -608,10 +608,10 @@ XTCDECL VOID ArStoreInterruptDescriptorTable(OUT PVOID Destination) { - asm volatile("sidt %0" - : "=m" (*(PSHORT)Destination) - : - : "memory"); + __asm__ volatile("sidt %0" + : "=m" (*(PSHORT)Destination) + : + : "memory"); } /** 
@@ -628,10 +628,10 @@ XTCDECL VOID ArStoreLocalDescriptorTable(OUT PVOID Destination) { - asm volatile("sldt %0" - : "=m" (*(PSHORT)Destination) - : - : "memory"); + __asm__ volatile("sldt %0" + : "=m" (*(PSHORT)Destination) + : + : "memory"); } /** @@ -655,28 +655,28 @@ ArStoreSegment(IN USHORT Segment, switch(Segment) { case SEGMENT_CS: - asm volatile("movl %%cs, %0" - : "=r" (*(PUINT)Destination)); + __asm__ volatile("movl %%cs, %0" + : "=r" (*(PUINT)Destination)); break; case SEGMENT_DS: - asm volatile("movl %%ds, %0" - : "=r" (*(PUINT)Destination)); + __asm__ volatile("movl %%ds, %0" + : "=r" (*(PUINT)Destination)); break; case SEGMENT_ES: - asm volatile("movl %%es, %0" - : "=r" (*(PUINT)Destination)); + __asm__ volatile("movl %%es, %0" + : "=r" (*(PUINT)Destination)); break; case SEGMENT_FS: - asm volatile("movl %%fs, %0" - : "=r" (*(PUINT)Destination)); + __asm__ volatile("movl %%fs, %0" + : "=r" (*(PUINT)Destination)); break; case SEGMENT_GS: - asm volatile("movl %%gs, %0" - : "=r" (*(PUINT)Destination)); + __asm__ volatile("movl %%gs, %0" + : "=r" (*(PUINT)Destination)); break; case SEGMENT_SS: - asm volatile("movl %%ss, %0" - : "=r" (*(PUINT)Destination)); + __asm__ volatile("movl %%ss, %0" + : "=r" (*(PUINT)Destination)); break; default: Destination = NULL; @@ -698,10 +698,10 @@ XTCDECL VOID ArStoreTaskRegister(OUT PVOID Destination) { - asm volatile("str %0" - : "=m" (*(PULONG)Destination) - : - : "memory"); + __asm__ volatile("str %0" + : "=m" (*(PULONG)Destination) + : + : "memory"); } /** @@ -727,31 +727,31 @@ ArWriteControlRegister(IN USHORT ControlRegister, { case 0: /* Write value to CR0 */ - asm volatile("mov %0, %%cr0" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%cr0" + : + : "r" (Value) + : "memory"); break; case 2: /* Write value to CR2 */ - asm volatile("mov %0, %%cr2" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%cr2" + : + : "r" (Value) + : "memory"); break; case 3: /* Write value to CR3 */ - asm volatile("mov %0, %%cr3" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%cr3" + : + : "r" (Value) + : "memory"); break; case 4: /* Write value to CR4 */ - asm volatile("mov %0, %%cr4" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%cr4" + : + : "r" (Value) + : "memory"); break; } } @@ -779,52 +779,52 @@ ArWriteDebugRegister(IN USHORT DebugRegister, { case 0: /* Write value to DR0 */ - asm volatile("mov %0, %%dr0" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr0" + : + : "r" (Value) + : "memory"); case 1: /* Write value to DR1 */ - asm volatile("mov %0, %%dr1" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr1" + : + : "r" (Value) + : "memory"); case 2: /* Write value to DR2 */ - asm volatile("mov %0, %%dr2" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr2" + : + : "r" (Value) + : "memory"); case 3: /* Write value to DR3 */ - asm volatile("mov %0, %%dr3" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr3" + : + : "r" (Value) + : "memory"); case 4: /* Write value to DR4 */ - asm volatile("mov %0, %%dr4" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr4" + : + : "r" (Value) + : "memory"); case 5: /* Write value to DR5 */ - asm volatile("mov %0, %%dr5" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr5" + : + : "r" (Value) + : "memory"); case 6: /* Write value to DR6 */ - asm volatile("mov %0, %%dr6" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr6" + : + : "r" (Value) + : 
"memory"); case 7: /* Write value to DR7 */ - asm volatile("mov %0, %%dr7" - : - : "r" (Value) - : "memory"); + __asm__ volatile("mov %0, %%dr7" + : + : "r" (Value) + : "memory"); } } @@ -842,10 +842,10 @@ XTCDECL VOID ArWriteEflagsRegister(IN UINT_PTR Value) { - asm volatile("push %0\n" - "popf" - : - : "rim" (Value)); + __asm__ volatile("push %0\n" + "popf" + : + : "rim" (Value)); } /** @@ -866,10 +866,10 @@ VOID ArWriteModelSpecificRegister(IN ULONG Register, IN ULONGLONG Value) { - asm volatile("wrmsr" - : - : "c" (Register), - "A" (Value)); + __asm__ volatile("wrmsr" + : + : "c" (Register), + "A" (Value)); } /** @@ -883,8 +883,8 @@ XTCDECL VOID ArYieldProcessor(VOID) { - asm volatile("pause" - : - : - : "memory"); + __asm__ volatile("pause" + : + : + : "memory"); } diff --git a/xtoskrnl/hl/amd64/ioport.c b/xtoskrnl/hl/amd64/ioport.c index 6ba8609..1ec9adc 100644 --- a/xtoskrnl/hl/amd64/ioport.c +++ b/xtoskrnl/hl/amd64/ioport.c @@ -24,9 +24,9 @@ UCHAR HlIoPortInByte(IN USHORT Port) { UCHAR Value; - asm volatile("inb %1, %0" - : "=a" (Value) - : "Nd" (Port)); + __asm__ volatile("inb %1, %0" + : "=a" (Value) + : "Nd" (Port)); return Value; } @@ -45,9 +45,9 @@ ULONG HlIoPortInLong(IN USHORT Port) { ULONG Value; - asm volatile("inl %1, %0" - : "=a" (Value) - : "Nd" (Port)); + __asm__ volatile("inl %1, %0" + : "=a" (Value) + : "Nd" (Port)); return Value; } @@ -66,9 +66,9 @@ USHORT HlIoPortInShort(IN USHORT Port) { USHORT Value; - asm volatile("inw %1, %0" - : "=a" (Value) - : "Nd" (Port)); + __asm__ volatile("inw %1, %0" + : "=a" (Value) + : "Nd" (Port)); return Value; } @@ -90,10 +90,10 @@ VOID HlIoPortOutByte(IN USHORT Port, IN UCHAR Value) { - asm volatile("outb %0, %1" - : - : "a" (Value), - "Nd" (Port)); + __asm__ volatile("outb %0, %1" + : + : "a" (Value), + "Nd" (Port)); } /** @@ -114,10 +114,10 @@ VOID HlIoPortOutLong(IN USHORT Port, IN ULONG Value) { - asm volatile("outl %0, %1" - : - : "a" (Value), - "Nd" (Port)); + __asm__ volatile("outl %0, %1" + : + : "a" (Value), + "Nd" (Port)); } /** @@ -138,8 +138,8 @@ VOID HlIoPortOutShort(IN USHORT Port, IN USHORT Value) { - asm volatile("outw %0, %1" - : - : "a" (Value), - "Nd" (Port)); + __asm__ volatile("outw %0, %1" + : + : "a" (Value), + "Nd" (Port)); } diff --git a/xtoskrnl/hl/i686/ioport.c b/xtoskrnl/hl/i686/ioport.c index 7f6b5d7..fa6baf9 100644 --- a/xtoskrnl/hl/i686/ioport.c +++ b/xtoskrnl/hl/i686/ioport.c @@ -24,9 +24,9 @@ UCHAR HlIoPortInByte(IN USHORT Port) { UCHAR Value; - asm volatile("inb %1, %0" - : "=a" (Value) - : "Nd" (Port)); + __asm__ volatile("inb %1, %0" + : "=a" (Value) + : "Nd" (Port)); return Value; } @@ -45,9 +45,9 @@ ULONG HlIoPortInLong(IN USHORT Port) { ULONG Value; - asm volatile("inl %1, %0" - : "=a" (Value) - : "Nd" (Port)); + __asm__ volatile("inl %1, %0" + : "=a" (Value) + : "Nd" (Port)); return Value; } @@ -66,9 +66,9 @@ USHORT HlIoPortInShort(IN USHORT Port) { USHORT Value; - asm volatile("inw %1, %0" - : "=a" (Value) - : "Nd" (Port)); + __asm__ volatile("inw %1, %0" + : "=a" (Value) + : "Nd" (Port)); return Value; } @@ -90,10 +90,10 @@ VOID HlIoPortOutByte(IN USHORT Port, IN UCHAR Value) { - asm volatile("outb %0, %1" - : - : "a" (Value), - "Nd" (Port)); + __asm__ volatile("outb %0, %1" + : + : "a" (Value), + "Nd" (Port)); } /** @@ -114,10 +114,10 @@ VOID HlIoPortOutLong(IN USHORT Port, IN ULONG Value) { - asm volatile("outl %0, %1" - : - : "a" (Value), - "Nd" (Port)); + __asm__ volatile("outl %0, %1" + : + : "a" (Value), + "Nd" (Port)); } /** @@ -138,8 +138,8 @@ VOID HlIoPortOutShort(IN USHORT Port, 
IN USHORT Value) { - asm volatile("outw %0, %1" - : - : "a" (Value), - "Nd" (Port)); + __asm__ volatile("outw %0, %1" + : + : "a" (Value), + "Nd" (Port)); } diff --git a/xtoskrnl/ke/amd64/krnlinit.c b/xtoskrnl/ke/amd64/krnlinit.c index de4bce0..986a403 100644 --- a/xtoskrnl/ke/amd64/krnlinit.c +++ b/xtoskrnl/ke/amd64/krnlinit.c @@ -122,13 +122,13 @@ VOID KepSwitchBootStack(IN ULONG_PTR Stack) { /* Discard old stack frame, switch stack and jump to KepStartKernel() */ - asm volatile("mov %0, %%rdx\n" - "xor %%rbp, %%rbp\n" - "mov %%rdx, %%rsp\n" - "sub %1, %%rsp\n" - "jmp KepStartKernel\n" - : - : "m" (Stack), - "i" (FLOATING_SAVE_AREA_SIZE | KEXCEPTION_FRAME_SIZE | KSWITCH_FRAME_SIZE | KRETURN_ADDRESS_SIZE), - "p" (KepStartKernel)); + __asm__ volatile("mov %0, %%rdx\n" + "xor %%rbp, %%rbp\n" + "mov %%rdx, %%rsp\n" + "sub %1, %%rsp\n" + "jmp KepStartKernel\n" + : + : "m" (Stack), + "i" (FLOATING_SAVE_AREA_SIZE | KEXCEPTION_FRAME_SIZE | KSWITCH_FRAME_SIZE | KRETURN_ADDRESS_SIZE), + "p" (KepStartKernel)); } diff --git a/xtoskrnl/ke/i686/krnlinit.c b/xtoskrnl/ke/i686/krnlinit.c index 2833c26..a671565 100644 --- a/xtoskrnl/ke/i686/krnlinit.c +++ b/xtoskrnl/ke/i686/krnlinit.c @@ -122,15 +122,15 @@ VOID KepSwitchBootStack(IN ULONG_PTR Stack) { /* Discard old stack frame, switch stack, make space for NPX and jump to KepStartKernel() */ - asm volatile("mov %0, %%edx\n" - "xor %%ebp, %%ebp\n" - "mov %%edx, %%esp\n" - "sub %1, %%esp\n" - "push %2\n" - "jmp _KepStartKernel@0\n" - : - : "m" (Stack), - "i" (KTRAP_FRAME_ALIGN | KTRAP_FRAME_SIZE | NPX_FRAME_SIZE | KRETURN_ADDRESS_SIZE), - "i" (CR0_EM | CR0_MP | CR0_TS), - "p" (KepStartKernel)); + __asm__ volatile("mov %0, %%edx\n" + "xor %%ebp, %%ebp\n" + "mov %%edx, %%esp\n" + "sub %1, %%esp\n" + "push %2\n" + "jmp _KepStartKernel@0\n" + : + : "m" (Stack), + "i" (KTRAP_FRAME_ALIGN | KTRAP_FRAME_SIZE | NPX_FRAME_SIZE | KRETURN_ADDRESS_SIZE), + "i" (CR0_EM | CR0_MP | CR0_TS), + "p" (KepStartKernel)); } diff --git a/xtoskrnl/mm/amd64/pages.c b/xtoskrnl/mm/amd64/pages.c index 48c1184..7471ec5 100644 --- a/xtoskrnl/mm/amd64/pages.c +++ b/xtoskrnl/mm/amd64/pages.c @@ -27,16 +27,16 @@ VOID MmZeroPages(IN PVOID Address, IN ULONG Size) { - asm volatile("xor %%rax, %%rax\n" - "mov %0, %%rdi\n" - "mov %1, %%ecx\n" - "shr $3, %%ecx\n" - "rep stosq\n" - : - : "m" (Address), - "m" (Size) - : "rax", - "rdi", - "ecx", - "memory"); + __asm__ volatile("xor %%rax, %%rax\n" + "mov %0, %%rdi\n" + "mov %1, %%ecx\n" + "shr $3, %%ecx\n" + "rep stosq\n" + : + : "m" (Address), + "m" (Size) + : "rax", + "rdi", + "ecx", + "memory"); } diff --git a/xtoskrnl/mm/i686/pages.c b/xtoskrnl/mm/i686/pages.c index 073749a..b0b4fe9 100644 --- a/xtoskrnl/mm/i686/pages.c +++ b/xtoskrnl/mm/i686/pages.c @@ -27,12 +27,12 @@ VOID MmZeroPages(IN PVOID Address, IN ULONG Size) { - asm volatile("xor %%eax, %%eax\n" - "rep stosb" - : "=D"(Address), - "=c"(Size) - : "0"(Address), - "1"(Size), - "a"(0) - : "memory"); + __asm__ volatile("xor %%eax, %%eax\n" + "rep stosb" + : "=D"(Address), + "=c"(Size) + : "0"(Address), + "1"(Size), + "a"(0) + : "memory"); } -- 2.50.1 From e57985da8d3c45cbea3eb8ddc9d6c66121f66473 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Fri, 15 Aug 2025 20:49:25 +0200 Subject: [PATCH 02/34] Rename MM_LA57_SHIFT to MM_P5I_SHIFT for consistency --- sdk/xtdk/amd64/mmtypes.h | 2 +- xtldr/arch/amd64/memory.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/xtdk/amd64/mmtypes.h b/sdk/xtdk/amd64/mmtypes.h index a961e37..c5f8ac7 100644 --- a/sdk/xtdk/amd64/mmtypes.h 
+++ b/sdk/xtdk/amd64/mmtypes.h @@ -30,7 +30,7 @@ #define MM_PDI_SHIFT 21 #define MM_PPI_SHIFT 30 #define MM_PXI_SHIFT 39 -#define MM_LA57_SHIFT 48 +#define MM_P5I_SHIFT 48 /* Number of PTEs per page */ #define MM_PTE_PER_PAGE 512 diff --git a/xtldr/arch/amd64/memory.c b/xtldr/arch/amd64/memory.c index 1b6a6c6..9133174 100644 --- a/xtldr/arch/amd64/memory.c +++ b/xtldr/arch/amd64/memory.c @@ -167,7 +167,7 @@ BlMapPage(IN PXTBL_PAGE_MAPPING PageMap, while(NumberOfPages > 0) { /* Calculate the indices in the various Page Tables from the virtual address */ - Pml5Entry = (VirtualAddress & ((ULONGLONG)0x1FF << MM_LA57_SHIFT)) >> MM_LA57_SHIFT; + Pml5Entry = (VirtualAddress & ((ULONGLONG)0x1FF << MM_P5I_SHIFT)) >> MM_P5I_SHIFT; Pml4Entry = (VirtualAddress & ((ULONGLONG)0x1FF << MM_PXI_SHIFT)) >> MM_PXI_SHIFT; Pml3Entry = (VirtualAddress & ((ULONGLONG)0x1FF << MM_PPI_SHIFT)) >> MM_PPI_SHIFT; Pml2Entry = (VirtualAddress & ((ULONGLONG)0x1FF << MM_PDI_SHIFT)) >> MM_PDI_SHIFT; -- 2.50.1 From 3ca6d04f6b93ad08bb326f05ee74e9ab369bad72 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sat, 16 Aug 2025 00:22:21 +0200 Subject: [PATCH 03/34] Add definitions for 5-level paging and refactor constants --- sdk/xtdk/amd64/mmtypes.h | 19 +++++++++++++------ xtldr/modules/xtos_o/amd64/memory.c | 2 +- xtldr/modules/xtos_o/i686/memory.c | 2 +- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/sdk/xtdk/amd64/mmtypes.h b/sdk/xtdk/amd64/mmtypes.h index c5f8ac7..7febe4c 100644 --- a/sdk/xtdk/amd64/mmtypes.h +++ b/sdk/xtdk/amd64/mmtypes.h @@ -18,11 +18,18 @@ #define MM_PAGE_SHIFT 12L #define MM_PAGE_SIZE 4096 -/* Page directory and page base addresses */ -#define MM_PTE_BASE 0xFFFFF68000000000UI64 -#define MM_PDE_BASE 0xFFFFF6FB40000000UI64 -#define MM_PPE_BASE 0xFFFFF6FB7DA00000UI64 -#define MM_PXE_BASE 0xFFFFF6FB7DBED000UI64 +/* Page directory and page base addresses for 4-level paging */ +#define MM_PTE_BASE 0xFFFFF68000000000ULL +#define MM_PDE_BASE 0xFFFFF6FB40000000ULL +#define MM_PPE_BASE 0xFFFFF6FB7DA00000ULL +#define MM_PXE_BASE 0xFFFFF6FB7DBED000ULL + +/* Page directory and page base addresses for 5-level paging */ +#define MM_PTE_LA57_BASE 0xFFFFF68000000000ULL +#define MM_PDE_LA57_BASE 0xFFFFF78000000000ULL +#define MM_PPE_LA57_BASE 0xFFFFF78800000000ULL +#define MM_PXE_LA57_BASE 0xFFFFF78840000000ULL +#define MM_P5E_LA57_BASE 0xFFFFF78840200000ULL /* PTE shift values */ #define MM_PTE_SHIFT 3 @@ -54,7 +61,7 @@ #define MM_HARDWARE_VA_START 0xFFFFFFFFFFC00000ULL /* Maximum physical address used by HAL allocations */ -#define MM_MAXIMUM_PHYSICAL_ADDRESS 0x00000000FFFFFFFF +#define MM_MAXIMUM_PHYSICAL_ADDRESS 0x00000000FFFFFFFFULL /* Page size enumeration list */ typedef enum _PAGE_SIZE diff --git a/xtldr/modules/xtos_o/amd64/memory.c b/xtldr/modules/xtos_o/amd64/memory.c index 36d9890..91349c8 100644 --- a/xtldr/modules/xtos_o/amd64/memory.c +++ b/xtldr/modules/xtos_o/amd64/memory.c @@ -185,7 +185,7 @@ XtEnablePaging(IN PXTBL_PAGE_MAPPING PageMap) EFI_STATUS Status; /* Build page map */ - Status = XtLdrProtocol->Memory.BuildPageMap(PageMap, 0xFFFFF6FB7DBED000); + Status = XtLdrProtocol->Memory.BuildPageMap(PageMap, (PageMap->PageMapLevel > 4) ? 
MM_P5E_LA57_BASE : MM_PXE_BASE); if(Status != STATUS_EFI_SUCCESS) { /* Failed to build page map */ diff --git a/xtldr/modules/xtos_o/i686/memory.c b/xtldr/modules/xtos_o/i686/memory.c index 50f4194..f4fa319 100644 --- a/xtldr/modules/xtos_o/i686/memory.c +++ b/xtldr/modules/xtos_o/i686/memory.c @@ -126,7 +126,7 @@ XtEnablePaging(IN PXTBL_PAGE_MAPPING PageMap) EFI_STATUS Status; /* Build page map */ - Status = XtLdrProtocol->Memory.BuildPageMap(PageMap, 0xC0000000); + Status = XtLdrProtocol->Memory.BuildPageMap(PageMap, MM_PTE_BASE); if(Status != STATUS_EFI_SUCCESS) { /* Failed to build page map */ -- 2.50.1 From 7e08dc286e2c5a9c957ea25c0712e347d907b951 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sat, 16 Aug 2025 00:29:20 +0200 Subject: [PATCH 04/34] Separate types for legacy (PML2) and PAE (PML3) paging --- sdk/xtdk/i686/mmtypes.h | 226 ++++++++++++++++++++++++++++++---------- 1 file changed, 172 insertions(+), 54 deletions(-) diff --git a/sdk/xtdk/i686/mmtypes.h b/sdk/xtdk/i686/mmtypes.h index eb97748..9f66077 100644 --- a/sdk/xtdk/i686/mmtypes.h +++ b/sdk/xtdk/i686/mmtypes.h @@ -28,7 +28,11 @@ #define MM_PDI_SHIFT 21 #define MM_PPI_SHIFT 30 +/* Page directory and page base legacy address */ +#define MM_PDE_LEGACY_BASE 0xC0300000 + /* PTE legacy shift values */ +#define MM_PTE_LEGACY_SHIFT 2 #define MM_PDI_LEGACY_SHIFT 22 /* Minimum number of physical pages needed by the system */ @@ -58,8 +62,26 @@ typedef enum _PAGE_SIZE Size4M } PAGE_SIZE, *PPAGE_SIZE; -/* Page Table entry structure definition (with PAE support) */ -typedef struct _HARDWARE_PTE +/* Legacy Page Table entry structure definition (PML2) */ +typedef struct _HARDWARE_LEGACY_PTE +{ + ULONG Valid:1; + ULONG Writable:1; + ULONG Owner:1; + ULONG WriteThrough:1; + ULONG CacheDisable:1; + ULONG Accessed:1; + ULONG Dirty:1; + ULONG LargePage:1; + ULONG Global:1; + ULONG CopyOnWrite:1; + ULONG Prototype:1; + ULONG Reserved0:1; + ULONG PageFrameNumber:20; +} HARDWARE_LEGACY_PTE, *PHARDWARE_LEGACY_PTE; + +/* Page Table entry structure definition (PML3) */ +typedef struct _HARDWARE_MODERN_PTE { ULONGLONG Valid:1; ULONGLONG Writable:1; @@ -77,10 +99,116 @@ typedef struct _HARDWARE_PTE ULONGLONG Reserved1:14; ULONGLONG SoftwareWsIndex:11; ULONGLONG NoExecute:1; +} HARDWARE_MODERN_PTE, *PHARDWARE_MODERN_PTE; + +/* Generic Page Table entry union to abstract PML2 and PML3 formats */ +typedef union _HARDWARE_PTE +{ + ULONGLONG Long; + HARDWARE_LEGACY_PTE Pml2; + HARDWARE_MODERN_PTE Pml3; } HARDWARE_PTE, *PHARDWARE_PTE; -/* Page Table Entry on PAE enabled system */ -typedef struct _MMPTE_HARDWARE +/* Page map information structure definition */ +typedef struct _MMPAGEMAP_INFO +{ + BOOLEAN Xpa; + ULONG PdeBase; + ULONG PdiShift; + ULONG PteShift; +} MMPAGEMAP_INFO, *PMMPAGEMAP_INFO; + +/* Legacy Page Table Entry hardware structure definition (PML2) */ +typedef struct _MMPML2_PTE_HARDWARE +{ + ULONG Valid:1; + ULONG Writable:1; + ULONG Owner:1; + ULONG WriteThrough:1; + ULONG CacheDisable:1; + ULONG Accessed:1; + ULONG Dirty:1; + ULONG LargePage:1; + ULONG Global:1; + ULONG CopyOnWrite:1; + ULONG Prototype:1; + ULONG Write:1; + ULONG PageFrameNumber:20; +} MMPML2_PTE_HARDWARE, *PMMPML2_PTE_HARDWARE; + +/* Legacy Page Table Entry list structure definition (PML2) */ +typedef struct _MMPML2_PTE_LIST +{ + ULONG Valid:1; + ULONG OneEntry:1; + ULONG Reserved0:8; + ULONG Prototype:1; + ULONG Reserved1:1; + ULONG NextEntry:20; +} MMPML2_PTE_LIST, *PMMPML2_PTE_LIST; + +/* Legacy Page Table Entry subsection structure definition (PML2) */ +typedef 
struct _MMPML2_PTE_PROTOTYPE +{ + ULONG Valid:1; + ULONG ProtoAddressLow:7; + ULONG ReadOnly:1; + ULONG WhichPool:1; + ULONG Prototype:1; + ULONG ProtoAddressHigh:21; +} MMPML2_PTE_PROTOTYPE, *PMMPML2_PTE_PROTOTYPE; + +/* Legacy Page Table Entry software structure definition (PML2) */ +typedef struct _MMPML2_PTE_SOFTWARE +{ + ULONG Valid:1; + ULONG PageFileLow:4; + ULONG Protection:5; + ULONG Prototype:1; + ULONG Transition:1; + ULONG PageFileHigh:20; +} MMPML2_PTE_SOFTWARE, *PMMPML2_PTE_SOFTWARE; + +/* Legacy Page Table Entry subsection structure definition (PML2) */ +typedef struct _MMPML2_PTE_SUBSECTION +{ + ULONG Valid:1; + ULONG SubsectionAddressLow:4; + ULONG Protection:5; + ULONG Prototype:1; + ULONG SubsectionAddressHigh:20; + ULONG WhichPool:1; +} MMPML2_PTE_SUBSECTION, *PMMPML2_PTE_SUBSECTION; + +/* Legacy Page Table Entry transition structure definition (PML2) */ +typedef struct _MMPML2_PTE_TRANSITION +{ + ULONG Valid:1; + ULONG Write:1; + ULONG Owner:1; + ULONG WriteThrough:1; + ULONG CacheDisable:1; + ULONG Protection:5; + ULONG Prototype:1; + ULONG Transition:1; + ULONG PageFrameNumber:20; +} MMPML2_PTE_TRANSITION, *PMMPML2_PTE_TRANSITION; + +/* Legacy Page Table Entry union definition (PML2) */ +typedef union _MMPML2_PTE +{ + ULONG Long; + HARDWARE_PTE Flush; + MMPML2_PTE_HARDWARE Hard; + MMPML2_PTE_PROTOTYPE Proto; + MMPML2_PTE_SOFTWARE Soft; + MMPML2_PTE_TRANSITION Trans; + MMPML2_PTE_SUBSECTION Subsect; + MMPML2_PTE_LIST List; +} MMPML2_PTE, *PMMPML2_PTE; + +/* Page Table Entry hardware structure definition (PML3) */ +typedef struct _MMPML3_PTE_HARDWARE { ULONGLONG Valid:1; ULONGLONG Writable:1; @@ -95,59 +223,59 @@ typedef struct _MMPTE_HARDWARE ULONGLONG Prototype:1; ULONGLONG Write:1; ULONGLONG PageFrameNumber:26; - ULONGLONG Reserved1:25; + ULONGLONG Reserved0:25; ULONGLONG NoExecute:1; -} MMPTE_HARDWARE, *PMMPTE_HARDWARE; +} MMPML3_PTE_HARDWARE, *PMMPML3_PTE_HARDWARE; -/* Page Table Entry list structure definition (with PAE support) */ -typedef struct _MMPTE_LIST +/* Page Table Entry list structure definition (PML3) */ +typedef struct _MMPML3_PTE_LIST { ULONGLONG Valid:1; ULONGLONG OneEntry:1; - ULONGLONG Reserved1:8; + ULONGLONG Reserved0:8; ULONGLONG Prototype:1; - ULONGLONG Reserved2:21; + ULONGLONG Reserved1:21; ULONGLONG NextEntry:32; -} MMPTE_LIST, *PMMPTE_LIST; +} MMPML3_PTE_LIST, *PMMPML3_PTE_LIST; -/* Page Table Entry subsection structure definition (with PAE support) */ -typedef struct _MMPTE_PROTOTYPE +/* Page Table Entry subsection structure definition (PML3) */ +typedef struct _MMPML3_PTE_PROTOTYPE { ULONGLONG Valid:1; - ULONGLONG Reserved1:7; + ULONGLONG Reserved0:7; ULONGLONG ReadOnly:1; - ULONGLONG Reserved2:1; + ULONGLONG Reserved1:1; ULONGLONG Prototype:1; ULONGLONG Protection:5; - ULONGLONG Reserved3:16; + ULONGLONG Reserved2:16; ULONGLONG ProtoAddress:32; -} MMPTE_PROTOTYPE, *PMMPTE_PROTOTYPE; +} MMPML3_PTE_PROTOTYPE, *PMMPML3_PTE_PROTOTYPE; -/* Page Table Entry software structure definition (with PAE support) */ -typedef struct _MMPTE_SOFTWARE +/* Page Table Entry software structure definition (PML3) */ +typedef struct _MMPML3_PTE_SOFTWARE { ULONGLONG Valid:1; ULONGLONG PageFileLow:4; ULONGLONG Protection:5; ULONGLONG Prototype:1; ULONGLONG Transition:1; - ULONGLONG Reserved1:20; + ULONGLONG Reserved0:20; ULONGLONG PageFileHigh:32; -} MMPTE_SOFTWARE, *PMMPTE_SOFTWARE; +} MMPML3_PTE_SOFTWARE, *PMMPML3_PTE_SOFTWARE; -/* Page Table Entry subsection structure definition (with PAE support) */ -typedef struct _MMPTE_SUBSECTION +/* Page Table Entry 
subsection structure definition (PML3) */ +typedef struct _MMPML3_PTE_SUBSECTION { ULONGLONG Valid:1; - ULONGLONG Reserved1:4; + ULONGLONG Reserved0:4; ULONGLONG Protection:5; ULONGLONG Prototype:1; - ULONGLONG Reserved2:21; + ULONGLONG Reserved1:21; ULONGLONG SubsectionAddress:32; -} MMPTE_SUBSECTION, *PMMPTE_SUBSECTION; +} MMPML3_PTE_SUBSECTION, *PMMPML3_PTE_SUBSECTION; -/* Page Table Entry transition structure definition (with PAE support) */ -typedef struct _MMPTE_TRANSITION +/* Page Table Entry transition structure definition (PML3) */ +typedef struct _MMPML3_PTE_TRANSITION { ULONGLONG Valid:1; ULONGLONG Write:1; @@ -159,38 +287,28 @@ typedef struct _MMPTE_TRANSITION ULONGLONG Transition:1; ULONGLONG PageFrameNumber:26; ULONGLONG Unused:26; -} MMPTE_TRANSITION, *PMMPTE_TRANSITION; +} MMPML3_PTE_TRANSITION, *PMMPML3_PTE_TRANSITION; -/* Page Table Entry structure definition (with PAE support) */ -typedef union _MMPTE +/* Page Table Entry union definition (PML3) */ +typedef union _MMPML3_PTE { ULONGLONG Long; HARDWARE_PTE Flush; - MMPTE_HARDWARE Hardware; - MMPTE_PROTOTYPE Prototype; - MMPTE_SOFTWARE Software; - MMPTE_TRANSITION Transition; - MMPTE_SUBSECTION Subsection; - MMPTE_LIST List; -} MMPTE, *PMMPTE; + MMPML3_PTE_HARDWARE Hardware; + MMPML3_PTE_PROTOTYPE Prototype; + MMPML3_PTE_SOFTWARE Software; + MMPML3_PTE_TRANSITION Transition; + MMPML3_PTE_SUBSECTION Subsection; + MMPML3_PTE_LIST List; +} MMPML3_PTE, *PMMPML3_PTE; -/* Legacy Page Table entry structure definition (without PAE support) */ -typedef struct _HARDWARE_LEGACY_PTE +/* Generic Page Table Entry union to abstract PML2 and PML3 formats */ +typedef union _MMPTE { - ULONG Valid:1; - ULONG Writable:1; - ULONG Owner:1; - ULONG WriteThrough:1; - ULONG CacheDisable:1; - ULONG Accessed:1; - ULONG Dirty:1; - ULONG LargePage:1; - ULONG Global:1; - ULONG CopyOnWrite:1; - ULONG Prototype:1; - ULONG Reserved0:1; - ULONG PageFrameNumber:20; -} HARDWARE_LEGACY_PTE, *PHARDWARE_LEGACY_PTE; + ULONGLONG Long; + MMPML2_PTE Pml2; + MMPML3_PTE Pml3; +} MMPTE, *PMMPTE; /* Page Frame Number structure definition */ typedef struct _MMPFN -- 2.50.1 From 22f81a106bda1da19700a89f37d4720e91e7da54 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sat, 16 Aug 2025 00:33:18 +0200 Subject: [PATCH 05/34] Update forward declarations for PML2/PML3 types --- sdk/xtdk/i686/xtstruct.h | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/sdk/xtdk/i686/xtstruct.h b/sdk/xtdk/i686/xtstruct.h index 3fd7ff5..a78bc76 100644 --- a/sdk/xtdk/i686/xtstruct.h +++ b/sdk/xtdk/i686/xtstruct.h @@ -40,7 +40,7 @@ typedef struct _FN_SAVE_FORMAT FN_SAVE_FORMAT, *PFN_SAVE_FORMAT; typedef struct _FX_SAVE_AREA FX_SAVE_AREA, *PFX_SAVE_AREA; typedef struct _FX_SAVE_FORMAT FX_SAVE_FORMAT, *PFX_SAVE_FORMAT; typedef struct _HARDWARE_LEGACY_PTE HARDWARE_LEGACY_PTE, *PHARDWARE_LEGACY_PTE; -typedef struct _HARDWARE_PTE HARDWARE_PTE, *PHARDWARE_PTE; +typedef struct _HARDWARE_MODERN_PTE HARDWARE_MODERN_PTE, *PHARDWARE_MODERN_PTE; typedef struct _KDESCRIPTOR KDESCRIPTOR, *PKDESCRIPTOR; typedef struct _KEXCEPTION_FRAME KEXCEPTION_FRAME, *PKEXCEPTION_FRAME; typedef struct _KGDTENTRY KGDTENTRY, *PKGDTENTRY; @@ -56,12 +56,18 @@ typedef struct _KTHREAD_INIT_FRAME KTHREAD_INIT_FRAME, *PKTHREAD_INIT_FRAME; typedef struct _KTRAP_FRAME KTRAP_FRAME, *PKTRAP_FRAME; typedef struct _KTSS KTSS, *PKTSS; typedef struct _MMPFN MMPFN, *PMMPFN; -typedef struct _MMPTE_HARDWARE MMPTE_HARDWARE, *PMMPTE_HARDWARE; -typedef struct _MMPTE_LIST MMPTE_LIST, *PMMPTE_LIST; -typedef 
struct _MMPTE_PROTOTYPE MMPTE_PROTOTYPE, *PMMPTE_PROTOTYPE; -typedef struct _MMPTE_SOFTWARE MMPTE_SOFTWARE, *PMMPTE_SOFTWARE; -typedef struct _MMPTE_SUBSECTION MMPTE_SUBSECTION, *PMMPTE_SUBSECTION; -typedef struct _MMPTE_TRANSITION MMPTE_TRANSITION, *PMMPTE_TRANSITION; +typedef struct _MMPML2_PTE_HARDWARE MMPML2_PTE_HARDWARE, *PMMPML2_PTE_HARDWARE; +typedef struct _MMPML2_PTE_LIST MMPML2_PTE_LIST, *PMMPML2_PTE_LIST; +typedef struct _MMPML2_PTE_PROTOTYPE MMPML2_PTE_PROTOTYPE, *PMMPML2_PTE_PROTOTYPE; +typedef struct _MMPML2_PTE_SOFTWARE MMPML2_PTE_SOFTWARE, *PMMPML2_PTE_SOFTWARE; +typedef struct _MMPML2_PTE_SUBSECTION MMPML2_PTE_SUBSECTION, *PMMPML2_PTE_SUBSECTION; +typedef struct _MMPML2_PTE_TRANSITION MMPML2_PTE_TRANSITION, *PMMPML2_PTE_TRANSITION; +typedef struct _MMPML3_PTE_HARDWARE MMPML3_PTE_HARDWARE, *PMMPML3_PTE_HARDWARE; +typedef struct _MMPML3_PTE_LIST MMPML3_PTE_LIST, *PMMPML3_PTE_LIST; +typedef struct _MMPML3_PTE_PROTOTYPE MMPML3_PTE_PROTOTYPE, *PMMPML3_PTE_PROTOTYPE; +typedef struct _MMPML3_PTE_SOFTWARE MMPML3_PTE_SOFTWARE, *PMMPML3_PTE_SOFTWARE; +typedef struct _MMPML3_PTE_SUBSECTION MMPML3_PTE_SUBSECTION, *PMMPML3_PTE_SUBSECTION; +typedef struct _MMPML3_PTE_TRANSITION MMPML3_PTE_TRANSITION, *PMMPML3_PTE_TRANSITION; typedef struct _THREAD_ENVIRONMENT_BLOCK THREAD_ENVIRONMENT_BLOCK, *PTHREAD_ENVIRONMENT_BLOCK; /* Unions forward references */ @@ -69,12 +75,15 @@ typedef union _APIC_BASE_REGISTER APIC_BASE_REGISTER, *PAPIC_BASE_REGISTER; typedef union _APIC_COMMAND_REGISTER APIC_COMMAND_REGISTER, *PAPIC_COMMAND_REGISTER; typedef union _APIC_LVT_REGISTER APIC_LVT_REGISTER, *PAPIC_LVT_REGISTER; typedef union _APIC_SPURIOUS_REGISTER APIC_SPURIOUS_REGISTER, *PAPIC_SPURIOUS_REGISTER; +typedef union _HARDWARE_PTE HARDWARE_PTE, *PHARDWARE_PTE; +typedef union _MMPML2_PTE MMPML2_PTE, *PMMPML2_PTE; +typedef union _MMPML3_PTE MMPML3_PTE, *PMMPML3_PTE; typedef union _MMPTE MMPDE, *PMMPDE; +typedef union _MMPTE MMPPE, *PMMPPE; typedef union _MMPTE MMPTE, *PMMPTE; typedef union _PIC_I8259_ICW1 PIC_I8259_ICW1, *PPIC_I8259_ICW1; typedef union _PIC_I8259_ICW2 PIC_I8259_ICW2, *PPIC_I8259_ICW2; typedef union _PIC_I8259_ICW3 PIC_I8259_ICW3, *PPIC_I8259_ICW3; typedef union _PIC_I8259_ICW4 PIC_I8259_ICW4, *PPIC_I8259_ICW4; - #endif /* __XTDK_I686_XTSTRUCT_H */ -- 2.50.1 From f85fe31b38fac2ac2236885759e9729ecc7ecb45 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sat, 16 Aug 2025 00:36:20 +0200 Subject: [PATCH 06/34] Adapt i686 memory mapping to new PML3 types --- xtldr/arch/i686/memory.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/xtldr/arch/i686/memory.c b/xtldr/arch/i686/memory.c index f3cb652..51f91b4 100644 --- a/xtldr/arch/i686/memory.c +++ b/xtldr/arch/i686/memory.c @@ -61,9 +61,9 @@ BlBuildPageMap(IN PXTBL_PAGE_MAPPING PageMap, /* Fill the PDPT with pointers to the Page Directories */ for(Index = 0; Index < 4; Index++) { - RtlZeroMemory(&((PHARDWARE_PTE)PageMap->PtePointer)[Index], sizeof(HARDWARE_PTE)); - ((PHARDWARE_PTE)PageMap->PtePointer)[Index].PageFrameNumber = DirectoryAddress / EFI_PAGE_SIZE; - ((PHARDWARE_PTE)PageMap->PtePointer)[Index].Valid = 1; + RtlZeroMemory(&((PHARDWARE_MODERN_PTE)PageMap->PtePointer)[Index], sizeof(HARDWARE_MODERN_PTE)); + ((PHARDWARE_MODERN_PTE)PageMap->PtePointer)[Index].PageFrameNumber = DirectoryAddress / EFI_PAGE_SIZE; + ((PHARDWARE_MODERN_PTE)PageMap->PtePointer)[Index].Valid = 1; DirectoryAddress += EFI_PAGE_SIZE; } } @@ -193,8 +193,8 @@ BlMapPage(IN PXTBL_PAGE_MAPPING PageMap, SIZE_T PageFrameNumber; 
PVOID Pml1, Pml2, Pml3; SIZE_T Pml1Entry, Pml2Entry, Pml3Entry; - PHARDWARE_PTE PmlTable; PHARDWARE_LEGACY_PTE LegacyPmlTable; + PHARDWARE_MODERN_PTE PmlTable; EFI_STATUS Status; /* Set the Page Frame Number (PFN) */ @@ -231,8 +231,8 @@ BlMapPage(IN PXTBL_PAGE_MAPPING PageMap, } /* Set the 64-bit PTE entry */ - PmlTable = (PHARDWARE_PTE)Pml1; - RtlZeroMemory(&PmlTable[Pml1Entry], sizeof(HARDWARE_PTE)); + PmlTable = (PHARDWARE_MODERN_PTE)Pml1; + RtlZeroMemory(&PmlTable[Pml1Entry], sizeof(HARDWARE_MODERN_PTE)); PmlTable[Pml1Entry].PageFrameNumber = PageFrameNumber; PmlTable[Pml1Entry].Valid = 1; PmlTable[Pml1Entry].Writable = 1; @@ -304,14 +304,14 @@ BlpGetNextPageTable(IN PXTBL_PAGE_MAPPING PageMap, ULONGLONG PmlPointer = 0; EFI_STATUS Status; PHARDWARE_LEGACY_PTE LegacyPmlTable; - PHARDWARE_PTE PmlTable; + PHARDWARE_MODERN_PTE PmlTable; BOOLEAN ValidPte = FALSE; /* Check page map level to determine PTE size */ if(PageMap->PageMapLevel >= 3) { /* 64-bit PTE for PML3 (PAE enabled) */ - PmlTable = (PHARDWARE_PTE)PageTable; + PmlTable = (PHARDWARE_MODERN_PTE)PageTable; if(PmlTable[Entry].Valid) { /* Get page frame number from page table entry */ @@ -362,7 +362,7 @@ BlpGetNextPageTable(IN PXTBL_PAGE_MAPPING PageMap, if(PageMap->PageMapLevel >= 3) { /* 64-bit PTE for PML3 (PAE enabled) */ - PmlTable = (PHARDWARE_PTE)PageTable; + PmlTable = (PHARDWARE_MODERN_PTE)PageTable; PmlTable[Entry].PageFrameNumber = Address / EFI_PAGE_SIZE; PmlTable[Entry].Valid = 1; PmlTable[Entry].Writable = 1; @@ -406,7 +406,7 @@ BlpSelfMapPml(IN PXTBL_PAGE_MAPPING PageMap, IN ULONG_PTR SelfMapAddress) { PHARDWARE_LEGACY_PTE LegacyPml; - PHARDWARE_PTE Pml; + PHARDWARE_MODERN_PTE Pml; ULONGLONG PmlIndex; ULONG Index; @@ -417,13 +417,13 @@ BlpSelfMapPml(IN PXTBL_PAGE_MAPPING PageMap, PmlIndex = (SelfMapAddress >> MM_PDI_SHIFT) & 0x1FF; /* Get Page Directory */ - Pml = (PHARDWARE_PTE)(((PHARDWARE_PTE)PageMap->PtePointer)[SelfMapAddress >> MM_PPI_SHIFT].PageFrameNumber * EFI_PAGE_SIZE); + Pml = (PHARDWARE_MODERN_PTE)(((PHARDWARE_MODERN_PTE)PageMap->PtePointer)[SelfMapAddress >> MM_PPI_SHIFT].PageFrameNumber * EFI_PAGE_SIZE); /* Add self-mapping for PML3 (PAE enabled) */ for(Index = 0; Index < 4; Index++) { - RtlZeroMemory(&Pml[PmlIndex + Index], sizeof(HARDWARE_PTE)); - Pml[PmlIndex + Index].PageFrameNumber = ((PHARDWARE_PTE)PageMap->PtePointer)[Index].PageFrameNumber; + RtlZeroMemory(&Pml[PmlIndex + Index], sizeof(HARDWARE_MODERN_PTE)); + Pml[PmlIndex + Index].PageFrameNumber = ((PHARDWARE_MODERN_PTE)PageMap->PtePointer)[Index].PageFrameNumber; Pml[PmlIndex + Index].Valid = 1; Pml[PmlIndex + Index].Writable = 1; } -- 2.50.1 From 5768d4bba6590f88376fd9525c4325c4199fb236 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sat, 16 Aug 2025 19:58:00 +0200 Subject: [PATCH 07/34] Prepare for architecture-specific paging initialization --- xtoskrnl/CMakeLists.txt | 1 + xtoskrnl/includes/amd64/mmi.h | 12 ++++++++---- xtoskrnl/includes/i686/mmi.h | 18 +++++++++++++----- xtoskrnl/ke/amd64/krnlinit.c | 3 +++ xtoskrnl/ke/i686/krnlinit.c | 3 +++ xtoskrnl/mm/amd64/init.c | 29 ++++++++++++++--------------- xtoskrnl/mm/amd64/pmap.c | 25 +++++++++++++++++++++++++ xtoskrnl/mm/i686/init.c | 29 ++++++++++++++--------------- xtoskrnl/mm/i686/pmap.c | 25 +++++++++++++++++++++++++ 9 files changed, 106 insertions(+), 39 deletions(-) create mode 100644 xtoskrnl/mm/amd64/pmap.c create mode 100644 xtoskrnl/mm/i686/pmap.c diff --git a/xtoskrnl/CMakeLists.txt b/xtoskrnl/CMakeLists.txt index d6a2f57..80a9d05 100644 --- a/xtoskrnl/CMakeLists.txt +++ 
b/xtoskrnl/CMakeLists.txt @@ -62,6 +62,7 @@ list(APPEND XTOSKRNL_SOURCE ${XTOSKRNL_SOURCE_DIR}/mm/pages.c ${XTOSKRNL_SOURCE_DIR}/mm/${ARCH}/init.c ${XTOSKRNL_SOURCE_DIR}/mm/${ARCH}/pages.c + ${XTOSKRNL_SOURCE_DIR}/mm/${ARCH}/pmap.c ${XTOSKRNL_SOURCE_DIR}/po/idle.c ${XTOSKRNL_SOURCE_DIR}/rtl/atomic.c ${XTOSKRNL_SOURCE_DIR}/rtl/bitmap.c diff --git a/xtoskrnl/includes/amd64/mmi.h b/xtoskrnl/includes/amd64/mmi.h index 9d9ab6a..d5f91e8 100644 --- a/xtoskrnl/includes/amd64/mmi.h +++ b/xtoskrnl/includes/amd64/mmi.h @@ -13,11 +13,19 @@ /* AMD64 Memory Manager routines forward references */ +XTAPI +VOID +MmInitializePageMapSupport(VOID); + XTFASTCALL VOID MmZeroPages(IN PVOID Address, IN ULONG Size); +XTAPI +BOOLEAN +MmpGetExtendedPhysicalAddressingStatus(VOID); + XTAPI PMMPTE MmpGetPdeAddress(PVOID Address); @@ -38,8 +46,4 @@ XTAPI VOID MmpInitializeArchitecture(VOID); -XTAPI -BOOLEAN -MmpMemoryExtensionEnabled(VOID); - #endif /* __XTOSKRNL_AMD64_MMI_H */ diff --git a/xtoskrnl/includes/i686/mmi.h b/xtoskrnl/includes/i686/mmi.h index 31c0c10..218361b 100644 --- a/xtoskrnl/includes/i686/mmi.h +++ b/xtoskrnl/includes/i686/mmi.h @@ -13,15 +13,27 @@ /* i686 Memory Manager routines forward references */ +XTAPI +VOID +MmInitializePageMapSupport(VOID); + XTFASTCALL VOID MmZeroPages(IN PVOID Address, IN ULONG Size); XTAPI -PMMPTE +BOOLEAN +MmpGetExtendedPhysicalAddressingStatus(VOID); + +XTAPI +PMMPDE MmpGetPdeAddress(PVOID Address); +XTAPI +PMMPPE +MmpGetPpeAddress(PVOID Address); + XTAPI PMMPTE MmpGetPteAddress(PVOID Address); @@ -30,8 +42,4 @@ XTAPI VOID MmpInitializeArchitecture(VOID); -XTAPI -BOOLEAN -MmpMemoryExtensionEnabled(VOID); - #endif /* __XTOSKRNL_I686_MMI_H */ diff --git a/xtoskrnl/ke/amd64/krnlinit.c b/xtoskrnl/ke/amd64/krnlinit.c index 986a403..610e4ef 100644 --- a/xtoskrnl/ke/amd64/krnlinit.c +++ b/xtoskrnl/ke/amd64/krnlinit.c @@ -52,6 +52,9 @@ KepInitializeMachine(VOID) /* Initialize processor */ HlInitializeProcessor(); + + /* Initialize page map support */ + MmInitializePageMapSupport(); } /** diff --git a/xtoskrnl/ke/i686/krnlinit.c b/xtoskrnl/ke/i686/krnlinit.c index a671565..6d0d725 100644 --- a/xtoskrnl/ke/i686/krnlinit.c +++ b/xtoskrnl/ke/i686/krnlinit.c @@ -52,6 +52,9 @@ KepInitializeMachine(VOID) /* Initialize processor */ HlInitializeProcessor(); + + /* Initialize page map support */ + MmInitializePageMapSupport(); } /** diff --git a/xtoskrnl/mm/amd64/init.c b/xtoskrnl/mm/amd64/init.c index f746c4d..88e27d7 100644 --- a/xtoskrnl/mm/amd64/init.c +++ b/xtoskrnl/mm/amd64/init.c @@ -9,6 +9,20 @@ #include +/** + * Detects if eXtended Physical Addressing (XPA) is enabled and initializes page map support. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MmInitializePageMapSupport(VOID) +{ + UNIMPLEMENTED; +} + /** * Gets the address of the PDE (Page Directory Entry), that maps given address. * @@ -102,18 +116,3 @@ MmpInitializeArchitecture(VOID) { UNIMPLEMENTED; } - -/** - * Checks if LA57 (PML5) is enabled. - * - * @return This routine returns TRUE if LA57 is enabled, or FALSE otherwise. - * - * @since XT 1.0 - */ -XTAPI -BOOLEAN -MmpMemoryExtensionEnabled(VOID) -{ - /* Check if LA57 (PML5) is enabled */ - return ((ArReadControlRegister(4) & CR4_LA57) != 0) ? 
TRUE : FALSE; -} diff --git a/xtoskrnl/mm/amd64/pmap.c b/xtoskrnl/mm/amd64/pmap.c new file mode 100644 index 0000000..c1149dd --- /dev/null +++ b/xtoskrnl/mm/amd64/pmap.c @@ -0,0 +1,25 @@ +/** + * PROJECT: ExectOS + * COPYRIGHT: See COPYING.md in the top level directory + * FILE: xtoskrnl/mm/amd64/pmap.c + * DESCRIPTION: Low-level support for AMD64 page map manipulation + * DEVELOPERS: Aiken Harris + */ + +#include + + +/** + * Checks if eXtended Physical Addressing (XPA) is enabled. + * + * @return This routine returns TRUE if LA57 is enabled, or FALSE otherwise. + * + * @since XT 1.0 + */ +XTAPI +BOOLEAN +MmpGetExtendedPhysicalAddressingStatus(VOID) +{ + /* Check if LA57 is enabled */ + return ((ArReadControlRegister(4) & CR4_LA57) != 0) ? TRUE : FALSE; +} diff --git a/xtoskrnl/mm/i686/init.c b/xtoskrnl/mm/i686/init.c index aed604b..fe4887e 100644 --- a/xtoskrnl/mm/i686/init.c +++ b/xtoskrnl/mm/i686/init.c @@ -9,6 +9,20 @@ #include +/** + * Detects if eXtended Physical Addressing (XPA) is enabled and initializes page map support. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MmInitializePageMapSupport(VOID) +{ + UNIMPLEMENTED; +} + /** * Gets the address of the PDE (Page Directory Entry), that maps given address. * @@ -64,18 +78,3 @@ MmpInitializeArchitecture(VOID) { UNIMPLEMENTED; } - -/** - * Checks if PAE (Physical Address Extension) is enabled. - * - * @return This routine returns TRUE if PAE is enabled, or FALSE otherwise. - * - * @since XT 1.0 - */ -XTAPI -BOOLEAN -MmpMemoryExtensionEnabled(VOID) -{ - /* Check if PAE is enabled */ - return ((ArReadControlRegister(4) & CR4_PAE) != 0) ? TRUE : FALSE; -} diff --git a/xtoskrnl/mm/i686/pmap.c b/xtoskrnl/mm/i686/pmap.c new file mode 100644 index 0000000..b7e759f --- /dev/null +++ b/xtoskrnl/mm/i686/pmap.c @@ -0,0 +1,25 @@ +/** + * PROJECT: ExectOS + * COPYRIGHT: See COPYING.md in the top level directory + * FILE: xtoskrnl/mm/i686/pmap.c + * DESCRIPTION: Low-level support for i686 page map manipulation + * DEVELOPERS: Aiken Harris + */ + +#include + + +/** + * Checks if eXtended Physical Addressing (XPA) is enabled. + * + * @return This routine returns TRUE if PAE is enabled, or FALSE otherwise. + * + * @since XT 1.0 + */ +XTAPI +BOOLEAN +MmpGetExtendedPhysicalAddressingStatus(VOID) +{ + /* Check if PAE is enabled */ + return ((ArReadControlRegister(4) & CR4_PAE) != 0) ? 
TRUE : FALSE; +} -- 2.50.1 From 1dcd3fceedb9aadecfe9436ada1fada1d2e977b8 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sat, 16 Aug 2025 20:08:12 +0200 Subject: [PATCH 08/34] Define page map information structure for both supported architectures --- sdk/xtdk/amd64/mmtypes.h | 11 +++++++++++ sdk/xtdk/amd64/xtstruct.h | 1 + sdk/xtdk/i686/mmtypes.h | 1 + sdk/xtdk/i686/xtstruct.h | 1 + 4 files changed, 14 insertions(+) diff --git a/sdk/xtdk/amd64/mmtypes.h b/sdk/xtdk/amd64/mmtypes.h index 7febe4c..f472dff 100644 --- a/sdk/xtdk/amd64/mmtypes.h +++ b/sdk/xtdk/amd64/mmtypes.h @@ -92,6 +92,17 @@ typedef struct _HARDWARE_PTE ULONGLONG NoExecute:1; } HARDWARE_PTE, *PHARDWARE_PTE; +/* Page map information structure definition */ +typedef struct _MMPAGEMAP_INFO +{ + BOOLEAN Xpa; + ULONGLONG PteBase; + ULONGLONG PdeBase; + ULONGLONG PpeBase; + ULONGLONG PxeBase; + ULONGLONG P5eBase; +} MMPAGEMAP_INFO, *PMMPAGEMAP_INFO; + /* A Page Table Entry on AMD64 system */ typedef struct _MMPTE_HARDWARE { diff --git a/sdk/xtdk/amd64/xtstruct.h b/sdk/xtdk/amd64/xtstruct.h index 282c29b..0231756 100644 --- a/sdk/xtdk/amd64/xtstruct.h +++ b/sdk/xtdk/amd64/xtstruct.h @@ -51,6 +51,7 @@ typedef struct _KSWITCH_FRAME KSWITCH_FRAME, *PKSWITCH_FRAME; typedef struct _KTHREAD_INIT_FRAME KTHREAD_INIT_FRAME, *PKTHREAD_INIT_FRAME; typedef struct _KTRAP_FRAME KTRAP_FRAME, *PKTRAP_FRAME; typedef struct _KTSS KTSS, *PKTSS; +typedef struct _MMPAGEMAP_INFO MMPAGEMAP_INFO, *PMMPAGEMAP_INFO; typedef struct _MMPFN MMPFN, *PMMPFN; typedef struct _MMPTE_HARDWARE MMPTE_HARDWARE, *PMMPTE_HARDWARE; typedef struct _MMPTE_HARDWARE_LARGEPAGE MMPTE_HARDWARE_LARGEPAGE, *PMMPTE_HARDWARE_LARGEPAGE; diff --git a/sdk/xtdk/i686/mmtypes.h b/sdk/xtdk/i686/mmtypes.h index 9f66077..0547330 100644 --- a/sdk/xtdk/i686/mmtypes.h +++ b/sdk/xtdk/i686/mmtypes.h @@ -113,6 +113,7 @@ typedef union _HARDWARE_PTE typedef struct _MMPAGEMAP_INFO { BOOLEAN Xpa; + ULONG PteBase; ULONG PdeBase; ULONG PdiShift; ULONG PteShift; diff --git a/sdk/xtdk/i686/xtstruct.h b/sdk/xtdk/i686/xtstruct.h index a78bc76..bd93426 100644 --- a/sdk/xtdk/i686/xtstruct.h +++ b/sdk/xtdk/i686/xtstruct.h @@ -55,6 +55,7 @@ typedef struct _KSWITCH_FRAME KSWITCH_FRAME, *PKSWITCH_FRAME; typedef struct _KTHREAD_INIT_FRAME KTHREAD_INIT_FRAME, *PKTHREAD_INIT_FRAME; typedef struct _KTRAP_FRAME KTRAP_FRAME, *PKTRAP_FRAME; typedef struct _KTSS KTSS, *PKTSS; +typedef struct _MMPAGEMAP_INFO MMPAGEMAP_INFO, *PMMPAGEMAP_INFO; typedef struct _MMPFN MMPFN, *PMMPFN; typedef struct _MMPML2_PTE_HARDWARE MMPML2_PTE_HARDWARE, *PMMPML2_PTE_HARDWARE; typedef struct _MMPML2_PTE_LIST MMPML2_PTE_LIST, *PMMPML2_PTE_LIST; -- 2.50.1 From 6a330e38f2feadbec513e168696b4eae243b6dca Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sat, 16 Aug 2025 20:14:18 +0200 Subject: [PATCH 09/34] Consolidate paging-related globals into MmpPageMapInfo --- xtoskrnl/includes/globals.h | 7 ++----- xtoskrnl/mm/globals.c | 7 ++----- xtoskrnl/mm/init.c | 3 --- 3 files changed, 4 insertions(+), 13 deletions(-) diff --git a/xtoskrnl/includes/globals.h b/xtoskrnl/includes/globals.h index 64de0f1..882dd1e 100644 --- a/xtoskrnl/includes/globals.h +++ b/xtoskrnl/includes/globals.h @@ -75,9 +75,6 @@ EXTERN ULONG MmNumberOfPhysicalPages; /* Old biggest free memory descriptor */ EXTERN LOADER_MEMORY_DESCRIPTOR MmOldFreeDescriptor; -/* Page Map Level */ -EXTERN ULONG MmPageMapLevel; - /* Processor structures data (THIS IS A TEMPORARY HACK) */ EXTERN UCHAR MmProcessorStructuresData[MAXIMUM_PROCESSORS][KPROCESSOR_STRUCTURES_SIZE]; @@ -87,8 +84,8 @@ 
EXTERN LOADER_MEMORY_DESCRIPTOR MmpHardwareAllocationDescriptors[MM_HARDWARE_ALL /* Live address of kernel's hardware heap */ EXTERN PVOID MmpHardwareHeapStart; -/* Architecture-specific memory extension */ -EXTERN BOOLEAN MmpMemoryExtension; +/* Information about the current page map */ +EXTERN MMPAGEMAP_INFO MmpPageMapInfo; /* Number of used hardware allocation descriptors */ EXTERN ULONG MmpUsedHardwareAllocationDescriptors; diff --git a/xtoskrnl/mm/globals.c b/xtoskrnl/mm/globals.c index 723c6c9..8fc74b6 100644 --- a/xtoskrnl/mm/globals.c +++ b/xtoskrnl/mm/globals.c @@ -24,9 +24,6 @@ ULONG MmNumberOfPhysicalPages; /* Old biggest free memory descriptor */ LOADER_MEMORY_DESCRIPTOR MmOldFreeDescriptor; -/* Page Map Level */ -ULONG MmPageMapLevel; - /* Processor structures data (THIS IS A TEMPORARY HACK) */ UCHAR MmProcessorStructuresData[MAXIMUM_PROCESSORS][KPROCESSOR_STRUCTURES_SIZE] = {0}; @@ -36,8 +33,8 @@ LOADER_MEMORY_DESCRIPTOR MmpHardwareAllocationDescriptors[MM_HARDWARE_ALLOCATION /* Live address of kernel's hardware heap */ PVOID MmpHardwareHeapStart = MM_HARDWARE_HEAP_START_ADDRESS; -/* Architecture-specific memory extension */ -BOOLEAN MmpMemoryExtension; +/* Information about the current page map */ +MMPAGEMAP_INFO MmpPageMapInfo; /* Number of used hardware allocation descriptors */ ULONG MmpUsedHardwareAllocationDescriptors = 0; diff --git a/xtoskrnl/mm/init.c b/xtoskrnl/mm/init.c index a7d5afd..a0917b9 100644 --- a/xtoskrnl/mm/init.c +++ b/xtoskrnl/mm/init.c @@ -31,9 +31,6 @@ MmInitializeMemoryManager(VOID) KePanic(0); } - /* Store Page Map Level */ - MmPageMapLevel = KeInitializationBlock->LoaderInformation.PageMapLevel; - /* Proceed with architecture specific initialization */ MmpInitializeArchitecture(); } -- 2.50.1 From 8491e5fed16923757d089efe412d5a57e7aa6022 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sat, 16 Aug 2025 20:18:34 +0200 Subject: [PATCH 10/34] Remove PageMapLevel from the loader information block --- sdk/xtdk/xtfw.h | 1 - xtldr/modules/xtos_o/xtos.c | 3 --- 2 files changed, 4 deletions(-) diff --git a/sdk/xtdk/xtfw.h b/sdk/xtdk/xtfw.h index 925d429..1a4642a 100644 --- a/sdk/xtdk/xtfw.h +++ b/sdk/xtdk/xtfw.h @@ -89,7 +89,6 @@ typedef struct _FIRMWARE_INFORMATION_BLOCK typedef struct _LOADER_INFORMATION_BLOCK { PVOID DbgPrint; - ULONG PageMapLevel; } LOADER_INFORMATION_BLOCK, *PLOADER_INFORMATION_BLOCK; /* Boot Loader memory mapping information */ diff --git a/xtldr/modules/xtos_o/xtos.c b/xtldr/modules/xtos_o/xtos.c index 65e0129..f8844c1 100644 --- a/xtldr/modules/xtos_o/xtos.c +++ b/xtldr/modules/xtos_o/xtos.c @@ -571,9 +571,6 @@ XtpInitializeLoaderBlock(IN PXTBL_PAGE_MAPPING PageMap, /* Set LoaderInformation block properties */ LoaderBlock->LoaderInformation.DbgPrint = XtLdrProtocol->Debug.Print; - /* Store page map level */ - LoaderBlock->LoaderInformation.PageMapLevel = PageMap->PageMapLevel; - /* Attempt to find virtual address of the EFI Runtime Services */ // Status = XtLdrProtocol->GetVirtualAddress(MemoryMappings, &EfiSystemTable->RuntimeServices->Hdr, &RuntimeServices); // if(Status == STATUS_EFI_SUCCESS) -- 2.50.1 From de2973ac42bedea5c17e87f49cc5f3993bb4e515 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sat, 16 Aug 2025 20:28:05 +0200 Subject: [PATCH 11/34] Implement page map info initialization --- xtoskrnl/mm/amd64/init.c | 26 +++++++++++++++++++++++++- xtoskrnl/mm/i686/init.c | 28 +++++++++++++++++++++++++++- 2 files changed, 52 insertions(+), 2 deletions(-) diff --git a/xtoskrnl/mm/amd64/init.c b/xtoskrnl/mm/amd64/init.c index 
88e27d7..0b930ef 100644 --- a/xtoskrnl/mm/amd64/init.c +++ b/xtoskrnl/mm/amd64/init.c @@ -20,7 +20,31 @@ XTAPI VOID MmInitializePageMapSupport(VOID) { - UNIMPLEMENTED; + /* Check if XPA is enabled */ + if(MmpGetExtendedPhysicalAddressingStatus()) + { + /* Set PML5 page map information */ + MmpPageMapInfo.Xpa = TRUE; + + /* Set PML5 base addresses */ + MmpPageMapInfo.PteBase = MM_PTE_LA57_BASE; + MmpPageMapInfo.PdeBase = MM_PDE_LA57_BASE; + MmpPageMapInfo.PpeBase = MM_PPE_LA57_BASE; + MmpPageMapInfo.PxeBase = MM_PXE_LA57_BASE; + MmpPageMapInfo.P5eBase = MM_P5E_LA57_BASE; + } + else + { + /* Set PML4 page map information */ + MmpPageMapInfo.Xpa = FALSE; + + /* Set PML4 base addresses */ + MmpPageMapInfo.PteBase = MM_PTE_BASE; + MmpPageMapInfo.PdeBase = MM_PDE_BASE; + MmpPageMapInfo.PpeBase = MM_PPE_BASE; + MmpPageMapInfo.PxeBase = MM_PXE_BASE; + MmpPageMapInfo.P5eBase = 0x0; + } } /** diff --git a/xtoskrnl/mm/i686/init.c b/xtoskrnl/mm/i686/init.c index fe4887e..8a8a9ce 100644 --- a/xtoskrnl/mm/i686/init.c +++ b/xtoskrnl/mm/i686/init.c @@ -20,7 +20,33 @@ XTAPI VOID MmInitializePageMapSupport(VOID) { - UNIMPLEMENTED; + /* Check if XPA is enabled */ + if(MmpGetExtendedPhysicalAddressingStatus()) + { + /* Set PML3 page map information */ + MmpPageMapInfo.Xpa = TRUE; + + /* Set PML3 base addresses */ + MmpPageMapInfo.PteBase = MM_PTE_BASE; + MmpPageMapInfo.PdeBase = MM_PDE_BASE; + + /* Set PML3 shift values */ + MmpPageMapInfo.PdiShift = MM_PDI_SHIFT; + MmpPageMapInfo.PteShift = MM_PTE_SHIFT; + } + else + { + /* Set PML2 page map information */ + MmpPageMapInfo.Xpa = FALSE; + + /* Set PML2 base addresses */ + MmpPageMapInfo.PteBase = MM_PTE_BASE; + MmpPageMapInfo.PdeBase = MM_PDE_LEGACY_BASE; + + /* Set PML2 shift values */ + MmpPageMapInfo.PdiShift = MM_PDI_LEGACY_SHIFT; + MmpPageMapInfo.PteShift = MM_PTE_LEGACY_SHIFT; + } } /** -- 2.50.1 From 0ed59f223cc269a81b87f8e19ef2575390256860 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sat, 16 Aug 2025 21:07:54 +0200 Subject: [PATCH 12/34] Relocate page mapping helpers and add PML5 support --- sdk/xtdk/amd64/xtstruct.h | 1 + xtoskrnl/includes/amd64/mmi.h | 10 +++- xtoskrnl/mm/amd64/init.c | 80 --------------------------- xtoskrnl/mm/amd64/pmap.c | 100 ++++++++++++++++++++++++++++++++++ xtoskrnl/mm/i686/init.c | 42 -------------- xtoskrnl/mm/i686/pmap.c | 60 ++++++++++++++++++++ 6 files changed, 168 insertions(+), 125 deletions(-) diff --git a/sdk/xtdk/amd64/xtstruct.h b/sdk/xtdk/amd64/xtstruct.h index 0231756..1403809 100644 --- a/sdk/xtdk/amd64/xtstruct.h +++ b/sdk/xtdk/amd64/xtstruct.h @@ -67,6 +67,7 @@ typedef union _APIC_BASE_REGISTER APIC_BASE_REGISTER, *PAPIC_BASE_REGISTER; typedef union _APIC_COMMAND_REGISTER APIC_COMMAND_REGISTER, *PAPIC_COMMAND_REGISTER; typedef union _APIC_LVT_REGISTER APIC_LVT_REGISTER, *PAPIC_LVT_REGISTER; typedef union _APIC_SPURIOUS_REGISTER APIC_SPURIOUS_REGISTER, *PAPIC_SPURIOUS_REGISTER; +typedef union _MMPTE MMP5E, *PMMP5E; typedef union _MMPTE MMPDE, *PMMPDE; typedef union _MMPTE MMPPE, *PMMPPE; typedef union _MMPTE MMPTE, *PMMPTE; diff --git a/xtoskrnl/includes/amd64/mmi.h b/xtoskrnl/includes/amd64/mmi.h index d5f91e8..cccbfc3 100644 --- a/xtoskrnl/includes/amd64/mmi.h +++ b/xtoskrnl/includes/amd64/mmi.h @@ -27,11 +27,15 @@ BOOLEAN MmpGetExtendedPhysicalAddressingStatus(VOID); XTAPI -PMMPTE +PMMP5E +MmpGetP5eAddress(PVOID Address); + +XTAPI +PMMPDE MmpGetPdeAddress(PVOID Address); XTAPI -PMMPTE +PMMPPE MmpGetPpeAddress(PVOID Address); XTAPI @@ -39,7 +43,7 @@ PMMPTE MmpGetPteAddress(PVOID Address); XTAPI -PMMPTE 
+PMMPXE MmpGetPxeAddress(PVOID Address); XTAPI diff --git a/xtoskrnl/mm/amd64/init.c b/xtoskrnl/mm/amd64/init.c index 0b930ef..8d2129c 100644 --- a/xtoskrnl/mm/amd64/init.c +++ b/xtoskrnl/mm/amd64/init.c @@ -47,86 +47,6 @@ MmInitializePageMapSupport(VOID) } } -/** - * Gets the address of the PDE (Page Directory Entry), that maps given address. - * - * @param Address - * Specifies the address to find the PDE for. - * - * @return This routine returns the address of the PDE. - * - * @since XT 1.0 - */ -XTAPI -PMMPTE -MmpGetPdeAddress(PVOID Address) -{ - ULONGLONG Offset; - - Offset = ((((ULONGLONG)Address & (((ULONGLONG)1 << 48) - 1)) >> MM_PDI_SHIFT) << MM_PTE_SHIFT); - return (PMMPTE)(MM_PDE_BASE + Offset); -} - -/** - * Gets the address of the PPE (Page Directory Pointer Table Entry), that maps given address. - * - * @param Address - * Specifies the address to find the PPE for. - * - * @return This routine returns the address of the PPE. - * - * @since XT 1.0 - */ -XTAPI -PMMPTE -MmpGetPpeAddress(PVOID Address) -{ - ULONGLONG Offset; - - Offset = ((((ULONGLONG)Address & (((ULONGLONG)1 << 48) - 1)) >> MM_PPI_SHIFT) << MM_PTE_SHIFT); - return (PMMPTE)(MM_PPE_BASE + Offset); -} - -/** - * Gets the address of the PTE (Page Table Entry), that maps given address. - * - * @param Address - * Specifies the address to find the PTE for. - * - * @return This routine returns the address of the PTE. - * - * @since XT 1.0 - */ -XTAPI -PMMPTE -MmpGetPteAddress(PVOID Address) -{ - ULONGLONG Offset; - - Offset = ((((ULONGLONG)Address & (((ULONGLONG)1 << 48) - 1)) >> MM_PTI_SHIFT) << MM_PTE_SHIFT); - return (PMMPTE)(MM_PTE_BASE + Offset); -} - -/** - * Gets the address of the PXE (Extended Page Entry), that maps given address. - * - * @param Address - * Specifies the address to find the PXE for. - * - * @return This routine returns the address of the PXE. - * - * @since XT 1.0 - */ -XTAPI -PMMPTE -MmpGetPxeAddress(PVOID Address) -{ - ULONGLONG Offset; - - Offset = (((ULONGLONG)Address >> MM_PXI_SHIFT) & (MM_PXE_PER_PAGE - 1)); - return (PMMPTE)(MM_PXE_BASE + Offset); -} - /** * Performs architecture specific initialization of the XTOS Memory Manager. * diff --git a/xtoskrnl/mm/amd64/pmap.c b/xtoskrnl/mm/amd64/pmap.c index c1149dd..dc59230 100644 --- a/xtoskrnl/mm/amd64/pmap.c +++ b/xtoskrnl/mm/amd64/pmap.c @@ -23,3 +23,103 @@ MmpGetExtendedPhysicalAddressingStatus(VOID) /* Check if LA57 is enabled */ return ((ArReadControlRegister(4) & CR4_LA57) != 0) ? TRUE : FALSE; } + +/** + * Gets the address of the P5E (Page Map Level 5 Entry), that maps given address. + * + * @param Address + * Specifies the virtual address for which to retrieve the corresponding P5E. + * + * @return This routine returns the address of the P5E, or NULL if LA57 is not enabled. + * + * @since XT 1.0 + */ +XTAPI +PMMP5E +MmpGetP5eAddress(PVOID Address) +{ + ULONGLONG Offset; + + Offset = (((ULONGLONG)Address >> MM_P5I_SHIFT) << MM_PTE_SHIFT); + return (PMMP5E)((MmpPageMapInfo.P5eBase + Offset) * MmpPageMapInfo.Xpa); +} + +/** + * Gets the address of the PDE (Page Directory Entry), that maps given address. + * + * @param Address + * Specifies the virtual address for which to retrieve the corresponding PDE. + * + * @return This routine returns the address of the PDE. 
+ * + * @since XT 1.0 + */ +XTAPI +PMMPDE +MmpGetPdeAddress(PVOID Address) +{ + ULONGLONG Offset; + + Offset = (((ULONGLONG)Address >> MM_PDI_SHIFT) << MM_PTE_SHIFT); + return (PMMPDE)(MmpPageMapInfo.PdeBase + Offset); +} + +/** + * Gets the address of the PPE (Page Directory Pointer Table Entry), that maps given address. + * + * @param Address + * Specifies the virtual address for which to retrieve the corresponding PPE. + * + * @return This routine returns the address of the PPE. + * + * @since XT 1.0 + */ +XTAPI +PMMPPE +MmpGetPpeAddress(PVOID Address) +{ + ULONGLONG Offset; + + Offset = (((ULONGLONG)Address >> MM_PPI_SHIFT) << MM_PTE_SHIFT); + return (PMMPPE)(MmpPageMapInfo.PpeBase + Offset); +} + +/** + * Gets the address of the PTE (Page Table Entry), that maps given address. + * + * @param Address + * Specifies the virtual address for which to retrieve the corresponding PTE. + * + * @return This routine returns the address of the PTE. + * + * @since XT 1.0 + */ +XTAPI +PMMPTE +MmpGetPteAddress(PVOID Address) +{ + ULONGLONG Offset; + + Offset = (((ULONGLONG)Address >> MM_PTI_SHIFT) << MM_PTE_SHIFT); + return (PMMPTE)(MmpPageMapInfo.PteBase + Offset); +} + +/** + * Gets the address of the PXE (Extended Page Entry), that maps given address. + * + * @param Address + * Specifies the virtual address for which to retrieve the corresponding PXE. + * + * @return This routine returns the address of the PXE. + * + * @since XT 1.0 + */ +XTAPI +PMMPXE +MmpGetPxeAddress(PVOID Address) +{ + ULONGLONG Offset; + + Offset = (((ULONGLONG)Address >> MM_PXI_SHIFT) << MM_PTE_SHIFT); + return (PMMPXE)(MmpPageMapInfo.PxeBase + Offset); +} diff --git a/xtoskrnl/mm/i686/init.c b/xtoskrnl/mm/i686/init.c index 8a8a9ce..7b3e21d 100644 --- a/xtoskrnl/mm/i686/init.c +++ b/xtoskrnl/mm/i686/init.c @@ -49,48 +49,6 @@ MmInitializePageMapSupport(VOID) } } -/** - * Gets the address of the PDE (Page Directory Entry), that maps given address. - * - * @param Address - * Specifies the address to find the PDE for. - * - * @return This routine returns the address of the PDE. - * - * @since XT 1.0 - */ -XTAPI -PMMPTE -MmpGetPdeAddress(PVOID Address) -{ - ULONG Offset; - - /* Calculate offset and return PTE address */ - Offset = ((((ULONG)(Address)) >> MM_PDI_SHIFT) << MM_PTE_SHIFT); - return (PMMPTE)(MM_PDE_BASE + Offset); -} - -/** - * Gets the address of the PTE (Page Table Entry), that maps given address. - * - * @param Address - * Specifies the address to find the PTE for. - * - * @return This routine returns the address of the PTE. - * - * @since XT 1.0 - */ -XTAPI -PMMPTE -MmpGetPteAddress(PVOID Address) -{ - ULONG Offset; - - /* Calculate offset and return PTE address */ - Offset = ((((ULONG)(Address)) >> MM_PTI_SHIFT) << MM_PTE_SHIFT); - return (PMMPTE)(MM_PTE_BASE + Offset); -} - /** * Performs architecture specific initialization of the XTOS Memory Manager. * diff --git a/xtoskrnl/mm/i686/pmap.c b/xtoskrnl/mm/i686/pmap.c index b7e759f..fbe5229 100644 --- a/xtoskrnl/mm/i686/pmap.c +++ b/xtoskrnl/mm/i686/pmap.c @@ -23,3 +23,63 @@ MmpGetExtendedPhysicalAddressingStatus(VOID) /* Check if PAE is enabled */ return ((ArReadControlRegister(4) & CR4_PAE) != 0) ? TRUE : FALSE; } + +/** + * Gets the address of the PDE (Page Directory Entry), that maps given address. + * + * @param Address + * Specifies the virtual address for which to retrieve the corresponding PDE. + * + * @return This routine returns the address of the PDE. 
+ * + * @since XT 1.0 + */ +XTAPI +PMMPDE +MmpGetPdeAddress(PVOID Address) +{ + ULONG Offset; + + /* Calculate offset and return PDE address */ + Offset = ((((ULONG)(Address)) >> MmpPageMapInfo.PdiShift) << MmpPageMapInfo.PteShift); + return (PMMPDE)(MmpPageMapInfo.PdeBase + Offset); +} + +/** + * Gets the address of the PPE (Page Directory Pointer Table Entry), that maps given address. + * + * @param Address + * Specifies the virtual address for which to retrieve the corresponding PPE. + * + * @return This routine returns the address of the PPE, which is always NULL on i686. + * + * @since XT 1.0 + */ +XTAPI +PMMPPE +MmpGetPpeAddress(PVOID Address) +{ + /* Return zero */ + return 0; +} + +/** + * Gets the address of the PTE (Page Table Entry), that maps given address. + * + * @param Address + * Specifies the virtual address for which to retrieve the corresponding PTE. + * + * @return This routine returns the address of the PTE. + * + * @since XT 1.0 + */ +XTAPI +PMMPTE +MmpGetPteAddress(PVOID Address) +{ + ULONG Offset; + + /* Calculate offset and return PTE address */ + Offset = ((((ULONG)(Address)) >> MM_PTI_SHIFT) << MmpPageMapInfo.PteShift); + return (PMMPTE)(MM_PTE_BASE + Offset); +} -- 2.50.1 From f77f2bbf92f01b413bd640a767a6f726ca169cdc Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sun, 17 Aug 2025 00:23:19 +0200 Subject: [PATCH 13/34] Introduce architecture-specific page map routines --- sdk/xtdk/mmtypes.h | 10 ++++++++++ sdk/xtdk/xtstruct.h | 1 + xtoskrnl/CMakeLists.txt | 1 + xtoskrnl/includes/amd64/globals.h | 6 ++++++ xtoskrnl/includes/globals.h | 3 +++ xtoskrnl/includes/i686/globals.h | 6 ++++++ xtoskrnl/mm/amd64/globals.c | 26 ++++++++++++++++++++++++++ xtoskrnl/mm/globals.c | 3 +++ xtoskrnl/mm/i686/globals.c | 26 ++++++++++++++++++++++++++ 9 files changed, 82 insertions(+) create mode 100644 xtoskrnl/mm/amd64/globals.c create mode 100644 xtoskrnl/mm/i686/globals.c diff --git a/sdk/xtdk/mmtypes.h b/sdk/xtdk/mmtypes.h index acbf7ed..2cddd97 100644 --- a/sdk/xtdk/mmtypes.h +++ b/sdk/xtdk/mmtypes.h @@ -10,8 +10,18 @@ #define __XTDK_MMTYPES_H #include +#include ARCH_HEADER(xtstruct.h) +/* Page map routines structure definition */ +typedef CONST STRUCT _CMMPAGEMAP_ROUTINES +{ + VOID (XTAPI *ClearPte)(PHARDWARE_PTE PtePointer); + BOOLEAN (XTAPI *PteValid)(PHARDWARE_PTE PtePointer); + VOID (XTAPI *SetPteCaching)(PHARDWARE_PTE PtePointer, BOOLEAN CacheDisable, BOOLEAN WriteThrough); + VOID (XTAPI *SetPte)(PHARDWARE_PTE PtePointer, PFN_NUMBER PageFrameNumber, BOOLEAN Writable); +} CMMPAGEMAP_ROUTINES, *PCMMPAGEMAP_ROUTINES; + /* Color tables structure definition */ typedef struct _MMCOLOR_TABLES { diff --git a/sdk/xtdk/xtstruct.h b/sdk/xtdk/xtstruct.h index 21b7464..a5043ba 100644 --- a/sdk/xtdk/xtstruct.h +++ b/sdk/xtdk/xtstruct.h @@ -69,6 +69,7 @@ typedef struct _ANSI_STRING ANSI_STRING, *PANSI_STRING; typedef struct _ANSI_STRING32 ANSI_STRING32, *PANSI_STRING32; typedef struct _ANSI_STRING64 ANSI_STRING64, *PANSI_STRING64; typedef struct _CPPORT CPPORT, *PCPPORT; +typedef const struct _CMMPAGEMAP_ROUTINES CMMPAGEMAP_ROUTINES, *PCMMPAGEMAP_ROUTINES; typedef struct _CSTRING CSTRING, *PCSTRING; typedef struct _EFI_1394_DEVICE_PATH EFI_1394_DEVICE_PATH, *PEFI_1394_DEVICE_PATH; typedef struct _EFI_ACPI_ADDRESS_SPACE_DESCRIPTOR EFI_ACPI_ADDRESS_SPACE_DESCRIPTOR, *PEFI_ACPI_ADDRESS_SPACE_DESCRIPTOR; diff --git a/xtoskrnl/CMakeLists.txt b/xtoskrnl/CMakeLists.txt index 80a9d05..0e36cba 100644 --- a/xtoskrnl/CMakeLists.txt +++ b/xtoskrnl/CMakeLists.txt @@ -60,6 +60,7 @@ list(APPEND XTOSKRNL_SOURCE
${XTOSKRNL_SOURCE_DIR}/mm/init.c ${XTOSKRNL_SOURCE_DIR}/mm/kpools.c ${XTOSKRNL_SOURCE_DIR}/mm/pages.c + ${XTOSKRNL_SOURCE_DIR}/mm/${ARCH}/globals.c ${XTOSKRNL_SOURCE_DIR}/mm/${ARCH}/init.c ${XTOSKRNL_SOURCE_DIR}/mm/${ARCH}/pages.c ${XTOSKRNL_SOURCE_DIR}/mm/${ARCH}/pmap.c diff --git a/xtoskrnl/includes/amd64/globals.h b/xtoskrnl/includes/amd64/globals.h index 70c6aa0..234379a 100644 --- a/xtoskrnl/includes/amd64/globals.h +++ b/xtoskrnl/includes/amd64/globals.h @@ -30,4 +30,10 @@ EXTERN UCHAR ArKernelBootStack[KERNEL_STACK_SIZE]; /* Kernel own fault stack */ EXTERN UCHAR ArKernelFaultStack[KERNEL_STACK_SIZE]; +/* Page mapping routines for systems using 4-level paging (PML4) */ +EXTERN CMMPAGEMAP_ROUTINES MmpPml4Routines; + +/* Page mapping routines for systems using 5-level paging (PML5) */ +EXTERN CMMPAGEMAP_ROUTINES MmpPml5Routines; + #endif /* __XTOSKRNL_AMD64_GLOBALS_H */ diff --git a/xtoskrnl/includes/globals.h b/xtoskrnl/includes/globals.h index 882dd1e..3ce700e 100644 --- a/xtoskrnl/includes/globals.h +++ b/xtoskrnl/includes/globals.h @@ -87,6 +87,9 @@ EXTERN PVOID MmpHardwareHeapStart; /* Information about the current page map */ EXTERN MMPAGEMAP_INFO MmpPageMapInfo; +/* Pointers to page map routines for the current paging mode */ +EXTERN PCMMPAGEMAP_ROUTINES MmpPageMapRoutines; + /* Number of used hardware allocation descriptors */ EXTERN ULONG MmpUsedHardwareAllocationDescriptors; diff --git a/xtoskrnl/includes/i686/globals.h b/xtoskrnl/includes/i686/globals.h index 0c5f6ec..e5bf03e 100644 --- a/xtoskrnl/includes/i686/globals.h +++ b/xtoskrnl/includes/i686/globals.h @@ -34,4 +34,10 @@ EXTERN UCHAR ArKernelBootStack[KERNEL_STACK_SIZE]; /* Kernel own fault stack */ EXTERN UCHAR ArKernelFaultStack[KERNEL_STACK_SIZE]; +/* Page mapping routines for systems using 2-level paging (PML2) */ +EXTERN CMMPAGEMAP_ROUTINES MmpPml2Routines; + +/* Page mapping routines for systems using 3-level paging (PML3) */ +EXTERN CMMPAGEMAP_ROUTINES MmpPml3Routines; + #endif /* __XTOSKRNL_I686_GLOBALS_H */ diff --git a/xtoskrnl/mm/amd64/globals.c b/xtoskrnl/mm/amd64/globals.c new file mode 100644 index 0000000..52994aa --- /dev/null +++ b/xtoskrnl/mm/amd64/globals.c @@ -0,0 +1,26 @@ +/** + * PROJECT: ExectOS + * COPYRIGHT: See COPYING.md in the top level directory + * FILE: xtoskrnl/mm/amd64/globals.c + * DESCRIPTION: AMD64-specific global variables for the Memory Manager + * DEVELOPERS: Aiken Harris + */ + +#include + + +/* Page mapping routines for systems using 4-level paging (PML4) */ +CMMPAGEMAP_ROUTINES MmpPml4Routines = { + // .ClearPte = MmpClearPte, + // .PteValid = MmpPml2PteValid, + // .SetPteCaching = MmpSetPml2PteCaching, + // .SetPte = MmpSetPml2Pte, +}; + +/* Page mapping routines for systems using 5-level paging (PML5) */ +CMMPAGEMAP_ROUTINES MmpPml5Routines = { + // .ClearPte = MmpClearPte, + // .PteValid = MmpPml3PteValid, + // .SetPteCaching = MmpSetPml3PteCaching, + // .SetPte = MmpSetPml3Pte, +}; diff --git a/xtoskrnl/mm/globals.c b/xtoskrnl/mm/globals.c index 8fc74b6..f3e0878 100644 --- a/xtoskrnl/mm/globals.c +++ b/xtoskrnl/mm/globals.c @@ -36,5 +36,8 @@ PVOID MmpHardwareHeapStart = MM_HARDWARE_HEAP_START_ADDRESS; /* Information about the current page map */ MMPAGEMAP_INFO MmpPageMapInfo; +/* Pointers to page map routines for the current paging mode */ +PCMMPAGEMAP_ROUTINES MmpPageMapRoutines; + /* Number of used hardware allocation descriptors */ ULONG MmpUsedHardwareAllocationDescriptors = 0; diff --git a/xtoskrnl/mm/i686/globals.c b/xtoskrnl/mm/i686/globals.c new file mode 100644 
index 0000000..97db43e --- /dev/null +++ b/xtoskrnl/mm/i686/globals.c @@ -0,0 +1,26 @@ +/** + * PROJECT: ExectOS + * COPYRIGHT: See COPYING.md in the top level directory + * FILE: xtoskrnl/mm/i686/globals.c + * DESCRIPTION: i686-specific global variables for the Memory Manager + * DEVELOPERS: Aiken Harris + */ + +#include + + +/* Page mapping routines for systems using 2-level paging (PML2) */ +CMMPAGEMAP_ROUTINES MmpPml2Routines = { + .ClearPte = MmpClearPte, + .PteValid = MmpPml2PteValid, + .SetPteCaching = MmpSetPml2PteCaching, + .SetPte = MmpSetPml2Pte, +}; + +/* Page mapping routines for systems using 3-level paging (PML3) */ +CMMPAGEMAP_ROUTINES MmpPml3Routines = { + .ClearPte = MmpClearPte, + .PteValid = MmpPml3PteValid, + .SetPteCaching = MmpSetPml3PteCaching, + .SetPte = MmpSetPml3Pte, +}; -- 2.50.1 From 720d525b952fe69984033c86135541ddafd5dfc7 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sun, 17 Aug 2025 00:29:28 +0200 Subject: [PATCH 14/34] Assign page map routines --- xtoskrnl/mm/amd64/init.c | 7 +++++++ xtoskrnl/mm/i686/init.c | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/xtoskrnl/mm/amd64/init.c b/xtoskrnl/mm/amd64/init.c index 8d2129c..0b12f80 100644 --- a/xtoskrnl/mm/amd64/init.c +++ b/xtoskrnl/mm/amd64/init.c @@ -4,6 +4,7 @@ * FILE: xtoskrnl/mm/amd64/init.c * DESCRIPTION: Architecture specific Memory Manager initialization routines * DEVELOPERS: Rafal Kupiec + * Aiken Harris */ #include @@ -23,6 +24,9 @@ MmInitializePageMapSupport(VOID) /* Check if XPA is enabled */ if(MmpGetExtendedPhysicalAddressingStatus()) { + /* XPA enabled, use LA57 paging (PML5) */ + MmpPageMapRoutines = &MmpPml5Routines; + /* Set PML5 page map information */ MmpPageMapInfo.Xpa = TRUE; @@ -35,6 +39,9 @@ MmInitializePageMapSupport(VOID) } else { + /* XPA disabled, use LA48 paging (PML4) */ + MmpPageMapRoutines = &MmpPml4Routines; + /* Set PML4 page map information */ MmpPageMapInfo.Xpa = FALSE; diff --git a/xtoskrnl/mm/i686/init.c b/xtoskrnl/mm/i686/init.c index 7b3e21d..a473449 100644 --- a/xtoskrnl/mm/i686/init.c +++ b/xtoskrnl/mm/i686/init.c @@ -4,6 +4,7 @@ * FILE: xtoskrnl/mm/i686/init.c * DESCRIPTION: Architecture specific Memory Manager initialization routines * DEVELOPERS: Rafal Kupiec + * Aiken Harris */ #include @@ -23,6 +24,9 @@ MmInitializePageMapSupport(VOID) /* Check if XPA is enabled */ if(MmpGetExtendedPhysicalAddressingStatus()) { + /* XPA enabled, use modern PAE paging (PML3) */ + MmpPageMapRoutines = &MmpPml3Routines; + /* Set PML3 page map information */ MmpPageMapInfo.Xpa = TRUE; @@ -36,6 +40,9 @@ MmInitializePageMapSupport(VOID) } else { + /* XPA disabled, use legacy i386 paging (PML2) */ + MmpPageMapRoutines = &MmpPml2Routines; + /* Set PML2 page map information */ MmpPageMapInfo.Xpa = FALSE; -- 2.50.1 From 57193eecc073ec084e3d22478be3413eae9daec0 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sun, 17 Aug 2025 00:45:12 +0200 Subject: [PATCH 15/34] Implement PML2/PML3 page table routines --- xtoskrnl/includes/i686/mmi.h | 36 ++++++++ xtoskrnl/mm/i686/pmap.c | 157 +++++++++++++++++++++++++++++++++++ 2 files changed, 193 insertions(+) diff --git a/xtoskrnl/includes/i686/mmi.h b/xtoskrnl/includes/i686/mmi.h index 218361b..cc36a58 100644 --- a/xtoskrnl/includes/i686/mmi.h +++ b/xtoskrnl/includes/i686/mmi.h @@ -22,6 +22,10 @@ VOID MmZeroPages(IN PVOID Address, IN ULONG Size); +XTAPI +VOID +MmpClearPte(PHARDWARE_PTE PtePointer); + XTAPI BOOLEAN MmpGetExtendedPhysicalAddressingStatus(VOID); @@ -42,4 +46,36 @@ XTAPI VOID MmpInitializeArchitecture(VOID); +XTAPI 
+BOOLEAN +MmpPml2PteValid(PHARDWARE_PTE PtePointer); + +XTAPI +VOID +MmpSetPml2Pte(PHARDWARE_PTE PtePointer, + PFN_NUMBER PageFrameNumber, + BOOLEAN Writable); + +XTAPI +VOID +MmpSetPml2PteCaching(PHARDWARE_PTE PtePointer, + BOOLEAN CacheDisable, + BOOLEAN WriteThrough); + +XTAPI +BOOLEAN +MmpPml3PteValid(PHARDWARE_PTE PtePointer); + +XTAPI +VOID +MmpSetPml3Pte(PHARDWARE_PTE PtePointer, + PFN_NUMBER PageFrameNumber, + BOOLEAN Writable); + +XTAPI +VOID +MmpSetPml3PteCaching(PHARDWARE_PTE PtePointer, + BOOLEAN CacheDisable, + BOOLEAN WriteThrough); + #endif /* __XTOSKRNL_I686_MMI_H */ diff --git a/xtoskrnl/mm/i686/pmap.c b/xtoskrnl/mm/i686/pmap.c index fbe5229..1c1034b 100644 --- a/xtoskrnl/mm/i686/pmap.c +++ b/xtoskrnl/mm/i686/pmap.c @@ -9,6 +9,23 @@ #include +/** + * Clears the contents of a page table entry (PTE). + * + * @param PtePointer + * Pointer to the page table entry (PTE) to be cleared. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MmpClearPte(PHARDWARE_PTE PtePointer) +{ + PtePointer->Long = 0; +} + /** * Checks if eXtended Physical Addressing (XPA) is enabled. * @@ -83,3 +100,143 @@ MmpGetPteAddress(PVOID Address) Offset = ((((ULONG)(Address)) >> MM_PTI_SHIFT) << MmpPageMapInfo.PteShift); return (PMMPTE)(MM_PTE_BASE + Offset); } + +/** + * Checks whether the given PML2 page table entry (PTE) is valid. + * + * @param PtePointer + * Pointer to the page table entry (PTE) to check. + * + * @return Returns TRUE if the entry is valid, FALSE otherwise. + * + * @since XT 1.0 + */ +XTAPI +BOOLEAN +MmpPml2PteValid(PHARDWARE_PTE PtePointer) +{ + return (BOOLEAN)PtePointer->Pml2.Valid; +} + +/** + * Sets a PML2 page table entry (PTE) with the specified physical page and access flags. + * + * @param PtePointer + * Pointer to the page table entry (PTE) to set. + * + * @param PageFrameNumber + * Physical frame number to map. + * + * @param Writable + * Indicates whether the page should be writable. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MmpSetPml2Pte(PHARDWARE_PTE PtePointer, + PFN_NUMBER PageFrameNumber, + BOOLEAN Writable) +{ + PtePointer->Pml2.PageFrameNumber = PageFrameNumber; + PtePointer->Pml2.Valid = 1; + PtePointer->Pml2.Writable = Writable; +} + +/** + * Sets caching attributes for a PML2 page table entry (PTE). + * + * @param PtePointer + * Pointer to the page table entry (PTE) to modify. + * + * @param CacheDisable + * Indicates whether caching should be disabled for this page. + * + * @param WriteThrough + * Indicates whether write-through caching should be enabled. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MmpSetPml2PteCaching(PHARDWARE_PTE PtePointer, + BOOLEAN CacheDisable, + BOOLEAN WriteThrough) +{ + PtePointer->Pml2.CacheDisable = CacheDisable; + PtePointer->Pml2.WriteThrough = WriteThrough; +} + +/** + * Checks whether the given PML3 page table entry (PTE) is valid. + * + * @param PtePointer + * Pointer to the page table entry (PTE) to check. + * + * @return Returns TRUE if the entry is valid, FALSE otherwise. + * + * @since XT 1.0 + */ +XTAPI +BOOLEAN +MmpPml3PteValid(PHARDWARE_PTE PtePointer) +{ + return PtePointer->Pml3.Valid; +} + +/** + * Sets a PML3 page table entry (PTE) with the specified physical page and access flags. + * + * @param PtePointer + * Pointer to the page table entry (PTE) to set. + * + * @param PageFrameNumber + * Physical frame number to map. 
+ * + * @param Writable + * Indicates whether the page should be writable. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MmpSetPml3Pte(PHARDWARE_PTE PtePointer, + PFN_NUMBER PageFrameNumber, + BOOLEAN Writable) +{ + PtePointer->Pml3.PageFrameNumber = PageFrameNumber; + PtePointer->Pml3.Valid = 1; + PtePointer->Pml3.Writable = Writable; +} + +/** + * Sets caching attributes for a PML3 page table entry (PTE). + * + * @param PtePointer + * Pointer to the page table entry (PTE) to modify. + * + * @param CacheDisable + * Indicates whether caching should be disabled for this page. + * + * @param WriteThrough + * Indicates whether write-through caching should be enabled. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MmpSetPml3PteCaching(PHARDWARE_PTE PtePointer, + BOOLEAN CacheDisable, + BOOLEAN WriteThrough) +{ + PtePointer->Pml3.CacheDisable = CacheDisable; + PtePointer->Pml3.WriteThrough = WriteThrough; +} -- 2.50.1 From 1e11acee721c886c5144c030d9dd794b018dbced Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sun, 17 Aug 2025 00:47:56 +0200 Subject: [PATCH 16/34] Refactor hardware memory mapping to use page map routine callbacks --- xtoskrnl/mm/hlpool.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/xtoskrnl/mm/hlpool.c b/xtoskrnl/mm/hlpool.c index 67263e4..5b96fa9 100644 --- a/xtoskrnl/mm/hlpool.c +++ b/xtoskrnl/mm/hlpool.c @@ -190,7 +190,7 @@ MmMapHardwareMemory(IN PHYSICAL_ADDRESS PhysicalAddress, ReturnAddress = (PVOID)(ULONG_PTR)ReturnAddress + MM_PAGE_SIZE; /* Check if PTE is valid */ - if(PtePointer->Valid) + if(MmpPageMapRoutines->PteValid(PtePointer)) { /* PTE is not available, go to the next one */ BaseAddress = ReturnAddress; @@ -219,9 +219,7 @@ MmMapHardwareMemory(IN PHYSICAL_ADDRESS PhysicalAddress, PtePointer = (PHARDWARE_PTE)MmpGetPteAddress(BaseAddress); /* Fill the PTE */ - PtePointer->PageFrameNumber = (PFN_NUMBER)(PhysicalAddress.QuadPart >> MM_PAGE_SHIFT); - PtePointer->Valid = 1; - PtePointer->Writable = 1; + MmpPageMapRoutines->SetPte(PtePointer, (PFN_NUMBER)(PhysicalAddress.QuadPart >> MM_PAGE_SHIFT), TRUE); /* Advance to the next address */ PhysicalAddress.QuadPart += MM_PAGE_SIZE; @@ -268,8 +266,7 @@ MmMarkHardwareMemoryWriteThrough(IN PVOID VirtualAddress, for(Page = 0; Page < PageCount; Page++) { /* Mark pages as CD/WT */ - PtePointer->CacheDisable = 1; - PtePointer->WriteThrough = 1; + MmpPageMapRoutines->SetPteCaching(PtePointer, TRUE, TRUE); PtePointer++; } } @@ -302,9 +299,7 @@ MmRemapHardwareMemory(IN PVOID VirtualAddress, PtePointer = (PHARDWARE_PTE)MmpGetPteAddress(VirtualAddress); /* Remap the PTE */ - PtePointer->PageFrameNumber = (PFN_NUMBER)(PhysicalAddress.QuadPart >> MM_PAGE_SHIFT); - PtePointer->Valid = 1; - PtePointer->Writable = 1; + MmpPageMapRoutines->SetPte(PtePointer, (PFN_NUMBER)(PhysicalAddress.QuadPart >> MM_PAGE_SHIFT), TRUE); /* Check if TLB needs to be flushed */ if(FlushTlb) @@ -356,11 +351,7 @@ MmUnmapHardwareMemory(IN PVOID VirtualAddress, for(Page = 0; Page < PageCount; Page++) { /* Unmap the PTE and get the next one */ - PtePointer->CacheDisable = 0; - PtePointer->Valid = 0; - PtePointer->Writable = 0; - PtePointer->WriteThrough = 0; - PtePointer->PageFrameNumber = 0; + MmpPageMapRoutines->ClearPte(PtePointer); PtePointer++; } -- 2.50.1 From c3ece4f317e0a945f82189884fda0e7a6268a015 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sun, 17 Aug 2025 00:51:26 +0200 Subject: [PATCH 17/34] Fix type 
usage in XtpMapHardwareMemoryPool --- xtldr/modules/xtos_o/i686/memory.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/xtldr/modules/xtos_o/i686/memory.c b/xtldr/modules/xtos_o/i686/memory.c index f4fa319..be8b809 100644 --- a/xtldr/modules/xtos_o/i686/memory.c +++ b/xtldr/modules/xtos_o/i686/memory.c @@ -60,7 +60,7 @@ XtpMapHardwareMemoryPool(IN PXTBL_PAGE_MAPPING PageMap) { EFI_PHYSICAL_ADDRESS Address; PHARDWARE_LEGACY_PTE LegacyPdeBase; - PHARDWARE_PTE PdeBase; + PHARDWARE_MODERN_PTE PdeBase; EFI_STATUS Status; /* Allocate memory */ @@ -78,10 +78,10 @@ XtpMapHardwareMemoryPool(IN PXTBL_PAGE_MAPPING PageMap) if(PageMap->PageMapLevel == 3) { /* Get PDE base address (PAE enabled) */ - PdeBase = (PHARDWARE_PTE)(((PHARDWARE_PTE)PageMap->PtePointer)[MM_HARDWARE_VA_START >> MM_PPI_SHIFT].PageFrameNumber << MM_PAGE_SHIFT); + PdeBase = (PHARDWARE_MODERN_PTE)(((PHARDWARE_MODERN_PTE)PageMap->PtePointer)[MM_HARDWARE_VA_START >> MM_PPI_SHIFT].PageFrameNumber << MM_PAGE_SHIFT); /* Make PDE valid */ - RtlZeroMemory(&PdeBase[(MM_HARDWARE_VA_START >> MM_PDI_SHIFT) & 0x1FF], sizeof(HARDWARE_PTE)); + RtlZeroMemory(&PdeBase[(MM_HARDWARE_VA_START >> MM_PDI_SHIFT) & 0x1FF], sizeof(HARDWARE_MODERN_PTE)); PdeBase[(MM_HARDWARE_VA_START >> MM_PDI_SHIFT) & 0x1FF].PageFrameNumber = Address >> MM_PAGE_SHIFT; PdeBase[(MM_HARDWARE_VA_START >> MM_PDI_SHIFT) & 0x1FF].Valid = 1; PdeBase[(MM_HARDWARE_VA_START >> MM_PDI_SHIFT) & 0x1FF].Writable = 1; -- 2.50.1 From f30d3df5b3eafbf78ffc3a6c3f230955334b867d Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sun, 17 Aug 2025 21:48:28 +0200 Subject: [PATCH 18/34] Implement PTE manipulation functions for AMD64 architecture --- xtoskrnl/includes/amd64/mmi.h | 20 +++++++++++ xtoskrnl/mm/amd64/globals.c | 16 ++++----- xtoskrnl/mm/amd64/pmap.c | 65 +++++++++++++++++++++++++++++++++++ 3 files changed, 93 insertions(+), 8 deletions(-) diff --git a/xtoskrnl/includes/amd64/mmi.h b/xtoskrnl/includes/amd64/mmi.h index cccbfc3..dbbcba0 100644 --- a/xtoskrnl/includes/amd64/mmi.h +++ b/xtoskrnl/includes/amd64/mmi.h @@ -22,6 +22,10 @@ VOID MmZeroPages(IN PVOID Address, IN ULONG Size); +XTAPI +VOID +MmpClearPte(PHARDWARE_PTE PtePointer); + XTAPI BOOLEAN MmpGetExtendedPhysicalAddressingStatus(VOID); @@ -50,4 +54,20 @@ XTAPI VOID MmpInitializeArchitecture(VOID); +XTAPI +BOOLEAN +MmpPteValid(PHARDWARE_PTE PtePointer); + +XTAPI +VOID +MmpSetPte(PHARDWARE_PTE PtePointer, + PFN_NUMBER PageFrameNumber, + BOOLEAN Writable); + +XTAPI +VOID +MmpSetPteCaching(PHARDWARE_PTE PtePointer, + BOOLEAN CacheDisable, + BOOLEAN WriteThrough); + #endif /* __XTOSKRNL_AMD64_MMI_H */ diff --git a/xtoskrnl/mm/amd64/globals.c b/xtoskrnl/mm/amd64/globals.c index 52994aa..f5959a1 100644 --- a/xtoskrnl/mm/amd64/globals.c +++ b/xtoskrnl/mm/amd64/globals.c @@ -11,16 +11,16 @@ /* Page mapping routines for systems using 4-level paging (PML4) */ CMMPAGEMAP_ROUTINES MmpPml4Routines = { - // .ClearPte = MmpClearPte, - // .PteValid = MmpPml2PteValid, - // .SetPteCaching = MmpSetPml2PteCaching, - // .SetPte = MmpSetPml2Pte, + .ClearPte = MmpClearPte, + .PteValid = MmpPteValid, + .SetPteCaching = MmpSetPteCaching, + .SetPte = MmpSetPte, }; /* Page mapping routines for systems using 5-level paging (PML5) */ CMMPAGEMAP_ROUTINES MmpPml5Routines = { - // .ClearPte = MmpClearPte, - // .PteValid = MmpPml3PteValid, - // .SetPteCaching = MmpSetPml3PteCaching, - // .SetPte = MmpSetPml3Pte, + .ClearPte = MmpClearPte, + .PteValid = MmpPteValid, + .SetPteCaching = MmpSetPteCaching, + .SetPte = MmpSetPte, }; diff --git 
a/xtoskrnl/mm/amd64/pmap.c b/xtoskrnl/mm/amd64/pmap.c index dc59230..20b9ec1 100644 --- a/xtoskrnl/mm/amd64/pmap.c +++ b/xtoskrnl/mm/amd64/pmap.c @@ -9,6 +9,27 @@ #include +/** + * Clears the contents of a page table entry (PTE). + * + * @param PtePointer + * Pointer to the page table entry (PTE) to be cleared. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MmpClearPte(PHARDWARE_PTE PtePointer) +{ + PtePointer->CacheDisable = 0; + PtePointer->PageFrameNumber = 0; + PtePointer->Valid = 0; + PtePointer->Writable = 0; + PtePointer->WriteThrough = 0; +} + /** * Checks if eXtended Physical Addressing (XPA) is enabled. * @@ -123,3 +144,47 @@ MmpGetPxeAddress(PVOID Address) Offset = (((ULONGLONG)Address >> MM_PXI_SHIFT) << MM_PTE_SHIFT); return (PMMPXE)(MmpPageMapInfo.PxeBase + Offset); } + +/** + * Checks whether the given page table entry (PTE) is valid. + * + * @param PtePointer + * Pointer to the page table entry (PTE) to check. + * + * @return Returns TRUE if the entry is valid, FALSE otherwise. + * + * @since XT 1.0 + */ +XTAPI +BOOLEAN +MmpPteValid(PHARDWARE_PTE PtePointer) +{ + return (BOOLEAN)PtePointer->Valid; +} + +/** + * Sets a page table entry (PTE) with the specified physical page and access flags. + * + * @param PtePointer + * Pointer to the page table entry (PTE) to set. + * + * @param PageFrameNumber + * Physical frame number to map. + * + * @param Writable + * Indicates whether the page should be writable. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +XTAPI +VOID +MmpSetPte(PHARDWARE_PTE PtePointer, + PFN_NUMBER PageFrameNumber, + BOOLEAN Writable) +{ + PtePointer->PageFrameNumber = PageFrameNumber; + PtePointer->Valid = 1; + PtePointer->Writable = Writable; +} -- 2.50.1 From a9dd1eaacd687b64e581bec801a4a6c2f4aa008d Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sun, 17 Aug 2025 21:51:43 +0200 Subject: [PATCH 19/34] Implement MmpSetPteCaching function for AMD64 architecture --- xtoskrnl/mm/amd64/pmap.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/xtoskrnl/mm/amd64/pmap.c b/xtoskrnl/mm/amd64/pmap.c index 20b9ec1..a776720 100644 --- a/xtoskrnl/mm/amd64/pmap.c +++ b/xtoskrnl/mm/amd64/pmap.c @@ -188,3 +188,29 @@ MmpSetPte(PHARDWARE_PTE PtePointer, PtePointer->Valid = 1; PtePointer->Writable = Writable; } + +/** + * Sets caching attributes for a page table entry (PTE). + * + * @param PtePointer + * Pointer to the page table entry (PTE) to modify. + * + * @param CacheDisable + * Indicates whether caching should be disabled for this page. + * + * @param WriteThrough + * Indicates whether write-through caching should be enabled. + * + * @return This routine does not return any value. 
+ * + * @since XT 1.0 + */ +XTAPI +VOID +MmpSetPteCaching(PHARDWARE_PTE PtePointer, + BOOLEAN CacheDisable, + BOOLEAN WriteThrough) +{ + PtePointer->CacheDisable = CacheDisable; + PtePointer->WriteThrough = WriteThrough; +} -- 2.50.1 From 017b8603d507cd2e554e342a442f16b28816a9af Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Sun, 17 Aug 2025 21:55:21 +0200 Subject: [PATCH 20/34] Align parameters in PTE manipulation functions --- xtoskrnl/mm/i686/pmap.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/xtoskrnl/mm/i686/pmap.c b/xtoskrnl/mm/i686/pmap.c index 1c1034b..4decaba 100644 --- a/xtoskrnl/mm/i686/pmap.c +++ b/xtoskrnl/mm/i686/pmap.c @@ -137,8 +137,8 @@ MmpPml2PteValid(PHARDWARE_PTE PtePointer) XTAPI VOID MmpSetPml2Pte(PHARDWARE_PTE PtePointer, - PFN_NUMBER PageFrameNumber, - BOOLEAN Writable) + PFN_NUMBER PageFrameNumber, + BOOLEAN Writable) { PtePointer->Pml2.PageFrameNumber = PageFrameNumber; PtePointer->Pml2.Valid = 1; @@ -164,8 +164,8 @@ MmpSetPml2Pte(PHARDWARE_PTE PtePointer, XTAPI VOID MmpSetPml2PteCaching(PHARDWARE_PTE PtePointer, - BOOLEAN CacheDisable, - BOOLEAN WriteThrough) + BOOLEAN CacheDisable, + BOOLEAN WriteThrough) { PtePointer->Pml2.CacheDisable = CacheDisable; PtePointer->Pml2.WriteThrough = WriteThrough; @@ -207,8 +207,8 @@ MmpPml3PteValid(PHARDWARE_PTE PtePointer) XTAPI VOID MmpSetPml3Pte(PHARDWARE_PTE PtePointer, - PFN_NUMBER PageFrameNumber, - BOOLEAN Writable) + PFN_NUMBER PageFrameNumber, + BOOLEAN Writable) { PtePointer->Pml3.PageFrameNumber = PageFrameNumber; PtePointer->Pml3.Valid = 1; @@ -234,8 +234,8 @@ MmpSetPml3Pte(PHARDWARE_PTE PtePointer, XTAPI VOID MmpSetPml3PteCaching(PHARDWARE_PTE PtePointer, - BOOLEAN CacheDisable, - BOOLEAN WriteThrough) + BOOLEAN CacheDisable, + BOOLEAN WriteThrough) { PtePointer->Pml3.CacheDisable = CacheDisable; PtePointer->Pml3.WriteThrough = WriteThrough; -- 2.50.1 From d602038858ccda0c8751d683695f4061688da7a0 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Mon, 18 Aug 2025 00:10:32 +0200 Subject: [PATCH 21/34] Temporarily disable LA57 paging --- xtldr/modules/xtos_o/amd64/memory.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/xtldr/modules/xtos_o/amd64/memory.c b/xtldr/modules/xtos_o/amd64/memory.c index 91349c8..705ea16 100644 --- a/xtldr/modules/xtos_o/amd64/memory.c +++ b/xtldr/modules/xtos_o/amd64/memory.c @@ -43,13 +43,14 @@ XtpDeterminePagingLevel(IN CONST PWCHAR Parameters) /* Query CPUID */ ArCpuId(&CpuRegisters); - /* Check if eXtended Physical Addressing (XPA) is enabled and if LA57 is supported by the CPU */ - if((CpuRegisters.Ecx & CPUID_FEATURES_ECX_LA57) && - !(XtLdrProtocol->BootUtil.GetBooleanParameter(Parameters, L"NOXPA"))) - { - /* Enable LA57 (PML5) */ - return 5; - } + // TODO: Uncomment the following code when LA57 support is implemented in the bootloader + // /* Check if eXtended Physical Addressing (XPA) is enabled and if LA57 is supported by the CPU */ + // if((CpuRegisters.Ecx & CPUID_FEATURES_ECX_LA57) && + // !(XtLdrProtocol->BootUtil.GetBooleanParameter(Parameters, L"NOXPA"))) + // { + // /* Enable LA57 (PML5) */ + // return 4; + // } } /* Disable LA57 and use PML4 by default */ -- 2.50.1 From c409400cbf7e1664731c1f93fce1bfb05c7dc906 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Mon, 18 Aug 2025 01:07:28 +0200 Subject: [PATCH 22/34] Correct VA masking in AMD64 page mapping functions --- sdk/xtdk/amd64/mmtypes.h | 1 + xtoskrnl/mm/amd64/init.c | 6 ++++++ xtoskrnl/mm/amd64/pmap.c | 10 +++++----- 3 files changed, 12 
insertions(+), 5 deletions(-) diff --git a/sdk/xtdk/amd64/mmtypes.h b/sdk/xtdk/amd64/mmtypes.h index f472dff..8fccfaf 100644 --- a/sdk/xtdk/amd64/mmtypes.h +++ b/sdk/xtdk/amd64/mmtypes.h @@ -101,6 +101,7 @@ typedef struct _MMPAGEMAP_INFO ULONGLONG PpeBase; ULONGLONG PxeBase; ULONGLONG P5eBase; + ULONG VaBits; } MMPAGEMAP_INFO, *PMMPAGEMAP_INFO; /* A Page Table Entry on AMD64 system */ diff --git a/xtoskrnl/mm/amd64/init.c b/xtoskrnl/mm/amd64/init.c index 0b12f80..31483d4 100644 --- a/xtoskrnl/mm/amd64/init.c +++ b/xtoskrnl/mm/amd64/init.c @@ -36,6 +36,9 @@ MmInitializePageMapSupport(VOID) MmpPageMapInfo.PpeBase = MM_PPE_LA57_BASE; MmpPageMapInfo.PxeBase = MM_PXE_LA57_BASE; MmpPageMapInfo.P5eBase = MM_P5E_LA57_BASE; + + /* PML5 uses 57-bit virtual addresses */ + MmpPageMapInfo.VaBits = 57; } else { @@ -51,6 +54,9 @@ MmInitializePageMapSupport(VOID) MmpPageMapInfo.PpeBase = MM_PPE_BASE; MmpPageMapInfo.PxeBase = MM_PXE_BASE; MmpPageMapInfo.P5eBase = 0x0; + + /* PML4 uses 48-bit virtual addresses */ + MmpPageMapInfo.VaBits = 48; } } diff --git a/xtoskrnl/mm/amd64/pmap.c b/xtoskrnl/mm/amd64/pmap.c index a776720..bc29a75 100644 --- a/xtoskrnl/mm/amd64/pmap.c +++ b/xtoskrnl/mm/amd64/pmap.c @@ -61,7 +61,7 @@ MmpGetP5eAddress(PVOID Address) { ULONGLONG Offset; - Offset = (((ULONGLONG)Address >> MM_P5I_SHIFT) << MM_PTE_SHIFT); + Offset = ((((ULONGLONG)Address & (((ULONGLONG)1 << MmpPageMapInfo.VaBits) - 1)) >> MM_P5I_SHIFT) << MM_PTE_SHIFT); return (PMMP5E)((MmpPageMapInfo.P5eBase + Offset) * MmpPageMapInfo.Xpa); } @@ -81,7 +81,7 @@ MmpGetPdeAddress(PVOID Address) { ULONGLONG Offset; - Offset = (((ULONGLONG)Address >> MM_PDI_SHIFT) << MM_PTE_SHIFT); + Offset = ((((ULONGLONG)Address & (((ULONGLONG)1 << MmpPageMapInfo.VaBits) - 1)) >> MM_PDI_SHIFT) << MM_PTE_SHIFT); return (PMMPDE)(MmpPageMapInfo.PdeBase + Offset); } @@ -101,7 +101,7 @@ MmpGetPpeAddress(PVOID Address) { ULONGLONG Offset; - Offset = (((ULONGLONG)Address >> MM_PPI_SHIFT) << MM_PTE_SHIFT); + Offset = ((((ULONGLONG)Address & (((ULONGLONG)1 << MmpPageMapInfo.VaBits) - 1)) >> MM_PPI_SHIFT) << MM_PTE_SHIFT); return (PMMPPE)(MmpPageMapInfo.PpeBase + Offset); } @@ -121,7 +121,7 @@ MmpGetPteAddress(PVOID Address) { ULONGLONG Offset; - Offset = (((ULONGLONG)Address >> MM_PTI_SHIFT) << MM_PTE_SHIFT); + Offset = ((((ULONGLONG)Address & (((ULONGLONG)1 << MmpPageMapInfo.VaBits) - 1)) >> MM_PTI_SHIFT) << MM_PTE_SHIFT); return (PMMPTE)(MmpPageMapInfo.PteBase + Offset); } @@ -141,7 +141,7 @@ MmpGetPxeAddress(PVOID Address) { ULONGLONG Offset; - Offset = (((ULONGLONG)Address >> MM_PXI_SHIFT) << MM_PTE_SHIFT); + Offset = ((((ULONGLONG)Address & (((ULONGLONG)1 << MmpPageMapInfo.VaBits) - 1)) >> MM_PXI_SHIFT) << MM_PTE_SHIFT); return (PMMPXE)(MmpPageMapInfo.PxeBase + Offset); } -- 2.50.1 From b639bf30779b8f9cf9b51b297752e5474f91fdf8 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Mon, 18 Aug 2025 11:59:05 +0200 Subject: [PATCH 23/34] Implement PML5 self-mapping --- xtldr/arch/amd64/memory.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/xtldr/arch/amd64/memory.c b/xtldr/arch/amd64/memory.c index 9133174..9b4f58b 100644 --- a/xtldr/arch/amd64/memory.c +++ b/xtldr/arch/amd64/memory.c @@ -338,22 +338,21 @@ BlpSelfMapPml(IN PXTBL_PAGE_MAPPING PageMap, /* Check page map level */ if(PageMap->PageMapLevel == 5) { - /* Self-mapping for PML5 is not supported */ - BlDebugPrint(L"PML5 self-mapping not supported yet!\n"); - return STATUS_EFI_UNSUPPORTED; + /* Calculate PML index based on provided self map address for PML5 */ +
PmlIndex = (SelfMapAddress >> MM_P5I_SHIFT) & 0x1FF; } else { - /* Calculate PML index based on provided self map address */ + /* Calculate PML index based on provided self map address for PML4 */ PmlIndex = (SelfMapAddress >> MM_PXI_SHIFT) & 0x1FF; - - /* Add self-mapping for PML4 */ - RtlZeroMemory(&PmlBase[PmlIndex], sizeof(HARDWARE_PTE)); - PmlBase[PmlIndex].PageFrameNumber = (UINT_PTR)PageMap->PtePointer / EFI_PAGE_SIZE; - PmlBase[PmlIndex].Valid = 1; - PmlBase[PmlIndex].Writable = 1; } + /* Add self-mapping */ + RtlZeroMemory(&PmlBase[PmlIndex], sizeof(HARDWARE_PTE)); + PmlBase[PmlIndex].PageFrameNumber = (UINT_PTR)PageMap->PtePointer / EFI_PAGE_SIZE; + PmlBase[PmlIndex].Valid = 1; + PmlBase[PmlIndex].Writable = 1; + /* Return success */ return STATUS_EFI_SUCCESS; } -- 2.50.1 From 91a5db2ee403f6ef1b1a1bd4e35b6c549ef37590 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Mon, 18 Aug 2025 12:13:48 +0200 Subject: [PATCH 24/34] Implement PML5 support in XtpMapHardwareMemoryPool --- xtldr/modules/xtos_o/amd64/memory.c | 45 ++++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 8 deletions(-) diff --git a/xtldr/modules/xtos_o/amd64/memory.c b/xtldr/modules/xtos_o/amd64/memory.c index 705ea16..ca24832 100644 --- a/xtldr/modules/xtos_o/amd64/memory.c +++ b/xtldr/modules/xtos_o/amd64/memory.c @@ -71,19 +71,48 @@ XTCDECL EFI_STATUS XtpMapHardwareMemoryPool(IN PXTBL_PAGE_MAPPING PageMap) { - PHARDWARE_PTE PdeBase, PpeBase, PxeBase; + PHARDWARE_PTE P5eBase, PdeBase, PpeBase, PxeBase; EFI_PHYSICAL_ADDRESS Address; EFI_STATUS Status; - /* Check page map level */ - if(PageMap->PageMapLevel > 4) + if(PageMap->PageMapLevel == 5) { - /* PML5 (LA57) is not supported yet */ - return STATUS_EFI_UNSUPPORTED; - } + /* Get P5E (PML5) base address */ + P5eBase = (PHARDWARE_PTE)PageMap->PtePointer; - /* Get PXE (PML4) base address */ - PxeBase = ((PHARDWARE_PTE)(PageMap->PtePointer)); + /* Check if P5E entry already exists */ + if(!P5eBase[(MM_HARDWARE_VA_START >> MM_P5I_SHIFT) & 0x1FF].Valid) + { + /* No valid P5E, allocate memory */ + Status = XtLdrProtocol->Memory.AllocatePages(1, &Address); + if(Status != STATUS_EFI_SUCCESS) + { + /* Memory allocation failure, return error */ + return Status; + } + + /* Zero fill memory used by P5E */ + RtlZeroMemory((PVOID)Address, EFI_PAGE_SIZE); + + /* Make P5E valid */ + P5eBase[(MM_HARDWARE_VA_START >> MM_P5I_SHIFT) & 0x1FF].Valid = 1; + P5eBase[(MM_HARDWARE_VA_START >> MM_P5I_SHIFT) & 0x1FF].PageFrameNumber = Address / EFI_PAGE_SIZE; + P5eBase[(MM_HARDWARE_VA_START >> MM_P5I_SHIFT) & 0x1FF].Writable = 1; + + /* Set PXE base address */ + PxeBase = (PHARDWARE_PTE)(UINT_PTR)Address; + } + else + { + /* Set PXE base address based on existing P5E */ + PxeBase = (PHARDWARE_PTE)((P5eBase[(MM_HARDWARE_VA_START >> MM_P5I_SHIFT) & 0x1FF].PageFrameNumber) << EFI_PAGE_SHIFT); + } + } + else + { + /* Get PXE (PML4) base address */ + PxeBase = (PHARDWARE_PTE)PageMap->PtePointer; + } /* Check if PXE entry already exists */ if(!PxeBase[(MM_HARDWARE_VA_START >> MM_PXI_SHIFT) & 0x1FF].Valid) -- 2.50.1 From 1a0bc7f65f1bb4432aa0647015ed425ce284da61 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Tue, 19 Aug 2025 21:45:13 +0200 Subject: [PATCH 25/34] Update and correct CR4 bit definitions --- sdk/xtdk/amd64/artypes.h | 15 +++++++++++---- sdk/xtdk/i686/artypes.h | 12 +++++++----- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/sdk/xtdk/amd64/artypes.h b/sdk/xtdk/amd64/artypes.h index a6196b1..2bc4493 100644 --- a/sdk/xtdk/amd64/artypes.h +++ 
b/sdk/xtdk/amd64/artypes.h @@ -39,13 +39,20 @@ #define CR4_PCE 0x00000100 #define CR4_FXSR 0x00000200 #define CR4_XMMEXCPT 0x00000400 +#define CR4_UMIP 0x00000800 #define CR4_LA57 0x00001000 -#define CR4_RESERVED1 0x00001800 #define CR4_VMXE 0x00002000 #define CR4_SMXE 0x00004000 -#define CR4_RESERVED2 0x00018000 -#define CR4_XSAVE 0x00020000 -#define CR4_RESERVED3 0xFFFC0000 +#define CR4_FSGSBASE 0x00010000 +#define CR4_PCIDE 0x00020000 +#define CR4_XSAVE 0x00040000 +#define CR4_KL 0x00080000 +#define CR4_SMEP 0x00100000 +#define CR4_SMAP 0x00200000 +#define CR4_PKE 0x00400000 +#define CR4_CET 0x00800000 +#define CR4_PKS 0x01000000 +#define CR4_UINTR 0x02000000 /* Descriptors size */ #define GDT_ENTRIES 128 diff --git a/sdk/xtdk/i686/artypes.h b/sdk/xtdk/i686/artypes.h index 1f818dd..b989726 100644 --- a/sdk/xtdk/i686/artypes.h +++ b/sdk/xtdk/i686/artypes.h @@ -39,13 +39,15 @@ #define CR4_PCE 0x00000100 #define CR4_FXSR 0x00000200 #define CR4_XMMEXCPT 0x00000400 -#define CR4_LA57 0x00001000 -#define CR4_RESERVED1 0x00001800 +#define CR4_UMIP 0x00000800 #define CR4_VMXE 0x00002000 #define CR4_SMXE 0x00004000 -#define CR4_RESERVED2 0x00018000 -#define CR4_XSAVE 0x00020000 -#define CR4_RESERVED3 0xFFFC0000 +#define CR4_FSGSBASE 0x00010000 +#define CR4_PCIDE 0x00020000 +#define CR4_XSAVE 0x00040000 +#define CR4_SMEP 0x00100000 +#define CR4_SMAP 0x00200000 +#define CR4_PKE 0x00400000 /* Descriptors size */ #define GDT_ENTRIES 128 -- 2.50.1 From ebae8c655c7c4180f7ad4b86cfd5ce68b0799d9d Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Tue, 19 Aug 2025 23:59:58 +0200 Subject: [PATCH 26/34] Expand CR4, MSR, and EFER register definitions --- sdk/xtdk/amd64/artypes.h | 7 +++++++ sdk/xtdk/i686/artypes.h | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/sdk/xtdk/amd64/artypes.h b/sdk/xtdk/amd64/artypes.h index 2bc4493..b9fdedd 100644 --- a/sdk/xtdk/amd64/artypes.h +++ b/sdk/xtdk/amd64/artypes.h @@ -53,6 +53,8 @@ #define CR4_CET 0x00800000 #define CR4_PKS 0x01000000 #define CR4_UINTR 0x02000000 +#define CR4_LASS 0x08000000 +#define CR4_LAM_SUP 0x10000000 /* Descriptors size */ #define GDT_ENTRIES 128 @@ -91,6 +93,7 @@ #define X86_MSR_FSBASE 0xC0000100 #define X86_MSR_GSBASE 0xC0000101 #define X86_MSR_KERNEL_GSBASE 0xC0000102 +#define X86_MSR_TSC_AUX 0xC0000103 /* Processor features in the EFER MSR */ #define X86_MSR_EFER_SCE (1 << 0) @@ -98,6 +101,10 @@ #define X86_MSR_EFER_LMA (1 << 10) #define X86_MSR_EFER_NXE (1 << 11) #define X86_MSR_EFER_SVME (1 << 12) +#define X86_EFER_LMSLE (1 << 13) +#define X86_EFER_FFXSR (1 << 14) +#define X86_EFER_TCE (1 << 15) +#define X86_EFER_AUTOIBRS (1 << 21) /* X86 EFLAG bit masks definitions */ #define X86_EFLAGS_NF_MASK 0x00000000 /* None */ diff --git a/sdk/xtdk/i686/artypes.h b/sdk/xtdk/i686/artypes.h index b989726..0c4da8b 100644 --- a/sdk/xtdk/i686/artypes.h +++ b/sdk/xtdk/i686/artypes.h @@ -40,14 +40,21 @@ #define CR4_FXSR 0x00000200 #define CR4_XMMEXCPT 0x00000400 #define CR4_UMIP 0x00000800 +#define CR4_LA57 0x00001000 #define CR4_VMXE 0x00002000 #define CR4_SMXE 0x00004000 #define CR4_FSGSBASE 0x00010000 #define CR4_PCIDE 0x00020000 #define CR4_XSAVE 0x00040000 +#define CR4_KL 0x00080000 #define CR4_SMEP 0x00100000 #define CR4_SMAP 0x00200000 #define CR4_PKE 0x00400000 +#define CR4_CET 0x00800000 +#define CR4_PKS 0x01000000 +#define CR4_UINTR 0x02000000 +#define CR4_LASS 0x08000000 +#define CR4_LAM_SUP 0x10000000 /* Descriptors size */ #define GDT_ENTRIES 128 -- 2.50.1 From 2468d80078a1b2e62c716f3875902fc378316049 Mon Sep 17 00:00:00 2001 From: 
Aiken Harris Date: Wed, 20 Aug 2025 00:20:10 +0200 Subject: [PATCH 27/34] Add trampoline to enable 5-level paging --- xtoskrnl/ar/amd64/archsup.S | 123 ++++++++++++++++++++++++++++++- xtoskrnl/includes/amd64/asmsup.h | 15 ++++ 2 files changed, 137 insertions(+), 1 deletion(-) diff --git a/xtoskrnl/ar/amd64/archsup.S b/xtoskrnl/ar/amd64/archsup.S index 1c18ab2..6cee551 100644 --- a/xtoskrnl/ar/amd64/archsup.S +++ b/xtoskrnl/ar/amd64/archsup.S @@ -4,6 +4,7 @@ * FILE: xtoskrnl/ar/amd64/archsup.S * DESCRIPTION: Provides AMD64 architecture features not implementable in C * DEVELOPERS: Rafal Kupiec + * Aiken Harris */ #include @@ -13,7 +14,127 @@ /** - * This macro creates a trap handler for the specified vector. + * Enables eXtended Physical Addressing (XPA). + * + * @param PageMap + * Supplies a pointer to the page map to be used. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +.global ArEnableExtendedPhysicalAddressing +ArEnableExtendedPhysicalAddressing: + /* Save the original CR4 register */ + movq %cr4, %rax + + /* Save the state of stack pointer and non-volatile registers */ + movq %rsp, XpaRegisterSaveArea(%rip) + movq %rbp, XpaRegisterSaveArea+0x08(%rip) + movq %rax, XpaRegisterSaveArea+0x10(%rip) + movq %rbx, XpaRegisterSaveArea+0x18(%rip) + + /* Save the original CR0 register */ + movq %cr0, %rbp + + /* Load temporary GDT required for mode transitions */ + leaq XpaTemporaryGdtDesc(%rip), %rax + movq %rax, XpaTemporaryGdtBase(%rip) + lgdtq XpaTemporaryGdtSize(%rip) + + /* Load addresses for entering compatibility mode and re-entering long mode */ + leaq XpaEnterCompatMode(%rip), %rax + leaq XpaEnterLongMode(%rip), %rbx + + /* Push the 32-bit code segment selector and the target address for a far jump */ + pushq $GDT_R0_CMCODE + pushq %rax + + /* Perform a far return to switch to 32-bit compatibility mode */ + lretq + +XpaEnterCompatMode: + /* Enter 32-bit compatibility mode */ + .code32 + + /* Store the PageMap pointer on the stack for future use */ + pushl %ecx + + /* Set the stack segment to the 32-bit data segment selector */ + movl $GDT_R0_DATA, %eax + movl %eax, %ss + + /* Disable PGE and PCIDE to ensure all TLB entries will be flushed */ + movl %cr4, %eax + andl $~(CR4_PGE | CR4_PCIDE), %eax + movl %eax, %cr4 + + /* Temporarily disable paging */ + movl %ebp, %eax + andl $~CR0_PG, %eax + movl %eax, %cr0 + + /* Disable Long Mode as prerequisite for enabling 5-level paging */ + movl $X86_MSR_EFER, %ecx + rdmsr + andl $~X86_MSR_EFER_LME, %eax + wrmsr + + /* Transition to 5-level paging (PML5/LA57) */ + movl %cr4, %eax + orl $CR4_LA57, %eax + movl %eax, %cr4 + + /* Restore the PageMap pointer from the stack and load it into CR3 */ + popl %ecx + movl %ecx, %cr3 + + /* Re-enable Long Mode */ + movl $X86_MSR_EFER, %ecx + rdmsr + orl $X86_MSR_EFER_LME, %eax + wrmsr + + /* Restore CR0 with paging enabled and flush the instruction pipeline */ + movl %ebp, %cr0 + call XpaFlushInstructions + +XpaFlushInstructions: + /* Push the 64-bit code segment selector and the target address for a far jump */ + pushl $GDT_R0_CODE + pushl %ebx + + /* Perform a far return to switch to 64-bit long mode */ + lretl + +XpaEnterLongMode: + /* Enter 64-bit long mode */ + .code64 + + /* Restore the stack pointer and non-volatile registers */ + movq XpaRegisterSaveArea(%rip), %rsp + movq XpaRegisterSaveArea+8(%rip), %rbp + movq XpaRegisterSaveArea+0x10(%rip), %rax + movq XpaRegisterSaveArea+0x18(%rip), %rbx + + /* Restore the original CR4 register with LA57 bit set */ + orq 
$CR4_LA57, %rax + movq %rax, %cr4 + + /* Return to the caller */ + retq + +/* Data section for saving registers and temporary GDT */ +XpaRegisterSaveArea: .quad 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 +XpaTemporaryGdtSize: .short ArEnableExtendedPhysicalAddressingEnd - XpaTemporaryGdtDesc - 1 +XpaTemporaryGdtBase: .quad 0x0000000000000000 +XpaTemporaryGdtDesc: .quad 0x0000000000000000, 0x00CF9A000000FFFF, 0x00AF9A000000FFFF, 0x00CF92000000FFFF + +.global ArEnableExtendedPhysicalAddressingEnd +ArEnableExtendedPhysicalAddressingEnd: + +/** + * Creates a trap handler for the specified vector. * * @param Vector * Supplies a trap vector number. diff --git a/xtoskrnl/includes/amd64/asmsup.h b/xtoskrnl/includes/amd64/asmsup.h index 6b243d2..7e04ab6 100644 --- a/xtoskrnl/includes/amd64/asmsup.h +++ b/xtoskrnl/includes/amd64/asmsup.h @@ -10,6 +10,21 @@ #define __XTOSKRNL_AMD64_ASMSUP_H +/* Control Register bit definitions */ +#define CR0_PG 0x80000000 +#define CR4_PGE 0x00000080 +#define CR4_LA57 0x00001000 +#define CR4_PCIDE 0x00020000 + +/* GDT selectors */ +#define GDT_R0_CMCODE 0x08 +#define GDT_R0_CODE 0x10 +#define GDT_R0_DATA 0x18 + +/* MSR registers */ +#define X86_MSR_EFER 0xC0000080 +#define X86_MSR_EFER_LME (1 << 8) + /* KTRAP_FRAME structure offsets */ #define TrapXmm0 0 #define TrapXmm1 16 -- 2.50.1 From c4a7df6f38f0695bf714c9b62d2f61ca690db28f Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Wed, 20 Aug 2025 20:20:35 +0200 Subject: [PATCH 28/34] Extract trampoline code into a separate file --- xtoskrnl/CMakeLists.txt | 2 + xtoskrnl/ar/amd64/archsup.S | 121 -------------------------------- xtoskrnl/ar/amd64/boot.S | 133 ++++++++++++++++++++++++++++++++++++ xtoskrnl/ar/i686/boot.S | 14 ++++ 4 files changed, 149 insertions(+), 121 deletions(-) create mode 100644 xtoskrnl/ar/amd64/boot.S create mode 100644 xtoskrnl/ar/i686/boot.S diff --git a/xtoskrnl/CMakeLists.txt b/xtoskrnl/CMakeLists.txt index 0e36cba..9f208da 100644 --- a/xtoskrnl/CMakeLists.txt +++ b/xtoskrnl/CMakeLists.txt @@ -9,6 +9,7 @@ include_directories( # Specify list of library source code files list(APPEND LIBXTOS_SOURCE + ${XTOSKRNL_SOURCE_DIR}/ar/${ARCH}/boot.S ${XTOSKRNL_SOURCE_DIR}/ar/${ARCH}/cpufunc.c ${XTOSKRNL_SOURCE_DIR}/hl/cport.c ${XTOSKRNL_SOURCE_DIR}/hl/${ARCH}/ioport.c @@ -23,6 +24,7 @@ list(APPEND LIBXTOS_SOURCE # Specify list of kernel source code files list(APPEND XTOSKRNL_SOURCE ${XTOSKRNL_SOURCE_DIR}/ar/${ARCH}/archsup.S + ${XTOSKRNL_SOURCE_DIR}/ar/${ARCH}/boot.S ${XTOSKRNL_SOURCE_DIR}/ar/${ARCH}/cpufunc.c ${XTOSKRNL_SOURCE_DIR}/ar/${ARCH}/globals.c ${XTOSKRNL_SOURCE_DIR}/ar/${ARCH}/procsup.c diff --git a/xtoskrnl/ar/amd64/archsup.S b/xtoskrnl/ar/amd64/archsup.S index 6cee551..849df4f 100644 --- a/xtoskrnl/ar/amd64/archsup.S +++ b/xtoskrnl/ar/amd64/archsup.S @@ -4,7 +4,6 @@ * FILE: xtoskrnl/ar/amd64/archsup.S * DESCRIPTION: Provides AMD64 architecture features not implementable in C * DEVELOPERS: Rafal Kupiec - * Aiken Harris */ #include @@ -13,126 +12,6 @@ .text -/** - * Enables eXtended Physical Addressing (XPA). - * - * @param PageMap - * Supplies a pointer to the page map to be used. - * - * @return This routine does not return any value. 
- * - * @since XT 1.0 - */ -.global ArEnableExtendedPhysicalAddressing -ArEnableExtendedPhysicalAddressing: - /* Save the original CR4 register */ - movq %cr4, %rax - - /* Save the state of stack pointer and non-volatile registers */ - movq %rsp, XpaRegisterSaveArea(%rip) - movq %rbp, XpaRegisterSaveArea+0x08(%rip) - movq %rax, XpaRegisterSaveArea+0x10(%rip) - movq %rbx, XpaRegisterSaveArea+0x18(%rip) - - /* Save the original CR0 register */ - movq %cr0, %rbp - - /* Load temporary GDT required for mode transitions */ - leaq XpaTemporaryGdtDesc(%rip), %rax - movq %rax, XpaTemporaryGdtBase(%rip) - lgdtq XpaTemporaryGdtSize(%rip) - - /* Load addresses for entering compatibility mode and re-entering long mode */ - leaq XpaEnterCompatMode(%rip), %rax - leaq XpaEnterLongMode(%rip), %rbx - - /* Push the 32-bit code segment selector and the target address for a far jump */ - pushq $GDT_R0_CMCODE - pushq %rax - - /* Perform a far return to switch to 32-bit compatibility mode */ - lretq - -XpaEnterCompatMode: - /* Enter 32-bit compatibility mode */ - .code32 - - /* Store the PageMap pointer on the stack for future use */ - pushl %ecx - - /* Set the stack segment to the 32-bit data segment selector */ - movl $GDT_R0_DATA, %eax - movl %eax, %ss - - /* Disable PGE and PCIDE to ensure all TLB entries will be flushed */ - movl %cr4, %eax - andl $~(CR4_PGE | CR4_PCIDE), %eax - movl %eax, %cr4 - - /* Temporarily disable paging */ - movl %ebp, %eax - andl $~CR0_PG, %eax - movl %eax, %cr0 - - /* Disable Long Mode as prerequisite for enabling 5-level paging */ - movl $X86_MSR_EFER, %ecx - rdmsr - andl $~X86_MSR_EFER_LME, %eax - wrmsr - - /* Transition to 5-level paging (PML5/LA57) */ - movl %cr4, %eax - orl $CR4_LA57, %eax - movl %eax, %cr4 - - /* Restore the PageMap pointer from the stack and load it into CR3 */ - popl %ecx - movl %ecx, %cr3 - - /* Re-enable Long Mode */ - movl $X86_MSR_EFER, %ecx - rdmsr - orl $X86_MSR_EFER_LME, %eax - wrmsr - - /* Restore CR0 with paging enabled and flush the instruction pipeline */ - movl %ebp, %cr0 - call XpaFlushInstructions - -XpaFlushInstructions: - /* Push the 64-bit code segment selector and the target address for a far jump */ - pushl $GDT_R0_CODE - pushl %ebx - - /* Perform a far return to switch to 64-bit long mode */ - lretl - -XpaEnterLongMode: - /* Enter 64-bit long mode */ - .code64 - - /* Restore the stack pointer and non-volatile registers */ - movq XpaRegisterSaveArea(%rip), %rsp - movq XpaRegisterSaveArea+8(%rip), %rbp - movq XpaRegisterSaveArea+0x10(%rip), %rax - movq XpaRegisterSaveArea+0x18(%rip), %rbx - - /* Restore the original CR4 register with LA57 bit set */ - orq $CR4_LA57, %rax - movq %rax, %cr4 - - /* Return to the caller */ - retq - -/* Data section for saving registers and temporary GDT */ -XpaRegisterSaveArea: .quad 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 -XpaTemporaryGdtSize: .short ArEnableExtendedPhysicalAddressingEnd - XpaTemporaryGdtDesc - 1 -XpaTemporaryGdtBase: .quad 0x0000000000000000 -XpaTemporaryGdtDesc: .quad 0x0000000000000000, 0x00CF9A000000FFFF, 0x00AF9A000000FFFF, 0x00CF92000000FFFF - -.global ArEnableExtendedPhysicalAddressingEnd -ArEnableExtendedPhysicalAddressingEnd: - /** * Creates a trap handler for the specified vector. 
* diff --git a/xtoskrnl/ar/amd64/boot.S b/xtoskrnl/ar/amd64/boot.S new file mode 100644 index 0000000..d631845 --- /dev/null +++ b/xtoskrnl/ar/amd64/boot.S @@ -0,0 +1,133 @@ +/** + * PROJECT: ExectOS + * COPYRIGHT: See COPYING.md in the top level directory + * FILE: xtoskrnl/ar/amd64/boot.S + * DESCRIPTION: AMD64-specific boot code for setting up the low-level CPU environment + * DEVELOPERS: Aiken Harris + */ + +#include + +.altmacro +.text + + +/** + * Enables eXtended Physical Addressing (XPA). + * + * @param PageMap + * Supplies a pointer to the page map to be used. + * + * @return This routine does not return any value. + * + * @since XT 1.0 + */ +.global ArEnableExtendedPhysicalAddressing +ArEnableExtendedPhysicalAddressing: + /* Save the original CR4 register */ + movq %cr4, %rax + + /* Save the state of stack pointer and non-volatile registers */ + movq %rsp, XpaRegisterSaveArea(%rip) + movq %rbp, XpaRegisterSaveArea+0x08(%rip) + movq %rax, XpaRegisterSaveArea+0x10(%rip) + movq %rbx, XpaRegisterSaveArea+0x18(%rip) + + /* Save the original CR0 register */ + movq %cr0, %rbp + + /* Load temporary GDT required for mode transitions */ + leaq XpaTemporaryGdtDesc(%rip), %rax + movq %rax, XpaTemporaryGdtBase(%rip) + lgdtq XpaTemporaryGdtSize(%rip) + + /* Load addresses for entering compatibility mode and re-entering long mode */ + leaq XpaEnterCompatMode(%rip), %rax + leaq XpaEnterLongMode(%rip), %rbx + + /* Push the 32-bit code segment selector and the target address for a far jump */ + pushq $GDT_R0_CMCODE + pushq %rax + + /* Perform a far return to switch to 32-bit compatibility mode */ + lretq + +XpaEnterCompatMode: + /* Enter 32-bit compatibility mode */ + .code32 + + /* Store the PageMap pointer on the stack for future use */ + pushl %ecx + + /* Set the stack segment to the 32-bit data segment selector */ + movl $GDT_R0_DATA, %eax + movl %eax, %ss + + /* Disable PGE and PCIDE to ensure all TLB entries will be flushed */ + movl %cr4, %eax + andl $~(CR4_PGE | CR4_PCIDE), %eax + movl %eax, %cr4 + + /* Temporarily disable paging */ + movl %ebp, %eax + andl $~CR0_PG, %eax + movl %eax, %cr0 + + /* Disable Long Mode as prerequisite for enabling 5-level paging */ + movl $X86_MSR_EFER, %ecx + rdmsr + andl $~X86_MSR_EFER_LME, %eax + wrmsr + + /* Transition to 5-level paging (PML5/LA57) */ + movl %cr4, %eax + orl $CR4_LA57, %eax + movl %eax, %cr4 + + /* Restore the PageMap pointer from the stack and load it into CR3 */ + popl %ecx + movl %ecx, %cr3 + + /* Re-enable Long Mode */ + movl $X86_MSR_EFER, %ecx + rdmsr + orl $X86_MSR_EFER_LME, %eax + wrmsr + + /* Restore CR0 with paging enabled and flush the instruction pipeline */ + movl %ebp, %cr0 + call XpaFlushInstructions + +XpaFlushInstructions: + /* Push the 64-bit code segment selector and the target address for a far jump */ + pushl $GDT_R0_CODE + pushl %ebx + + /* Perform a far return to switch to 64-bit long mode */ + lretl + +XpaEnterLongMode: + /* Enter 64-bit long mode */ + .code64 + + /* Restore the stack pointer and non-volatile registers */ + movq XpaRegisterSaveArea(%rip), %rsp + movq XpaRegisterSaveArea+8(%rip), %rbp + movq XpaRegisterSaveArea+0x10(%rip), %rax + movq XpaRegisterSaveArea+0x18(%rip), %rbx + + /* Restore the original CR4 register with LA57 bit set */ + orq $CR4_LA57, %rax + movq %rax, %cr4 + + /* Return to the caller */ + retq + +/* Data section for saving registers and temporary GDT */ +XpaRegisterSaveArea: .quad 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 +XpaTemporaryGdtSize: .short 
ArEnableExtendedPhysicalAddressingEnd - XpaTemporaryGdtDesc - 1 +XpaTemporaryGdtBase: .quad 0x0000000000000000 +XpaTemporaryGdtDesc: .quad 0x0000000000000000, 0x00CF9A000000FFFF, 0x00AF9A000000FFFF, 0x00CF92000000FFFF + +.global ArEnableExtendedPhysicalAddressingEnd +ArEnableExtendedPhysicalAddressingEnd: diff --git a/xtoskrnl/ar/i686/boot.S b/xtoskrnl/ar/i686/boot.S new file mode 100644 index 0000000..edb4c05 --- /dev/null +++ b/xtoskrnl/ar/i686/boot.S @@ -0,0 +1,14 @@ +/** + * PROJECT: ExectOS + * COPYRIGHT: See COPYING.md in the top level directory + * FILE: xtoskrnl/ar/i686/boot.S + * DESCRIPTION: i686-specific boot code for setting up the low-level CPU environment + * DEVELOPERS: Aiken Harris + */ + +#include + +.altmacro +.text + + -- 2.50.1 From 4a7ea6009dfd00dcbd1e9d0848ff492d72709765 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Wed, 20 Aug 2025 20:23:44 +0200 Subject: [PATCH 29/34] Expose ArEnableExtendedPhysicalAddressing function in XTDK --- sdk/xtdk/amd64/arfuncs.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sdk/xtdk/amd64/arfuncs.h b/sdk/xtdk/amd64/arfuncs.h index 50ebe29..72a9d2c 100644 --- a/sdk/xtdk/amd64/arfuncs.h +++ b/sdk/xtdk/amd64/arfuncs.h @@ -24,6 +24,10 @@ XTCDECL BOOLEAN ArCpuId(IN OUT PCPUID_REGISTERS Registers); +XTCDECL +VOID +ArEnableExtendedPhysicalAddressing(IN ULONG_PTR PageMap); + XTCDECL VOID ArHalt(VOID); -- 2.50.1 From 9f6121e9b29fd05895f5b2298f50fe0b8442eef6 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Wed, 20 Aug 2025 20:37:55 +0200 Subject: [PATCH 30/34] Map the physical page for trampoline code --- sdk/xtdk/amd64/mmtypes.h | 3 +++ sdk/xtdk/i686/mmtypes.h | 2 ++ xtldr/arch/amd64/memory.c | 9 +++++++++ xtldr/arch/i686/memory.c | 9 +++++++++ 4 files changed, 23 insertions(+) diff --git a/sdk/xtdk/amd64/mmtypes.h b/sdk/xtdk/amd64/mmtypes.h index 8fccfaf..dfe9d7d 100644 --- a/sdk/xtdk/amd64/mmtypes.h +++ b/sdk/xtdk/amd64/mmtypes.h @@ -63,6 +63,9 @@ /* Maximum physical address used by HAL allocations */ #define MM_MAXIMUM_PHYSICAL_ADDRESS 0x00000000FFFFFFFFULL +/* Trampoline code address */ +#define MM_TRAMPOLINE_ADDRESS 0x80000 + /* Page size enumeration list */ typedef enum _PAGE_SIZE { diff --git a/sdk/xtdk/i686/mmtypes.h b/sdk/xtdk/i686/mmtypes.h index 0547330..027fc1a 100644 --- a/sdk/xtdk/i686/mmtypes.h +++ b/sdk/xtdk/i686/mmtypes.h @@ -53,6 +53,8 @@ /* Maximum physical address used by HAL allocations */ #define MM_MAXIMUM_PHYSICAL_ADDRESS 0xFFFFFFFF +/* Trampoline code address */ +#define MM_TRAMPOLINE_ADDRESS 0x80000 /* Page size enumeration list */ typedef enum _PAGE_SIZE diff --git a/xtldr/arch/amd64/memory.c b/xtldr/arch/amd64/memory.c index 9b4f58b..8db75f5 100644 --- a/xtldr/arch/amd64/memory.c +++ b/xtldr/arch/amd64/memory.c @@ -54,6 +54,15 @@ BlBuildPageMap(IN PXTBL_PAGE_MAPPING PageMap, return Status; } + /* Map the trampoline code area */ + Status = BlMapVirtualMemory(PageMap, (PVOID)MM_TRAMPOLINE_ADDRESS,(PVOID)MM_TRAMPOLINE_ADDRESS, + 1, LoaderFirmwareTemporary); + if(Status != STATUS_EFI_SUCCESS) + { + /* Mapping trampoline code failed */ + return Status; + } + /* Get list of XTLDR modules */ ModulesList = BlGetModulesList(); ModulesListEntry = ModulesList->Flink; diff --git a/xtldr/arch/i686/memory.c b/xtldr/arch/i686/memory.c index 51f91b4..a642e1f 100644 --- a/xtldr/arch/i686/memory.c +++ b/xtldr/arch/i686/memory.c @@ -90,6 +90,15 @@ BlBuildPageMap(IN PXTBL_PAGE_MAPPING PageMap, return Status; } + /* Map the trampoline code area */ + Status = BlMapVirtualMemory(PageMap, 
(PVOID)MM_TRAMPOLINE_ADDRESS,(PVOID)MM_TRAMPOLINE_ADDRESS, + 1, LoaderFirmwareTemporary); + if(Status != STATUS_EFI_SUCCESS) + { + /* Mapping trampoline code failed */ + return Status; + } + /* Get list of XTLDR modules */ ModulesList = BlGetModulesList(); ModulesListEntry = ModulesList->Flink; -- 2.50.1 From 88b3a5796213a87e6e68634b83b5f2706122ee38 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Wed, 20 Aug 2025 20:59:31 +0200 Subject: [PATCH 31/34] Allow specifying an allocation type when allocating pages --- xtldr/arch/amd64/memory.c | 4 ++-- xtldr/includes/xtldr.h | 3 ++- xtldr/memory.c | 5 +++-- xtldr/modules/pecoff/pecoff.c | 4 ++-- xtldr/modules/xtos_o/amd64/memory.c | 8 ++++---- xtldr/modules/xtos_o/i686/memory.c | 2 +- xtldr/modules/xtos_o/xtos.c | 6 +++--- xtldr/volume.c | 2 +- 8 files changed, 18 insertions(+), 16 deletions(-) diff --git a/xtldr/arch/amd64/memory.c b/xtldr/arch/amd64/memory.c index 8db75f5..c94e735 100644 --- a/xtldr/arch/amd64/memory.c +++ b/xtldr/arch/amd64/memory.c @@ -35,7 +35,7 @@ BlBuildPageMap(IN PXTBL_PAGE_MAPPING PageMap, EFI_STATUS Status; /* Allocate pages for the Page Map */ - Status = BlAllocateMemoryPages(1, &Address); + Status = BlAllocateMemoryPages(AllocateAnyPages, 1, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Memory allocation failure */ @@ -288,7 +288,7 @@ BlpGetNextPageTable(IN PXTBL_PAGE_MAPPING PageMap, else { /* Allocate pages for new PML entry */ - Status = BlAllocateMemoryPages(1, &Address); + Status = BlAllocateMemoryPages(AllocateAnyPages, 1, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Memory allocation failure */ diff --git a/xtldr/includes/xtldr.h b/xtldr/includes/xtldr.h index 1564402..791b09a 100644 --- a/xtldr/includes/xtldr.h +++ b/xtldr/includes/xtldr.h @@ -18,7 +18,8 @@ /* XTLDR routines forward references */ XTCDECL EFI_STATUS -BlAllocateMemoryPages(IN ULONGLONG NumberOfPages, +BlAllocateMemoryPages(IN EFI_ALLOCATE_TYPE AllocationType, + IN ULONGLONG NumberOfPages, OUT PEFI_PHYSICAL_ADDRESS Memory); XTCDECL diff --git a/xtldr/memory.c b/xtldr/memory.c index c135d03..1d6f25b 100644 --- a/xtldr/memory.c +++ b/xtldr/memory.c @@ -24,10 +24,11 @@ */ XTCDECL EFI_STATUS -BlAllocateMemoryPages(IN ULONGLONG NumberOfPages, +BlAllocateMemoryPages(IN EFI_ALLOCATE_TYPE AllocationType, + IN ULONGLONG NumberOfPages, OUT PEFI_PHYSICAL_ADDRESS Memory) { - return EfiSystemTable->BootServices->AllocatePages(AllocateAnyPages, EfiLoaderData, NumberOfPages, Memory); + return EfiSystemTable->BootServices->AllocatePages(AllocationType, EfiLoaderData, NumberOfPages, Memory); } /** diff --git a/xtldr/modules/pecoff/pecoff.c b/xtldr/modules/pecoff/pecoff.c index 5288c37..aac0208 100644 --- a/xtldr/modules/pecoff/pecoff.c +++ b/xtldr/modules/pecoff/pecoff.c @@ -409,7 +409,7 @@ PeLoadImage(IN PEFI_FILE_HANDLE FileHandle, Pages = EFI_SIZE_TO_PAGES(ImageData->FileSize); /* Allocate pages */ - Status = XtLdrProtocol->Memory.AllocatePages(Pages, &Address); + Status = XtLdrProtocol->Memory.AllocatePages(AllocateAnyPages, Pages, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Pages allocation failure */ @@ -472,7 +472,7 @@ PeLoadImage(IN PEFI_FILE_HANDLE FileHandle, ImageData->ImagePages = EFI_SIZE_TO_PAGES(ImageData->ImageSize); /* Allocate image pages */ - Status = XtLdrProtocol->Memory.AllocatePages(ImageData->ImagePages, &Address); + Status = XtLdrProtocol->Memory.AllocatePages(AllocateAnyPages, ImageData->ImagePages, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Pages reallocation failure */ diff --git 
a/xtldr/modules/xtos_o/amd64/memory.c b/xtldr/modules/xtos_o/amd64/memory.c index ca24832..bfc7b5e 100644 --- a/xtldr/modules/xtos_o/amd64/memory.c +++ b/xtldr/modules/xtos_o/amd64/memory.c @@ -84,7 +84,7 @@ XtpMapHardwareMemoryPool(IN PXTBL_PAGE_MAPPING PageMap) if(!P5eBase[(MM_HARDWARE_VA_START >> MM_P5I_SHIFT) & 0x1FF].Valid) { /* No valid P5E, allocate memory */ - Status = XtLdrProtocol->Memory.AllocatePages(1, &Address); + Status = XtLdrProtocol->Memory.AllocatePages(AllocateAnyPages, 1, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Memory allocation failure, return error */ @@ -118,7 +118,7 @@ XtpMapHardwareMemoryPool(IN PXTBL_PAGE_MAPPING PageMap) if(!PxeBase[(MM_HARDWARE_VA_START >> MM_PXI_SHIFT) & 0x1FF].Valid) { /* No valid PXE, allocate memory */ - Status = XtLdrProtocol->Memory.AllocatePages(1, &Address); + Status = XtLdrProtocol->Memory.AllocatePages(AllocateAnyPages, 1, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Memory allocation failure, return error */ @@ -146,7 +146,7 @@ XtpMapHardwareMemoryPool(IN PXTBL_PAGE_MAPPING PageMap) if(!PpeBase[(MM_HARDWARE_VA_START >> MM_PPI_SHIFT) & 0x1FF].Valid) { /* No valid PPE, allocate memory */ - Status = XtLdrProtocol->Memory.AllocatePages(1, &Address); + Status = XtLdrProtocol->Memory.AllocatePages(AllocateAnyPages, 1, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Memory allocation failure, return error */ @@ -177,7 +177,7 @@ XtpMapHardwareMemoryPool(IN PXTBL_PAGE_MAPPING PageMap) if(!PdeBase[((MM_HARDWARE_VA_START >> MM_PDI_SHIFT) & 0x1FF) + Index].Valid) { /* No valid PDE, allocate memory */ - Status = XtLdrProtocol->Memory.AllocatePages(1, &Address); + Status = XtLdrProtocol->Memory.AllocatePages(AllocateAnyPages, 1, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Memory allocation failure, return error */ diff --git a/xtldr/modules/xtos_o/i686/memory.c b/xtldr/modules/xtos_o/i686/memory.c index be8b809..959ac8c 100644 --- a/xtldr/modules/xtos_o/i686/memory.c +++ b/xtldr/modules/xtos_o/i686/memory.c @@ -64,7 +64,7 @@ XtpMapHardwareMemoryPool(IN PXTBL_PAGE_MAPPING PageMap) EFI_STATUS Status; /* Allocate memory */ - Status = XtLdrProtocol->Memory.AllocatePages(1, &Address); + Status = XtLdrProtocol->Memory.AllocatePages(AllocateAnyPages, 1, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Memory allocation failure, return error */ diff --git a/xtldr/modules/xtos_o/xtos.c b/xtldr/modules/xtos_o/xtos.c index f8844c1..f9af717 100644 --- a/xtldr/modules/xtos_o/xtos.c +++ b/xtldr/modules/xtos_o/xtos.c @@ -75,7 +75,7 @@ XtGetMemoryDescriptorList(IN PXTBL_PAGE_MAPPING PageMap, Pages = (ULONGLONG)EFI_SIZE_TO_PAGES((PageMap->MapSize + 1) * sizeof(LOADER_MEMORY_DESCRIPTOR)); - Status = XtLdrProtocol->Memory.AllocatePages(Pages, &Address); + Status = XtLdrProtocol->Memory.AllocatePages(AllocateAnyPages, Pages, &Address); if(Status != STATUS_EFI_SUCCESS) { return Status; @@ -136,7 +136,7 @@ XtGetSystemResourcesList(IN PXTBL_PAGE_MAPPING PageMap, Pages = (ULONGLONG)EFI_SIZE_TO_PAGES(sizeof(SYSTEM_RESOURCE_ACPI) + sizeof(SYSTEM_RESOURCE_FRAMEBUFFER)); - Status = XtLdrProtocol->Memory.AllocatePages(Pages, &Address); + Status = XtLdrProtocol->Memory.AllocatePages(AllocateAnyPages, Pages, &Address); if(Status != STATUS_EFI_SUCCESS) { return Status; @@ -552,7 +552,7 @@ XtpInitializeLoaderBlock(IN PXTBL_PAGE_MAPPING PageMap, BlockPages = EFI_SIZE_TO_PAGES(sizeof(KERNEL_INITIALIZATION_BLOCK)); /* Allocate memory for kernel initialization block */ - Status = XtLdrProtocol->Memory.AllocatePages(BlockPages, &Address); + Status = 
XtLdrProtocol->Memory.AllocatePages(AllocateAnyPages, BlockPages, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Memory allocation failure */ diff --git a/xtldr/volume.c b/xtldr/volume.c index 73f3499..abc3793 100644 --- a/xtldr/volume.c +++ b/xtldr/volume.c @@ -616,7 +616,7 @@ BlReadFile(IN PEFI_FILE_HANDLE DirHandle, Pages = EFI_SIZE_TO_PAGES(FileInfo->FileSize); /* Allocate pages */ - Status = BlAllocateMemoryPages(Pages, &Address); + Status = BlAllocateMemoryPages(AllocateAnyPages, Pages, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Pages allocation failure */ -- 2.50.1 From d1b14fccdd6573aa9c925fd7acb32d503bf90767 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Wed, 20 Aug 2025 21:08:43 +0200 Subject: [PATCH 32/34] Resolve build issues caused by the last commit --- sdk/xtdk/bltypes.h | 2 +- xtldr/arch/i686/memory.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/sdk/xtdk/bltypes.h b/sdk/xtdk/bltypes.h index ffd467c..42ffaa3 100644 --- a/sdk/xtdk/bltypes.h +++ b/sdk/xtdk/bltypes.h @@ -43,7 +43,7 @@ typedef LONG (*PBL_GET_MEMTYPE_ROUTINE)(IN EFI_MEMORY_TYPE EfiMemoryType); /* Boot Loader protocol routine pointers */ -typedef EFI_STATUS (*PBL_ALLOCATE_PAGES)(IN ULONGLONG Size, OUT PEFI_PHYSICAL_ADDRESS Memory); +typedef EFI_STATUS (*PBL_ALLOCATE_PAGES)(IN EFI_ALLOCATE_TYPE AllocationType, IN ULONGLONG Size, OUT PEFI_PHYSICAL_ADDRESS Memory); typedef EFI_STATUS (*PBL_ALLOCATE_POOL)(IN UINT_PTR Size, OUT PVOID *Memory); typedef EFI_STATUS (*PBL_BOOTMENU_INITIALIZE_OS_LIST)(OUT PXTBL_BOOTMENU_ITEM *MenuEntries, OUT PULONG EntriesCount, OUT PULONG DefaultId); typedef BOOLEAN (*PBL_BOOTUTIL_GET_BOOLEAN_PARAMETER)(IN CONST PWCHAR Parameters, IN CONST PWCHAR Needle); diff --git a/xtldr/arch/i686/memory.c b/xtldr/arch/i686/memory.c index a642e1f..f6aede9 100644 --- a/xtldr/arch/i686/memory.c +++ b/xtldr/arch/i686/memory.c @@ -36,7 +36,7 @@ BlBuildPageMap(IN PXTBL_PAGE_MAPPING PageMap, if(PageMap->PageMapLevel == 3) { /* Allocate a page for the 3-level page map structure (PAE enabled) */ - Status = BlAllocateMemoryPages(1, &Address); + Status = BlAllocateMemoryPages(AllocateAnyPages, 1, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Memory allocation failed, cannot proceed with page map creation */ @@ -48,7 +48,7 @@ BlBuildPageMap(IN PXTBL_PAGE_MAPPING PageMap, RtlZeroMemory(PageMap->PtePointer, EFI_PAGE_SIZE); /* Allocate 4 pages for the Page Directories (PDs) */ - Status = BlAllocateMemoryPages(4, &DirectoryAddress); + Status = BlAllocateMemoryPages(AllocateAnyPages, 4, &DirectoryAddress); if(Status != STATUS_EFI_SUCCESS) { /* Memory allocation failed, cannot proceed with page map creation */ @@ -70,7 +70,7 @@ BlBuildPageMap(IN PXTBL_PAGE_MAPPING PageMap, else { /* Allocate a page for the 2-level page map structure (PAE disabled) */ - Status = BlAllocateMemoryPages(1, &Address); + Status = BlAllocateMemoryPages(AllocateAnyPages, 1, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Memory allocation failed, cannot proceed with page map creation */ @@ -349,7 +349,7 @@ BlpGetNextPageTable(IN PXTBL_PAGE_MAPPING PageMap, else { /* Allocate pages for new PML entry */ - Status = BlAllocateMemoryPages(1, &Address); + Status = BlAllocateMemoryPages(AllocateAnyPages, 1, &Address); if(Status != STATUS_EFI_SUCCESS) { /* Memory allocation failure */ -- 2.50.1 From 1ef2560ef6cf291739b3742a84f6a9776345dc61 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Thu, 21 Aug 2025 00:14:49 +0200 Subject: [PATCH 33/34] Enable LA57 by invoking the trampoline code --- 
xtldr/modules/xtos_o/amd64/memory.c | 54 ++++++++++++++++++++-------- xtldr/modules/xtos_o/includes/xtos.h | 6 ++++ 2 files changed, 46 insertions(+), 14 deletions(-) diff --git a/xtldr/modules/xtos_o/amd64/memory.c b/xtldr/modules/xtos_o/amd64/memory.c index bfc7b5e..55f8ec8 100644 --- a/xtldr/modules/xtos_o/amd64/memory.c +++ b/xtldr/modules/xtos_o/amd64/memory.c @@ -43,14 +43,13 @@ XtpDeterminePagingLevel(IN CONST PWCHAR Parameters) /* Query CPUID */ ArCpuId(&CpuRegisters); - // TODO: Uncomment the following code when LA57 support is implemented in the bootloader - // /* Check if eXtended Physical Addressing (XPA) is enabled and if LA57 is supported by the CPU */ - // if((CpuRegisters.Ecx & CPUID_FEATURES_ECX_LA57) && - // !(XtLdrProtocol->BootUtil.GetBooleanParameter(Parameters, L"NOXPA"))) - // { - // /* Enable LA57 (PML5) */ - // return 4; - // } + /* Check if eXtended Physical Addressing (XPA) is enabled and if LA57 is supported by the CPU */ + if((CpuRegisters.Ecx & CPUID_FEATURES_ECX_LA57) && + !(XtLdrProtocol->BootUtil.GetBooleanParameter(Parameters, L"NOXPA"))) + { + /* Enable LA57 (PML5) */ + return 5; + } } /* Disable LA57 and use PML4 by default */ @@ -213,6 +212,9 @@ EFI_STATUS XtEnablePaging(IN PXTBL_PAGE_MAPPING PageMap) { EFI_STATUS Status; + EFI_PHYSICAL_ADDRESS TrampolineAddress; + PXT_TRAMPOLINE_ENTRY TrampolineEntry; + ULONG_PTR TrampolineSize; /* Build page map */ Status = XtLdrProtocol->Memory.BuildPageMap(PageMap, (PageMap->PageMapLevel > 4) ? MM_P5E_LA57_BASE : MM_PXE_BASE); @@ -232,6 +234,29 @@ XtEnablePaging(IN PXTBL_PAGE_MAPPING PageMap) return Status; } + /* Check the configured page map level to set the LA57 state accordingly */ + if(PageMap->PageMapLevel == 5) + { + /* Set the address of the trampoline code below 1MB */ + TrampolineAddress = MM_TRAMPOLINE_ADDRESS; + + /* Calculate the size of the trampoline code */ + TrampolineSize = (ULONG_PTR)ArEnableExtendedPhysicalAddressingEnd - (ULONG_PTR)ArEnableExtendedPhysicalAddressing; + + /* Allocate pages for the trampoline */ + Status = XtLdrProtocol->Memory.AllocatePages(AllocateAddress, EFI_SIZE_TO_PAGES(TrampolineSize), &TrampolineAddress); + if(Status != STATUS_EFI_SUCCESS) + { + /* Failed to allocate memory for trampoline code */ + XtLdrProtocol->Debug.Print(L"Failed to allocate memory for trampoline code (Status code: %zX)\n", Status); + return Status; + } + + /* Set the trampoline entry point and copy its code into the allocated buffer */ + TrampolineEntry = (PXT_TRAMPOLINE_ENTRY)(UINT_PTR)TrampolineAddress; + RtlCopyMemory(TrampolineEntry, ArEnableExtendedPhysicalAddressing, TrampolineSize); + } + /* Exit EFI Boot Services */ XtLdrProtocol->Debug.Print(L"Exiting EFI boot services\n"); Status = XtLdrProtocol->Util.ExitBootServices(); @@ -247,19 +272,20 @@ XtEnablePaging(IN PXTBL_PAGE_MAPPING PageMap) { /* Enable Linear Address 57-bit (LA57) extension */ XtLdrProtocol->Debug.Print(L"Enabling Linear Address 57-bit (LA57)\n"); + + /* Execute the trampoline to enable LA57 and write PML5 to CR3 */ + TrampolineEntry((UINT64)PageMap->PtePointer); } else { /* Disable Linear Address 57-bit (LA57) extension */ XtLdrProtocol->Debug.Print(L"Disabling Linear Address 57-bit (LA57)\n"); + + /* Write PML4 to CR3 and enable paging */ + ArWriteControlRegister(3, (UINT_PTR)PageMap->PtePointer); + ArWriteControlRegister(0, ArReadControlRegister(0) | CR0_PG); } - /* Write PML4 to CR3 */ - ArWriteControlRegister(3, (UINT_PTR)PageMap->PtePointer); - - /* Enable paging */ - ArWriteControlRegister(0, ArReadControlRegister(0) | 
CR0_PG); - /* Return success */ return STATUS_EFI_SUCCESS; } diff --git a/xtldr/modules/xtos_o/includes/xtos.h b/xtldr/modules/xtos_o/includes/xtos.h index 7bd30f8..2f4da6a 100644 --- a/xtldr/modules/xtos_o/includes/xtos.h +++ b/xtldr/modules/xtos_o/includes/xtos.h @@ -29,9 +29,15 @@ typedef struct _XT_FRAMEBUFFER_PROTOCOL /* EFI XT Loader Protocol */ EXTERN PXTBL_LOADER_PROTOCOL XtLdrProtocol; +/* XTOS trampoline end address to calculate trampoline size */ +EXTERN PVOID ArEnableExtendedPhysicalAddressingEnd[]; + /* XTOS kernel entry point */ typedef VOID (XTAPI *PXT_ENTRY_POINT)(IN PKERNEL_INITIALIZATION_BLOCK BootParameters); +/* XTOS trampoline entry point */ +typedef VOID (*PXT_TRAMPOLINE_ENTRY)(UINT64 PageMap); + /* XTOS boot protocol related routines forward references */ XTCDECL EFI_STATUS -- 2.50.1 From a84ef21571939a24a0b435bbbead2991a586fa13 Mon Sep 17 00:00:00 2001 From: Aiken Harris Date: Thu, 21 Aug 2025 01:42:36 +0200 Subject: [PATCH 34/34] Adjust LA57 base addresses to prevent overflow --- sdk/xtdk/amd64/mmtypes.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/sdk/xtdk/amd64/mmtypes.h b/sdk/xtdk/amd64/mmtypes.h index dfe9d7d..3b8db1f 100644 --- a/sdk/xtdk/amd64/mmtypes.h +++ b/sdk/xtdk/amd64/mmtypes.h @@ -25,11 +25,11 @@ #define MM_PXE_BASE 0xFFFFF6FB7DBED000ULL /* Page directory and page base addresses for 5-level paging */ -#define MM_PTE_LA57_BASE 0xFFFFF68000000000ULL -#define MM_PDE_LA57_BASE 0xFFFFF78000000000ULL -#define MM_PPE_LA57_BASE 0xFFFFF78800000000ULL -#define MM_PXE_LA57_BASE 0xFFFFF78840000000ULL -#define MM_P5E_LA57_BASE 0xFFFFF78840200000ULL +#define MM_PTE_LA57_BASE 0xFFFF000000000000ULL +#define MM_PDE_LA57_BASE 0xFFFF010000000000ULL +#define MM_PPE_LA57_BASE 0xFFFF010800000000ULL +#define MM_PXE_LA57_BASE 0xFFFF010840000000ULL +#define MM_P5E_LA57_BASE 0xFFFF010840200000ULL /* PTE shift values */ #define MM_PTE_SHIFT 3 -- 2.50.1
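
The series above extends PBL_ALLOCATE_PAGES and BlAllocateMemoryPages with an EFI_ALLOCATE_TYPE parameter so callers can request pages at a fixed physical address, as XtEnablePaging does when it places the LA57 trampoline at MM_TRAMPOLINE_ADDRESS (0x80000, below 1 MB and identity-mapped by BlBuildPageMap). A minimal usage sketch follows; the helper name XtpAllocateFixedBuffer is hypothetical, and only the protocol calls and types that already appear in the patches are assumed.

XTCDECL
EFI_STATUS
XtpAllocateFixedBuffer(IN EFI_PHYSICAL_ADDRESS PhysicalAddress,
                       IN ULONG_PTR Size,
                       OUT PEFI_PHYSICAL_ADDRESS Buffer)
{
    /* Request the exact physical range; AllocateAddress fails if the range is not free */
    *Buffer = PhysicalAddress;
    return XtLdrProtocol->Memory.AllocatePages(AllocateAddress,
                                               EFI_SIZE_TO_PAGES(Size),
                                               Buffer);
}

A caller that still wants "any free pages" simply passes AllocateAnyPages, which is how every pre-existing call site in the series was updated.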
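
On the final patch: with 5-level paging the recursive page-table window must cover the full 57-bit virtual space, i.e. 2^57 / 2^12 pages x 8 bytes per entry = 2^48 bytes of PTEs. The previous MM_PTE_LA57_BASE of 0xFFFFF68000000000 lies above 2^64 - 2^48, so a window of that size starting there would wrap past the end of the 64-bit address space; the new base 0xFFFF000000000000 equals exactly 2^64 - 2^48, so the window ends flush with the top of the address space. This appears to be the overflow the commit subject refers to.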