HyperDbg Debugger
Ept.h File Reference

Contains the headers relating to EPT structures, MTRR and all basic Hooking structures. More...

Go to the source code of this file.

Classes

struct  _MTRR_RANGE_DESCRIPTOR
 MTRR Descriptor. More...
 
union  _IA32_MTRR_FIXED_RANGE_TYPE
 Fixed range MTRR. More...
 
struct  _EPT_STATE
 Main structure for saving the state of EPT among the project. More...
 
struct  _VMM_EPT_DYNAMIC_SPLIT
 Split 2MB granularity to 4 KB granularity. More...
 

Macros

#define PAGE_ATTRIB_READ   0x2
 Page attributes for internal use.
 
#define PAGE_ATTRIB_WRITE   0x4
 
#define PAGE_ATTRIB_EXEC   0x8
 
#define PAGE_ATTRIB_EXEC_HIDDEN_HOOK   0x10
 
#define SIZE_2_MB   ((SIZE_T)(512 * PAGE_SIZE))
 Integer 2MB.
 
#define ADDRMASK_EPT_PML1_OFFSET(_VAR_)   ((_VAR_) & 0xFFFULL)
 Offset into the 1st paging structure (4096 byte)
 
#define ADDRMASK_EPT_PML1_INDEX(_VAR_)   (((_VAR_) & 0x1FF000ULL) >> 12)
 Index of the 1st paging structure (4096 byte)
 
#define ADDRMASK_EPT_PML2_INDEX(_VAR_)   (((_VAR_) & 0x3FE00000ULL) >> 21)
 Index of the 2nd paging structure (2MB)
 
#define ADDRMASK_EPT_PML3_INDEX(_VAR_)   (((_VAR_) & 0x7FC0000000ULL) >> 30)
 Index of the 3rd paging structure (1GB)
 
#define ADDRMASK_EPT_PML4_INDEX(_VAR_)   (((_VAR_) & 0xFF8000000000ULL) >> 39)
 Index of the 4th paging structure (512GB)
 
#define MAX_VARIABLE_RANGE_MTRRS   255
 Architecturally defined number of variable range MTRRs.
 
#define NUM_FIXED_RANGE_MTRRS   ((1 + 2 + 8) * RTL_NUMBER_OF_FIELD(IA32_MTRR_FIXED_RANGE_TYPE, s.Types))
 Architecturally defined number of fixed range MTRRs. 1 register for 64k, 2 registers for 16k, 8 registers for 4k, and each register holds 8 ranges, as the "Fixed Range MTRRs" section states.
 
#define NUM_MTRR_ENTRIES   (MAX_VARIABLE_RANGE_MTRRS + NUM_FIXED_RANGE_MTRRS)
 Total number of MTRR descriptors to store.
 

Typedefs

typedef struct _MTRR_RANGE_DESCRIPTOR MTRR_RANGE_DESCRIPTOR
 MTRR Descriptor.
 
typedef struct _MTRR_RANGE_DESCRIPTOR * PMTRR_RANGE_DESCRIPTOR
 
typedef union _IA32_MTRR_FIXED_RANGE_TYPE IA32_MTRR_FIXED_RANGE_TYPE
 Fixed range MTRR.
 
typedef struct _EPT_STATE EPT_STATE
 Main structure for saving the state of EPT among the project.
 
typedef struct _EPT_STATE * PEPT_STATE
 
typedef struct _VMM_EPT_DYNAMIC_SPLIT VMM_EPT_DYNAMIC_SPLIT
 Split 2MB granularity to 4 KB granularity.
 
typedef struct _VMM_EPT_DYNAMIC_SPLIT * PVMM_EPT_DYNAMIC_SPLIT
 

Functions

BOOLEAN EptSetupPML2Entry (PVMM_EPT_PAGE_TABLE EptPageTable, PEPT_PML2_ENTRY NewEntry, SIZE_T PageFrameNumber)
 Set up PML2 Entries.
 
BOOLEAN EptHandlePageHookExit (_Inout_ VIRTUAL_MACHINE_STATE *VCpu, _In_ VMX_EXIT_QUALIFICATION_EPT_VIOLATION ViolationQualification, _In_ UINT64 GuestPhysicalAddr)
 
BOOLEAN EptCheckFeatures (VOID)
 Check for EPT Features.
 
BOOLEAN EptBuildMtrrMap (VOID)
 Build MTRR Map.
 
PVMM_EPT_PAGE_TABLE EptAllocateAndCreateIdentityPageTable (VOID)
 Allocates page maps and creates an identity page table.
 
BOOLEAN EptSplitLargePage (PVMM_EPT_PAGE_TABLE EptPageTable, PVOID PreAllocatedBuffer, SIZE_T PhysicalAddress)
 Convert 2MB pages to 4KB pages.
 
PEPT_PML2_ENTRY EptGetPml2Entry (PVMM_EPT_PAGE_TABLE EptPageTable, SIZE_T PhysicalAddress)
 Get the PML2 entry for this physical address.
 
BOOLEAN EptLogicalProcessorInitialize (VOID)
 Initialize EPT Table based on Processor Index.
 
BOOLEAN EptHandleEptViolation (VIRTUAL_MACHINE_STATE *VCpu)
 Handle EPT Violation.
 
PEPT_PML1_ENTRY EptGetPml1Entry (PVMM_EPT_PAGE_TABLE EptPageTable, SIZE_T PhysicalAddress)
 Get the PML1 Entry of a specific address.
 
PVOID EptGetPml1OrPml2Entry (PVMM_EPT_PAGE_TABLE EptPageTable, SIZE_T PhysicalAddress, BOOLEAN *IsLargePage)
 Get the PML1 entry for this physical address; if the address still maps a large page, the PML2 entry is returned instead.
 
VOID EptHandleMisconfiguration (VOID)
 Handle Ept Misconfigurations.
 
VOID EptSetPML1AndInvalidateTLB (_Inout_ VIRTUAL_MACHINE_STATE *VCpu, _Out_ PEPT_PML1_ENTRY EntryAddress, _In_ EPT_PML1_ENTRY EntryValue, _In_ _Strict_type_match_ INVEPT_TYPE InvalidationType)
 This function sets the specified PML1 entry in a spinlock-protected area and then invalidates the TLB; it should be called from VMX root-mode.
 
BOOLEAN EptCheckAndHandleBreakpoint (VIRTUAL_MACHINE_STATE *VCpu)
 Check if the breakpoint vm-exit relates to EPT hook or not.
 

Detailed Description

Contains the headers relating to EPT structures, MTRR and all basic Hooking structures.

Author
Sina Karvandi (sina@hyperdbg.org)
Version
0.1
Date
2020-04-11

Macro Definition Documentation

◆ ADDRMASK_EPT_PML1_INDEX

#define ADDRMASK_EPT_PML1_INDEX ( _VAR_)    (((_VAR_) & 0x1FF000ULL) >> 12)

Index of the 1st paging structure (4096 byte)

◆ ADDRMASK_EPT_PML1_OFFSET

#define ADDRMASK_EPT_PML1_OFFSET ( _VAR_)    ((_VAR_) & 0xFFFULL)

Offset into the 1st paging structure (4096 byte)

◆ ADDRMASK_EPT_PML2_INDEX

#define ADDRMASK_EPT_PML2_INDEX ( _VAR_)    (((_VAR_) & 0x3FE00000ULL) >> 21)

Index of the 2nd paging structure (2MB)

◆ ADDRMASK_EPT_PML3_INDEX

#define ADDRMASK_EPT_PML3_INDEX ( _VAR_)    (((_VAR_) & 0x7FC0000000ULL) >> 30)

Index of the 3rd paging structure (1GB)

◆ ADDRMASK_EPT_PML4_INDEX

#define ADDRMASK_EPT_PML4_INDEX ( _VAR_)    (((_VAR_) & 0xFF8000000000ULL) >> 39)

Index of the 4th paging structure (512GB)
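
As a quick illustration (not part of Ept.h), the following stand-alone C snippet applies these masks to an arbitrary sample address to show which table indices fall out of a guest-physical address:

#include <stdint.h>
#include <stdio.h>

#define ADDRMASK_EPT_PML1_OFFSET(_VAR_) ((_VAR_)&0xFFFULL)
#define ADDRMASK_EPT_PML1_INDEX(_VAR_)  (((_VAR_)&0x1FF000ULL) >> 12)
#define ADDRMASK_EPT_PML2_INDEX(_VAR_)  (((_VAR_)&0x3FE00000ULL) >> 21)
#define ADDRMASK_EPT_PML3_INDEX(_VAR_)  (((_VAR_)&0x7FC0000000ULL) >> 30)
#define ADDRMASK_EPT_PML4_INDEX(_VAR_)  (((_VAR_)&0xFF8000000000ULL) >> 39)

int main(void)
{
    //
    // Arbitrary guest-physical address, used only for illustration
    //
    uint64_t GuestPhysicalAddr = 0x12345678ULL;

    //
    // Prints PML4=0, PML3=0, PML2=0x91, PML1=0x145, offset=0x678
    //
    printf("PML4 index : 0x%llx\n", (unsigned long long)ADDRMASK_EPT_PML4_INDEX(GuestPhysicalAddr));
    printf("PML3 index : 0x%llx\n", (unsigned long long)ADDRMASK_EPT_PML3_INDEX(GuestPhysicalAddr));
    printf("PML2 index : 0x%llx\n", (unsigned long long)ADDRMASK_EPT_PML2_INDEX(GuestPhysicalAddr));
    printf("PML1 index : 0x%llx\n", (unsigned long long)ADDRMASK_EPT_PML1_INDEX(GuestPhysicalAddr));
    printf("Page offset: 0x%llx\n", (unsigned long long)ADDRMASK_EPT_PML1_OFFSET(GuestPhysicalAddr));

    return 0;
}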

◆ MAX_VARIABLE_RANGE_MTRRS

#define MAX_VARIABLE_RANGE_MTRRS   255

Architecturally defined number of variable range MTRRs.

◆ NUM_FIXED_RANGE_MTRRS

#define NUM_FIXED_RANGE_MTRRS   ((1 + 2 + 8) * RTL_NUMBER_OF_FIELD(IA32_MTRR_FIXED_RANGE_TYPE, s.Types))

Architecturally defined number of fixed range MTRRs. 1 register for 64k, 2 registers for 16k, 8 registers for 4k, and each register holds 8 ranges, as the "Fixed Range MTRRs" section states.

◆ NUM_MTRR_ENTRIES

#define NUM_MTRR_ENTRIES   (MAX_VARIABLE_RANGE_MTRRS + NUM_FIXED_RANGE_MTRRS)

Total number of MTRR descriptors to store.
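
Since each fixed-range register carries eight Types entries (Types[8] in IA32_MTRR_FIXED_RANGE_TYPE), these macros expand to concrete constants. The following stand-alone check merely re-derives them for illustration and is not part of the driver:

#include <assert.h>

int main(void)
{
    //
    // 1 register for 64-KByte ranges + 2 for 16-KByte + 8 for 4-KByte,
    // each holding 8 sub-range types
    //
    int NumFixedRangeMtrrs    = (1 + 2 + 8) * 8; /* 88 */
    int MaxVariableRangeMtrrs = 255;
    int NumMtrrEntries        = MaxVariableRangeMtrrs + NumFixedRangeMtrrs;

    assert(NumFixedRangeMtrrs == 88);
    assert(NumMtrrEntries == 343);

    return 0;
}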

◆ PAGE_ATTRIB_EXEC

#define PAGE_ATTRIB_EXEC   0x8

◆ PAGE_ATTRIB_EXEC_HIDDEN_HOOK

#define PAGE_ATTRIB_EXEC_HIDDEN_HOOK   0x10

◆ PAGE_ATTRIB_READ

#define PAGE_ATTRIB_READ   0x2

Page attributes for internal use.
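
A hypothetical sketch (not taken from Ept.h or its callers) of how such attribute bits are typically combined by a caller requesting a hook and then tested by the hooking code; the Example* helpers are illustrative names only:

#define PAGE_ATTRIB_READ             0x2
#define PAGE_ATTRIB_WRITE            0x4
#define PAGE_ATTRIB_EXEC             0x8
#define PAGE_ATTRIB_EXEC_HIDDEN_HOOK 0x10

//
// A caller asking for read/write interception combines the relevant bits
//
static unsigned int
ExampleBuildReadWriteHookMask(void)
{
    return PAGE_ATTRIB_READ | PAGE_ATTRIB_WRITE;
}

//
// The hooking code later dispatches on the individual bits
//
static int
ExampleIsExecuteHook(unsigned int HookAttributes)
{
    return (HookAttributes & (PAGE_ATTRIB_EXEC | PAGE_ATTRIB_EXEC_HIDDEN_HOOK)) != 0;
}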

◆ PAGE_ATTRIB_WRITE

#define PAGE_ATTRIB_WRITE   0x4

◆ SIZE_2_MB

#define SIZE_2_MB   ((SIZE_T)(512 * PAGE_SIZE))

Integer 2MB.

Typedef Documentation

◆ EPT_STATE

typedef struct _EPT_STATE EPT_STATE

Main structure for saving the state of EPT among the project.

◆ IA32_MTRR_FIXED_RANGE_TYPE

typedef union _IA32_MTRR_FIXED_RANGE_TYPE IA32_MTRR_FIXED_RANGE_TYPE

Fixed range MTRR.

◆ MTRR_RANGE_DESCRIPTOR

typedef struct _MTRR_RANGE_DESCRIPTOR MTRR_RANGE_DESCRIPTOR

MTRR Descriptor.

◆ PEPT_STATE

typedef struct _EPT_STATE * PEPT_STATE

◆ PMTRR_RANGE_DESCRIPTOR

typedef struct _MTRR_RANGE_DESCRIPTOR * PMTRR_RANGE_DESCRIPTOR

◆ PVMM_EPT_DYNAMIC_SPLIT

typedef struct _VMM_EPT_DYNAMIC_SPLIT * PVMM_EPT_DYNAMIC_SPLIT

◆ VMM_EPT_DYNAMIC_SPLIT

typedef struct _VMM_EPT_DYNAMIC_SPLIT VMM_EPT_DYNAMIC_SPLIT

Split 2MB granularity to 4 KB granularity.

Function Documentation

◆ EptAllocateAndCreateIdentityPageTable()

PVMM_EPT_PAGE_TABLE EptAllocateAndCreateIdentityPageTable ( VOID )

Allocates page maps and creates an identity page table.

Returns
PVMM_EPT_PAGE_TABLE identity map page-table
643{
644 PVMM_EPT_PAGE_TABLE PageTable;
645 EPT_PML3_POINTER RWXTemplate;
646 EPT_PML2_ENTRY PML2EntryTemplate;
647 SIZE_T EntryGroupIndex;
648 SIZE_T EntryIndex;
649
650 //
651 // Allocate all paging structures as 4KB aligned pages
652 //
653
654 //
655 // Allocate address anywhere in the OS's memory space and
656 // zero out all entries to ensure all unused entries are marked Not Present
657 //
658 PageTable = PlatformMemAllocateContiguousZeroedMemory(sizeof(VMM_EPT_PAGE_TABLE));
659
660 if (PageTable == NULL)
661 {
662 LogError("Err, failed to allocate memory for PageTable");
663 return NULL;
664 }
665
666 //
667 // Mark the first 512GB PML4 entry as present, which allows us to manage up
668 // to 512GB of discrete paging structures.
669 //
670 PageTable->PML4[0].PageFrameNumber = (SIZE_T)VirtualAddressToPhysicalAddress(&PageTable->PML3[0]) / PAGE_SIZE;
671 PageTable->PML4[0].ReadAccess = 1;
672 PageTable->PML4[0].WriteAccess = 1;
673 PageTable->PML4[0].ExecuteAccess = 1;
674
675 //
676 // Now mark each 1GB PML3 entry as RWX and map each to their PML2 entry
677 //
678
679 //
680 // Ensure stack memory is cleared
681 //
682 RWXTemplate.AsUInt = 0;
683
684 //
685 // Set up one 'template' RWX PML3 entry and copy it into each of the 512 PML3 entries
686 // Using the same method as SimpleVisor for copying each entry using intrinsics.
687 //
688 RWXTemplate.ReadAccess = 1;
689 RWXTemplate.WriteAccess = 1;
690 RWXTemplate.ExecuteAccess = 1;
691
692 //
693 // Copy the template into each of the 512 PML3 entry slots
694 //
695 __stosq((SIZE_T *)&PageTable->PML3[0], RWXTemplate.AsUInt, VMM_EPT_PML3E_COUNT);
696
697 //
698 // For each of the 512 PML3 entries
699 //
700 for (EntryIndex = 0; EntryIndex < VMM_EPT_PML3E_COUNT; EntryIndex++)
701 {
702 //
703 // Map the 1GB PML3 entry to 512 PML2 (2MB) entries to describe each large page.
704 // NOTE: We do *not* manage any PML1 (4096 byte) entries and do not allocate them.
705 //
706 PageTable->PML3[EntryIndex].PageFrameNumber = (SIZE_T)VirtualAddressToPhysicalAddress(&PageTable->PML2[EntryIndex][0]) / PAGE_SIZE;
707 }
708
709 PML2EntryTemplate.AsUInt = 0;
710
711 //
712 // All PML2 entries will be RWX and 'present'
713 //
714 PML2EntryTemplate.WriteAccess = 1;
715 PML2EntryTemplate.ReadAccess = 1;
716 PML2EntryTemplate.ExecuteAccess = 1;
717
718 //
719 // We are using 2MB large pages, so we must mark this 1 here
720 //
721 PML2EntryTemplate.LargePage = 1;
722
723 //
724 // For each collection of 512 PML2 entries (512 collections * 512 entries per collection),
725 // mark it RWX using the same template above.
726 // This marks the entries as "Present" regardless of whether the actual system has memory at
727 // this region or not. We will cause a fault in our EPT handler if the guest accesses a page
728 // outside a usable range, despite the EPT frame being present here.
729 //
730 __stosq((SIZE_T *)&PageTable->PML2[0], PML2EntryTemplate.AsUInt, VMM_EPT_PML3E_COUNT * VMM_EPT_PML2E_COUNT);
731
732 //
733 // For each of the 512 collections of 512 2MB PML2 entries
734 //
735 for (EntryGroupIndex = 0; EntryGroupIndex < VMM_EPT_PML3E_COUNT; EntryGroupIndex++)
736 {
737 //
738 // For each 2MB PML2 entry in the collection
739 //
740 for (EntryIndex = 0; EntryIndex < VMM_EPT_PML2E_COUNT; EntryIndex++)
741 {
742 //
743 // Setup the memory type and frame number of the PML2 entry
744 //
745 EptSetupPML2Entry(PageTable, &PageTable->PML2[EntryGroupIndex][EntryIndex], (EntryGroupIndex * VMM_EPT_PML2E_COUNT) + EntryIndex);
746 }
747 }
748
749 return PageTable;
750}
_Use_decl_annotations_ UINT64 VirtualAddressToPhysicalAddress(_In_ PVOID VirtualAddress)
Converts Virtual Address to Physical Address.
Definition Conversion.c:154
BOOLEAN EptSetupPML2Entry(PVMM_EPT_PAGE_TABLE EptPageTable, PEPT_PML2_ENTRY NewEntry, SIZE_T PageFrameNumber)
Set up PML2 Entries.
Definition Ept.c:603
#define LogError(format,...)
Log in the case of error.
Definition HyperDbgHyperLogIntrinsics.h:113
PVOID PlatformMemAllocateContiguousZeroedMemory(SIZE_T NumberOfBytes)
Allocate a contiguous zeroed memory.
Definition Mem.c:22
#define VMM_EPT_PML3E_COUNT
The number of 1GB PDPT entries in the page table per 512GB PML4 entry.
Definition State.h:84
#define VMM_EPT_PML2E_COUNT
The number of 2MB Page Directory entries in the page table per 1GB PML3 entry.
Definition State.h:91
EPT_PDPTE EPT_PML3_POINTER
Definition State.h:19
EPT_PDE_2MB EPT_PML2_ENTRY
Definition State.h:20
#define PAGE_SIZE
Size of each page (4096 bytes)
Definition common.h:69
Structure for saving EPT Table.
Definition State.h:105
EPT_PML4_POINTER PML4[VMM_EPT_PML4E_COUNT]
28.2.2 Describes 512 contiguous 512GB memory regions each with 512 1GB regions.
Definition State.h:110
EPT_PML3_POINTER PML3[VMM_EPT_PML3E_COUNT]
Describes exactly 512 contiguous 1GB memory regions within our singular 512GB PML4 region.
Definition State.h:116
EPT_PML2_ENTRY PML2[VMM_EPT_PML3E_COUNT][VMM_EPT_PML2E_COUNT]
For each 1GB PML3 entry, create 512 2MB entries to map identity. NOTE: We are using 2MB pages as the ...
Definition State.h:124

◆ EptBuildMtrrMap()

BOOLEAN EptBuildMtrrMap ( VOID )

Build MTRR Map.

Returns
BOOLEAN

157{
158 IA32_MTRR_CAPABILITIES_REGISTER MTRRCap;
159 IA32_MTRR_PHYSBASE_REGISTER CurrentPhysBase;
160 IA32_MTRR_PHYSMASK_REGISTER CurrentPhysMask;
161 IA32_MTRR_DEF_TYPE_REGISTER MTRRDefType;
162 PMTRR_RANGE_DESCRIPTOR Descriptor;
163 UINT32 CurrentRegister;
164 UINT32 NumberOfBitsInMask;
165
166 MTRRCap.AsUInt = __readmsr(IA32_MTRR_CAPABILITIES);
167 MTRRDefType.AsUInt = __readmsr(IA32_MTRR_DEF_TYPE);
168
169 //
170 // All MTRRs are disabled when clear, and the
171 // UC memory type is applied to all of physical memory.
172 //
173 if (!MTRRDefType.MtrrEnable)
174 {
175 g_EptState->DefaultMemoryType = MEMORY_TYPE_UNCACHEABLE;
176 return TRUE;
177 }
178
179 //
180 // The IA32_MTRR_DEF_TYPE MSR (named MTRRdefType MSR for the P6 family processors) sets the default
181 // properties of the regions of physical memory that are not encompassed by MTRRs
182 //
183 g_EptState->DefaultMemoryType = (UINT8)MTRRDefType.DefaultMemoryType;
184
185 //
186 // The fixed memory ranges are mapped with 11 fixed-range registers of 64 bits each. Each of these registers is
187 // divided into 8-bit fields that are used to specify the memory type for each of the sub-ranges the register controls:
188 // - Register IA32_MTRR_FIX64K_00000 - Maps the 512-KByte address range from 0H to 7FFFFH. This range
189 // is divided into eight 64-KByte sub-ranges.
190 //
191 // - Registers IA32_MTRR_FIX16K_80000 and IA32_MTRR_FIX16K_A0000 - Maps the two 128-KByte
192 // address ranges from 80000H to BFFFFH. This range is divided into sixteen 16-KByte sub-ranges, 8 ranges per
193 // register.
194 //
195 // - Registers IA32_MTRR_FIX4K_C0000 through IA32_MTRR_FIX4K_F8000 - Maps eight 32-KByte
196 // address ranges from C0000H to FFFFFH. This range is divided into sixty-four 4-KByte sub-ranges, 8 ranges per
197 // register.
198 //
199 if (MTRRCap.FixedRangeSupported && MTRRDefType.FixedRangeMtrrEnable)
200 {
201 const UINT32 K64Base = 0x0;
202 const UINT32 K64Size = 0x10000;
203 IA32_MTRR_FIXED_RANGE_TYPE K64Types = {__readmsr(IA32_MTRR_FIX64K_00000)};
204 for (unsigned int i = 0; i < 8; i++)
205 {
206 Descriptor = &g_EptState->MemoryRanges[g_EptState->NumberOfEnabledMemoryRanges++];
207 Descriptor->MemoryType = K64Types.s.Types[i];
208 Descriptor->PhysicalBaseAddress = K64Base + (K64Size * i);
209 Descriptor->PhysicalEndAddress = K64Base + (K64Size * i) + (K64Size - 1);
210 Descriptor->FixedRange = TRUE;
211 }
212
213 const UINT32 K16Base = 0x80000;
214 const UINT32 K16Size = 0x4000;
215 for (unsigned int i = 0; i < 2; i++)
216 {
217 IA32_MTRR_FIXED_RANGE_TYPE K16Types = {__readmsr(IA32_MTRR_FIX16K_80000 + i)};
218 for (unsigned int j = 0; j < 8; j++)
219 {
220 Descriptor = &g_EptState->MemoryRanges[g_EptState->NumberOfEnabledMemoryRanges++];
221 Descriptor->MemoryType = K16Types.s.Types[j];
222 Descriptor->PhysicalBaseAddress = (K16Base + (i * K16Size * 8)) + (K16Size * j);
223 Descriptor->PhysicalEndAddress = (K16Base + (i * K16Size * 8)) + (K16Size * j) + (K16Size - 1);
224 Descriptor->FixedRange = TRUE;
225 }
226 }
227
228 const UINT32 K4Base = 0xC0000;
229 const UINT32 K4Size = 0x1000;
230 for (unsigned int i = 0; i < 8; i++)
231 {
232 IA32_MTRR_FIXED_RANGE_TYPE K4Types = {__readmsr(IA32_MTRR_FIX4K_C0000 + i)};
233
234 for (unsigned int j = 0; j < 8; j++)
235 {
236 Descriptor = &g_EptState->MemoryRanges[g_EptState->NumberOfEnabledMemoryRanges++];
237 Descriptor->MemoryType = K4Types.s.Types[j];
238 Descriptor->PhysicalBaseAddress = (K4Base + (i * K4Size * 8)) + (K4Size * j);
239 Descriptor->PhysicalEndAddress = (K4Base + (i * K4Size * 8)) + (K4Size * j) + (K4Size - 1);
240 Descriptor->FixedRange = TRUE;
241 }
242 }
243 }
244
245 for (CurrentRegister = 0; CurrentRegister < MTRRCap.VariableRangeCount; CurrentRegister++)
246 {
247 //
248 // For each dynamic register pair
249 //
250 CurrentPhysBase.AsUInt = __readmsr(IA32_MTRR_PHYSBASE0 + (CurrentRegister * 2));
251 CurrentPhysMask.AsUInt = __readmsr(IA32_MTRR_PHYSMASK0 + (CurrentRegister * 2));
252
253 //
254 // Is the range enabled?
255 //
256 if (CurrentPhysMask.Valid)
257 {
258 //
259 // We only need to read these once because the ISA dictates that MTRRs are
260 // to be synchronized between all processors during BIOS initialization.
261 //
262 Descriptor = &g_EptState->MemoryRanges[g_EptState->NumberOfEnabledMemoryRanges++];
263
264 //
265 // Calculate the base address in bytes
266 //
267 Descriptor->PhysicalBaseAddress = CurrentPhysBase.PageFrameNumber * PAGE_SIZE;
268
269 //
270 // Calculate the total size of the range
271 // The lowest bit of the mask that is set to 1 specifies the size of the range
272 //
273 _BitScanForward64((ULONG *)&NumberOfBitsInMask, CurrentPhysMask.PageFrameNumber * PAGE_SIZE);
274
275 //
276 // Size of the range in bytes + Base Address
277 //
278 Descriptor->PhysicalEndAddress = Descriptor->PhysicalBaseAddress + ((1ULL << NumberOfBitsInMask) - 1ULL);
279
280 //
281 // Memory Type (cacheability attributes)
282 //
283 Descriptor->MemoryType = (UCHAR)CurrentPhysBase.Type;
284
285 Descriptor->FixedRange = FALSE;
286
287 LogDebugInfo("MTRR Range: Base=0x%llx End=0x%llx Type=0x%x", Descriptor->PhysicalBaseAddress, Descriptor->PhysicalEndAddress, Descriptor->MemoryType);
288 }
289 }
290
291 LogDebugInfo("Total MTRR ranges committed: 0x%x", g_EptState->NumberOfEnabledMemoryRanges);
292
293 return TRUE;
294}
unsigned char UCHAR
Definition BasicTypes.h:35
#define TRUE
Definition BasicTypes.h:55
#define FALSE
Definition BasicTypes.h:54
unsigned char UINT8
Definition BasicTypes.h:46
unsigned int UINT32
Definition BasicTypes.h:48
unsigned long ULONG
Definition BasicTypes.h:37
EPT_STATE * g_EptState
Save the state and variables related to EPT.
Definition GlobalVariables.h:50
#define LogDebugInfo(format,...)
Log, initialize boot information and debug information.
Definition HyperDbgHyperLogIntrinsics.h:155
UINT32 NumberOfEnabledMemoryRanges
Definition Ept.h:120
UINT8 DefaultMemoryType
Definition Ept.h:127
MTRR_RANGE_DESCRIPTOR MemoryRanges[NUM_MTRR_ENTRIES]
Definition Ept.h:119
MTRR Descriptor.
Definition Ept.h:72
UCHAR MemoryType
Definition Ept.h:75
BOOLEAN FixedRange
Definition Ept.h:76
SIZE_T PhysicalBaseAddress
Definition Ept.h:73
SIZE_T PhysicalEndAddress
Definition Ept.h:74
Fixed range MTRR.
Definition Ept.h:84
UINT8 Types[8]
Definition Ept.h:88
struct _IA32_MTRR_FIXED_RANGE_TYPE::@5 s
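
EptBuildMtrrMap only records the enabled ranges; resolving the memory type for a particular page is done elsewhere (see EptGetMemoryType, referenced under EptSetupPML2Entry). The simplified, stand-alone sketch below shows the general shape of such a lookup over descriptors like these; the struct, function name, and parameters are illustrative, and the real code also has to deal with overlapping ranges and large pages that span several types:

#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096

typedef struct
{
    uint64_t PhysicalBaseAddress;
    uint64_t PhysicalEndAddress;
    uint8_t  MemoryType;
    int      FixedRange;
} EXAMPLE_MTRR_RANGE_DESCRIPTOR;

//
// Return the memory type of a 4KB page: the first descriptor whose range
// contains the page start wins, otherwise the default type applies
//
static uint8_t
ExampleLookupMemoryType(const EXAMPLE_MTRR_RANGE_DESCRIPTOR * Ranges,
                        size_t                                RangeCount,
                        uint64_t                              PageFrameNumber,
                        uint8_t                               DefaultType)
{
    uint64_t PageStart = PageFrameNumber * EXAMPLE_PAGE_SIZE;

    for (size_t i = 0; i < RangeCount; i++)
    {
        if (PageStart >= Ranges[i].PhysicalBaseAddress &&
            PageStart <= Ranges[i].PhysicalEndAddress)
        {
            return Ranges[i].MemoryType;
        }
    }

    return DefaultType;
}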

◆ EptCheckAndHandleBreakpoint()

BOOLEAN EptCheckAndHandleBreakpoint ( VIRTUAL_MACHINE_STATE * VCpu)

Check if the breakpoint vm-exit relates to EPT hook or not.

Parameters
VCpu - The virtual processor's state
Returns
BOOLEAN
1212{
1213 UINT64 GuestRip = 0;
1214 BOOLEAN IsHandledByEptHook;
1215
1216 //
1217 // Reading guest's RIP
1218 //
1219 __vmx_vmread(VMCS_GUEST_RIP, &GuestRip);
1220
1221 //
1222 // Don't increment rip by default
1223 //
1224 HvSuppressRipIncrement(VCpu);
1225
1226 //
1227 // Check if it relates to !epthook or not
1228 //
1229 IsHandledByEptHook = EptCheckAndHandleEptHookBreakpoints(VCpu, GuestRip);
1230
1231 return IsHandledByEptHook;
1232}
UCHAR BOOLEAN
Definition BasicTypes.h:39
unsigned __int64 UINT64
Definition BasicTypes.h:21
BOOLEAN EptCheckAndHandleEptHookBreakpoints(VIRTUAL_MACHINE_STATE *VCpu, UINT64 GuestRip)
Perform checking and handling if the breakpoint vm-exit relates to EPT hook or not.
Definition Ept.c:1111
VOID HvSuppressRipIncrement(VIRTUAL_MACHINE_STATE *VCpu)
Suppress the incrementation of RIP.
Definition Hv.c:324

◆ EptCheckFeatures()

BOOLEAN EptCheckFeatures ( VOID )

Check for EPT Features.

Returns
BOOLEAN

Check for EPT Features.

Returns
BOOLEAN Shows whether EPT is supported in this machine or not
23{
24 IA32_VMX_EPT_VPID_CAP_REGISTER VpidRegister;
25 IA32_MTRR_DEF_TYPE_REGISTER MTRRDefType;
26
27 VpidRegister.AsUInt = __readmsr(IA32_VMX_EPT_VPID_CAP);
28 MTRRDefType.AsUInt = __readmsr(IA32_MTRR_DEF_TYPE);
29
30 if (!VpidRegister.PageWalkLength4 || !VpidRegister.MemoryTypeWriteBack || !VpidRegister.Pde2MbPages)
31 {
32 return FALSE;
33 }
34
35 if (!VpidRegister.AdvancedVmexitEptViolationsInformation)
36 {
37 LogDebugInfo("The processor doesn't report advanced VM-exit information for EPT violations");
38 }
39
40 if (!VpidRegister.ExecuteOnlyPages)
41 {
42 g_CompatibilityCheck.ExecuteOnlySupport = FALSE;
43 LogDebugInfo("The processor doesn't support execute-only pages, execute hooks won't work as they're on this feature in our design");
44 }
45 else
46 {
47 g_CompatibilityCheck.ExecuteOnlySupport = TRUE;
48 }
49
50 if (!MTRRDefType.MtrrEnable)
51 {
52 LogError("Err, MTRR dynamic ranges are not supported");
53 return FALSE;
54 }
55
56 LogDebugInfo("All EPT features are present");
57
58 return TRUE;
59}
COMPATIBILITY_CHECKS_STATUS g_CompatibilityCheck
Different attributes and compatibility checks of the current processor.
Definition GlobalVariables.h:26
BOOLEAN ExecuteOnlySupport
Definition CompatibilityChecks.h:29

◆ EptGetPml1Entry()

PEPT_PML1_ENTRY EptGetPml1Entry ( PVMM_EPT_PAGE_TABLE EptPageTable,
SIZE_T PhysicalAddress )

Get the PML1 Entry of a specific address.

Parameters
EptPageTable
PhysicalAddress
Returns
PEPT_PML1_ENTRY

Get the PML1 Entry of a specific address.

Parameters
EptPageTable - The EPT Page Table
PhysicalAddress - Physical address that we want to get its PML1
Returns
PEPT_PML1_ENTRY Return NULL if the address is invalid or the page wasn't already split
305{
306 SIZE_T Directory, DirectoryPointer, PML4Entry;
307 PEPT_PML2_ENTRY PML2;
308 PEPT_PML1_ENTRY PML1;
309 PEPT_PML2_POINTER PML2Pointer;
310
311 Directory = ADDRMASK_EPT_PML2_INDEX(PhysicalAddress);
312 DirectoryPointer = ADDRMASK_EPT_PML3_INDEX(PhysicalAddress);
313 PML4Entry = ADDRMASK_EPT_PML4_INDEX(PhysicalAddress);
314
315 //
316 // Addresses above 512GB are invalid because it is > physical address bus width
317 //
318 if (PML4Entry > 0)
319 {
320 return NULL;
321 }
322
323 PML2 = &EptPageTable->PML2[DirectoryPointer][Directory];
324
325 //
326 // Check to ensure the page is split
327 //
328 if (PML2->LargePage)
329 {
330 return NULL;
331 }
332
333 //
334 // Conversion to get the right PageFrameNumber.
335 // These pointers occupy the same place in the table and are directly convertible.
336 //
337 PML2Pointer = (PEPT_PML2_POINTER)PML2;
338
339 //
340 // If it is, translate to the PML1 pointer
341 //
342 PML1 = (PEPT_PML1_ENTRY)PhysicalAddressToVirtualAddress(PML2Pointer->PageFrameNumber * PAGE_SIZE);
343
344 if (!PML1)
345 {
346 return NULL;
347 }
348
349 //
350 // Index into PML1 for that address
351 //
352 PML1 = &PML1[ADDRMASK_EPT_PML1_INDEX(PhysicalAddress)];
353
354 return PML1;
355}
_Use_decl_annotations_ UINT64 PhysicalAddressToVirtualAddress(UINT64 PhysicalAddress)
Converts Physical Address to Virtual Address.
Definition Conversion.c:22
#define ADDRMASK_EPT_PML2_INDEX(_VAR_)
Index of the 2nd paging structure (2MB)
Definition Ept.h:49
#define ADDRMASK_EPT_PML4_INDEX(_VAR_)
Index of the 4th paging structure (512GB)
Definition Ept.h:61
#define ADDRMASK_EPT_PML1_INDEX(_VAR_)
Index of the 1st paging structure (4096 byte)
Definition Ept.h:43
#define ADDRMASK_EPT_PML3_INDEX(_VAR_)
Index of the 3rd paging structure (1GB)
Definition Ept.h:55
EPT_PTE * PEPT_PML1_ENTRY
Definition State.h:22
EPT_PDE_2MB * PEPT_PML2_ENTRY
Definition State.h:20
EPT_PDE * PEPT_PML2_POINTER
Definition State.h:21
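
A hypothetical usage sketch (not taken from the HyperDbg sources) of how this routine combines with EptSetPML1AndInvalidateTLB from VMX root-mode: fetch the PML1 entry, edit a copy, then let EptSetPML1AndInvalidateTLB publish it and invalidate the TLB. ExampleMakePageNonWritable is an illustrative name, and InveptSingleContext is assumed to be the single-context member of INVEPT_TYPE in the ia32 definitions the project uses:

//
// Illustrative only: make a single 4KB guest-physical page non-writable.
// Assumes the 2MB region containing PhysicalAddress was already split.
//
BOOLEAN
ExampleMakePageNonWritable(VIRTUAL_MACHINE_STATE * VCpu,
                           PVMM_EPT_PAGE_TABLE     EptPageTable,
                           SIZE_T                  PhysicalAddress)
{
    PEPT_PML1_ENTRY TargetPage;
    EPT_PML1_ENTRY  NewValue;

    TargetPage = EptGetPml1Entry(EptPageTable, PhysicalAddress);

    if (TargetPage == NULL)
    {
        //
        // Invalid address or the page was not split yet
        //
        return FALSE;
    }

    NewValue             = *TargetPage; // start from the current entry
    NewValue.WriteAccess = 0;

    EptSetPML1AndInvalidateTLB(VCpu, TargetPage, NewValue, InveptSingleContext);

    return TRUE;
}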

◆ EptGetPml1OrPml2Entry()

PVOID EptGetPml1OrPml2Entry ( PVMM_EPT_PAGE_TABLE EptPageTable,
SIZE_T PhysicalAddress,
BOOLEAN * IsLargePage )

Get the PML1 entry for this physical address; if the address still maps a large page, the PML2 entry is returned instead.

Parameters
EptPageTable - The EPT Page Table
PhysicalAddress - Physical address that we want to get its PML1
IsLargePage - Shows whether it's a large page or not
Returns
PVOID Return PEPT_PML1_ENTRY or PEPT_PML2_ENTRY
369{
370 SIZE_T Directory, DirectoryPointer, PML4Entry;
371 PEPT_PML2_ENTRY PML2;
372 PEPT_PML1_ENTRY PML1;
373 PEPT_PML2_POINTER PML2Pointer;
374
375 Directory = ADDRMASK_EPT_PML2_INDEX(PhysicalAddress);
376 DirectoryPointer = ADDRMASK_EPT_PML3_INDEX(PhysicalAddress);
377 PML4Entry = ADDRMASK_EPT_PML4_INDEX(PhysicalAddress);
378
379 //
380 // Addresses above 512GB are invalid because it is > physical address bus width
381 //
382 if (PML4Entry > 0)
383 {
384 return NULL;
385 }
386
387 PML2 = &EptPageTable->PML2[DirectoryPointer][Directory];
388
389 //
390 // Check to ensure the page is split
391 //
392 if (PML2->LargePage)
393 {
394 *IsLargePage = TRUE;
395 return PML2;
396 }
397
398 //
399 // Conversion to get the right PageFrameNumber.
400 // These pointers occupy the same place in the table and are directly convertible.
401 //
402 PML2Pointer = (PEPT_PML2_POINTER)PML2;
403
404 //
405 // If it is, translate to the PML1 pointer
406 //
407 PML1 = (PEPT_PML1_ENTRY)PhysicalAddressToVirtualAddress(PML2Pointer->PageFrameNumber * PAGE_SIZE);
408
409 if (!PML1)
410 {
411 return NULL;
412 }
413
414 //
415 // Index into PML1 for that address
416 //
417 PML1 = &PML1[ADDRMASK_EPT_PML1_INDEX(PhysicalAddress)];
418
419 *IsLargePage = FALSE;
420 return PML1;
421}

◆ EptGetPml2Entry()

PEPT_PML2_ENTRY EptGetPml2Entry ( PVMM_EPT_PAGE_TABLE EptPageTable,
SIZE_T PhysicalAddress )

Get the PML2 entry for this physical address.

Parameters
EptPageTable - The EPT Page Table
PhysicalAddress - Physical Address that we want to get its PML2
Returns
PEPT_PML2_ENTRY The PML2 Entry Structure
432{
433 SIZE_T Directory, DirectoryPointer, PML4Entry;
434 PEPT_PML2_ENTRY PML2;
435
436 Directory = ADDRMASK_EPT_PML2_INDEX(PhysicalAddress);
437 DirectoryPointer = ADDRMASK_EPT_PML3_INDEX(PhysicalAddress);
438 PML4Entry = ADDRMASK_EPT_PML4_INDEX(PhysicalAddress);
439
440 //
441 // Addresses above 512GB are invalid because it is > physical address bus width
442 //
443 if (PML4Entry > 0)
444 {
445 return NULL;
446 }
447
448 PML2 = &EptPageTable->PML2[DirectoryPointer][Directory];
449 return PML2;
450}

◆ EptHandleEptViolation()

BOOLEAN EptHandleEptViolation ( VIRTUAL_MACHINE_STATE * VCpu)

Handle EPT Violation.

Parameters
VCpu - The virtual processor's state
Returns
BOOLEAN

Handle EPT Violation.

Violations are thrown whenever an operation is performed on an EPT entry that does not provide permissions to access that page

Parameters
VCpu - The virtual processor's state
Returns
BOOLEAN Return true if the violation was handled by the page hook handler and false if it was not handled
1003{
1004 UINT64 GuestPhysicalAddr;
1005 VMX_EXIT_QUALIFICATION_EPT_VIOLATION ViolationQualification = {.AsUInt = VCpu->ExitQualification};
1006
1007 //
1008 // Reading guest physical address
1009 //
1010 __vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS, &GuestPhysicalAddr);
1011
1012 if (ExecTrapHandleEptViolationVmexit(VCpu, &ViolationQualification))
1013 {
1014 return TRUE;
1015 }
1016 else if (EptHandlePageHookExit(VCpu, ViolationQualification, GuestPhysicalAddr))
1017 {
1018 //
1019 // Handled by page hook code
1020 //
1021 return TRUE;
1022 }
1023 else if (VmmCallbackUnhandledEptViolation(VCpu->CoreId, (UINT64)ViolationQualification.AsUInt, GuestPhysicalAddr))
1024 {
1025 //
1026 // Check whether this violation is meaningful for the application or not
1027 //
1028 return TRUE;
1029 }
1030
1031 LogError("Err, unexpected EPT violation at RIP: %llx", VCpu->LastVmexitRip);
1032 DbgBreakPoint();
1033 //
1034 // Redo the instruction that caused the exception
1035 //
1036 return FALSE;
1037}
BOOLEAN VmmCallbackUnhandledEptViolation(UINT32 CoreId, UINT64 ViolationQualification, UINT64 GuestPhysicalAddr)
routine callback to handle unhandled EPT violations
Definition Callback.c:316
_Use_decl_annotations_ BOOLEAN EptHandlePageHookExit(VIRTUAL_MACHINE_STATE *VCpu, VMX_EXIT_QUALIFICATION_EPT_VIOLATION ViolationQualification, UINT64 GuestPhysicalAddr)
Check if this exit is due to a violation caused by a currently hooked page.
Definition Ept.c:844
BOOLEAN ExecTrapHandleEptViolationVmexit(VIRTUAL_MACHINE_STATE *VCpu, VMX_EXIT_QUALIFICATION_EPT_VIOLATION *ViolationQualification)
Handle EPT Violations related to the MBEC hooks.
Definition ExecTrap.c:779
UINT32 ExitQualification
Definition State.h:308
UINT32 CoreId
Definition State.h:306
UINT64 LastVmexitRip
Definition State.h:309

◆ EptHandleMisconfiguration()

VOID EptHandleMisconfiguration ( VOID )

Handle Ept Misconfigurations.

Returns
VOID

Handle Ept Misconfigurations.

Returns
VOID
1047{
1048 UINT64 GuestPhysicalAddr = 0;
1049
1050 __vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS, &GuestPhysicalAddr);
1051
1052 LogInfo("EPT Misconfiguration!");
1053
1054 LogError("Err, a field in the EPT paging structure was invalid, faulting guest address : 0x%llx",
1055 GuestPhysicalAddr);
1056
1057 //
1058 // We can't continue now.
1059 // EPT misconfiguration is a fatal exception that will probably crash the OS if we don't get out now
1060 //
1061}
#define LogInfo(format,...)
Define log variables.
Definition HyperDbgHyperLogIntrinsics.h:71

◆ EptHandlePageHookExit()

BOOLEAN EptHandlePageHookExit ( _Inout_ VIRTUAL_MACHINE_STATE * VCpu,
_In_ VMX_EXIT_QUALIFICATION_EPT_VIOLATION ViolationQualification,
_In_ UINT64 GuestPhysicalAddr )

Check if this exit is due to a violation caused by a currently hooked page.

◆ EptLogicalProcessorInitialize()

BOOLEAN EptLogicalProcessorInitialize ( VOID )

Initialize EPT Table based on Processor Index.

Returns
BOOLEAN

Initialize EPT Table based on Processor Index.

Creates an identity mapped page table and sets up an EPTP to be applied to the VMCS later

Returns
BOOLEAN
760{
761 ULONG ProcessorsCount;
762 PVMM_EPT_PAGE_TABLE PageTable;
763 EPT_POINTER EPTP = {0};
764
765 //
766 // Get number of processors
767 //
768 ProcessorsCount = KeQueryActiveProcessorCount(0);
769
770 for (size_t i = 0; i < ProcessorsCount; i++)
771 {
772 //
773 // Allocate the identity mapped page table
774 //
775 PageTable = EptAllocateAndCreateIdentityPageTable();
776
777 if (!PageTable)
778 {
779 //
780 // Try to deallocate previous pools (if any)
781 //
782 for (size_t j = 0; j < ProcessorsCount; j++)
783 {
784 if (g_GuestState[j].EptPageTable != NULL)
785 {
786 MmFreeContiguousMemory(g_GuestState[j].EptPageTable);
787 g_GuestState[j].EptPageTable = NULL;
788 }
789 }
790
791 LogError("Err, unable to allocate memory for EPT");
792 return FALSE;
793 }
794
795 //
796 // Virtual address to the page table to keep track of it for later freeing
797 //
798 g_GuestState[i].EptPageTable = PageTable;
799
800 //
801 // Use default memory type
802 //
803 EPTP.MemoryType = g_EptState->DefaultMemoryType;
804
805 //
806 // We might utilize the 'access' and 'dirty' flag features in the dirty logging mechanism
807 //
808 EPTP.EnableAccessAndDirtyFlags = TRUE;
809
810 //
811 // Bits 5:3 (1 less than the EPT page-walk length) must be 3, indicating an EPT page-walk length of 4;
812 // see Section 28.2.2
813 //
814 EPTP.PageWalkLength = 3;
815
816 //
817 // The physical page number of the page table we will be using
818 //
819 EPTP.PageFrameNumber = (SIZE_T)VirtualAddressToPhysicalAddress(&PageTable->PML4) / PAGE_SIZE;
820
821 //
822 // We will write the EPTP to the VMCS later
823 //
824 g_GuestState[i].EptPointer = EPTP;
825 }
826
827 return TRUE;
828}
PVMM_EPT_PAGE_TABLE EptAllocateAndCreateIdentityPageTable(VOID)
Allocates page maps and creates an identity page table.
Definition Ept.c:642
VIRTUAL_MACHINE_STATE * g_GuestState
Save the state and variables related to virtualization on each to logical core.
Definition GlobalVariables.h:38
PVMM_EPT_PAGE_TABLE EptPageTable
Definition State.h:342
EPT_POINTER EptPointer
Definition State.h:341
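
The EPTP built here is only cached in g_GuestState; a later per-core stage writes it into the VMCS. A minimal sketch of that step, assuming VMCS_CTRL_EPT_POINTER is the VMCS field encoding exposed by the ia32 definitions and CoreIndex is a hypothetical parameter for the current core:

//
// Illustrative only: executed per core while that core's VMCS is current
//
static VOID
ExampleApplyEptPointer(ULONG CoreIndex)
{
    __vmx_vmwrite(VMCS_CTRL_EPT_POINTER, g_GuestState[CoreIndex].EptPointer.AsUInt);
}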

◆ EptSetPML1AndInvalidateTLB()

VOID EptSetPML1AndInvalidateTLB ( _Inout_ VIRTUAL_MACHINE_STATE * VCpu,
_Out_ PEPT_PML1_ENTRY EntryAddress,
_In_ EPT_PML1_ENTRY EntryValue,
_In_ _Strict_type_match_ INVEPT_TYPE InvalidationType )

This function sets the specified PML1 entry in a spinlock-protected area and then invalidates the TLB; it should be called from VMX root-mode.

Parameters
VCpu - The virtual processor's state
EntryAddress - The address of the PML1 entry to set
EntryValue - The new value to write to the entry
InvalidationType - The INVEPT invalidation type to use
Returns
VOID
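
A rough sketch of the pattern this description implies, not the actual HyperDbg implementation: serialize the update, publish the new entry, then issue the requested INVEPT. Every Example* helper below is a placeholder, and InveptSingleContext/InveptAllContexts are assumed INVEPT_TYPE members:

VOID
ExampleSetPml1AndInvalidateTlb(PEPT_PML1_ENTRY EntryAddress,
                               EPT_PML1_ENTRY  EntryValue,
                               INVEPT_TYPE     InvalidationType)
{
    //
    // Serialize against other cores touching the EPT entry (placeholder lock)
    //
    ExampleLock();

    //
    // Publish the new PML1 entry
    //
    EntryAddress->AsUInt = EntryValue.AsUInt;

    //
    // Flush cached translations; single-context flushes only this EPTP,
    // all-contexts flushes every EPTP
    //
    if (InvalidationType == InveptSingleContext)
    {
        ExampleInveptSingleContext();
    }
    else
    {
        ExampleInveptAllContexts();
    }

    ExampleUnlock();
}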

◆ EptSetupPML2Entry()

BOOLEAN EptSetupPML2Entry ( PVMM_EPT_PAGE_TABLE EptPageTable,
PEPT_PML2_ENTRY NewEntry,
SIZE_T PageFrameNumber )

Set up PML2 Entries.

Parameters
EptPageTable
NewEntry - The PML2 Entry
PageFrameNumber - PFN (Physical Address)
Returns
BOOLEAN
604{
605 PVOID TargetBuffer;
606
607 //
608 // Each of the 512 collections of 512 PML2 entries is setup here
609 // This will, in total, identity map every physical address from 0x0
610 // to physical address 0x8000000000 (512GB of memory)
611 // ((EntryGroupIndex * VMM_EPT_PML2E_COUNT) + EntryIndex) * 2MB is
612 // the actual physical address we're mapping
613 //
614 NewEntry->PageFrameNumber = PageFrameNumber;
615
616 if (EptIsValidForLargePage(PageFrameNumber))
617 {
618 NewEntry->MemoryType = EptGetMemoryType(PageFrameNumber, TRUE);
619
620 return TRUE;
621 }
622 else
623 {
624 TargetBuffer = (PVOID)PlatformMemAllocateNonPagedPool(sizeof(VMM_EPT_DYNAMIC_SPLIT));
625
626 if (!TargetBuffer)
627 {
628 LogError("Err, cannot allocate page for splitting edge large pages");
629 return FALSE;
630 }
631
632 return EptSplitLargePage(EptPageTable, TargetBuffer, PageFrameNumber * SIZE_2_MB);
633 }
634}
BOOLEAN EptSplitLargePage(PVMM_EPT_PAGE_TABLE EptPageTable, PVOID PreAllocatedBuffer, SIZE_T PhysicalAddress)
Split 2MB (LargePage) into 4kb pages.
Definition Ept.c:462
BOOLEAN EptIsValidForLargePage(SIZE_T PageFrameNumber)
Check if potential large page doesn't land on two or more different cache memory types.
Definition Ept.c:571
UINT8 EptGetMemoryType(SIZE_T PageFrameNumber, BOOLEAN IsLargePage)
Get the memory type (cacheability attribute) for a physical page frame number.
Definition Ept.c:69
#define SIZE_2_MB
Integer 2MB.
Definition Ept.h:31
PVOID PlatformMemAllocateNonPagedPool(SIZE_T NumberOfBytes)
Allocate a non-paged buffer.
Definition Mem.c:41
Split 2MB granularity to 4 KB granularity.
Definition Ept.h:135

◆ EptSplitLargePage()

BOOLEAN EptSplitLargePage ( PVMM_EPT_PAGE_TABLE EptPageTable,
PVOID PreAllocatedBuffer,
SIZE_T PhysicalAddress )

Convert 2MB pages to 4KB pages.

Parameters
EptPageTable
PreAllocatedBuffer
PhysicalAddress
Returns
BOOLEAN

Convert 2MB pages to 4KB pages.

Parameters
EptPageTable - The EPT Page Table
PreAllocatedBuffer - The address of pre-allocated buffer
PhysicalAddress - Physical address of where we want to split
Returns
BOOLEAN Returns true if it was successful or false if there was an error
465{
466 PVMM_EPT_DYNAMIC_SPLIT NewSplit;
467 EPT_PML1_ENTRY EntryTemplate;
468 SIZE_T EntryIndex;
469 PEPT_PML2_ENTRY TargetEntry;
470 EPT_PML2_POINTER NewPointer;
471
472 //
473 // Find the PML2 entry that's currently used
474 //
475 TargetEntry = EptGetPml2Entry(EptPageTable, PhysicalAddress);
476
477 if (!TargetEntry)
478 {
479 LogError("Err, an invalid physical address passed");
480 return FALSE;
481 }
482
483 //
484 // If this entry is not marked as a large page, it already points to a PML1 table,
485 // meaning the page has been split before.
486 //
487 if (!TargetEntry->LargePage)
488 {
489 //
490 // The page is already split, so the pool that was pre-allocated for this
491 // request is not needed anymore and must be freed
492 //
493 PoolManagerFreePool((UINT64)PreAllocatedBuffer);
494
495 return TRUE;
496 }
497
498 //
499 // Allocate the PML1 entries
500 //
501 NewSplit = (PVMM_EPT_DYNAMIC_SPLIT)PreAllocatedBuffer;
502 if (!NewSplit)
503 {
504 LogError("Err, failed to allocate dynamic split memory");
505 return FALSE;
506 }
507 RtlZeroMemory(NewSplit, sizeof(VMM_EPT_DYNAMIC_SPLIT));
508
509 //
510 // Point back to the entry in the dynamic split for easy reference for which entry that
511 // dynamic split is for
512 //
513 NewSplit->u.Entry = TargetEntry;
514
515 //
516 // Make a template for RWX
517 //
518 EntryTemplate.AsUInt = 0;
519 EntryTemplate.ReadAccess = 1;
520 EntryTemplate.WriteAccess = 1;
521 EntryTemplate.ExecuteAccess = 1;
522
523 //
524 // copy other bits from target entry
525 //
526 EntryTemplate.MemoryType = TargetEntry->MemoryType;
527 EntryTemplate.IgnorePat = TargetEntry->IgnorePat;
528 EntryTemplate.SuppressVe = TargetEntry->SuppressVe;
529
530 //
531 // Copy the template into all the PML1 entries
532 //
533 __stosq((SIZE_T *)&NewSplit->PML1[0], EntryTemplate.AsUInt, VMM_EPT_PML1E_COUNT);
534
535 //
536 // Set the page frame numbers for identity mapping
537 //
538 for (EntryIndex = 0; EntryIndex < VMM_EPT_PML1E_COUNT; EntryIndex++)
539 {
540 //
541 // Convert the 2MB page frame number to the 4096 page entry number plus the offset into the frame
542 //
543 NewSplit->PML1[EntryIndex].PageFrameNumber = ((TargetEntry->PageFrameNumber * SIZE_2_MB) / PAGE_SIZE) + EntryIndex;
544 NewSplit->PML1[EntryIndex].MemoryType = EptGetMemoryType(NewSplit->PML1[EntryIndex].PageFrameNumber, FALSE);
545 }
546
547 //
548 // Allocate a new pointer which will replace the 2MB entry with a pointer to 512 4096 byte entries
549 //
550 NewPointer.AsUInt = 0;
551 NewPointer.WriteAccess = 1;
552 NewPointer.ReadAccess = 1;
553 NewPointer.ExecuteAccess = 1;
554 NewPointer.PageFrameNumber = (SIZE_T)VirtualAddressToPhysicalAddress(&NewSplit->PML1[0]) / PAGE_SIZE;
555
556 //
557 // Now, replace the entry in the page table with our new split pointer
558 //
559 RtlCopyMemory(TargetEntry, &NewPointer, sizeof(NewPointer));
560
561 return TRUE;
562}
PEPT_PML2_ENTRY EptGetPml2Entry(PVMM_EPT_PAGE_TABLE EptPageTable, SIZE_T PhysicalAddress)
Get the PML2 entry for this physical address.
Definition Ept.c:431
struct _VMM_EPT_DYNAMIC_SPLIT * PVMM_EPT_DYNAMIC_SPLIT
BOOLEAN PoolManagerFreePool(UINT64 AddressToFree)
This function sets a pool flag to be freed, and it will be freed on the next IOCTL when it's safe to r...
Definition PoolManager.c:136
EPT_PTE EPT_PML1_ENTRY
Definition State.h:22
EPT_PDE EPT_PML2_POINTER
Definition State.h:21
#define VMM_EPT_PML1E_COUNT
The number of 4096 byte Page Table entries in the page table per 2MB PML2 entry when dynamically spl...
Definition State.h:98
EPT_PML1_ENTRY PML1[VMM_EPT_PML1E_COUNT]
The 4096 byte page table entries that correspond to the split 2MB table entry.
Definition Ept.h:141
PEPT_PML2_ENTRY Entry
Definition Ept.h:149
union _VMM_EPT_DYNAMIC_SPLIT::@6 u
The pointer to the 2MB entry in the page table which this split is servicing.