HyperDbg Debugger
Loading...
Searching...
No Matches
Ept.c File Reference

The implementation of functions relating to the Extended Page Table (a.k.a. EPT) More...

#include "pch.h"

Functions

BOOLEAN EptCheckFeatures (VOID)
 Check whether EPT features are present or not.
 
UINT8 EptGetMemoryType (SIZE_T PageFrameNumber, BOOLEAN IsLargePage)
 Get the desired memory type (cacheability attribute) for a particular small/large page.
 
BOOLEAN EptBuildMtrrMap (VOID)
 Build MTRR Map of current physical addresses.
 
PEPT_PML1_ENTRY EptGetPml1Entry (PVMM_EPT_PAGE_TABLE EptPageTable, SIZE_T PhysicalAddress)
 Get the PML1 entry for this physical address if the page is split.
 
PVOID EptGetPml1OrPml2Entry (PVMM_EPT_PAGE_TABLE EptPageTable, SIZE_T PhysicalAddress, BOOLEAN *IsLargePage)
 Get the PML1 entry for this physical address if the large page is available then large page of Pml2 is returned.
 
PEPT_PML2_ENTRY EptGetPml2Entry (PVMM_EPT_PAGE_TABLE EptPageTable, SIZE_T PhysicalAddress)
 Get the PML2 entry for this physical address.
 
BOOLEAN EptSplitLargePage (PVMM_EPT_PAGE_TABLE EptPageTable, PVOID PreAllocatedBuffer, SIZE_T PhysicalAddress)
 Split 2MB (LargePage) into 4kb pages.
 
BOOLEAN EptIsValidForLargePage (SIZE_T PageFrameNumber)
 Check if potential large page doesn't land on two or more different cache memory types.
 
BOOLEAN EptSetupPML2Entry (PVMM_EPT_PAGE_TABLE EptPageTable, PEPT_PML2_ENTRY NewEntry, SIZE_T PageFrameNumber)
 Set up PML2 Entries.
 
PVMM_EPT_PAGE_TABLE EptAllocateAndCreateIdentityPageTable (VOID)
 Allocates page maps and create identity page table.
 
BOOLEAN EptLogicalProcessorInitialize (VOID)
 Initialize EPT for an individual logical processor.
 
_Use_decl_annotations_ BOOLEAN EptHandlePageHookExit (VIRTUAL_MACHINE_STATE *VCpu, VMX_EXIT_QUALIFICATION_EPT_VIOLATION ViolationQualification, UINT64 GuestPhysicalAddr)
 Check if this exit is due to a violation caused by a currently hooked page.
 
BOOLEAN EptHandleEptViolation (VIRTUAL_MACHINE_STATE *VCpu)
 Handle VM exits for EPT violations.
 
VOID EptHandleMisconfiguration (VOID)
 Handle vm-exits for EPT Misconfiguration.
 
_Use_decl_annotations_ VOID EptSetPML1AndInvalidateTLB (VIRTUAL_MACHINE_STATE *VCpu, PEPT_PML1_ENTRY EntryAddress, EPT_PML1_ENTRY EntryValue, INVEPT_TYPE InvalidationType)
 This function set the specific PML1 entry in a spinlock protected area then invalidate the TLB.
 
BOOLEAN EptCheckAndHandleEptHookBreakpoints (VIRTUAL_MACHINE_STATE *VCpu, UINT64 GuestRip)
 Perform checking and handling if the breakpoint vm-exit relates to EPT hook or not.
 
BOOLEAN EptCheckAndHandleBreakpoint (VIRTUAL_MACHINE_STATE *VCpu)
 Check if the breakpoint vm-exit relates to EPT hook or not.
 

Detailed Description

The implementation of functions relating to the Extended Page Table (a.k.a. EPT)

Author
Sina Karvandi (sina@hyperdbg.org)
Gbps
Matthijs Lavrijsen (mattiwatti@gmail.com)

Some of the code is re-used from Gbps/gbhv (https://github.com/Gbps/gbhv)

Version
0.1
Date
2020-04-10

Function Documentation

◆ EptAllocateAndCreateIdentityPageTable()

PVMM_EPT_PAGE_TABLE EptAllocateAndCreateIdentityPageTable ( VOID )

Allocates page maps and create identity page table.

Returns
PVMM_EPT_PAGE_TABLE identity map page-table
643{
644 PVMM_EPT_PAGE_TABLE PageTable;
645 EPT_PML3_POINTER RWXTemplate;
646 EPT_PML2_ENTRY PML2EntryTemplate;
647 SIZE_T EntryGroupIndex;
648 SIZE_T EntryIndex;
649
650 //
651 // Allocate all paging structures as 4KB aligned pages
652 //
653
654 //
655 // Allocate address anywhere in the OS's memory space and
656 // zero out all entries to ensure all unused entries are marked Not Present
657 //
659
660 if (PageTable == NULL)
661 {
662 LogError("Err, failed to allocate memory for PageTable");
663 return NULL;
664 }
665
666 //
667 // Mark the first 512GB PML4 entry as present, which allows us to manage up
668 // to 512GB of discrete paging structures.
669 //
670 PageTable->PML4[0].PageFrameNumber = (SIZE_T)VirtualAddressToPhysicalAddress(&PageTable->PML3[0]) / PAGE_SIZE;
671 PageTable->PML4[0].ReadAccess = 1;
672 PageTable->PML4[0].WriteAccess = 1;
673 PageTable->PML4[0].ExecuteAccess = 1;
674
675 //
676 // Now mark each 1GB PML3 entry as RWX and map each to their PML2 entry
677 //
678
679 //
680 // Ensure stack memory is cleared
681 //
682 RWXTemplate.AsUInt = 0;
683
684 //
685 // Set up one 'template' RWX PML3 entry and copy it into each of the 512 PML3 entries
686 // Using the same method as SimpleVisor for copying each entry using intrinsics.
687 //
688 RWXTemplate.ReadAccess = 1;
689 RWXTemplate.WriteAccess = 1;
690 RWXTemplate.ExecuteAccess = 1;
691
692 //
693 // Copy the template into each of the 512 PML3 entry slots
694 //
695 __stosq((SIZE_T *)&PageTable->PML3[0], RWXTemplate.AsUInt, VMM_EPT_PML3E_COUNT);
696
697 //
698 // For each of the 512 PML3 entries
699 //
700 for (EntryIndex = 0; EntryIndex < VMM_EPT_PML3E_COUNT; EntryIndex++)
701 {
702 //
703 // Map the 1GB PML3 entry to 512 PML2 (2MB) entries to describe each large page.
704 // NOTE: We do *not* manage any PML1 (4096 byte) entries and do not allocate them.
705 //
706 PageTable->PML3[EntryIndex].PageFrameNumber = (SIZE_T)VirtualAddressToPhysicalAddress(&PageTable->PML2[EntryIndex][0]) / PAGE_SIZE;
707 }
708
709 PML2EntryTemplate.AsUInt = 0;
710
711 //
712 // All PML2 entries will be RWX and 'present'
713 //
714 PML2EntryTemplate.WriteAccess = 1;
715 PML2EntryTemplate.ReadAccess = 1;
716 PML2EntryTemplate.ExecuteAccess = 1;
717
718 //
719 // We are using 2MB large pages, so we must mark this 1 here
720 //
721 PML2EntryTemplate.LargePage = 1;
722
723 //
724 // For each collection of 512 PML2 entries (512 collections * 512 entries per collection),
725 // mark it RWX using the same template above.
726 // This marks the entries as "Present" regardless of if the actual system has memory at
727 // this region or not. We will cause a fault in our EPT handler if the guest access a page
728 // outside a usable range, despite the EPT frame being present here.
729 //
730 __stosq((SIZE_T *)&PageTable->PML2[0], PML2EntryTemplate.AsUInt, VMM_EPT_PML3E_COUNT * VMM_EPT_PML2E_COUNT);
731
732 //
733 // For each of the 512 collections of 512 2MB PML2 entries
734 //
735 for (EntryGroupIndex = 0; EntryGroupIndex < VMM_EPT_PML3E_COUNT; EntryGroupIndex++)
736 {
737 //
738 // For each 2MB PML2 entry in the collection
739 //
740 for (EntryIndex = 0; EntryIndex < VMM_EPT_PML2E_COUNT; EntryIndex++)
741 {
742 //
743 // Setup the memory type and frame number of the PML2 entry
744 //
745 EptSetupPML2Entry(PageTable, &PageTable->PML2[EntryGroupIndex][EntryIndex], (EntryGroupIndex * VMM_EPT_PML2E_COUNT) + EntryIndex);
746 }
747 }
748
749 return PageTable;
750}
_Use_decl_annotations_ UINT64 VirtualAddressToPhysicalAddress(_In_ PVOID VirtualAddress)
Converts Virtual Address to Physical Address.
Definition Conversion.c:154
BOOLEAN EptSetupPML2Entry(PVMM_EPT_PAGE_TABLE EptPageTable, PEPT_PML2_ENTRY NewEntry, SIZE_T PageFrameNumber)
Set up PML2 Entries.
Definition Ept.c:603
#define LogError(format,...)
Log in the case of error.
Definition HyperDbgHyperLogIntrinsics.h:113
PVOID PlatformMemAllocateContiguousZeroedMemory(SIZE_T NumberOfBytes)
Allocate a contiguous zeroed memory.
Definition Mem.c:22
#define VMM_EPT_PML3E_COUNT
The number of 1GB PDPT entries in the page table per 512GB PML4 entry.
Definition State.h:84
#define VMM_EPT_PML2E_COUNT
The number of 2MB Page Directory entries in the page table per 1GB PML3 entry.
Definition State.h:91
EPT_PDPTE EPT_PML3_POINTER
Definition State.h:19
EPT_PDE_2MB EPT_PML2_ENTRY
Definition State.h:20
#define PAGE_SIZE
Size of each page (4096 bytes)
Definition common.h:69
NULL()
Definition test-case-generator.py:530
Structure for saving EPT Table.
Definition State.h:105
EPT_PML4_POINTER PML4[VMM_EPT_PML4E_COUNT]
28.2.2 Describes 512 contiguous 512GB memory regions each with 512 1GB regions.
Definition State.h:110
EPT_PML3_POINTER PML3[VMM_EPT_PML3E_COUNT]
Describes exactly 512 contiguous 1GB memory regions within a our singular 512GB PML4 region.
Definition State.h:116
EPT_PML2_ENTRY PML2[VMM_EPT_PML3E_COUNT][VMM_EPT_PML2E_COUNT]
For each 1GB PML3 entry, create 512 2MB entries to map identity. NOTE: We are using 2MB pages as the ...
Definition State.h:124

◆ EptBuildMtrrMap()

BOOLEAN EptBuildMtrrMap ( VOID )

Build MTRR Map of current physical addresses.

Build MTRR Map.

Returns
BOOLEAN
157{
158 IA32_MTRR_CAPABILITIES_REGISTER MTRRCap;
159 IA32_MTRR_PHYSBASE_REGISTER CurrentPhysBase;
160 IA32_MTRR_PHYSMASK_REGISTER CurrentPhysMask;
161 IA32_MTRR_DEF_TYPE_REGISTER MTRRDefType;
162 PMTRR_RANGE_DESCRIPTOR Descriptor;
163 UINT32 CurrentRegister;
164 UINT32 NumberOfBitsInMask;
165
166 MTRRCap.AsUInt = __readmsr(IA32_MTRR_CAPABILITIES);
167 MTRRDefType.AsUInt = __readmsr(IA32_MTRR_DEF_TYPE);
168
169 //
170 // All MTRRs are disabled when clear, and the
171 // UC memory type is applied to all of physical memory.
172 //
173 if (!MTRRDefType.MtrrEnable)
174 {
175 g_EptState->DefaultMemoryType = MEMORY_TYPE_UNCACHEABLE;
176 return TRUE;
177 }
178
179 //
180 // The IA32_MTRR_DEF_TYPE MSR (named MTRRdefType MSR for the P6 family processors) sets the default
181 // properties of the regions of physical memory that are not encompassed by MTRRs
182 //
183 g_EptState->DefaultMemoryType = (UINT8)MTRRDefType.DefaultMemoryType;
184
185 //
186 // The fixed memory ranges are mapped with 11 fixed-range registers of 64 bits each. Each of these registers is
187 // divided into 8-bit fields that are used to specify the memory type for each of the sub-ranges the register controls:
188 // - Register IA32_MTRR_FIX64K_00000 - Maps the 512-KByte address range from 0H to 7FFFFH. This range
189 // is divided into eight 64-KByte sub-ranges.
190 //
191 // - Registers IA32_MTRR_FIX16K_80000 and IA32_MTRR_FIX16K_A0000 - Maps the two 128-KByte
192 // address ranges from 80000H to BFFFFH. This range is divided into sixteen 16-KByte sub-ranges, 8 ranges per
193 // register.
194 //
195 // - Registers IA32_MTRR_FIX4K_C0000 through IA32_MTRR_FIX4K_F8000 - Maps eight 32-KByte
196 // address ranges from C0000H to FFFFFH. This range is divided into sixty-four 4-KByte sub-ranges, 8 ranges per
197 // register.
198 //
199 if (MTRRCap.FixedRangeSupported && MTRRDefType.FixedRangeMtrrEnable)
200 {
201 const UINT32 K64Base = 0x0;
202 const UINT32 K64Size = 0x10000;
203 IA32_MTRR_FIXED_RANGE_TYPE K64Types = {__readmsr(IA32_MTRR_FIX64K_00000)};
204 for (unsigned int i = 0; i < 8; i++)
205 {
207 Descriptor->MemoryType = K64Types.s.Types[i];
208 Descriptor->PhysicalBaseAddress = K64Base + (K64Size * i);
209 Descriptor->PhysicalEndAddress = K64Base + (K64Size * i) + (K64Size - 1);
210 Descriptor->FixedRange = TRUE;
211 }
212
213 const UINT32 K16Base = 0x80000;
214 const UINT32 K16Size = 0x4000;
215 for (unsigned int i = 0; i < 2; i++)
216 {
217 IA32_MTRR_FIXED_RANGE_TYPE K16Types = {__readmsr(IA32_MTRR_FIX16K_80000 + i)};
218 for (unsigned int j = 0; j < 8; j++)
219 {
221 Descriptor->MemoryType = K16Types.s.Types[j];
222 Descriptor->PhysicalBaseAddress = (K16Base + (i * K16Size * 8)) + (K16Size * j);
223 Descriptor->PhysicalEndAddress = (K16Base + (i * K16Size * 8)) + (K16Size * j) + (K16Size - 1);
224 Descriptor->FixedRange = TRUE;
225 }
226 }
227
228 const UINT32 K4Base = 0xC0000;
229 const UINT32 K4Size = 0x1000;
230 for (unsigned int i = 0; i < 8; i++)
231 {
232 IA32_MTRR_FIXED_RANGE_TYPE K4Types = {__readmsr(IA32_MTRR_FIX4K_C0000 + i)};
233
234 for (unsigned int j = 0; j < 8; j++)
235 {
237 Descriptor->MemoryType = K4Types.s.Types[j];
238 Descriptor->PhysicalBaseAddress = (K4Base + (i * K4Size * 8)) + (K4Size * j);
239 Descriptor->PhysicalEndAddress = (K4Base + (i * K4Size * 8)) + (K4Size * j) + (K4Size - 1);
240 Descriptor->FixedRange = TRUE;
241 }
242 }
243 }
244
245 for (CurrentRegister = 0; CurrentRegister < MTRRCap.VariableRangeCount; CurrentRegister++)
246 {
247 //
248 // For each dynamic register pair
249 //
250 CurrentPhysBase.AsUInt = __readmsr(IA32_MTRR_PHYSBASE0 + (CurrentRegister * 2));
251 CurrentPhysMask.AsUInt = __readmsr(IA32_MTRR_PHYSMASK0 + (CurrentRegister * 2));
252
253 //
254 // Is the range enabled?
255 //
256 if (CurrentPhysMask.Valid)
257 {
258 //
259 // We only need to read these once because the ISA dictates that MTRRs are
260 // to be synchronized between all processors during BIOS initialization.
261 //
263
264 //
265 // Calculate the base address in bytes
266 //
267 Descriptor->PhysicalBaseAddress = CurrentPhysBase.PageFrameNumber * PAGE_SIZE;
268
269 //
270 // Calculate the total size of the range
271 // The lowest bit of the mask that is set to 1 specifies the size of the range
272 //
273 _BitScanForward64((ULONG *)&NumberOfBitsInMask, CurrentPhysMask.PageFrameNumber * PAGE_SIZE);
274
275 //
276 // Size of the range in bytes + Base Address
277 //
278 Descriptor->PhysicalEndAddress = Descriptor->PhysicalBaseAddress + ((1ULL << NumberOfBitsInMask) - 1ULL);
279
280 //
281 // Memory Type (cacheability attributes)
282 //
283 Descriptor->MemoryType = (UCHAR)CurrentPhysBase.Type;
284
285 Descriptor->FixedRange = FALSE;
286
287 LogDebugInfo("MTRR Range: Base=0x%llx End=0x%llx Type=0x%x", Descriptor->PhysicalBaseAddress, Descriptor->PhysicalEndAddress, Descriptor->MemoryType);
288 }
289 }
290
291 LogDebugInfo("Total MTRR ranges committed: 0x%x", g_EptState->NumberOfEnabledMemoryRanges);
292
293 return TRUE;
294}
unsigned char UCHAR
Definition BasicTypes.h:35
#define TRUE
Definition BasicTypes.h:55
#define FALSE
Definition BasicTypes.h:54
unsigned char UINT8
Definition BasicTypes.h:46
unsigned int UINT32
Definition BasicTypes.h:48
unsigned long ULONG
Definition BasicTypes.h:37
EPT_STATE * g_EptState
Save the state and variables related to EPT.
Definition GlobalVariables.h:50
#define LogDebugInfo(format,...)
Log, initialize boot information and debug information.
Definition HyperDbgHyperLogIntrinsics.h:155
UINT32 NumberOfEnabledMemoryRanges
Definition Ept.h:120
UINT8 DefaultMemoryType
Definition Ept.h:127
MTRR_RANGE_DESCRIPTOR MemoryRanges[NUM_MTRR_ENTRIES]
Definition Ept.h:119
MTRR Descriptor.
Definition Ept.h:72
UCHAR MemoryType
Definition Ept.h:75
BOOLEAN FixedRange
Definition Ept.h:76
SIZE_T PhysicalBaseAddress
Definition Ept.h:73
SIZE_T PhysicalEndAddress
Definition Ept.h:74
Fixed range MTRR.
Definition Ept.h:84
UINT8 Types[8]
Definition Ept.h:88
struct _IA32_MTRR_FIXED_RANGE_TYPE::@5 s

◆ EptCheckAndHandleBreakpoint()

BOOLEAN EptCheckAndHandleBreakpoint ( VIRTUAL_MACHINE_STATE * VCpu)

Check if the breakpoint vm-exit relates to EPT hook or not.

Parameters
VCpuThe virtual processor's state
Returns
BOOLEAN
1212{
1213 UINT64 GuestRip = 0;
1214 BOOLEAN IsHandledByEptHook;
1215
1216 //
1217 // Reading guest's RIP
1218 //
1219 __vmx_vmread(VMCS_GUEST_RIP, &GuestRip);
1220
1221 //
1222 // Don't increment rip by default
1223 //
1225
1226 //
1227 // Check if it relates to !epthook or not
1228 //
1229 IsHandledByEptHook = EptCheckAndHandleEptHookBreakpoints(VCpu, GuestRip);
1230
1231 return IsHandledByEptHook;
1232}
UCHAR BOOLEAN
Definition BasicTypes.h:39
unsigned __int64 UINT64
Definition BasicTypes.h:21
BOOLEAN EptCheckAndHandleEptHookBreakpoints(VIRTUAL_MACHINE_STATE *VCpu, UINT64 GuestRip)
Perform checking and handling if the breakpoint vm-exit relates to EPT hook or not.
Definition Ept.c:1111
VOID HvSuppressRipIncrement(VIRTUAL_MACHINE_STATE *VCpu)
Suppress the incrementation of RIP.
Definition Hv.c:324

◆ EptCheckAndHandleEptHookBreakpoints()

BOOLEAN EptCheckAndHandleEptHookBreakpoints ( VIRTUAL_MACHINE_STATE * VCpu,
UINT64 GuestRip )

Perform checking and handling if the breakpoint vm-exit relates to EPT hook or not.

Parameters
VCpuThe virtual processor's state
GuestRip
Returns
BOOLEAN
1112{
1113 PVOID TargetPage;
1114 PLIST_ENTRY TempList;
1115 BOOLEAN IsHandledByEptHook = FALSE;
1116
1117 //
1118 // ***** Check breakpoint for !epthook *****
1119 //
1120
1121 //
1122 // Check whether the breakpoint was due to a !epthook command or not
1123 //
1124 TempList = &g_EptState->HookedPagesList;
1125
1126 while (&g_EptState->HookedPagesList != TempList->Flink)
1127 {
1128 TempList = TempList->Flink;
1129 PEPT_HOOKED_PAGE_DETAIL HookedEntry = CONTAINING_RECORD(TempList, EPT_HOOKED_PAGE_DETAIL, PageHookList);
1130
1131 if (HookedEntry->IsExecutionHook)
1132 {
1133 for (size_t i = 0; i < HookedEntry->CountOfBreakpoints; i++)
1134 {
1135 if (HookedEntry->BreakpointAddresses[i] == GuestRip)
1136 {
1137 //
1138 // We found an address that matches the details, let's trigger the event
1139 //
1140
1141 //
1142 // As the context to event trigger, we send the rip
1143 // of where triggered this event
1144 //
1145 DispatchEventHiddenHookExecCc(VCpu, (PVOID)GuestRip);
1146
1147 //
1148 // Pointer to the page entry in the page table
1149 //
1150 TargetPage = EptGetPml1Entry(VCpu->EptPageTable, HookedEntry->PhysicalBaseAddress);
1151
1152 //
1153 // Restore to its original entry for one instruction
1154 //
1156 TargetPage,
1157 HookedEntry->OriginalEntry,
1158 InveptSingleContext);
1159
1160 //
1161 // Next we have to save the current hooked entry to restore on the next instruction's vm-exit
1162 //
1163 VCpu->MtfEptHookRestorePoint = HookedEntry;
1164
1165 //
1166 // The following codes are added because we realized if the execution takes long then
1167 // the execution might be switched to another routines, thus, MTF might conclude on
1168 // another routine and we might (and will) trigger the same instruction soon
1169 //
1170 // The following code is not necessary on local debugging (VMI Mode), however, I don't
1171 // know why? just things are not reasonable here for me
1172 // another weird thing that I observed is the fact if you don't touch the routine related
1173 // to the I/O in and out instructions in VMWare then it works perfectly, just touching I/O
1174 // for serial is problematic, it might be a VMWare nested-virtualization bug, however, the
1175 // below approached proved to be work on both Debug Mode and WMI Mode
1176 // If you remove the below codes then when epthook is triggered then the execution stucks
1177 // on the same instruction on where the hooks is triggered, so 'p' and 't' commands for
1178 // steppings won't work
1179 //
1180
1181 //
1182 // We have to set Monitor trap flag and give it the HookedEntry to work with
1183 //
1185
1186 //
1187 // Indicate that we handled the ept violation
1188 //
1189 IsHandledByEptHook = TRUE;
1190
1191 //
1192 // Get out of the loop
1193 //
1194 break;
1195 }
1196 }
1197 }
1198 }
1199
1200 return IsHandledByEptHook;
1201}
VOID DispatchEventHiddenHookExecCc(VIRTUAL_MACHINE_STATE *VCpu, PVOID Context)
Handling debugger functions related to hidden hook exec CC events.
Definition Dispatch.c:960
PEPT_PML1_ENTRY EptGetPml1Entry(PVMM_EPT_PAGE_TABLE EptPageTable, SIZE_T PhysicalAddress)
Get the PML1 entry for this physical address if the page is split.
Definition Ept.c:304
_Use_decl_annotations_ VOID EptSetPML1AndInvalidateTLB(VIRTUAL_MACHINE_STATE *VCpu, PEPT_PML1_ENTRY EntryAddress, EPT_PML1_ENTRY EntryValue, INVEPT_TYPE InvalidationType)
This function set the specific PML1 entry in a spinlock protected area then invalidate the TLB.
Definition Ept.c:1075
VOID HvEnableMtfAndChangeExternalInterruptState(VIRTUAL_MACHINE_STATE *VCpu)
Enables MTF and adjust external interrupt state.
Definition Hv.c:1389
Structure to save the state of each hooked pages.
Definition State.h:163
BOOLEAN IsExecutionHook
This field shows whether the hook contains a hidden hook for execution or not.
Definition State.h:229
UINT64 BreakpointAddresses[MaximumHiddenBreakpointsOnPage]
Address of hooked pages (multiple breakpoints on a single page) this is only used in hidden breakpoin...
Definition State.h:259
EPT_PML1_ENTRY OriginalEntry
The original page entry. Will be copied back when the hook is removed from the page.
Definition State.h:214
SIZE_T PhysicalBaseAddress
The base address of the page. Used to find this structure in the list of page hooks when a hook is hi...
Definition State.h:187
UINT64 CountOfBreakpoints
Count of breakpoints (multiple breakpoints on a single page) this is only used in hidden breakpoints ...
Definition State.h:271
LIST_ENTRY HookedPagesList
Definition Ept.h:118
PVMM_EPT_PAGE_TABLE EptPageTable
Definition State.h:342
PEPT_HOOKED_PAGE_DETAIL MtfEptHookRestorePoint
Definition State.h:331

◆ EptCheckFeatures()

BOOLEAN EptCheckFeatures ( VOID )

Check whether EPT features are present or not.

Check for EPT Features.

Returns
BOOLEAN Shows whether EPT is supported in this machine or not
23{
24 IA32_VMX_EPT_VPID_CAP_REGISTER VpidRegister;
25 IA32_MTRR_DEF_TYPE_REGISTER MTRRDefType;
26
27 VpidRegister.AsUInt = __readmsr(IA32_VMX_EPT_VPID_CAP);
28 MTRRDefType.AsUInt = __readmsr(IA32_MTRR_DEF_TYPE);
29
30 if (!VpidRegister.PageWalkLength4 || !VpidRegister.MemoryTypeWriteBack || !VpidRegister.Pde2MbPages)
31 {
32 return FALSE;
33 }
34
35 if (!VpidRegister.AdvancedVmexitEptViolationsInformation)
36 {
37 LogDebugInfo("The processor doesn't report advanced VM-exit information for EPT violations");
38 }
39
40 if (!VpidRegister.ExecuteOnlyPages)
41 {
43 LogDebugInfo("The processor doesn't support execute-only pages, execute hooks won't work as they're on this feature in our design");
44 }
45 else
46 {
48 }
49
50 if (!MTRRDefType.MtrrEnable)
51 {
52 LogError("Err, MTRR dynamic ranges are not supported");
53 return FALSE;
54 }
55
56 LogDebugInfo("All EPT features are present");
57
58 return TRUE;
59}
COMPATIBILITY_CHECKS_STATUS g_CompatibilityCheck
Different attributes and compatibility checks of the current processor.
Definition GlobalVariables.h:26
BOOLEAN ExecuteOnlySupport
Definition CompatibilityChecks.h:29

◆ EptGetMemoryType()

UINT8 EptGetMemoryType ( SIZE_T PageFrameNumber,
BOOLEAN IsLargePage )

Get the desired memory type (cacheability attribute) for a particular small/large page.

Parameters
PageFrameNumber
IsLargePage
Returns
UINT8 Return desired type of memory for particular small/large page
70{
71 UINT8 TargetMemoryType;
72 SIZE_T AddressOfPage;
73 SIZE_T CurrentMtrrRange;
74 MTRR_RANGE_DESCRIPTOR * CurrentMemoryRange;
75
76 AddressOfPage = IsLargePage ? PageFrameNumber * SIZE_2_MB : PageFrameNumber * PAGE_SIZE;
77
78 TargetMemoryType = (UINT8)-1;
79
80 //
81 // For each MTRR range
82 //
83 for (CurrentMtrrRange = 0; CurrentMtrrRange < g_EptState->NumberOfEnabledMemoryRanges; CurrentMtrrRange++)
84 {
85 CurrentMemoryRange = &g_EptState->MemoryRanges[CurrentMtrrRange];
86
87 //
88 // If the physical address is described by this MTRR
89 //
90 if (AddressOfPage >= CurrentMemoryRange->PhysicalBaseAddress &&
91 AddressOfPage < CurrentMemoryRange->PhysicalEndAddress)
92 {
93 // LogInfo("0x%X> Range=%llX -> %llX | Begin=%llX End=%llX", PageFrameNumber, AddressOfPage, AddressOfPage + SIZE_2_MB - 1, g_EptState->MemoryRanges[CurrentMtrrRange].PhysicalBaseAddress, g_EptState->MemoryRanges[CurrentMtrrRange].PhysicalEndAddress);
94
95 //
96 // 12.11.4.1 MTRR Precedences
97 //
98 if (CurrentMemoryRange->FixedRange)
99 {
100 //
101 // When the fixed-range MTRRs are enabled, they take priority over the variable-range
102 // MTRRs when overlaps in ranges occur.
103 //
104 TargetMemoryType = CurrentMemoryRange->MemoryType;
105 break;
106 }
107
108 if (TargetMemoryType == MEMORY_TYPE_UNCACHEABLE)
109 {
110 //
111 // If this is going to be marked uncacheable, then we stop the search as UC always
112 // takes precedence
113 //
114 TargetMemoryType = CurrentMemoryRange->MemoryType;
115 break;
116 }
117
118 if (TargetMemoryType == MEMORY_TYPE_WRITE_THROUGH || CurrentMemoryRange->MemoryType == MEMORY_TYPE_WRITE_THROUGH)
119 {
120 if (TargetMemoryType == MEMORY_TYPE_WRITE_BACK)
121 {
122 //
123 // If two or more MTRRs overlap and describe the same region, and at least one is WT and
124 // the other one(s) is/are WB, use WT. However, continue looking, as other MTRRs
125 // may still specify the address as UC, which always takes precedence
126 //
127 TargetMemoryType = MEMORY_TYPE_WRITE_THROUGH;
128 continue;
129 }
130 }
131
132 //
133 // Otherwise, just use the last MTRR that describes this address
134 //
135 TargetMemoryType = CurrentMemoryRange->MemoryType;
136 }
137 }
138
139 //
140 // If no MTRR was found, return the default memory type
141 //
142 if (TargetMemoryType == (UINT8)-1)
143 {
144 TargetMemoryType = g_EptState->DefaultMemoryType;
145 }
146
147 return TargetMemoryType;
148}
#define SIZE_2_MB
Integer 2MB.
Definition Ept.h:31

◆ EptGetPml1Entry()

PEPT_PML1_ENTRY EptGetPml1Entry ( PVMM_EPT_PAGE_TABLE EptPageTable,
SIZE_T PhysicalAddress )

Get the PML1 entry for this physical address if the page is split.

Get the PML1 Entry of a special address.

Parameters
EptPageTableThe EPT Page Table
PhysicalAddressPhysical address that we want to get its PML1
Returns
PEPT_PML1_ENTRY Return NULL if the address is invalid or the page wasn't already split
305{
306 SIZE_T Directory, DirectoryPointer, PML4Entry;
307 PEPT_PML2_ENTRY PML2;
308 PEPT_PML1_ENTRY PML1;
309 PEPT_PML2_POINTER PML2Pointer;
310
311 Directory = ADDRMASK_EPT_PML2_INDEX(PhysicalAddress);
312 DirectoryPointer = ADDRMASK_EPT_PML3_INDEX(PhysicalAddress);
313 PML4Entry = ADDRMASK_EPT_PML4_INDEX(PhysicalAddress);
314
315 //
316 // Addresses above 512GB are invalid because it is > physical address bus width
317 //
318 if (PML4Entry > 0)
319 {
320 return NULL;
321 }
322
323 PML2 = &EptPageTable->PML2[DirectoryPointer][Directory];
324
325 //
326 // Check to ensure the page is split
327 //
328 if (PML2->LargePage)
329 {
330 return NULL;
331 }
332
333 //
334 // Conversion to get the right PageFrameNumber.
335 // These pointers occupy the same place in the table and are directly convertible.
336 //
337 PML2Pointer = (PEPT_PML2_POINTER)PML2;
338
339 //
340 // If it is, translate to the PML1 pointer
341 //
342 PML1 = (PEPT_PML1_ENTRY)PhysicalAddressToVirtualAddress(PML2Pointer->PageFrameNumber * PAGE_SIZE);
343
344 if (!PML1)
345 {
346 return NULL;
347 }
348
349 //
350 // Index into PML1 for that address
351 //
352 PML1 = &PML1[ADDRMASK_EPT_PML1_INDEX(PhysicalAddress)];
353
354 return PML1;
355}
_Use_decl_annotations_ UINT64 PhysicalAddressToVirtualAddress(UINT64 PhysicalAddress)
Converts Physical Address to Virtual Address.
Definition Conversion.c:22
#define ADDRMASK_EPT_PML2_INDEX(_VAR_)
Index of the 2nd paging structure (2MB)
Definition Ept.h:49
#define ADDRMASK_EPT_PML4_INDEX(_VAR_)
Index of the 4th paging structure (512GB)
Definition Ept.h:61
#define ADDRMASK_EPT_PML1_INDEX(_VAR_)
Index of the 1st paging structure (4096 byte)
Definition Ept.h:43
#define ADDRMASK_EPT_PML3_INDEX(_VAR_)
Index of the 3rd paging structure (1GB)
Definition Ept.h:55
EPT_PTE * PEPT_PML1_ENTRY
Definition State.h:22
EPT_PDE_2MB * PEPT_PML2_ENTRY
Definition State.h:20
EPT_PDE * PEPT_PML2_POINTER
Definition State.h:21

◆ EptGetPml1OrPml2Entry()

PVOID EptGetPml1OrPml2Entry ( PVMM_EPT_PAGE_TABLE EptPageTable,
SIZE_T PhysicalAddress,
BOOLEAN * IsLargePage )

Get the PML1 entry for this physical address if the large page is available then large page of Pml2 is returned.

Parameters
EptPageTableThe EPT Page Table
PhysicalAddressPhysical address that we want to get its PML1
IsLargePageShows whether it's a large page or not
Returns
PVOID Return PEPT_PML1_ENTRY or PEPT_PML2_ENTRY
369{
370 SIZE_T Directory, DirectoryPointer, PML4Entry;
371 PEPT_PML2_ENTRY PML2;
372 PEPT_PML1_ENTRY PML1;
373 PEPT_PML2_POINTER PML2Pointer;
374
375 Directory = ADDRMASK_EPT_PML2_INDEX(PhysicalAddress);
376 DirectoryPointer = ADDRMASK_EPT_PML3_INDEX(PhysicalAddress);
377 PML4Entry = ADDRMASK_EPT_PML4_INDEX(PhysicalAddress);
378
379 //
380 // Addresses above 512GB are invalid because it is > physical address bus width
381 //
382 if (PML4Entry > 0)
383 {
384 return NULL;
385 }
386
387 PML2 = &EptPageTable->PML2[DirectoryPointer][Directory];
388
389 //
390 // Check to ensure the page is split
391 //
392 if (PML2->LargePage)
393 {
394 *IsLargePage = TRUE;
395 return PML2;
396 }
397
398 //
399 // Conversion to get the right PageFrameNumber.
400 // These pointers occupy the same place in the table and are directly convertible.
401 //
402 PML2Pointer = (PEPT_PML2_POINTER)PML2;
403
404 //
405 // If it is, translate to the PML1 pointer
406 //
407 PML1 = (PEPT_PML1_ENTRY)PhysicalAddressToVirtualAddress(PML2Pointer->PageFrameNumber * PAGE_SIZE);
408
409 if (!PML1)
410 {
411 return NULL;
412 }
413
414 //
415 // Index into PML1 for that address
416 //
417 PML1 = &PML1[ADDRMASK_EPT_PML1_INDEX(PhysicalAddress)];
418
419 *IsLargePage = FALSE;
420 return PML1;
421}

◆ EptGetPml2Entry()

PEPT_PML2_ENTRY EptGetPml2Entry ( PVMM_EPT_PAGE_TABLE EptPageTable,
SIZE_T PhysicalAddress )

Get the PML2 entry for this physical address.

Get the PML2 Entry of a specific physical address.

Parameters
EptPageTableThe EPT Page Table
PhysicalAddressPhysical Address that we want to get its PML2
Returns
PEPT_PML2_ENTRY The PML2 Entry Structure
432{
433 SIZE_T Directory, DirectoryPointer, PML4Entry;
434 PEPT_PML2_ENTRY PML2;
435
436 Directory = ADDRMASK_EPT_PML2_INDEX(PhysicalAddress);
437 DirectoryPointer = ADDRMASK_EPT_PML3_INDEX(PhysicalAddress);
438 PML4Entry = ADDRMASK_EPT_PML4_INDEX(PhysicalAddress);
439
440 //
441 // Addresses above 512GB are invalid because it is > physical address bus width
442 //
443 if (PML4Entry > 0)
444 {
445 return NULL;
446 }
447
448 PML2 = &EptPageTable->PML2[DirectoryPointer][Directory];
449 return PML2;
450}

◆ EptHandleEptViolation()

BOOLEAN EptHandleEptViolation ( VIRTUAL_MACHINE_STATE * VCpu)

Handle VM exits for EPT violations.

Handle EPT Violation.

Violations are thrown whenever an operation is performed on an EPT entry that does not provide permissions to access that page

Parameters
VCpuThe virtual processor's state
Returns
BOOLEAN Return true if the violation was handled by the page hook handler and false if it was not handled
1003{
1004 UINT64 GuestPhysicalAddr;
1005 VMX_EXIT_QUALIFICATION_EPT_VIOLATION ViolationQualification = {.AsUInt = VCpu->ExitQualification};
1006
1007 //
1008 // Reading guest physical address
1009 //
1010 __vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS, &GuestPhysicalAddr);
1011
1012 if (ExecTrapHandleEptViolationVmexit(VCpu, &ViolationQualification))
1013 {
1014 return TRUE;
1015 }
1016 else if (EptHandlePageHookExit(VCpu, ViolationQualification, GuestPhysicalAddr))
1017 {
1018 //
1019 // Handled by page hook code
1020 //
1021 return TRUE;
1022 }
1023 else if (VmmCallbackUnhandledEptViolation(VCpu->CoreId, (UINT64)ViolationQualification.AsUInt, GuestPhysicalAddr))
1024 {
1025 //
1026 // Check whether this violation is meaningful for the application or not
1027 //
1028 return TRUE;
1029 }
1030
1031 LogError("Err, unexpected EPT violation at RIP: %llx", VCpu->LastVmexitRip);
1032 DbgBreakPoint();
1033 //
1034 // Redo the instruction that caused the exception
1035 //
1036 return FALSE;
1037}
BOOLEAN VmmCallbackUnhandledEptViolation(UINT32 CoreId, UINT64 ViolationQualification, UINT64 GuestPhysicalAddr)
routine callback to handle unhandled EPT violations
Definition Callback.c:316
_Use_decl_annotations_ BOOLEAN EptHandlePageHookExit(VIRTUAL_MACHINE_STATE *VCpu, VMX_EXIT_QUALIFICATION_EPT_VIOLATION ViolationQualification, UINT64 GuestPhysicalAddr)
Check if this exit is due to a violation caused by a currently hooked page.
Definition Ept.c:844
BOOLEAN ExecTrapHandleEptViolationVmexit(VIRTUAL_MACHINE_STATE *VCpu, VMX_EXIT_QUALIFICATION_EPT_VIOLATION *ViolationQualification)
Handle EPT Violations related to the MBEC hooks.
Definition ExecTrap.c:779
UINT32 ExitQualification
Definition State.h:308
UINT32 CoreId
Definition State.h:306
UINT64 LastVmexitRip
Definition State.h:309

◆ EptHandleMisconfiguration()

VOID EptHandleMisconfiguration ( VOID )

Handle vm-exits for EPT Misconfiguration.

Handle Ept Misconfigurations.

Returns
VOID
1047{
1048 UINT64 GuestPhysicalAddr = 0;
1049
1050 __vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS, &GuestPhysicalAddr);
1051
1052 LogInfo("EPT Misconfiguration!");
1053
1054 LogError("Err, a field in the EPT paging structure was invalid, faulting guest address : 0x%llx",
1055 GuestPhysicalAddr);
1056
1057 //
1058 // We can't continue now.
1059 // EPT misconfiguration is a fatal exception that will probably crash the OS if we don't get out now
1060 //
1061}
#define LogInfo(format,...)
Define log variables.
Definition HyperDbgHyperLogIntrinsics.h:71

◆ EptHandlePageHookExit()

_Use_decl_annotations_ BOOLEAN EptHandlePageHookExit ( VIRTUAL_MACHINE_STATE * VCpu,
VMX_EXIT_QUALIFICATION_EPT_VIOLATION ViolationQualification,
UINT64 GuestPhysicalAddr )

Check if this exit is due to a violation caused by a currently hooked page.

If the memory access attempt was RW and the page was marked executable, the page is swapped with the original page.

If the memory access attempt was execute and the page was marked not executable, the page is swapped with the hooked page.

Parameters
VCpuThe virtual processor's state *
ViolationQualificationThe violation qualification in vm-exit
GuestPhysicalAddrThe GUEST_PHYSICAL_ADDRESS that caused this EPT violation
Returns
BOOLEAN Returns true if it was successful or false if the violation was not due to a page hook
847{
848 PVOID TargetPage;
849 UINT64 CurrentRip;
850 UINT32 CurrentInstructionLength;
851 BOOLEAN IsHandled = FALSE;
852 BOOLEAN ResultOfHandlingHook = FALSE;
853 BOOLEAN IgnoreReadOrWriteOrExec = FALSE;
854 BOOLEAN IsExecViolation = FALSE;
855
857 {
858 if (HookedEntry->PhysicalBaseAddress == (SIZE_T)PAGE_ALIGN(GuestPhysicalAddr))
859 {
860 //
861 // *** We found an address that matches the details ***
862 //
863
864 //
865 // Returning true means that the caller should return to the ept state to
866 // the previous state when this instruction is executed
867 // by setting the Monitor Trap Flag. Return false means that nothing special
868 // for the caller to do
869 //
870
871 //
872 // Reaching here means that the hooks was actually caused VM-exit because of
873 // our configurations, but here we double whether the hook needs to trigger
874 // any event or not because the hooking address (physical) might not be in the
875 // target range. For example we might hook 0x123b000 to 0x123b300 but the hook
876 // happens on 0x123b4600, so we perform the necessary checks here
877 //
878
879 if (GuestPhysicalAddr >= HookedEntry->StartOfTargetPhysicalAddress && GuestPhysicalAddr <= HookedEntry->EndOfTargetPhysicalAddress)
880 {
881 ResultOfHandlingHook = EptHookHandleHookedPage(VCpu,
882 HookedEntry,
883 ViolationQualification,
884 GuestPhysicalAddr,
885 &HookedEntry->LastContextState,
886 &IgnoreReadOrWriteOrExec,
887 &IsExecViolation);
888 }
889 else
890 {
891 //
892 // Here we assume the hook is handled as the hook needs to be
893 // restored (just not within the range)
894 //
895 ResultOfHandlingHook = TRUE;
896 }
897
898 if (ResultOfHandlingHook)
899 {
900 //
901 // Here we check whether the event should be ignored or not,
902 // if we don't apply the below restorations routines, the event
903 // won't redo and the emulation of the memory access is passed
904 //
905 if (!IgnoreReadOrWriteOrExec)
906 {
907 //
908 // Pointer to the page entry in the page table
909 //
910 TargetPage = EptGetPml1Entry(VCpu->EptPageTable, HookedEntry->PhysicalBaseAddress);
911
912 //
913 // Restore to its original entry for one instruction
914 //
916 TargetPage,
917 HookedEntry->OriginalEntry,
918 InveptSingleContext);
919
920 //
921 // Next we have to save the current hooked entry to restore on the next instruction's vm-exit
922 //
923 VCpu->MtfEptHookRestorePoint = HookedEntry;
924
925 //
926 // The following codes are added because we realized if the execution takes long then
927 // the execution might be switched to another routines, thus, MTF might conclude on
928 // another routine and we might (and will) trigger the same instruction soon
929 //
930
931 //
932 // We have to set Monitor trap flag and give it the HookedEntry to work with
933 //
935 }
936 }
937
938 //
939 // Indicate that we handled the ept violation
940 //
941 IsHandled = TRUE;
942
943 //
944 // Get out of the loop
945 //
946 break;
947 }
948 }
949
950 //
951 // Check whether the event should be ignored or not
952 //
953 if (IgnoreReadOrWriteOrExec)
954 {
955 //
956 // Do not redo the instruction (EPT hooks won't affect the VMCS_VMEXIT_INSTRUCTION_LENGTH),
957 // thus, we use custom length diassembler engine to ignore the instruction at target address
958 //
959
960 // HvPerformRipIncrement(VCpu); // invalid because EPT Violation won't affect VMCS_VMEXIT_INSTRUCTION_LENGTH
961 HvSuppressRipIncrement(VCpu); // Just to make sure nothing is added to the address
962
963 //
964 // If the target violation is for READ/WRITE, we ignore the current instruction and move to the
965 // next instruction, but if the violation is for execute access, then we just won't increment the RIP
966 //
967 if (!IsExecViolation)
968 {
969 //
970 // Get the RIP here as the RIP might be changed by the user and thus is not valid to be read
971 // from the VCpu
972 //
973 CurrentRip = HvGetRip();
975
976 CurrentRip = CurrentRip + CurrentInstructionLength;
977
978 HvSetRip(CurrentRip);
979 }
980 }
981 else
982 {
983 //
984 // Redo the instruction (it's also not necessary as the EPT Violation won't affect VMCS_VMEXIT_INSTRUCTION_LENGTH)
985 //
987 }
988
989 return IsHandled;
990}
UINT32 DisassemblerLengthDisassembleEngineInVmxRootOnTargetProcess(PVOID Address, BOOLEAN Is32Bit)
Disassembler length disassembler engine.
Definition Disassembler.c:297
BOOLEAN EptHookHandleHookedPage(VIRTUAL_MACHINE_STATE *VCpu, EPT_HOOKED_PAGE_DETAIL *HookedEntryDetails, VMX_EXIT_QUALIFICATION_EPT_VIOLATION ViolationQualification, SIZE_T PhysicalAddress, EPT_HOOKS_CONTEXT *LastContext, BOOLEAN *IgnoreReadOrWriteOrExec, BOOLEAN *IsExecViolation)
Handles page hooks (trigger events)
Definition EptHook.c:1686
VOID HvSetRip(UINT64 Rip)
Set guest's RIP.
Definition Hv.c:1194
UINT64 HvGetRip()
Read guest's RIP.
Definition Hv.c:1178
#define LIST_FOR_EACH_LINK(_head, _struct_type, _member, _var)
Definition MetaMacros.h:34
BOOLEAN CommonIsGuestOnUsermode32Bit()
determines if the guest was in 32-bit user-mode or 64-bit (long mode)
Definition Common.c:97
#define PAGE_ALIGN(Va)
Aligning a page.
Definition common.h:75

◆ EptIsValidForLargePage()

BOOLEAN EptIsValidForLargePage ( SIZE_T PageFrameNumber)

Check if potential large page doesn't land on two or more different cache memory types.

Parameters
PageFrameNumberPFN (Physical Address)
Returns
BOOLEAN
572{
573 SIZE_T StartAddressOfPage = PageFrameNumber * SIZE_2_MB;
574 SIZE_T EndAddressOfPage = StartAddressOfPage + (SIZE_2_MB - 1);
575 MTRR_RANGE_DESCRIPTOR * CurrentMemoryRange;
576 SIZE_T CurrentMtrrRange;
577
578 for (CurrentMtrrRange = 0; CurrentMtrrRange < g_EptState->NumberOfEnabledMemoryRanges; CurrentMtrrRange++)
579 {
580 CurrentMemoryRange = &g_EptState->MemoryRanges[CurrentMtrrRange];
581
582 if ((StartAddressOfPage <= CurrentMemoryRange->PhysicalEndAddress &&
583 EndAddressOfPage > CurrentMemoryRange->PhysicalEndAddress) ||
584 (StartAddressOfPage < CurrentMemoryRange->PhysicalBaseAddress &&
585 EndAddressOfPage >= CurrentMemoryRange->PhysicalBaseAddress))
586 {
587 return FALSE;
588 }
589 }
590
591 return TRUE;
592}

◆ EptLogicalProcessorInitialize()

BOOLEAN EptLogicalProcessorInitialize ( VOID )

Initialize EPT for an individual logical processor.

Initialize EPT Table based on Processor Index.

Creates an identity mapped page table and sets up an EPTP to be applied to the VMCS later

Returns
BOOLEAN
760{
761 ULONG ProcessorsCount;
762 PVMM_EPT_PAGE_TABLE PageTable;
763 EPT_POINTER EPTP = {0};
764
765 //
766 // Get number of processors
767 //
768 ProcessorsCount = KeQueryActiveProcessorCount(0);
769
770 for (size_t i = 0; i < ProcessorsCount; i++)
771 {
772 //
773 // Allocate the identity mapped page table
774 //
776
777 if (!PageTable)
778 {
779 //
780 // Try to deallocate previous pools (if any)
781 //
782 for (size_t j = 0; j < ProcessorsCount; j++)
783 {
784 if (g_GuestState[j].EptPageTable != NULL)
785 {
786 MmFreeContiguousMemory(g_GuestState[j].EptPageTable);
788 }
789 }
790
791 LogError("Err, unable to allocate memory for EPT");
792 return FALSE;
793 }
794
795 //
796 // Virtual address to the page table to keep track of it for later freeing
797 //
798 g_GuestState[i].EptPageTable = PageTable;
799
800 //
801 // Use default memory type
802 //
803 EPTP.MemoryType = g_EptState->DefaultMemoryType;
804
805 //
806 // We might utilize the 'access' and 'dirty' flag features in the dirty logging mechanism
807 //
808 EPTP.EnableAccessAndDirtyFlags = TRUE;
809
810 //
811 // Bits 5:3 (1 less than the EPT page-walk length) must be 3, indicating an EPT page-walk length of 4;
812 // see Section 28.2.2
813 //
814 EPTP.PageWalkLength = 3;
815
816 //
817 // The physical page number of the page table we will be using
818 //
819 EPTP.PageFrameNumber = (SIZE_T)VirtualAddressToPhysicalAddress(&PageTable->PML4) / PAGE_SIZE;
820
821 //
822 // We will write the EPTP to the VMCS later
823 //
824 g_GuestState[i].EptPointer = EPTP;
825 }
826
827 return TRUE;
828}
PVMM_EPT_PAGE_TABLE EptAllocateAndCreateIdentityPageTable(VOID)
Allocates page maps and create identity page table.
Definition Ept.c:642
VIRTUAL_MACHINE_STATE * g_GuestState
Save the state and variables related to virtualization on each to logical core.
Definition GlobalVariables.h:38
EPT_POINTER EptPointer
Definition State.h:341

◆ EptSetPML1AndInvalidateTLB()

_Use_decl_annotations_ VOID EptSetPML1AndInvalidateTLB ( VIRTUAL_MACHINE_STATE * VCpu,
PEPT_PML1_ENTRY EntryAddress,
EPT_PML1_ENTRY EntryValue,
INVEPT_TYPE InvalidationType )

This function set the specific PML1 entry in a spinlock protected area then invalidate the TLB.

This function should be called from vmx root-mode

Parameters
VCpuThe virtual processor's state
EntryAddressPML1 entry information (the target address)
EntryValueThe value of pm1's entry (the value that should be replaced)
InvalidationTypetype of invalidation
Returns
VOID
1079{
1080 //
1081 // set the value
1082 //
1083 EntryAddress->AsUInt = EntryValue.AsUInt;
1084
1085 //
1086 // invalidate the cache
1087 //
1088 if (InvalidationType == InveptSingleContext)
1089 {
1090 EptInveptSingleContext(VCpu->EptPointer.AsUInt);
1091 }
1092 else if (InvalidationType == InveptAllContext)
1093 {
1095 }
1096 else
1097 {
1098 LogError("Err, invalid invalidation parameter");
1099 }
1100}
UCHAR EptInveptAllContexts()
Invalidates all contexts in EPT cache table.
Definition Invept.c:54
UCHAR EptInveptSingleContext(_In_ UINT64 EptPointer)
Invalidates a single context in ept cache table.
Definition Invept.c:40

◆ EptSetupPML2Entry()

BOOLEAN EptSetupPML2Entry ( PVMM_EPT_PAGE_TABLE EptPageTable,
PEPT_PML2_ENTRY NewEntry,
SIZE_T PageFrameNumber )

Set up PML2 Entries.

Parameters
EptPageTable
NewEntryThe PML2 Entry
PageFrameNumberPFN (Physical Address)
Returns
VOID
604{
605 PVOID TargetBuffer;
606
607 //
608 // Each of the 512 collections of 512 PML2 entries is setup here
609 // This will, in total, identity map every physical address from 0x0
610 // to physical address 0x8000000000 (512GB of memory)
611 // ((EntryGroupIndex * VMM_EPT_PML2E_COUNT) + EntryIndex) * 2MB is
612 // the actual physical address we're mapping
613 //
614 NewEntry->PageFrameNumber = PageFrameNumber;
615
616 if (EptIsValidForLargePage(PageFrameNumber))
617 {
618 NewEntry->MemoryType = EptGetMemoryType(PageFrameNumber, TRUE);
619
620 return TRUE;
621 }
622 else
623 {
624 TargetBuffer = (PVOID)PlatformMemAllocateNonPagedPool(sizeof(VMM_EPT_DYNAMIC_SPLIT));
625
626 if (!TargetBuffer)
627 {
628 LogError("Err, cannot allocate page for splitting edge large pages");
629 return FALSE;
630 }
631
632 return EptSplitLargePage(EptPageTable, TargetBuffer, PageFrameNumber * SIZE_2_MB);
633 }
634}
BOOLEAN EptSplitLargePage(PVMM_EPT_PAGE_TABLE EptPageTable, PVOID PreAllocatedBuffer, SIZE_T PhysicalAddress)
Split 2MB (LargePage) into 4kb pages.
Definition Ept.c:462
BOOLEAN EptIsValidForLargePage(SIZE_T PageFrameNumber)
Check if potential large page doesn't land on two or more different cache memory types.
Definition Ept.c:571
UINT8 EptGetMemoryType(SIZE_T PageFrameNumber, BOOLEAN IsLargePage)
Get the memory type (cache attribute) of a physical page based on the MTRR map.
Definition Ept.c:69
PVOID PlatformMemAllocateNonPagedPool(SIZE_T NumberOfBytes)
Allocate a non-paged buffer.
Definition Mem.c:41
Split 2MB granularity to 4 KB granularity.
Definition Ept.h:135

◆ EptSplitLargePage()

BOOLEAN EptSplitLargePage ( PVMM_EPT_PAGE_TABLE EptPageTable,
PVOID PreAllocatedBuffer,
SIZE_T PhysicalAddress )

Split 2MB (LargePage) into 4kb pages.

Convert 2MB pages to 4KB pages.

Parameters
EptPageTableThe EPT Page Table
PreAllocatedBufferThe address of pre-allocated buffer
PhysicalAddressPhysical address of where we want to split
Returns
BOOLEAN Returns true if it was successful or false if there was an error
465{
466 PVMM_EPT_DYNAMIC_SPLIT NewSplit;
467 EPT_PML1_ENTRY EntryTemplate;
468 SIZE_T EntryIndex;
469 PEPT_PML2_ENTRY TargetEntry;
470 EPT_PML2_POINTER NewPointer;
471
472 //
473 // Find the PML2 entry that's currently used
474 //
475 TargetEntry = EptGetPml2Entry(EptPageTable, PhysicalAddress);
476
477 if (!TargetEntry)
478 {
479 LogError("Err, an invalid physical address passed");
480 return FALSE;
481 }
482
483 //
484 // If this large page is not marked a large page, that means it's a pointer already.
485 // That page is therefore already split.
486 //
487 if (!TargetEntry->LargePage)
488 {
489 //
490 // As it's a large page and we request a pool for it, we need to
491 // free the pool because it's not used anymore
492 //
493 PoolManagerFreePool((UINT64)PreAllocatedBuffer);
494
495 return TRUE;
496 }
497
498 //
499 // Allocate the PML1 entries
500 //
501 NewSplit = (PVMM_EPT_DYNAMIC_SPLIT)PreAllocatedBuffer;
502 if (!NewSplit)
503 {
504 LogError("Err, failed to allocate dynamic split memory");
505 return FALSE;
506 }
507 RtlZeroMemory(NewSplit, sizeof(VMM_EPT_DYNAMIC_SPLIT));
508
509 //
510 // Point back to the entry in the dynamic split for easy reference for which entry that
511 // dynamic split is for
512 //
513 NewSplit->u.Entry = TargetEntry;
514
515 //
516 // Make a template for RWX
517 //
518 EntryTemplate.AsUInt = 0;
519 EntryTemplate.ReadAccess = 1;
520 EntryTemplate.WriteAccess = 1;
521 EntryTemplate.ExecuteAccess = 1;
522
523 //
524 // copy other bits from target entry
525 //
526 EntryTemplate.MemoryType = TargetEntry->MemoryType;
527 EntryTemplate.IgnorePat = TargetEntry->IgnorePat;
528 EntryTemplate.SuppressVe = TargetEntry->SuppressVe;
529
530 //
531 // Copy the template into all the PML1 entries
532 //
533 __stosq((SIZE_T *)&NewSplit->PML1[0], EntryTemplate.AsUInt, VMM_EPT_PML1E_COUNT);
534
535 //
536 // Set the page frame numbers for identity mapping
537 //
538 for (EntryIndex = 0; EntryIndex < VMM_EPT_PML1E_COUNT; EntryIndex++)
539 {
540 //
541 // Convert the 2MB page frame number to the 4096 page entry number plus the offset into the frame
542 //
543 NewSplit->PML1[EntryIndex].PageFrameNumber = ((TargetEntry->PageFrameNumber * SIZE_2_MB) / PAGE_SIZE) + EntryIndex;
544 NewSplit->PML1[EntryIndex].MemoryType = EptGetMemoryType(NewSplit->PML1[EntryIndex].PageFrameNumber, FALSE);
545 }
546
547 //
548 // Allocate a new pointer which will replace the 2MB entry with a pointer to 512 4096 byte entries
549 //
550 NewPointer.AsUInt = 0;
551 NewPointer.WriteAccess = 1;
552 NewPointer.ReadAccess = 1;
553 NewPointer.ExecuteAccess = 1;
554 NewPointer.PageFrameNumber = (SIZE_T)VirtualAddressToPhysicalAddress(&NewSplit->PML1[0]) / PAGE_SIZE;
555
556 //
557 // Now, replace the entry in the page table with our new split pointer
558 //
559 RtlCopyMemory(TargetEntry, &NewPointer, sizeof(NewPointer));
560
561 return TRUE;
562}
PEPT_PML2_ENTRY EptGetPml2Entry(PVMM_EPT_PAGE_TABLE EptPageTable, SIZE_T PhysicalAddress)
Get the PML2 entry for this physical address.
Definition Ept.c:431
struct _VMM_EPT_DYNAMIC_SPLIT * PVMM_EPT_DYNAMIC_SPLIT
BOOLEAN PoolManagerFreePool(UINT64 AddressToFree)
This function set a pool flag to be freed, and it will be freed on the next IOCTL when it's safe to r...
Definition PoolManager.c:136
EPT_PTE EPT_PML1_ENTRY
Definition State.h:22
EPT_PDE EPT_PML2_POINTER
Definition State.h:21
#define VMM_EPT_PML1E_COUNT
Then number of 4096 byte Page Table entries in the page table per 2MB PML2 entry when dynamically spl...
Definition State.h:98
EPT_PML1_ENTRY PML1[VMM_EPT_PML1E_COUNT]
The 4096 byte page table entries that correspond to the split 2MB table entry.
Definition Ept.h:141
PEPT_PML2_ENTRY Entry
Definition Ept.h:149
union _VMM_EPT_DYNAMIC_SPLIT::@6 u
The pointer to the 2MB entry in the page table which this split is servicing.