//#pragma module PAGING_MACROS.H "X-9"

#ifndef __PAGING_MACROS_LOADED
#define __PAGING_MACROS_LOADED

/*
 *************************************************************************
 *                                                                       *
 *  VMS SOFTWARE, INC. CONFIDENTIAL. This software is confidential       *
 *  proprietary software licensed by VMS Software, Inc., and is not      *
 *  authorized to be used, duplicated or disclosed to anyone without     *
 *  the prior written permission of VMS Software, Inc.                   *
 *  Copyright 2019-2021 VMS Software, Inc.                               *
 *                                                                       *
 *************************************************************************
 *
 * FACILITY:  VMS Executive (LIB_H)
 *
 * ABSTRACT:  This module contains C macros for accessing and manipulating
 *            PTEs for x86.
 *
 * Author:  Drew Mason
 *
 * Creation Date:  19-Jul-2017
 *
 * Revision History:
 *
 *      X-9     GHJ     Gregory H. Jordan       23-Aug-2021
 *              Add a 4K variant of the various pte_va macros for x86. These
 *              macros will return the address of the 4K PTE instead of the
 *              address of the PTE pair.
 *
 *      X-8     GHJ     Gregory H. Jordan       20-Feb-2019
 *              Update $$xxpte_va macros to handle PTEs pointing to
 *              the top level page table self map pointers.
 *
 *      X-7     AHM     Drew Mason              16-Aug-2018
 *              Change macros BCNT_TO_PAGES and VA_TO_BOFF to use
 *              the allocated page size of 8192 rather than the
 *              hardware page size of 4096 bytes.
 *
 *      X-6     GHJ     Gregory H. Jordan       11-Jul-2018
 *              Modify macros that return a VA to always return the
 *              PTE VA for an 8K page. Pull out bpte_va as it is
 *              moving to vms_macros.h.
 *
 *      X-5     AHM     Drew Mason              6-Jun-2018
 *              Conditionalize the macros that return PTE virtual
 *              addresses to use boo$ variables in SYSBOOT and
 *              mmg$ variables otherwise. Use the quadword form
 *              of the PTE_LEVELS variables.
 *
 *      X-4     AHM     Drew Mason              20-Apr-2018
 *              Fix bug in the mask used in pml4e_va and pdpte_va.
 *
 *      X-3     GHJ     Greg Jordan             17-Apr-2018
 *              Fix use of __SYSBOOT, needs to be in upper case.
 *
 *      X-2     GHJ     Greg Jordan             11-Apr-2018
 *              Switch usage of BOO$GL_PTE_LEVELS to MMG$GL_PTE_LEVELS
 *              unless __SYSBOOT is defined. Rename pteidx_pt to pteidx_bpt
 *              and pto_pt to pto_bpt.
 *
 *      X-1     AHM     Drew Mason              19-Mar-2018
 *              Move this .h file from SYSBOOTX86 to LIB_H.
 *
 * --- Resync modification history for LIB_H
 *
 *      X-2     AHM     Drew Mason              2-Feb-2018
 *              Change "spt" to "bpt" for bottom page table to use
 *              a term that has less baggage.
 *
 *      X-1     AHM     Drew Mason              19-Jul-2017
 *              Initial entry.
 */

#ifndef __INITIAL_POINTER_SIZE
#error SYS-F-NOPTRSIZE, x86 memory-management code must compile with the /POINTER_SIZE qualifier
#endif

#pragma __required_pointer_size __save
#pragma __required_pointer_size __long

#include
#include
#include

// This construct allows SYSBOOT write access to variables that are
// run-time constants, but need to be initialized at boot time.

#ifdef __SYSBOOT
#define __RUNCONST
#else
#define __RUNCONST const
#endif

#define BCNT_TO_PAGES( BCNT ) \
    (((BCNT) + MMG$$C_BYTES_PER_PAGE - 1) >> MMG$$C_BOFF_SIZE)

#define VA_TO_BOFF( VA ) \
    ((VA) & MMG$$C_BOFF_MASK_8K)
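/*
! EXAMPLE:
!
!       A minimal sketch of the two macros above; "buf" and "len" are
!       hypothetical names, not part of this module.  Per revision X-7,
!       both macros work in terms of the 8192-byte allocated page size,
!       not the 4096-byte hardware page size.
!
!       char *buf;
!       uint64 len, pages, boff;
!
!       pages = BCNT_TO_PAGES (len);        // 8K pages needed to hold len bytes
!       boff  = VA_TO_BOFF ((int64) buf);   // byte offset within buf's 8K page
*/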
/*
!
! MACROS:
!       pteidx_pml5 (addr)
!       pteidx_pml4 (addr)
!       pteidx_pdpt (addr)
!       pteidx_pd   (addr)
!       pteidx_bpt  (addr)
!
! FUNCTION:
!
!       These routines and macros accept a virtual address and return the
!       corresponding page-table field.  The return value is, equivalently,
!       the index (not offset) in the page-table page for the input address.
!       This is an index within the specific page, not the index within the
!       entire page table.
!
!       The purpose of embedding the routine in a #define is so that the
!       routine can be passed any type that can be cast as an int64.
!
! NOTES:
!
!       (1) In contrast to previous VMS practice, the order of the page tables
!           is reversed: the L4 or L5 page table is the one farthest from the
!           target page.  CR3 (the x86 equivalent of PTBR) contains the
!           physical address of the PML4 or PML5 page table, depending on
!           whether the system is running with 4- or 5-level paging.
!       (2) These macros are intended to work with either 4- or 5-level paging.
!       (3) All that these routines do is mask and shift the appropriate bits
!           from the input VA so that they are at the low end of the returned
!           quadword.
!       (4) It is the caller's responsibility to locate the appropriate
!           page-table page.
!       (5) There is no guarantee that the page-table page is valid or even
!           exists.
!       (6) If given an address within a 1-GB or 2-MB page, pteidx_bpt will
!           still return the BPT index that corresponds to this address.  It
!           is likely that the BPT page for this address does not exist.  The
!           caller must sort this out.
!       (7) Similarly, if given an address within a 1-GB page, pteidx_pd will
!           still return the PD index that corresponds to this address.  It is
!           likely that the PD page for this address does not exist.  The
!           caller must sort this out.
!
! ARGUMENTS:
!
!       addr (Passed by value)
!           The virtual address whose PT field is to be returned.
!
! SIDE EFFECTS:
!
!       None.
!
! RETURNS:
!
!       The appropriate field of the VA passed to this routine, returned as an
!       index.
!
! EXAMPLE CALL:
!
!       int64 addr;
!       uint64 levelField;
!       levelField = pteidx_pdpt (addr);
!
! CHANGE HISTORY:
*/

//#pragma inline ($$$pteidx_pml5)
static uint64 $$$pteidx_pml5 (int64 addr)
{
    return (addr & MMG$$C_PML5_MASK) >> MMG$$C_PML5_BITPOS;
}
#define pteidx_pml5(addr) $$$pteidx_pml5 ((int64) (addr))

//#pragma inline ($$$pteidx_pml4)
static uint64 $$$pteidx_pml4 (int64 addr)
{
    return (addr & MMG$$C_PML4_MASK) >> MMG$$C_PML4_BITPOS;
}
#define pteidx_pml4(addr) $$$pteidx_pml4 ((int64) (addr))

//#pragma inline ($$$pteidx_pdpt)
static uint64 $$$pteidx_pdpt (int64 addr)
{
    return (addr & MMG$$C_PDPT_MASK) >> MMG$$C_PDPT_BITPOS;
}
#define pteidx_pdpt(addr) $$$pteidx_pdpt ((int64) (addr))

//#pragma inline ($$$pteidx_pd)
static uint64 $$$pteidx_pd (int64 addr)
{
    return (addr & MMG$$C_PD_MASK) >> MMG$$C_PD_BITPOS;
}
#define pteidx_pd(addr) $$$pteidx_pd ((int64) (addr))

//#pragma inline ($$$pteidx_bpt)
static uint64 $$$pteidx_bpt (int64 addr)
{
    return (addr & MMG$$C_BPT_MASK) >> MMG$$C_BPT_BITPOS;
}
#define pteidx_bpt(addr) $$$pteidx_bpt ((int64) (addr))
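/*
! EXAMPLE:
!
!       A minimal sketch (hypothetical names) that extracts every level's
!       index for one address on a 4-level system.  Each value indexes a
!       single page-table page; whether that page actually exists is the
!       caller's problem (see notes 4-7 above).
!
!       int64 va;
!       uint64 l4idx, l3idx, l2idx, l1idx;
!
!       l4idx = pteidx_pml4 (va);   // index into the PML4 page
!       l3idx = pteidx_pdpt (va);   // index into the PDPT page
!       l2idx = pteidx_pd (va);     // index into the PD page
!       l1idx = pteidx_bpt (va);    // index into the bottom-level (BPT) page
*/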
/*
!
! MACROS:
!       pto_pml5e (addr)
!       pto_pml4e (addr)
!       pto_pdpte (addr)
!       pto_pde   (addr)
!       pto_bpte  (addr)
!
! FUNCTION:
!
!       These routines and macros accept a virtual address and return the
!       corresponding page-table field.  The return value is, equivalently,
!       the offset (not index) in the page-table page for the input address.
!       This is an offset within the specific page, not the offset within the
!       entire page table.
!
!       The purpose of embedding the routine in a #define is so that the
!       routine can be passed any type that can be cast as an int64.
!
! NOTES:
!
!       (1) In contrast to previous VMS practice, the order of the page tables
!           is reversed: the L1 page table is the one closest to the target
!           4-KB page.  The L4 or L5 page table is the one farthest from the
!           target page.  CR3 (the x86 equivalent of PTBR) contains the
!           physical address of the PML4 or PML5 page table, depending on
!           whether the system is running with 4- or 5-level paging.
!       (2) These macros are intended to work with either 4- or 5-level paging.
!       (3) All that these routines do is mask and shift the appropriate bits
!           from the input VA so that they are at the low end of the returned
!           quadword.
!       (4) It is the caller's responsibility to locate the appropriate
!           page-table page.
!       (5) There is no guarantee that the page-table page is valid or even
!           exists.
!       (6) If given an address within a 1-GB or 2-MB page, pto_bpte will
!           still return the BPT offset that corresponds to this address.  It
!           is likely that the BPT page for this address does not exist.  The
!           caller must sort this out.
!       (7) Similarly, if given an address within a 1-GB page, pto_pde will
!           still return the PD offset that corresponds to this address.  It
!           is likely that the PD page for this address does not exist.  The
!           caller must sort this out.
!
! ARGUMENTS:
!
!       addr (Passed by value)
!           The virtual address whose PT field is to be returned.
!
! SIDE EFFECTS:
!
!       None.
!
! RETURNS:
!
!       The appropriate field of the VA passed to this routine, returned as an
!       offset.
!
! EXAMPLE CALL:
!
!       int64 addr;
!       uint64 levelField;
!       levelField = pto_pdpte (addr);
!
! CHANGE HISTORY:
*/

//#pragma inline ($$$pto_pml5e)
static uint64 $$$pto_pml5e (int64 addr)
{
    return (addr & MMG$$C_PML5_MASK) >> MMG$$C_PML5_OFFPOS;
}
#define pto_pml5e(addr) $$$pto_pml5e ((int64) (addr))

//#pragma inline ($$$pto_pml4e)
static uint64 $$$pto_pml4e (int64 addr)
{
    return (addr & MMG$$C_PML4_MASK) >> MMG$$C_PML4_OFFPOS;
}
#define pto_pml4e(addr) $$$pto_pml4e ((int64) (addr))

//#pragma inline ($$$pto_pdpte)
static uint64 $$$pto_pdpte (int64 addr)
{
    return (addr & MMG$$C_PDPT_MASK) >> MMG$$C_PDPT_OFFPOS;
}
#define pto_pdpte(addr) $$$pto_pdpte ((int64) (addr))

//#pragma inline ($$$pto_pde)
static uint64 $$$pto_pde (int64 addr)
{
    return (addr & MMG$$C_PD_MASK) >> MMG$$C_PD_OFFPOS;
}
#define pto_pde(addr) $$$pto_pde ((int64) (addr))

//#pragma inline ($$$pto_bpte)
static uint64 $$$pto_bpte (int64 addr)
{
    return (addr & MMG$$C_BPT_MASK) >> MMG$$C_BPT_OFFPOS;
}
#define pto_bpte(addr) $$$pto_bpte ((int64) (addr))
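/*
! EXAMPLE:
!
!       A minimal sketch (hypothetical names) relating the pto_* and
!       pteidx_* families.  Assuming each PTE occupies PTE$C_BYTES_PER_PTE
!       bytes, the byte offset returned by pto_pde is simply the index
!       returned by pteidx_pd scaled by the PTE size.
!
!       int64 va;
!       uint64 idx, off;
!
!       idx = pteidx_pd (va);       // entry index within the PD page
!       off = pto_pde (va);         // byte offset of that entry
!       // expected: off == idx * PTE$C_BYTES_PER_PTE
*/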
/*
!
! MACROS:
!       pml5e_va (addr, mode)
!       pml4e_va (addr, mode)
!       pdpte_va (addr, mode)
!       pde_va   (addr, mode)
!       bpte_va  (addr, mode)
!
!       pml5e_va_4k (addr, mode)
!       pml4e_va_4k (addr, mode)
!       pdpte_va_4k (addr, mode)
!       pde_va_4k   (addr, mode)
!       bpte_va_4k  (addr, mode)
!
!       (bpte_va itself is defined in vms_macros.h; see revision X-6.)
!
! FUNCTION:
!
!       These routines and macros return the VA of the PTE that maps a
!       given VA.  The specific macro identifies which of the several
!       mapping PTEs is to be returned.  The result depends on execution
!       mode.
!
!       The purpose of embedding the routine in a #define is so that the
!       routine can be passed any type that can be cast as an int64.
!
! NOTES:
!
!       (1) These macros are intended to work with either 4- or 5-level
!           paging.  But they are not very smart.  If you call pml5e_va on a
!           system that does not have 5-level paging enabled, the return
!           value will be zero.  (There are better ways to determine if
!           5-level paging is on.)  It is up to the caller to determine if
!           5-level paging is enabled.
!       (2) All these routines do is mask, shift, and add the value to the
!           address of the appropriate page-table base.  If you ask for the
!           VA of a PDE, the macro does not check whether the PML5 or PML4
!           entries are valid.  That's the caller's responsibility.
!       (3) bpte_va invoked on a 1-GB or 2-MB page will still return the VA
!           of the BPTE that corresponds to that address.  It is likely that
!           the BPT page for that address does not exist.  The caller must
!           sort this out.  A similar note applies to invoking pde_va on a
!           1-GB page.
!
! ARGUMENTS:
!
!       addr (Passed by value)
!           The virtual address for which a PTE address is desired.
!       mode (Passed by value)
!           Which of the per-mode page table addresses is desired.
!
! SIDE EFFECTS:
!
!       None.
!
! EXAMPLE CALL:
!
!       #include
!       #include
!       int64 addr;
!       PTE_PQ pdentry_va;
!
!       pdentry_va = pde_va (addr, PSL$C_SUPER);
*/

#if defined __SYSBOOT
#define PAGING_MACROS$PTE_LEVELS        boo$gq_pte_levels
#define PAGING_MACROS$BPT_BASE          boo$gq_bpt_base
#define PAGING_MACROS$PD_BASE           boo$gq_pd_base
#define PAGING_MACROS$PDPT_BASE         boo$gq_pdpt_base
#define PAGING_MACROS$PML4_BASE         boo$gq_pml4_base
#define PAGING_MACROS$PML5_BASE         boo$gq_pml5_base
#else
#define PAGING_MACROS$PTE_LEVELS        mmg$gq_pte_levels
#define PAGING_MACROS$BPT_BASE          mmg$gq_bpt_base
#define PAGING_MACROS$PD_BASE           mmg$gq_pd_base
#define PAGING_MACROS$PDPT_BASE         mmg$gq_pdpt_base
#define PAGING_MACROS$PML4_BASE         mmg$gq_pml4_base
#define PAGING_MACROS$PML5_BASE         mmg$gq_pml5_base
#endif

#pragma inline ($$$pml5e_va)
static PTE_PQ $$$pml5e_va (int64 addr, uint32 mode)
{
    extern unsigned __int64 __RUNCONST PAGING_MACROS$PTE_LEVELS;
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML5_BASE [4];

    __int64 offset;
    __int64 va_pte;

    if (PAGING_MACROS$PTE_LEVELS != 5)
    {
        return (PTE_PQ) 0;
    }

    offset = (addr & MMG$$C_PML5_MASK) >> MMG$$C_PML5_OFFPOS;
#ifndef __SYSBOOT
    offset &= ~0xF;                     // Point to PTE pair
#endif
    va_pte = (__int64) PAGING_MACROS$PML5_BASE [mode] + offset +
             ((mode % 2) * PTE$C_BYTES_PER_PTE);

    return ((PTE_PQ) va_pte);

}   // end $$$pml5e_va

#define pml5e_va(addr,mode) $$$pml5e_va ((int64) (addr), (uint32) (mode))

#pragma inline ($$$pml4e_va)
static PTE_PQ $$$pml4e_va (int64 addr, uint32 mode)
{
    extern unsigned __int64 __RUNCONST PAGING_MACROS$PTE_LEVELS;
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML4_BASE [4];
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML5_BASE [4];

    __int64 mask;
    __int64 offset;
    __int64 va_pte;

    if (PAGING_MACROS$PTE_LEVELS == 5)
        mask = MMG$$C_PML5_MASK | MMG$$C_PML4_MASK;
    else
        mask = MMG$$C_PML4_MASK;

    offset = (addr & mask) >> MMG$$C_PML4_OFFPOS;
#ifndef __SYSBOOT
    offset &= ~0xF;                     // Point to PTE pair
#endif
    va_pte = (__int64) PAGING_MACROS$PML4_BASE [mode] + offset;

    // If va_pte is the self map PTE in the top level page table, fix up based on mode
    if (PAGING_MACROS$PTE_LEVELS == 4)
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML4_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }
    else
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML5_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }

    return ((PTE_PQ) va_pte);

}   // end $$$pml4e_va

#define pml4e_va(addr,mode) $$$pml4e_va ((int64) (addr), (uint32) (mode))
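/*
! EXAMPLE:
!
!       A minimal sketch (hypothetical names) of a partial table walk using
!       this family of macros.  pml4e_va and pde_va only compute addresses;
!       per note 2, checking that the higher-level entries are valid before
!       dereferencing is the caller's job.  The valid-bit field name
!       pte$v_valid is assumed from the VMS PTE definition.
!
!       int64 va;
!       PTE_PQ pml4e, pde;
!
!       pml4e = pml4e_va (va, PSL$C_KERNEL);
!       if (pml4e->pte$v_valid)             // caller-owned validity check
!           pde = pde_va (va, PSL$C_KERNEL);
*/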
#pragma inline ($$$pdpte_va)
static PTE_PQ $$$pdpte_va (int64 addr, uint32 mode)
{
    extern unsigned __int64 __RUNCONST PAGING_MACROS$PTE_LEVELS;
    extern PTE_PQ __RUNCONST PAGING_MACROS$PDPT_BASE [4];
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML4_BASE [4];
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML5_BASE [4];

    __int64 mask;
    __int64 offset;
    __int64 va_pte;

    mask = MMG$$C_PML4_MASK | MMG$$C_PDPT_MASK;
    if (PAGING_MACROS$PTE_LEVELS == 5)
        mask |= MMG$$C_PML5_MASK;

    offset = (addr & mask) >> MMG$$C_PDPT_OFFPOS;
#ifndef __SYSBOOT
    offset &= ~0xF;                     // Point to PTE pair
#endif
    va_pte = (__int64) PAGING_MACROS$PDPT_BASE [mode] + offset;

    // If va_pte is the self map PTE in the top level page table, fix up based on mode
    if (PAGING_MACROS$PTE_LEVELS == 4)
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML4_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }
    else
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML5_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }

    return ((PTE_PQ) va_pte);

}   // end $$$pdpte_va

#define pdpte_va(addr,mode) $$$pdpte_va ((int64) (addr), (uint32) (mode))

#pragma inline ($$$pde_va)
static PTE_PQ $$$pde_va (int64 addr, uint32 mode)
{
    extern unsigned __int64 __RUNCONST PAGING_MACROS$PTE_LEVELS;
    extern PTE_PQ __RUNCONST PAGING_MACROS$PD_BASE [4];
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML4_BASE [4];
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML5_BASE [4];

    __int64 mask;
    __int64 offset;
    __int64 va_pte;

    mask = MMG$$C_PML4_MASK | MMG$$C_PDPT_MASK | MMG$$C_PD_MASK;
    if (PAGING_MACROS$PTE_LEVELS == 5)
        mask |= MMG$$C_PML5_MASK;

    offset = (addr & mask) >> MMG$$C_PD_OFFPOS;
#ifndef __SYSBOOT
    offset &= ~0xF;                     // Point to PTE pair
#endif
    va_pte = (__int64) PAGING_MACROS$PD_BASE [mode] + offset;

    // If va_pte is the self map PTE in the top level page table, fix up based on mode
    if (PAGING_MACROS$PTE_LEVELS == 4)
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML4_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }
    else
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML5_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }

    return ((PTE_PQ) va_pte);

}   // end $$$pde_va

#define pde_va(addr,mode) $$$pde_va ((int64) (addr), (uint32) (mode))

#pragma inline ($$$pml5e_va_4k)
static PTE_PQ $$$pml5e_va_4k (int64 addr, uint32 mode)
{
    extern unsigned __int64 __RUNCONST PAGING_MACROS$PTE_LEVELS;
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML5_BASE [4];

    __int64 offset;
    __int64 va_pte;

    if (PAGING_MACROS$PTE_LEVELS != 5)
    {
        return (PTE_PQ) 0;
    }

    offset = (addr & MMG$$C_PML5_MASK) >> MMG$$C_PML5_OFFPOS;
    va_pte = (__int64) PAGING_MACROS$PML5_BASE [mode] + offset +
             ((mode % 2) * PTE$C_BYTES_PER_PTE);

    return ((PTE_PQ) va_pte);

}   // end $$$pml5e_va_4k

#define pml5e_va_4k(addr,mode) $$$pml5e_va_4k ((int64) (addr), (uint32) (mode))

#pragma inline ($$$pml4e_va_4k)
static PTE_PQ $$$pml4e_va_4k (int64 addr, uint32 mode)
{
    extern unsigned __int64 __RUNCONST PAGING_MACROS$PTE_LEVELS;
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML4_BASE [4];
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML5_BASE [4];

    __int64 mask;
    __int64 offset;
    __int64 va_pte;

    if (PAGING_MACROS$PTE_LEVELS == 5)
        mask = MMG$$C_PML5_MASK | MMG$$C_PML4_MASK;
    else
        mask = MMG$$C_PML4_MASK;

    offset = (addr & mask) >> MMG$$C_PML4_OFFPOS;
    va_pte = (__int64) PAGING_MACROS$PML4_BASE [mode] + offset;

    // If va_pte is the self map PTE in the top level page table, fix up based on mode
    if (PAGING_MACROS$PTE_LEVELS == 4)
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML4_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }
    else
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML5_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }

    return ((PTE_PQ) va_pte);

}   // end $$$pml4e_va_4k

#define pml4e_va_4k(addr,mode) $$$pml4e_va_4k ((int64) (addr), (uint32) (mode))

#pragma inline ($$$pdpte_va_4k)
static PTE_PQ $$$pdpte_va_4k (int64 addr, uint32 mode)
{
    extern unsigned __int64 __RUNCONST PAGING_MACROS$PTE_LEVELS;
    extern PTE_PQ __RUNCONST PAGING_MACROS$PDPT_BASE [4];
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML4_BASE [4];
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML5_BASE [4];

    __int64 mask;
    __int64 offset;
    __int64 va_pte;

    mask = MMG$$C_PML4_MASK | MMG$$C_PDPT_MASK;
    if (PAGING_MACROS$PTE_LEVELS == 5)
        mask |= MMG$$C_PML5_MASK;

    offset = (addr & mask) >> MMG$$C_PDPT_OFFPOS;
    va_pte = (__int64) PAGING_MACROS$PDPT_BASE [mode] + offset;

    // If va_pte is the self map PTE in the top level page table, fix up based on mode
    if (PAGING_MACROS$PTE_LEVELS == 4)
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML4_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }
    else
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML5_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }

    return ((PTE_PQ) va_pte);

}   // end $$$pdpte_va_4k

#define pdpte_va_4k(addr,mode) $$$pdpte_va_4k ((int64) (addr), (uint32) (mode))
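/*
! EXAMPLE:
!
!       A minimal sketch (hypothetical names) contrasting the two variants.
!       Per revision X-9, outside of SYSBOOT the base macro returns the
!       address of the PTE pair (adjusted per mode for the self-map slot),
!       while the _4k variant returns the address of the individual 4K PTE.
!
!       int64 va;
!       PTE_PQ pair_va, pte4k_va;
!
!       pair_va  = pdpte_va (va, PSL$C_KERNEL);     // address of the PTE pair
!       pte4k_va = pdpte_va_4k (va, PSL$C_KERNEL);  // address of the 4K PTE
*/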
#pragma inline ($$$pde_va_4k)
static PTE_PQ $$$pde_va_4k (int64 addr, uint32 mode)
{
    extern unsigned __int64 __RUNCONST PAGING_MACROS$PTE_LEVELS;
    extern PTE_PQ __RUNCONST PAGING_MACROS$PD_BASE [4];
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML4_BASE [4];
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML5_BASE [4];

    __int64 mask;
    __int64 offset;
    __int64 va_pte;

    mask = MMG$$C_PML4_MASK | MMG$$C_PDPT_MASK | MMG$$C_PD_MASK;
    if (PAGING_MACROS$PTE_LEVELS == 5)
        mask |= MMG$$C_PML5_MASK;

    offset = (addr & mask) >> MMG$$C_PD_OFFPOS;
    va_pte = (__int64) PAGING_MACROS$PD_BASE [mode] + offset;

    // If va_pte is the self map PTE in the top level page table, fix up based on mode
    if (PAGING_MACROS$PTE_LEVELS == 4)
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML4_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }
    else
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML5_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }

    return ((PTE_PQ) va_pte);

}   // end $$$pde_va_4k

#define pde_va_4k(addr,mode) $$$pde_va_4k ((int64) (addr), (uint32) (mode))

#pragma inline ($$$bpte_va_4k)
static PTE_PQ $$$bpte_va_4k (int64 addr, uint32 mode)
{
    extern PTE_PQ __RUNCONST PAGING_MACROS$BPT_BASE [4];
#ifndef __SYSBOOT
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML4_BASE [4];
    extern PTE_PQ __RUNCONST PAGING_MACROS$PML5_BASE [4];
#endif
    extern __RUNCONST unsigned __int64 PAGING_MACROS$PTE_LEVELS;

    __int64 mask;
    __int64 offset;
    __int64 va_pte;

#if defined(TEST$MMG)
    va_pte = test$mmg_bpte_va( addr );
    if (va_pte != 0)
        return ((PTE_PQ) va_pte);
#endif

    mask = MMG$$C_PML4_MASK | MMG$$C_PDPT_MASK | MMG$$C_PD_MASK | MMG$$C_BPT_MASK;
    if (PAGING_MACROS$PTE_LEVELS == 5)
        mask |= MMG$$C_PML5_MASK;

    offset = (addr & mask) >> MMG$$C_BPT_OFFPOS;
    va_pte = (uint64) PAGING_MACROS$BPT_BASE [mode] + offset;

#ifndef __SYSBOOT
    /* If va_pte is the self map PTE in the top level page table, fix up based on mode */
    if (PAGING_MACROS$PTE_LEVELS == 4)
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML4_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }
    else
    {
        if ((va_pte & ~0x10ull) ==
            (__int64) PAGING_MACROS$PML5_BASE[mode] +
            (PTE$C_BYTES_PER_PTE * (MMG$$C_PTES_PER_PAGE / 2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }
#endif

    return ((PTE_PQ) va_pte);

}   /* end $$$bpte_va_4k */

#define bpte_va_4k(addr,mode) $$$bpte_va_4k ((int64) (addr), (uint32) (mode))

#pragma __required_pointer_size __restore

#endif  // __PAGING_MACROS_LOADED