#ifndef __VMS_MACROS_LOADED #define __VMS_MACROS_LOADED 1 /* module VMS_MACROS.H "X-94" */ /* ************************************************************************* */ /* * * */ /* * HPE CONFIDENTIAL. This software is confidential proprietary software * */ /* * licensed by Hewlett Packard Enterprise Development, LP, and is not * */ /* * authorized to be used, duplicated or disclosed to anyone without the * */ /* * prior written permission of HPE. * */ /* * Copyright 2019 Hewlett Packard Enterprise Development, LP * */ /* * * */ /* * VMS SOFTWARE, INC. CONFIDENTIAL. This software is confidential * */ /* * proprietary software licensed by VMS Software, Inc., and is not * */ /* * authorized to be used, duplicated or disclosed to anyone without * */ /* * the prior written permission of VMS Software, Inc. * */ /* * Copyright 2019-2021 VMS Software, Inc. * */ /* * * */ /* ************************************************************************* */ /* *++ * FACILITY: * * VMS Executive (LIB_H) * * ABSTRACT: * * This header file will provide a basic set of C macros for system programmers * to hide some particularly ugly things or for commonly used functions which * are somewhat performance sensitive. These macros should be very simple (not * that C would let you get away with anything else). If complex, reconsider * creating a new exec routine to perform the function rather than a macro. * * AUTHOR: * * Steve DiPirro * * CREATION DATE: 17-Feb-1993 * * MODIFICATION HISTORY: * * X-94 GHJ Gregory H. Jordan 9-Jun-2021 * The lock_cpu_mutex and unlock_cpu_mutex macros incorrectly * computed a signed longword delta time. If bit 31 was set, * then the call to exe$timedwait_setup computed a end_time * in the past. If exe$timedwait_complete was called, it * would always indicate the end time was exceeded. * * X-93 CEG0922 Clair Grant 28-Sep-2020 * Change comments in NOT_X86_READY to use "\" * * X-92 CEG0920 Clair Grant 25-Sep-2020 * Change comment in X-91 * * X-91 CEG0911 Clair Grant 24-Sep-2020 * Replace "// comment" style to eliminate * confusion when compiling /STANDARD=VAXC. * * X-90 CEG0840 Clair Grant 04-Dec-2019 * Fix build bug in X-89 * * X-89 CEG0836 Clair Grant 23-Nov-2019 * Add VMS$GET_GR and VMS$SET_GR * * X-88 AHM Drew Mason 7-Jun-2019 * Change the x86 definition of VMS$GET_TIMESTAMP to * read the TSC not the HPET. * * X-87 CV-0334 Camiel Vanderhoeven 12-Apr-2019 * Fix typo in previous checkin. * * X-86 CV-0329 Camiel Vanderhoeven 9-Apr-2019 * Use sys$pal_read_unq on x86. * * X-85 RCL Rick Lord 14-Mar-2019 * Add $pa_to_pfn to convert a PA to a PFN. Since * MMG$$C_PA_TO_PFN_SHIFT is now defined in MMG_CONSTANTS, * use it and delete local symbol PFN_TO_PA_SHIFT. Move * #include of mmg_constants to an unconditional block * to support Alpha and IA64 $pa_to_pfn. * * X-84 GHJ Gregory H. Jordan 20-Feb-2019 * Update $$bpte_va to handle a bottom PTE pointing to * the top level page table self map pointers. * * X-83 CEG0675 Clair Grant 07-Feb-2019 * Update VMS$PREV_INVO_END and add VMS$INIT_INVO_CONTEXT. * * X-82 CEG0667 Clair Grant 06-Feb-2019 * Add VMS$GET_CURR_INVO_CONTEXT, VMS$GET_PREV_INVO_CONTEXT, * and VMS$PREV_INVO_END macros * * X-81 CEG0659 Clair Grant 19-Jan-2019 * Add VMS$SET_THREADENV(X) * * X-80 RCL Rick Lord 7-Jan-2019 * Add macro not_x86_ready() * * X-79 PAJ1681 Paul A. Jacobi 09-Nov-2018 * Fix bug_check macro to generate the same code on X86 as * IA64. The X86 version will probably diverge in the future. * Fix VDE ident. 
* * X-77 MJM Michael Moroney 20-Sep-2018 * Define $insert_pfn( pte, pfn ) macro to insert PFN in PTE. * * X-76 MJM Michael Moroney 13-Sep-2018 * Add parenthesis to argument for $extract_pfn() and * $pfn_to_pa() to prevent syntax error on macro expansion. * * X-75 GHJ Gregory H. Jordan 21-Aug-2018 * Some code defines mmg$gl_vpn_to_va and a uint32 and some * as a uint64... Stop attempting to define in VMS_MACROS.H * and hard code for now. * * X-74 GHJ Gregory H. Jordan 21-Aug-2018 * Define mmg$gl_vpn_to_va in the static routines used * to conert a pfn to a pa. * * X-73 GHJ Gregory H. Jordan 20-Aug-2018 * Add macros for Extracting a PFN from a PTE, Converting a * pfn to a PA, and extracting a PA from a PTE. For X86, * there are variants to operate on 2mb and 1G ptes/pfns. * * X-72 GHJ Gregory H. Jordan 1-Aug-2018 * Make a fix for the TEST$MMG environment. * * X-71 AHM Drew Mason 16-Jul-2018 * Fix bug in bpte_va that caused SYSBOOT to fail. * * X-70 GHJ Gregory H. Jordan 11-Jul-2018 * Add the bpte_va macro vms_macros as it will be valid for * use in all architectures. * * X-69 GHJ Gregory H. Jordan 27-Jun-2018 * Add VA2PA_64 macro. * * X-68 GHJ Gregory H. Jordan 11-Jun-2018 * Add an include of mmg_constants for the X86 variant * of $$$va_pte_to_va. * * X-67 GHJ Gregory H. Jordan 5-Jun-2018 * Update VA_PTE_TO_VA for X86. * * X-66 GHJ Gregory H. Jordan 16-May-2018 * The $write_pte macro is moving to PTE_FUNCTIONS.H to * avoid the need for this module to pull in PTE_FUNCTIONS.H * for Alpha. That resulted in some conflicting symbol * definitions in the build. * * X-65 GHJ Gregory H. Jordan 16-May-2018 * Add the $write_pte macro with archiecture specific actions * for X86, IA64, and Alpha. * * X-64 CEG0416 Clair Grant 13-Apr-2018 * Make PLDEF IA64 only * * X-63 CEG0414 Clair Grant 12-Apr-2018 * Fix VMS$GET_THREADENV() for Alpha and IA64 * * X-62 CV-0161 Camiel Vanderhoeven 12-Apr-2018 * Replace C++ style comments with C style ones for * modules compiled with /STANDARD=VAXC * * X-61 CEG0401 Clair Grant 09-Apr-2018 * Added VMS$GET_THREADENV() macro * * X-60 CEG0384 Clair Grant 28-Mar-2018 * Include far_pointers for x86 * * X-59 CEG0383 Clair Grant 26-Mar-2018 * Fix another build bug in X-57 * * X-58 CEG0382 Clair Grant 26-Mar-2018 * Fix build bug in X-57 * * X-57 CEG0375 Clair Grant 26-Mar-2018 * Added VMS$GET_TIMESTAMP() * * X-56 CV-0146 Camiel Vanderhoeven 15-Mar-2018 * Add VMS$GET_PREVMODE() macros for all three architectures. * * X-55 CV-0108 Camiel Vanderhoeven 13-Feb-2018 * Fix use of PSLDEF when __NEW_STARLET is not defined. * * X-54 CV-0106 Camiel Vanderhoeven 12-Feb-2018 * Add VMS$GET_CURMODE() macros for all three architectures. * * X-53 CEG0348 Clair Grant 25-Jan-2018 * Verified a conditional * * X-52 AHM Drew Mason 1-Dec-2017 * Revert changes made in X-51 to mmg$gq_s0base_pte_address. * Made several VA- and PTE-related macros into dummies * for x86 because they won't exist on x86. * One more conditional still to be verified. * * X-51 AHM Drew Mason 7-Nov-2017 * Update copyright. Add definition of __RUNCONST * to allow SYSBOOT to write the "constants" * used here. Change mmg$gq_s0s1base_pte_address * to mmg$gq_s0base_pte_address for x86 only. Added * module IDENT. More conditionals still to be verified. * * X-50 CV-0053 Camiel Vanderhoeven 03-Oct-2017 * kriprdef is ia64 only. Exclude from x86 version. * * X-49 CEG0288 Clair Grant 13-Sep-2017 * Verified some conditionals for x86 port. More to do. * * X-48 CEG0234 Clair Grant 18-Jul-2017 * Create a temporary, empty bug_check for x86. 
* * X-47 DAG Doug Gordon 2-Nov-2016 * Noticed that the X-45 changes were outside the * __VMS_MACROS_LOADED conditional. Move them in. (But * obviously no one has included this file multiple times * since 2004!) * * X-46 CMOS Christian Moser 24-FEB-2005 * Rewrite the LOCK_CPU_MUTEX and UNLOCK_CPU_MUTEX to match what * the Macro32 macro does to acquire and release the CPU mutex. * Only use a single atomic update operation to avoid some * fairness issues. * * X-45 Clair Grant 02-Apr-2004 * Add "atomic" macros * * X-44 CMOS Christian Moser 7-SEP-2003 * Add EXC_PRINT for exception tracing * * X-43 Clair Grant 01-Nov-2002 * builtins.h must precede kriprdef.h * * X-42 Burns Fisher 31-Oct-2002 * KRIPRDEF include is IA64-only * * X-41 Burns Fisher 30-Oct-2002 * Use KRs for get_cpu_data and add get_slot_va for IA64. * * X-40 Clair Grant 23-Jul-2002 * We won't be calling a service to bugcheck so replace * sys$pal_bugchk with the __PAL_BUGCHK builtin which uses * a break instruction directly. * * X-39 KLN3080 Karen L. Noel 18-Jul-2002 * o Fix lower case conditional. * o Reset ident numbering to match source control. * * X-37A1A4 KLN3048 Karen L. Noel 28-Mar-2002 * o Avoid assigning a 64-bit pointer to a 32-bit int. * o Conditionalize Alpha specific code. * o Require /pointer_size to use 64-bit pointers. * * X-41 KLN3037 Karen L. Noel 13-Mar-2002 * Inline static routines so compiler doesn't mess up * initialization routines. * * X-40 KLN3035 Karen L. Noel 11-Mar-2002 * Remove arch_defs.h and change conditionals to use * __ALPHA and __ia64. Including arch_defs.h breaks * XFC and perhaps other code that does conditionals * a different way. * * X-39 KLN3025 Karen L. Noel 26-Feb-2002 * o Conditionalize MTPR/MFPR macro definitions. * o Cast new_ipl in sys_unlock macro to get rid * of compiler informational. * o Make two page table spaces per IA64 region. * o Call sys$pal_bugchk for IA64 in bug_check macro. * o Fix comment for sys_lock_nospin to indicate SS$_LOCKINUSE * can be returned. * o Remove inline pragmas. We trust the compiler now. * * X-38 CMOS Christian Moser 10-JAN-2002 * Update comment for TR_PRINT macro to include example * usage based on popular feedback request. * * X-37 CMOS Christian Moser 26-JUN-2001 * Add new TR_PRINT macro, which can be used as a general * purpose debug aid in combination with TR$DEBUG and TR$SDA. * * X-36 JRK Jim Kauffman 6-Nov-2000 * Fix CPU mutex deadlock * * X-35 CMOS Christian Moser 16-AUG-1999 * Add new SYS_LOCK and SYS_UNLOCK variants to support * sharelocks and nospin locking. * * X-34 RAB Richard A. Bishop 9-Apr-1999 * Make bug_check macro use some names that aren't so likely * to clash with customer definitions. * * X-33 PAJ0988 Paul A. Jacobi 16-Jun-1998 * Define BUGCHK_POWEROFF for use with BUG_CHECK macro. Update * module IDENT to match VDE. Fold of X-28A1 from BLIZZARD_FT1. * * X-30 KLN2084 Karen L. Noel 5-Jun-1998 * Protect the sys_lock macro so that it can be used * properly within a module compiled for 64-bit pointers. * * X-29 KLN2077 Karen L. Noel 20-May-1998 * Add "int" to declaration in bug_check macro so the C * compiler doesn't complain when using level4 checking. * * X-28 CMOS Christian Moser 27-APR-1998 * Replace bug_check macro to generate inline bugcheck with * correct PALcode instruction, instead of calling routine * EXE$GEN_BUGCHK to generate an inline bugcheck. * * X-27 JRK388 Jim Kauffman 4-Nov-1997 * Fix bug_check usage in lock_cpu_mutex and unlock_cpu_mutex * * X-26 KLN1570 Karen L. 
Noel 18-Jul-1996 * Fix calls to TBI_DATA* routines so that the call * entry points are used instead of the JSB entries. * * X-25 SDD Steve DiPirro 26-Apr-1996 * Fix exe$gen_bugchk parameter declarations to be more standard * and to avoid the DECC V5.3 bug. * * X-24 NYK521 Nitin Y. Karkhanis 30-Nov-1995 * Add macros to fetch contents of item list entry fields. * * X-23 EMB0381 Ellen M. Batbouta 03-Oct-1995 * Add NO_PCB symbol to TBI_DATA_64 and TBI_SINGLE macros. * * X-22 NYK326 Nitin Y. Karkhanis 30-Mar-1995 * Cast PTE pointers to integers before performing any * arithmetic. (In va_pte_to_svapte and svapte_to_va_pte.) * * X-21 EMB0355 Ellen M. Batbouta 08-Mar-1995 * Add TB invalidate macros, TBI_ALL, TBI_SINGLE, and * TBI_DATA_64. * * X-20 NYK251 Nitin Y. Karkhanis 1-Feb-1995 * Removed 64B_REVISIT for PTE_VA macro since the newest * generation of the C compiler that's coupled with Theta * correctly evaluates 64-bit expressions. * Added const to all extern system data cells declarations. * * X-19 NYK231 Nitin Y. Karkhanis 27-Jan-1995 * Add new routines to convert a VA_PTE to a SVAPTE and vice * versa. * Replaced instances of __unsigned int64 with uint64 for the * PTE macros only. * * X-18 LSS0314 Leonard S. Szubowicz 10-Jan-1995 * Handle multiple inclusions of this header file gracefully by * doing nothing after the first time. * * X-17 NYK102 Nitin Y. Karkhanis 10-Nov-1994 * Temporarily make pte_va an inline routine. This was done * to work around some compiler problems with 64-bit expressions. * * X-16 NYK075 Nitin Y. Karkhanis 17-Oct-1994 * The presence of access_backpointer and establish_backpointer * in this header file resulted in too much pain for the * build. Access_backpointer, decref, establish_backpointer, * incref, is_encumbered, and pfn_to_entry therefore have been * moved to [LIB_H]PFN_MACROS.H. The VA_PTE_TO_VA inline routine * need the static qualifer on its definition. * * X-15 NYK073 Nitin Y. Karkhanis 14-Oct-1994 * MMG$GQ_LEVEL_WIDTH and MMG$GQ_NON_VA_MASK need to be * declared within inline routine establish_backpointer. * * X-14 NYK072 Nitin Y. Karkhanis 13-Oct-1994 * Safe to include access_backpointer routine since the * system data cells it uses have now been defined. The * inclusion of the routine ended up being a separate edit from * X-13 since the symbols were defined (in SYSLNK.OPT, etc.) * after edit X-13 hit the "pack". * * X-13 NYK071 Nitin Y. Karkhanis 13-Oct-1994 * Update PTE macros according to code review comments. * Add PFN macros. * * X-12 NYK055 Nitin Y. Karkhanis 19-Sep-1994 * MMG$GL_PAGE_SIZE in va_pte_to_va should really be * MMG$GQ_PAGE_SIZE. * * X-11 NYK046 Nitin Y. Karkhanis 14-Sep-1994 * Adding paging equation macros (l1pte_va, l2pte_va, pte_va, * va_pte_to_va- routine). Also changed pfn_to_entry macro since * the shift is no longer viable. PFN database entry size has * grown for 64-bits and is no longer a power of 2. * * X-10 SDD Steve DiPirro 18-Aug-1994 * Some idiot screwed up all the va/vpn conversion macros for P0 * space using a nonexistent (and unneeded) mask symbol. Oh yeah, * that was me. What a surprise. * * X-9 SDD Steve DiPirro 23-May-1994 * Extern declaration of smp$gl_flags should use same type * as in VMS_DRIVERS.H (type SMP rather than int). * * X-8 SDD Steve DiPirro 27-Jan-1994 * Function prototypes defined here are obsolete and conflict * with the actual definitions now available in other include * files. * * X-7 SDD Steve DiPirro 08-Nov-1993 * Fixed sys_unlock to restore IPL, even when SMP enabled. 
* * X-6 SDD Steve DiPirro 08-Sep-1993 * Added vms_assert, good_status, and bad_status macros. * Fixed sys_lock problem accessing spinlock vector. * * X-5 SDD Steve DiPirro 26-Aug-1993 * Fixed sys_lock and sys_unlock references to SPL$C_xxx * symbols causing new compilation problems. * * X-4 SDD Steve DiPirro 18-Aug-1993 * Fixed (erroneous) extra level of indirection in the * bug_check macro. * * X-3 SDD Steve DiPirro 09-Jun-1993 * I'm an idiot. Fix the case of constants used by macros. * Fix sys_lock to allow defaulting of saved_ipl parameter. * Make bugcheck code references consistent across the macros. * * X-2 SDD Steve DiPirro 30-Apr-1993 * Added new bug_check macro. * *-- */ /* Include any header files we need to make these macros work */ #ifdef __ALPHA /* Verified for x86 port - Clair Grant */ #include #include #endif #include union _PS { unsigned __int64 quad; #ifdef __NEW_STARLET PSLDEF fields; #else union psldef fields; #endif }; #ifdef __ALPHA /* Verified for x86 port - Camiel Vanderhoeven */ #define PL$C_KERNEL PSL$C_KERNEL #define PL$C_EXEC PSL$C_EXEC #define PL$C_SUPER PSL$C_SUPER #define PL$C_USER PSL$C_USER #endif #include #include #ifdef __ia64 /* Verified for x86 port - Clair Grant */ #include #include #include #endif #ifdef __x86_64 /* Verified for x86 port - Clair Grant */ #include #include extern UINT64_PQ exe$gqp_hpet_counter_va; #endif /* This construct allows SYSBOOT write access to variables that are */ /* run-time constants, but need to be initialized at boot time. */ #ifdef __SYSBOOT #define __RUNCONST #define VMS_MACROS$PTE_LEVELS boo$gq_pte_levels #define VMS_MACROS$BPT_BASE boo$gq_bpt_base #define VMS_MACROS$PML4_BASE boo$gq_pml4_base #define VMS_MACROS$PML5_BASE boo$gq_pml5_base #else #define __RUNCONST const #define VMS_MACROS$PTE_LEVELS mmg$gq_pte_levels #define VMS_MACROS$BPT_BASE mmg$gq_bpt_base #define VMS_MACROS$PML4_BASE mmg$gq_pml4_base #define VMS_MACROS$PML5_BASE mmg$gq_pml5_base #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* These are macros which facilitate the creation of other macros */ #define concat_sym(a,b) a ## b #define concat_defs(a,b) concat_sym(a,b) #define bld_sym1(x,line) concat_sym(x,line) #define bld_sym(x) bld_sym1(x,__LINE__) /* Here are some macros which need no explanation (except for, perhaps, why we're bothering to define them in the first place) */ #define mfpr_asten __PAL_MFPR_ASTEN() #define mfpr_astsr __PAL_MFPR_ASTSR() #define mfpr_esp __PAL_MFPR_ESP() #define mfpr_fen __PAL_MFPR_FEN() #define mfpr_ipl __PAL_MFPR_IPL() #define mfpr_mces __PAL_MFPR_MCES() #define mfpr_pcbb __PAL_MFPR_PCBB() #define mfpr_prbr __PAL_MFPR_PRBR() #define mfpr_sisr __PAL_MFPR_SISR() #define mfpr_ssp __PAL_MFPR_SSP() #define mfpr_tbchk(check_adr) __PAL_MFPR_TBCHK(check_adr) #define mfpr_usp __PAL_MFPR_USP() #define mfpr_whami __PAL_MFPR_WHAMI() #define mtpr_asten(mask) __PAL_MTPR_ASTEN(mask) #define mtpr_astsr(mask) __PAL_MTPR_ASTSR(mask) #define mtpr_datfx(enable) __PAL_MTPR_DATFX(enable) #define mtpr_esp(new_sp) __PAL_MTPR_ESP(new_sp) #define mtpr_fen(new_fen) __PAL_MTPR_FEN(new_fen) #define mtpr_ipir(ipir_mask) __PAL_MTPR_IPIR(ipir_mask) #define mtpr_ipl(newipl) __PAL_MTPR_IPL(newipl) #define mtpr_mces(mck_sum) __PAL_MTPR_MCES(mck_sum) #define mtpr_prbr(new_prbr) __PAL_MTPR_PRBR(new_prbr) #define mtpr_sirr(mask) __PAL_MTPR_SIRR(mask) #define mtpr_ssp(new_sp) __PAL_MTPR_SSP(new_sp) #define mtpr_tbia __PAL_MTPR_TBIA() 
#define mtpr_tbiap __PAL_MTPR_TBIAP() #define mtpr_tbis(tb_adr) __PAL_MTPR_TBIS(tb_adr) #define mtpr_tbisd(tb_adr) __PAL_MTPR_TBISD(tb_adr) #define mtpr_tbisi(tb_adr) __PAL_MTPR_TBISI(tb_adr) #define mtpr_usp(new_sp) __PAL_MTPR_USP(new_sp) /* The following macros are modelled after macros available to BLISS and MACRO but are basically just simple PAL calls. */ #define dsbint(newipl,saved_ipl) saved_ipl = __PAL_MTPR_IPL(newipl) #define enbint(newipl) __PAL_MTPR_IPL(newipl) #define setipl(newipl) __PAL_MTPR_IPL(newipl) #define softint(ipl) __PAL_MTPR_SIRR(ipl) #ifdef __ia64 /* Verified for x86 port - Clair Grant; X-53 */ #define find_cpu_data __getReg(KR$C_CPUDB_VA) #define get_slot_va __getReg(KR$C_SLOT_VA) #else #define find_cpu_data __PAL_MFPR_PRBR() #endif /* The following MFPR/MTPR calls have no IA64 builtins. */ #ifdef __ALPHA /* Verified for x86 port - Clair Grant */ #define mfpr_scbb __PAL_MFPR_SCBB() #define mtpr_scbb(base_adr) __PAL_MTPR_SCBB(base_adr) #define mfpr_ptbr __PAL_MFPR_PTBR() #define mfpr_vptb __PAL_MFPR_VPTB() #define mtpr_vptb(new_vptb) __PAL_MTPR_VPTB(new_vptb) #endif /* What follows is a bug_check macro for system C programmers which can be used to generate a bugcheck. Included are some #define's of constants which can be used with the macro invocation. An example of its use is (note, all parameters are in uppercase): bug_check (INCONSTATE, FATAL, COLD); */ #define BUGCHK_QUOTE(s) #s #define BUGCHK_STR(s) BUGCHK_QUOTE(s) #define BUGCHK_FATAL 1 #define BUGCHK_NONFATAL 0 #define BUGCHK_POWEROFF 2 #define BUGCHK_COLD 1 #define BUGCHK_WARM 0 #ifdef __ALPHA /* Verified for x86 port - Clair Grant */ #define bug_check(code, severity, reboot) \ { \ extern const int BUG$_##code; \ int bug_code = (int) &BUG$_##code; \ bug_code |= (BUGCHK_##severity) ? ((BUGCHK_##reboot) ? 5 : 4) : 0; \ asm ( "call_pal " BUGCHK_STR(EVX$PAL_BUGCHK) ";", bug_code ); \ } #endif #ifdef __ia64 /* Verified for x86 port - Clair Grant */ #define bug_check(code, severity, reboot) \ { \ extern const int BUG$_##code; \ int bug_code = (int) &BUG$_##code; \ bug_code |= (BUGCHK_##severity) ? ((BUGCHK_##reboot) ? 5 : 4) : 0; \ __PAL_BUGCHK(bug_code); \ } #endif /* __ia64 */ #ifdef __x86_64 /* Verified for x86 port - Paul A. Jacobi */ #define bug_check(code, severity, reboot) \ { \ extern const int BUG$_##code; \ int bug_code = (int) &BUG$_##code; \ bug_code |= (BUGCHK_##severity) ? ((BUGCHK_##reboot) ? 5 : 4) : 0; \ __PAL_BUGCHK(bug_code); \ } #endif /* __x86_64 */ #ifdef __x86_64 /* Verified for x86 port--Drew Mason */ /* The following code is only available to modules that compile with the */ /* pointer_size qualifier. Short, long, 32, or 64 are all okay. */ #ifdef __INITIAL_POINTER_SIZE #if __INITIAL_POINTER_SIZE /* * The following macros and routines are temporarily defined for x86, but * return null results. They will be going away in the fullness of time, * but are left defined to keep the build noise to a dull roar. * l1pte_va * l2pte_va * pte_va * va_pte_to_va * svapte_to_va_pte * va_pte_to_svapte * make_va_s0 * extract_va_s0 * extract_pte_offset * */ /* Call it bpte_va so it doesn't get confused with pte_va used on Alpha and IA64. 
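
   Illustrative usage (a hedged sketch, not taken from this module; "some_va"
   and "bpt_entry" are hypothetical locals, and the mode argument is assumed
   to be one of the access-mode constants such as PSL$C_KERNEL):

       PTE_PQ bpt_entry;

       bpt_entry = bpte_va (some_va, PSL$C_KERNEL);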
*/
#pragma inline ($$$bpte_va)
static PTE_PQ $$$bpte_va (int64 addr, uint32 mode)
{
    extern PTE_PQ __RUNCONST VMS_MACROS$BPT_BASE [4];
#ifndef __SYSBOOT
    extern PTE_PQ __RUNCONST VMS_MACROS$PML4_BASE [4];
    extern PTE_PQ __RUNCONST VMS_MACROS$PML5_BASE [4];
#endif
    extern __RUNCONST unsigned __int64 VMS_MACROS$PTE_LEVELS;
    __int64 mask;
    __int64 offset;
    __int64 va_pte;

#if defined(TEST$MMG)
    va_pte = test$mmg_bpte_va( addr );
    if (va_pte != 0) return ((PTE_PQ) va_pte);
#endif

    mask = MMG$$C_PML4_MASK | MMG$$C_PDPT_MASK | MMG$$C_PD_MASK | MMG$$C_BPT_MASK;
    if (VMS_MACROS$PTE_LEVELS == 5) mask |= MMG$$C_PML5_MASK;
    offset = (addr & mask) >> MMG$$C_BPT_OFFPOS;
#ifndef __SYSBOOT
    offset &= ~0xF;                        /* Ensure PTE for 8K page */
#endif
    va_pte = (uint64) VMS_MACROS$BPT_BASE [mode] + offset;
#ifndef __SYSBOOT
    /* If va_pte is the self map PTE in the top level page table, fix up based on mode */
    if (VMS_MACROS$PTE_LEVELS == 4)
    {
        if ((va_pte & ~0x10ull) == (__int64) VMS_MACROS$PML4_BASE[mode]+(PTE$C_BYTES_PER_PTE*(MMG$$C_PTES_PER_PAGE/2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }
    else
    {
        if ((va_pte & ~0x10ull) == (__int64) VMS_MACROS$PML5_BASE[mode]+(PTE$C_BYTES_PER_PTE*(MMG$$C_PTES_PER_PAGE/2)))
            va_pte += (mode % 2) * PTE$C_BYTES_PER_PTE;
    }
#endif
    return ((PTE_PQ) va_pte);
}   /* end $$$bpte_va */

#define bpte_va(addr,mode) $$$bpte_va ((int64) (addr), (uint32) (mode))

static PTE_PQ $$$dummy1 (uint64 addr)
{
    return (PTE_PQ) 0;
}

#define l1pte_va(addr) $$$dummy1 ((uint64) (addr))
#define l2pte_va(addr) $$$dummy1 ((uint64) (addr))
#define pte_va(addr) $$$dummy1 ((uint64) (addr))

#define svapte_to_va_pte (PTE * svapte) \
#define $$$SVAPTE_TO_VA 1
#if defined($$$SVAPTE_TO_VA)
#error svapte_to_va_pte is only supported on Alpha and IA64
#endif

static PTE * va_pte_to_svapte (PTE_PQ va_pte)
{
    return (PTE *) 0;
}

#define va2pa_64( va ) mmg$va2pa_64( va )

#endif /* if __INITIAL_POINTER_SIZE */
#endif /* ifdef __INITIAL_POINTER_SIZE */

#define make_va_s0(vpn) (0)
#define extract_va_s0(vpn) (0)
#define extract_pte_offset(va) (0)

#else /* ifdef __x86_64 Verified for x86 port--Drew Mason */

/* The following code is only available to modules that compile with the */
/* pointer_size qualifier. Short, long, 32, or 64 are all okay. */

#ifdef __INITIAL_POINTER_SIZE
#if __INITIAL_POINTER_SIZE

/* Macro to return the VA of the L1PTE that maps the virtual address passed in.

   L1PTE_VA = MMG$GQ_L1_BASE[VA] + 8*VA
*/
#pragma inline ($$$l1pte_va)
static PTE_PQ $$$l1pte_va (uint64 addr)
{
    extern PTE_PQ __RUNCONST mmg$gq_l1_base[VA$C_VRNX_COUNT];
    int vrnx;
    unsigned __int64 va_pte;
#ifdef __NEW_STARLET
    VA v;
    v.va$q_quad = addr;
#else
    va v;
    v.va$q_quad[0] = addr;
    v.va$q_quad[1] = addr>>32;
#endif
    vrnx = v.va$v_vrnx;
    va_pte = (uint64) mmg$gq_l1_base[vrnx] +
             ((addr & ~mmg$gq_non_va_mask) >> 3*mmg$gq_level_width) & (uint64) ~(PTE$C_BYTES_PER_PTE-1);
    return ((PTE_PQ) va_pte);
}
#define l1pte_va(addr) $$$l1pte_va((uint64)(addr))

/* Macro to return the VA of the L2PTE that maps the virtual address passed in.
L2PTE_VA = MMG$GQ_L2_BASE[VA] + 8*VA */ #pragma inline ($$$l2pte_va) static PTE_PQ $$$l2pte_va (uint64 addr) { extern PTE_PQ __RUNCONST mmg$gq_l2_base[VA$C_VRNX_COUNT]; extern __RUNCONST uint64 mmg$gq_non_va_mask; extern __RUNCONST uint64 mmg$gq_level_width; int vrnx; unsigned __int64 va_pte; #ifdef __NEW_STARLET VA v; v.va$q_quad = addr; #else va v; v.va$q_quad[0] = addr; v.va$q_quad[1] = addr>>32; #endif vrnx = v.va$v_vrnx; va_pte = (uint64) mmg$gq_l2_base[vrnx] + ((addr & ~mmg$gq_non_va_mask) >> 2*mmg$gq_level_width) & (uint64) ~(PTE$C_BYTES_PER_PTE-1); return ((PTE_PQ)va_pte); } #define l2pte_va(addr) $$$l2pte_va((uint64)(addr)) /* Macro to return the VA of the PTE that maps the virtual address passed in. PTE_VA = MMG$GQ_PT_BASE[VA] + 8*VA */ #pragma inline ($$$pte_va) static PTE_PQ $$$pte_va (uint64 addr) { extern PTE_PQ __RUNCONST mmg$gq_pt_base[VA$C_VRNX_COUNT]; extern __RUNCONST uint64 mmg$gq_non_pt_mask; extern __RUNCONST uint64 mmg$gq_level_width; int vrnx; unsigned __int64 va_pte; #ifdef __NEW_STARLET VA v; v.va$q_quad = addr; #else va v; v.va$q_quad[0] = addr; v.va$q_quad[1] = addr>>32; #endif vrnx = v.va$v_vrnx; va_pte = (uint64) mmg$gq_pt_base[vrnx] + ((addr & ~mmg$gq_non_pt_mask) >> mmg$gq_level_width); return ((PTE_PQ)va_pte); } #define pte_va(addr) $$$pte_va((uint64)(addr)) /* bpte_va has a mode parameterm but the parameter is ignored on Alpha/IA64. */ #define bpte_va(addr,mode) $$$pte_va((uint64)(addr)) #endif /* if __INITIAL_POINTER_SIZE */ #endif /* ifdef __INITIAL_POINTER_SIZE */ #endif #ifdef __INITIAL_POINTER_SIZE #if __INITIAL_POINTER_SIZE /* Routine to return the VA mapped by the VA_PTE passed in. VA = ((VA_PTE - MMG$GQ_PT_BASE[VA])/PTE_SIZE) * PAGE_SIZE */ #pragma inline ($$$va_pte_to_va) static VOID_PQ $$$va_pte_to_va (PTE_PQ va_pte) { #ifdef __x86_64 /* Verified for x86 port--Greg Jordan */ extern PTE_PQ __RUNCONST mmg$gq_bpt_vrnx_base[VA$C_VRNX_COUNT]; #else extern PTE_PQ __RUNCONST mmg$gq_pt_base[VA$C_VRNX_COUNT]; #endif extern __RUNCONST uint64 mmg$gq_va_bits; extern __RUNCONST uint64 mmg$gq_level_width; uint64 temp1; int vrnx; #ifdef __NEW_STARLET VA v; VA temp2; v.va$q_quad = (uint64)va_pte; #else va v; va temp2; temp1 = (uint64)va_pte; v.va$q_quad[0] = (unsigned int)temp1; v.va$q_quad[1] = (unsigned int)(temp1 >> 32); #endif vrnx = v.va$v_vrnx; #ifdef __x86_64 /* Verified for x86 port--Greg Jordan */ temp1 = ((uint64)va_pte - (uint64) mmg$gq_bpt_vrnx_base[vrnx]) << MMG$$C_PTE_SEGMENT_SIZE; #else temp1 = ((uint64)va_pte - (uint64) mmg$gq_pt_base[vrnx]) << mmg$gq_level_width; #endif /* Since the above statement creates an address that does not have the bits above the L1 MSB set according to the setting of the L1 MSB, the following code handles this contingency. 
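   As a concrete illustration (hedged; the actual width comes from
   mmg$gq_va_bits at run time): if mmg$gq_va_bits were 48 and bit 47 of temp1
   were set, then (int64)(temp1 << 16) is negative and the OR with
   ((int64) -1 << 48) forces bits 63:48 on, yielding a canonically
   sign-extended virtual address; if bit 47 were clear, temp1 is left
   unchanged.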
*/ #if defined(__alpha) || defined(__x86_64) /* Verified for x86 port - Clair Grant */ if ((int64) (temp1 << (64 - mmg$gq_va_bits)) < 0) temp1 = temp1 | ((int64) -1 << mmg$gq_va_bits); #endif /* IA64 we now have to insert the vrnx bits */ #ifdef __ia64 /* Verified for x86 port--Drew Mason */ if (vrnx&1) temp1 = temp1 | ((int64) -1 << mmg$gq_va_bits); # ifdef __NEW_STARLET temp2.va$q_quad = temp1; temp2.va$v_vrnx = vrnx; temp1 = temp2.va$q_quad; # else temp2.va$q_quad[0] = temp1; temp2.va$q_quad[1] = temp1>>32; temp2.va$v_vrnx = vrnx; temp1 = temp2.va$q_quad[0]; temp1 |= (uint64)temp2.va$q_quad[1]<<32; # endif #endif /* __ia64 */ return ((VOID_PQ) temp1); } #define va_pte_to_va(va_pte) $$$va_pte_to_va((PTE_PQ)(va_pte)) #endif /* if __INITIAL_POINTER_SIZE */ #endif /* ifdef __INITIAL_POINTER_SIZE */ #if defined(__alpha) || defined(__ia64) /* Verified for x86 port - Clair Grant */ #ifdef __INITIAL_POINTER_SIZE #if __INITIAL_POINTER_SIZE /* Routine to convert a SVAPTE to a VA_PTE. */ #pragma inline (svapte_to_va_pte) static PTE_PQ svapte_to_va_pte (PTE *svapte) { extern PTE * __RUNCONST mmg$gl_sptbase; extern PTE_PQ __RUNCONST mmg$gq_s0s1base_pte_address; return ((PTE_PQ) ((uint64) mmg$gq_s0s1base_pte_address + (uint64) ((int) svapte - (int) mmg$gl_sptbase))); } /* Routine to convert a VA_PTE to a SVAPTE. */ #pragma inline (va_pte_to_svapte) static PTE * va_pte_to_svapte (PTE_PQ va_pte) { extern PTE * __RUNCONST mmg$gl_sptbase; extern PTE_PQ __RUNCONST mmg$gq_s0s1base_pte_address; return ((PTE *) ((int) mmg$gl_sptbase + (int) ((uint64) va_pte - (uint64) mmg$gq_s0s1base_pte_address))); } #endif /* if __INITIAL_POINTER_SIZE */ #endif /* ifdef __INITIAL_POINTER_SIZE */ /* These macros, MAKE_VA_xx, are similar to $MAKE_VA for MACRO and BLISS but not as sophisticated. They will convert a virtual page number (VPN) to an address (the first byte of the page) for the specified address space. These macros ALWAYS assume page-size-independent code and that the LIB symbols VA$M_xxx are defined and you're linking against SYS$BASE_IMAGE. */ #define make_va_s0(vpn) ((vpn << mmg$gl_vpn_to_va) | VA$M_SYSTEM) /* These macros, EXTRACT_VA_xx, are similar to $EXTRACT_VA for MACRO and BLISS but not as sophisticated. They will convert a virtual address to a virtual page number (VPN) for the specified address space. These macros ALWAYS assume page-size-independent code and that the LIB symbols VA$M_xxx are defined and you're linking against SYS$BASE_IMAGE. */ #define extract_va_s0(va) ((va & (~ VA$M_SYSTEM)) >> mmg$gl_vpn_to_va) /* These macros, EXTRACT_PTE_OFFSET_xx, are similar to $EXTRACT_PTE_OFFSET for MACRO and BLISS but not as sophisticated. They will convert a virtual address to a PTE offset for the specified address space. These macros ALWAYS assume page-size-independent code and that the LIB symbols VA$M_xxx are defined and you're linking against SYS$BASE_IMAGE. */ #define extract_pte_offset_s0(va) \ ((va & (~ VA$M_SYSTEM)) >> mmg$gl_pte_offset_to_va) #endif /* ifdef __x86_64 else */ /* These macros and static routines will perform various operations on PTEs and PFNs. Extracting a PFN from a PTE $extract_pfn( PTE pte ) Inserting a PFN into a PTE $insert_pfn( PTE pte, PFN pfn ) Converting a PFN to a PA $pfn_to_pa( uint64 pfn ) Getting the PA for a PTE $extract_pa( PTE pte ) Converting a PA to a PFN $pa_to_pfn ( uint64 pa ) For X86, _2mb and _1g variants exist. 
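
   As an illustration only (a hedged sketch; "pte" is assumed to already hold
   a valid PTE, and "pfn"/"pa" are hypothetical locals, not part of this
   interface), the macros compose as follows:

       uint64 pfn, pa;

       pfn = $extract_pfn( pte );         - PFN field of the PTE
       pa  = $pfn_to_pa( pfn );           - physical byte address of that PFN
       pfn = $pa_to_pfn( pa );            - and back to a PFN
       $insert_pfn( pte, pfn );           - store the PFN back into the PTE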
*/ #define $pa_to_pfn( pa ) ((pa) >> MMG$$C_PA_TO_PFN_SHIFT) #if defined(__alpha) || defined(__ia64) /* Verified for x86 port - Greg Jordan */ #define $extract_pfn( pte ) (pte).pte$v_pfn #define $insert_pfn( pte, pfn ) (pte).pte$v_pfn = (pfn) #define $pfn_to_pa( pfn ) ((pfn) << MMG$$C_PA_TO_PFN_SHIFT) #pragma inline ($extract_pa) static uint64 $extract_pa (PTE pte) { uint64 pfn; pfn = $extract_pfn( pte ); return $pfn_to_pa( pfn ); } #elif defined(__x86_64) #define $extract_pfn( pte ) (pte).pte$v_pfn_4k #define $insert_pfn( pte, pfn ) (pte).pte$v_pfn_4k = (pfn) #define $extract_pfn_2mb( pte ) (pte).pte$v_pfn_2mb #define $extract_pfn_1g( pte ) (pte).pte$v_pfn_1g #define $pfn_to_pa( pfn ) ((pfn) << MMG$$C_PFN_TO_PA_SHIFT) #define $pfn_to_pa_2mb( pfn ) ((pfn) << MMG$$C_PFN_2MB_TO_PA_SHIFT) #define $pfn_to_pa_1g( pfn ) ((pfn) << MMG$$C_PFN_1GB_TO_PA_SHIFT) #pragma inline ($extract_pa) static uint64 $extract_pa (PTE pte) { uint64 pfn; pfn = $extract_pfn( pte ); return $pfn_to_pa( pfn ); } #pragma inline ($extract_pa_2mb) static uint64 $extract_pa_2mb (PTE pte) { uint64 pfn; pfn = $extract_pfn_2mb( pte ); return $pfn_to_pa_2mb( pfn ); } #pragma inline ($extract_pa_1g) static uint64 $extract_pa_1g (PTE pte) { uint64 pfn; pfn = $extract_pfn_1g( pte ); return $pfn_to_pa_1g( pfn ); } #else #error Need architecture specific work here #endif #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __save /* Save the previously-defined required ptr size */ #pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */ #endif /* Guard against anyone using this macro with their module compiled with long pointers */ typedef struct _spl ** SPL_PPL; /* Short pointer to a short pointer to an SPL structure */ #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __restore /* Restore the previously-defined required ptr size */ #endif /* Define some constants used by the various spinlock acquisition and release macros */ #define NOSAVE_IPL ((int *) 0) /* don't save original IPL */ #define NOLOWER_IPL -1 /* don't lower IPL on unlock */ #define NORAISE_IPL 0 /* don't raise IPL on lock */ #define RAISE_IPL 1 /* do raise IPL on lock */ #define SMP_RELEASE 0 /* unconditionally release spinlock */ #define SMP_RESTORE 1 /* conditionally release spinlock */ /* These macros are similar to the LOCK macro (in MACRO-32) to acquire a spinlock and/or raise IPL. They do NOT take out a mutex, however. This is a separate function. lockname = Name of the spinlock in uppercase (IOLOCK8, etc.) change_ipl = 0 => No, 1 => Yes saved_ipl = Address of variable (int) to receive the previous IPL (or zero) status = address of variable (int) to receive the status of the lock operation For example, to take out IOLOCK8, change IPL, and save the previous IPL: int old_ipl; sys_lock(IOLOCK8,1,&old_ipl); and if you didn't want to save the previous IPL: sys_lock(IOLOCK8,1,0); The _NOSPIN variants return a status of either SS$_NORMAL or SS$_LOCKINUSE, depending if the spinlock was locked or not. 
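
   As a hedged sketch of the _NOSPIN form (old_ipl and status are hypothetical
   locals; what to do when the lock is busy is up to the caller and is not
   shown here):

       int old_ipl;
       int status;

       sys_lock_nospin(IOLOCK8,RAISE_IPL,&old_ipl,&status);
       if (status == SS$_NORMAL)
       {
           ...critical section...
           sys_unlock(IOLOCK8,old_ipl,SMP_RELEASE);
       }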
*/ #define sys_lock(lockname,change_ipl,saved_ipl) \ { \ extern SMP smp$gl_flags; \ extern SPL_PPL smp$ar_spnlkvec; \ \ if (saved_ipl != 0) \ *(int *)saved_ipl = mfpr_ipl; \ \ if (change_ipl == 0) \ smp_std$acqnoipl(smp$ar_spnlkvec[concat_sym(SPL$C_,lockname)]); \ else \ if (smp$gl_flags.smp$v_enabled) \ smp_std$acquire(concat_sym(SPL$C_,lockname)); \ else \ mtpr_ipl(concat_sym(IPL$_,lockname)); \ } #define sys_lock_shr(lockname,change_ipl,saved_ipl) \ { \ extern SMP smp$gl_flags; \ extern SPL_PPL smp$ar_spnlkvec; \ \ if (saved_ipl != 0) \ *(int *)saved_ipl = mfpr_ipl; \ \ if (change_ipl == 0) \ smp_std$acqnoipl_shr(smp$ar_spnlkvec[concat_sym(SPL$C_,lockname)]); \ else \ if (smp$gl_flags.smp$v_enabled) \ smp_std$acquire_shr(concat_sym(SPL$C_,lockname)); \ else \ mtpr_ipl(concat_sym(IPL$_,lockname)); \ } #define sys_lock_nospin(lockname,change_ipl,saved_ipl,status) \ { \ extern SMP smp$gl_flags; \ extern SPL_PPL smp$ar_spnlkvec; \ \ if (saved_ipl != 0) \ *(int *)saved_ipl = mfpr_ipl; \ \ if (change_ipl == 0) \ *(int *)status = smp_std$acqnoipl_nospin(smp$ar_spnlkvec[concat_sym(SPL$C_,lockname)]); \ else \ if (smp$gl_flags.smp$v_enabled) \ *(int *)status = smp_std$acquire_nospin(concat_sym(SPL$C_,lockname)); \ else \ { \ mtpr_ipl(concat_sym(IPL$_,lockname)); \ *(int *)status = SS$_NORMAL; \ } \ } #define sys_lock_shr_nospin(lockname,change_ipl,saved_ipl,status) \ { \ extern SMP smp$gl_flags; \ extern SPL_PPL smp$ar_spnlkvec; \ \ if (saved_ipl != 0) \ *(int *)saved_ipl = mfpr_ipl; \ \ if (change_ipl == 0) \ *(int *)status = smp_std$acqnoipl_shr_nospin(smp$ar_spnlkvec[concat_sym(SPL$C_,lockname)]); \ else \ if (smp$gl_flags.smp$v_enabled) \ *(int *)status = smp_std$acquire_shr_nospin(concat_sym(SPL$C_,lockname)); \ else \ { \ mtpr_ipl(concat_sym(IPL$_,lockname)); \ *(int *)status = SS$_NORMAL; \ } \ } #define sys_lock_cvt_to_shr(lockname) \ { \ extern SMP smp$gl_flags; \ extern SPL_PPL smp$ar_spnlkvec; \ \ smp_std$cvt_to_shared(smp$ar_spnlkvec[concat_sym(SPL$C_,lockname)]); \ } #define sys_lock_cvt_to_ex(lockname,status) \ { \ extern SMP smp$gl_flags; \ extern SPL_PPL smp$ar_spnlkvec; \ \ *(int *)status = smp_std$cvt_to_ex(smp$ar_spnlkvec[concat_sym(SPL$C_,lockname)]); \ } /* These macros are similar to the UNLOCK macro (in MACRO-32) to release a spinlock and/or lower IPL. They do NOT release a mutex, however. This is a separate function. lockname = Name of the spinlock in uppercase (IOLOCK8, etc.) new_ipl = if >= 0, then this is to be the new IPL (< 0 implies no change) restore = if != 0, then use SMP restore function, else use release. */ #define sys_unlock(lockname,new_ipl,restore) \ { \ extern SMP smp$gl_flags; \ \ if (smp$gl_flags.smp$v_enabled) \ if (restore != 0) \ smp_std$restore(concat_sym(SPL$C_,lockname)); \ else \ smp_std$release(concat_sym(SPL$C_,lockname)); \ if ((int)(new_ipl) >= 0) \ mtpr_ipl(new_ipl); \ } #define sys_unlock_shr(lockname,new_ipl,restore) \ { \ extern SMP smp$gl_flags; \ \ if (smp$gl_flags.smp$v_enabled) \ { \ if (restore != 0) \ smp_std$restore_shr(concat_sym(SPL$C_,lockname)); \ else \ smp_std$release_shr(concat_sym(SPL$C_,lockname)); \ } \ if ((int)(new_ipl) >= 0) \ mtpr_ipl(new_ipl); \ } /******************************************************************************* * * The LOCK_CPU_MUTEX and UNLOCK_CPU_MUTEX macros are similar to the * Macro32 LOCK and UNLOCK macros for acquiring and releasing the * CPU mutex, the only mutex ever acquired or released by those * macros. 
* * The LOCK_CPU_MUTEX and UNLOCK_CPU_MUTEX macros do not take a * mutex parameter and instead deal exclusively with the CPU mutex. * The caller can only specify whether shared or exclusive access is * required. * * Acquiring the CPU mutex in shared mode will restore the IPL, but * if the CPU mutex is acquired in exclusive mode, then IPL remains * at IPL$_POWER and it is the responsibility of the caller to restore * the IPL after releasing the CPU mutex. * * Shared Usage: * lock_cpu_mutex (1); * ... * ... * unlock_cpu_mutex (1); * * Exclusive Usage: * saved_ipl = __PAL_MFPR_IPL(); * lock_cpu_mutex (0); * ... * ... * unlock_cpu_mutex (0); * setipl (saved_ipl); * * */ #ifdef __alpha /* Verified for x86 port - Clair Grant */ #define CMPXCHG8(ptr,old,new) __CMP_STORE_QUAD(ptr,old,new,ptr) #else #define CMPXCHG8(ptr,old,new) __CMP_SWAP_QUAD(ptr,old,new) #endif #define lock_cpu_mutex(share) \ { \ int status; \ int saved_ipl; \ int drop_ipl; \ int retry; \ int64 delta_time; \ int64 end_time; \ CPU *cpu; \ MUTEX old_mutex; \ MUTEX new_mutex; \ extern int sgn$gl_smp_spinwait; \ extern SYS_TIME_CONTROL exe$gl_time_control; \ extern volatile struct _mutex smp$gq_cpu_mutex; \ \ \ /* */ \ /* Disable interrupts, initialize drop IPL and */ \ /* retrieve CPU database address */ \ /* */ \ saved_ipl = setipl ( IPL$_POWER ); \ if ( saved_ipl >= IPL$_SCHED ) \ drop_ipl = saved_ipl; \ else \ drop_ipl = IPL$_SCHED; \ cpu = (CPU *) find_cpu_data; \ retry = 0; \ \ /* */ \ /* Compute endtime token */ \ /* */ \ int64 smp_spinwait = sgn$gl_smp_spinwait; \ delta_time = smp_spinwait << 13ull; \ exe$timedwait_setup ( &delta_time, &end_time ); \ \ /* */ \ /* Loop to increment mutex owner count atomically */ \ /* */ \ do \ { \ /* */ \ /* Issue memory barrier and read current mutex */ \ /* */ \ __MB(); \ old_mutex.mutex$q_quadword = smp$gq_cpu_mutex.mutex$q_quadword; \ new_mutex.mutex$q_quadword = old_mutex.mutex$q_quadword; \ \ /* */ \ /* check if mutex is free or not */ \ /* */ \ if ( old_mutex.mutex$v_interlock ) \ { \ /* */ \ /* Give the IPINT handler a chance */ \ /* */ \ setipl ( drop_ipl ); \ setipl ( IPL$_POWER ); \ \ /* */ \ /* Check for bugcheck requests. */ \ /* */ \ if ( cpu->cpu$v_bugchk ) bug_check ( CPUEXIT, FATAL, COLD ); \ \ /* */ \ /* If SMP timeouts are not disabled check for timeout, */ \ /* then try another wait loop */ \ /* */ \ if ( !exe$gl_time_control.exe$v_nospinwait ) \ { \ status = exe$timedwait_complete ( &end_time ); \ if ( !$VMS_STATUS_SUCCESS(status) ) \ { \ smp$timeout(); \ exe$timedwait_setup ( &delta_time, &end_time ); \ } \ } \ } \ else \ { \ /* */ \ /* Mutex is free (interlock bit clear) */ \ /* For shared access, only increment the owner count. */ \ /* For exclusive access, only set the interlock bit */ \ /* after making sure it is not owned. 
*/ \ /* */ \ if ( share ) \ { \ new_mutex.mutex$l_owncnt++; \ retry = CMPXCHG8 ( &smp$gq_cpu_mutex, \ old_mutex.mutex$q_quadword, \ new_mutex.mutex$q_quadword ); \ } \ else \ { \ if ( (int)new_mutex.mutex$l_owncnt == -1 ) \ { \ new_mutex.mutex$v_interlock = 1; \ retry = CMPXCHG8 ( &smp$gq_cpu_mutex, \ old_mutex.mutex$q_quadword, \ new_mutex.mutex$q_quadword ); \ } \ } \ } \ \ } while ( !retry ); \ \ /* */ \ /* record this mutex held-count and restore IPL */ \ /* shared access */ \ /* */ \ __MB(); \ cpu->cpu$l_cpumtx++; \ if ( share ) setipl ( saved_ipl ); \ } #define unlock_cpu_mutex(share) \ { \ int status; \ int saved_ipl; \ int drop_ipl; \ int retry; \ int64 delta_time; \ int64 end_time; \ CPU *cpu; \ MUTEX old_mutex; \ MUTEX new_mutex; \ extern int sgn$gl_smp_spinwait; \ extern SYS_TIME_CONTROL exe$gl_time_control; \ extern volatile struct _mutex smp$gq_cpu_mutex; \ \ \ if ( share ) \ { \ /* */ \ /* Disable interrupts, initialize drop IPL and */ \ /* retrieve CPU database address */ \ /* */ \ saved_ipl = setipl ( IPL$_POWER ); \ if ( saved_ipl >= IPL$_SCHED ) \ drop_ipl = saved_ipl; \ else \ drop_ipl = IPL$_SCHED; \ cpu = (CPU *) find_cpu_data; \ retry = 0; \ \ /* */ \ /* Compute endtime token */ \ /* */ \ int64 smp_spinwait = sgn$gl_smp_spinwait; \ delta_time = smp_spinwait << 13ull; \ exe$timedwait_setup ( &delta_time, &end_time ); \ \ /* */ \ /* Loop to decrement CPU mutex owner count atomically */ \ /* */ \ do \ { \ /* */ \ /* Issue memory barrier and read current CPU mutex */ \ /* */ \ __MB(); \ old_mutex.mutex$q_quadword = smp$gq_cpu_mutex.mutex$q_quadword; \ new_mutex.mutex$q_quadword = old_mutex.mutex$q_quadword; \ \ /* */ \ /* check if CPU mutex is free or not */ \ /* */ \ if ( old_mutex.mutex$v_interlock ) \ { \ /* */ \ /* Give the IPINT handler a chance */ \ /* */ \ setipl ( drop_ipl ); \ setipl ( IPL$_POWER ); \ \ /* */ \ /* Check for bugcheck requests. */ \ /* */ \ if ( cpu->cpu$v_bugchk ) bug_check ( CPUEXIT, FATAL, COLD ); \ \ /* */ \ /* If SMP timeouts are not disabled check for timeout, */ \ /* then try another wait loop */ \ /* */ \ if ( !exe$gl_time_control.exe$v_nospinwait ) \ { \ status = exe$timedwait_complete ( &end_time ); \ if ( !$VMS_STATUS_SUCCESS(status) ) \ { \ smp$timeout(); \ exe$timedwait_setup ( &delta_time, &end_time ); \ } \ } \ } \ else \ { \ /* */ \ /* Decrement owner count */ \ /* */ \ new_mutex.mutex$l_owncnt--; \ retry = CMPXCHG8 ( &smp$gq_cpu_mutex, \ old_mutex.mutex$q_quadword, \ new_mutex.mutex$q_quadword ); \ } \ \ } while ( !retry ); \ \ /* */ \ /* rundown this CPU's mutex held-count and restore IPL */ \ /* */ \ __MB(); \ cpu->cpu$l_cpumtx--; \ setipl ( saved_ipl ); \ } \ else \ { \ /* */ \ /* Interrupts are already disabled, so just retrieve */ \ /* CPU database address */ \ /* */ \ cpu = (CPU *) find_cpu_data; \ \ /* */ \ /* decrement this CPU's mutex held-count */ \ /* */ \ cpu->cpu$l_cpumtx--; \ \ /* */ \ /* Clear the interlocked bit atomically */ \ /* */ \ __ATOMIC_DECREMENT_QUAD ( &smp$gq_cpu_mutex ); \ \ /* */ \ /* Synchronize with any TB invalidates that might have */ \ /* occured in the active set prior to this CPU joining */ \ /* the active set. */ \ /* */ \ mtpr_tbia; \ } \ } /*******************************************************************************/ /* This vms_assert macro is intended to provide C "assert" behavior in a twisted, perverted, VMS fashion...That is, if the specified expression turns out to be false (evaluated VMS-style) at run-time, fatal bugcheck. Otherwise, do nothing. 
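   For example, a hedged sketch ("status" and "some_routine" are hypothetical
   and shown only for illustration):

       int status;

       status = some_routine ();
       vms_assert (status);          - fatal ASSERTFAIL bugcheck if the low
                                       bit of status is clear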
The macro also expands to nothing in a non-debug (NDEBUG) build.
*/

#ifdef NDEBUG
#define vms_assert(ignore)
#else
#define vms_assert(expr) \
        { if (!((expr) & 1)) bug_check(ASSERTFAIL,FATAL,COLD) }
#endif

/* This macro improves readability of VMS code which checks status return
   values for success/failure based on the low bit of the status value. In C,
   the test for this is slightly uglier than in MACRO and BLISS and can be
   hidden inside a macro which makes it clear what the code is trying to do.
   This macro takes a single argument, the return status or function return
   value, and returns true (1) or false (0) based on the low bit of this
   value.
*/

#define good_status(status) (((status) & 1) == 1)
#define bad_status(status) (((status) & 1) == 0)

/* The following definitions of constants and system data cells are needed by
   the TB invalidate routines which follow them.
*/

#define NO_PCB ((PCB *) 0)
#define THIS_CPU_ONLY 1
#define ALL_CPUS 2
#define ASSUME_PRIVATE 3
#define ASSUME_SHARED 4

extern SMP smp$gl_flags;

/* TB Invalidate All Entries (System and Process)

   TBI_ALL ENVIRON

   ENVIRON  = "THIS_CPU_ONLY" indicates that this invocation of TBIA is to be
              executed strictly within the context of the local CPU only.
              Thus, no attempt is made whatsoever to extend the TBIA request
              to any CPU or other 'processor' that might exist within the
              system.

            = "ALL_CPUS" forces the TBIA to be extended to all components of
              the system that may have cached PTEs.
*/

#define tbi_all(environ) { \
 \
        if (environ == THIS_CPU_ONLY) \
            mtpr_tbia; \
        else \
            mmg$tbi_all(); \
        }

/* TB Invalidate Data Single 64

   TBI_DATA_64 ADDR, ENVIRON, PCBADDR

   ADDR     = 64-bit Virtual Address to be invalidated.

   ENVIRON  = "THIS_CPU_ONLY" indicates that this invocation of TBISD is to be
              executed strictly within the context of the local CPU only.
              Thus, no attempt is made whatsoever to extend the TBISD request
              to any CPU or other 'processor' that might exist within the
              system.

            = "ASSUME_PRIVATE" indicates that this is a threads environment
              and that the address should be treated as a private address and
              not checked. Therefore, in an SMP environment, we need to do the
              invalidate to other CPUs which are running a kernel thread from
              this process. This argument is used for system space addresses
              which should be treated as private to the process (e.g. for
              L2PTE's which are also mapped in "page table space").

            = "ASSUME_SHARED" indicates that this invocation of TBISD should
              be broadcast to all other CPUs in the system. ASSUME_SHARED is
              the exact opposite of THIS_CPU_ONLY.

            = "ALL_CPUS" forces the TB invalidate to be extended to all
              components of the system that may have cached PTEs.

   PCBADDR  = Address of current process control block. The NO_PCB symbol can
              be used for this argument if the PCB address is not required
              (for example, when using the qualifier, ENVIRON=THIS_CPU_ONLY or
              ENVIRON=ASSUME_SHARED).
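
   Illustrative usage (a hedged sketch; "va" is a hypothetical 64-bit virtual
   address and "pcb" a hypothetical current PCB address):

       tbi_data_64 (va, THIS_CPU_ONLY, NO_PCB);      - local CPU only
       tbi_data_64 (va, ALL_CPUS, pcb);              - extend to all CPUs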
*/ #define tbi_data_64(addr,environ,pcbaddr) { \ \ switch (environ) \ { \ case THIS_CPU_ONLY: \ mtpr_tbisd(addr); \ break; \ case ALL_CPUS: \ if (smp$gl_flags.smp$v_enabled == 0) \ mtpr_tbisd(addr); \ else \ if ($is_shared_va(addr)) \ mmg_std$tbi_data_64(addr); \ else \ { \ if (pcbaddr->pcb$l_multithread <= 1) \ mtpr_tbisd(addr); \ else \ mmg_std$tbi_data_64_threads(addr); \ } \ break; \ case ASSUME_PRIVATE: \ if (smp$gl_flags.smp$v_enabled == 0) \ mtpr_tbisd(addr); \ else \ if (pcbaddr->pcb$l_multithread <= 1) \ mtpr_tbisd(addr); \ else \ mmg_std$tbi_data_64_threads(addr); \ break; \ case ASSUME_SHARED: \ if (smp$gl_flags.smp$v_enabled == 0) \ mtpr_tbisd(addr); \ else \ mmg_std$tbi_data_64(addr); \ break; \ } \ } /* TB Invalidate Single TBI_SINGLE ADDR, ENVIRON, PCBADDR ADDR = Virtual Address to be invalidated. ENVIRON = "THIS_CPU_ONLY" indicates that this invocation of TBIS is to be executed strictly within the context of the local CPU only. Thus, no attempt is made whatsoever to extend the TBIS request to any CPU or other 'processor' that might exist within the system. = "ASSUME_PRIVATE" indicates that this is a threads environment and that the address should be treated as a private address and not checked. Therefore, in an SMP environment, we need to do the invalidate to other CPUs which are running a kernel thread from this process. This argument is used for system space addresses which should be treated as private to the process (e.g. for L2PTE's which are also mapped in "page table space"). = "ASSUME_SHARED" indicates that this invocation of TBIS should be broadcast to all other CPUs in the system. ASSUME_ SHARED is the exact opposite of THIS_CPU_ONLY. = "ALL_CPUS" forces the TB invalidate to be extended to all components of the system that may have cached PTEs. PCBADDR = Address of current process control block. The NO_PCB symbol can be used for this argument if the PCB address is not required (for example, when using the qualifier, ENVIRON=THIS_CPU_ONLY). */ #define tbi_single(addr,environ,pcbaddr) { \ \ switch (environ) \ { \ case THIS_CPU_ONLY: \ mtpr_tbis(addr); \ break; \ case ALL_CPUS: \ if (smp$gl_flags.smp$v_enabled == 0) \ mtpr_tbis(addr); \ else \ if ($is_shared_va(addr)) \ mmg$tbi_single(addr); \ else \ { \ if (pcbaddr->pcb$l_multithread <= 1) \ mtpr_tbis(addr); \ else \ mmg$tbi_single_threads(addr); \ } \ break; \ case ASSUME_PRIVATE: \ if (smp$gl_flags.smp$v_enabled == 0) \ mtpr_tbis(addr); \ else \ if (pcbaddr->pcb$l_multithread <= 1) \ mtpr_tbis(addr); \ else \ mmg$tbi_single_threads(addr); \ break; \ case ASSUME_SHARED: \ if (smp$gl_flags.smp$v_enabled == 0) \ mtpr_tbis(addr); \ else \ mmg$tbi_single(addr); \ break; \ } \ } /* * Convert bitmask to bit number. I.e., the xxx$V_yyy version of xxx$M_yyy. * The name is uppercase to reflect the fact that the input is a compile-time * constant, such as IRP$M_ERASE. * * Input (mask) Output (position) * 0x0001 0 * 0x0002 1 * 0x0004 2 * ... * 0x4000 14 * etc. * * Currently limited to 32 bit wide single-bit masks. */ #define MASK_TO_POSITION(m) \ (m>>24 ? (m>>31?31:m>>30?30:m>>29?29:m>>28?28: \ m>>27?27:m>>26?26:m>>25?25:24) : \ m>>16 ? (m>>23?23:m>>22?22:m>>21?21:m>>20?20: \ m>>19?19:m>>18?18:m>>17?17:16) : \ m>>8 ? (m>>15?15:m>>14?14:m>>13?13:m>>12?12: \ m>>11?11:m>>10?10:m>> 9? 9: 8) : \ (m>> 7? 7:m>> 6? 6:m>> 5? 5:m>> 4? 4: \ m>> 3? 3:m>> 2? 2:m>> 1? 1: 0) ) /* $get_item_code This macro fetches the contents of the item code field from an item list entry. 
Note that the item code field is in the same place for 32-bit and 64-bit item list entries. ARGUMENTS: item_list: Specifies the item list entry from which the item code is extracted. USAGE: item_code = $get_item_code (item_list); */ #define $get_item_code(item_list) \ ( ((ILEA_64_PQ)item_list)->ilea_64$w_code ) /* $GET_LENGTH This macro fetches the contents of the length field from an item list entry. ARGUMENTS: flag: A flag denoting the type of item list specified. Low bit set denotes a 64-bit item list, while low bit clear denotes a 32-bit item list. itemlist: Specifies the item list entry from which the length is extracted. USAGE: item_length = $get_length (flag, item_list); */ #define $get_length(flag,item_list) \ ( flag == 1 ? ((ILEA_64_PQ)item_list)->ilea_64$q_length : ((ILE2_PQ)item_list)->ile2$w_length ) /* $GET_BUFADDR This macro fetches the contents of the buffer address field from an item list entry. ARGUMENTS: flag: A flag denoting the type of item list specified. Low bit set denotes a 64-bit item list, while low bit clear denotes a 32-bit item list. item_list: Specifies the item list entry from which the buffer address is extracted. USAGE: item_bufaddr = $get_bufaddr (flag, item_list); */ #define $get_bufaddr(flag,item_list) \ ( flag == 1 ? ((ILEA_64_PQ)item_list)->ilea_64$pq_bufaddr : ((ILE2_PQ)item_list)->ile2$ps_bufaddr ) /* $GET_RETLEN_ADDR This macro fetches the contents of the return length address field from an item list entry. The return length address field only exists for item_list_3 and item_list_64_b item list types. ARGUMENTS: flag: A flag denoting the type of item list specified. Low bit set denotes a 64-bit item list, while low bit clear denotes a 32-bit item list. item_list: Specifies the item list entry from which the return length address is extracted. USAGE: item_retlen_addr = $get_retlen_addr (flag, item_list); */ #define $get_retlen_addr(flag,item_list) \ ( flag == 1 ? ((ILEB_64_PQ)item_list)->ileb_64$pq_retlen_addr : ((ILE3_PQ)item_list)->ile3$ps_retlen_addr ) /* $GET_ILE_FIELDS This macro fetches the contents of the item list entry fields and writes them to the user-supplied registers. ARGUMENTS: flag: A flag denoting the type of item list entry specified in the item list argument. Low bit set denotes a 64-bit item list, while low bit clear denotes a 32-bit item list. item_list: An item list entry from which to fetch the contents of the various fields. item_code: Contents of the item code field are recorded here. length: Contents of the length field are recorded here. bufaddr: Contents of the buffer address field are recorded here. retlen_addr: Contents of the return length address field are recorded here. USAGE: $get_ile_fields (flag, item_list, item_code, length, bufaddr, retlen_addr); */ #define $get_ile_fields(flag, item_list, item_code, length, bufaddr, retlen_addr) \ { \ if (flag == 1 ) \ { \ item_code = ((ILEB_64_PQ)item_list)->ileb_64$w_code; \ length = ((ILEB_64_PQ)item_list)->ileb_64$q_length; \ bufaddr = ((ILEB_64_PQ)item_list)->ileb_64$pq_bufaddr; \ retlen_addr = ((ILEB_64_PQ)item_list)->ileb_64$pq_retlen_addr; \ } \ else \ { \ item_code = ((ILE3_PQ)item_list)->ile3$w_code; \ length = ((ILE3_PQ)item_list)->ile3$w_length; \ bufaddr = ((ILE3_PQ)item_list)->ile3$ps_bufaddr; \ retlen_addr = ((ILE3_PQ)item_list)->ile3$ps_retlen_addr; \ } \ } /* TR_PRINT - Debug print * * * This macro adds an informational message to the TR trace buffer. * The ctrstr argument has similar syntax to a "printf" statement. 
* * Inputs: * * ctrstr - The text and optional formatting directives to be * saved in the trace ring buffer, only the following * directives are allowed, no width: * %s - zero-terminated string * %a - ascii string (pointer & length) * %d - decimal value * %X - hexadecimal longword * %L - hexadecimal quadword * p1-p5 - The corresponding values to be formatted. For the %s * directive, this is the address of the zero-terminated * string. For the %a directive, this requires 2 arguments, * first the address of the string buffer, then the length * of the string (by value). For the other directives, this * is passed by value. * * Usage Examples: * Macro32: * tr_print ctrstr=,p1=r4 * tr_print ctrstr=,p1=r3,p2=r5 * C: * #include vms_macros * tr_print (("this is a C test and needs double-parentheses, index %d", idx )); * tr_print (("a hex number %X and a quadword %L", irp->irp$l_func, irp->irp$q_fr3 )); * Bliss: * tr_print ('this is a Bliss test, index %d', .idx ); * tr_print ('a hex number %X and a quadword %L', .irp, .ucb ); */ extern uint64 tr$gq_debug; #define tr_print(_printf_args) \ if ( tr$gq_debug & 1 ) \ { \ int *tr_print_rtn = (int *) (tr$gq_debug & ~1); \ ((void (*)()) *tr_print_rtn) _printf_args ; \ } /* EXC_PRINT - Exception trace print * * * This macro adds an informational message to the EXC trace buffer. * The ctrstr argument has similar syntax to a "printf" statement. * * Inputs: * * ctrstr - The text and optional formatting directives to be * saved in the trace ring buffer, only the following * directives are allowed, no width: * %s - zero-terminated string * %a - ascii string (pointer & length) * %d - decimal value * %X - hexadecimal longword * %L - hexadecimal quadword * p1-p5 - The corresponding values to be formatted. For the %s * directive, this is the address of the zero-terminated * string. For the %a directive, this requires 2 arguments, * first the address of the string buffer, then the length * of the string (by value). For the other directives, this * is passed by value. * * Usage Examples: * Macro32: * exc_print ctrstr=,p1=r4 * exc_print ctrstr=,p1=r3,p2=r5 * C: * #include vms_macros * exc_print (("this is a C test and needs double-parentheses, index %d", idx )); * exc_print (("a hex number %X and a quadword %L", irp->irp$l_func, irp->irp$q_fr3 )); * Bliss: * exc_print ('this is a Bliss test, index %d', .idx ); * exc_print ('a hex number %X and a quadword %L', .irp, .ucb ); */ extern uint64 exc$gq_debug; #define exc_print(_printf_args) \ if ( exc$gq_debug & 1 ) \ { \ int *exc_print_rtn = (int *) (exc$gq_debug & ~1); \ ((void (*)()) *exc_print_rtn) _printf_args ; \ } /* __ADD_ATOMIC_LONG, and QUAD, generate memory barriers on Alpha and memory * fences on IPF around the atomic instruction sequence. __ATOMIC_ADD_LONG, and * QUAD, do not. This is a little-known fact and __ADD_ATOMIC_LONG, and QUAD, * are frequently used when the memory barriers are not needed. These macros are * intended to help code writers and readers with this distinction. Every bit * of performance improvement helps. 
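
   Illustrative usage (a hedged sketch; "counter" is a hypothetical naturally
   aligned longword shared between CPUs):

       int counter;

       $ADD_ATOMIC_LONG_BARRIER (&counter, 1);       - barriers/fences needed
       $ADD_ATOMIC_LONG_NO_BARRIER (&counter, 1);    - ordering not required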
*/ #define $ADD_ATOMIC_LONG_BARRIER(data,count){ \ __ADD_ATOMIC_LONG(data,count); \ } #define $ADD_ATOMIC_LONG_NO_BARRIER(data,count){\ __ATOMIC_ADD_LONG(data,count); \ } #define $ADD_ATOMIC_QUAD_BARRIER(data,count){ \ __ADD_ATOMIC_QUAD(data,count); \ } #define $ADD_ATOMIC_QUAD_NO_BARRIER(data,count){\ __ATOMIC_ADD_QUAD(data,count); \ } /* VMS$GET_CURMODE() and VMS$GET_PREVMODE() */ #if defined (__ALPHA) /* Verified for x86 port - Clair Grant */ /* On Alpha, we get the current mode from the Processor Status Longword. */ #define VMS$GET_CURMODE() (((union _PS){.quad=__PAL_RD_PS()}).fields.psl$v_curmod) #define VMS$GET_PREVMODE() (((union _PS){.quad=__PAL_RD_PS()}).fields.psl$v_prvmod) #elif defined(__ia64) /* On IA64, we get the current mode from the Previous Function State, as we can't directly read the cpl part of the Processor Status Register. */ #define VMS$GET_CURMODE() (((PFS){.pfs$iq_prev_func_state=__getReg(_IA64_REG_AR_PFS)}).pfs$v_ppl) #define VMS$GET_PREVMODE() (((union _PS){.quad=__PAL_RD_PS()}).fields.psl$v_prvmod) #elif defined(__x86_64) /* On X86, we get the current mode from the SWIS data structure. */ #define VMS$GET_CURMODE() (__readGsWord(SWIS$K_CURMODE_OFFSET)) #define VMS$GET_PREVMODE() (__readGsWord(SWIS$K_PREVMODE_OFFSET)) #else #error Need architecture specific work here #endif /* VMS$GET_TIMESTAMP() */ #if defined (__ALPHA) /* Verified for x86 port - Clair Grant */ #define VMS$GET_TIMESTAMP() (asm ( "call_pal " STR(EVX$PAL_RSCC) ";" )) #elif defined(__ia64) #define VMS$GET_TIMESTAMP() (__getReg(_IA64_REG_AR_ITC)) #elif defined(__x86_64) #define VMS$GET_TIMESTAMP() (__getReg(_X86_REG_TSC)) #else #error Need architecture specific work here #endif /* VMS$GET_THREADENV() - get pointer to thread environment */ #if defined (__alpha) /* Verified for x86 port - Clair Grant */ #define VMS$GET_THREADENV() (asm("call_pal 0x9E")) #elif defined (__ia64) #define VMS$GET_THREADENV() (__getReg(_IA64_REG_TP)) #elif defined(__x86_64) #define VMS$GET_THREADENV() sys$pal_read_unq() #else #error Need architecture specific work here #endif /* VMS$SET_THREADENV(x) - set pointer to thread environment */ #if defined (__alpha) /* Verified for x86 port - Clair Grant */ #define VMS$SET_THREADENV(x) (asm("call_pal 0x9F", x)) #elif defined (__ia64) #define VMS$SET_THREADENV(x) (__setReg(_IA64_REG_TP, (unsigned __int64)(x))) #elif defined(__x86_64) #define VMS$SET_THREADENV(x) sys$pal_write_unq(x) #else #error Need architecture specific work here #endif /* VMS$GET_CURR_INVO_CONTEXT() - get current invocation context */ #if defined (__alpha) /* Verified for x86 port - Clair Grant */ #define VMS$GET_CURR_INVO_CONTEXT lib$get_curr_invo_context #elif defined (__ia64) #define VMS$GET_CURR_INVO_CONTEXT lib$i64_get_curr_invo_context #elif defined(__x86_64) #define VMS$GET_CURR_INVO_CONTEXT lib$x86_get_curr_invo_context #else #error Need architecture specific work here #endif /* VMS$GET_PREV_INVO_CONTEXT() - get previous invocation context */ #if defined (__alpha) /* Verified for x86 port - Clair Grant */ #define VMS$GET_PREV_INVO_CONTEXT lib$get_prev_invo_context #elif defined (__ia64) #define VMS$GET_PREV_INVO_CONTEXT lib$i64_get_prev_invo_context #elif defined(__x86_64) #define VMS$GET_PREV_INVO_CONTEXT lib$x86_get_prev_invo_context #else #error Need architecture specific work here #endif /* VMS$PREV_INVO_END() - previous invocation end */ #if defined (__alpha) /* Verified for x86 port - Clair Grant */ #define VMS$PREV_INVO_END(_ctx_) 1 #elif defined (__ia64) #define VMS$PREV_INVO_END(_ctx_) 
lib$i64_prev_invo_end(_ctx_) #elif defined(__x86_64) #define VMS$PREV_INVO_END(_ctx_) lib$x86_prev_invo_end(_ctx_) #else #error Need architecture specific work here #endif /* VMS$INIT_INVO_CONTEXT() - init invocation context */ #if defined (__alpha) /* Verified for x86 port - Clair Grant */ #define VMS$INIT_INVO_CONTEXT(_ctx_) 1 #elif defined (__ia64) #define VMS$INIT_INVO_CONTEXT(_ctx_) lib$i64_init_invo_context(_ctx_) #elif defined(__x86_64) #define VMS$INIT_INVO_CONTEXT(_ctx_) lib$x86_init_invo_context(_ctx_) #else #error Need architecture specific work here #endif /* VMS$GET_GR - get general register */ #if defined (__ia64) /* Verified for x86 port - Clair Grant */ #define VMS$GET_GR lib$i64_get_gr #elif defined(__x86_64) #define VMS$GET_GR lib$x86_get_gr #endif /* VMS$SET_GR - set general register */ #if defined (__ia64) /* Verified for x86 port - Clair Grant */ #define VMS$SET_GR lib$i64_set_gr #elif defined(__x86_64) #define VMS$SET_GR lib$x86_set_gr #endif /* ;+ ; NOT_X86_READY ; ; This macro is intended to act as a marker for sections of code that ; have not been modified to run on X86 - usually such sections will ; contain SVAPTE, PTE or other references that keep the modules they're ; in from compiling, or that keep any image containing the modules from ; linking. ; ; If these sections are not immediately needed (such as for first boot) ; then we can defer work on them and get the modules containing them to ; compile and link cleanly by conditionalizing them out. ; ; Besides making it easy to find these sections later on, this macro will ; bugcheck (on X86 only) to prevent any of these sections from executing ; without our knowledge. ; ; Code should never be conditionalized out like this w/o being guarded ; by this macro, because that would allow it to be silently skipped, and ; would likely lead to hard-to-diagnose run time problems. ; ; A common sequence would be (note: in the example below, replace all ; backslash characters "\" with forward slash "/" in actual code): ; ; ; not_x86_ready(); \* Code is not ready to run on X86 *\ ; ; #if !VMS$NOSVAPTE \* Compile if platform supports SVAPTEs *\ ; ; << ... code that is not X86-ready ... >> ; ; #endif \* Compile if platform supports SVAPTEs *\ ; ;- */ #if defined(__x86_64) #define not_x86_ready() \ { \ exe$kprintf("***** %s:%s:%d not_x86_ready() *****\n",__FILE__,__func__,__LINE__); \ bug_check(INCONSTATE,FATAL,COLD); \ } #else #define not_x86_ready() #endif #endif /* __VMS_MACROS_LOADED */