/***************************************************************************/ /** **/ /** HPE CONFIDENTIAL. This software is confidential proprietary software **/ /** licensed by Hewlett-Packard Enterprise Development, LP, and is not **/ /** authorized to be used, duplicated OR disclosed to anyone without the **/ /** prior written permission of HPE. **/ /** © 2023 Copyright Hewlett-Packard Enterprise Development, LP **/ /** **/ /** VMS SOFTWARE, INC. CONFIDENTIAL. This software is confidential **/ /** proprietary software licensed by VMS Software, Inc., and is not **/ /** authorized to be used, duplicated or disclosed to anyone without **/ /** the prior written permission of VMS Software, Inc. **/ /** © 2023 Copyright VMS Software, Inc. **/ /** **/ /***************************************************************************/ /********************************************************************************************************************************/ /* Created: 9-Nov-2023 12:07:49 by OpenVMS SDL V3.7 */ /* Source: 31-MAY-2022 08:15:56 $1$DGA8345:[LIB_H.SRC]CPUDEF.SDL;1 */ /********************************************************************************************************************************/ /*** MODULE $CPUDEF ***/ #ifndef __CPUDEF_LOADED #define __CPUDEF_LOADED 1 #pragma __nostandard /* This file uses non-ANSI-Standard features */ #pragma __member_alignment __save #pragma __nomember_alignment #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __save /* Save the previously-defined required ptr size */ #pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */ #endif #ifdef __cplusplus extern "C" { #define __unknown_params ... #define __optional_params ... #else #define __unknown_params #define __optional_params ... 
#endif #ifndef __struct #if !defined(__VAXC) #define __struct struct #else #define __struct variant_struct #endif #endif #ifndef __union #if !defined(__VAXC) #define __union union #else #define __union variant_union #endif #endif #include /* Define the FKB type; CPU$ contains an embedded FKB */ #include #include /*+ */ /* */ /* Per-CPU Database definitions. One of these structures exists for */ /* each CPU that is participating in symmetric multiprocessing. */ /* */ /* The per-CPU database consists of 2 parts. A fixed portion that exists */ /* for any CPU type is defined first. A variable portion is also defined as */ /* necessary for various CPU types. The contents of the variable portion */ /* are CPU-specific. */ /* */ /* When creating a per-CPU database, one must allocate space to include */ /* both the fixed portion and a variable portion that is specific to the */ /* CPU type for which the database is being created. */ /* */ /*- */ #define CPU$C_RESERVED 0 /* Zero is reserved */ #define CPU$C_INIT 1 /* CPU is being INITialized */ #define CPU$C_RUN 2 /* CPU is RUNning */ #define CPU$C_STOPPING 3 /* CPU is STOPping */ #define CPU$C_STOPPED 4 /* CPU is STOPPED */ #define CPU$C_TIMOUT 5 /* Boot of CPU timed out */ #define CPU$C_BOOT_REJECTED 6 /* CPU refuses to join SMP */ #define CPU$C_BOOTED 7 /* CPU booted - waiting for "go" */ #define CPU$C_NOT_CONFIGURED 8 /* CPU exists, but not in configure set */ #define CPU$C_POWERED_DOWN 9 /* CPU in configure set, but powered down */ #define CPU$C_DEALLOCATED 10 /* CPU has been deallocated */ #define CPU$M_INV_TBS 0x1 #define CPU$M_INV_TBA 0x2 #define CPU$M_BUGCHK 0x4 #define CPU$M_BUGCHKACK 0x8 #define CPU$M_RECALSCHD 0x10 #define CPU$M_UPDASTSR 0x20 #define CPU$M_UPDATE_HWCLOCK 0x40 #define CPU$M_WORK_FQP 0x80 #define CPU$M_QLOST 0x100 #define CPU$M_RESCHED 0x200 #define CPU$M_VIRTCONS 0x400 #define CPU$M_IOPOST 0x800 #define CPU$M_INV_ISTREAM 0x1000 #define CPU$M_INV_TBSD 0x2000 #define CPU$M_INV_TBS_MMG 0x4000 
#define CPU$M_INV_TBSD_MMG 0x8000 #define CPU$M_IO_INT_AFF 0x10000 #define CPU$M_IO_START_AFF 0x20000 #define CPU$M_UPDATE_SYSPTBR 0x40000 #define CPU$M_PERFMON 0x80000 #define CPU$M_READ_SCC 0x100000 #define CPU$M_CPUFILL_1 0xFFFFFFF #define CPU$M_CPUSPEC1 0x10000000 #define CPU$M_CPUSPEC2 0x20000000 #define CPU$M_CPUSPEC3 0x40000000 #define CPU$M_CPUSPEC4 0x80000000 #define CPU$K_NUM_SWIQS 6 /* Number of software interrupt queues */ #define CPU$M_SYS_ASTEN 0xF #define CPU$M_SYS_ASTSR 0xF0 #define CPU$C_HWPCBLEN 256 /* Length of HWPCB in 128 bytes */ #define CPU$K_HWPCBLEN 256 /* Length of HWPCB in 128 bytes */ #define CPU$M_TERM_ASTEN 0xF #define CPU$M_TERM_ASTSR 0xF0 #define CPU$M_BC_AST_CALLED 0x1 #define CPU$M_BC_ASTDEL 0x2 #define CPU$M_BC_XSAVE_SAVED 0x4 #define CPU$M_BC_FAULTY_TOWERS 0x8 #define CPU$M_BC_IMSEM 0x10 #define CPU$M_BC_ALPHAREG_SAVED 0x20 #define CPU$K_BC_INTSTK_LENGTH 480 /**** End of X86_64 symbols that match $CRASHDEF in [SDA]EVAX_SDADEF.SDL and $INTSTKDEF in [LIB]INTSTKDEF.SDL **** */ #define CPU$M_SCHED 0x1 #define CPU$M_FOREVER 0x2 #define CPU$M_NEWPRIM 0x4 #define CPU$M_PSWITCH 0x8 #define CPU$M_BC_STACK 0x10 #define CPU$M_BC_CONTEXT 0x20 #define CPU$M_USER_CAPABILITIES_SET 0x40 #define CPU$M_RESET_LOW_POWER 0x80 #define CPU$M_STOPPING 0x1 #define CPU$M_RESCHEDULING 0x2 #define CPU$M_PCSAMPLE_ACTIVE 0x1 #define CPU$M_IO_AFF_FKB_INUSE 0x1 #define CPU$M_PORT_ASSIGNED 0x2 #define CPU$M_DISTRIBUTED_INTS 0x4 #define CPU$M_LASTPAGE_TESTED 0x20000000 #define CPU$M_MCHECK 0x40000000 #define CPU$M_MEMORY_WRITE 0x80000000 #define CPU$M_AUTO_START 0x1 #define CPU$M_NOBINDINGS 0x2 #ifdef __cplusplus /* Define structure prototypes */ struct _pcb; struct _ktb; struct _irp; #endif /* #ifdef __cplusplus */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif typedef struct _cpu { #pragma __nomember_alignment __union { struct _pcb 
*cpu$l_curpcb; /* Address of CPU's current PCB */ struct _ktb *cpu$l_curktb; /* Address of CPU's current KTB */ } cpu$r_curpcb_overlay; void *cpu$l_slot_va; /* Address of CPU's HWRPB slot */ unsigned short int cpu$w_size; /* Structure size */ unsigned char cpu$b_type; /* Structure type */ unsigned char cpu$b_subtype; /* Structure subtype */ unsigned int cpu$l_state; /* State of this processor */ unsigned int cpu$l_cpumtx; /* Count of CPUMTX acquires */ unsigned int cpu$l_cur_pri; /* Current Process Priority */ /* */ /* CPU type independent work request bits */ /* */ __union { __union { unsigned int cpu$l_work_req; /* Work request bitmask */ __struct { unsigned cpu$v_inv_tbs : 1; /* Invalidate TB single */ unsigned cpu$v_inv_tba : 1; /* Invalidate TB all */ unsigned cpu$v_bugchk : 1; /* BUG_CHECK requested */ unsigned cpu$v_bugchkack : 1; /* BUG_CHECK acked */ unsigned cpu$v_recalschd : 1; /* Recalculate per cpu mask,reschedule */ unsigned cpu$v_updastsr : 1; /* Update ASTSR register */ unsigned cpu$v_update_hwclock : 1; /* Update local hardware clocks */ unsigned cpu$v_work_fqp : 1; /* Process work queue */ unsigned cpu$v_qlost : 1; /* Stall until quorum regained */ unsigned cpu$v_resched : 1; /* Issue IPL 3 SOFTINT */ unsigned cpu$v_virtcons : 1; /* Enter virtual console mode (primary) */ unsigned cpu$v_iopost : 1; /* Issue IPL 4 SOFTINT */ unsigned cpu$v_inv_istream : 1; /* Invalidate cached instruction stream */ unsigned cpu$v_inv_tbsd : 1; /* Invalidate data TB single */ unsigned cpu$v_inv_tbs_mmg : 1; /* Invalidate TB single MMG synchronized */ unsigned cpu$v_inv_tbsd_mmg : 1; /* Invalidate TB single MMG synchronized */ unsigned cpu$v_io_int_aff : 1; /* Fast Path I/O completion event */ unsigned cpu$v_io_start_aff : 1; /* Fast Path I/O start event */ unsigned cpu$v_update_sysptbr : 1; /* Update SYSPTBR register */ unsigned cpu$v_perfmon : 1; /* Performance Monitoring */ unsigned cpu$v_read_scc : 1; /* Read SCC */ unsigned cpu$v_fill_16_ : 3; } cpu$r_fill_1_; } 
cpu$r_fill_0_; /* */ /* Define 4 CPU type specific work request bits as bit #s 28-31. */ /* */ __union { /* CPU specific work requests */ __union { unsigned int cpu$l_cpuspec; /* generic definition */ __struct { unsigned cpu$v_cpufill_1 : 28; /* pad bit definitions into position */ unsigned cpu$v_cpuspec1 : 1; /* CPU specific */ unsigned cpu$v_cpuspec2 : 1; /* CPU specific */ unsigned cpu$v_cpuspec3 : 1; /* CPU specific */ unsigned cpu$v_cpuspec4 : 1; /* CPU specific */ } cpu$r_fill_3_; } cpu$r_fill_2_; } cpu$r_cpuspec_overlay; } cpu$r_work_req_overlay; /* */ unsigned int cpu$l_phy_cpuid; /* CPU ID number */ int cpu$l_cbb_reserved_1; /* $l_cpuid_mask moves to bottom */ unsigned int cpu$l_busywait; /* <>0 = Spinning for lock */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif __int64 cpu$q_swiqfl [6]; /* Software interrupt queues */ #pragma __nomember_alignment struct _irp *cpu$l_psfl; /* POST QUEUE forward link */ struct _irp *cpu$l_psbl; /* POST QUEUE backward link */ __union { /* Work queue overlay */ unsigned __int64 cpu$q_work_fqfl; /* Work packet queue */ unsigned __int64 cpu$q_work_ifq; /* Work packet queue */ } cpu$r_ifq_overlay; /* */ int cpu$l_zeroed_page_spte_fill; /* NOSVAPTE_V9.0 Dave Fairbanks */ void *cpu$l_zeroed_page_va; /* VA for zeroed page filling */ __int64 cpu$q_zeroed_page_state; /* State for interrupted filling */ /******************************************************************* */ /* HWPCB for this CPU's dedicated System Process */ /* */ /* This Hardware Privileged Context Block provides the context for when this */ /* CPU has no other process to run. */ /* */ /* NOTE WELL: This HWPCB must be aligned to a 128 byte boundary, the */ /* architected natural alignment of a HWPCB. 
*/ /* */ /* NOTE WELL: There are bit symbols defined here for accessing the saved ASTEN, */ /* ASTSR, FEN and DATFX values in the HWPCB. These symbols are NOT to be used when */ /* interfacing to the ASTEN, ASTSR, FEN or DATFX internal processor registers directly. */ /* See the specific internal register definitions for bitmasks and constants */ /* to be used when interfacing to the IPRs directly. */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif unsigned __int64 cpu$q_phy_sys_hwpcb; /* Physical address of HWPCB */ /* Start of aligned section */ #pragma __nomember_alignment __union { unsigned __int64 cpu$q_sys_hwpcb; /* Base of HWPCB */ unsigned __int64 cpu$q_sys_ksp; /* Kernel stack pointer */ } cpu$r_hwpcb_overlay; unsigned __int64 cpu$q_sys_esp; /* Executive stack pointer */ unsigned __int64 cpu$q_sys_ssp; /* Supervisor stack pointer */ unsigned __int64 cpu$q_sys_usp; /* User stack pointer */ unsigned __int64 cpu$q_sys_ptbr [4]; /* Page Table base for each mode */ unsigned __int64 cpu$q_sys_asn; /* ASN (to be combined with mode for PCID) */ __union { unsigned __int64 cpu$q_sys_astsr_asten; /* ASTSR / ASTEN quadword */ __struct { unsigned cpu$v_sys_asten : 4; /* AST Enable Register */ unsigned cpu$v_sys_astsr : 4; /* AST Pending Summary Register */ } cpu$r_ast_bits0; } cpu$r_ast_overlay; unsigned __int64 cpu$q_sys_perf_ctrl; /* Perfomance monitoring control */ unsigned __int64 cpu$q_sys_cc; /* Cycle Counter */ unsigned __int64 cpu$q_unq; #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_sys_alphareg [4]; /* Pointers to emulated Alpha registers for each mode */ #else unsigned __int64 cpu$pq_sys_alphareg [4]; #endif unsigned char cpu$b_sys_pmod; /* Previous mode */ unsigned char 
cpu$b_sys_was_scheduled; /* Process was scheduled at least once */ char cpu$b_sys_reserved_1 [6]; unsigned int cpu$l_sys_interrupt_depth; /* Interrupt depth */ int cpu$l_sys_cur_frame_mode; /* Mode of currently active frame */ #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_sys_cur_frame; /* Currently active frame */ #else unsigned __int64 cpu$pq_sys_cur_frame; #endif unsigned __int64 cpu$q_sys_kstack_top; unsigned __int64 cpu$q_sys_kstack_bottom; __int64 cpu$q_sys_pal_rsvd [5]; /* Reserved for PAL Scratch */ char cpu$t_align2 [40]; /* Assure 128 byte alignment */ /* */ /* End of Hardware Privileged Context Block (HWPCB) for the system process */ /* */ /******************************************************************* */ /******************************************************************* */ /* HWPCB for this CPU's Terminating Process. */ /* */ /* This Hardware Privileged Context Block provides the context for when this */ /* CPU needs a place to run when a powerfail may, unexpectedly, happen. */ /* */ /* Remember, when a process's HWPCB is loaded (active on the CPU) the contents */ /* of the HWPCB are undefined since the processor may use that area as */ /* scratch space. All code paths that execute higher than IPL IPL$_POWER-2 (29) */ /* for an extended period of time may need to execute in the context of this */ /* process. Currently this includes most code surrounding powerfail/restart */ /* and parts of SMP$START_SECONDARY. */ /* */ /* NOTE WELL: This HWPCB must be aligned to a 128 byte boundary, the */ /* architected natural alignment of a HWPCB. */ /* */ /* NOTE WELL: There are bit symbols defined here for accessing the saved ASTEN, */ /* ASTSR, FEN and DATFX values in the HWPCB. These symbols are NOT to be used when */ /* interfacing to the ASTEN, ASTSR, FEN or DATFX internal processor registers directly. 
*/ /* See the specific internal register definitions for bitmasks and constants */ /* to be used when interfacing to the IPRs directly. */ /* */ /* Start of aligned section */ __union { unsigned __int64 cpu$q_term_hwpcb; /* Base of HWPCB */ unsigned __int64 cpu$q_term_ksp; /* Kernel stack pointer */ } cpu$r_term_hwpcb_overlay; unsigned __int64 cpu$q_term_esp; /* Executive stack pointer */ unsigned __int64 cpu$q_term_ssp; /* Supervisor stack pointer */ unsigned __int64 cpu$q_term_usp; /* User stack pointer */ unsigned __int64 cpu$q_term_ptbr [4]; /* Page Table base for each mode */ unsigned __int64 cpu$q_term_asn; /* ASN (to be combined with mode for PCID) */ __union { unsigned __int64 cpu$q_term_astsr_asten; /* ASTSR / ASTEN quadword */ __struct { unsigned cpu$v_term_asten : 4; /* AST Enable Register */ unsigned cpu$v_term_astsr : 4; /* AST Pending Summary Register */ } cpu$r_term_ast_bits0; } cpu$r_term_ast_overlay; unsigned __int64 cpu$q_term_perf_ctrl; /* Perfomance monitoring control */ unsigned __int64 cpu$q_term_cc; /* Cycle Counter */ unsigned __int64 cpu$q_term_unq; /* Process Unique Value */ #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_term_alphareg [4]; /* Pointers to emulated Alpha registers for each mode */ #else unsigned __int64 cpu$pq_term_alphareg [4]; #endif unsigned char cpu$b_term_pmod; /* Previous mode */ unsigned char cpu$b_term_was_scheduled; /* Process was scheduled at least once */ char cpu$b_term_reserved_1 [6]; unsigned int cpu$l_term_interrupt_depth; /* Interrupt depth */ int cpu$l_term_cur_frame_mode; /* Mode of currently active frame */ #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_term_cur_frame; /* Currently active frame */ #else unsigned __int64 
cpu$pq_term_cur_frame; #endif unsigned __int64 cpu$q_term_kstack_top; unsigned __int64 cpu$q_term_kstack_bottom; __int64 cpu$q_term_pal_rsvd [5]; /* Reserved for PAL Scratch */ char cpu$t_align3 [40]; /* Assure 128 byte alignment */ /* */ /* End of aligned portion of HWPCB. Next quadword is used so we don't need to */ /* convert a virtual address to a physical address every time we use the terminating */ /* process. */ /* */ unsigned __int64 cpu$q_phy_term_hwpcb; /* Physical address of HWPCB */ /* */ /* End of Hardware Privileged Context Block (HWPCB) for the terminating process */ /* */ /******************************************************************* */ /* */ /* Per-CPU state saved during powerfail interrupt processing. The state */ /* that is saved here is process independent, yet specific to this CPU. */ /* */ unsigned __int64 cpu$q_saved_pcbb; /* PCBB from powerdown (non-zero */ /* if state successfully saved) */ unsigned __int64 cpu$q_scbb; /* SCBB from powerdown */ unsigned __int64 cpu$q_sisr; /* SISR from powerdown */ /******************************************************************* */ /* The following storage is used by BUGCHECK code. The order must be */ /* preserved since it is assumed by a table within SDA (see $CRASHDEF */ /* in [SDA]EVAX_SDADEF.SDL). The cells from BC_FLAGS to BC_SS must */ /* also match the layout of $INTSTKDEF in [LIB]INTSTKDEF.SDL. 
*/ /* */ unsigned __int64 cpu$q_bc_ksp; /* Stored KSP */ unsigned __int64 cpu$q_bc_esp; /* Stored ESP */ unsigned __int64 cpu$q_bc_ssp; /* Stored SSP */ unsigned __int64 cpu$q_bc_usp; /* Stored USP */ unsigned __int64 cpu$q_bc_ptbr [4]; /* Stored PTBR for each mode */ unsigned __int64 cpu$q_bc_asn; /* Stored ASN */ unsigned __int64 cpu$q_bc_astsr_asten; /* Stored AST SR and EN */ unsigned __int64 cpu$q_bc_perf_ctrl; /* Stored performance monitoring control */ unsigned __int64 cpu$q_bc_cc; /* Stored CC */ unsigned __int64 cpu$q_bc_unq; /* Thread pointer */ #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_bc_alphareg_ptr [4]; /* Emulated Alpha registers for each mode */ #else unsigned __int64 cpu$pq_bc_alphareg_ptr [4]; #endif unsigned char cpu$b_bc_pmod; /* Previous mode */ unsigned char cpu$b_bc_was_scheduled; /* Process was scheduled at least once */ char cpu$b_bc_reserved_1 [6]; /* alignment */ unsigned int cpu$l_bc_interrupt_depth; /* Interrupt depth */ int cpu$l_bc_cur_frame_mode; /* Mode of currently active frame */ #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_bc_cur_frame; /* Currently active frame */ #else unsigned __int64 cpu$pq_bc_cur_frame; #endif unsigned __int64 cpu$q_bc_kstack_top; unsigned __int64 cpu$q_bc_kstack_bottom; /* **** The next group of cells match $INTSTKDEF (from BC_FLAGS to BC_SS) **** */ __union { char cpu$b_bc_flags; __struct { unsigned cpu$v_bc_ast_called : 1; /* ASTDEL has been called at least once with this frame */ unsigned cpu$v_bc_astdel : 1; /* Performing outer-mode AST delivery */ unsigned cpu$v_bc_xsave_saved : 1; /* XSAVE state saved prior to AST delivery */ unsigned cpu$v_bc_faulty_towers : 1; /* Re-execute system service */ unsigned cpu$v_bc_imsem : 1; /* 
Inner-mode semaphore needs to be released */ unsigned cpu$v_bc_alphareg_saved : 1; /* Alpha registers saved in this frame */ unsigned cpu$v_fill_17_ : 2; } cpu$r_bc_flag_bits; } cpu$r_bc_flags_union; unsigned char cpu$b_bc_pprevmode; /* Save interrupted context's PREVMODE */ __union { unsigned char cpu$b_bc_prevmode; /* Save interrupted context's CURMODE */ unsigned char cpu$b_bc_prevstack; /* What mode of stack (register and memory) do we return to? */ } cpu$r_bc_prevmode_union; unsigned char cpu$b_bc_ipl; /* SWIS IPL state */ unsigned int cpu$l_bc_stkalign; /* How much allocated on this stack for int frame? Guaranteed that */ /* STKALIGN & 0XFFF0 is the actual length of the structure. In other */ /* words, the structure is always allocated on a 16-byte boundary and */ /* is a multiple of 16-bytes long. */ unsigned char cpu$b_bc_astmode; /* Mode of current AST delivery */ unsigned char cpu$b_bc_interrupt_depth; unsigned char cpu$b_bc_type; /* Make this structure look like a standard VMS structure */ unsigned char cpu$b_bc_subtype; unsigned int cpu$l_bc_trap_type; /* Trap type */ unsigned __int64 cpu$q_bc_fsbase; /* Saved base of FS segment */ unsigned __int64 cpu$q_bc_rax; /* Saved x86 registers */ unsigned __int64 cpu$q_bc_rdi; unsigned __int64 cpu$q_bc_rsi; unsigned __int64 cpu$q_bc_rdx; unsigned __int64 cpu$q_bc_rcx; unsigned __int64 cpu$q_bc_r8; unsigned __int64 cpu$q_bc_r9; unsigned __int64 cpu$q_bc_rbx; unsigned __int64 cpu$q_bc_rbp; unsigned __int64 cpu$q_bc_r10; unsigned __int64 cpu$q_bc_r11; unsigned __int64 cpu$q_bc_r12; unsigned __int64 cpu$q_bc_r13; unsigned __int64 cpu$q_bc_r14; unsigned __int64 cpu$q_bc_r15; unsigned __int64 cpu$q_bc_alphareg [32]; #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_bc_prev_frame; #else unsigned __int64 cpu$pq_bc_prev_frame; #endif int cpu$l_bc_prev_frame_mode; unsigned int 
cpu$l_bc_filler_pfm; unsigned short int cpu$w_bc_cpu_num; unsigned short int cpu$w_bc_filler_2 [3]; unsigned __int64 cpu$q_bc_vector; /* Interrupt vector we came in on */ unsigned __int64 cpu$q_bc_error_code; /* From here on this is the CPU-defined frame */ unsigned __int64 cpu$q_bc_rip; unsigned __int64 cpu$q_bc_cs; unsigned __int64 cpu$q_bc_rflags; unsigned __int64 cpu$q_bc_rsp; unsigned __int64 cpu$q_bc_ss; unsigned __int64 cpu$q_bc_pcbb; /* Stored PCBB */ unsigned __int64 cpu$q_bc_prbr; /* Stored PRBR */ unsigned __int64 cpu$q_bc_scbb; /* Stored SCBB */ unsigned __int64 cpu$q_bc_sisr; /* Stored SISR */ /**** End of X86_64 symbols that match $CRASHDEF in [SDA]EVAX_SDADEF.SDL **** */ unsigned __int64 cpu$q_bc_bugstk; /*new BUGSTK pointer following context switch */ unsigned __int64 cpu$q_bc_intstk; /*new INTSTK pointer following context switch */ unsigned __int64 cpu$q_bc_orig_intstk; /* */ /* */ /* */ /* End of storage used by BUGCHECK code. */ /******************************************************************* */ unsigned int cpu$l_bugcode; /* BUGCHECK code */ unsigned int cpu$l_capability; /* Bitmask of CPU's capabilities */ unsigned __int64 cpu$q_boot_time; /* System time this cpu booted */ unsigned __int64 cpu$q_asn; /* Last ASN assigned for this CPU */ unsigned __int64 cpu$q_asnseq; /* Current ASN sequence number */ /* */ /* Time counters defined as follows: */ /* (Also applies to UKERNEL and UNULLCPU cells) */ /* */ /* KERNEL mode in process context, no spinlock busywait active */ /* EXECUTIVE mode */ /* SUPERVISOR mode */ /* USER mode */ /* KERNEL mode in system context (PS = 1), no spinlock busywait active */ /* KERNEL mode in process or system context, spinlock busywait is active */ /* */ /* NULL time counter */ /* */ __union { unsigned __int64 cpu$q_kernel [6]; /* Clock ticks in each mode */ __struct { __int64 cpu$q_fill_1 [4]; /* non-busywait counters for 4 process modes */ unsigned __int64 cpu$q_system_context; /* Clock ticks in interrupt mode */ 
unsigned __int64 cpu$q_mpsynch; /* Clock ticks in MP synchronization */ } cpu$r_fill_5_; } cpu$r_fill_4_; unsigned __int64 cpu$q_nullcpu; /* Clock ticks in per-CPU system process (null) */ unsigned int cpu$l_hardaff; /* Count of processes with */ /* hard affinity for this CPU */ /* */ /* Spinlock acquisition/release tracking and verification data */ /* */ unsigned int cpu$l_rank_vec; /* Ranks of spinlocks currently held */ unsigned int cpu$l_ipl_vec; /* IPL vector of held spinlocks */ int cpu$l_ipl_array [32]; /* IPL counts of held spinlocks */ /* */ /* Cells for CPU sanity timer */ /* */ #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */ #endif void *cpu$l_tpointer; /* Address of SANITY_TIMER of */ /* CPU being watched */ unsigned int cpu$l_sanity_timer; /* # of sanity cycles before this CPU times out */ unsigned int cpu$l_sanity_ticks; /* # of ticks until next sanity cycle */ /* */ /* CPU flags */ /* */ __union { unsigned int cpu$l_flags; /* Various CPU flags */ __struct { unsigned cpu$v_sched : 1; /* Idle loop vying for SCHED */ unsigned cpu$v_forever : 1; /* STOP/CPU with /FOREVER qualifier */ unsigned cpu$v_newprim : 1; /* Primary-to-be CPU */ unsigned cpu$v_pswitch : 1; /* Live primary switch requested by primary CPU */ unsigned cpu$v_bc_stack : 1; /* Set if we swapped process context to write crash dump */ unsigned cpu$v_bc_context : 1; /* Set if database contains context from bugcheck */ unsigned cpu$v_user_capabilities_set : 1; /* Set if user capabilities already initialized */ unsigned cpu$v_reset_low_power : 1; /* Tell the next clock soft-tick to reset the low power switch */ } cpu$r_fill_7_; } cpu$r_fill_6_; /* */ /* The following field, INTFLAGS, must be longword aligned since */ /* interlocked instructions are used to access the bitfields. 
*/ /* */ __union { unsigned int cpu$l_intflags; /* Interlocked CPU flags */ __struct { unsigned cpu$v_stopping : 1; /* CPU stopping flag */ unsigned cpu$v_rescheduling : 1; /* CPU rescheduling flag */ unsigned cpu$v_fill_18_ : 6; } cpu$r_fill_9_; } cpu$r_fill_8_; /* */ /* System stack base and limit */ /* */ void *cpu$l_sys_stack_base; void *cpu$l_sys_stack_limit; /* */ /* Descriptor used to locate the variable portion of the per-CPU database. */ /* This approach allows the fixed portion of the database to more easily */ /* grow over time. The offset represents a byte offset from the start of */ /* the fixed portion of the per-CPU database to a variable portion containing */ /* CPU-specific data. The variable portion is located adjacent to the fixed */ /* portion of the database. */ /* */ unsigned int cpu$l_variable_offset; /* Offset to variable portion of database */ unsigned int cpu$l_variable_length; /* Length in bytes of variable portion */ /* */ /* Define cells for machine check recovery block. These two longwords */ /* are assumed to be adjacent. */ /* */ unsigned int cpu$l_mchk_mask; /* Function mask for current recovery block */ void *cpu$l_mchk_sp; /* Saved SP for return at end of block */ /* 0 (zero) if no current recovery block */ /* */ /* Define a cell to point to a machine check crashes save area. This pointer */ /* is used by SDA to display the machine check information after a crash. 
*/ /* */ __union { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_mchk_crash_area_va; /* VA of mcheck crash area */ #else unsigned __int64 cpu$pq_mchk_crash_area_va; #endif __struct { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */ #endif void *cpu$pl_mchk_crash_area_va_l; unsigned int cpu$il_mchk_crash_area_va_h; } cpu$r_mchk_crash_area_va_fields; } cpu$r_mchk_crash_area_va_overlay; /* */ /* Define cells for processor_corrected_error_svapte and processor_mchk_abort */ /* _svapte. */ /* */ int cpu$l_proc_corrected_error_svap_fi; /* NOSVAPTE_V9.0 Dave Fairbanks */ int cpu$l_proc_mchk_abort_svapte_fill; /* NOSVAPTE_V9.0 Dave Fairbanks */ __union { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_logout_area_va; /* VA of mcheck logout area */ #else unsigned __int64 cpu$pq_logout_area_va; #endif __struct { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */ #endif void *cpu$pl_logout_area_va_l; unsigned int cpu$il_logout_area_va_h; } cpu$r_logout_area_va_fields; } cpu$r_logout_area_va_overlay; /* */ /* Soft tick dynamic timing offsets to determine when a 10ms "soft" tick */ /* occurs for each CPU. */ /* */ unsigned int cpu$l_soft_tick; int cpu$l_time_deviation; /* */ /* The following fields support PC sampling. They must be longword aligned. */ /* */ void *cpu$l_pcsample_buffer; __union { unsigned int cpu$l_pcsample_flags; __struct { unsigned cpu$v_pcsample_active : 1; /* Sample being collected. 
*/ unsigned cpu$v_fill_19_ : 7; } cpu$r_fill_11_; } cpu$r_fill_10_; /* */ /* Performance monitoring cells to replace global roll-up cells in idle loop. */ /* These cells MUST remain on quadword boundaries since they are updated by */ /* system quadword builtins. Any changes above these offsets must take this */ /* into account. */ /* */ unsigned __int64 cpu$q_idle_loop_count; /* Count of idle code loops */ unsigned __int64 cpu$q_zeroed_page_count; /* Count of free pages zeroed */ /* */ /* Rank counter cells for keeping track of the number of acquisitions */ /* in effect for a given ranking. This is primarily for portlock support, */ /* but is integrated into all static ranks for simplicity */ /* */ int cpu$l_rank_array [32]; /* Counts of acquisitions by rank */ /* */ /* Inline fork block for port-affinitized I/O activity */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif char cpu$l_io_aff_fkb [48]; /* */ /* Flags field for Fast Path I/O - this field is clumped with the FKB above */ /* and the queue below to get the best cache block behavior */ /* */ #pragma __nomember_alignment __union { unsigned int cpu$l_io_aff_flags; /* Fast Path I/O bits */ __struct { unsigned cpu$v_io_aff_fkb_inuse : 1; /* CPUDB FKB in use */ unsigned cpu$v_port_assigned : 1; /* CPU has port affinity */ unsigned cpu$v_distributed_ints : 1; /* CPU has hw interrupt port(s) assigned. 
*/ unsigned cpu$v_fill_20_ : 5; } cpu$r_fill_13_; } cpu$r_fill_12_; /* */ /* Absolute queue header for port-affinitized Fast Path I/O - must be */ /* quadword aligned */ /* */ char cpu$b_fill_21_ [4]; #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif void *cpu$ps_io_start_aff_qfl; /* UCB listhead */ #pragma __nomember_alignment void *cpu$ps_io_start_aff_qbl; /* */ /* The following space doubles as debugging space as well as providing */ /* 64-byte cache alignment for the following listhead. If the structure */ /* above changes this must reflected in this count. If this space gets */ /* filled in at some point, it is critical that the new cells not be */ /* highly accessed, otherwise we have potential hangs from overlapping */ /* memory lock interactions. */ int cpu$l_fill_6 [12]; /* */ /* Absolute interlocked queue for fastpath hardware interrupt ports */ /* assigned to this cpu */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif void *cpu$ps_io_int_aff_qfl; /* Fastpath HW interrupt */ #pragma __nomember_alignment void *cpu$ps_io_int_aff_qbl; /* : ports UCB listhead */ /* */ /* Holder cell for CPU capabilities. This replaces the old CAPABILITY that */ /* existed further up the structure. The lower longword holds the system and */ /* user capabilities for this CPU. The upper longword is an affinity bitmask */ /* containing a single bit set in the CPUID position of this CPU. */ /* */ __union { unsigned __int64 cpu$q_capabilities; /* Caps and affinity */ unsigned int cpu$l_capabilities; /* Just system and user caps */ } cpu$r_capabilities_overlay; /* Cell to hold a counter for emulated instructions. This counter is incremented */ /* when an instruction that is not available on this CPU (e.g. 
LDBU, LDWU) is */ /* executed in system context and is emulated. */ int cpu$l_emulate_count; __union { unsigned int cpu$l_untested_page_state; /* State for interrupted memory test */ __struct { unsigned short int cpu$w_untested_chunks; /* Count of 32-byte chunks remaining to be tested in current page */ unsigned cpu$v_fill_7 : 13; unsigned cpu$v_lastpage_tested : 1; /* Last untested page is being tested */ unsigned cpu$v_mcheck : 1; /* Mcheck occurred during memory test */ unsigned cpu$v_memory_write : 1; /* Memory test is in the write process */ } cpu$r_untested_bits; } cpu$r_untested_overlay; unsigned __int64 cpu$q_untested_pattern; int cpu$l_untested_page_spte_fill; /* NOSVAPTE_V9.0 Dave Fairbanks */ void *cpu$l_untested_page_va; /* VA for testing memory */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif __int64 cpu$q_sched_data [85]; /* scheduling data based on process */ /* priority level (5 quadwords for each */ /* priority level). See SCHED_DS structure */ /* below for more details. 
*/ /* */ __int64 cpu$q_scc_delta; /* Offset from primary SCC value */ #pragma __nomember_alignment __union { unsigned int cpu$l_transition_flags; /* Various CPU transition flags */ __struct { unsigned cpu$v_auto_start : 1; /* CPU is automatically made active */ unsigned cpu$v_nobindings : 1; /* Minimize features that prevent transition */ unsigned cpu$v_fill_22_ : 6; } cpu$r_fill_15_; } cpu$r_fill_14_; char cpu$b_fill_23_ [4]; #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_ctd_listhead; /* Offset to CPU transition block */ #else unsigned __int64 cpu$pq_ctd_listhead; #endif #pragma __nomember_alignment int cpu$l_failover_node; /* Node ID to fail this CPU over to */ char cpu$b_fill_24_ [4]; #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_gmp_listhead; /* Address of listhead for GMPs */ #else unsigned __int64 cpu$pq_gmp_listhead; #endif #pragma __nomember_alignment #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_extension_block; /* Pointer to extension of CPUDB */ #else unsigned __int64 cpu$pq_extension_block; #endif #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_lckcpu; /* pointer to 
per-CPU lckmgr counter structure */ #else unsigned __int64 cpu$pq_lckcpu; #endif #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */ #endif void *cpu$l_fp_asgn_ports_fl; /* queue links to fastpath ports */ #pragma __nomember_alignment void *cpu$l_fp_asgn_ports_bl; /* : */ int cpu$l_fp_num_ports; /* number of fastpath ports assigned to this CPU */ int cpu$l_fp_num_user_ports; /* number of user preferred fastpath ports assigned */ int cpu$l_fp_spare1; int cpu$l_fp_spare2; int cpu$l_fp_spare3; int cpu$l_fp_spare4; int cpu$l_rad; /* This cell initialized to the RAD number the CPU belongs to */ int cpu$l_rad_spare1; unsigned __int64 cpu$q_bc_scc; /* System Cycle Counter recorded by BUGCHECK (all platforms) */ /* */ /* TIMEDWAIT cells to support mixed-speed CPUs in heterogeneous SMP configurations */ /* */ unsigned __int64 cpu$q_tmwt_scaler; /* Scaler value for SCC conversions */ unsigned __int64 cpu$q_tmwt_divisor; /* Divisor value for SCC conversions */ unsigned __int64 cpu$q_tmwt_shift; /* Divisor shift count for SCC conversions */ /* */ /* Fastpath hardware interrupt ports housekeeping */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif void *cpu$l_fp_asgn_hwint_ports_fl; /* queue links to hwint ports */ #pragma __nomember_alignment void *cpu$l_fp_asgn_hwint_ports_bl; /* : */ int cpu$l_num_hwint_ports; /* number HW int ports */ int cpu$l_num_usrprf_hwint_ports; /* number user-assigned fastpath HW int ports */ unsigned __int64 cpu$q_xfc_vab_pointer; /* Link to XFC per RAD structures */ /* Keep track of CPU load */ unsigned int 
cpu$l_load_factor; /* This is the fixed point fraction of time CPU is usable */ unsigned int cpu$l_bin_5sec; /* Which of 5 bins are we using */ unsigned int cpu$l_usable_ticks [5]; /* 5 bins counting the usable ticks */ unsigned int cpu$l_total_ticks; /* Total number of ticks during a second */ unsigned int cpu$l_counter_10ms; /* Count 10ms intervals to get one second */ int cpu$l_filler_1; /* Make quadwords even */ /* Per-CPU queues */ int cpu$aq_com_queues [128]; /* 64 queue heads for this CPU */ unsigned __int64 cpu$q_com_queue_summary; /* Bits to show which CPU queues are used */ /* Per-RAD database pointer */ #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_rad_database; /* Pointer to the RAD database for RAD this CPU belongs to */ #else unsigned __int64 cpu$pq_rad_database; #endif #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_cflush_va_pte_sva; /* NOSVAPTE_V9.0 Dave Fairbanks */ #else unsigned __int64 cpu$pq_cflush_va_pte_sva; #endif #pragma __nomember_alignment #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_cflush_va; /* Pointer to S2 space VA used by cflush */ #else unsigned __int64 cpu$pq_cflush_va; #endif /* Per-CPU timing cells to be filled in when CPU joins the active set */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif __int64 cpu$q_itm_width; /* 
Width of clock tick in IPF ITM units */ #pragma __nomember_alignment int cpu$l_max_deviation; unsigned int cpu$l_minimum_ticks; int cpu$l_over_delta; int cpu$l_under_delta; /* */ /* System register stack base and limit */ /* */ /* */ /* Termination and slot stack bases */ /* */ #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$q_slot_stack_base; #else unsigned __int64 cpu$q_slot_stack_base; #endif /* */ /* VHPT virtual address and setup info */ /* */ /* */ /* More BUGCHECK cells: the contents of the CR.PTA register; data from SWIS */ /* (SWIS$L_GH_PS thru SWIS$Q_DTNVFLT without the fill); the region registers */ /* */ /* */ /* Virtual addresses of the physical buffers that hold the SAL-built error */ /* records for the four hardware interrupt types */ /* */ __union { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_init_error_record_va; /* VA of INIT error record buffer */ #else unsigned __int64 cpu$pq_init_error_record_va; #endif __struct { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */ #endif void *cpu$pl_init_error_record_va_l; unsigned int cpu$il_init_error_record_va_h; } cpu$r_init_error_record_va_fields; } cpu$r_init_error_record_va_overlay; __union { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_mca_error_record_va; /* VA of MCA error record buffer */ #else unsigned __int64 cpu$pq_mca_error_record_va; #endif __struct { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __short /* And set 
ptr size default to 32-bit pointers */ #endif void *cpu$pl_mca_error_record_va_l; unsigned int cpu$il_mca_error_record_va_h; } cpu$r_mca_error_record_va_fields; } cpu$r_mca_error_record_va_overlay; __union { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_cmc_error_record_va; /* VA of CMC error record buffer */ #else unsigned __int64 cpu$pq_cmc_error_record_va; #endif __struct { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */ #endif void *cpu$pl_cmc_error_record_va_l; unsigned int cpu$il_cmc_error_record_va_h; } cpu$r_cmc_error_record_va_fields; } cpu$r_cmc_error_record_va_overlay; __union { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_cpe_error_record_va; /* VA of CPE error record buffer */ #else unsigned __int64 cpu$pq_cpe_error_record_va; #endif __struct { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */ #endif void *cpu$pl_cpe_error_record_va_l; unsigned int cpu$il_cpe_error_record_va_h; } cpu$r_cpe_error_record_va_fields; } cpu$r_cpe_error_record_va_overlay; #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif __union { #pragma __nomember_alignment CBB cpu$r_cbb_cpuid_mask; /* Embedded CBB block */ __struct { /* Compatability offset cells */ __int64 cpu$q_cbb_fill_1 [6]; __union { unsigned int cpu$l_cpuid_mask; /* CPU ID in longword bitmask form */ unsigned __int64 cpu$q_cpuid_mask; /* CPU ID in quadword bitmask form */ } 
cpu$r_cbb_cpumask_data_overlay; __int64 cpu$q_cbb_fill_2 [15]; } cpu$r_cbb_cpumask_compat_overlay; } cpu$r_cbb_cpumask_overlay; /* */ /* Itanium power management data cells. */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif int cpu$l_low_power; /* Indicates if power management is on */ #pragma __nomember_alignment int cpu$l_pwr_mgmt_on; /* Incremented when Power Mgmt is on */ __int64 cpu$q_prev_nullcpu; /* Previous NULLCPU value */ __int64 cpu$q_low_power_entered; /* Number of times a low power state was entered */ /* */ /* CPU Thread data */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif __union { #pragma __nomember_alignment CBB cpu$r_cbb_cothread_mask; /* Embedded CBB block */ __struct { /* Compatability offset cells */ __int64 cpu$q_cbbthd_fill_1 [6]; unsigned __int64 cpu$q_cothread_mask; /* CPU cothreads in quadword bitmask form */ __int64 cpu$q_cbbthd_fill_2 [15]; } cpu$r_cbb_thdmask_compat_overlay; } cpu$r_cbb_thdmask_overlay; unsigned __int64 cpu$q_cothreadd_db_qfl; /* Quadword queue to CPUDB which is another thread */ unsigned __int64 cpu$q_cothreadd_db_qbl; /* (Back link) */ unsigned int cpu$l_max_cur_cothd_priority; /* The maximum priority of the cothreads on this core */ unsigned int cpu$l_num_cothreads; /* How many threads are in the same core with this CPU? 
*/ /* */ /* More processor registers to be saved at system crash */ /* */ /* */ /* BUGcheck LOG buffer area for dump hints/info prior to bugcheck */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif char cpu$r_buglog [256]; /* */ /* Data cells to keep track of owner and sequence number of floating point register banks */ /* */ #pragma __nomember_alignment unsigned int cpu$l_fp_high_owner_pid; unsigned int cpu$l_fp_low_owner_pid; unsigned __int64 cpu$q_fp_high_sequence_number; unsigned __int64 cpu$q_fp_low_sequence_number; /* */ /* Cells to hold the values in the first two protection key registers */ /* when the system crashes (only the first two are used, for FOE/iCache) */ /* */ unsigned __int64 cpu$q_bc_pkr0; unsigned __int64 cpu$q_bc_pkr1; /* */ /* Keep track of the current power/performance state in this CPU */ /* */ #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_power_accounting; /* Pointer to (possibly variable length) data to account for CPU pstate time */ #else unsigned __int64 cpu$pq_power_accounting; #endif unsigned int cpu$l_current_pstate; unsigned int cpu$l_spare_pstate_l; unsigned __int64 cpu$q_spare_pstate_q; int cpu$l_idle_exits; /* Decrement each time we exit idle. If <=0, stop saving power in idle. 
*/ unsigned int cpu$l_spare_counter; unsigned __int64 cpu$q_idle_phl_stopped; unsigned __int64 cpu$q_idle_phl_restart_abort; /* */ /* Keep track of AR.RUC and related fields of this CPU */ /* */ unsigned int cpu$l_ruc_soft_tick; unsigned int cpu$l_ruc_spare1; unsigned __int64 cpu$q_ruc_base; unsigned __int64 cpu$q_ruc_in_use_by_host_mode; /* Used to store the "CPU unavailable" time on VM */ /* */ /* Add new cells to hold the ITC and RUC values at every timer interrupt */ /* Also add new cells to hold the ITC and RUC values at the time of crash */ /* */ unsigned __int64 cpu$q_last_timer_int_itc; /* ITC value at the time of last timer interrupt */ unsigned __int64 cpu$q_last_timer_int_ruc; /* RUC value at the time of last timer interrupt */ unsigned __int64 cpu$q_bc_itc; /* ITC value at the time of crash */ unsigned __int64 cpu$q_bc_ruc; /* RUC value at the time of crash */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_zeroed_page_spte_sva; /* NOSVAPTE_V9.0 Dave Fairbanks */ #else unsigned __int64 cpu$pq_zeroed_page_spte_sva; #endif #pragma __nomember_alignment #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_proc_correrr_svap_sva; /* NOSVAPTE_V9.0 Dave Fairbanks */ #else unsigned __int64 cpu$pq_proc_correrr_svap_sva; #endif #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_proc_mchkabt_svap_sva; /* NOSVAPTE_V9.0 Dave Fairbanks */ #else unsigned __int64 cpu$pq_proc_mchkabt_svap_sva; #endif #ifdef 
__INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_untested_page_spte_sva; /* NOSVAPTE_V9.0 Dave Fairbanks */ #else unsigned __int64 cpu$pq_untested_page_spte_sva; #endif unsigned __int64 cpu$iq_lapic_total; /* Track total time accumulated by LAPIC timer */ /* */ /* New cells should be added before this comment */ /* */ __int64 cpu$q_bc_expansion [8]; /* Make sure there's space at the end for new registers */ /* */ /* End of fixed portion of the per-CPU database. A variable portion may be required */ /* by this CPU type. */ /* */ /* */ /* Beginning of quadword aligned, variable portion of the per-CPU database. */ /* Access to this is via the VARIABLE_OFFSET and VARIABLE_LENGTH data cells */ /* in the fixed portion of the database. */ /* */ } CPU; #if !defined(__VAXC) #define cpu$l_curpcb cpu$r_curpcb_overlay.cpu$l_curpcb #define cpu$l_curktb cpu$r_curpcb_overlay.cpu$l_curktb #define cpu$l_work_req cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$l_work_req #define cpu$v_inv_tbs cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_inv_tbs #define cpu$v_inv_tba cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_inv_tba #define cpu$v_bugchk cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_bugchk #define cpu$v_bugchkack cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_bugchkack #define cpu$v_recalschd cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_recalschd #define cpu$v_updastsr cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_updastsr #define cpu$v_update_hwclock cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_update_hwclock #define cpu$v_work_fqp cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_work_fqp #define cpu$v_qlost cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_qlost #define cpu$v_resched cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_resched #define cpu$v_virtcons 
cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_virtcons #define cpu$v_iopost cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_iopost #define cpu$v_inv_istream cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_inv_istream #define cpu$v_inv_tbsd cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_inv_tbsd #define cpu$v_inv_tbs_mmg cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_inv_tbs_mmg #define cpu$v_inv_tbsd_mmg cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_inv_tbsd_mmg #define cpu$v_io_int_aff cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_io_int_aff #define cpu$v_io_start_aff cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_io_start_aff #define cpu$v_update_sysptbr cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_update_sysptbr #define cpu$v_perfmon cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_perfmon #define cpu$v_read_scc cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_read_scc #define cpu$v_cpuspec1 cpu$r_work_req_overlay.cpu$r_cpuspec_overlay.cpu$r_fill_2_.cpu$r_fill_3_.cpu$v_cpuspec1 #define cpu$v_cpuspec2 cpu$r_work_req_overlay.cpu$r_cpuspec_overlay.cpu$r_fill_2_.cpu$r_fill_3_.cpu$v_cpuspec2 #define cpu$v_cpuspec3 cpu$r_work_req_overlay.cpu$r_cpuspec_overlay.cpu$r_fill_2_.cpu$r_fill_3_.cpu$v_cpuspec3 #define cpu$v_cpuspec4 cpu$r_work_req_overlay.cpu$r_cpuspec_overlay.cpu$r_fill_2_.cpu$r_fill_3_.cpu$v_cpuspec4 #define cpu$q_work_fqfl cpu$r_ifq_overlay.cpu$q_work_fqfl #define cpu$q_work_ifq cpu$r_ifq_overlay.cpu$q_work_ifq #define cpu$q_sys_hwpcb cpu$r_hwpcb_overlay.cpu$q_sys_hwpcb #define cpu$q_sys_ksp cpu$r_hwpcb_overlay.cpu$q_sys_ksp #define cpu$q_sys_astsr_asten cpu$r_ast_overlay.cpu$q_sys_astsr_asten #define cpu$v_sys_asten cpu$r_ast_overlay.cpu$r_ast_bits0.cpu$v_sys_asten #define cpu$v_sys_astsr cpu$r_ast_overlay.cpu$r_ast_bits0.cpu$v_sys_astsr #define cpu$q_term_hwpcb cpu$r_term_hwpcb_overlay.cpu$q_term_hwpcb #define cpu$q_term_ksp 
cpu$r_term_hwpcb_overlay.cpu$q_term_ksp #define cpu$q_term_astsr_asten cpu$r_term_ast_overlay.cpu$q_term_astsr_asten #define cpu$v_term_asten cpu$r_term_ast_overlay.cpu$r_term_ast_bits0.cpu$v_term_asten #define cpu$v_term_astsr cpu$r_term_ast_overlay.cpu$r_term_ast_bits0.cpu$v_term_astsr #define cpu$b_bc_flags cpu$r_bc_flags_union.cpu$b_bc_flags #define cpu$r_bc_flag_bits cpu$r_bc_flags_union.cpu$r_bc_flag_bits #define cpu$v_bc_ast_called cpu$r_bc_flag_bits.cpu$v_bc_ast_called #define cpu$v_bc_astdel cpu$r_bc_flag_bits.cpu$v_bc_astdel #define cpu$v_bc_xsave_saved cpu$r_bc_flag_bits.cpu$v_bc_xsave_saved #define cpu$v_bc_faulty_towers cpu$r_bc_flag_bits.cpu$v_bc_faulty_towers #define cpu$v_bc_imsem cpu$r_bc_flag_bits.cpu$v_bc_imsem #define cpu$v_bc_alphareg_saved cpu$r_bc_flag_bits.cpu$v_bc_alphareg_saved #define cpu$b_bc_prevmode cpu$r_bc_prevmode_union.cpu$b_bc_prevmode #define cpu$b_bc_prevstack cpu$r_bc_prevmode_union.cpu$b_bc_prevstack #define cpu$q_kernel cpu$r_fill_4_.cpu$q_kernel #define cpu$q_system_context cpu$r_fill_4_.cpu$r_fill_5_.cpu$q_system_context #define cpu$q_mpsynch cpu$r_fill_4_.cpu$r_fill_5_.cpu$q_mpsynch #define cpu$l_flags cpu$r_fill_6_.cpu$l_flags #define cpu$v_sched cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_sched #define cpu$v_forever cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_forever #define cpu$v_newprim cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_newprim #define cpu$v_pswitch cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_pswitch #define cpu$v_bc_stack cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_bc_stack #define cpu$v_bc_context cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_bc_context #define cpu$v_user_capabilities_set cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_user_capabilities_set #define cpu$v_reset_low_power cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_reset_low_power #define cpu$l_intflags cpu$r_fill_8_.cpu$l_intflags #define cpu$v_stopping cpu$r_fill_8_.cpu$r_fill_9_.cpu$v_stopping #define cpu$v_rescheduling cpu$r_fill_8_.cpu$r_fill_9_.cpu$v_rescheduling #define cpu$pq_mchk_crash_area_va 
cpu$r_mchk_crash_area_va_overlay.cpu$pq_mchk_crash_area_va #define cpu$pl_mchk_crash_area_va_l cpu$r_mchk_crash_area_va_overlay.cpu$r_mchk_crash_area_va_fields.cpu$pl_mchk_crash_area_va_l #define cpu$il_mchk_crash_area_va_h cpu$r_mchk_crash_area_va_overlay.cpu$r_mchk_crash_area_va_fields.cpu$il_mchk_crash_area_va_h #define cpu$pq_logout_area_va cpu$r_logout_area_va_overlay.cpu$pq_logout_area_va #define cpu$pl_logout_area_va_l cpu$r_logout_area_va_overlay.cpu$r_logout_area_va_fields.cpu$pl_logout_area_va_l #define cpu$il_logout_area_va_h cpu$r_logout_area_va_overlay.cpu$r_logout_area_va_fields.cpu$il_logout_area_va_h #define cpu$l_pcsample_flags cpu$r_fill_10_.cpu$l_pcsample_flags #define cpu$v_pcsample_active cpu$r_fill_10_.cpu$r_fill_11_.cpu$v_pcsample_active #define cpu$l_io_aff_flags cpu$r_fill_12_.cpu$l_io_aff_flags #define cpu$v_io_aff_fkb_inuse cpu$r_fill_12_.cpu$r_fill_13_.cpu$v_io_aff_fkb_inuse #define cpu$v_port_assigned cpu$r_fill_12_.cpu$r_fill_13_.cpu$v_port_assigned #define cpu$v_distributed_ints cpu$r_fill_12_.cpu$r_fill_13_.cpu$v_distributed_ints #define cpu$q_capabilities cpu$r_capabilities_overlay.cpu$q_capabilities #define cpu$l_capabilities cpu$r_capabilities_overlay.cpu$l_capabilities #define cpu$l_untested_page_state cpu$r_untested_overlay.cpu$l_untested_page_state #define cpu$w_untested_chunks cpu$r_untested_overlay.cpu$r_untested_bits.cpu$w_untested_chunks #define cpu$v_lastpage_tested cpu$r_untested_overlay.cpu$r_untested_bits.cpu$v_lastpage_tested #define cpu$v_mcheck cpu$r_untested_overlay.cpu$r_untested_bits.cpu$v_mcheck #define cpu$v_memory_write cpu$r_untested_overlay.cpu$r_untested_bits.cpu$v_memory_write #define cpu$l_transition_flags cpu$r_fill_14_.cpu$l_transition_flags #define cpu$v_auto_start cpu$r_fill_14_.cpu$r_fill_15_.cpu$v_auto_start #define cpu$v_nobindings cpu$r_fill_14_.cpu$r_fill_15_.cpu$v_nobindings #define cpu$pq_init_error_record_va cpu$r_init_error_record_va_overlay.cpu$pq_init_error_record_va #define 
cpu$pl_init_error_record_va_l cpu$r_init_error_record_va_overlay.cpu$r_init_error_record_va_fields.cpu$pl_init_error_record\ _va_l #define cpu$il_init_error_record_va_h cpu$r_init_error_record_va_overlay.cpu$r_init_error_record_va_fields.cpu$il_init_error_record\ _va_h #define cpu$pq_mca_error_record_va cpu$r_mca_error_record_va_overlay.cpu$pq_mca_error_record_va #define cpu$pl_mca_error_record_va_l cpu$r_mca_error_record_va_overlay.cpu$r_mca_error_record_va_fields.cpu$pl_mca_error_record_va_l #define cpu$il_mca_error_record_va_h cpu$r_mca_error_record_va_overlay.cpu$r_mca_error_record_va_fields.cpu$il_mca_error_record_va_h #define cpu$pq_cmc_error_record_va cpu$r_cmc_error_record_va_overlay.cpu$pq_cmc_error_record_va #define cpu$pl_cmc_error_record_va_l cpu$r_cmc_error_record_va_overlay.cpu$r_cmc_error_record_va_fields.cpu$pl_cmc_error_record_va_l #define cpu$il_cmc_error_record_va_h cpu$r_cmc_error_record_va_overlay.cpu$r_cmc_error_record_va_fields.cpu$il_cmc_error_record_va_h #define cpu$pq_cpe_error_record_va cpu$r_cpe_error_record_va_overlay.cpu$pq_cpe_error_record_va #define cpu$pl_cpe_error_record_va_l cpu$r_cpe_error_record_va_overlay.cpu$r_cpe_error_record_va_fields.cpu$pl_cpe_error_record_va_l #define cpu$il_cpe_error_record_va_h cpu$r_cpe_error_record_va_overlay.cpu$r_cpe_error_record_va_fields.cpu$il_cpe_error_record_va_h #define cpu$r_cbb_cpuid_mask cpu$r_cbb_cpumask_overlay.cpu$r_cbb_cpuid_mask #define cpu$l_cpuid_mask cpu$r_cbb_cpumask_overlay.cpu$r_cbb_cpumask_compat_overlay.cpu$r_cbb_cpumask_data_overlay.cpu$l_cpuid_mask #define cpu$q_cpuid_mask cpu$r_cbb_cpumask_overlay.cpu$r_cbb_cpumask_compat_overlay.cpu$r_cbb_cpumask_data_overlay.cpu$q_cpuid_mask #define cpu$r_cbb_cothread_mask cpu$r_cbb_thdmask_overlay.cpu$r_cbb_cothread_mask #define cpu$q_cothread_mask cpu$r_cbb_thdmask_overlay.cpu$r_cbb_thdmask_compat_overlay.cpu$q_cothread_mask #endif /* #if !defined(__VAXC) */ #define CPU$K_LENGTH 4376 /* Total fixed structure size */ #define CPU$C_LENGTH 
4376 /* Total fixed structure size */
/* Mask for the CPU$V_AGE_DATA flag bit in CPU$Q_SCHED_FLAGS below */
#define CPU$M_AGE_DATA 0x1
#if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */
#pragma __nomember_alignment __quadword
#else
#pragma __nomember_alignment
#endif
/*                                                                          */
/* Per-priority scheduling statistics entry.  The CPU$Q_SCHED_DATA array    */
/* in the fixed portion of the per-CPU database holds one of these          */
/* entries per process priority level (CPU$K_SCHED_LENGTH = 40 bytes =      */
/* 5 quadwords each).                                                       */
/*                                                                          */
typedef struct _sched_ds {
#pragma __nomember_alignment
    unsigned __int64 cpu$q_acc_run;         /* accumulated runtime */
    unsigned __int64 cpu$q_proc_count;      /* # of processes run at this priority level */
    unsigned __int64 cpu$q_acc_interrupt;   /* accumulated interrupt time */
    unsigned __int64 cpu$q_acc_waitime;     /* accumulated wait time */
    __union {
        unsigned __int64 cpu$q_sched_flags; /* Scheduling flags */
        __struct {
            unsigned cpu$v_age_data : 1;    /* Indicates data needs to be aged */
            unsigned cpu$v_fill_27_ : 7;    /* Unused filler bits */
        } cpu$r_fill_26_;
    } cpu$r_fill_25_;
} SCHED_DS;
/* Direct-access names for the members nested inside the overlay unions above */
#if !defined(__VAXC)
#define cpu$q_sched_flags cpu$r_fill_25_.cpu$q_sched_flags
#define cpu$v_age_data cpu$r_fill_25_.cpu$r_fill_26_.cpu$v_age_data
#endif /* #if !defined(__VAXC) */
#define CPU$K_SCHED_LENGTH 40 /* byte length of each per-priority entry */
                              /* in the SCHED_DATA data structure */
/* 64-bit (CPU_PQ) and 32-bit (CPU_PL) pointer-to-CPU typedefs */
#pragma required_pointer_size save
#pragma required_pointer_size long
typedef CPU * CPU_PQ;
#pragma required_pointer_size short
typedef CPU * CPU_PL;
#pragma required_pointer_size restore
/* Restore the member alignment and pointer-size state saved at the top of this header */
#pragma __member_alignment __restore
#ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */
#pragma __required_pointer_size __restore /* Restore the previously-defined required ptr size */
#endif
#ifdef __cplusplus
}
#endif
#pragma __standard
#endif /* __CPUDEF_LOADED */