qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

internals.h (42125B)


/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, i.e. number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
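
/*
 * Worked arithmetic (illustrative, not part of QEMU): with
 * GTIMER_SCALE == 16 the timer runs at 1e9 / 16 = 62,500,000 Hz,
 * and a tick count converts to nanoseconds with a single multiply:
 *
 *     uint64_t ns = ticks * GTIMER_SCALE;
 */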

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum value which is a magic number for function or exception return
 * when using the v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
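
/*
 * Illustrative sketch (hypothetical helper, not part of QEMU): a PC
 * value loaded on M-profile return can be screened against the two
 * magic minima above, since all the magic values sit at the very top
 * of the address space:
 */
static inline bool example_pc_is_magic_return(uint32_t pc)
{
    /* Assumption: any value at or above the minima is a candidate */
    return pc >= FNC_RETURN_MIN_MAGIC;
}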

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1.  */
        [2] = BANK_HYP, /* EL2.  */
        [3] = BANK_MON, /* EL3.  */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks.  */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
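
/*
 * Worked example (illustrative): for Hyp mode the two lookups diverge,
 * because only R13 and the SPSR are banked for Hyp:
 *
 *     bank_number(ARM_CPU_MODE_HYP)     == BANK_HYP
 *     r14_bank_number(ARM_CPU_MODE_HYP) == BANK_USRSYS
 *
 * so Hyp's SP lives in env->banked_r13[BANK_HYP] while its LR is the
 * shared env->banked_r14[BANK_USRSYS] slot.
 */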

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPSel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
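
/*
 * Worked example (illustrative, not a specific QEMU code path): at EL1
 * with PSTATE.SP == 0 (SP_EL0 selected), update_spsel(env, 1) first
 * spills xregs[31] into sp_el[0], flips PSTATE.SP, and then reloads
 * xregs[31] from sp_el[1], so the working SP tracks the newly selected
 * stack.
 */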

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
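
/*
 * Worked example (values illustrative): a level-2 translation fault in
 * domain 5 with fi->ea clear encodes as fsc = 0x7 | (5 << 4) = 0x57,
 * matching the short-descriptor DFSR layout (FS[3:0] in bits [3:0],
 * FS[4] in bit 10, Domain in bits [7:4]).
 */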

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
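
/*
 * Worked example (values illustrative): a level-3 permission fault
 * encodes as fsc = 0b001100 | 3 = 0xf; ORing in the LPAE bit 9 then
 * yields 0x20f as the returned long-format status code.
 */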

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}
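
/*
 * Illustrative round trip (an assumption about usage, not a specific
 * QEMU code path): the core index stored in the TLB drops the profile
 * bits, so converting back needs to know which profile the CPU is:
 *
 *     int core = arm_to_core_mmu_idx(ARMMMUIdx_E10_1);
 *     ARMMMUIdx idx = core_to_arm_mmu_idx(env, core);
 *     // idx == ARMMMUIdx_E10_1 again on an A-profile CPU
 */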

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/* Return true if the translation regime is using LPAE format page tables */
bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges.  */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
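
/*
 * Illustrative usage (hypothetical log line, not a specific QEMU call
 * site): intended for debug tracing, e.g.
 *
 *     qemu_log("taking exception in %s mode\n",
 *              aarch32_mode_name(env->uncached_cpsr));
 */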

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
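
/*
 * Worked example (illustrative): for Gran16K arm_granule_bits() returns
 * 14, so the page size is 1 << 14 == 16384 bytes and the low 14 bits of
 * a virtual address are the offset within the granule.
 */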

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available.  */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
    bool guarded:1;              /* guarded bit of the v8-64 PTE */
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr_with_secure: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @is_secure: security state for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type,
                               ARMMMUIdx mmu_idx, bool is_secure,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similarly, but use the security regime of @mmu_idx.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64-bits of tags.
 */
#define GMID_EL1_BS  6

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9)  /* size - 1 */
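
/*
 * Illustrative sketch (an assumption about typical use, not a specific
 * QEMU call site): callers build the descriptor with the registerfields
 * deposit accessors, e.g.
 *
 *     desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *     desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
 *     desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
 */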

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
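
/*
 * Illustrative round trip (values hypothetical): allocation tags occupy
 * bits [59:56] of a pointer, so
 *
 *     uint64_t p = address_with_allocation_tag(0x0000ffffabcd0000ull, 0xa);
 *     // p == 0x0a00ffffabcd0000ull
 *     // allocation_tag_from_addr(p) == 0xa
 */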

/* Return true if tbi bits mean that the access is checked.  */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked.  */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
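
/*
 * Worked example (illustrative): for ptr<59:55> == 0b11111 we have
 * bit55 == 1 and ptr_tag == 0xf, so (0xf + 1) & 0xf == 0 and the access
 * is unchecked whenever the TCMA bit selected by bit55 is also set.
 */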

/*
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
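
/*
 * Worked example (illustrative): with PMCR.N == 4 event counters,
 * pmu_counter_mask() returns (1ULL << 31) | 0xf == 0x8000000f;
 * bit 31 is the cycle counter and bits [3:0] the event counters.
 */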

#ifdef TARGET_AARCH64
int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg);
int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
#endif

#ifdef CONFIG_USER_ONLY
static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
#else
void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
#endif

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
#endif