qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

m_helper.c (106316B)


      1 /*
      2  * ARM generic helpers.
      3  *
      4  * This code is licensed under the GNU GPL v2 or later.
      5  *
      6  * SPDX-License-Identifier: GPL-2.0-or-later
      7  */
      8 
      9 #include "qemu/osdep.h"
     10 #include "qemu/units.h"
     11 #include "target/arm/idau.h"
     12 #include "trace.h"
     13 #include "cpu.h"
     14 #include "internals.h"
     15 #include "exec/gdbstub.h"
     16 #include "exec/helper-proto.h"
     17 #include "qemu/host-utils.h"
     18 #include "qemu/main-loop.h"
     19 #include "qemu/bitops.h"
     20 #include "qemu/crc32c.h"
     21 #include "qemu/qemu-print.h"
     22 #include "qemu/log.h"
     23 #include "exec/exec-all.h"
     24 #include <zlib.h> /* For crc32 */
     25 #include "semihosting/semihost.h"
     26 #include "sysemu/cpus.h"
     27 #include "sysemu/kvm.h"
     28 #include "qemu/range.h"
     29 #include "qapi/qapi-commands-machine-target.h"
     30 #include "qapi/error.h"
     31 #include "qemu/guest-random.h"
     32 #ifdef CONFIG_TCG
     33 #include "arm_ldst.h"
     34 #include "exec/cpu_ldst.h"
     35 #include "semihosting/common-semi.h"
     36 #endif
     37 
     38 static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
     39                          uint32_t reg, uint32_t val)
     40 {
     41     /* Only APSR is actually writable */
     42     if (!(reg & 4)) {
     43         uint32_t apsrmask = 0;
     44 
     45         if (mask & 8) {
     46             apsrmask |= XPSR_NZCV | XPSR_Q;
     47         }
     48         if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
     49             apsrmask |= XPSR_GE;
     50         }
     51         xpsr_write(env, val, apsrmask);
     52     }
     53 }
     54 
     55 static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
     56 {
     57     uint32_t mask = 0;
     58 
     59     if ((reg & 1) && el) {
     60         mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
     61     }
     62     if (!(reg & 4)) {
     63         mask |= XPSR_NZCV | XPSR_Q; /* APSR */
     64         if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
     65             mask |= XPSR_GE;
     66         }
     67     }
     68     /* EPSR reads as zero */
     69     return xpsr_read(env) & mask;
     70 }
     71 
     72 static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure)
     73 {
     74     uint32_t value = env->v7m.control[secure];
     75 
     76     if (!secure) {
     77         /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
     78         value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
     79     }
     80     return value;
     81 }
     82 
     83 #ifdef CONFIG_USER_ONLY
     84 
     85 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
     86 {
     87     uint32_t mask = extract32(maskreg, 8, 4);
     88     uint32_t reg = extract32(maskreg, 0, 8);
     89 
     90     switch (reg) {
     91     case 0 ... 7: /* xPSR sub-fields */
     92         v7m_msr_xpsr(env, mask, reg, val);
     93         break;
     94     case 20: /* CONTROL */
     95         /* There are no sub-fields that are actually writable from EL0. */
     96         break;
     97     default:
     98         /* Unprivileged writes to other registers are ignored */
     99         break;
    100     }
    101 }
    102 
    103 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
    104 {
    105     switch (reg) {
    106     case 0 ... 7: /* xPSR sub-fields */
    107         return v7m_mrs_xpsr(env, reg, 0);
    108     case 20: /* CONTROL */
    109         return v7m_mrs_control(env, 0);
    110     default:
    111         /* Unprivileged reads of other registers return zero. */
    112         return 0;
    113     }
    114 }
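
/*
 * Quick reference for the SYSm encodings decoded above (v7M/v8M MRS/MSR;
 * a sketch of the architected map, to explain the bit tests in
 * v7m_mrs_xpsr()/v7m_msr_xpsr()):
 *   0..7   xPSR sub-fields: bit 0 selects IPSR, bit 1 EPSR, and a clear
 *          bit 2 includes APSR (0=APSR, 1=IAPSR, 2=EAPSR, 3=xPSR,
 *          5=IPSR, 6=EPSR, 7=IEPSR)
 *   20     CONTROL
 * The remaining encodings (MSP/PSP, PRIMASK, BASEPRI, FAULTMASK, ...)
 * are privileged-only, which is why the user-only helpers treat them
 * as RAZ/WI above.
 */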
    115 
    116 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
    117 {
    118     /* translate.c should never generate calls here in user-only mode */
    119     g_assert_not_reached();
    120 }
    121 
    122 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
    123 {
    124     /* translate.c should never generate calls here in user-only mode */
    125     g_assert_not_reached();
    126 }
    127 
    128 void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
    129 {
    130     /* translate.c should never generate calls here in user-only mode */
    131     g_assert_not_reached();
    132 }
    133 
    134 void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
    135 {
    136     /* translate.c should never generate calls here in user-only mode */
    137     g_assert_not_reached();
    138 }
    139 
    140 void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
    141 {
    142     /* translate.c should never generate calls here in user-only mode */
    143     g_assert_not_reached();
    144 }
    145 
    146 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
    147 {
    148     /*
    149      * The TT instructions can be used by unprivileged code, but in
    150      * user-only emulation we don't have the MPU.
    151      * Luckily since we know we are NonSecure unprivileged (and that in
    152      * turn means that the A flag wasn't specified), all the bits in the
    153      * register must be zero:
    154      *  IREGION: 0 because IRVALID is 0
    155      *  IRVALID: 0 because NS
    156      *  S: 0 because NS
    157      *  NSRW: 0 because NS
    158      *  NSR: 0 because NS
    159      *  RW: 0 because unpriv and A flag not set
    160      *  R: 0 because unpriv and A flag not set
    161      *  SRVALID: 0 because NS
    162      *  MRVALID: 0 because unpriv and A flag not set
    163      *  SREGION: 0 because SRVALID is 0
    164      *  MREGION: 0 because MRVALID is 0
    165      */
    166     return 0;
    167 }
    168 
    169 #else
    170 
    171 /*
    172  * What kind of stack write are we doing? This affects how exceptions
    173  * generated during the stacking are treated.
    174  */
    175 typedef enum StackingMode {
    176     STACK_NORMAL,
    177     STACK_IGNFAULTS,
    178     STACK_LAZYFP,
    179 } StackingMode;
    180 
    181 static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
    182                             ARMMMUIdx mmu_idx, StackingMode mode)
    183 {
    184     CPUState *cs = CPU(cpu);
    185     CPUARMState *env = &cpu->env;
    186     MemTxResult txres;
    187     GetPhysAddrResult res = {};
    188     ARMMMUFaultInfo fi = {};
    189     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    190     int exc;
    191     bool exc_secure;
    192 
    193     if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
    194         /* MPU/SAU lookup failed */
    195         if (fi.type == ARMFault_QEMU_SFault) {
    196             if (mode == STACK_LAZYFP) {
    197                 qemu_log_mask(CPU_LOG_INT,
    198                               "...SecureFault with SFSR.LSPERR "
    199                               "during lazy stacking\n");
    200                 env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
    201             } else {
    202                 qemu_log_mask(CPU_LOG_INT,
    203                               "...SecureFault with SFSR.AUVIOL "
    204                               "during stacking\n");
    205                 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
    206             }
    207             env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
    208             env->v7m.sfar = addr;
    209             exc = ARMV7M_EXCP_SECURE;
    210             exc_secure = false;
    211         } else {
    212             if (mode == STACK_LAZYFP) {
    213                 qemu_log_mask(CPU_LOG_INT,
    214                               "...MemManageFault with CFSR.MLSPERR\n");
    215                 env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
    216             } else {
    217                 qemu_log_mask(CPU_LOG_INT,
    218                               "...MemManageFault with CFSR.MSTKERR\n");
    219                 env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
    220             }
    221             exc = ARMV7M_EXCP_MEM;
    222             exc_secure = secure;
    223         }
    224         goto pend_fault;
    225     }
    226     address_space_stl_le(arm_addressspace(cs, res.f.attrs), res.f.phys_addr,
    227                          value, res.f.attrs, &txres);
    228     if (txres != MEMTX_OK) {
    229         /* BusFault trying to write the data */
    230         if (mode == STACK_LAZYFP) {
    231             qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
    232             env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
    233         } else {
    234             qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
    235             env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
    236         }
    237         exc = ARMV7M_EXCP_BUS;
    238         exc_secure = false;
    239         goto pend_fault;
    240     }
    241     return true;
    242 
    243 pend_fault:
    244     /*
    245      * By pending the exception at this point we are making
    246      * the IMPDEF choice "overridden exceptions pended" (see the
    247      * MergeExcInfo() pseudocode). The other choice would be to not
    248      * pend them now and then make a choice about which to throw away
    249      * later if we have two derived exceptions.
    250      * The only case when we must not pend the exception but instead
    251      * throw it away is if we are doing the push of the callee registers
    252      * and we've already generated a derived exception (this is indicated
    253      * by the caller passing STACK_IGNFAULTS). Even in this case we will
    254      * still update the fault status registers.
    255      */
    256     switch (mode) {
    257     case STACK_NORMAL:
    258         armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
    259         break;
    260     case STACK_LAZYFP:
    261         armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
    262         break;
    263     case STACK_IGNFAULTS:
    264         break;
    265     }
    266     return false;
    267 }
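
/*
 * To summarise the failure handling above: a failed write always updates
 * the relevant fault status registers, but what is pended depends on the
 * stacking mode:
 *   STACK_NORMAL    -> armv7m_nvic_set_pending_derived()
 *   STACK_LAZYFP    -> armv7m_nvic_set_pending_lazyfp()
 *   STACK_IGNFAULTS -> nothing pended (FSRs still updated)
 * Callers chain the boolean results, as in this sketch of the pattern
 * used by v7m_push_stack() later in this file:
 *
 *   stacked_ok = stacked_ok &&
 *       v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL);
 */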
    268 
    269 static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
    270                            ARMMMUIdx mmu_idx)
    271 {
    272     CPUState *cs = CPU(cpu);
    273     CPUARMState *env = &cpu->env;
    274     MemTxResult txres;
    275     GetPhysAddrResult res = {};
    276     ARMMMUFaultInfo fi = {};
    277     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    278     int exc;
    279     bool exc_secure;
    280     uint32_t value;
    281 
    282     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
    283         /* MPU/SAU lookup failed */
    284         if (fi.type == ARMFault_QEMU_SFault) {
    285             qemu_log_mask(CPU_LOG_INT,
    286                           "...SecureFault with SFSR.AUVIOL during unstack\n");
    287             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
    288             env->v7m.sfar = addr;
    289             exc = ARMV7M_EXCP_SECURE;
    290             exc_secure = false;
    291         } else {
    292             qemu_log_mask(CPU_LOG_INT,
    293                           "...MemManageFault with CFSR.MUNSTKERR\n");
    294             env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
    295             exc = ARMV7M_EXCP_MEM;
    296             exc_secure = secure;
    297         }
    298         goto pend_fault;
    299     }
    300 
    301     value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
    302                               res.f.phys_addr, res.f.attrs, &txres);
    303     if (txres != MEMTX_OK) {
    304         /* BusFault trying to read the data */
    305         qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
    306         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
    307         exc = ARMV7M_EXCP_BUS;
    308         exc_secure = false;
    309         goto pend_fault;
    310     }
    311 
    312     *dest = value;
    313     return true;
    314 
    315 pend_fault:
    316     /*
    317      * By pending the exception at this point we are making
    318      * the IMPDEF choice "overridden exceptions pended" (see the
    319      * MergeExcInfo() pseudocode). The other choice would be to not
    320      * pend them now and then make a choice about which to throw away
    321      * later if we have two derived exceptions.
    322      */
    323     armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    324     return false;
    325 }
    326 
    327 void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
    328 {
    329     /*
    330      * Preserve FP state (because LSPACT was set and we are about
    331      * to execute an FP instruction). This corresponds to the
    332      * PreserveFPState() pseudocode.
    333      * We may throw an exception if the stacking fails.
    334      */
    335     ARMCPU *cpu = env_archcpu(env);
    336     bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    337     bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    338     bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    339     bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    340     uint32_t fpcar = env->v7m.fpcar[is_secure];
    341     bool stacked_ok = true;
    342     bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    343     bool take_exception;
    344 
    345     /* Take the iothread lock as we are going to touch the NVIC */
    346     qemu_mutex_lock_iothread();
    347 
    348     /* Check the background context had access to the FPU */
    349     if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
    350         armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
    351         env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
    352         stacked_ok = false;
    353     } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
    354         armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
    355         env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
    356         stacked_ok = false;
    357     }
    358 
    359     if (!splimviol && stacked_ok) {
    360         /* We only stack if the stack limit wasn't violated */
    361         int i;
    362         ARMMMUIdx mmu_idx;
    363 
    364         mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
    365         for (i = 0; i < (ts ? 32 : 16); i += 2) {
    366             uint64_t dn = *aa32_vfp_dreg(env, i / 2);
    367             uint32_t faddr = fpcar + 4 * i;
    368             uint32_t slo = extract64(dn, 0, 32);
    369             uint32_t shi = extract64(dn, 32, 32);
    370 
    371             if (i >= 16) {
    372                 faddr += 8; /* skip the slot for the FPSCR/VPR */
    373             }
    374             stacked_ok = stacked_ok &&
    375                 v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
    376                 v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
    377         }
    378 
    379         stacked_ok = stacked_ok &&
    380             v7m_stack_write(cpu, fpcar + 0x40,
    381                             vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
    382         if (cpu_isar_feature(aa32_mve, cpu)) {
    383             stacked_ok = stacked_ok &&
    384                 v7m_stack_write(cpu, fpcar + 0x44,
    385                                 env->v7m.vpr, mmu_idx, STACK_LAZYFP);
    386         }
    387     }
    388 
    389     /*
    390      * We definitely pended an exception, but it's possible that it
    391      * might not be able to be taken now. If its priority permits us
    392      * to take it now, then we must not update the LSPACT or FP regs,
    393      * but instead jump out to take the exception immediately.
    394      * If it's just pending and won't be taken until the current
    395      * handler exits, then we do update LSPACT and the FP regs.
    396      */
    397     take_exception = !stacked_ok &&
    398         armv7m_nvic_can_take_pending_exception(env->nvic);
    399 
    400     qemu_mutex_unlock_iothread();
    401 
    402     if (take_exception) {
    403         raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    404     }
    405 
    406     env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
    407 
    408     if (ts) {
    409         /* Clear s0 to s31 and the FPSCR and VPR */
    410         int i;
    411 
    412         for (i = 0; i < 32; i += 2) {
    413             *aa32_vfp_dreg(env, i / 2) = 0;
    414         }
    415         vfp_set_fpscr(env, 0);
    416         if (cpu_isar_feature(aa32_mve, cpu)) {
    417             env->v7m.vpr = 0;
    418         }
    419     }
    420     /*
    421      * Otherwise s0 to s15, FPSCR and VPR are UNKNOWN; we choose to leave them
    422      * unchanged.
    423      */
    424 }
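
/*
 * For reference, the lazy-stacked FP frame written above, relative to
 * FPCAR (offsets follow directly from faddr = fpcar + 4 * i with an
 * 8-byte skip once i >= 16):
 *   +0x00..+0x3c  s0-s15
 *   +0x40         FPSCR
 *   +0x44         VPR (only if MVE is implemented)
 *   +0x48..+0x84  s16-s31 (only if FPCCR.TS is set)
 */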
    425 
    426 /*
    427  * Write to v7M CONTROL.SPSEL bit for the specified security bank.
    428  * This may change the current stack pointer between Main and Process
    429  * stack pointers if it is done for the CONTROL register for the current
    430  * security state.
    431  */
    432 static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
    433                                                  bool new_spsel,
    434                                                  bool secstate)
    435 {
    436     bool old_is_psp = v7m_using_psp(env);
    437 
    438     env->v7m.control[secstate] =
    439         deposit32(env->v7m.control[secstate],
    440                   R_V7M_CONTROL_SPSEL_SHIFT,
    441                   R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
    442 
    443     if (secstate == env->v7m.secure) {
    444         bool new_is_psp = v7m_using_psp(env);
    445         uint32_t tmp;
    446 
    447         if (old_is_psp != new_is_psp) {
    448             tmp = env->v7m.other_sp;
    449             env->v7m.other_sp = env->regs[13];
    450             env->regs[13] = tmp;
    451         }
    452     }
    453 }
    454 
    455 /*
    456  * Write to v7M CONTROL.SPSEL bit. This may change the current
    457  * stack pointer between Main and Process stack pointers.
    458  */
    459 static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
    460 {
    461     write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
    462 }
    463 
    464 void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
    465 {
    466     /*
    467      * Write a new value to v7m.exception, thus transitioning into or out
    468      * of Handler mode; this may result in a change of active stack pointer.
    469      */
    470     bool new_is_psp, old_is_psp = v7m_using_psp(env);
    471     uint32_t tmp;
    472 
    473     env->v7m.exception = new_exc;
    474 
    475     new_is_psp = v7m_using_psp(env);
    476 
    477     if (old_is_psp != new_is_psp) {
    478         tmp = env->v7m.other_sp;
    479         env->v7m.other_sp = env->regs[13];
    480         env->regs[13] = tmp;
    481     }
    482 }
    483 
    484 /* Switch M profile security state between NS and S */
    485 static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
    486 {
    487     uint32_t new_ss_msp, new_ss_psp;
    488 
    489     if (env->v7m.secure == new_secstate) {
    490         return;
    491     }
    492 
    493     /*
    494      * All the banked state is accessed by looking at env->v7m.secure
    495      * except for the stack pointer; rearrange the SP appropriately.
    496      */
    497     new_ss_msp = env->v7m.other_ss_msp;
    498     new_ss_psp = env->v7m.other_ss_psp;
    499 
    500     if (v7m_using_psp(env)) {
    501         env->v7m.other_ss_psp = env->regs[13];
    502         env->v7m.other_ss_msp = env->v7m.other_sp;
    503     } else {
    504         env->v7m.other_ss_msp = env->regs[13];
    505         env->v7m.other_ss_psp = env->v7m.other_sp;
    506     }
    507 
    508     env->v7m.secure = new_secstate;
    509 
    510     if (v7m_using_psp(env)) {
    511         env->regs[13] = new_ss_psp;
    512         env->v7m.other_sp = new_ss_msp;
    513     } else {
    514         env->regs[13] = new_ss_msp;
    515         env->v7m.other_sp = new_ss_psp;
    516     }
    517 }
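
/*
 * A sketch of the stack pointer banking that the two swaps above
 * maintain. The four architected SPs (MSP_S, PSP_S, MSP_NS, PSP_NS)
 * live in:
 *   env->regs[13]          the SP currently in use
 *   env->v7m.other_sp      the other of MSP/PSP for the current
 *                          security state
 *   env->v7m.other_ss_msp  MSP of the inactive security state
 *   env->v7m.other_ss_psp  PSP of the inactive security state
 */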
    518 
    519 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
    520 {
    521     /*
    522      * Handle v7M BXNS:
    523      *  - if the return value is a magic value, do exception return (like BX)
    524      *  - otherwise bit 0 of the return value is the target security state
    525      */
    526     uint32_t min_magic;
    527 
    528     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
    529         /* Covers FNC_RETURN and EXC_RETURN magic */
    530         min_magic = FNC_RETURN_MIN_MAGIC;
    531     } else {
    532         /* EXC_RETURN magic only */
    533         min_magic = EXC_RETURN_MIN_MAGIC;
    534     }
    535 
    536     if (dest >= min_magic) {
    537         /*
    538          * This is an exception return magic value; put it where
    539          * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
    540          * Note that if we ever add gen_ss_advance() singlestep support to
    541          * M profile this should count as an "instruction execution complete"
    542          * event (compare gen_bx_excret_final_code()).
    543          */
    544         env->regs[15] = dest & ~1;
    545         env->thumb = dest & 1;
    546         HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
    547         /* notreached */
    548     }
    549 
    550     /* translate.c should have made BXNS UNDEF unless we're secure */
    551     assert(env->v7m.secure);
    552 
    553     if (!(dest & 1)) {
    554         env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    555     }
    556     switch_v7m_security_state(env, dest & 1);
    557     env->thumb = true;
    558     env->regs[15] = dest & ~1;
    559     arm_rebuild_hflags(env);
    560 }
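
/*
 * Illustrative (hypothetical) guest usage: a Secure function returning
 * to its Non-secure caller clears bit 0 of the return address so that
 * BXNS performs the S->NS transition handled above:
 *
 *     BIC   lr, lr, #1
 *     BXNS  lr
 *
 * With bit 0 set the branch stays Secure, behaving like a plain BX.
 */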
    561 
    562 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
    563 {
    564     /*
    565      * Handle v7M BLXNS:
    566      *  - bit 0 of the destination address is the target security state
    567      */
    568 
    569     /* At this point regs[15] is the address just after the BLXNS */
    570     uint32_t nextinst = env->regs[15] | 1;
    571     uint32_t sp = env->regs[13] - 8;
    572     uint32_t saved_psr;
    573 
    574     /* translate.c will have made BLXNS UNDEF unless we're secure */
    575     assert(env->v7m.secure);
    576 
    577     if (dest & 1) {
    578         /*
    579          * Target is Secure, so this is just a normal BLX,
    580          * except that the low bit doesn't indicate Thumb/not.
    581          */
    582         env->regs[14] = nextinst;
    583         env->thumb = true;
    584         env->regs[15] = dest & ~1;
    585         return;
    586     }
    587 
    588     /* Target is non-secure: first push a stack frame */
    589     if (!QEMU_IS_ALIGNED(sp, 8)) {
    590         qemu_log_mask(LOG_GUEST_ERROR,
    591                       "BLXNS with misaligned SP is UNPREDICTABLE\n");
    592     }
    593 
    594     if (sp < v7m_sp_limit(env)) {
    595         raise_exception(env, EXCP_STKOF, 0, 1);
    596     }
    597 
    598     saved_psr = env->v7m.exception;
    599     if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
    600         saved_psr |= XPSR_SFPA;
    601     }
    602 
    603     /* Note that these stores can throw exceptions on MPU faults */
    604     cpu_stl_data_ra(env, sp, nextinst, GETPC());
    605     cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());
    606 
    607     env->regs[13] = sp;
    608     env->regs[14] = 0xfeffffff;
    609     if (arm_v7m_is_handler_mode(env)) {
    610         /*
    611          * Write a dummy value to IPSR, to avoid leaking the current secure
    612          * exception number to non-secure code. This is guaranteed not
    613          * to cause write_v7m_exception() to actually change stacks.
    614          */
    615         write_v7m_exception(env, 1);
    616     }
    617     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    618     switch_v7m_security_state(env, 0);
    619     env->thumb = true;
    620     env->regs[15] = dest;
    621     arm_rebuild_hflags(env);
    622 }
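
/*
 * Net effect of the Non-secure case above: an 8-byte frame is pushed on
 * the Secure stack and LR becomes the FNC_RETURN magic value:
 *   [sp]      address of the insn after the BLXNS, with bit 0 set
 *   [sp + 4]  partial saved PSR (IPSR exception number, plus SFPA)
 *   lr        0xfeffffff (FNC_RETURN)
 * When the Non-secure function later branches to that magic value, the
 * function-return handling unwinds this frame back into the Secure caller.
 */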
    623 
    624 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
    625                                 bool spsel)
    626 {
    627     /*
    628      * Return a pointer to the location where we currently store the
    629      * stack pointer for the requested security state and thread mode.
    630      * This pointer will become invalid if the CPU state is updated
    631      * such that the stack pointers are switched around (eg changing
    632      * the SPSEL control bit).
    633      * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
    634      * Unlike that pseudocode, we require the caller to pass us in the
    635      * SPSEL control bit value; this is because we also use this
    636      * function in handling of pushing of the callee-saves registers
    637      * part of the v8M stack frame (pseudocode PushCalleeStack()),
    638      * and in the tailchain codepath the SPSEL bit comes from the exception
    639      * return magic LR value from the previous exception. The pseudocode
    640      * opencodes the stack-selection in PushCalleeStack(), but we prefer
    641      * to make this utility function generic enough to do the job.
    642      */
    643     bool want_psp = threadmode && spsel;
    644 
    645     if (secure == env->v7m.secure) {
    646         if (want_psp == v7m_using_psp(env)) {
    647             return &env->regs[13];
    648         } else {
    649             return &env->v7m.other_sp;
    650         }
    651     } else {
    652         if (want_psp) {
    653             return &env->v7m.other_ss_psp;
    654         } else {
    655             return &env->v7m.other_ss_msp;
    656         }
    657     }
    658 }
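
/*
 * The selection above as a table, where want_psp = threadmode && spsel:
 *
 *   requested secure == current?  want_psp              result
 *   yes                           matches SP in use     &env->regs[13]
 *   yes                           differs from in use   &env->v7m.other_sp
 *   no                            true                  &env->v7m.other_ss_psp
 *   no                            false                 &env->v7m.other_ss_msp
 */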
    659 
    660 static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
    661                                 uint32_t *pvec)
    662 {
    663     CPUState *cs = CPU(cpu);
    664     CPUARMState *env = &cpu->env;
    665     MemTxResult result;
    666     uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    667     uint32_t vector_entry;
    668     MemTxAttrs attrs = {};
    669     ARMMMUIdx mmu_idx;
    670     bool exc_secure;
    671 
    672     qemu_log_mask(CPU_LOG_INT,
    673                   "...loading from element %d of %s vector table at 0x%x\n",
    674                   exc, targets_secure ? "secure" : "non-secure", addr);
    675 
    676     mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
    677 
    678     /*
    679      * We don't do a get_phys_addr() here because the rules for vector
    680      * loads are special: they always use the default memory map, and
    681      * the default memory map permits reads from all addresses.
    682      * Since there's no easy way to pass through to pmsav8_mpu_lookup()
    683      * that we want this special case which would always say "yes",
    684      * we just do the SAU lookup here followed by a direct physical load.
    685      */
    686     attrs.secure = targets_secure;
    687     attrs.user = false;
    688 
    689     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
    690         V8M_SAttributes sattrs = {};
    691 
    692         v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
    693                             targets_secure, &sattrs);
    694         if (sattrs.ns) {
    695             attrs.secure = false;
    696         } else if (!targets_secure) {
    697             /*
    698              * NS access to S memory: the underlying exception which we escalate
    699              * to HardFault is SecureFault, which always targets Secure.
    700              */
    701             exc_secure = true;
    702             goto load_fail;
    703         }
    704     }
    705 
    706     vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
    707                                      attrs, &result);
    708     if (result != MEMTX_OK) {
    709         /*
    710          * Underlying exception is BusFault: its target security state
    711          * depends on BFHFNMINS.
    712          */
    713         exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    714         goto load_fail;
    715     }
    716     *pvec = vector_entry;
    717     qemu_log_mask(CPU_LOG_INT, "...loaded new PC 0x%x\n", *pvec);
    718     return true;
    719 
    720 load_fail:
    721     /*
    722      * All vector table fetch fails are reported as HardFault, with
    723      * HFSR.VECTTBL and .FORCED set. (FORCED is set because
    724      * technically the underlying exception is a SecureFault or BusFault
    725      * that is escalated to HardFault.) This is a terminal exception,
    726      * so we will either take the HardFault immediately or else enter
    727      * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
    728      * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
    729      * secure); otherwise it targets the same security state as the
    730      * underlying exception.
    731      * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
    732      */
    733     if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
    734         exc_secure = true;
    735     }
    736     env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
    737     if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
    738         env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    739     }
    740     armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    741     return false;
    742 }
    743 
    744 static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
    745 {
    746     /*
    747      * Return the integrity signature value for the callee-saves
    748      * stack frame section. @lr is the exception return payload/LR value
    749      * whose FType bit forms bit 0 of the signature if FP is present.
    750      */
    751     uint32_t sig = 0xfefa125a;
    752 
    753     if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
    754         || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
    755         sig |= 1;
    756     }
    757     return sig;
    758 }
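
/*
 * So the only two possible signature values are 0xfefa125a (the frame
 * includes FP state: FP present and FType clear) and 0xfefa125b (a
 * standard frame). Bit 0 mirrors EXCRET.FType, so the unstacking code
 * can check that the signature matches the kind of frame it is popping.
 */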
    759 
    760 static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
    761                                   bool ignore_faults)
    762 {
    763     /*
    764      * For v8M, push the callee-saves register part of the stack frame.
    765      * Compare the v8M pseudocode PushCalleeStack().
    766      * In the tailchaining case this may not be the current stack.
    767      */
    768     CPUARMState *env = &cpu->env;
    769     uint32_t *frame_sp_p;
    770     uint32_t frameptr;
    771     ARMMMUIdx mmu_idx;
    772     bool stacked_ok;
    773     uint32_t limit;
    774     bool want_psp;
    775     uint32_t sig;
    776     StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;
    777 
    778     if (dotailchain) {
    779         bool mode = lr & R_V7M_EXCRET_MODE_MASK;
    780         bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
    781             !mode;
    782 
    783         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
    784         frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
    785                                     lr & R_V7M_EXCRET_SPSEL_MASK);
    786         want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
    787         if (want_psp) {
    788             limit = env->v7m.psplim[M_REG_S];
    789         } else {
    790             limit = env->v7m.msplim[M_REG_S];
    791         }
    792     } else {
    793         mmu_idx = arm_mmu_idx(env);
    794         frame_sp_p = &env->regs[13];
    795         limit = v7m_sp_limit(env);
    796     }
    797 
    798     frameptr = *frame_sp_p - 0x28;
    799     if (frameptr < limit) {
    800         /*
    801          * Stack limit failure: set SP to the limit value, and generate
    802          * STKOF UsageFault. Stack pushes below the limit must not be
    803          * performed. It is IMPDEF whether pushes above the limit are
    804          * performed; we choose not to.
    805          */
    806         qemu_log_mask(CPU_LOG_INT,
    807                       "...STKOF during callee-saves register stacking\n");
    808         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
    809         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
    810                                 env->v7m.secure);
    811         *frame_sp_p = limit;
    812         return true;
    813     }
    814 
    815     /*
    816      * Write as much of the stack frame as we can. A write failure may
    817      * cause us to pend a derived exception.
    818      */
    819     sig = v7m_integrity_sig(env, lr);
    820     stacked_ok =
    821         v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
    822         v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
    823         v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
    824         v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
    825         v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
    826         v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
    827         v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
    828         v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
    829         v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
    830 
    831     /* Update SP regardless of whether any of the stack accesses failed. */
    832     *frame_sp_p = frameptr;
    833 
    834     return !stacked_ok;
    835 }
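
/*
 * Callee-saves frame written above (0x28 bytes; compare the v8M
 * PushCalleeStack() pseudocode):
 *   +0x00         integrity signature (see v7m_integrity_sig())
 *   +0x04         reserved, not written
 *   +0x08..+0x24  r4-r11
 */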
    836 
    837 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
    838                                 bool ignore_stackfaults)
    839 {
    840     /*
    841      * Do the "take the exception" parts of exception entry,
    842      * but not the pushing of state to the stack. This is
    843      * similar to the pseudocode ExceptionTaken() function.
    844      */
    845     CPUARMState *env = &cpu->env;
    846     uint32_t addr;
    847     bool targets_secure;
    848     int exc;
    849     bool push_failed = false;
    850 
    851     armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    852     qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
    853                   targets_secure ? "secure" : "nonsecure", exc);
    854 
    855     if (dotailchain) {
    856         /* Sanitize LR FType and PREFIX bits */
    857         if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
    858             lr |= R_V7M_EXCRET_FTYPE_MASK;
    859         }
    860         lr = deposit32(lr, 24, 8, 0xff);
    861     }
    862 
    863     if (arm_feature(env, ARM_FEATURE_V8)) {
    864         if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
    865             (lr & R_V7M_EXCRET_S_MASK)) {
    866             /*
    867              * The background code (the owner of the registers in the
    868              * exception frame) is Secure. This means it may either already
    869              * have or now needs to push callee-saves registers.
    870              */
    871             if (targets_secure) {
    872                 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
    873                     /*
    874                      * We took an exception from Secure to NonSecure
    875                      * (which means the callee-saved registers got stacked)
    876                      * and are now tailchaining to a Secure exception.
    877                      * Clear DCRS so eventual return from this Secure
    878                      * exception unstacks the callee-saved registers.
    879                      */
    880                     lr &= ~R_V7M_EXCRET_DCRS_MASK;
    881                 }
    882             } else {
    883                 /*
    884                  * We're going to a non-secure exception; push the
    885                  * callee-saves registers to the stack now, if they're
    886                  * not already saved.
    887                  */
    888                 if (lr & R_V7M_EXCRET_DCRS_MASK &&
    889                     !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
    890                     push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
    891                                                         ignore_stackfaults);
    892                 }
    893                 lr |= R_V7M_EXCRET_DCRS_MASK;
    894             }
    895         }
    896 
    897         lr &= ~R_V7M_EXCRET_ES_MASK;
    898         if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
    899             lr |= R_V7M_EXCRET_ES_MASK;
    900         }
    901         lr &= ~R_V7M_EXCRET_SPSEL_MASK;
    902         if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
    903             lr |= R_V7M_EXCRET_SPSEL_MASK;
    904         }
    905 
    906         /*
    907          * Clear registers if necessary to prevent non-secure exception
    908          * code being able to see register values from secure code.
    909          * Where register values become architecturally UNKNOWN we leave
    910          * them with their previous values. v8.1M is tighter than v8.0M
    911          * here and always zeroes the caller-saved registers regardless
    912          * of the security state the exception is targeting.
    913          */
    914         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
    915             if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
    916                 /*
    917                  * Always clear the caller-saved registers (they have been
    918                  * pushed to the stack earlier in v7m_push_stack()).
    919                  * Clear callee-saved registers if the background code is
    920                  * Secure (in which case these regs were saved in
    921                  * v7m_push_callee_stack()).
    922                  */
    923                 int i;
    924                 /*
    925                  * r4..r11 are callee-saves, zero only if background
    926                  * state was Secure (EXCRET.S == 1) and exception
    927                  * targets Non-secure state
    928                  */
    929                 bool zero_callee_saves = !targets_secure &&
    930                     (lr & R_V7M_EXCRET_S_MASK);
    931 
    932                 for (i = 0; i < 13; i++) {
    933                     if (i < 4 || i > 11 || zero_callee_saves) {
    934                         env->regs[i] = 0;
    935                     }
    936                 }
    937                 /* Clear EAPSR */
    938                 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
    939             }
    940         }
    941     }
    942 
    943     if (push_failed && !ignore_stackfaults) {
    944         /*
    945          * Derived exception on callee-saves register stacking:
    946          * we might now want to take a different exception which
    947          * targets a different security state, so try again from the top.
    948          */
    949         qemu_log_mask(CPU_LOG_INT,
    950                       "...derived exception on callee-saves register stacking\n");
    951         v7m_exception_taken(cpu, lr, true, true);
    952         return;
    953     }
    954 
    955     if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
    956         /* Vector load failed: derived exception */
    957         qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load\n");
    958         v7m_exception_taken(cpu, lr, true, true);
    959         return;
    960     }
    961 
    962     /*
    963      * Now we've done everything that might cause a derived exception
    964      * we can go ahead and activate whichever exception we're going to
    965      * take (which might now be the derived exception).
    966      */
    967     armv7m_nvic_acknowledge_irq(env->nvic);
    968 
    969     /* Switch to target security state -- must do this before writing SPSEL */
    970     switch_v7m_security_state(env, targets_secure);
    971     write_v7m_control_spsel(env, 0);
    972     arm_clear_exclusive(env);
    973     /* Clear SFPA and FPCA (has no effect if no FPU) */
    974     env->v7m.control[M_REG_S] &=
    975         ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    976     /* Clear IT bits */
    977     env->condexec_bits = 0;
    978     env->regs[14] = lr;
    979     env->regs[15] = addr & 0xfffffffe;
    980     env->thumb = addr & 1;
    981     arm_rebuild_hflags(env);
    982 }
    983 
    984 static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
    985                              bool apply_splim)
    986 {
    987     /*
    988      * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
    989      * that we will need later in order to do lazy FP reg stacking.
    990      */
    991     bool is_secure = env->v7m.secure;
    992     void *nvic = env->nvic;
    993     /*
    994      * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
    995      * are banked and we want to update the bit in the bank for the
    996      * current security state; and in one case we want to specifically
    997      * update the NS banked version of a bit even if we are secure.
    998      */
    999     uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
   1000     uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
   1001     uint32_t *fpccr = &env->v7m.fpccr[is_secure];
   1002     bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;
   1003 
   1004     env->v7m.fpcar[is_secure] = frameptr & ~0x7;
   1005 
   1006     if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
   1007         bool splimviol;
   1008         uint32_t splim = v7m_sp_limit(env);
   1009         bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
   1010             (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);
   1011 
   1012         splimviol = !ign && frameptr < splim;
   1013         *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
   1014     }
   1015 
   1016     *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
   1017 
   1018     *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);
   1019 
   1020     *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);
   1021 
   1022     *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
   1023                         !arm_v7m_is_handler_mode(env));
   1024 
   1025     hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
   1026     *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
   1027 
   1028     bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
   1029     *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
   1030 
   1031     mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
   1032     *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);
   1033 
   1034     ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
   1035     *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);
   1036 
   1037     monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
   1038     *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);
   1039 
   1040     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
   1041         s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
   1042         *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);
   1043 
   1044         sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
   1045         *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
   1046     }
   1047 }
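
/*
 * Summary of the banking behaviour implemented above:
 *   always in fpccr[M_REG_S] (unbanked): S, HFRDY, BFRDY, MONRDY,
 *     and (with the Security Extension) SFRDY and the Secure UFRDY
 *   in fpccr[is_secure] (current bank): LSPACT, USER, THREAD, MMRDY,
 *     SPLIMVIOL
 *   always in fpccr[M_REG_NS]: the Non-secure UFRDY
 */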
   1048 
   1049 void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
   1050 {
   1051     /* fptr is the value of Rn, the frame pointer we store the FP regs to */
   1052     ARMCPU *cpu = env_archcpu(env);
   1053     bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
   1054     bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
   1055     uintptr_t ra = GETPC();
   1056 
   1057     assert(env->v7m.secure);
   1058 
   1059     if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
   1060         return;
   1061     }
   1062 
   1063     /* Check access to the coprocessor is permitted */
   1064     if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
   1065         raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
   1066     }
   1067 
   1068     if (lspact) {
   1069         /* LSPACT should not be active when there is active FP state */
   1070         raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
   1071     }
   1072 
   1073     if (fptr & 7) {
   1074         raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
   1075     }
   1076 
   1077     /*
   1078      * Note that we do not use v7m_stack_write() here, because the
   1079      * accesses should not set the FSR bits for stacking errors if they
   1080      * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
   1081      * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
   1082      * and longjmp out.
   1083      */
   1084     if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
   1085         bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
   1086         int i;
   1087 
   1088         for (i = 0; i < (ts ? 32 : 16); i += 2) {
   1089             uint64_t dn = *aa32_vfp_dreg(env, i / 2);
   1090             uint32_t faddr = fptr + 4 * i;
   1091             uint32_t slo = extract64(dn, 0, 32);
   1092             uint32_t shi = extract64(dn, 32, 32);
   1093 
   1094             if (i >= 16) {
   1095                 faddr += 8; /* skip the slot for the FPSCR */
   1096             }
   1097             cpu_stl_data_ra(env, faddr, slo, ra);
   1098             cpu_stl_data_ra(env, faddr + 4, shi, ra);
   1099         }
   1100         cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
   1101         if (cpu_isar_feature(aa32_mve, cpu)) {
   1102             cpu_stl_data_ra(env, fptr + 0x44, env->v7m.vpr, ra);
   1103         }
   1104 
   1105         /*
   1106          * If TS is 0 then s0 to s15, FPSCR and VPR are UNKNOWN; we choose to
   1107          * leave them unchanged, matching our choice in v7m_preserve_fp_state.
   1108          */
   1109         if (ts) {
   1110             for (i = 0; i < 32; i += 2) {
   1111                 *aa32_vfp_dreg(env, i / 2) = 0;
   1112             }
   1113             vfp_set_fpscr(env, 0);
   1114             if (cpu_isar_feature(aa32_mve, cpu)) {
   1115                 env->v7m.vpr = 0;
   1116             }
   1117         }
   1118     } else {
   1119         v7m_update_fpccr(env, fptr, false);
   1120     }
   1121 
   1122     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
   1123 }
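
/*
 * Illustrative (hypothetical) guest-side pattern: Secure code brackets a
 * call into Non-secure code with VLSTM/VLLDM so that FP state is
 * protected without an unconditional full save:
 *
 *     VLSTM  r0          @ r0 points to an 8-byte-aligned save area
 *     BLXNS  r1          @ call the Non-secure function
 *     VLLDM  r0          @ restore (or just clear LSPACT if FP untouched)
 */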
   1124 
   1125 void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
   1126 {
   1127     ARMCPU *cpu = env_archcpu(env);
   1128     uintptr_t ra = GETPC();
   1129 
   1130     /* fptr is the value of Rn, the frame pointer we load the FP regs from */
   1131     assert(env->v7m.secure);
   1132 
   1133     if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
   1134         return;
   1135     }
   1136 
   1137     /* Check access to the coprocessor is permitted */
   1138     if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
   1139         raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
   1140     }
   1141 
   1142     if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
   1143         /* State in FP is still valid */
   1144         env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
   1145     } else {
   1146         bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
   1147         int i;
   1148         uint32_t fpscr;
   1149 
   1150         if (fptr & 7) {
   1151             raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
   1152         }
   1153 
   1154         for (i = 0; i < (ts ? 32 : 16); i += 2) {
   1155             uint32_t slo, shi;
   1156             uint64_t dn;
   1157             uint32_t faddr = fptr + 4 * i;
   1158 
   1159             if (i >= 16) {
   1160                 faddr += 8; /* skip the slot for the FPSCR and VPR */
   1161             }
   1162 
   1163             slo = cpu_ldl_data_ra(env, faddr, ra);
   1164             shi = cpu_ldl_data_ra(env, faddr + 4, ra);
   1165 
   1166             dn = (uint64_t) shi << 32 | slo;
   1167             *aa32_vfp_dreg(env, i / 2) = dn;
   1168         }
   1169         fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
   1170         vfp_set_fpscr(env, fpscr);
   1171         if (cpu_isar_feature(aa32_mve, cpu)) {
   1172             env->v7m.vpr = cpu_ldl_data_ra(env, fptr + 0x44, ra);
   1173         }
   1174     }
   1175 
   1176     env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
   1177 }
   1178 
   1179 static bool v7m_push_stack(ARMCPU *cpu)
   1180 {
   1181     /*
   1182      * Do the "set up stack frame" part of exception entry,
   1183      * similar to pseudocode PushStack().
   1184      * Return true if we generate a derived exception (and so
   1185      * should ignore further stack faults trying to process
   1186      * that derived exception.)
   1187      */
   1188     bool stacked_ok = true, limitviol = false;
   1189     CPUARMState *env = &cpu->env;
   1190     uint32_t xpsr = xpsr_read(env);
   1191     uint32_t frameptr = env->regs[13];
   1192     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
   1193     uint32_t framesize;
   1194     bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
   1195 
   1196     if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
   1197         (env->v7m.secure || nsacr_cp10)) {
   1198         if (env->v7m.secure &&
   1199             env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
   1200             framesize = 0xa8;
   1201         } else {
   1202             framesize = 0x68;
   1203         }
   1204     } else {
   1205         framesize = 0x20;
   1206     }
   1207 
   1208     /* Align stack pointer if the guest wants that */
   1209     if ((frameptr & 4) &&
   1210         (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
   1211         frameptr -= 4;
   1212         xpsr |= XPSR_SPREALIGN;
   1213     }
   1214 
   1215     xpsr &= ~XPSR_SFPA;
   1216     if (env->v7m.secure &&
   1217         (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
   1218         xpsr |= XPSR_SFPA;
   1219     }
   1220 
   1221     frameptr -= framesize;
   1222 
   1223     if (arm_feature(env, ARM_FEATURE_V8)) {
   1224         uint32_t limit = v7m_sp_limit(env);
   1225 
   1226         if (frameptr < limit) {
   1227             /*
   1228              * Stack limit failure: set SP to the limit value, and generate
   1229              * STKOF UsageFault. Stack pushes below the limit must not be
   1230              * performed. It is IMPDEF whether pushes above the limit are
   1231              * performed; we choose not to.
   1232              */
   1233             qemu_log_mask(CPU_LOG_INT,
   1234                           "...STKOF during stacking\n");
   1235             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
   1236             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   1237                                     env->v7m.secure);
   1238             env->regs[13] = limit;
   1239             /*
   1240              * We won't try to perform any further memory accesses but
   1241              * we must continue through the following code to check for
   1242              * permission faults during FPU state preservation, and we
   1243              * must update FPCCR if lazy stacking is enabled.
   1244              */
   1245             limitviol = true;
   1246             stacked_ok = false;
   1247         }
   1248     }
   1249 
   1250     /*
   1251      * Write as much of the stack frame as we can. If we fail a stack
   1252      * write this will result in a derived exception being pended
   1253      * (which may be taken in preference to the one we started with
   1254      * if it has higher priority).
   1255      */
   1256     stacked_ok = stacked_ok &&
   1257         v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
   1258         v7m_stack_write(cpu, frameptr + 4, env->regs[1],
   1259                         mmu_idx, STACK_NORMAL) &&
   1260         v7m_stack_write(cpu, frameptr + 8, env->regs[2],
   1261                         mmu_idx, STACK_NORMAL) &&
   1262         v7m_stack_write(cpu, frameptr + 12, env->regs[3],
   1263                         mmu_idx, STACK_NORMAL) &&
   1264         v7m_stack_write(cpu, frameptr + 16, env->regs[12],
   1265                         mmu_idx, STACK_NORMAL) &&
   1266         v7m_stack_write(cpu, frameptr + 20, env->regs[14],
   1267                         mmu_idx, STACK_NORMAL) &&
   1268         v7m_stack_write(cpu, frameptr + 24, env->regs[15],
   1269                         mmu_idx, STACK_NORMAL) &&
   1270         v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
   1271 
   1272     if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
   1273         /* FPU is active, try to save its registers */
   1274         bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
   1275         bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
   1276 
   1277         if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
   1278             qemu_log_mask(CPU_LOG_INT,
   1279                           "...SecureFault because LSPACT and FPCA both set\n");
   1280             env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
   1281             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   1282         } else if (!env->v7m.secure && !nsacr_cp10) {
   1283             qemu_log_mask(CPU_LOG_INT,
   1284                           "...Secure UsageFault with CFSR.NOCP because "
   1285                           "NSACR.CP10 prevents stacking FP regs\n");
   1286             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
   1287             env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
   1288         } else {
   1289             if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
   1290                 /* Lazy stacking disabled, save registers now */
   1291                 int i;
   1292                 bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
   1293                                                  arm_current_el(env) != 0);
   1294 
   1295                 if (stacked_ok && !cpacr_pass) {
   1296                     /*
   1297                      * Take UsageFault if CPACR forbids access. The pseudocode
   1298                      * here does a full CheckCPEnabled() but we know the NSACR
   1299                      * check can never fail as we have already handled that.
   1300                      */
   1301                     qemu_log_mask(CPU_LOG_INT,
   1302                                   "...UsageFault with CFSR.NOCP because "
   1303                                   "CPACR.CP10 prevents stacking FP regs\n");
   1304                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   1305                                             env->v7m.secure);
   1306                     env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
   1307                     stacked_ok = false;
   1308                 }
   1309 
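                        /*
                         * Stack the FP registers one D register (two words)
                         * at a time; the 0xa8-byte frame layout also holds
                         * s16..s31, otherwise only s0..s15 are stacked.
                         */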
   1310                 for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
   1311                     uint64_t dn = *aa32_vfp_dreg(env, i / 2);
   1312                     uint32_t faddr = frameptr + 0x20 + 4 * i;
   1313                     uint32_t slo = extract64(dn, 0, 32);
   1314                     uint32_t shi = extract64(dn, 32, 32);
   1315 
   1316                     if (i >= 16) {
   1317                         faddr += 8; /* skip the slot for the FPSCR and VPR */
   1318                     }
   1319                     stacked_ok = stacked_ok &&
   1320                         v7m_stack_write(cpu, faddr, slo,
   1321                                         mmu_idx, STACK_NORMAL) &&
   1322                         v7m_stack_write(cpu, faddr + 4, shi,
   1323                                         mmu_idx, STACK_NORMAL);
   1324                 }
   1325                 stacked_ok = stacked_ok &&
   1326                     v7m_stack_write(cpu, frameptr + 0x60,
   1327                                     vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
   1328                 if (cpu_isar_feature(aa32_mve, cpu)) {
   1329                     stacked_ok = stacked_ok &&
   1330                         v7m_stack_write(cpu, frameptr + 0x64,
   1331                                         env->v7m.vpr, mmu_idx, STACK_NORMAL);
   1332                 }
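                        /*
                         * Wipe the FP state now that it has been stacked; if
                         * CPACR denied access, nothing was stacked and the
                         * state is left alone.
                         */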
   1333                 if (cpacr_pass) {
   1334                     for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
   1335                         *aa32_vfp_dreg(env, i / 2) = 0;
   1336                     }
   1337                     vfp_set_fpscr(env, 0);
   1338                     if (cpu_isar_feature(aa32_mve, cpu)) {
   1339                         env->v7m.vpr = 0;
   1340                     }
   1341                 }
   1342             } else {
   1343                 /* Lazy stacking enabled, save necessary info to stack later */
   1344                 v7m_update_fpccr(env, frameptr + 0x20, true);
   1345             }
   1346         }
   1347     }
   1348 
   1349     /*
   1350      * If we broke a stack limit then SP was already updated earlier;
   1351      * otherwise we update SP regardless of whether any of the stack
   1352      * accesses failed or we took some other kind of fault.
   1353      */
   1354     if (!limitviol) {
   1355         env->regs[13] = frameptr;
   1356     }
   1357 
   1358     return !stacked_ok;
   1359 }
   1360 
   1361 static void do_v7m_exception_exit(ARMCPU *cpu)
   1362 {
   1363     CPUARMState *env = &cpu->env;
   1364     uint32_t excret;
   1365     uint32_t xpsr, xpsr_mask;
   1366     bool ufault = false;
   1367     bool sfault = false;
   1368     bool return_to_sp_process;
   1369     bool return_to_handler;
   1370     bool rettobase = false;
   1371     bool exc_secure = false;
   1372     bool return_to_secure;
   1373     bool ftype;
   1374     bool restore_s16_s31 = false;
   1375 
   1376     /*
   1377      * If we're not in Handler mode then jumps to magic exception-exit
   1378      * addresses don't have magic behaviour. However for the v8M
   1379      * security extensions the magic secure-function-return has to
   1380      * work in thread mode too, so to avoid doing an extra check in
   1381      * the generated code we allow exception-exit magic to also cause the
   1382      * internal exception and bring us here in thread mode. Correct code
   1383      * will never try to do this (the following insn fetch will always
   1384      * fault) so the overhead of having taken an unnecessary exception
   1385      * doesn't matter.
   1386      */
   1387     if (!arm_v7m_is_handler_mode(env)) {
   1388         return;
   1389     }
   1390 
   1391     /*
   1392      * In the spec pseudocode ExceptionReturn() is called directly
   1393      * from BXWritePC() and gets the full target PC value including
   1394      * bit zero. In QEMU's implementation we treat it as a normal
   1395      * jump-to-register (which is then caught later on), and so split
   1396      * the target value up between env->regs[15] and env->thumb in
   1397      * gen_bx(). Reconstitute it.
   1398      */
   1399     excret = env->regs[15];
   1400     if (env->thumb) {
   1401         excret |= 1;
   1402     }
   1403 
   1404     qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
   1405                   " previous exception %d\n",
   1406                   excret, env->v7m.exception);
   1407 
   1408     if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
   1409         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
   1410                       "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
   1411                       excret);
   1412     }
   1413 
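            /* EXC_RETURN.FTYPE set means a basic frame, clear means an FP frame */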
   1414     ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
   1415 
   1416     if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
   1417         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
   1418                       "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
   1419                       "if FPU not present\n",
   1420                       excret);
   1421         ftype = true;
   1422     }
   1423 
   1424     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
   1425         /*
   1426          * EXC_RETURN.ES validation check (R_SMFL). We must do this before
   1427          * we pick which FAULTMASK to clear.
   1428          */
   1429         if (!env->v7m.secure &&
   1430             ((excret & R_V7M_EXCRET_ES_MASK) ||
   1431              !(excret & R_V7M_EXCRET_DCRS_MASK))) {
   1432             sfault = true;
   1433             /* For all other purposes, treat ES as 0 (R_HXSR) */
   1434             excret &= ~R_V7M_EXCRET_ES_MASK;
   1435         }
   1436         exc_secure = excret & R_V7M_EXCRET_ES_MASK;
   1437     }
   1438 
   1439     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
   1440         /*
   1441          * Auto-clear FAULTMASK on return from other than NMI.
   1442          * If the security extension is implemented then this only
   1443          * happens if the raw execution priority is >= 0; the
   1444          * value of the ES bit in the exception return value indicates
   1445          * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
   1446          */
   1447         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
   1448             if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
   1449                 env->v7m.faultmask[exc_secure] = 0;
   1450             }
   1451         } else {
   1452             env->v7m.faultmask[M_REG_NS] = 0;
   1453         }
   1454     }
   1455 
   1456     switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
   1457                                      exc_secure)) {
   1458     case -1:
   1459         /* attempt to exit an exception that isn't active */
   1460         ufault = true;
   1461         break;
   1462     case 0:
   1463         /* still an irq active now */
   1464         break;
   1465     case 1:
   1466         /*
   1467          * We returned to base exception level, no nesting.
   1468          * (In the pseudocode this is written using "NestedActivation != 1"
   1469          * where we have 'rettobase == false'.)
   1470          */
   1471         rettobase = true;
   1472         break;
   1473     default:
   1474         g_assert_not_reached();
   1475     }
   1476 
   1477     return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
   1478     return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
   1479     return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
   1480         (excret & R_V7M_EXCRET_S_MASK);
   1481 
   1482     if (arm_feature(env, ARM_FEATURE_V8)) {
   1483         if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
   1484             /*
   1485              * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
   1486              * we choose to take the UsageFault.
   1487              */
   1488             if ((excret & R_V7M_EXCRET_S_MASK) ||
   1489                 (excret & R_V7M_EXCRET_ES_MASK) ||
   1490                 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
   1491                 ufault = true;
   1492             }
   1493         }
   1494         if (excret & R_V7M_EXCRET_RES0_MASK) {
   1495             ufault = true;
   1496         }
   1497     } else {
   1498         /* For v7M we only recognize certain combinations of the low bits */
   1499         switch (excret & 0xf) {
   1500         case 1: /* Return to Handler */
   1501             break;
   1502         case 13: /* Return to Thread using Process stack */
   1503         case 9: /* Return to Thread using Main stack */
   1504             /*
   1505              * We only need to check NONBASETHRDENA for v7M, because in
   1506              * v8M this bit does not exist (it is RES1).
   1507              */
   1508             if (!rettobase &&
   1509                 !(env->v7m.ccr[env->v7m.secure] &
   1510                   R_V7M_CCR_NONBASETHRDENA_MASK)) {
   1511                 ufault = true;
   1512             }
   1513             break;
   1514         default:
   1515             ufault = true;
   1516         }
   1517     }
   1518 
   1519     /*
   1520      * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
   1521      * Handler mode (and will be until we write the new XPSR.Interrupt
   1522      * field) this does not switch around the current stack pointer.
   1523      * We must do this before we do any kind of tailchaining, including
   1524      * for the derived exceptions on integrity check failures, or we will
   1525      * give the guest an incorrect EXCRET.SPSEL value on exception entry.
   1526      */
   1527     write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
   1528 
   1529     /*
   1530      * Clear scratch FP values left in caller saved registers; this
   1531      * must happen before any kind of tail chaining.
   1532      */
   1533     if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
   1534         (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
   1535         if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
   1536             env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
   1537             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   1538             qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
   1539                           "stackframe: error during lazy state deactivation\n");
   1540             v7m_exception_taken(cpu, excret, true, false);
   1541             return;
   1542         } else {
   1543             if (arm_feature(env, ARM_FEATURE_V8_1M)) {
   1544                 /* v8.1M adds this NOCP check */
   1545                 bool nsacr_pass = exc_secure ||
   1546                     extract32(env->v7m.nsacr, 10, 1);
   1547                 bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
   1548                 if (!nsacr_pass) {
   1549                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
   1550                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
   1551                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
   1552                         "stackframe: NSACR prevents clearing FPU registers\n");
   1553                     v7m_exception_taken(cpu, excret, true, false);
   1554                     return;
   1555                 } else if (!cpacr_pass) {
   1556                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   1557                                             exc_secure);
   1558                     env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
   1559                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
   1560                         "stackframe: CPACR prevents clearing FPU registers\n");
   1561                     v7m_exception_taken(cpu, excret, true, false);
   1562                     return;
   1563                 }
   1564             }
   1565             /* Clear s0..s15, FPSCR and VPR */
   1566             int i;
   1567 
   1568             for (i = 0; i < 16; i += 2) {
   1569                 *aa32_vfp_dreg(env, i / 2) = 0;
   1570             }
   1571             vfp_set_fpscr(env, 0);
   1572             if (cpu_isar_feature(aa32_mve, cpu)) {
   1573                 env->v7m.vpr = 0;
   1574             }
   1575         }
   1576     }
   1577 
   1578     if (sfault) {
   1579         env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
   1580         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   1581         qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
   1582                       "stackframe: failed EXC_RETURN.ES validity check\n");
   1583         v7m_exception_taken(cpu, excret, true, false);
   1584         return;
   1585     }
   1586 
   1587     if (ufault) {
   1588         /*
   1589          * Bad exception return: instead of popping the exception
   1590          * stack, directly take a usage fault on the current stack.
   1591          */
   1592         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
   1593         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
   1594         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
   1595                       "stackframe: failed exception return integrity check\n");
   1596         v7m_exception_taken(cpu, excret, true, false);
   1597         return;
   1598     }
   1599 
   1600     /*
   1601      * Tailchaining: if there is currently a pending exception that
   1602      * is high enough priority to preempt execution at the level we're
   1603      * about to return to, then just directly take that exception now,
   1604      * avoiding an unstack-and-then-stack. Note that since we have
   1605      * deactivated the previous exception by calling armv7m_nvic_complete_irq(),
   1606      * our current execution priority is already the execution priority we are
   1607      * returning to -- none of the state we would unstack or set based on
   1608      * the EXCRET value affects it.
   1609      */
   1610     if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
   1611         qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
   1612         v7m_exception_taken(cpu, excret, true, false);
   1613         return;
   1614     }
   1615 
   1616     switch_v7m_security_state(env, return_to_secure);
   1617 
   1618     {
   1619         /*
   1620          * The stack pointer we should be reading the exception frame from
   1621          * depends on bits in the magic exception return type value (and
   1622          * for v8M isn't necessarily the stack pointer we will eventually
   1623          * end up resuming execution with). Get a pointer to the location
   1624          * in the CPU state struct where the SP we need is currently being
   1625          * stored; we will use and modify it in place.
   1626          * We use this limited C variable scope so we don't accidentally
   1627          * use 'frame_sp_p' after we do something that makes it invalid.
   1628          */
   1629         bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
   1630         uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
   1631                                               return_to_secure,
   1632                                               !return_to_handler,
   1633                                               spsel);
   1634         uint32_t frameptr = *frame_sp_p;
   1635         bool pop_ok = true;
   1636         ARMMMUIdx mmu_idx;
   1637         bool return_to_priv = return_to_handler ||
   1638             !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
   1639 
   1640         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
   1641                                                         return_to_priv);
   1642 
   1643         if (!QEMU_IS_ALIGNED(frameptr, 8) &&
   1644             arm_feature(env, ARM_FEATURE_V8)) {
   1645             qemu_log_mask(LOG_GUEST_ERROR,
   1646                           "M profile exception return with non-8-aligned SP "
   1647                           "for destination state is UNPREDICTABLE\n");
   1648         }
   1649 
   1650         /* Do we need to pop callee-saved registers? */
   1651         if (return_to_secure &&
   1652             ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
   1653              (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
   1654             uint32_t actual_sig;
   1655 
   1656             pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
   1657 
   1658             if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
   1659                 /* Take a SecureFault on the current stack */
   1660                 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
   1661                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   1662                 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
   1663                               "stackframe: failed exception return integrity "
   1664                               "signature check\n");
   1665                 v7m_exception_taken(cpu, excret, true, false);
   1666                 return;
   1667             }
   1668 
   1669             pop_ok = pop_ok &&
   1670                 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
   1671                 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
   1672                 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
   1673                 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
   1674                 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
   1675                 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
   1676                 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
   1677                 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
   1678 
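                    /*
                     * Step past the callee-saved area: the integrity
                     * signature, a reserved word, and r4-r11 (ten words).
                     */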
   1679             frameptr += 0x28;
   1680         }
   1681 
   1682         /* Pop registers */
   1683         pop_ok = pop_ok &&
   1684             v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
   1685             v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
   1686             v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
   1687             v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
   1688             v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
   1689             v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
   1690             v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
   1691             v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
   1692 
   1693         if (!pop_ok) {
   1694             /*
   1695              * v7m_stack_read() pended a fault, so take it (as a tail
   1696              * chained exception on the same stack frame)
   1697              */
   1698             qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
   1699             v7m_exception_taken(cpu, excret, true, false);
   1700             return;
   1701         }
   1702 
   1703         /*
   1704          * Returning from an exception with a PC with bit 0 set is defined
   1705          * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
   1706          * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
   1707          * the lsbit, and there are several RTOSes out there which incorrectly
   1708          * assume the r15 in the stack frame should be a Thumb-style "lsbit
   1709          * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
   1710          * complain about the badly behaved guest.
   1711          */
   1712         if (env->regs[15] & 1) {
   1713             env->regs[15] &= ~1U;
   1714             if (!arm_feature(env, ARM_FEATURE_V8)) {
   1715                 qemu_log_mask(LOG_GUEST_ERROR,
   1716                               "M profile return from interrupt with misaligned "
   1717                               "PC is UNPREDICTABLE on v7M\n");
   1718             }
   1719         }
   1720 
   1721         if (arm_feature(env, ARM_FEATURE_V8)) {
   1722             /*
   1723              * For v8M we have to check whether the xPSR exception field
   1724              * matches the EXCRET value for return to handler/thread
   1725              * before we commit to changing the SP and xPSR.
   1726              */
   1727             bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
   1728             if (return_to_handler != will_be_handler) {
   1729                 /*
   1730                  * Take an INVPC UsageFault on the current stack.
   1731                  * By this point we will have switched to the security state
   1732                  * for the background state, so this UsageFault will target
   1733                  * that state.
   1734                  */
   1735                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   1736                                         env->v7m.secure);
   1737                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
   1738                 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
   1739                               "stackframe: failed exception return integrity "
   1740                               "check\n");
   1741                 v7m_exception_taken(cpu, excret, true, false);
   1742                 return;
   1743             }
   1744         }
   1745 
   1746         if (!ftype) {
   1747             /* FP present and we need to handle it */
   1748             if (!return_to_secure &&
   1749                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
   1750                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   1751                 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
   1752                 qemu_log_mask(CPU_LOG_INT,
   1753                               "...taking SecureFault on existing stackframe: "
   1754                               "Secure LSPACT set but exception return is "
   1755                               "not to secure state\n");
   1756                 v7m_exception_taken(cpu, excret, true, false);
   1757                 return;
   1758             }
   1759 
   1760             restore_s16_s31 = return_to_secure &&
   1761                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
   1762 
   1763             if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
   1764                 /* State in FPU is still valid, just clear LSPACT */
   1765                 env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
   1766             } else {
   1767                 int i;
   1768                 uint32_t fpscr;
   1769                 bool cpacr_pass, nsacr_pass;
   1770 
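                        /*
                         * Unstacking the FP regs needs a CPACR check for the
                         * state we are returning to, plus NSACR.CP10 for
                         * Non-secure returns.
                         */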
   1771                 cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
   1772                                             return_to_priv);
   1773                 nsacr_pass = return_to_secure ||
   1774                     extract32(env->v7m.nsacr, 10, 1);
   1775 
   1776                 if (!cpacr_pass) {
   1777                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   1778                                             return_to_secure);
   1779                     env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
   1780                     qemu_log_mask(CPU_LOG_INT,
   1781                                   "...taking UsageFault on existing "
   1782                                   "stackframe: CPACR.CP10 prevents unstacking "
   1783                                   "FP regs\n");
   1784                     v7m_exception_taken(cpu, excret, true, false);
   1785                     return;
   1786                 } else if (!nsacr_pass) {
   1787                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
   1788                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
   1789                     qemu_log_mask(CPU_LOG_INT,
   1790                                   "...taking Secure UsageFault on existing "
   1791                                   "stackframe: NSACR.CP10 prevents unstacking "
   1792                                   "FP regs\n");
   1793                     v7m_exception_taken(cpu, excret, true, false);
   1794                     return;
   1795                 }
   1796 
   1797                 for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
   1798                     uint32_t slo, shi;
   1799                     uint64_t dn;
   1800                     uint32_t faddr = frameptr + 0x20 + 4 * i;
   1801 
   1802                     if (i >= 16) {
   1803                         faddr += 8; /* Skip the slot for the FPSCR and VPR */
   1804                     }
   1805 
   1806                     pop_ok = pop_ok &&
   1807                         v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
   1808                         v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
   1809 
   1810                     if (!pop_ok) {
   1811                         break;
   1812                     }
   1813 
   1814                     dn = (uint64_t)shi << 32 | slo;
   1815                     *aa32_vfp_dreg(env, i / 2) = dn;
   1816                 }
   1817                 pop_ok = pop_ok &&
   1818                     v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
   1819                 if (pop_ok) {
   1820                     vfp_set_fpscr(env, fpscr);
   1821                 }
   1822                 if (cpu_isar_feature(aa32_mve, cpu)) {
   1823                     pop_ok = pop_ok &&
   1824                         v7m_stack_read(cpu, &env->v7m.vpr,
   1825                                        frameptr + 0x64, mmu_idx);
   1826                 }
   1827                 if (!pop_ok) {
   1828                     /*
   1829                      * These regs are 0 if the security extension is present;
   1830                      * otherwise they are merely UNKNOWN. We always zero them.
   1831                      */
   1832                     for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
   1833                         *aa32_vfp_dreg(env, i / 2) = 0;
   1834                     }
   1835                     vfp_set_fpscr(env, 0);
   1836                     if (cpu_isar_feature(aa32_mve, cpu)) {
   1837                         env->v7m.vpr = 0;
   1838                     }
   1839                 }
   1840             }
   1841         }
   1842         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
   1843                                                V7M_CONTROL, FPCA, !ftype);
   1844 
   1845         /* Commit to consuming the stack frame */
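                /*
                 * The basic frame is 8 words (0x20); an FP frame adds
                 * s0..s15, FPSCR and a reserved word (0x48 more), and the
                 * extended variant adds s16..s31 on top (another 0x40).
                 */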
   1846         frameptr += 0x20;
   1847         if (!ftype) {
   1848             frameptr += 0x48;
   1849             if (restore_s16_s31) {
   1850                 frameptr += 0x40;
   1851             }
   1852         }
   1853         /*
   1854      * Undo stack alignment: the SPREALIGN bit indicates that the original
   1855          * pre-exception SP was not 8-aligned and we added a padding word to
   1856          * align it, so we undo this by ORing in the bit that increases it
   1857          * from the current 8-aligned value to the 8-unaligned value. (Adding 4
   1858          * would work too but a logical OR is how the pseudocode specifies it.)
   1859          */
   1860         if (xpsr & XPSR_SPREALIGN) {
   1861             frameptr |= 4;
   1862         }
   1863         *frame_sp_p = frameptr;
   1864     }
   1865 
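            /*
             * SPREALIGN and SFPA are bits of the stacked xPSR image only;
             * don't write them into the live xPSR (SFPA is transferred to
             * CONTROL.SFPA below).
             */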
   1866     xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
   1867     if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
   1868         xpsr_mask &= ~XPSR_GE;
   1869     }
   1870     /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
   1871     xpsr_write(env, xpsr, xpsr_mask);
   1872 
   1873     if (env->v7m.secure) {
   1874         bool sfpa = xpsr & XPSR_SFPA;
   1875 
   1876         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
   1877                                                V7M_CONTROL, SFPA, sfpa);
   1878     }
   1879 
   1880     /*
   1881      * The restored xPSR exception field will be zero if we're
   1882      * resuming in Thread mode. If that doesn't match what the
   1883      * exception return excret specified then this is a UsageFault.
   1884      * v7M requires we make this check here; v8M did it earlier.
   1885      */
   1886     if (return_to_handler != arm_v7m_is_handler_mode(env)) {
   1887         /*
   1888          * Take an INVPC UsageFault by pushing the stack again;
   1889          * we know we're v7M so this is never a Secure UsageFault.
   1890          */
   1891         bool ignore_stackfaults;
   1892 
   1893         assert(!arm_feature(env, ARM_FEATURE_V8));
   1894         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
   1895         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
   1896         ignore_stackfaults = v7m_push_stack(cpu);
   1897         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
   1898                       "failed exception return integrity check\n");
   1899         v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
   1900         return;
   1901     }
   1902 
   1903     /* Otherwise, we have a successful exception exit. */
   1904     arm_clear_exclusive(env);
   1905     arm_rebuild_hflags(env);
   1906     qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
   1907 }
   1908 
   1909 static bool do_v7m_function_return(ARMCPU *cpu)
   1910 {
   1911     /*
   1912      * v8M security extensions magic function return.
   1913      * We may either:
   1914      *  (1) throw an exception (longjump)
   1915      *  (2) return true if we successfully handled the function return
   1916      *  (3) return false if we failed a consistency check and have
   1917      *      pended a UsageFault that needs to be taken now
   1918      *
   1919      * At this point the magic return value is split between env->regs[15]
   1920      * and env->thumb. We don't bother to reconstitute it because we don't
   1921      * need it (all values are handled the same way).
   1922      */
   1923     CPUARMState *env = &cpu->env;
   1924     uint32_t newpc, newpsr, newpsr_exc;
   1925 
   1926     qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
   1927 
   1928     {
   1929         bool threadmode, spsel;
   1930         MemOpIdx oi;
   1931         ARMMMUIdx mmu_idx;
   1932         uint32_t *frame_sp_p;
   1933         uint32_t frameptr;
   1934 
   1935         /* Pull the return address and IPSR from the Secure stack */
   1936         threadmode = !arm_v7m_is_handler_mode(env);
   1937         spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
   1938 
   1939         frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
   1940         frameptr = *frame_sp_p;
   1941 
   1942         /*
   1943          * These loads may throw an exception (for MPU faults). We want to
   1944          * do them as secure, so work out what MMU index that is.
   1945          */
   1946         mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
   1947         oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
   1948         newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0);
   1949         newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0);
   1950 
   1951         /* Consistency checks on new IPSR */
   1952         newpsr_exc = newpsr & XPSR_EXCP;
   1953         if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
   1954               (env->v7m.exception == 1 && newpsr_exc != 0))) {
   1955             /* Pend the fault and tell our caller to take it */
   1956             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
   1957             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   1958                                     env->v7m.secure);
   1959             qemu_log_mask(CPU_LOG_INT,
   1960                           "...taking INVPC UsageFault: "
   1961                           "IPSR consistency check failed\n");
   1962             return false;
   1963         }
   1964 
   1965         *frame_sp_p = frameptr + 8;
   1966     }
   1967 
   1968     /* This invalidates frame_sp_p */
   1969     switch_v7m_security_state(env, true);
   1970     env->v7m.exception = newpsr_exc;
   1971     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
   1972     if (newpsr & XPSR_SFPA) {
   1973         env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
   1974     }
   1975     xpsr_write(env, 0, XPSR_IT);
   1976     env->thumb = newpc & 1;
   1977     env->regs[15] = newpc & ~1;
   1978     arm_rebuild_hflags(env);
   1979 
   1980     qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
   1981     return true;
   1982 }
   1983 
   1984 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
   1985                                uint32_t addr, uint16_t *insn)
   1986 {
   1987     /*
   1988      * Load a 16-bit portion of a v7M instruction, returning true on success,
   1989      * or false on failure (in which case we will have pended the appropriate
   1990      * exception).
   1991      * We need to do the instruction fetch's MPU and SAU checks
   1992      * like this because there is no MMU index that would allow
   1993      * doing the load with a single function call. Instead we must
   1994      * first check that the security attributes permit the load
   1995      * and that they don't mismatch on the two halves of the instruction,
   1996      * and then we do the load as a secure load (ie using the security
   1997      * attributes of the address, not the CPU, as architecturally required).
   1998      */
   1999     CPUState *cs = CPU(cpu);
   2000     CPUARMState *env = &cpu->env;
   2001     V8M_SAttributes sattrs = {};
   2002     GetPhysAddrResult res = {};
   2003     ARMMMUFaultInfo fi = {};
   2004     MemTxResult txres;
   2005 
   2006     v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, secure, &sattrs);
   2007     if (!sattrs.nsc || sattrs.ns) {
   2008         /*
   2009          * This must be the second half of the insn, and it straddles a
   2010          * region boundary with the second half not being S&NSC.
   2011          */
   2012         env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
   2013         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   2014         qemu_log_mask(CPU_LOG_INT,
   2015                       "...really SecureFault with SFSR.INVEP\n");
   2016         return false;
   2017     }
   2018     if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
   2019         /* the MPU lookup failed */
   2020         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
   2021         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
   2022         qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
   2023         return false;
   2024     }
   2025     *insn = address_space_lduw_le(arm_addressspace(cs, res.f.attrs),
   2026                                   res.f.phys_addr, res.f.attrs, &txres);
   2027     if (txres != MEMTX_OK) {
   2028         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
   2029         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
   2030         qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
   2031         return false;
   2032     }
   2033     return true;
   2034 }
   2035 
   2036 static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
   2037                                    uint32_t addr, uint32_t *spdata)
   2038 {
   2039     /*
   2040      * Read a word of data from the stack for the SG instruction,
   2041      * writing the value into *spdata. If the load succeeds, return
   2042      * true; otherwise pend an appropriate exception and return false.
   2043      * (We can't use data load helpers here that throw an exception
   2044      * because of the context we're called in, which is halfway through
   2045      * arm_v7m_cpu_do_interrupt().)
   2046      */
   2047     CPUState *cs = CPU(cpu);
   2048     CPUARMState *env = &cpu->env;
   2049     MemTxResult txres;
   2050     GetPhysAddrResult res = {};
   2051     ARMMMUFaultInfo fi = {};
   2052     uint32_t value;
   2053 
   2054     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
   2055         /* MPU/SAU lookup failed */
   2056         if (fi.type == ARMFault_QEMU_SFault) {
   2057             qemu_log_mask(CPU_LOG_INT,
   2058                           "...SecureFault during stack word read\n");
   2059             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
   2060             env->v7m.sfar = addr;
   2061             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   2062         } else {
   2063             qemu_log_mask(CPU_LOG_INT,
   2064                           "...MemManageFault during stack word read\n");
   2065             env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
   2066                 R_V7M_CFSR_MMARVALID_MASK;
   2067             env->v7m.mmfar[M_REG_S] = addr;
   2068             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
   2069         }
   2070         return false;
   2071     }
   2072     value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
   2073                               res.f.phys_addr, res.f.attrs, &txres);
   2074     if (txres != MEMTX_OK) {
   2075         /* BusFault trying to read the data */
   2076         qemu_log_mask(CPU_LOG_INT,
   2077                       "...BusFault during stack word read\n");
   2078         env->v7m.cfsr[M_REG_NS] |=
   2079             (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
   2080         env->v7m.bfar = addr;
   2081         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
   2082         return false;
   2083     }
   2084 
   2085     *spdata = value;
   2086     return true;
   2087 }
   2088 
   2089 static bool v7m_handle_execute_nsc(ARMCPU *cpu)
   2090 {
   2091     /*
   2092      * Check whether this attempt to execute code in a Secure & NS-Callable
   2093      * memory region is for an SG instruction; if so, then emulate the
   2094      * effect of the SG instruction and return true. Otherwise pend
   2095      * the correct kind of exception and return false.
   2096      */
   2097     CPUARMState *env = &cpu->env;
   2098     ARMMMUIdx mmu_idx;
   2099     uint16_t insn;
   2100 
   2101     /*
   2102      * We should never get here unless get_phys_addr_pmsav8() caused
   2103      * an exception for NS executing in S&NSC memory.
   2104      */
   2105     assert(!env->v7m.secure);
   2106     assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
   2107 
   2108     /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
   2109     mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
   2110 
   2111     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15], &insn)) {
   2112         return false;
   2113     }
   2114 
   2115     if (!env->thumb) {
   2116         goto gen_invep;
   2117     }
   2118 
   2119     if (insn != 0xe97f) {
   2120         /*
   2121          * Not an SG instruction first half (we choose the IMPDEF
   2122          * early-SG-check option).
   2123          */
   2124         goto gen_invep;
   2125     }
   2126 
   2127     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15] + 2, &insn)) {
   2128         return false;
   2129     }
   2130 
   2131     if (insn != 0xe97f) {
   2132         /*
   2133          * Not an SG instruction second half (yes, both halves of the SG
   2134          * insn have the same hex value)
   2135          */
   2136         goto gen_invep;
   2137     }
   2138 
   2139     /*
   2140      * OK, we have confirmed that we really have an SG instruction.
   2141      * We know we're NS in S memory so don't need to repeat those checks.
   2142      */
   2143     qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
   2144                   ", executing it\n", env->regs[15]);
   2145 
   2146     if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
   2147         !arm_v7m_is_handler_mode(env)) {
   2148         /*
   2149          * v8.1M exception stack frame integrity check. Note that we
   2150          * must perform the memory access even if CCR_S.TRD is zero
   2151          * and we aren't going to check what the data loaded is.
   2152          */
   2153         uint32_t spdata, sp;
   2154 
   2155         /*
   2156          * We know we are currently NS, so the S stack pointers must be
   2157          * in other_ss_{psp,msp}, not in regs[13]/other_sp.
   2158          */
   2159         sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
   2160         if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
   2161             /* Stack access failed and an exception has been pended */
   2162             return false;
   2163         }
   2164 
   2165         if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
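                    /*
                     * Reject the SG if the stacked word looks like a
                     * FNC_RETURN value (0xfefa125a/b) or if Secure Thread
                     * mode is privileged (CONTROL_S.nPRIV clear).
                     */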
   2166             if (((spdata & ~1) == 0xfefa125a) ||
   2167                 !(env->v7m.control[M_REG_S] & 1)) {
   2168                 goto gen_invep;
   2169             }
   2170         }
   2171     }
   2172 
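            /*
             * Commit the SG: clear LR bit 0 to record that the caller was
             * Non-secure, clear SFPA, switch to Secure, clear the IT bits
             * and step past the 4-byte SG instruction.
             */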
   2173     env->regs[14] &= ~1;
   2174     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
   2175     switch_v7m_security_state(env, true);
   2176     xpsr_write(env, 0, XPSR_IT);
   2177     env->regs[15] += 4;
   2178     arm_rebuild_hflags(env);
   2179     return true;
   2180 
   2181 gen_invep:
   2182     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
   2183     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   2184     qemu_log_mask(CPU_LOG_INT,
   2185                   "...really SecureFault with SFSR.INVEP\n");
   2186     return false;
   2187 }
   2188 
   2189 void arm_v7m_cpu_do_interrupt(CPUState *cs)
   2190 {
   2191     ARMCPU *cpu = ARM_CPU(cs);
   2192     CPUARMState *env = &cpu->env;
   2193     uint32_t lr;
   2194     bool ignore_stackfaults;
   2195 
   2196     arm_log_exception(cs);
   2197 
   2198     /*
   2199      * For exceptions we just mark as pending on the NVIC, and let that
   2200      * handle it.
   2201      */
   2202     switch (cs->exception_index) {
   2203     case EXCP_UDEF:
   2204         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
   2205         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
   2206         break;
   2207     case EXCP_NOCP:
   2208     {
   2209         /*
   2210          * NOCP might be directed to something other than the current
   2211          * security state if this fault is because of NSACR; we indicate
   2212          * the target security state using exception.target_el.
   2213          */
   2214         int target_secstate;
   2215 
   2216         if (env->exception.target_el == 3) {
   2217             target_secstate = M_REG_S;
   2218         } else {
   2219             target_secstate = env->v7m.secure;
   2220         }
   2221         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
   2222         env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
   2223         break;
   2224     }
   2225     case EXCP_INVSTATE:
   2226         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
   2227         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
   2228         break;
   2229     case EXCP_STKOF:
   2230         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
   2231         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
   2232         break;
   2233     case EXCP_LSERR:
   2234         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   2235         env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
   2236         break;
   2237     case EXCP_UNALIGNED:
   2238         /* Unaligned faults reported by M-profile aware code */
   2239         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
   2240         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
   2241         break;
   2242     case EXCP_DIVBYZERO:
   2243         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
   2244         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK;
   2245         break;
   2246     case EXCP_SWI:
   2247         /* The PC already points to the next instruction.  */
   2248         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
   2249         break;
   2250     case EXCP_PREFETCH_ABORT:
   2251     case EXCP_DATA_ABORT:
   2252         /*
   2253          * Note that for M profile we don't have a guest-facing FSR, but
   2254          * the env->exception.fsr will be populated by the code that
   2255          * raises the fault, in the A profile short-descriptor format.
   2256          *
   2257          * Log the exception.vaddress now regardless of subtype, because
   2258          * logging below only logs it when it goes into a guest-visible
   2259          * register.
   2260          */
   2261         qemu_log_mask(CPU_LOG_INT, "...at fault address 0x%x\n",
   2262                       (uint32_t)env->exception.vaddress);
   2263         switch (env->exception.fsr & 0xf) {
   2264         case M_FAKE_FSR_NSC_EXEC:
   2265             /*
   2266              * Exception generated when we try to execute code at an address
   2267              * which is marked as Secure & Non-Secure Callable and the CPU
   2268              * is in the Non-Secure state. The only instruction which can
   2269              * be executed like this is SG (and that only if both halves of
   2270              * the SG instruction have the same security attributes.)
   2271              * Everything else must generate an INVEP SecureFault, so we
   2272              * emulate the SG instruction here.
   2273              */
   2274             if (v7m_handle_execute_nsc(cpu)) {
   2275                 return;
   2276             }
   2277             break;
   2278         case M_FAKE_FSR_SFAULT:
   2279             /*
   2280              * Various flavours of SecureFault for attempts to execute or
   2281              * access data in the wrong security state.
   2282              */
   2283             switch (cs->exception_index) {
   2284             case EXCP_PREFETCH_ABORT:
   2285                 if (env->v7m.secure) {
   2286                     env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
   2287                     qemu_log_mask(CPU_LOG_INT,
   2288                                   "...really SecureFault with SFSR.INVTRAN\n");
   2289                 } else {
   2290                     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
   2291                     qemu_log_mask(CPU_LOG_INT,
   2292                                   "...really SecureFault with SFSR.INVEP\n");
   2293                 }
   2294                 break;
   2295             case EXCP_DATA_ABORT:
   2296                 /* This must be an NS access to S memory */
   2297                 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
   2298                 qemu_log_mask(CPU_LOG_INT,
   2299                               "...really SecureFault with SFSR.AUVIOL\n");
   2300                 break;
   2301             }
   2302             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
   2303             break;
   2304         case 0x8: /* External Abort */
   2305             switch (cs->exception_index) {
   2306             case EXCP_PREFETCH_ABORT:
   2307                 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
   2308                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
   2309                 break;
   2310             case EXCP_DATA_ABORT:
   2311                 env->v7m.cfsr[M_REG_NS] |=
   2312                     (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
   2313                 env->v7m.bfar = env->exception.vaddress;
   2314                 qemu_log_mask(CPU_LOG_INT,
   2315                               "...with CFSR.PRECISERR and BFAR 0x%x\n",
   2316                               env->v7m.bfar);
   2317                 break;
   2318             }
   2319             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
   2320             break;
   2321         case 0x1: /* Alignment fault reported by generic code */
   2322             qemu_log_mask(CPU_LOG_INT,
   2323                           "...really UsageFault with UFSR.UNALIGNED\n");
   2324             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
   2325             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
   2326                                     env->v7m.secure);
   2327             break;
   2328         default:
   2329             /*
   2330              * All other FSR values are either MPU faults or "can't happen
   2331              * for M profile" cases.
   2332              */
   2333             switch (cs->exception_index) {
   2334             case EXCP_PREFETCH_ABORT:
   2335                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
   2336                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
   2337                 break;
   2338             case EXCP_DATA_ABORT:
   2339                 env->v7m.cfsr[env->v7m.secure] |=
   2340                     (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
   2341                 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
   2342                 qemu_log_mask(CPU_LOG_INT,
   2343                               "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
   2344                               env->v7m.mmfar[env->v7m.secure]);
   2345                 break;
   2346             }
   2347             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
   2348                                     env->v7m.secure);
   2349             break;
   2350         }
   2351         break;
   2352     case EXCP_SEMIHOST:
   2353         qemu_log_mask(CPU_LOG_INT,
   2354                       "...handling as semihosting call 0x%x\n",
   2355                       env->regs[0]);
   2356 #ifdef CONFIG_TCG
   2357         do_common_semihosting(cs);
   2358 #else
   2359         g_assert_not_reached();
   2360 #endif
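                /* Step past the instruction that triggered semihosting */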
   2361         env->regs[15] += env->thumb ? 2 : 4;
   2362         return;
   2363     case EXCP_BKPT:
   2364         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
   2365         break;
   2366     case EXCP_IRQ:
   2367         break;
   2368     case EXCP_EXCEPTION_EXIT:
   2369         if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
   2370             /* Must be v8M security extension function return */
   2371             assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
   2372             assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
   2373             if (do_v7m_function_return(cpu)) {
   2374                 return;
   2375             }
   2376         } else {
   2377             do_v7m_exception_exit(cpu);
   2378             return;
   2379         }
   2380         break;
   2381     case EXCP_LAZYFP:
   2382         /*
   2383          * We already pended the specific exception in the NVIC in the
   2384          * v7m_preserve_fp_state() helper function.
   2385          */
   2386         break;
   2387     default:
   2388         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
   2389         return; /* Never happens.  Keep compiler happy.  */
   2390     }
   2391 
   2392     if (arm_feature(env, ARM_FEATURE_V8)) {
   2393         lr = R_V7M_EXCRET_RES1_MASK |
   2394             R_V7M_EXCRET_DCRS_MASK;
   2395         /*
   2396          * The S bit indicates whether we should return to Secure
   2397          * or NonSecure (ie our current state).
   2398          * The ES bit indicates whether we're taking this exception
   2399          * to Secure or NonSecure (ie our target state). We set it
   2400          * later, in v7m_exception_taken().
   2401          * The SPSEL bit is also set in v7m_exception_taken() for v8M.
   2402          * This corresponds to the ARM ARM pseudocode for v8M setting
   2403          * some LR bits in PushStack() and some in ExceptionTaken();
   2404          * the distinction matters for the tailchain cases where we
   2405          * can take an exception without pushing the stack.
   2406          */
   2407         if (env->v7m.secure) {
   2408             lr |= R_V7M_EXCRET_S_MASK;
   2409         }
   2410     } else {
   2411         lr = R_V7M_EXCRET_RES1_MASK |
   2412             R_V7M_EXCRET_S_MASK |
   2413             R_V7M_EXCRET_DCRS_MASK |
   2414             R_V7M_EXCRET_ES_MASK;
   2415         if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
   2416             lr |= R_V7M_EXCRET_SPSEL_MASK;
   2417         }
   2418     }
   2419     if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
   2420         lr |= R_V7M_EXCRET_FTYPE_MASK;
   2421     }
   2422     if (!arm_v7m_is_handler_mode(env)) {
   2423         lr |= R_V7M_EXCRET_MODE_MASK;
   2424     }
   2425 
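            /*
             * v7m_push_stack() returns true if the stacking faulted; the
             * fault is already pended as a derived exception, and the flag
             * tells v7m_exception_taken() to ignore further stack faults.
             */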
   2426     ignore_stackfaults = v7m_push_stack(cpu);
   2427     v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
   2428 }
   2429 
   2430 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
   2431 {
   2432     unsigned el = arm_current_el(env);
   2433 
   2434     /* First handle registers which unprivileged can read */
   2435     switch (reg) {
   2436     case 0 ... 7: /* xPSR sub-fields */
   2437         return v7m_mrs_xpsr(env, reg, el);
   2438     case 20: /* CONTROL */
   2439         return v7m_mrs_control(env, env->v7m.secure);
   2440     case 0x94: /* CONTROL_NS */
   2441         /*
   2442          * We have to handle this here because unprivileged Secure code
   2443          * can read the NS CONTROL register.
   2444          */
   2445         if (!env->v7m.secure) {
   2446             return 0;
   2447         }
   2448         return env->v7m.control[M_REG_NS] |
   2449             (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
   2450     }
   2451 
   2452     if (el == 0) {
   2453         return 0; /* unprivileged reads others as zero */
   2454     }
   2455 
   2456     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
   2457         switch (reg) {
   2458         case 0x88: /* MSP_NS */
   2459             if (!env->v7m.secure) {
   2460                 return 0;
   2461             }
   2462             return env->v7m.other_ss_msp;
   2463         case 0x89: /* PSP_NS */
   2464             if (!env->v7m.secure) {
   2465                 return 0;
   2466             }
   2467             return env->v7m.other_ss_psp;
   2468         case 0x8a: /* MSPLIM_NS */
   2469             if (!env->v7m.secure) {
   2470                 return 0;
   2471             }
   2472             return env->v7m.msplim[M_REG_NS];
   2473         case 0x8b: /* PSPLIM_NS */
   2474             if (!env->v7m.secure) {
   2475                 return 0;
   2476             }
   2477             return env->v7m.psplim[M_REG_NS];
   2478         case 0x90: /* PRIMASK_NS */
   2479             if (!env->v7m.secure) {
   2480                 return 0;
   2481             }
   2482             return env->v7m.primask[M_REG_NS];
   2483         case 0x91: /* BASEPRI_NS */
   2484             if (!env->v7m.secure) {
   2485                 return 0;
   2486             }
   2487             return env->v7m.basepri[M_REG_NS];
   2488         case 0x93: /* FAULTMASK_NS */
   2489             if (!env->v7m.secure) {
   2490                 return 0;
   2491             }
   2492             return env->v7m.faultmask[M_REG_NS];
   2493         case 0x98: /* SP_NS */
   2494         {
   2495             /*
   2496              * This gives the non-secure SP selected based on whether we're
   2497              * currently in handler mode or not, using the NS CONTROL.SPSEL.
   2498              */
   2499             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
   2500 
   2501             if (!env->v7m.secure) {
   2502                 return 0;
   2503             }
   2504             if (!arm_v7m_is_handler_mode(env) && spsel) {
   2505                 return env->v7m.other_ss_psp;
   2506             } else {
   2507                 return env->v7m.other_ss_msp;
   2508             }
   2509         }
   2510         default:
   2511             break;
   2512         }
   2513     }
   2514 
   2515     switch (reg) {
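         /*
          * regs[13] always holds the currently selected SP; the inactive
          * SP for this security state is kept in v7m.other_sp.
          */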
   2516     case 8: /* MSP */
   2517         return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
   2518     case 9: /* PSP */
   2519         return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
   2520     case 10: /* MSPLIM */
   2521         if (!arm_feature(env, ARM_FEATURE_V8)) {
   2522             goto bad_reg;
   2523         }
   2524         return env->v7m.msplim[env->v7m.secure];
   2525     case 11: /* PSPLIM */
   2526         if (!arm_feature(env, ARM_FEATURE_V8)) {
   2527             goto bad_reg;
   2528         }
   2529         return env->v7m.psplim[env->v7m.secure];
   2530     case 16: /* PRIMASK */
   2531         return env->v7m.primask[env->v7m.secure];
   2532     case 17: /* BASEPRI */
   2533     case 18: /* BASEPRI_MAX */
   2534         return env->v7m.basepri[env->v7m.secure];
   2535     case 19: /* FAULTMASK */
   2536         return env->v7m.faultmask[env->v7m.secure];
   2537     default:
   2538     bad_reg:
   2539         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
   2540                                        " register %d\n", reg);
   2541         return 0;
   2542     }
   2543 }
   2544 
   2545 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
   2546 {
   2547     /*
   2548      * We're passed bits [11..0] of the instruction; extract
   2549      * SYSm and the mask bits.
   2550      * Invalid combinations of SYSm and mask are UNPREDICTABLE;
   2551      * we choose to treat them as if the mask bits were valid.
   2552      * NB that the pseudocode 'mask' variable is bits [11..10],
   2553      * whereas ours is [11..8].
   2554      */
   2555     uint32_t mask = extract32(maskreg, 8, 4);
   2556     uint32_t reg = extract32(maskreg, 0, 8);
   2557     int cur_el = arm_current_el(env);
   2558 
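         /*
          * Example: "MSR APSR_nzcvq, r0" encodes a mask of 0b10 in insn
          * bits [11:10], so it arrives here as mask == 8 with reg == 0.
          */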
   2559     if (cur_el == 0 && reg > 7 && reg != 20) {
   2560         /*
   2561          * only xPSR sub-fields and CONTROL.SFPA may be written by
   2562          * unprivileged code
   2563          */
   2564         return;
   2565     }
   2566 
   2567     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
   2568         switch (reg) {
   2569         case 0x88: /* MSP_NS */
   2570             if (!env->v7m.secure) {
   2571                 return;
   2572             }
   2573             env->v7m.other_ss_msp = val & ~3;
   2574             return;
   2575         case 0x89: /* PSP_NS */
   2576             if (!env->v7m.secure) {
   2577                 return;
   2578             }
   2579             env->v7m.other_ss_psp = val & ~3;
   2580             return;
   2581         case 0x8a: /* MSPLIM_NS */
   2582             if (!env->v7m.secure) {
   2583                 return;
   2584             }
   2585             env->v7m.msplim[M_REG_NS] = val & ~7;
   2586             return;
   2587         case 0x8b: /* PSPLIM_NS */
   2588             if (!env->v7m.secure) {
   2589                 return;
   2590             }
   2591             env->v7m.psplim[M_REG_NS] = val & ~7;
   2592             return;
   2593         case 0x90: /* PRIMASK_NS */
   2594             if (!env->v7m.secure) {
   2595                 return;
   2596             }
   2597             env->v7m.primask[M_REG_NS] = val & 1;
   2598             return;
   2599         case 0x91: /* BASEPRI_NS */
   2600             if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2601                 return;
   2602             }
   2603             env->v7m.basepri[M_REG_NS] = val & 0xff;
   2604             return;
   2605         case 0x93: /* FAULTMASK_NS */
   2606             if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2607                 return;
   2608             }
   2609             env->v7m.faultmask[M_REG_NS] = val & 1;
   2610             return;
   2611         case 0x94: /* CONTROL_NS */
   2612             if (!env->v7m.secure) {
   2613                 return;
   2614             }
   2615             write_v7m_control_spsel_for_secstate(env,
   2616                                                  val & R_V7M_CONTROL_SPSEL_MASK,
   2617                                                  M_REG_NS);
   2618             if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2619                 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
   2620                 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
   2621             }
   2622             /*
   2623              * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
   2624              * RES0 if the FPU is not present, and is stored in the S bank
   2625              */
   2626             if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
   2627                 extract32(env->v7m.nsacr, 10, 1)) {
   2628                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
   2629                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
   2630             }
   2631             return;
   2632         case 0x98: /* SP_NS */
   2633         {
   2634             /*
   2635              * This gives the non-secure SP selected based on whether we're
   2636              * currently in handler mode or not, using the NS CONTROL.SPSEL.
   2637              */
   2638             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
   2639             bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
   2640             uint32_t limit;
   2641 
   2642             if (!env->v7m.secure) {
   2643                 return;
   2644             }
   2645 
2646             limit = is_psp ? env->v7m.psplim[M_REG_NS] : env->v7m.msplim[M_REG_NS];
   2647 
   2648             val &= ~0x3;
   2649 
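             /*
              * v8M stack limit check: writing an SP value below the
              * relevant limit raises a STKOF UsageFault.
              */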
   2650             if (val < limit) {
   2651                 raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
   2652             }
   2653 
   2654             if (is_psp) {
   2655                 env->v7m.other_ss_psp = val;
   2656             } else {
   2657                 env->v7m.other_ss_msp = val;
   2658             }
   2659             return;
   2660         }
   2661         default:
   2662             break;
   2663         }
   2664     }
   2665 
   2666     switch (reg) {
   2667     case 0 ... 7: /* xPSR sub-fields */
   2668         v7m_msr_xpsr(env, mask, reg, val);
   2669         break;
   2670     case 8: /* MSP */
   2671         if (v7m_using_psp(env)) {
   2672             env->v7m.other_sp = val & ~3;
   2673         } else {
   2674             env->regs[13] = val & ~3;
   2675         }
   2676         break;
   2677     case 9: /* PSP */
   2678         if (v7m_using_psp(env)) {
   2679             env->regs[13] = val & ~3;
   2680         } else {
   2681             env->v7m.other_sp = val & ~3;
   2682         }
   2683         break;
   2684     case 10: /* MSPLIM */
   2685         if (!arm_feature(env, ARM_FEATURE_V8)) {
   2686             goto bad_reg;
   2687         }
   2688         env->v7m.msplim[env->v7m.secure] = val & ~7;
   2689         break;
   2690     case 11: /* PSPLIM */
   2691         if (!arm_feature(env, ARM_FEATURE_V8)) {
   2692             goto bad_reg;
   2693         }
   2694         env->v7m.psplim[env->v7m.secure] = val & ~7;
   2695         break;
   2696     case 16: /* PRIMASK */
   2697         env->v7m.primask[env->v7m.secure] = val & 1;
   2698         break;
   2699     case 17: /* BASEPRI */
   2700         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2701             goto bad_reg;
   2702         }
   2703         env->v7m.basepri[env->v7m.secure] = val & 0xff;
   2704         break;
   2705     case 18: /* BASEPRI_MAX */
   2706         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2707             goto bad_reg;
   2708         }
   2709         val &= 0xff;
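             /*
              * BASEPRI_MAX is a conditional write: BASEPRI only changes
              * when the new value raises the masking priority, i.e. it is
              * nonzero and either lower than the current value or the
              * current value is zero (masking disabled).
              */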
   2710         if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
   2711                          || env->v7m.basepri[env->v7m.secure] == 0)) {
   2712             env->v7m.basepri[env->v7m.secure] = val;
   2713         }
   2714         break;
   2715     case 19: /* FAULTMASK */
   2716         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2717             goto bad_reg;
   2718         }
   2719         env->v7m.faultmask[env->v7m.secure] = val & 1;
   2720         break;
   2721     case 20: /* CONTROL */
   2722         /*
   2723          * Writing to the SPSEL bit only has an effect if we are in
   2724          * thread mode; other bits can be updated by any privileged code.
   2725          * write_v7m_control_spsel() deals with updating the SPSEL bit in
2726      * env->v7m.control, so we only need to update the others.
   2727          * For v7M, we must just ignore explicit writes to SPSEL in handler
   2728          * mode; for v8M the write is permitted but will have no effect.
   2729          * All these bits are writes-ignored from non-privileged code,
   2730          * except for SFPA.
   2731          */
   2732         if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
   2733                            !arm_v7m_is_handler_mode(env))) {
   2734             write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
   2735         }
   2736         if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
   2737             env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
   2738             env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
   2739         }
   2740         if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
   2741             /*
   2742              * SFPA is RAZ/WI from NS or if no FPU.
   2743              * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
   2744              * Both are stored in the S bank.
   2745              */
   2746             if (env->v7m.secure) {
   2747                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
   2748                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
   2749             }
   2750             if (cur_el > 0 &&
   2751                 (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
   2752                  extract32(env->v7m.nsacr, 10, 1))) {
   2753                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
   2754                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
   2755             }
   2756         }
   2757         break;
   2758     default:
   2759     bad_reg:
   2760         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
   2761                                        " register %d\n", reg);
   2762         return;
   2763     }
   2764 }
   2765 
   2766 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
   2767 {
   2768     /* Implement the TT instruction. op is bits [7:6] of the insn. */
   2769     bool forceunpriv = op & 1;
   2770     bool alt = op & 2;
   2771     V8M_SAttributes sattrs = {};
   2772     uint32_t tt_resp;
   2773     bool r, rw, nsr, nsrw, mrvalid;
   2774     ARMMMUIdx mmu_idx;
   2775     uint32_t mregion;
   2776     bool targetpriv;
   2777     bool targetsec = env->v7m.secure;
   2778 
   2779     /*
2780      * Work out which security state and privilege level we're
2781      * interested in...
   2782      */
   2783     if (alt) {
   2784         targetsec = !targetsec;
   2785     }
   2786 
   2787     if (forceunpriv) {
   2788         targetpriv = false;
   2789     } else {
   2790         targetpriv = arm_v7m_is_handler_mode(env) ||
   2791             !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
   2792     }
   2793 
   2794     /* ...and then figure out which MMU index this is */
   2795     mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
   2796 
   2797     /*
2798      * For our purposes the MPU and SAU don't care about the access
2799      * type beyond not wanting to claim to be an insn fetch, so we
2800      * arbitrarily call this a read.
   2801      */
   2802 
   2803     /*
   2804      * MPU region info only available for privileged or if
   2805      * inspecting the other MPU state.
   2806      */
   2807     if (arm_current_el(env) != 0 || alt) {
   2808         GetPhysAddrResult res = {};
   2809         ARMMMUFaultInfo fi = {};
   2810 
   2811         /* We can ignore the return value as prot is always set */
   2812         pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, targetsec,
   2813                           &res, &fi, &mregion);
   2814         if (mregion == -1) {
   2815             mrvalid = false;
   2816             mregion = 0;
   2817         } else {
   2818             mrvalid = true;
   2819         }
   2820         r = res.f.prot & PAGE_READ;
   2821         rw = res.f.prot & PAGE_WRITE;
   2822     } else {
   2823         r = false;
   2824         rw = false;
   2825         mrvalid = false;
   2826         mregion = 0;
   2827     }
   2828 
   2829     if (env->v7m.secure) {
   2830         v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
   2831                             targetsec, &sattrs);
   2832         nsr = sattrs.ns && r;
   2833         nsrw = sattrs.ns && rw;
   2834     } else {
   2835         sattrs.ns = true;
   2836         nsr = false;
   2837         nsrw = false;
   2838     }
   2839 
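         /*
          * Assemble the TT response word:
          *  IREGION [31:24], IRVALID [23], S [22], NSRW [21], NSR [20],
          *  RW [19], R [18], SRVALID [17], MRVALID [16],
          *  SREGION [15:8], MREGION [7:0].
          */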
   2840     tt_resp = (sattrs.iregion << 24) |
   2841         (sattrs.irvalid << 23) |
   2842         ((!sattrs.ns) << 22) |
   2843         (nsrw << 21) |
   2844         (nsr << 20) |
   2845         (rw << 19) |
   2846         (r << 18) |
   2847         (sattrs.srvalid << 17) |
   2848         (mrvalid << 16) |
   2849         (sattrs.sregion << 8) |
   2850         mregion;
   2851 
   2852     return tt_resp;
   2853 }
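     /*
      * For reference, the four assembler variants map onto the op flags
      * decoded above: TT sets neither flag, TTT sets forceunpriv, TTA
      * sets alt, and TTAT sets both.
      */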
   2854 
   2855 #endif /* !CONFIG_USER_ONLY */
   2856 
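     /*
      * Compose an M-profile MMU index from its attribute bits: privilege,
      * negative-priority execution and security state each contribute an
      * independent flag on top of the base ARM_MMU_IDX_M.
      */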
   2857 ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
   2858                               bool secstate, bool priv, bool negpri)
   2859 {
   2860     ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
   2861 
   2862     if (priv) {
   2863         mmu_idx |= ARM_MMU_IDX_M_PRIV;
   2864     }
   2865 
   2866     if (negpri) {
   2867         mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
   2868     }
   2869 
   2870     if (secstate) {
   2871         mmu_idx |= ARM_MMU_IDX_M_S;
   2872     }
   2873 
   2874     return mmu_idx;
   2875 }
   2876 
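     /*
      * Execution priority is negative (the "negpri" case) when the NVIC
      * reports we are running at a priority below 0, e.g. in an NMI or
      * HardFault handler, or with FAULTMASK set.
      */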
   2877 ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
   2878                                                 bool secstate, bool priv)
   2879 {
   2880     bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
   2881 
   2882     return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
   2883 }
   2884 
   2885 /* Return the MMU index for a v7M CPU in the specified security state */
   2886 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
   2887 {
   2888     bool priv = arm_v7m_is_handler_mode(env) ||
2889         !(env->v7m.control[secstate] & R_V7M_CONTROL_NPRIV_MASK);
   2890 
   2891     return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
   2892 }