qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

op_helper.c (29349B)


/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "cpregs.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

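/*
 * Return the exception level that an exception raised from the current
 * state should target by default: the current EL, but never below EL1,
 * with the AArch32-EL3 secure case routed to EL3.
 */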
int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

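/*
 * Raise an architectural exception: record the exception index, syndrome
 * and target EL in the CPU state, then leave the generated code via
 * cpu_loop_exit(). This function never returns.
 */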
void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    /*
     * restore_state_to_opc() will set env->exception.syndrome, so
     * we must restore CPU state here before setting the syndrome
     * the caller passed us, and cannot use cpu_loop_exit_restore().
     */
    cpu_restore_state(cs, ra);
    raise_exception(env, excp, syndrome, target_el);
}

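/*
 * Table lookup for the Neon VTBL/VTBX instructions. 'desc' packs the
 * table length (bits [1:0], as a number of 64-bit registers minus one)
 * and the first table register (the remaining bits). Each byte of 'ireg'
 * is an index: in-range indexes select a byte from the table registers,
 * while out-of-range indexes take the corresponding byte of 'def' (the
 * caller passes zero for VTBL, or the old destination value for VTBX).
 */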
uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
                          uint64_t ireg, uint64_t def)
{
    uint64_t tmp, val = 0;
    uint32_t maxindex = ((desc & 3) + 1) * 8;
    uint32_t base_reg = desc >> 2;
    uint32_t shift, index, reg;

    for (shift = 0; shift < 64; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            reg = base_reg + (index >> 3);
            tmp = *aa32_vfp_dreg(env, reg);
            tmp = ((tmp >> ((index & 7) << 3)) & 0xff) << shift;
        } else {
            tmp = def & (0xffull << shift);
        }
        val |= tmp;
    }
    return val;
}

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use raise_exception_ra() so
         * that cpu_restore_state() will sort them out.
         */
        raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
    }
}

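/*
 * Saturating arithmetic helpers. On overflow these set the sticky Q
 * flag, and the *_saturate variants also clamp the result to the nearest
 * representable value; e.g. add_saturate(env, 0x7fffffff, 1) returns
 * 0x7fffffff with QF set.
 */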
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation to the (shift + 1)-bit range [-2^shift, 2^shift - 1]. */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation to the shift-bit range [0, 2^shift - 1]. */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

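/*
 * SETEND toggles the CPSR.E data-endianness bit; the cached hflags
 * encode the current endianness, so they must be rebuilt afterwards.
 */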
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
    arm_rebuild_hflags(env);
}

void HELPER(check_bxj_trap)(CPUARMState *env, uint32_t rm)
{
    /*
     * Only called if in NS EL0 or EL1 for a BXJ for a v7A CPU;
     * check if HSTR.TJDBX means we need to trap to EL2.
     */
    if (env->cp15.hstr_el2 & HSTR_TJDBX) {
        /*
         * We know the condition code check passed, so take the IMPDEF
         * choice to always report CV=1 COND 0xe
         */
        uint32_t syn = syn_bxjtrap(1, 0xe, rm);
        raise_exception_ra(env, EXCP_HYP_TRAP, syn, 2, GETPC());
    }
}

#ifndef CONFIG_USER_ONLY
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 are at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist the
     * bits will be zero, indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
#endif

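/*
 * WFI: wait for interrupt. Unless the instruction is configured to trap
 * to a higher EL, this parks the CPU (cs->halted) until an interrupt
 * arrives. insn_len is used to wind the PC back before a trap so that
 * the exception's preferred return address is the WFI instruction itself.
 */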
void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
#ifdef CONFIG_USER_ONLY
    /*
     * WFI in the user-mode emulator is technically permitted but not
     * something any real-world code would do. AArch64 Linux kernels
     * trap it via SCTLR_EL1.nTWI and make it an (expensive) NOP;
     * AArch32 kernels don't trap it so it will delay a bit.
     * For QEMU, make it a NOP here, because trying to raise EXCP_HLT
     * would trigger an abort.
     */
    return;
#else
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        if (env->aarch64) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }

        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
#endif
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to the top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome*).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome_el)(CPUARMState *env, uint32_t excp,
                                        uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/*
 * Raise an exception with the specified syndrome register value
 * to the default target EL.
 */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    raise_exception(env, excp, syndrome, exception_target_el(env));
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
    /* TODO: Not all cpsr bits are relevant to hflags.  */
    arm_rebuild_hflags(env);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    uint32_t mask;

    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);
    arm_rebuild_hflags(env);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}

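/*
 * The helpers below are used by instructions such as LDM/STM (user
 * registers), which access the USR-bank copies of R8-R14 from a
 * privileged mode.
 */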
/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

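/*
 * Check whether a coprocessor/system register access is permitted from
 * the current state; called from generated code before the access
 * itself. On failure this raises the appropriate exception (UNDEF or a
 * trap to a higher EL) and does not return.
 */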
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri = rip;
    CPAccessResult res = CP_ACCESS_OK;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        res = CP_ACCESS_TRAP;
        goto fail;
    }

    /*
     * Check for an EL2 trap due to HSTR_EL2. We expect EL0 accesses
     * to sysregs not accessible at EL0 to have UNDEF-ed already.
     */
    if (!is_a64(env) && arm_current_el(env) < 2 && ri->cp == 15 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        uint32_t mask = 1 << ri->crn;

        if (ri->type & ARM_CP_64BIT) {
            mask = 1 << ri->crm;
        }

        /* T4 and T14 are RES0 */
        mask &= ~((1 << 4) | (1 << 14));

        if (env->cp15.hstr_el2 & mask) {
            res = CP_ACCESS_TRAP_EL2;
            goto fail;
        }
    }

    if (ri->accessfn) {
        res = ri->accessfn(env, ri, isread);
    }
    if (likely(res == CP_ACCESS_OK)) {
        return;
    }

 fail:
    switch (res & ~CP_ACCESS_EL_MASK) {
    case CP_ACCESS_TRAP:
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        if (cpu_isar_feature(aa64_ids, cpu) && isread &&
            arm_cpreg_in_idspace(ri)) {
            /*
             * FEAT_IDST says this should be reported as EC_SYSTEMREGISTERTRAP,
             * not EC_UNCATEGORIZED
             */
            break;
        }
        syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }

    target_el = res & CP_ACCESS_EL_MASK;
    switch (target_el) {
    case 0:
        target_el = exception_target_el(env);
        break;
    case 2:
        assert(arm_current_el(env) != 3);
        assert(arm_is_el2_enabled(env));
        break;
    case 3:
        assert(arm_feature(env, ARM_FEATURE_EL3));
        break;
    default:
        /* No "direct" traps to EL1 */
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

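/*
 * Read/write a system register via its ARMCPRegInfo hooks. Registers
 * marked ARM_CP_IO may touch device or timer state, so those accesses
 * are wrapped in the iothread (big QEMU) lock.
 */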
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is UNDEF in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     *  extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     *    - PSCI conduit is SMC but we don't have a valid PSCI call,
     *    - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

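/*
 * Probe a range of guest memory for the given access type, raising any
 * faults before the real access starts. in_page is the number of bytes
 * from ptr to the end of its page; a range that crosses a page boundary
 * is probed as two separate single-page pieces.
 */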
void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
                          uint32_t access_type, uint32_t mmu_idx,
                          uint32_t size)
{
    uint32_t in_page = -((uint32_t)ptr | TARGET_PAGE_MASK);
    uintptr_t ra = GETPC();

    if (likely(size <= in_page)) {
        probe_access(env, ptr, size, access_type, mmu_idx, ra);
    } else {
        probe_access(env, ptr, in_page, access_type, mmu_idx, ra);
        probe_access(env, ptr + in_page, size - in_page,
                     access_type, mmu_idx, ra);
    }
}

/*
 * This function corresponds to AArch64.vESBOperation().
 * Note that the AArch32 version is not functionally different.
 */
void HELPER(vesb)(CPUARMState *env)
{
    /*
     * The EL2Enabled() check is done inside arm_hcr_el2_eff: if EL2 is
     * not enabled it returns 0, so HCR_EL2.VSE reads as 0 and nothing
     * happens.
     */
    uint64_t hcr = arm_hcr_el2_eff(env);
    bool enabled = !(hcr & HCR_TGE) && (hcr & HCR_AMO);
    bool pending = enabled && (hcr & HCR_VSE);
    bool masked  = (env->daif & PSTATE_A);

    /* If VSE pending and masked, defer the exception.  */
    if (pending && masked) {
        uint32_t syndrome;

        if (arm_el_is_aa64(env, 1)) {
            /* Copy across IDS and ISS from VSESR. */
            syndrome = env->cp15.vsesr_el2 & 0x1ffffff;
        } else {
            ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal };

            if (extended_addresses_enabled(env)) {
                syndrome = arm_fi_to_lfsc(&fi);
            } else {
                syndrome = arm_fi_to_sfsc(&fi);
            }
            /* Copy across AET and ExT from VSESR. */
            syndrome |= env->cp15.vsesr_el2 & 0xd000;
        }

        /* Set VDISR_EL2.A along with the syndrome. */
        env->cp15.vdisr_el2 = syndrome | (1u << 31);

        /* Clear pending virtual SError */
        env->cp15.hcr_el2 &= ~HCR_VSE;
        cpu_reset_interrupt(env_cpu(env), CPU_INTERRUPT_VSERR);
    }
}