qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

helper.c (424586B)


      1 /*
      2  * ARM generic helpers.
      3  *
      4  * This code is licensed under the GNU GPL v2 or later.
      5  *
      6  * SPDX-License-Identifier: GPL-2.0-or-later
      7  */
      8 
      9 #include "qemu/osdep.h"
     10 #include "qemu/units.h"
     11 #include "qemu/log.h"
     12 #include "trace.h"
     13 #include "cpu.h"
     14 #include "internals.h"
     15 #include "exec/helper-proto.h"
     16 #include "qemu/host-utils.h"
     17 #include "qemu/main-loop.h"
     18 #include "qemu/timer.h"
     19 #include "qemu/bitops.h"
     20 #include "qemu/crc32c.h"
     21 #include "qemu/qemu-print.h"
     22 #include "exec/exec-all.h"
     23 #include <zlib.h> /* For crc32 */
     24 #include "hw/irq.h"
     25 #include "semihosting/semihost.h"
     26 #include "sysemu/cpus.h"
     27 #include "sysemu/cpu-timers.h"
     28 #include "sysemu/kvm.h"
     29 #include "qemu/range.h"
     30 #include "qapi/qapi-commands-machine-target.h"
     31 #include "qapi/error.h"
     32 #include "qemu/guest-random.h"
     33 #ifdef CONFIG_TCG
     34 #include "arm_ldst.h"
     35 #include "exec/cpu_ldst.h"
     36 #include "semihosting/common-semi.h"
     37 #endif
     38 #include "cpregs.h"
     39 
     40 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
     41 
     42 static void switch_mode(CPUARMState *env, int mode);
     43 
     44 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
     45 {
     46     assert(ri->fieldoffset);
     47     if (cpreg_field_is_64bit(ri)) {
     48         return CPREG_FIELD64(env, ri);
     49     } else {
     50         return CPREG_FIELD32(env, ri);
     51     }
     52 }
     53 
     54 void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
     55 {
     56     assert(ri->fieldoffset);
     57     if (cpreg_field_is_64bit(ri)) {
     58         CPREG_FIELD64(env, ri) = value;
     59     } else {
     60         CPREG_FIELD32(env, ri) = value;
     61     }
     62 }
     63 
     64 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
     65 {
     66     return (char *)env + ri->fieldoffset;
     67 }
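
/*
 * To illustrate the fieldoffset contract (the register below is
 * hypothetical, shown purely as a sketch): a regdef whose state lives
 * directly in CPUARMState names its storage via offsetof(), and
 * raw_read()/raw_write() then simply index off the env pointer with no
 * side effects.
 */
#if 0
static const ARMCPRegInfo example_reg = {
    .name = "EXAMPLEREG", .cp = 15, .crn = 13, .crm = 0,
    .opc1 = 0, .opc2 = 7, .access = PL1_RW,
    /* 32-bit state backed by an existing CPUARMState field */
    .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
};

static uint64_t example_raw_read(CPUARMState *env)
{
    /* What raw_read() does for this 32-bit field, spelled out */
    return *(uint32_t *)((char *)env + example_reg.fieldoffset);
}
#endif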
     68 
     69 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
     70 {
     71     /* Raw read of a coprocessor register (as needed for migration, etc). */
     72     if (ri->type & ARM_CP_CONST) {
     73         return ri->resetvalue;
     74     } else if (ri->raw_readfn) {
     75         return ri->raw_readfn(env, ri);
     76     } else if (ri->readfn) {
     77         return ri->readfn(env, ri);
     78     } else {
     79         return raw_read(env, ri);
     80     }
     81 }
     82 
     83 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
     84                              uint64_t v)
     85 {
     86     /* Raw write of a coprocessor register (as needed for migration, etc).
     87      * Note that constant registers are treated as write-ignored; the
     88      * caller should check for success by whether a readback gives the
     89      * value written.
     90      */
     91     if (ri->type & ARM_CP_CONST) {
     92         return;
     93     } else if (ri->raw_writefn) {
     94         ri->raw_writefn(env, ri, v);
     95     } else if (ri->writefn) {
     96         ri->writefn(env, ri, v);
     97     } else {
     98         raw_write(env, ri, v);
     99     }
    100 }
    101 
    102 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
    103 {
     104     /* Return true if the regdef would cause an assertion if you called
     105      * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     106      * program bug for it not to have the NO_RAW flag).
     107      * NB that returning false here doesn't necessarily mean that calling
     108      * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     109      * read/write access functions which are safe for raw use" from "has
     110      * read/write access functions which have side effects but has forgotten
     111      * to provide raw access functions".
     112      * The tests here line up with the conditions in read/write_raw_cp_reg()
     113      * and assertions in raw_read()/raw_write().
     114      */
    115     if ((ri->type & ARM_CP_CONST) ||
    116         ri->fieldoffset ||
    117         ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
    118         return false;
    119     }
    120     return true;
    121 }
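
/*
 * In practice this invariant means a regdef with side effects and no
 * backing storage must opt out of raw access explicitly. A sketch of
 * such a definition (hypothetical, modelled on the TLBI ops registered
 * later in this file):
 */
#if 0
static const ARMCPRegInfo example_op_reg = {
    .name = "EXAMPLEOP", .cp = 15, .crn = 8, .crm = 7,
    .opc1 = 0, .opc2 = 0, .access = PL1_W,
    /* writefn only and no fieldoffset: without ARM_CP_NO_RAW this
     * regdef would fail the raw_accessors_invalid() check above.
     */
    .type = ARM_CP_NO_RAW, .writefn = tlbiall_write,
};
#endif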
    122 
    123 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
    124 {
    125     /* Write the coprocessor state from cpu->env to the (index,value) list. */
    126     int i;
    127     bool ok = true;
    128 
    129     for (i = 0; i < cpu->cpreg_array_len; i++) {
    130         uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
    131         const ARMCPRegInfo *ri;
    132         uint64_t newval;
    133 
    134         ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
    135         if (!ri) {
    136             ok = false;
    137             continue;
    138         }
    139         if (ri->type & ARM_CP_NO_RAW) {
    140             continue;
    141         }
    142 
    143         newval = read_raw_cp_reg(&cpu->env, ri);
    144         if (kvm_sync) {
    145             /*
    146              * Only sync if the previous list->cpustate sync succeeded.
    147              * Rather than tracking the success/failure state for every
    148              * item in the list, we just recheck "does the raw write we must
    149              * have made in write_list_to_cpustate() read back OK" here.
    150              */
    151             uint64_t oldval = cpu->cpreg_values[i];
    152 
    153             if (oldval == newval) {
    154                 continue;
    155             }
    156 
    157             write_raw_cp_reg(&cpu->env, ri, oldval);
    158             if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
    159                 continue;
    160             }
    161 
    162             write_raw_cp_reg(&cpu->env, ri, newval);
    163         }
    164         cpu->cpreg_values[i] = newval;
    165     }
    166     return ok;
    167 }
    168 
    169 bool write_list_to_cpustate(ARMCPU *cpu)
    170 {
    171     int i;
    172     bool ok = true;
    173 
    174     for (i = 0; i < cpu->cpreg_array_len; i++) {
    175         uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
    176         uint64_t v = cpu->cpreg_values[i];
    177         const ARMCPRegInfo *ri;
    178 
    179         ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
    180         if (!ri) {
    181             ok = false;
    182             continue;
    183         }
    184         if (ri->type & ARM_CP_NO_RAW) {
    185             continue;
    186         }
    187         /* Write value and confirm it reads back as written
    188          * (to catch read-only registers and partially read-only
    189          * registers where the incoming migration value doesn't match)
    190          */
    191         write_raw_cp_reg(&cpu->env, ri, v);
    192         if (read_raw_cp_reg(&cpu->env, ri) != v) {
    193             ok = false;
    194         }
    195     }
    196     return ok;
    197 }
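
/*
 * A minimal sketch of how the two helpers above pair up around
 * migration (the surrounding plumbing is elided; only these calls are
 * real):
 */
#if 0
static void example_cpreg_roundtrip(ARMCPU *cpu)
{
    /* Source side: snapshot coprocessor state into the (index,value) list */
    bool snap_ok = write_cpustate_to_list(cpu, false);

    /* Destination side: replay the list into cpu->env; the
     * write-then-readback check flags values that did not stick.
     */
    bool load_ok = write_list_to_cpustate(cpu);

    (void)snap_ok; (void)load_ok;
}
#endif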
    198 
    199 static void add_cpreg_to_list(gpointer key, gpointer opaque)
    200 {
    201     ARMCPU *cpu = opaque;
    202     uint32_t regidx = (uintptr_t)key;
    203     const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
    204 
    205     if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
    206         cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
    207         /* The value array need not be initialized at this point */
    208         cpu->cpreg_array_len++;
    209     }
    210 }
    211 
    212 static void count_cpreg(gpointer key, gpointer opaque)
    213 {
    214     ARMCPU *cpu = opaque;
    215     const ARMCPRegInfo *ri;
    216 
    217     ri = g_hash_table_lookup(cpu->cp_regs, key);
    218 
    219     if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
    220         cpu->cpreg_array_len++;
    221     }
    222 }
    223 
    224 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
    225 {
    226     uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
    227     uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);
    228 
    229     if (aidx > bidx) {
    230         return 1;
    231     }
    232     if (aidx < bidx) {
    233         return -1;
    234     }
    235     return 0;
    236 }
    237 
    238 void init_cpreg_list(ARMCPU *cpu)
    239 {
    240     /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
    241      * Note that we require cpreg_tuples[] to be sorted by key ID.
    242      */
    243     GList *keys;
    244     int arraylen;
    245 
    246     keys = g_hash_table_get_keys(cpu->cp_regs);
    247     keys = g_list_sort(keys, cpreg_key_compare);
    248 
    249     cpu->cpreg_array_len = 0;
    250 
    251     g_list_foreach(keys, count_cpreg, cpu);
    252 
    253     arraylen = cpu->cpreg_array_len;
    254     cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    255     cpu->cpreg_values = g_new(uint64_t, arraylen);
    256     cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    257     cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    258     cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    259     cpu->cpreg_array_len = 0;
    260 
    261     g_list_foreach(keys, add_cpreg_to_list, cpu);
    262 
    263     assert(cpu->cpreg_array_len == arraylen);
    264 
    265     g_list_free(keys);
    266 }
    267 
    268 /*
    269  * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
    270  */
    271 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
    272                                         const ARMCPRegInfo *ri,
    273                                         bool isread)
    274 {
    275     if (!is_a64(env) && arm_current_el(env) == 3 &&
    276         arm_is_secure_below_el3(env)) {
    277         return CP_ACCESS_TRAP_UNCATEGORIZED;
    278     }
    279     return CP_ACCESS_OK;
    280 }
    281 
    282 /* Some secure-only AArch32 registers trap to EL3 if used from
    283  * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
    284  * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
    285  * We assume that the .access field is set to PL1_RW.
    286  */
    287 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
    288                                             const ARMCPRegInfo *ri,
    289                                             bool isread)
    290 {
    291     if (arm_current_el(env) == 3) {
    292         return CP_ACCESS_OK;
    293     }
    294     if (arm_is_secure_below_el3(env)) {
    295         if (env->cp15.scr_el3 & SCR_EEL2) {
    296             return CP_ACCESS_TRAP_EL2;
    297         }
    298         return CP_ACCESS_TRAP_EL3;
    299     }
    300     /* This will be EL1 NS and EL2 NS, which just UNDEF */
    301     return CP_ACCESS_TRAP_UNCATEGORIZED;
    302 }
    303 
    304 /* Check for traps to performance monitor registers, which are controlled
    305  * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
    306  */
    307 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
    308                                  bool isread)
    309 {
    310     int el = arm_current_el(env);
    311     uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    312 
    313     if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
    314         return CP_ACCESS_TRAP_EL2;
    315     }
    316     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
    317         return CP_ACCESS_TRAP_EL3;
    318     }
    319     return CP_ACCESS_OK;
    320 }
    321 
    322 /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM.  */
    323 static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
    324                                       bool isread)
    325 {
    326     if (arm_current_el(env) == 1) {
    327         uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
    328         if (arm_hcr_el2_eff(env) & trap) {
    329             return CP_ACCESS_TRAP_EL2;
    330         }
    331     }
    332     return CP_ACCESS_OK;
    333 }
    334 
    335 /* Check for traps from EL1 due to HCR_EL2.TSW.  */
    336 static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
    337                                  bool isread)
    338 {
    339     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
    340         return CP_ACCESS_TRAP_EL2;
    341     }
    342     return CP_ACCESS_OK;
    343 }
    344 
    345 /* Check for traps from EL1 due to HCR_EL2.TACR.  */
    346 static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
    347                                   bool isread)
    348 {
    349     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
    350         return CP_ACCESS_TRAP_EL2;
    351     }
    352     return CP_ACCESS_OK;
    353 }
    354 
    355 /* Check for traps from EL1 due to HCR_EL2.TTLB. */
    356 static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
    357                                   bool isread)
    358 {
    359     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
    360         return CP_ACCESS_TRAP_EL2;
    361     }
    362     return CP_ACCESS_OK;
    363 }
    364 
    365 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    366 {
    367     ARMCPU *cpu = env_archcpu(env);
    368 
    369     raw_write(env, ri, value);
    370     tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
    371 }
    372 
    373 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    374 {
    375     ARMCPU *cpu = env_archcpu(env);
    376 
    377     if (raw_read(env, ri) != value) {
    378         /* Unlike real hardware the qemu TLB uses virtual addresses,
    379          * not modified virtual addresses, so this causes a TLB flush.
    380          */
    381         tlb_flush(CPU(cpu));
    382         raw_write(env, ri, value);
    383     }
    384 }
    385 
    386 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    387                              uint64_t value)
    388 {
    389     ARMCPU *cpu = env_archcpu(env);
    390 
    391     if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
    392         && !extended_addresses_enabled(env)) {
    393         /* For VMSA (when not using the LPAE long descriptor page table
    394          * format) this register includes the ASID, so do a TLB flush.
    395          * For PMSA it is purely a process ID and no action is needed.
    396          */
    397         tlb_flush(CPU(cpu));
    398     }
    399     raw_write(env, ri, value);
    400 }
    401 
    402 static int alle1_tlbmask(CPUARMState *env)
    403 {
    404     /*
    405      * Note that the 'ALL' scope must invalidate both stage 1 and
    406      * stage 2 translations, whereas most other scopes only invalidate
    407      * stage 1 translations.
    408      */
    409     return (ARMMMUIdxBit_E10_1 |
    410             ARMMMUIdxBit_E10_1_PAN |
    411             ARMMMUIdxBit_E10_0 |
    412             ARMMMUIdxBit_Stage2 |
    413             ARMMMUIdxBit_Stage2_S);
    414 }
    415 
    416 
    417 /* IS variants of TLB operations must affect all cores */
    418 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
    419                              uint64_t value)
    420 {
    421     CPUState *cs = env_cpu(env);
    422 
    423     tlb_flush_all_cpus_synced(cs);
    424 }
    425 
    426 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
    427                              uint64_t value)
    428 {
    429     CPUState *cs = env_cpu(env);
    430 
    431     tlb_flush_all_cpus_synced(cs);
    432 }
    433 
    434 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
    435                              uint64_t value)
    436 {
    437     CPUState *cs = env_cpu(env);
    438 
    439     tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
    440 }
    441 
    442 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
    443                              uint64_t value)
    444 {
    445     CPUState *cs = env_cpu(env);
    446 
    447     tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
    448 }
    449 
    450 /*
    451  * Non-IS variants of TLB operations are upgraded to
    452  * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
    453  * force broadcast of these operations.
    454  */
    455 static bool tlb_force_broadcast(CPUARMState *env)
    456 {
    457     return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
    458 }
    459 
    460 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
    461                           uint64_t value)
    462 {
    463     /* Invalidate all (TLBIALL) */
    464     CPUState *cs = env_cpu(env);
    465 
    466     if (tlb_force_broadcast(env)) {
    467         tlb_flush_all_cpus_synced(cs);
    468     } else {
    469         tlb_flush(cs);
    470     }
    471 }
    472 
    473 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
    474                           uint64_t value)
    475 {
    476     /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    477     CPUState *cs = env_cpu(env);
    478 
    479     value &= TARGET_PAGE_MASK;
    480     if (tlb_force_broadcast(env)) {
    481         tlb_flush_page_all_cpus_synced(cs, value);
    482     } else {
    483         tlb_flush_page(cs, value);
    484     }
    485 }
    486 
    487 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
    488                            uint64_t value)
    489 {
    490     /* Invalidate by ASID (TLBIASID) */
    491     CPUState *cs = env_cpu(env);
    492 
    493     if (tlb_force_broadcast(env)) {
    494         tlb_flush_all_cpus_synced(cs);
    495     } else {
    496         tlb_flush(cs);
    497     }
    498 }
    499 
    500 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
    501                            uint64_t value)
    502 {
    503     /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    504     CPUState *cs = env_cpu(env);
    505 
    506     value &= TARGET_PAGE_MASK;
    507     if (tlb_force_broadcast(env)) {
    508         tlb_flush_page_all_cpus_synced(cs, value);
    509     } else {
    510         tlb_flush_page(cs, value);
    511     }
    512 }
    513 
    514 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
    515                                uint64_t value)
    516 {
    517     CPUState *cs = env_cpu(env);
    518 
    519     tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
    520 }
    521 
    522 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
    523                                   uint64_t value)
    524 {
    525     CPUState *cs = env_cpu(env);
    526 
    527     tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env));
    528 }
    529 
    530 
    531 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
    532                               uint64_t value)
    533 {
    534     CPUState *cs = env_cpu(env);
    535 
    536     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
    537 }
    538 
    539 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
    540                                  uint64_t value)
    541 {
    542     CPUState *cs = env_cpu(env);
    543 
    544     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
    545 }
    546 
    547 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
    548                               uint64_t value)
    549 {
    550     CPUState *cs = env_cpu(env);
    551     uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
    552 
    553     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
    554 }
    555 
    556 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
    557                                  uint64_t value)
    558 {
    559     CPUState *cs = env_cpu(env);
    560     uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
    561 
    562     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
    563                                              ARMMMUIdxBit_E2);
    564 }
    565 
    566 static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
    567                                 uint64_t value)
    568 {
    569     CPUState *cs = env_cpu(env);
    570     uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;
    571 
    572     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
    573 }
    574 
    575 static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
    576                                 uint64_t value)
    577 {
    578     CPUState *cs = env_cpu(env);
    579     uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;
    580 
    581     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2);
    582 }
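
/*
 * TLBIIPAS2* encodes bits [39:12] of the intermediate physical address
 * in value[27:0], hence the mask-and-shift above. A worked example with
 * an arbitrary value:
 *
 *   value                          = 0x0000004000012345
 *   value & MAKE_64BIT_MASK(0, 28) = 0x0000000000012345
 *   pageaddr = (...) << 12         = 0x0000000012345000
 */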
    583 
    584 static const ARMCPRegInfo cp_reginfo[] = {
    585     /* Define the secure and non-secure FCSE identifier CP registers
    586      * separately because there is no secure bank in V8 (no _EL3).  This allows
    587      * the secure register to be properly reset and migrated. There is also no
    588      * v8 EL1 version of the register so the non-secure instance stands alone.
    589      */
    590     { .name = "FCSEIDR",
    591       .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
    592       .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
    593       .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
    594       .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    595     { .name = "FCSEIDR_S",
    596       .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
    597       .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
    598       .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
    599       .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    600     /* Define the secure and non-secure context identifier CP registers
    601      * separately because there is no secure bank in V8 (no _EL3).  This allows
    602      * the secure register to be properly reset and migrated.  In the
    603      * non-secure case, the 32-bit register will have reset and migration
    604      * disabled during registration as it is handled by the 64-bit instance.
    605      */
    606     { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
    607       .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
    608       .access = PL1_RW, .accessfn = access_tvm_trvm,
    609       .secure = ARM_CP_SECSTATE_NS,
    610       .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
    611       .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    612     { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
    613       .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
    614       .access = PL1_RW, .accessfn = access_tvm_trvm,
    615       .secure = ARM_CP_SECSTATE_S,
    616       .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
    617       .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    618 };
    619 
    620 static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    621     /* NB: Some of these registers exist in v8 but with more precise
    622      * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
    623      */
    624     /* MMU Domain access control / MPU write buffer control */
    625     { .name = "DACR",
    626       .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
    627       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
    628       .writefn = dacr_write, .raw_writefn = raw_write,
    629       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
    630                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    631     /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
    632      * For v6 and v5, these mappings are overly broad.
    633      */
    634     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
    635       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    636     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
    637       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    638     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
    639       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    640     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
    641       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    642     /* Cache maintenance ops; some of this space may be overridden later. */
    643     { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
    644       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
    645       .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    646 };
    647 
    648 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    649     /* Not all pre-v6 cores implemented this WFI, so this is slightly
    650      * over-broad.
    651      */
    652     { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
    653       .access = PL1_W, .type = ARM_CP_WFI },
    654 };
    655 
    656 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    657     /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
    658      * is UNPREDICTABLE; we choose to NOP as most implementations do).
    659      */
    660     { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
    661       .access = PL1_W, .type = ARM_CP_WFI },
    662     /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
    663      * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
    664      * OMAPCP will override this space.
    665      */
    666     { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
    667       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
    668       .resetvalue = 0 },
    669     { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
    670       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
    671       .resetvalue = 0 },
    672     /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    673     { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
    674       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
    675       .resetvalue = 0 },
    676     /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
    677      * implementing it as RAZ means the "debug architecture version" bits
    678      * will read as a reserved value, which should cause Linux to not try
    679      * to use the debug hardware.
    680      */
    681     { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
    682       .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    683     /* MMU TLB control. Note that the wildcarding means we cover not just
    684      * the unified TLB ops but also the dside/iside/inner-shareable variants.
    685      */
    686     { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
    687       .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
    688       .type = ARM_CP_NO_RAW },
    689     { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
    690       .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
    691       .type = ARM_CP_NO_RAW },
    692     { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
    693       .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
    694       .type = ARM_CP_NO_RAW },
    695     { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
    696       .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
    697       .type = ARM_CP_NO_RAW },
    698     { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
    699       .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    700     { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
    701       .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    702 };
    703 
    704 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    705                         uint64_t value)
    706 {
    707     uint32_t mask = 0;
    708 
    709     /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    710     if (!arm_feature(env, ARM_FEATURE_V8)) {
    711         /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
    712          * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
    713          * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
    714          */
    715         if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
    716             /* VFP coprocessor: cp10 & cp11 [23:20] */
    717             mask |= R_CPACR_ASEDIS_MASK |
    718                     R_CPACR_D32DIS_MASK |
    719                     R_CPACR_CP11_MASK |
    720                     R_CPACR_CP10_MASK;
    721 
    722             if (!arm_feature(env, ARM_FEATURE_NEON)) {
    723                 /* ASEDIS [31] bit is RAO/WI */
    724                 value |= R_CPACR_ASEDIS_MASK;
    725             }
    726 
    727             /* VFPv3 and upwards with NEON implement 32 double precision
    728              * registers (D0-D31).
    729              */
    730             if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
    731                 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
    732                 value |= R_CPACR_D32DIS_MASK;
    733             }
    734         }
    735         value &= mask;
    736     }
    737 
    738     /*
    739      * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
    740      * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
    741      */
    742     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
    743         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
    744         mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
    745         value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
    746     }
    747 
    748     env->cp15.cpacr_el1 = value;
    749 }
    750 
    751 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
    752 {
    753     /*
    754      * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
    755      * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
    756      */
    757     uint64_t value = env->cp15.cpacr_el1;
    758 
    759     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
    760         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
     761         value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
    762     }
    763     return value;
    764 }
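
/*
 * Worked example of the NSACR.CP10 gating above (values illustrative):
 * in Non-secure state under an AArch32 EL3 with NSACR.CP10 == 0, a
 * guest write of 0x00F00000 (CP10/CP11 fully enabled) leaves the stored
 * CPACR.{CP11,CP10} fields unchanged, and a subsequent read returns
 * those fields as 0b00 regardless of the stored value.
 */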
    765 
    766 
    767 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
    768 {
    769     /* Call cpacr_write() so that we reset with the correct RAO bits set
    770      * for our CPU features.
    771      */
    772     cpacr_write(env, ri, 0);
    773 }
    774 
    775 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
    776                                    bool isread)
    777 {
    778     if (arm_feature(env, ARM_FEATURE_V8)) {
    779         /* Check if CPACR accesses are to be trapped to EL2 */
    780         if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
    781             FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
    782             return CP_ACCESS_TRAP_EL2;
    783         /* Check if CPACR accesses are to be trapped to EL3 */
    784         } else if (arm_current_el(env) < 3 &&
    785                    FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
    786             return CP_ACCESS_TRAP_EL3;
    787         }
    788     }
    789 
    790     return CP_ACCESS_OK;
    791 }
    792 
    793 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
    794                                   bool isread)
    795 {
    796     /* Check if CPTR accesses are set to trap to EL3 */
    797     if (arm_current_el(env) == 2 &&
    798         FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
    799         return CP_ACCESS_TRAP_EL3;
    800     }
    801 
    802     return CP_ACCESS_OK;
    803 }
    804 
    805 static const ARMCPRegInfo v6_cp_reginfo[] = {
    806     /* prefetch by MVA in v6, NOP in v7 */
    807     { .name = "MVA_prefetch",
    808       .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
    809       .access = PL1_W, .type = ARM_CP_NOP },
    810     /* We need to break the TB after ISB to execute self-modifying code
    811      * correctly and also to take any pending interrupts immediately.
    812      * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
    813      */
    814     { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
    815       .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    816     { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
    817       .access = PL0_W, .type = ARM_CP_NOP },
    818     { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
    819       .access = PL0_W, .type = ARM_CP_NOP },
    820     { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
    821       .access = PL1_RW, .accessfn = access_tvm_trvm,
    822       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
    823                              offsetof(CPUARMState, cp15.ifar_ns) },
    824       .resetvalue = 0, },
    825     /* Watchpoint Fault Address Register : should actually only be present
    826      * for 1136, 1176, 11MPCore.
    827      */
    828     { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
    829       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    830     { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
    831       .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
    832       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
    833       .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    834 };
    835 
    836 typedef struct pm_event {
    837     uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    838     /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    839     bool (*supported)(CPUARMState *);
    840     /*
    841      * Retrieve the current count of the underlying event. The programmed
    842      * counters hold a difference from the return value from this function
    843      */
    844     uint64_t (*get_count)(CPUARMState *);
    845     /*
    846      * Return how many nanoseconds it will take (at a minimum) for count events
    847      * to occur. A negative value indicates the counter will never overflow, or
    848      * that the counter has otherwise arranged for the overflow bit to be set
    849      * and the PMU interrupt to be raised on overflow.
    850      */
    851     int64_t (*ns_per_count)(uint64_t);
    852 } pm_event;
    853 
    854 static bool event_always_supported(CPUARMState *env)
    855 {
    856     return true;
    857 }
    858 
    859 static uint64_t swinc_get_count(CPUARMState *env)
    860 {
    861     /*
    862      * SW_INCR events are written directly to the pmevcntr's by writes to
    863      * PMSWINC, so there is no underlying count maintained by the PMU itself
    864      */
    865     return 0;
    866 }
    867 
    868 static int64_t swinc_ns_per(uint64_t ignored)
    869 {
    870     return -1;
    871 }
    872 
    873 /*
     874  * Return the underlying cycle count for the PMU cycle counters. In
     875  * usermode, return the host CPU's tick count instead.
    876  */
    877 static uint64_t cycles_get_count(CPUARMState *env)
    878 {
    879 #ifndef CONFIG_USER_ONLY
    880     return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
    881                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
    882 #else
    883     return cpu_get_host_ticks();
    884 #endif
    885 }
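
/*
 * With ARM_CPU_FREQ fixed at 1 GHz, the muldiv64() above maps virtual
 * time 1:1 from nanoseconds to cycles (e.g. 2500 ns of virtual time
 * reads back as 2500 cycles); the muldiv64() form keeps the conversion
 * exact, without intermediate overflow, should the frequency ever
 * become configurable as the FIXME suggests.
 */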
    886 
    887 #ifndef CONFIG_USER_ONLY
    888 static int64_t cycles_ns_per(uint64_t cycles)
    889 {
    890     return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
    891 }
    892 
    893 static bool instructions_supported(CPUARMState *env)
    894 {
    895     return icount_enabled() == 1; /* Precise instruction counting */
    896 }
    897 
    898 static uint64_t instructions_get_count(CPUARMState *env)
    899 {
    900     return (uint64_t)icount_get_raw();
    901 }
    902 
    903 static int64_t instructions_ns_per(uint64_t icount)
    904 {
    905     return icount_to_ns((int64_t)icount);
    906 }
    907 #endif
    908 
    909 static bool pmuv3p1_events_supported(CPUARMState *env)
    910 {
    911     /* For events which are supported in any v8.1 PMU */
    912     return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
    913 }
    914 
    915 static bool pmuv3p4_events_supported(CPUARMState *env)
    916 {
     917     /* For events which are supported in any v8.4 PMU */
    918     return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
    919 }
    920 
    921 static uint64_t zero_event_get_count(CPUARMState *env)
    922 {
    923     /* For events which on QEMU never fire, so their count is always zero */
    924     return 0;
    925 }
    926 
    927 static int64_t zero_event_ns_per(uint64_t cycles)
    928 {
    929     /* An event which never fires can never overflow */
    930     return -1;
    931 }
    932 
    933 static const pm_event pm_events[] = {
    934     { .number = 0x000, /* SW_INCR */
    935       .supported = event_always_supported,
    936       .get_count = swinc_get_count,
    937       .ns_per_count = swinc_ns_per,
    938     },
    939 #ifndef CONFIG_USER_ONLY
    940     { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
    941       .supported = instructions_supported,
    942       .get_count = instructions_get_count,
    943       .ns_per_count = instructions_ns_per,
    944     },
    945     { .number = 0x011, /* CPU_CYCLES, Cycle */
    946       .supported = event_always_supported,
    947       .get_count = cycles_get_count,
    948       .ns_per_count = cycles_ns_per,
    949     },
    950 #endif
    951     { .number = 0x023, /* STALL_FRONTEND */
    952       .supported = pmuv3p1_events_supported,
    953       .get_count = zero_event_get_count,
    954       .ns_per_count = zero_event_ns_per,
    955     },
    956     { .number = 0x024, /* STALL_BACKEND */
    957       .supported = pmuv3p1_events_supported,
    958       .get_count = zero_event_get_count,
    959       .ns_per_count = zero_event_ns_per,
    960     },
    961     { .number = 0x03c, /* STALL */
    962       .supported = pmuv3p4_events_supported,
    963       .get_count = zero_event_get_count,
    964       .ns_per_count = zero_event_ns_per,
    965     },
    966 };
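
/*
 * A sketch of what a further table entry would look like (the event
 * number and backing behaviour here are chosen arbitrarily; a real
 * addition must also fit under MAX_EVENT_ID below):
 */
#if 0
    { .number = 0x012, /* hypothetical event with no underlying count */
      .supported = event_always_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
#endif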
    967 
    968 /*
    969  * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
    970  * events (i.e. the statistical profiling extension), this implementation
    971  * should first be updated to something sparse instead of the current
    972  * supported_event_map[] array.
    973  */
    974 #define MAX_EVENT_ID 0x3c
    975 #define UNSUPPORTED_EVENT UINT16_MAX
    976 static uint16_t supported_event_map[MAX_EVENT_ID + 1];
    977 
    978 /*
    979  * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
    980  * of ARM event numbers to indices in our pm_events array.
    981  *
    982  * Note: Events in the 0x40XX range are not currently supported.
    983  */
    984 void pmu_init(ARMCPU *cpu)
    985 {
    986     unsigned int i;
    987 
    988     /*
    989      * Empty supported_event_map and cpu->pmceid[01] before adding supported
    990      * events to them
    991      */
    992     for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
    993         supported_event_map[i] = UNSUPPORTED_EVENT;
    994     }
    995     cpu->pmceid0 = 0;
    996     cpu->pmceid1 = 0;
    997 
    998     for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
    999         const pm_event *cnt = &pm_events[i];
   1000         assert(cnt->number <= MAX_EVENT_ID);
   1001         /* We do not currently support events in the 0x40xx range */
   1002         assert(cnt->number <= 0x3f);
   1003 
   1004         if (cnt->supported(&cpu->env)) {
   1005             supported_event_map[cnt->number] = i;
   1006             uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
   1007             if (cnt->number & 0x20) {
   1008                 cpu->pmceid1 |= event_mask;
   1009             } else {
   1010                 cpu->pmceid0 |= event_mask;
   1011             }
   1012         }
   1013     }
   1014 }
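
/*
 * Worked example of the PMCEID bit placement above: CPU_CYCLES (0x011)
 * has bit 5 (0x20) clear, so it sets bit 17 (0x11 & 0x1f) of pmceid0;
 * STALL_FRONTEND (0x023) has bit 5 set, so it sets bit 3 (0x23 & 0x1f)
 * of pmceid1.
 */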
   1015 
   1016 /*
   1017  * Check at runtime whether a PMU event is supported for the current machine
   1018  */
   1019 static bool event_supported(uint16_t number)
   1020 {
   1021     if (number > MAX_EVENT_ID) {
   1022         return false;
   1023     }
   1024     return supported_event_map[number] != UNSUPPORTED_EVENT;
   1025 }
   1026 
   1027 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
   1028                                    bool isread)
   1029 {
   1030     /* Performance monitor registers user accessibility is controlled
   1031      * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
   1032      * trapping to EL2 or EL3 for other accesses.
   1033      */
   1034     int el = arm_current_el(env);
   1035     uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
   1036 
   1037     if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
   1038         return CP_ACCESS_TRAP;
   1039     }
   1040     if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
   1041         return CP_ACCESS_TRAP_EL2;
   1042     }
   1043     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
   1044         return CP_ACCESS_TRAP_EL3;
   1045     }
   1046 
   1047     return CP_ACCESS_OK;
   1048 }
   1049 
   1050 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
   1051                                            const ARMCPRegInfo *ri,
   1052                                            bool isread)
   1053 {
   1054     /* ER: event counter read trap control */
   1055     if (arm_feature(env, ARM_FEATURE_V8)
   1056         && arm_current_el(env) == 0
   1057         && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
   1058         && isread) {
   1059         return CP_ACCESS_OK;
   1060     }
   1061 
   1062     return pmreg_access(env, ri, isread);
   1063 }
   1064 
   1065 static CPAccessResult pmreg_access_swinc(CPUARMState *env,
   1066                                          const ARMCPRegInfo *ri,
   1067                                          bool isread)
   1068 {
   1069     /* SW: software increment write trap control */
   1070     if (arm_feature(env, ARM_FEATURE_V8)
   1071         && arm_current_el(env) == 0
   1072         && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
   1073         && !isread) {
   1074         return CP_ACCESS_OK;
   1075     }
   1076 
   1077     return pmreg_access(env, ri, isread);
   1078 }
   1079 
   1080 static CPAccessResult pmreg_access_selr(CPUARMState *env,
   1081                                         const ARMCPRegInfo *ri,
   1082                                         bool isread)
   1083 {
   1084     /* ER: event counter read trap control */
   1085     if (arm_feature(env, ARM_FEATURE_V8)
   1086         && arm_current_el(env) == 0
   1087         && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
   1088         return CP_ACCESS_OK;
   1089     }
   1090 
   1091     return pmreg_access(env, ri, isread);
   1092 }
   1093 
   1094 static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
   1095                                          const ARMCPRegInfo *ri,
   1096                                          bool isread)
   1097 {
   1098     /* CR: cycle counter read trap control */
   1099     if (arm_feature(env, ARM_FEATURE_V8)
   1100         && arm_current_el(env) == 0
   1101         && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
   1102         && isread) {
   1103         return CP_ACCESS_OK;
   1104     }
   1105 
   1106     return pmreg_access(env, ri, isread);
   1107 }
   1108 
   1109 /*
   1110  * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
   1111  * We use these to decide whether we need to wrap a write to MDCR_EL2
   1112  * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
   1113  */
   1114 #define MDCR_EL2_PMU_ENABLE_BITS \
   1115     (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
   1116 #define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
   1117 
   1118 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
   1119  * the current EL, security state, and register configuration.
   1120  */
   1121 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
   1122 {
   1123     uint64_t filter;
   1124     bool e, p, u, nsk, nsu, nsh, m;
   1125     bool enabled, prohibited = false, filtered;
   1126     bool secure = arm_is_secure(env);
   1127     int el = arm_current_el(env);
   1128     uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
   1129     uint8_t hpmn = mdcr_el2 & MDCR_HPMN;
   1130 
   1131     if (!arm_feature(env, ARM_FEATURE_PMU)) {
   1132         return false;
   1133     }
   1134 
   1135     if (!arm_feature(env, ARM_FEATURE_EL2) ||
   1136             (counter < hpmn || counter == 31)) {
   1137         e = env->cp15.c9_pmcr & PMCRE;
   1138     } else {
   1139         e = mdcr_el2 & MDCR_HPME;
   1140     }
   1141     enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
   1142 
   1143     /* Is event counting prohibited? */
   1144     if (el == 2 && (counter < hpmn || counter == 31)) {
   1145         prohibited = mdcr_el2 & MDCR_HPMD;
   1146     }
   1147     if (secure) {
   1148         prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
   1149     }
   1150 
   1151     if (counter == 31) {
   1152         /*
   1153          * The cycle counter defaults to running. PMCR.DP says "disable
   1154          * the cycle counter when event counting is prohibited".
   1155          * Some MDCR bits disable the cycle counter specifically.
   1156          */
   1157         prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
   1158         if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
   1159             if (secure) {
   1160                 prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
   1161             }
   1162             if (el == 2) {
   1163                 prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
   1164             }
   1165         }
   1166     }
   1167 
   1168     if (counter == 31) {
   1169         filter = env->cp15.pmccfiltr_el0;
   1170     } else {
   1171         filter = env->cp15.c14_pmevtyper[counter];
   1172     }
   1173 
   1174     p   = filter & PMXEVTYPER_P;
   1175     u   = filter & PMXEVTYPER_U;
   1176     nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
   1177     nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
   1178     nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
   1179     m   = arm_el_is_aa64(env, 1) &&
   1180               arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
   1181 
   1182     if (el == 0) {
   1183         filtered = secure ? u : u != nsu;
   1184     } else if (el == 1) {
   1185         filtered = secure ? p : p != nsk;
   1186     } else if (el == 2) {
   1187         filtered = !nsh;
   1188     } else { /* EL3 */
   1189         filtered = m != p;
   1190     }
   1191 
   1192     if (counter != 31) {
   1193         /*
   1194          * If not checking PMCCNTR, ensure the counter is setup to an event we
   1195          * support
   1196          */
   1197         uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
   1198         if (!event_supported(event)) {
   1199             return false;
   1200         }
   1201     }
   1202 
   1203     return enabled && !prohibited && !filtered;
   1204 }
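
/*
 * Example of the filter evaluation above (values illustrative): at
 * Non-secure EL0 with PMXEVTYPER.U == 1 and PMXEVTYPER.NSU == 0,
 * "u != nsu" is true, so the counter is filtered (paused) while the
 * guest runs at EL0; with PMXEVTYPER.P and NSK both 0, the same counter
 * runs again once execution moves to Non-secure EL1.
 */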
   1205 
   1206 static void pmu_update_irq(CPUARMState *env)
   1207 {
   1208     ARMCPU *cpu = env_archcpu(env);
   1209     qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
   1210             (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
   1211 }
   1212 
   1213 static bool pmccntr_clockdiv_enabled(CPUARMState *env)
   1214 {
   1215     /*
   1216      * Return true if the clock divider is enabled and the cycle counter
   1217      * is supposed to tick only once every 64 clock cycles. This is
   1218      * controlled by PMCR.D, but if PMCR.LC is set to enable the long
   1219      * (64-bit) cycle counter PMCR.D has no effect.
   1220      */
   1221     return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
   1222 }
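
/*
 * Truth table for the expression above:
 *
 *   PMCR.D  PMCR.LC   divide by 64?
 *     0       x       no
 *     1       0       yes
 *     1       1       no (LC makes D have no effect)
 */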
   1223 
   1224 static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
   1225 {
   1226     /* Return true if the specified event counter is configured to be 64 bit */
   1227 
   1228     /* This isn't intended to be used with the cycle counter */
   1229     assert(counter < 31);
   1230 
   1231     if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
   1232         return false;
   1233     }
   1234 
   1235     if (arm_feature(env, ARM_FEATURE_EL2)) {
   1236         /*
   1237          * MDCR_EL2.HLP still applies even when EL2 is disabled in the
   1238          * current security state, so we don't use arm_mdcr_el2_eff() here.
   1239          */
   1240         bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
   1241         int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
   1242 
   1243         if (hpmn != 0 && counter >= hpmn) {
   1244             return hlp;
   1245         }
   1246     }
   1247     return env->cp15.c9_pmcr & PMCRLP;
   1248 }
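
/*
 * Example of the HPMN partitioning above (numbers illustrative): with
 * MDCR_EL2.HPMN == 4 and MDCR_EL2.HLP == 1, counters 0..3 take their
 * width from PMCR.LP, while counters 4 and up belong to EL2 and are
 * 64 bits wide.
 */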
   1249 
   1250 /*
   1251  * Ensure c15_ccnt is the guest-visible count so that operations such as
   1252  * enabling/disabling the counter or filtering, modifying the count itself,
   1253  * etc. can be done logically. This is essentially a no-op if the counter is
   1254  * not enabled at the time of the call.
   1255  */
   1256 static void pmccntr_op_start(CPUARMState *env)
   1257 {
   1258     uint64_t cycles = cycles_get_count(env);
   1259 
   1260     if (pmu_counter_enabled(env, 31)) {
   1261         uint64_t eff_cycles = cycles;
   1262         if (pmccntr_clockdiv_enabled(env)) {
   1263             eff_cycles /= 64;
   1264         }
   1265 
   1266         uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
   1267 
    1268         uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
    1269                                  1ull << 63 : 1ull << 31;
   1270         if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
   1271             env->cp15.c9_pmovsr |= (1ULL << 31);
   1272             pmu_update_irq(env);
   1273         }
   1274 
   1275         env->cp15.c15_ccnt = new_pmccntr;
   1276     }
   1277     env->cp15.c15_ccnt_delta = cycles;
   1278 }
   1279 
   1280 /*
   1281  * If PMCCNTR is enabled, recalculate the delta between the clock and the
   1282  * guest-visible count. A call to pmccntr_op_finish should follow every call to
   1283  * pmccntr_op_start.
   1284  */
   1285 static void pmccntr_op_finish(CPUARMState *env)
   1286 {
   1287     if (pmu_counter_enabled(env, 31)) {
   1288 #ifndef CONFIG_USER_ONLY
   1289         /* Calculate when the counter will next overflow */
   1290         uint64_t remaining_cycles = -env->cp15.c15_ccnt;
   1291         if (!(env->cp15.c9_pmcr & PMCRLC)) {
   1292             remaining_cycles = (uint32_t)remaining_cycles;
   1293         }
   1294         int64_t overflow_in = cycles_ns_per(remaining_cycles);
   1295 
   1296         if (overflow_in > 0) {
   1297             int64_t overflow_at;
   1298 
   1299             if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
   1300                                  overflow_in, &overflow_at)) {
   1301                 ARMCPU *cpu = env_archcpu(env);
   1302                 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
   1303             }
   1304         }
   1305 #endif
   1306 
   1307         uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
   1308         if (pmccntr_clockdiv_enabled(env)) {
   1309             prev_cycles /= 64;
   1310         }
   1311         env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
   1312     }
   1313 }
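
/*
 * Numeric sketch of the delta bookkeeping in the two functions above
 * (numbers arbitrary, clock divider off): if the underlying count is
 * 1000 cycles and c15_ccnt_delta is 400, pmccntr_op_start() exposes a
 * guest-visible PMCCNTR of 600; if the guest then writes PMCCNTR = 50,
 * pmccntr_op_finish() stores delta = 1000 - 50 = 950, so on the next
 * pmccntr_op_start() the counter resumes ticking from 50.
 */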
   1314 
   1315 static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
   1316 {
   1318     uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
   1319     uint64_t count = 0;
   1320     if (event_supported(event)) {
   1321         uint16_t event_idx = supported_event_map[event];
   1322         count = pm_events[event_idx].get_count(env);
   1323     }
   1324 
   1325     if (pmu_counter_enabled(env, counter)) {
   1326         uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
   1327         uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
   1328             1ULL << 63 : 1ULL << 31;
   1329 
   1330         if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
   1331             env->cp15.c9_pmovsr |= (1 << counter);
   1332             pmu_update_irq(env);
   1333         }
   1334         env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
   1335     }
   1336     env->cp15.c14_pmevcntr_delta[counter] = count;
   1337 }
   1338 
   1339 static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
   1340 {
   1341     if (pmu_counter_enabled(env, counter)) {
   1342 #ifndef CONFIG_USER_ONLY
   1343         uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
   1344         uint16_t event_idx = supported_event_map[event];
   1345         uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
   1346         int64_t overflow_in;
   1347 
   1348         if (!pmevcntr_is_64_bit(env, counter)) {
   1349             delta = (uint32_t)delta;
   1350         }
   1351         overflow_in = pm_events[event_idx].ns_per_count(delta);
   1352 
   1353         if (overflow_in > 0) {
   1354             int64_t overflow_at;
   1355 
   1356             if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
   1357                                  overflow_in, &overflow_at)) {
   1358                 ARMCPU *cpu = env_archcpu(env);
   1359                 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
   1360             }
   1361         }
   1362 #endif
   1363 
   1364         env->cp15.c14_pmevcntr_delta[counter] -=
   1365             env->cp15.c14_pmevcntr[counter];
   1366     }
   1367 }
   1368 
   1369 void pmu_op_start(CPUARMState *env)
   1370 {
   1371     unsigned int i;
   1372     pmccntr_op_start(env);
   1373     for (i = 0; i < pmu_num_counters(env); i++) {
   1374         pmevcntr_op_start(env, i);
   1375     }
   1376 }
   1377 
   1378 void pmu_op_finish(CPUARMState *env)
   1379 {
   1380     unsigned int i;
   1381     pmccntr_op_finish(env);
   1382     for (i = 0; i < pmu_num_counters(env); i++) {
   1383         pmevcntr_op_finish(env, i);
   1384     }
   1385 }
   1386 
   1387 void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
   1388 {
   1389     pmu_op_start(&cpu->env);
   1390 }
   1391 
   1392 void pmu_post_el_change(ARMCPU *cpu, void *ignored)
   1393 {
   1394     pmu_op_finish(&cpu->env);
   1395 }
   1396 
   1397 void arm_pmu_timer_cb(void *opaque)
   1398 {
   1399     ARMCPU *cpu = opaque;
   1400 
   1401     /*
   1402      * Update all the counter values based on the current underlying counts,
   1403      * triggering interrupts to be raised, if necessary. pmu_op_finish() also
   1404      * has the effect of setting the cpu->pmu_timer to the next earliest time a
   1405      * counter may expire.
   1406      */
   1407     pmu_op_start(&cpu->env);
   1408     pmu_op_finish(&cpu->env);
   1409 }
   1410 
   1411 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1412                        uint64_t value)
   1413 {
   1414     pmu_op_start(env);
   1415 
   1416     if (value & PMCRC) {
   1417         /* PMCR.C: reset the cycle counter, PMCCNTR */
   1418         env->cp15.c15_ccnt = 0;
   1419     }
   1420 
   1421     if (value & PMCRP) {
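                /* PMCR.P: reset all the event counters (but not PMCCNTR) */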
   1422         unsigned int i;
   1423         for (i = 0; i < pmu_num_counters(env); i++) {
   1424             env->cp15.c14_pmevcntr[i] = 0;
   1425         }
   1426     }
   1427 
   1428     env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
   1429     env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);
   1430 
   1431     pmu_op_finish(env);
   1432 }
   1433 
   1434 static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1435                           uint64_t value)
   1436 {
   1437     unsigned int i;
   1438     uint64_t overflow_mask, new_pmswinc;
   1439 
   1440     for (i = 0; i < pmu_num_counters(env); i++) {
   1441         /* Increment a counter's count iff: */
   1442         if ((value & (1 << i)) && /* counter's bit is set */
   1443                 /* counter is enabled and not filtered */
   1444                 pmu_counter_enabled(env, i) &&
   1445                 /* counter is SW_INCR */
   1446                 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
   1447             pmevcntr_op_start(env, i);
   1448 
   1449             /*
   1450              * Detect if this write causes an overflow since we can't predict
   1451              * PMSWINC overflows like we can for other events
   1452              */
   1453             new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
   1454 
   1455             overflow_mask = pmevcntr_is_64_bit(env, i) ?
   1456                 1ULL << 63 : 1ULL << 31;
   1457 
   1458             if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
   1459                 env->cp15.c9_pmovsr |= (1 << i);
   1460                 pmu_update_irq(env);
   1461             }
   1462 
   1463             env->cp15.c14_pmevcntr[i] = new_pmswinc;
   1464 
   1465             pmevcntr_op_finish(env, i);
   1466         }
   1467     }
   1468 }
   1469 
   1470 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1471 {
   1472     uint64_t ret;
   1473     pmccntr_op_start(env);
   1474     ret = env->cp15.c15_ccnt;
   1475     pmccntr_op_finish(env);
   1476     return ret;
   1477 }
   1478 
   1479 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1480                          uint64_t value)
   1481 {
   1482     /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
   1483      * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; the
   1484      * range check against PMSELR.SEL is instead done when PMXEVTYPER
   1485      * and PMXEVCNTR are accessed.
   1486      */
   1487     env->cp15.c9_pmselr = value & 0x1f;
   1488 }
   1489 
   1490 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1491                         uint64_t value)
   1492 {
   1493     pmccntr_op_start(env);
   1494     env->cp15.c15_ccnt = value;
   1495     pmccntr_op_finish(env);
   1496 }
   1497 
   1498 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
   1499                             uint64_t value)
   1500 {
   1501     uint64_t cur_val = pmccntr_read(env, NULL);
   1502 
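            /* An AArch32 write to PMCCNTR replaces only the low 32 bits */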
   1503     pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
   1504 }
   1505 
   1506 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1507                             uint64_t value)
   1508 {
   1509     pmccntr_op_start(env);
   1510     env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
   1511     pmccntr_op_finish(env);
   1512 }
   1513 
   1514 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
   1515                             uint64_t value)
   1516 {
   1517     pmccntr_op_start(env);
   1518     /* M is not accessible from AArch32 */
   1519     env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
   1520         (value & PMCCFILTR);
   1521     pmccntr_op_finish(env);
   1522 }
   1523 
   1524 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
   1525 {
   1526     /* M is not visible in AArch32 */
   1527     return env->cp15.pmccfiltr_el0 & PMCCFILTR;
   1528 }
   1529 
   1530 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1531                             uint64_t value)
   1532 {
   1533     pmu_op_start(env);
   1534     value &= pmu_counter_mask(env);
   1535     env->cp15.c9_pmcnten |= value;
   1536     pmu_op_finish(env);
   1537 }
   1538 
   1539 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1540                              uint64_t value)
   1541 {
   1542     pmu_op_start(env);
   1543     value &= pmu_counter_mask(env);
   1544     env->cp15.c9_pmcnten &= ~value;
   1545     pmu_op_finish(env);
   1546 }
   1547 
   1548 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1549                          uint64_t value)
   1550 {
   1551     value &= pmu_counter_mask(env);
   1552     env->cp15.c9_pmovsr &= ~value;
   1553     pmu_update_irq(env);
   1554 }
   1555 
   1556 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1557                          uint64_t value)
   1558 {
   1559     value &= pmu_counter_mask(env);
   1560     env->cp15.c9_pmovsr |= value;
   1561     pmu_update_irq(env);
   1562 }
   1563 
   1564 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1565                              uint64_t value, const uint8_t counter)
   1566 {
   1567     if (counter == 31) {
   1568         pmccfiltr_write(env, ri, value);
   1569     } else if (counter < pmu_num_counters(env)) {
   1570         pmevcntr_op_start(env, counter);
   1571 
   1572         /*
   1573          * If this counter's event type is changing, store the current
   1574          * underlying count for the new type in c14_pmevcntr_delta[counter] so
   1575          * pmevcntr_op_finish has the correct baseline when it converts back to
   1576          * a delta.
   1577          */
   1578         uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
   1579             PMXEVTYPER_EVTCOUNT;
   1580         uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
   1581         if (old_event != new_event) {
   1582             uint64_t count = 0;
   1583             if (event_supported(new_event)) {
   1584                 uint16_t event_idx = supported_event_map[new_event];
   1585                 count = pm_events[event_idx].get_count(env);
   1586             }
   1587             env->cp15.c14_pmevcntr_delta[counter] = count;
   1588         }
   1589 
   1590         env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
   1591         pmevcntr_op_finish(env, counter);
   1592     }
   1593     /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
   1594      * the PMSELR value is equal to or greater than the number of implemented
   1595      * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
   1596      */
   1597 }
   1598 
   1599 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
   1600                                const uint8_t counter)
   1601 {
   1602     if (counter == 31) {
   1603         return env->cp15.pmccfiltr_el0;
   1604     } else if (counter < pmu_num_counters(env)) {
   1605         return env->cp15.c14_pmevtyper[counter];
   1606     } else {
   1607         /*
   1608          * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
   1609          * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
   1610          */
   1611         return 0;
   1612     }
   1613 }
   1614 
   1615 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
   1616                               uint64_t value)
   1617 {
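            /*
             * For PMEVTYPER<n>, the counter number n is encoded in the
             * register encoding as CRm[1:0]:opc2[2:0]; recover it from there.
             */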
   1618     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1619     pmevtyper_write(env, ri, value, counter);
   1620 }
   1621 
   1622 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
   1623                                uint64_t value)
   1624 {
   1625     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1626     env->cp15.c14_pmevtyper[counter] = value;
   1627 
   1628     /*
   1629      * pmevtyper_rawwrite is called between a pair of pmu_op_start and
   1630      * pmu_op_finish calls when loading saved state for a migration. Because
   1631      * we're potentially updating the type of event here, the value written to
   1632      * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
   1633      * different counter type. Therefore, we need to set this value to the
   1634      * current count for the counter type we're writing so that pmu_op_finish
   1635      * has the correct count for its calculation.
   1636      */
   1637     uint16_t event = value & PMXEVTYPER_EVTCOUNT;
   1638     if (event_supported(event)) {
   1639         uint16_t event_idx = supported_event_map[event];
   1640         env->cp15.c14_pmevcntr_delta[counter] =
   1641             pm_events[event_idx].get_count(env);
   1642     }
   1643 }
   1644 
   1645 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
   1646 {
   1647     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1648     return pmevtyper_read(env, ri, counter);
   1649 }
   1650 
   1651 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1652                              uint64_t value)
   1653 {
   1654     pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
   1655 }
   1656 
   1657 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1658 {
   1659     return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
   1660 }
   1661 
   1662 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1663                              uint64_t value, uint8_t counter)
   1664 {
   1665     if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
   1666         /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
   1667         value &= MAKE_64BIT_MASK(0, 32);
   1668     }
   1669     if (counter < pmu_num_counters(env)) {
   1670         pmevcntr_op_start(env, counter);
   1671         env->cp15.c14_pmevcntr[counter] = value;
   1672         pmevcntr_op_finish(env, counter);
   1673     }
   1674     /*
   1675      * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
   1676      * are CONSTRAINED UNPREDICTABLE.
   1677      */
   1678 }
   1679 
   1680 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
   1681                               uint8_t counter)
   1682 {
   1683     if (counter < pmu_num_counters(env)) {
   1684         uint64_t ret;
   1685         pmevcntr_op_start(env, counter);
   1686         ret = env->cp15.c14_pmevcntr[counter];
   1687         pmevcntr_op_finish(env, counter);
   1688         if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
   1689             /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
   1690             ret &= MAKE_64BIT_MASK(0, 32);
   1691         }
   1692         return ret;
   1693     } else {
   1694         /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
   1695          * are CONSTRAINED UNPREDICTABLE. */
   1696         return 0;
   1697     }
   1698 }
   1699 
   1700 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
   1701                              uint64_t value)
   1702 {
   1703     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1704     pmevcntr_write(env, ri, value, counter);
   1705 }
   1706 
   1707 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
   1708 {
   1709     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1710     return pmevcntr_read(env, ri, counter);
   1711 }
   1712 
   1713 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
   1714                              uint64_t value)
   1715 {
   1716     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1717     assert(counter < pmu_num_counters(env));
   1718     /* Raw writes happen between pmu_op_start/finish calls, so just store */
   1719     env->cp15.c14_pmevcntr[counter] = value;
   1720 }
   1721 
   1722 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
   1723 {
   1724     uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
   1725     assert(counter < pmu_num_counters(env));
   1726     return env->cp15.c14_pmevcntr[counter];
   1727 }
   1728 
   1729 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1730                              uint64_t value)
   1731 {
   1732     pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
   1733 }
   1734 
   1735 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1736 {
   1737     return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
   1738 }
   1739 
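        /*
         * PMUSERENR gates EL0 access to the PMU. v8 defines four enable
         * bits (EN, SW, CR and ER), hence the 0xf mask; pre-v8 only EN
         * (bit 0) is implemented.
         */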
   1740 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1741                             uint64_t value)
   1742 {
   1743     if (arm_feature(env, ARM_FEATURE_V8)) {
   1744         env->cp15.c9_pmuserenr = value & 0xf;
   1745     } else {
   1746         env->cp15.c9_pmuserenr = value & 1;
   1747     }
   1748 }
   1749 
   1750 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1751                              uint64_t value)
   1752 {
   1753     /* Only bits for implemented counters (and the C bit) can be set */
   1754     value &= pmu_counter_mask(env);
   1755     env->cp15.c9_pminten |= value;
   1756     pmu_update_irq(env);
   1757 }
   1758 
   1759 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1760                              uint64_t value)
   1761 {
   1762     value &= pmu_counter_mask(env);
   1763     env->cp15.c9_pminten &= ~value;
   1764     pmu_update_irq(env);
   1765 }
   1766 
   1767 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1768                        uint64_t value)
   1769 {
   1770     /* Note that even though the AArch64 view of this register has bits
   1771      * [10:0] all RES0 we can only mask the bottom 5, to comply with the
   1772      * architectural requirements for bits which are RES0 only in some
   1773      * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
   1774      * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
   1775      */
   1776     raw_write(env, ri, value & ~0x1FULL);
   1777 }
   1778 
   1779 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
   1780 {
   1781     /* Begin with base v8.0 state.  */
   1782     uint64_t valid_mask = 0x3fff;
   1783     ARMCPU *cpu = env_archcpu(env);
   1784     uint64_t changed;
   1785 
   1786     /*
   1787      * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
   1788      * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
   1789      * Instead, choose the format based on the mode of EL3.
   1790      */
   1791     if (arm_el_is_aa64(env, 3)) {
   1792         value |= SCR_FW | SCR_AW;      /* RES1 */
   1793         valid_mask &= ~SCR_NET;        /* RES0 */
   1794 
   1795         if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
   1796             !cpu_isar_feature(aa64_aa32_el2, cpu)) {
   1797             value |= SCR_RW;           /* RAO/WI */
   1798         }
   1799         if (cpu_isar_feature(aa64_ras, cpu)) {
   1800             valid_mask |= SCR_TERR;
   1801         }
   1802         if (cpu_isar_feature(aa64_lor, cpu)) {
   1803             valid_mask |= SCR_TLOR;
   1804         }
   1805         if (cpu_isar_feature(aa64_pauth, cpu)) {
   1806             valid_mask |= SCR_API | SCR_APK;
   1807         }
   1808         if (cpu_isar_feature(aa64_sel2, cpu)) {
   1809             valid_mask |= SCR_EEL2;
   1810         }
   1811         if (cpu_isar_feature(aa64_mte, cpu)) {
   1812             valid_mask |= SCR_ATA;
   1813         }
   1814         if (cpu_isar_feature(aa64_scxtnum, cpu)) {
   1815             valid_mask |= SCR_ENSCXT;
   1816         }
   1817         if (cpu_isar_feature(aa64_doublefault, cpu)) {
   1818             valid_mask |= SCR_EASE | SCR_NMEA;
   1819         }
   1820         if (cpu_isar_feature(aa64_sme, cpu)) {
   1821             valid_mask |= SCR_ENTP2;
   1822         }
   1823     } else {
   1824         valid_mask &= ~(SCR_RW | SCR_ST);
   1825         if (cpu_isar_feature(aa32_ras, cpu)) {
   1826             valid_mask |= SCR_TERR;
   1827         }
   1828     }
   1829 
   1830     if (!arm_feature(env, ARM_FEATURE_EL2)) {
   1831         valid_mask &= ~SCR_HCE;
   1832 
   1833         /* On ARMv7, SMD (or SCD as it is called in v7) is only
   1834          * supported if EL2 exists. The bit is UNK/SBZP when
   1835          * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
   1836          * when EL2 is unavailable.
   1837          * On ARMv8, this bit is always available.
   1838          */
   1839         if (arm_feature(env, ARM_FEATURE_V7) &&
   1840             !arm_feature(env, ARM_FEATURE_V8)) {
   1841             valid_mask &= ~SCR_SMD;
   1842         }
   1843     }
   1844 
   1845     /* Clear all-context RES0 bits.  */
   1846     value &= valid_mask;
   1847     changed = env->cp15.scr_el3 ^ value;
   1848     env->cp15.scr_el3 = value;
   1849 
   1850     /*
   1851      * If SCR_EL3.NS changes, i.e. arm_is_secure_below_el3, then
   1852      * we must invalidate all TLBs below EL3.
   1853      */
   1854     if (changed & SCR_NS) {
   1855         tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
   1856                                            ARMMMUIdxBit_E20_0 |
   1857                                            ARMMMUIdxBit_E10_1 |
   1858                                            ARMMMUIdxBit_E20_2 |
   1859                                            ARMMMUIdxBit_E10_1_PAN |
   1860                                            ARMMMUIdxBit_E20_2_PAN |
   1861                                            ARMMMUIdxBit_E2));
   1862     }
   1863 }
   1864 
   1865 static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   1866 {
   1867     /*
   1868      * scr_write will set the RES1 bits on an AArch64-only CPU.
   1869      * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
   1870      */
   1871     scr_write(env, ri, 0);
   1872 }
   1873 
   1874 static CPAccessResult access_aa64_tid2(CPUARMState *env,
   1875                                        const ARMCPRegInfo *ri,
   1876                                        bool isread)
   1877 {
   1878     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
   1879         return CP_ACCESS_TRAP_EL2;
   1880     }
   1881 
   1882     return CP_ACCESS_OK;
   1883 }
   1884 
   1885 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1886 {
   1887     ARMCPU *cpu = env_archcpu(env);
   1888 
   1889     /* Acquire the CSSELR index from the register bank matching the
   1890      * security state of this CCSIDR access
   1891      */
   1892     uint32_t index = A32_BANKED_REG_GET(env, csselr,
   1893                                         ri->secure & ARM_CP_SECSTATE_S);
   1894 
   1895     return cpu->ccsidr[index];
   1896 }
   1897 
   1898 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1899                          uint64_t value)
   1900 {
   1901     raw_write(env, ri, value & 0xf);
   1902 }
   1903 
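        /*
         * ISR_EL1 reports the raw pending state of IRQ, FIQ and SError.
         * When HCR_EL2 routes a class of interrupt to EL2 (IMO, FMO, AMO),
         * an EL1 read reports the corresponding virtual interrupt instead.
         */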
   1904 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1905 {
   1906     CPUState *cs = env_cpu(env);
   1907     bool el1 = arm_current_el(env) == 1;
   1908     uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
   1909     uint64_t ret = 0;
   1910 
   1911     if (hcr_el2 & HCR_IMO) {
   1912         if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
   1913             ret |= CPSR_I;
   1914         }
   1915     } else {
   1916         if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
   1917             ret |= CPSR_I;
   1918         }
   1919     }
   1920 
   1921     if (hcr_el2 & HCR_FMO) {
   1922         if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
   1923             ret |= CPSR_F;
   1924         }
   1925     } else {
   1926         if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
   1927             ret |= CPSR_F;
   1928         }
   1929     }
   1930 
   1931     if (hcr_el2 & HCR_AMO) {
   1932         if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
   1933             ret |= CPSR_A;
   1934         }
   1935     }
   1936 
   1937     return ret;
   1938 }
   1939 
   1940 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
   1941                                        bool isread)
   1942 {
   1943     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
   1944         return CP_ACCESS_TRAP_EL2;
   1945     }
   1946 
   1947     return CP_ACCESS_OK;
   1948 }
   1949 
   1950 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
   1951                                        bool isread)
   1952 {
   1953     if (arm_feature(env, ARM_FEATURE_V8)) {
   1954         return access_aa64_tid1(env, ri, isread);
   1955     }
   1956 
   1957     return CP_ACCESS_OK;
   1958 }
   1959 
   1960 static const ARMCPRegInfo v7_cp_reginfo[] = {
   1961     /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
   1962     { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
   1963       .access = PL1_W, .type = ARM_CP_NOP },
   1964     /* Performance monitors are implementation defined in v7,
   1965      * but with an ARM recommended set of registers, which we
   1966      * follow.
   1967      *
   1968      * Performance registers fall into three categories:
   1969      *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
   1970      *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
   1971      *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
   1972      * For the cases controlled by PMUSERENR we must set .access to PL0_RW
   1973      * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
   1974      */
   1975     { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
   1976       .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
   1977       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
   1978       .writefn = pmcntenset_write,
   1979       .accessfn = pmreg_access,
   1980       .raw_writefn = raw_write },
   1981     { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
   1982       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
   1983       .access = PL0_RW, .accessfn = pmreg_access,
   1984       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
   1985       .writefn = pmcntenset_write, .raw_writefn = raw_write },
   1986     { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
   1987       .access = PL0_RW,
   1988       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
   1989       .accessfn = pmreg_access,
   1990       .writefn = pmcntenclr_write,
   1991       .type = ARM_CP_ALIAS | ARM_CP_IO },
   1992     { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
   1993       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
   1994       .access = PL0_RW, .accessfn = pmreg_access,
   1995       .type = ARM_CP_ALIAS | ARM_CP_IO,
   1996       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
   1997       .writefn = pmcntenclr_write },
   1998     { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
   1999       .access = PL0_RW, .type = ARM_CP_IO,
   2000       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
   2001       .accessfn = pmreg_access,
   2002       .writefn = pmovsr_write,
   2003       .raw_writefn = raw_write },
   2004     { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
   2005       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
   2006       .access = PL0_RW, .accessfn = pmreg_access,
   2007       .type = ARM_CP_ALIAS | ARM_CP_IO,
   2008       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
   2009       .writefn = pmovsr_write,
   2010       .raw_writefn = raw_write },
   2011     { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
   2012       .access = PL0_W, .accessfn = pmreg_access_swinc,
   2013       .type = ARM_CP_NO_RAW | ARM_CP_IO,
   2014       .writefn = pmswinc_write },
   2015     { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
   2016       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
   2017       .access = PL0_W, .accessfn = pmreg_access_swinc,
   2018       .type = ARM_CP_NO_RAW | ARM_CP_IO,
   2019       .writefn = pmswinc_write },
   2020     { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
   2021       .access = PL0_RW, .type = ARM_CP_ALIAS,
   2022       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
   2023       .accessfn = pmreg_access_selr, .writefn = pmselr_write,
   2024       .raw_writefn = raw_write},
   2025     { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
   2026       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
   2027       .access = PL0_RW, .accessfn = pmreg_access_selr,
   2028       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
   2029       .writefn = pmselr_write, .raw_writefn = raw_write, },
   2030     { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
   2031       .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
   2032       .readfn = pmccntr_read, .writefn = pmccntr_write32,
   2033       .accessfn = pmreg_access_ccntr },
   2034     { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
   2035       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
   2036       .access = PL0_RW, .accessfn = pmreg_access_ccntr,
   2037       .type = ARM_CP_IO,
   2038       .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
   2039       .readfn = pmccntr_read, .writefn = pmccntr_write,
   2040       .raw_readfn = raw_read, .raw_writefn = raw_write, },
   2041     { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
   2042       .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
   2043       .access = PL0_RW, .accessfn = pmreg_access,
   2044       .type = ARM_CP_ALIAS | ARM_CP_IO,
   2045       .resetvalue = 0, },
   2046     { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
   2047       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
   2048       .writefn = pmccfiltr_write, .raw_writefn = raw_write,
   2049       .access = PL0_RW, .accessfn = pmreg_access,
   2050       .type = ARM_CP_IO,
   2051       .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
   2052       .resetvalue = 0, },
   2053     { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
   2054       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   2055       .accessfn = pmreg_access,
   2056       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
   2057     { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
   2058       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
   2059       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   2060       .accessfn = pmreg_access,
   2061       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
   2062     { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
   2063       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   2064       .accessfn = pmreg_access_xevcntr,
   2065       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
   2066     { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
   2067       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
   2068       .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   2069       .accessfn = pmreg_access_xevcntr,
   2070       .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
   2071     { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
   2072       .access = PL0_R | PL1_RW, .accessfn = access_tpm,
   2073       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
   2074       .resetvalue = 0,
   2075       .writefn = pmuserenr_write, .raw_writefn = raw_write },
   2076     { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
   2077       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
   2078       .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
   2079       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
   2080       .resetvalue = 0,
   2081       .writefn = pmuserenr_write, .raw_writefn = raw_write },
   2082     { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
   2083       .access = PL1_RW, .accessfn = access_tpm,
   2084       .type = ARM_CP_ALIAS | ARM_CP_IO,
   2085       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
   2086       .resetvalue = 0,
   2087       .writefn = pmintenset_write, .raw_writefn = raw_write },
   2088     { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
   2089       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
   2090       .access = PL1_RW, .accessfn = access_tpm,
   2091       .type = ARM_CP_IO,
   2092       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
   2093       .writefn = pmintenset_write, .raw_writefn = raw_write,
   2094       .resetvalue = 0x0 },
   2095     { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
   2096       .access = PL1_RW, .accessfn = access_tpm,
   2097       .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
   2098       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
   2099       .writefn = pmintenclr_write, },
   2100     { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
   2101       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
   2102       .access = PL1_RW, .accessfn = access_tpm,
   2103       .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
   2104       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
   2105       .writefn = pmintenclr_write },
   2106     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
   2107       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
   2108       .access = PL1_R,
   2109       .accessfn = access_aa64_tid2,
   2110       .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
   2111     { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
   2112       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
   2113       .access = PL1_RW,
   2114       .accessfn = access_aa64_tid2,
   2115       .writefn = csselr_write, .resetvalue = 0,
   2116       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
   2117                              offsetof(CPUARMState, cp15.csselr_ns) } },
   2118     /* Auxiliary ID register: this actually has an IMPDEF value but for now
   2119      * just RAZ for all cores:
   2120      */
   2121     { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
   2122       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
   2123       .access = PL1_R, .type = ARM_CP_CONST,
   2124       .accessfn = access_aa64_tid1,
   2125       .resetvalue = 0 },
   2126     /* Auxiliary fault status registers: these also are IMPDEF, and we
   2127      * choose to RAZ/WI for all cores.
   2128      */
   2129     { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
   2130       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
   2131       .access = PL1_RW, .accessfn = access_tvm_trvm,
   2132       .type = ARM_CP_CONST, .resetvalue = 0 },
   2133     { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
   2134       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
   2135       .access = PL1_RW, .accessfn = access_tvm_trvm,
   2136       .type = ARM_CP_CONST, .resetvalue = 0 },
   2137     /* MAIR can just read-as-written because we don't implement caches
   2138      * and so don't need to care about memory attributes.
   2139      */
   2140     { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
   2141       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
   2142       .access = PL1_RW, .accessfn = access_tvm_trvm,
   2143       .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
   2144       .resetvalue = 0 },
   2145     { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
   2146       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
   2147       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
   2148       .resetvalue = 0 },
   2149     /* For non-long-descriptor page tables these are PRRR and NMRR;
   2150      * regardless they still act as reads-as-written for QEMU.
   2151      */
   2152     /* MAIR0/1 are defined separately from their 64-bit counterpart so
   2153      * that each can assign the correct fieldoffset based on the
   2154      * endianness handled in the field definitions.
   2155      */
   2156     { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
   2157       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
   2158       .access = PL1_RW, .accessfn = access_tvm_trvm,
   2159       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
   2160                              offsetof(CPUARMState, cp15.mair0_ns) },
   2161       .resetfn = arm_cp_reset_ignore },
   2162     { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
   2163       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
   2164       .access = PL1_RW, .accessfn = access_tvm_trvm,
   2165       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
   2166                              offsetof(CPUARMState, cp15.mair1_ns) },
   2167       .resetfn = arm_cp_reset_ignore },
   2168     { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
   2169       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
   2170       .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
   2171     /* 32 bit ITLB invalidates */
   2172     { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
   2173       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2174       .writefn = tlbiall_write },
   2175     { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
   2176       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2177       .writefn = tlbimva_write },
   2178     { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
   2179       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2180       .writefn = tlbiasid_write },
   2181     /* 32 bit DTLB invalidates */
   2182     { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
   2183       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2184       .writefn = tlbiall_write },
   2185     { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
   2186       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2187       .writefn = tlbimva_write },
   2188     { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
   2189       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2190       .writefn = tlbiasid_write },
   2191     /* 32 bit TLB invalidates */
   2192     { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
   2193       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2194       .writefn = tlbiall_write },
   2195     { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
   2196       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2197       .writefn = tlbimva_write },
   2198     { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
   2199       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2200       .writefn = tlbiasid_write },
   2201     { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
   2202       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2203       .writefn = tlbimvaa_write },
   2204 };
   2205 
   2206 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
   2207     /* 32 bit TLB invalidates, Inner Shareable */
   2208     { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
   2209       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2210       .writefn = tlbiall_is_write },
   2211     { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
   2212       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2213       .writefn = tlbimva_is_write },
   2214     { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
   2215       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2216       .writefn = tlbiasid_is_write },
   2217     { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
   2218       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   2219       .writefn = tlbimvaa_is_write },
   2220 };
   2221 
   2222 static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
   2223     /* PMOVSSET is not implemented in v7 before v7ve */
   2224     { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
   2225       .access = PL0_RW, .accessfn = pmreg_access,
   2226       .type = ARM_CP_ALIAS | ARM_CP_IO,
   2227       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
   2228       .writefn = pmovsset_write,
   2229       .raw_writefn = raw_write },
   2230     { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
   2231       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
   2232       .access = PL0_RW, .accessfn = pmreg_access,
   2233       .type = ARM_CP_ALIAS | ARM_CP_IO,
   2234       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
   2235       .writefn = pmovsset_write,
   2236       .raw_writefn = raw_write },
   2237 };
   2238 
   2239 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2240                         uint64_t value)
   2241 {
   2242     value &= 1;
   2243     env->teecr = value;
   2244 }
   2245 
   2246 static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
   2247                                    bool isread)
   2248 {
   2249     /*
   2250      * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
   2251      * at all, so we don't need to check whether we're v8A.
   2252      */
   2253     if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
   2254         (env->cp15.hstr_el2 & HSTR_TTEE)) {
   2255         return CP_ACCESS_TRAP_EL2;
   2256     }
   2257     return CP_ACCESS_OK;
   2258 }
   2259 
   2260 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
   2261                                     bool isread)
   2262 {
   2263     if (arm_current_el(env) == 0 && (env->teecr & 1)) {
   2264         return CP_ACCESS_TRAP;
   2265     }
   2266     return teecr_access(env, ri, isread);
   2267 }
   2268 
   2269 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
   2270     { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
   2271       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
   2272       .resetvalue = 0,
   2273       .writefn = teecr_write, .accessfn = teecr_access },
   2274     { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
   2275       .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
   2276       .accessfn = teehbr_access, .resetvalue = 0 },
   2277 };
   2278 
   2279 static const ARMCPRegInfo v6k_cp_reginfo[] = {
   2280     { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
   2281       .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
   2282       .access = PL0_RW,
   2283       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
   2284     { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
   2285       .access = PL0_RW,
   2286       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
   2287                              offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
   2288       .resetfn = arm_cp_reset_ignore },
   2289     { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
   2290       .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
   2291       .access = PL0_R|PL1_W,
   2292       .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
   2293       .resetvalue = 0},
   2294     { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
   2295       .access = PL0_R|PL1_W,
   2296       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
   2297                              offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
   2298       .resetfn = arm_cp_reset_ignore },
   2299     { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
   2300       .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
   2301       .access = PL1_RW,
   2302       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
   2303     { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
   2304       .access = PL1_RW,
   2305       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
   2306                              offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
   2307       .resetvalue = 0 },
   2308 };
   2309 
   2310 #ifndef CONFIG_USER_ONLY
   2311 
   2312 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
   2313                                        bool isread)
   2314 {
   2315     /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
   2316      * Writable only at the highest implemented exception level.
   2317      */
   2318     int el = arm_current_el(env);
   2319     uint64_t hcr;
   2320     uint32_t cntkctl;
   2321 
   2322     switch (el) {
   2323     case 0:
   2324         hcr = arm_hcr_el2_eff(env);
   2325         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   2326             cntkctl = env->cp15.cnthctl_el2;
   2327         } else {
   2328             cntkctl = env->cp15.c14_cntkctl;
   2329         }
   2330         if (!extract32(cntkctl, 0, 2)) {
   2331             return CP_ACCESS_TRAP;
   2332         }
   2333         break;
   2334     case 1:
   2335         if (!isread && ri->state == ARM_CP_STATE_AA32 &&
   2336             arm_is_secure_below_el3(env)) {
   2337             /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
   2338             return CP_ACCESS_TRAP_UNCATEGORIZED;
   2339         }
   2340         break;
   2341     case 2:
   2342     case 3:
   2343         break;
   2344     }
   2345 
   2346     if (!isread && el < arm_highest_el(env)) {
   2347         return CP_ACCESS_TRAP_UNCATEGORIZED;
   2348     }
   2349 
   2350     return CP_ACCESS_OK;
   2351 }
   2352 
   2353 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
   2354                                         bool isread)
   2355 {
   2356     unsigned int cur_el = arm_current_el(env);
   2357     bool has_el2 = arm_is_el2_enabled(env);
   2358     uint64_t hcr = arm_hcr_el2_eff(env);
   2359 
   2360     switch (cur_el) {
   2361     case 0:
   2362         /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
   2363         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   2364             return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
   2365                     ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
   2366         }
   2367 
   2368         /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
   2369         if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
   2370             return CP_ACCESS_TRAP;
   2371         }
   2372 
   2373         /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
   2374         if (hcr & HCR_E2H) {
   2375             if (timeridx == GTIMER_PHYS &&
   2376                 !extract32(env->cp15.cnthctl_el2, 10, 1)) {
   2377                 return CP_ACCESS_TRAP_EL2;
   2378             }
   2379         } else {
   2380             /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
   2381             if (has_el2 && timeridx == GTIMER_PHYS &&
   2382                 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
   2383                 return CP_ACCESS_TRAP_EL2;
   2384             }
   2385         }
   2386         break;
   2387 
   2388     case 1:
   2389         /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
   2390         if (has_el2 && timeridx == GTIMER_PHYS &&
   2391             (hcr & HCR_E2H
   2392              ? !extract32(env->cp15.cnthctl_el2, 10, 1)
   2393              : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
   2394             return CP_ACCESS_TRAP_EL2;
   2395         }
   2396         break;
   2397     }
   2398     return CP_ACCESS_OK;
   2399 }
   2400 
   2401 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
   2402                                       bool isread)
   2403 {
   2404     unsigned int cur_el = arm_current_el(env);
   2405     bool has_el2 = arm_is_el2_enabled(env);
   2406     uint64_t hcr = arm_hcr_el2_eff(env);
   2407 
   2408     switch (cur_el) {
   2409     case 0:
   2410         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   2411             /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
   2412             return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
   2413                     ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
   2414         }
   2415 
   2416         /*
   2417          * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from EL0
   2418          * if EL0[PV]TEN is zero (EL0PTEN is bit 9, EL0VTEN bit 8: 9 - timeridx).
   2419          */
   2420         if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
   2421             return CP_ACCESS_TRAP;
   2422         }
   2423         /* fall through */
   2424 
   2425     case 1:
   2426         if (has_el2 && timeridx == GTIMER_PHYS) {
   2427             if (hcr & HCR_E2H) {
   2428                 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
   2429                 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
   2430                     return CP_ACCESS_TRAP_EL2;
   2431                 }
   2432             } else {
   2433                 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
   2434                 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
   2435                     return CP_ACCESS_TRAP_EL2;
   2436                 }
   2437             }
   2438         }
   2439         break;
   2440     }
   2441     return CP_ACCESS_OK;
   2442 }
   2443 
   2444 static CPAccessResult gt_pct_access(CPUARMState *env,
   2445                                     const ARMCPRegInfo *ri,
   2446                                     bool isread)
   2447 {
   2448     return gt_counter_access(env, GTIMER_PHYS, isread);
   2449 }
   2450 
   2451 static CPAccessResult gt_vct_access(CPUARMState *env,
   2452                                     const ARMCPRegInfo *ri,
   2453                                     bool isread)
   2454 {
   2455     return gt_counter_access(env, GTIMER_VIRT, isread);
   2456 }
   2457 
   2458 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
   2459                                        bool isread)
   2460 {
   2461     return gt_timer_access(env, GTIMER_PHYS, isread);
   2462 }
   2463 
   2464 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
   2465                                        bool isread)
   2466 {
   2467     return gt_timer_access(env, GTIMER_VIRT, isread);
   2468 }
   2469 
   2470 static CPAccessResult gt_stimer_access(CPUARMState *env,
   2471                                        const ARMCPRegInfo *ri,
   2472                                        bool isread)
   2473 {
   2474     /* The AArch64 register view of the secure physical timer is
   2475      * always accessible from EL3, and configurably accessible from
   2476      * Secure EL1.
   2477      */
   2478     switch (arm_current_el(env)) {
   2479     case 1:
   2480         if (!arm_is_secure(env)) {
   2481             return CP_ACCESS_TRAP;
   2482         }
   2483         if (!(env->cp15.scr_el3 & SCR_ST)) {
   2484             return CP_ACCESS_TRAP_EL3;
   2485         }
   2486         return CP_ACCESS_OK;
   2487     case 0:
   2488     case 2:
   2489         return CP_ACCESS_TRAP;
   2490     case 3:
   2491         return CP_ACCESS_OK;
   2492     default:
   2493         g_assert_not_reached();
   2494     }
   2495 }
   2496 
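        /*
         * Convert the QEMU virtual clock (in ns) to generic timer ticks:
         * one tick lasts gt_cntfrq_period_ns(cpu) ns, which is 16ns at
         * QEMU's default 62.5MHz CNTFRQ.
         */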
   2497 static uint64_t gt_get_countervalue(CPUARMState *env)
   2498 {
   2499     ARMCPU *cpu = env_archcpu(env);
   2500 
   2501     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
   2502 }
   2503 
   2504 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
   2505 {
   2506     ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
   2507 
   2508     if (gt->ctl & 1) {
   2509         /* Timer enabled: calculate and set current ISTATUS, irq, and
   2510          * reset timer to when ISTATUS next has to change
   2511          */
   2512         uint64_t offset = timeridx == GTIMER_VIRT ?
   2513                                       cpu->env.cp15.cntvoff_el2 : 0;
   2514         uint64_t count = gt_get_countervalue(&cpu->env);
   2515         /* Note that this must be unsigned 64 bit arithmetic: */
   2516         int istatus = count - offset >= gt->cval;
   2517         uint64_t nexttick;
   2518         int irqstate;
   2519 
   2520         gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
   2521 
   2522         irqstate = (istatus && !(gt->ctl & 2));
   2523         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
   2524 
   2525         if (istatus) {
   2526             /* Next transition is when count rolls back over to zero */
   2527             nexttick = UINT64_MAX;
   2528         } else {
   2529             /* Next transition is when we hit cval */
   2530             nexttick = gt->cval + offset;
   2531         }
   2532         /* Note that the desired next expiry time might be beyond the
   2533          * signed-64-bit range of a QEMUTimer -- in this case we just
   2534          * set the timer for as far in the future as possible. When the
   2535          * timer expires we will reset the timer for any remaining period.
   2536          */
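        /*
         * (cpu->gt_timer is created elsewhere with the counter tick period
         * as its scale, so timer_mod() takes ticks while timer_mod_ns()
         * takes nanoseconds; hence the INT64_MAX / gt_cntfrq_period_ns()
         * bound used below.)
         */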
   2537         if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
   2538             timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
   2539         } else {
   2540             timer_mod(cpu->gt_timer[timeridx], nexttick);
   2541         }
   2542         trace_arm_gt_recalc(timeridx, irqstate, nexttick);
   2543     } else {
   2544         /* Timer disabled: ISTATUS and timer output always clear */
   2545         gt->ctl &= ~4;
   2546         qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
   2547         timer_del(cpu->gt_timer[timeridx]);
   2548         trace_arm_gt_recalc_disabled(timeridx);
   2549     }
   2550 }
   2551 
   2552 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
   2553                            int timeridx)
   2554 {
   2555     ARMCPU *cpu = env_archcpu(env);
   2556 
   2557     timer_del(cpu->gt_timer[timeridx]);
   2558 }
   2559 
   2560 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2561 {
   2562     return gt_get_countervalue(env);
   2563 }
   2564 
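        /*
         * Return the CNTVOFF_EL2 offset to apply to the virtual counter.
         * With HCR_EL2.E2H set the offset is zero at EL2, and also at EL0
         * when TGE is set too, since there the virtual counter must match
         * the physical one.
         */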
   2565 static uint64_t gt_virt_cnt_offset(CPUARMState *env)
   2566 {
   2567     uint64_t hcr;
   2568 
   2569     switch (arm_current_el(env)) {
   2570     case 2:
   2571         hcr = arm_hcr_el2_eff(env);
   2572         if (hcr & HCR_E2H) {
   2573             return 0;
   2574         }
   2575         break;
   2576     case 0:
   2577         hcr = arm_hcr_el2_eff(env);
   2578         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   2579             return 0;
   2580         }
   2581         break;
   2582     }
   2583 
   2584     return env->cp15.cntvoff_el2;
   2585 }
   2586 
   2587 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2588 {
   2589     return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
   2590 }
   2591 
   2592 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2593                           int timeridx,
   2594                           uint64_t value)
   2595 {
   2596     trace_arm_gt_cval_write(timeridx, value);
   2597     env->cp15.c14_timer[timeridx].cval = value;
   2598     gt_recalc_timer(env_archcpu(env), timeridx);
   2599 }
   2600 
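        /*
         * TVAL is a signed 32-bit downcounter: it reads as
         * CVAL - (counter - offset), and a write sets
         * CVAL = (counter - offset) + sign_extend(value).
         */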
   2601 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
   2602                              int timeridx)
   2603 {
   2604     uint64_t offset = 0;
   2605 
   2606     switch (timeridx) {
   2607     case GTIMER_VIRT:
   2608     case GTIMER_HYPVIRT:
   2609         offset = gt_virt_cnt_offset(env);
   2610         break;
   2611     }
   2612 
   2613     return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
   2614                       (gt_get_countervalue(env) - offset));
   2615 }
   2616 
   2617 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2618                           int timeridx,
   2619                           uint64_t value)
   2620 {
   2621     uint64_t offset = 0;
   2622 
   2623     switch (timeridx) {
   2624     case GTIMER_VIRT:
   2625     case GTIMER_HYPVIRT:
   2626         offset = gt_virt_cnt_offset(env);
   2627         break;
   2628     }
   2629 
   2630     trace_arm_gt_tval_write(timeridx, value);
   2631     env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
   2632                                          sextract64(value, 0, 32);
   2633     gt_recalc_timer(env_archcpu(env), timeridx);
   2634 }
   2635 
   2636 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2637                          int timeridx,
   2638                          uint64_t value)
   2639 {
   2640     ARMCPU *cpu = env_archcpu(env);
   2641     uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
   2642 
   2643     trace_arm_gt_ctl_write(timeridx, value);
   2644     env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
   2645     if ((oldval ^ value) & 1) {
   2646         /* Enable toggled */
   2647         gt_recalc_timer(cpu, timeridx);
   2648     } else if ((oldval ^ value) & 2) {
   2649         /* IMASK toggled: don't need to recalculate,
   2650          * just set the interrupt line based on ISTATUS
   2651          */
   2652         int irqstate = (oldval & 4) && !(value & 2);
   2653 
   2654         trace_arm_gt_imask_toggle(timeridx, irqstate);
   2655         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
   2656     }
   2657 }
   2658 
   2659 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   2660 {
   2661     gt_timer_reset(env, ri, GTIMER_PHYS);
   2662 }
   2663 
   2664 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2665                                uint64_t value)
   2666 {
   2667     gt_cval_write(env, ri, GTIMER_PHYS, value);
   2668 }
   2669 
   2670 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2671 {
   2672     return gt_tval_read(env, ri, GTIMER_PHYS);
   2673 }
   2674 
   2675 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2676                                uint64_t value)
   2677 {
   2678     gt_tval_write(env, ri, GTIMER_PHYS, value);
   2679 }
   2680 
   2681 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2682                               uint64_t value)
   2683 {
   2684     gt_ctl_write(env, ri, GTIMER_PHYS, value);
   2685 }
   2686 
   2687 static int gt_phys_redir_timeridx(CPUARMState *env)
   2688 {
   2689     switch (arm_mmu_idx(env)) {
   2690     case ARMMMUIdx_E20_0:
   2691     case ARMMMUIdx_E20_2:
   2692     case ARMMMUIdx_E20_2_PAN:
   2693         return GTIMER_HYP;
   2694     default:
   2695         return GTIMER_PHYS;
   2696     }
   2697 }
   2698 
   2699 static int gt_virt_redir_timeridx(CPUARMState *env)
   2700 {
   2701     switch (arm_mmu_idx(env)) {
   2702     case ARMMMUIdx_E20_0:
   2703     case ARMMMUIdx_E20_2:
   2704     case ARMMMUIdx_E20_2_PAN:
   2705         return GTIMER_HYPVIRT;
   2706     default:
   2707         return GTIMER_VIRT;
   2708     }
   2709 }
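
         /*
          * Editorial note (not part of the original source): the E20_*
          * cases above correspond to the VHE "E2&0" translation regime;
          * when that regime is in force (HCR_EL2.E2H, plus TGE for EL0),
          * accesses to the EL0 physical/virtual timer registers are
          * redirected to the EL2 timers, hence GTIMER_HYP/GTIMER_HYPVIRT
          * instead of GTIMER_PHYS/GTIMER_VIRT.
          */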
   2710 
   2711 static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
   2712                                         const ARMCPRegInfo *ri)
   2713 {
   2714     int timeridx = gt_phys_redir_timeridx(env);
   2715     return env->cp15.c14_timer[timeridx].cval;
   2716 }
   2717 
   2718 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2719                                      uint64_t value)
   2720 {
   2721     int timeridx = gt_phys_redir_timeridx(env);
   2722     gt_cval_write(env, ri, timeridx, value);
   2723 }
   2724 
   2725 static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
   2726                                         const ARMCPRegInfo *ri)
   2727 {
   2728     int timeridx = gt_phys_redir_timeridx(env);
   2729     return gt_tval_read(env, ri, timeridx);
   2730 }
   2731 
   2732 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2733                                      uint64_t value)
   2734 {
   2735     int timeridx = gt_phys_redir_timeridx(env);
   2736     gt_tval_write(env, ri, timeridx, value);
   2737 }
   2738 
   2739 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
   2740                                        const ARMCPRegInfo *ri)
   2741 {
   2742     int timeridx = gt_phys_redir_timeridx(env);
   2743     return env->cp15.c14_timer[timeridx].ctl;
   2744 }
   2745 
   2746 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2747                                     uint64_t value)
   2748 {
   2749     int timeridx = gt_phys_redir_timeridx(env);
   2750     gt_ctl_write(env, ri, timeridx, value);
   2751 }
   2752 
   2753 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   2754 {
   2755     gt_timer_reset(env, ri, GTIMER_VIRT);
   2756 }
   2757 
   2758 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2759                                uint64_t value)
   2760 {
   2761     gt_cval_write(env, ri, GTIMER_VIRT, value);
   2762 }
   2763 
   2764 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2765 {
   2766     return gt_tval_read(env, ri, GTIMER_VIRT);
   2767 }
   2768 
   2769 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2770                                uint64_t value)
   2771 {
   2772     gt_tval_write(env, ri, GTIMER_VIRT, value);
   2773 }
   2774 
   2775 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2776                               uint64_t value)
   2777 {
   2778     gt_ctl_write(env, ri, GTIMER_VIRT, value);
   2779 }
   2780 
   2781 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2782                               uint64_t value)
   2783 {
   2784     ARMCPU *cpu = env_archcpu(env);
   2785 
   2786     trace_arm_gt_cntvoff_write(value);
   2787     raw_write(env, ri, value);
   2788     gt_recalc_timer(cpu, GTIMER_VIRT);
   2789 }
   2790 
   2791 static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
   2792                                         const ARMCPRegInfo *ri)
   2793 {
   2794     int timeridx = gt_virt_redir_timeridx(env);
   2795     return env->cp15.c14_timer[timeridx].cval;
   2796 }
   2797 
   2798 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2799                                      uint64_t value)
   2800 {
   2801     int timeridx = gt_virt_redir_timeridx(env);
   2802     gt_cval_write(env, ri, timeridx, value);
   2803 }
   2804 
   2805 static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
   2806                                         const ARMCPRegInfo *ri)
   2807 {
   2808     int timeridx = gt_virt_redir_timeridx(env);
   2809     return gt_tval_read(env, ri, timeridx);
   2810 }
   2811 
   2812 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2813                                      uint64_t value)
   2814 {
   2815     int timeridx = gt_virt_redir_timeridx(env);
   2816     gt_tval_write(env, ri, timeridx, value);
   2817 }
   2818 
   2819 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
   2820                                        const ARMCPRegInfo *ri)
   2821 {
   2822     int timeridx = gt_virt_redir_timeridx(env);
   2823     return env->cp15.c14_timer[timeridx].ctl;
   2824 }
   2825 
   2826 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2827                                     uint64_t value)
   2828 {
   2829     int timeridx = gt_virt_redir_timeridx(env);
   2830     gt_ctl_write(env, ri, timeridx, value);
   2831 }
   2832 
   2833 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   2834 {
   2835     gt_timer_reset(env, ri, GTIMER_HYP);
   2836 }
   2837 
   2838 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2839                               uint64_t value)
   2840 {
   2841     gt_cval_write(env, ri, GTIMER_HYP, value);
   2842 }
   2843 
   2844 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2845 {
   2846     return gt_tval_read(env, ri, GTIMER_HYP);
   2847 }
   2848 
   2849 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2850                               uint64_t value)
   2851 {
   2852     gt_tval_write(env, ri, GTIMER_HYP, value);
   2853 }
   2854 
   2855 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2856                               uint64_t value)
   2857 {
   2858     gt_ctl_write(env, ri, GTIMER_HYP, value);
   2859 }
   2860 
   2861 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   2862 {
   2863     gt_timer_reset(env, ri, GTIMER_SEC);
   2864 }
   2865 
   2866 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2867                               uint64_t value)
   2868 {
   2869     gt_cval_write(env, ri, GTIMER_SEC, value);
   2870 }
   2871 
   2872 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2873 {
   2874     return gt_tval_read(env, ri, GTIMER_SEC);
   2875 }
   2876 
   2877 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2878                               uint64_t value)
   2879 {
   2880     gt_tval_write(env, ri, GTIMER_SEC, value);
   2881 }
   2882 
   2883 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2884                               uint64_t value)
   2885 {
   2886     gt_ctl_write(env, ri, GTIMER_SEC, value);
   2887 }
   2888 
   2889 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   2890 {
   2891     gt_timer_reset(env, ri, GTIMER_HYPVIRT);
   2892 }
   2893 
   2894 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2895                              uint64_t value)
   2896 {
   2897     gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
   2898 }
   2899 
   2900 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2901 {
   2902     return gt_tval_read(env, ri, GTIMER_HYPVIRT);
   2903 }
   2904 
   2905 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2906                              uint64_t value)
   2907 {
   2908     gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
   2909 }
   2910 
   2911 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2912                             uint64_t value)
   2913 {
   2914     gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
   2915 }
   2916 
   2917 void arm_gt_ptimer_cb(void *opaque)
   2918 {
   2919     ARMCPU *cpu = opaque;
   2920 
   2921     gt_recalc_timer(cpu, GTIMER_PHYS);
   2922 }
   2923 
   2924 void arm_gt_vtimer_cb(void *opaque)
   2925 {
   2926     ARMCPU *cpu = opaque;
   2927 
   2928     gt_recalc_timer(cpu, GTIMER_VIRT);
   2929 }
   2930 
   2931 void arm_gt_htimer_cb(void *opaque)
   2932 {
   2933     ARMCPU *cpu = opaque;
   2934 
   2935     gt_recalc_timer(cpu, GTIMER_HYP);
   2936 }
   2937 
   2938 void arm_gt_stimer_cb(void *opaque)
   2939 {
   2940     ARMCPU *cpu = opaque;
   2941 
   2942     gt_recalc_timer(cpu, GTIMER_SEC);
   2943 }
   2944 
   2945 void arm_gt_hvtimer_cb(void *opaque)
   2946 {
   2947     ARMCPU *cpu = opaque;
   2948 
   2949     gt_recalc_timer(cpu, GTIMER_HYPVIRT);
   2950 }
   2951 
   2952 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
   2953 {
   2954     ARMCPU *cpu = env_archcpu(env);
   2955 
   2956     cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
   2957 }
   2958 
   2959 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
   2960     /* Note that CNTFRQ is purely reads-as-written for the benefit
   2961      * of software; writing it doesn't actually change the timer frequency.
   2962      * Our reset value matches the fixed frequency we implement the timer at.
   2963      */
   2964     { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
   2965       .type = ARM_CP_ALIAS,
   2966       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
   2967       .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
   2968     },
   2969     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
   2970       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
   2971       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
   2972       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
   2973       .resetfn = arm_gt_cntfrq_reset,
   2974     },
   2975     /* overall control: mostly access permissions */
   2976     { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
   2977       .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
   2978       .access = PL1_RW,
   2979       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
   2980       .resetvalue = 0,
   2981     },
   2982     /* per-timer control */
   2983     { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
   2984       .secure = ARM_CP_SECSTATE_NS,
   2985       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
   2986       .accessfn = gt_ptimer_access,
   2987       .fieldoffset = offsetoflow32(CPUARMState,
   2988                                    cp15.c14_timer[GTIMER_PHYS].ctl),
   2989       .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
   2990       .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
   2991     },
   2992     { .name = "CNTP_CTL_S",
   2993       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
   2994       .secure = ARM_CP_SECSTATE_S,
   2995       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
   2996       .accessfn = gt_ptimer_access,
   2997       .fieldoffset = offsetoflow32(CPUARMState,
   2998                                    cp15.c14_timer[GTIMER_SEC].ctl),
   2999       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
   3000     },
   3001     { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
   3002       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
   3003       .type = ARM_CP_IO, .access = PL0_RW,
   3004       .accessfn = gt_ptimer_access,
   3005       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
   3006       .resetvalue = 0,
   3007       .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
   3008       .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
   3009     },
   3010     { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
   3011       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
   3012       .accessfn = gt_vtimer_access,
   3013       .fieldoffset = offsetoflow32(CPUARMState,
   3014                                    cp15.c14_timer[GTIMER_VIRT].ctl),
   3015       .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
   3016       .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
   3017     },
   3018     { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
   3019       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
   3020       .type = ARM_CP_IO, .access = PL0_RW,
   3021       .accessfn = gt_vtimer_access,
   3022       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
   3023       .resetvalue = 0,
   3024       .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
   3025       .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
   3026     },
    3027     /* TimerValue views: a 32-bit downcounting view of the underlying state */
   3028     { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
   3029       .secure = ARM_CP_SECSTATE_NS,
   3030       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
   3031       .accessfn = gt_ptimer_access,
   3032       .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
   3033     },
   3034     { .name = "CNTP_TVAL_S",
   3035       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
   3036       .secure = ARM_CP_SECSTATE_S,
   3037       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
   3038       .accessfn = gt_ptimer_access,
   3039       .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
   3040     },
   3041     { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
   3042       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
   3043       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
   3044       .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
   3045       .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
   3046     },
   3047     { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
   3048       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
   3049       .accessfn = gt_vtimer_access,
   3050       .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
   3051     },
   3052     { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
   3053       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
   3054       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
   3055       .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
   3056       .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
   3057     },
   3058     /* The counter itself */
   3059     { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
   3060       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
   3061       .accessfn = gt_pct_access,
   3062       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
   3063     },
   3064     { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
   3065       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
   3066       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   3067       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
   3068     },
   3069     { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
   3070       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
   3071       .accessfn = gt_vct_access,
   3072       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
   3073     },
   3074     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
   3075       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
   3076       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   3077       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
   3078     },
   3079     /* Comparison value, indicating when the timer goes off */
   3080     { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
   3081       .secure = ARM_CP_SECSTATE_NS,
   3082       .access = PL0_RW,
   3083       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
   3084       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
   3085       .accessfn = gt_ptimer_access,
   3086       .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
   3087       .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
   3088     },
   3089     { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
   3090       .secure = ARM_CP_SECSTATE_S,
   3091       .access = PL0_RW,
   3092       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
   3093       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
   3094       .accessfn = gt_ptimer_access,
   3095       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
   3096     },
   3097     { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
   3098       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
   3099       .access = PL0_RW,
   3100       .type = ARM_CP_IO,
   3101       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
   3102       .resetvalue = 0, .accessfn = gt_ptimer_access,
   3103       .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
   3104       .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
   3105     },
   3106     { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
   3107       .access = PL0_RW,
   3108       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
   3109       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
   3110       .accessfn = gt_vtimer_access,
   3111       .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
   3112       .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
   3113     },
   3114     { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
   3115       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
   3116       .access = PL0_RW,
   3117       .type = ARM_CP_IO,
   3118       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
   3119       .resetvalue = 0, .accessfn = gt_vtimer_access,
   3120       .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
   3121       .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
   3122     },
    3123     /* Secure timer -- access is restricted to EL3 and, configurably,
    3124      * to Secure EL1 via the accessfn.
   3125      */
   3126     { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
   3127       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
   3128       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
   3129       .accessfn = gt_stimer_access,
   3130       .readfn = gt_sec_tval_read,
   3131       .writefn = gt_sec_tval_write,
   3132       .resetfn = gt_sec_timer_reset,
   3133     },
   3134     { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
   3135       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
   3136       .type = ARM_CP_IO, .access = PL1_RW,
   3137       .accessfn = gt_stimer_access,
   3138       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
   3139       .resetvalue = 0,
   3140       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
   3141     },
   3142     { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
   3143       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
   3144       .type = ARM_CP_IO, .access = PL1_RW,
   3145       .accessfn = gt_stimer_access,
   3146       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
   3147       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
   3148     },
   3149 };
   3150 
   3151 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
   3152                                  bool isread)
   3153 {
   3154     if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
   3155         return CP_ACCESS_TRAP;
   3156     }
   3157     return CP_ACCESS_OK;
   3158 }
   3159 
   3160 #else
   3161 
    3162 /* In user-mode most of the generic timer registers are inaccessible;
    3163  * however, modern kernels (4.12+) allow access to cntvct_el0.
    3164  */
   3165 
   3166 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
   3167 {
   3168     ARMCPU *cpu = env_archcpu(env);
   3169 
    3170     /* Currently we have no support for QEMUTimer in linux-user, so we
    3171      * can't call gt_get_countervalue(env); instead we directly
    3172      * call the lower-level functions.
    3173      */
   3174     return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
   3175 }
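
         /*
          * Editorial sketch (not part of the original source): with the
          * default frequency of NANOSECONDS_PER_SECOND / GTIMER_SCALE
          * (62.5 MHz, i.e. a 16 ns period) the division above amounts to
          *
          *     count = cpu_get_clock() / 16;   // e.g. 1600 ns -> 100 ticks
          */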
   3176 
   3177 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
   3178     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
   3179       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
   3180       .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
   3181       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
   3182       .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
   3183     },
   3184     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
   3185       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
   3186       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
   3187       .readfn = gt_virt_cnt_read,
   3188     },
   3189 };
   3190 
   3191 #endif
   3192 
   3193 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
   3194 {
   3195     if (arm_feature(env, ARM_FEATURE_LPAE)) {
   3196         raw_write(env, ri, value);
   3197     } else if (arm_feature(env, ARM_FEATURE_V7)) {
   3198         raw_write(env, ri, value & 0xfffff6ff);
   3199     } else {
   3200         raw_write(env, ri, value & 0xfffff1ff);
   3201     }
   3202 }
   3203 
   3204 #ifndef CONFIG_USER_ONLY
   3205 /* get_phys_addr() isn't present for user-mode-only targets */
   3206 
   3207 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
   3208                                  bool isread)
   3209 {
   3210     if (ri->opc2 & 4) {
   3211         /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in
   3212          * Secure EL1 (which can only happen if EL3 is AArch64).
   3213          * They are simply UNDEF if executed from NS EL1.
   3214          * They function normally from EL2 or EL3.
   3215          */
   3216         if (arm_current_el(env) == 1) {
   3217             if (arm_is_secure_below_el3(env)) {
   3218                 if (env->cp15.scr_el3 & SCR_EEL2) {
   3219                     return CP_ACCESS_TRAP_UNCATEGORIZED_EL2;
   3220                 }
   3221                 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
   3222             }
   3223             return CP_ACCESS_TRAP_UNCATEGORIZED;
   3224         }
   3225     }
   3226     return CP_ACCESS_OK;
   3227 }
   3228 
   3229 #ifdef CONFIG_TCG
   3230 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
   3231                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
   3232                              bool is_secure)
   3233 {
   3234     bool ret;
   3235     uint64_t par64;
   3236     bool format64 = false;
   3237     ARMMMUFaultInfo fi = {};
   3238     GetPhysAddrResult res = {};
   3239 
   3240     ret = get_phys_addr_with_secure(env, value, access_type, mmu_idx,
   3241                                     is_secure, &res, &fi);
   3242 
   3243     /*
   3244      * ATS operations only do S1 or S1+S2 translations, so we never
   3245      * have to deal with the ARMCacheAttrs format for S2 only.
   3246      */
   3247     assert(!res.cacheattrs.is_s2_format);
   3248 
   3249     if (ret) {
   3250         /*
   3251          * Some kinds of translation fault must cause exceptions rather
   3252          * than being reported in the PAR.
   3253          */
   3254         int current_el = arm_current_el(env);
   3255         int target_el;
   3256         uint32_t syn, fsr, fsc;
   3257         bool take_exc = false;
   3258 
   3259         if (fi.s1ptw && current_el == 1
   3260             && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
   3261             /*
   3262              * Synchronous stage 2 fault on an access made as part of the
   3263              * translation table walk for AT S1E0* or AT S1E1* insn
   3264              * executed from NS EL1. If this is a synchronous external abort
   3265              * and SCR_EL3.EA == 1, then we take a synchronous external abort
   3266              * to EL3. Otherwise the fault is taken as an exception to EL2,
   3267              * and HPFAR_EL2 holds the faulting IPA.
   3268              */
   3269             if (fi.type == ARMFault_SyncExternalOnWalk &&
   3270                 (env->cp15.scr_el3 & SCR_EA)) {
   3271                 target_el = 3;
   3272             } else {
   3273                 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
   3274                 if (arm_is_secure_below_el3(env) && fi.s1ns) {
   3275                     env->cp15.hpfar_el2 |= HPFAR_NS;
   3276                 }
   3277                 target_el = 2;
   3278             }
   3279             take_exc = true;
   3280         } else if (fi.type == ARMFault_SyncExternalOnWalk) {
   3281             /*
   3282              * Synchronous external aborts during a translation table walk
   3283              * are taken as Data Abort exceptions.
   3284              */
   3285             if (fi.stage2) {
   3286                 if (current_el == 3) {
   3287                     target_el = 3;
   3288                 } else {
   3289                     target_el = 2;
   3290                 }
   3291             } else {
   3292                 target_el = exception_target_el(env);
   3293             }
   3294             take_exc = true;
   3295         }
   3296 
   3297         if (take_exc) {
   3298             /* Construct FSR and FSC using same logic as arm_deliver_fault() */
   3299             if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
   3300                 arm_s1_regime_using_lpae_format(env, mmu_idx)) {
   3301                 fsr = arm_fi_to_lfsc(&fi);
   3302                 fsc = extract32(fsr, 0, 6);
   3303             } else {
   3304                 fsr = arm_fi_to_sfsc(&fi);
   3305                 fsc = 0x3f;
   3306             }
   3307             /*
   3308              * Report exception with ESR indicating a fault due to a
   3309              * translation table walk for a cache maintenance instruction.
   3310              */
   3311             syn = syn_data_abort_no_iss(current_el == target_el, 0,
   3312                                         fi.ea, 1, fi.s1ptw, 1, fsc);
   3313             env->exception.vaddress = value;
   3314             env->exception.fsr = fsr;
   3315             raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
   3316         }
   3317     }
   3318 
   3319     if (is_a64(env)) {
   3320         format64 = true;
   3321     } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
   3322         /*
   3323          * ATS1Cxx:
   3324          * * TTBCR.EAE determines whether the result is returned using the
   3325          *   32-bit or the 64-bit PAR format
    3326      * * Instructions executed in Hyp mode always use the 64-bit format
    3327      *
    3328      * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
    3329      * * The Non-secure TTBCR.EAE bit is set to 1
    3330      * * The implementation includes EL2, and the value of HCR.VM is 1
    3331      *
    3332      * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
    3333      *
    3334      * ATS1Hx always uses the 64-bit format.
   3335          */
   3336         format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
   3337 
   3338         if (arm_feature(env, ARM_FEATURE_EL2)) {
   3339             if (mmu_idx == ARMMMUIdx_E10_0 ||
   3340                 mmu_idx == ARMMMUIdx_E10_1 ||
   3341                 mmu_idx == ARMMMUIdx_E10_1_PAN) {
   3342                 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
   3343             } else {
   3344                 format64 |= arm_current_el(env) == 2;
   3345             }
   3346         }
   3347     }
   3348 
   3349     if (format64) {
   3350         /* Create a 64-bit PAR */
   3351         par64 = (1 << 11); /* LPAE bit always set */
   3352         if (!ret) {
   3353             par64 |= res.f.phys_addr & ~0xfffULL;
   3354             if (!res.f.attrs.secure) {
   3355                 par64 |= (1 << 9); /* NS */
   3356             }
   3357             par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
   3358             par64 |= res.cacheattrs.shareability << 7; /* SH */
   3359         } else {
   3360             uint32_t fsr = arm_fi_to_lfsc(&fi);
   3361 
   3362             par64 |= 1; /* F */
   3363             par64 |= (fsr & 0x3f) << 1; /* FS */
   3364             if (fi.stage2) {
   3365                 par64 |= (1 << 9); /* S */
   3366             }
   3367             if (fi.s1ptw) {
   3368                 par64 |= (1 << 8); /* PTW */
   3369             }
   3370         }
   3371     } else {
   3372         /* fsr is a DFSR/IFSR value for the short descriptor
   3373          * translation table format (with WnR always clear).
   3374          * Convert it to a 32-bit PAR.
   3375          */
   3376         if (!ret) {
   3377             /* We do not set any attribute bits in the PAR */
   3378             if (res.f.lg_page_size == 24
   3379                 && arm_feature(env, ARM_FEATURE_V7)) {
   3380                 par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
   3381             } else {
   3382                 par64 = res.f.phys_addr & 0xfffff000;
   3383             }
   3384             if (!res.f.attrs.secure) {
   3385                 par64 |= (1 << 9); /* NS */
   3386             }
   3387         } else {
   3388             uint32_t fsr = arm_fi_to_sfsc(&fi);
   3389 
   3390             par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
   3391                     ((fsr & 0xf) << 1) | 1;
   3392         }
   3393     }
   3394     return par64;
   3395 }
   3396 #endif /* CONFIG_TCG */
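
         /*
          * Editorial sketch (not part of the original source): on success in
          * the 64-bit case, do_ats_write() above assembles the PAR roughly as
          *
          *     par64 = (PA & ~0xfffULL) | (1 << 11)         // LPAE bit
          *             | ((uint64_t)ATTR << 56) | (SH << 7) // plus NS at bit 9
          *
          * while on a fault it instead reports F (bit 0), FS (bits [6:1])
          * and, where relevant, the S (bit 9) and PTW (bit 8) bits.
          */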
   3397 
   3398 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
   3399 {
   3400 #ifdef CONFIG_TCG
   3401     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
   3402     uint64_t par64;
   3403     ARMMMUIdx mmu_idx;
   3404     int el = arm_current_el(env);
   3405     bool secure = arm_is_secure_below_el3(env);
   3406 
   3407     switch (ri->opc2 & 6) {
   3408     case 0:
   3409         /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
   3410         switch (el) {
   3411         case 3:
   3412             mmu_idx = ARMMMUIdx_E3;
   3413             secure = true;
   3414             break;
   3415         case 2:
   3416             g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
   3417             /* fall through */
   3418         case 1:
   3419             if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
   3420                 mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
   3421             } else {
   3422                 mmu_idx = ARMMMUIdx_Stage1_E1;
   3423             }
   3424             break;
   3425         default:
   3426             g_assert_not_reached();
   3427         }
   3428         break;
   3429     case 2:
   3430         /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
   3431         switch (el) {
   3432         case 3:
   3433             mmu_idx = ARMMMUIdx_E10_0;
   3434             secure = true;
   3435             break;
   3436         case 2:
   3437             g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
   3438             mmu_idx = ARMMMUIdx_Stage1_E0;
   3439             break;
   3440         case 1:
   3441             mmu_idx = ARMMMUIdx_Stage1_E0;
   3442             break;
   3443         default:
   3444             g_assert_not_reached();
   3445         }
   3446         break;
   3447     case 4:
   3448         /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
   3449         mmu_idx = ARMMMUIdx_E10_1;
   3450         secure = false;
   3451         break;
   3452     case 6:
   3453         /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
   3454         mmu_idx = ARMMMUIdx_E10_0;
   3455         secure = false;
   3456         break;
   3457     default:
   3458         g_assert_not_reached();
   3459     }
   3460 
   3461     par64 = do_ats_write(env, value, access_type, mmu_idx, secure);
   3462 
   3463     A32_BANKED_CURRENT_REG_SET(env, par, par64);
   3464 #else
   3465     /* Handled by hardware accelerator. */
   3466     g_assert_not_reached();
   3467 #endif /* CONFIG_TCG */
   3468 }
   3469 
   3470 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3471                         uint64_t value)
   3472 {
   3473 #ifdef CONFIG_TCG
   3474     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
   3475     uint64_t par64;
   3476 
   3477     /* There is no SecureEL2 for AArch32. */
   3478     par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2, false);
   3479 
   3480     A32_BANKED_CURRENT_REG_SET(env, par, par64);
   3481 #else
   3482     /* Handled by hardware accelerator. */
   3483     g_assert_not_reached();
   3484 #endif /* CONFIG_TCG */
   3485 }
   3486 
   3487 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
   3488                                      bool isread)
   3489 {
   3490     if (arm_current_el(env) == 3 &&
   3491         !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
   3492         return CP_ACCESS_TRAP;
   3493     }
   3494     return CP_ACCESS_OK;
   3495 }
   3496 
   3497 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
   3498                         uint64_t value)
   3499 {
   3500 #ifdef CONFIG_TCG
   3501     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
   3502     ARMMMUIdx mmu_idx;
   3503     int secure = arm_is_secure_below_el3(env);
   3504     uint64_t hcr_el2 = arm_hcr_el2_eff(env);
   3505     bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
   3506 
   3507     switch (ri->opc2 & 6) {
   3508     case 0:
   3509         switch (ri->opc1) {
   3510         case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
   3511             if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
   3512                 mmu_idx = regime_e20 ?
   3513                           ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
   3514             } else {
   3515                 mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
   3516             }
   3517             break;
   3518         case 4: /* AT S1E2R, AT S1E2W */
   3519             mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
   3520             break;
   3521         case 6: /* AT S1E3R, AT S1E3W */
   3522             mmu_idx = ARMMMUIdx_E3;
   3523             secure = true;
   3524             break;
   3525         default:
   3526             g_assert_not_reached();
   3527         }
   3528         break;
   3529     case 2: /* AT S1E0R, AT S1E0W */
   3530         mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
   3531         break;
   3532     case 4: /* AT S12E1R, AT S12E1W */
   3533         mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
   3534         break;
   3535     case 6: /* AT S12E0R, AT S12E0W */
   3536         mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
   3537         break;
   3538     default:
   3539         g_assert_not_reached();
   3540     }
   3541 
   3542     env->cp15.par_el[1] = do_ats_write(env, value, access_type,
   3543                                        mmu_idx, secure);
   3544 #else
   3545     /* Handled by hardware accelerator. */
   3546     g_assert_not_reached();
   3547 #endif /* CONFIG_TCG */
   3548 }
   3549 #endif
   3550 
   3551 static const ARMCPRegInfo vapa_cp_reginfo[] = {
   3552     { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
   3553       .access = PL1_RW, .resetvalue = 0,
   3554       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
   3555                              offsetoflow32(CPUARMState, cp15.par_ns) },
   3556       .writefn = par_write },
   3557 #ifndef CONFIG_USER_ONLY
   3558     /* This underdecoding is safe because the reginfo is NO_RAW. */
   3559     { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
   3560       .access = PL1_W, .accessfn = ats_access,
   3561       .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
   3562 #endif
   3563 };
   3564 
   3565 /* Return basic MPU access permission bits.  */
   3566 static uint32_t simple_mpu_ap_bits(uint32_t val)
   3567 {
   3568     uint32_t ret;
   3569     uint32_t mask;
   3570     int i;
   3571     ret = 0;
   3572     mask = 3;
   3573     for (i = 0; i < 16; i += 2) {
   3574         ret |= (val >> i) & mask;
   3575         mask <<= 2;
   3576     }
   3577     return ret;
   3578 }
   3579 
   3580 /* Pad basic MPU access permission bits to extended format.  */
   3581 static uint32_t extended_mpu_ap_bits(uint32_t val)
   3582 {
   3583     uint32_t ret;
   3584     uint32_t mask;
   3585     int i;
   3586     ret = 0;
   3587     mask = 3;
   3588     for (i = 0; i < 16; i += 2) {
   3589         ret |= (val & mask) << i;
   3590         mask <<= 2;
   3591     }
   3592     return ret;
   3593 }
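
         /*
          * Editorial example (not part of the original source): the two
          * helpers above convert between the 2-bits-per-region "simple"
          * layout and the 4-bits-per-region "extended" layout, e.g. for
          * two regions with AP values 2 and 1:
          *
          *     extended_mpu_ap_bits(0x6) == 0x12    (0b0110 -> 0b00010010)
          *     simple_mpu_ap_bits(0x12) == 0x6
          */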
   3594 
   3595 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3596                                  uint64_t value)
   3597 {
   3598     env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
   3599 }
   3600 
   3601 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
   3602 {
   3603     return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
   3604 }
   3605 
   3606 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3607                                  uint64_t value)
   3608 {
   3609     env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
   3610 }
   3611 
   3612 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
   3613 {
   3614     return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
   3615 }
   3616 
   3617 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
   3618 {
   3619     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
   3620 
   3621     if (!u32p) {
   3622         return 0;
   3623     }
   3624 
   3625     u32p += env->pmsav7.rnr[M_REG_NS];
   3626     return *u32p;
   3627 }
   3628 
   3629 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3630                          uint64_t value)
   3631 {
   3632     ARMCPU *cpu = env_archcpu(env);
   3633     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
   3634 
   3635     if (!u32p) {
   3636         return;
   3637     }
   3638 
   3639     u32p += env->pmsav7.rnr[M_REG_NS];
   3640     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
   3641     *u32p = value;
   3642 }
   3643 
   3644 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3645                               uint64_t value)
   3646 {
   3647     ARMCPU *cpu = env_archcpu(env);
   3648     uint32_t nrgs = cpu->pmsav7_dregion;
   3649 
   3650     if (value >= nrgs) {
   3651         qemu_log_mask(LOG_GUEST_ERROR,
   3652                       "PMSAv7 RGNR write >= # supported regions, %" PRIu32
    3653                       " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
   3654         return;
   3655     }
   3656 
   3657     raw_write(env, ri, value);
   3658 }
   3659 
   3660 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
   3661     /* Reset for all these registers is handled in arm_cpu_reset(),
   3662      * because the PMSAv7 is also used by M-profile CPUs, which do
   3663      * not register cpregs but still need the state to be reset.
   3664      */
   3665     { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
   3666       .access = PL1_RW, .type = ARM_CP_NO_RAW,
   3667       .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
   3668       .readfn = pmsav7_read, .writefn = pmsav7_write,
   3669       .resetfn = arm_cp_reset_ignore },
   3670     { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
   3671       .access = PL1_RW, .type = ARM_CP_NO_RAW,
   3672       .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
   3673       .readfn = pmsav7_read, .writefn = pmsav7_write,
   3674       .resetfn = arm_cp_reset_ignore },
   3675     { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
   3676       .access = PL1_RW, .type = ARM_CP_NO_RAW,
   3677       .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
   3678       .readfn = pmsav7_read, .writefn = pmsav7_write,
   3679       .resetfn = arm_cp_reset_ignore },
   3680     { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
   3681       .access = PL1_RW,
   3682       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
   3683       .writefn = pmsav7_rgnr_write,
   3684       .resetfn = arm_cp_reset_ignore },
   3685 };
   3686 
   3687 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
   3688     { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
   3689       .access = PL1_RW, .type = ARM_CP_ALIAS,
   3690       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
   3691       .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
   3692     { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
   3693       .access = PL1_RW, .type = ARM_CP_ALIAS,
   3694       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
   3695       .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
   3696     { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
   3697       .access = PL1_RW,
   3698       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
   3699       .resetvalue = 0, },
   3700     { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
   3701       .access = PL1_RW,
   3702       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
   3703       .resetvalue = 0, },
   3704     { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
   3705       .access = PL1_RW,
   3706       .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
   3707     { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
   3708       .access = PL1_RW,
   3709       .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
   3710     /* Protection region base and size registers */
   3711     { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
   3712       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3713       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
   3714     { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
   3715       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3716       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
   3717     { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
   3718       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3719       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
   3720     { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
   3721       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3722       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
   3723     { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
   3724       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3725       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
   3726     { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
   3727       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3728       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
   3729     { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
   3730       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3731       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
   3732     { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
   3733       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
   3734       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
   3735 };
   3736 
   3737 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3738                              uint64_t value)
   3739 {
   3740     ARMCPU *cpu = env_archcpu(env);
   3741 
   3742     if (!arm_feature(env, ARM_FEATURE_V8)) {
   3743         if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
    3744              * Pre-ARMv8, bits [21:19], [15:14] and [6:3] are UNK/SBZP when
    3745              * using the Long-descriptor translation table format.
   3746              * using Long-descriptor translation table format
   3747              */
   3748             value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
   3749         } else if (arm_feature(env, ARM_FEATURE_EL3)) {
   3750             /*
   3751              * In an implementation that includes the Security Extensions
   3752              * TTBCR has additional fields PD0 [4] and PD1 [5] for
   3753              * Short-descriptor translation table format.
   3754              */
   3755             value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
   3756         } else {
   3757             value &= TTBCR_N;
   3758         }
   3759     }
   3760 
   3761     if (arm_feature(env, ARM_FEATURE_LPAE)) {
    3762         /* With LPAE a write to TTBCR could change the ASID
   3763          * via the TTBCR.A1 bit, so do a TLB flush.
   3764          */
   3765         tlb_flush(CPU(cpu));
   3766     }
   3767     raw_write(env, ri, value);
   3768 }
   3769 
   3770 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3771                                uint64_t value)
   3772 {
   3773     ARMCPU *cpu = env_archcpu(env);
   3774 
   3775     /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
   3776     tlb_flush(CPU(cpu));
   3777     raw_write(env, ri, value);
   3778 }
   3779 
   3780 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3781                             uint64_t value)
   3782 {
   3783     /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
   3784     if (cpreg_field_is_64bit(ri) &&
   3785         extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
   3786         ARMCPU *cpu = env_archcpu(env);
   3787         tlb_flush(CPU(cpu));
   3788     }
   3789     raw_write(env, ri, value);
   3790 }
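
         /*
          * Editorial note (not part of the original source): the ASID lives
          * in the top bits of a 64-bit TTBR ([63:48] for AArch64), so the
          * extract64(old ^ new, 48, 16) test above flushes only when the
          * ASID field changes; updates confined to the table base address
          * bits leave the TLB alone.
          */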
   3791 
   3792 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3793                                     uint64_t value)
   3794 {
   3795     /*
   3796      * If we are running with E2&0 regime, then an ASID is active.
   3797      * Flush if that might be changing.  Note we're not checking
   3798      * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
   3799      * holds the active ASID, only checking the field that might.
   3800      */
   3801     if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
   3802         (arm_hcr_el2_eff(env) & HCR_E2H)) {
   3803         uint16_t mask = ARMMMUIdxBit_E20_2 |
   3804                         ARMMMUIdxBit_E20_2_PAN |
   3805                         ARMMMUIdxBit_E20_0;
   3806         tlb_flush_by_mmuidx(env_cpu(env), mask);
   3807     }
   3808     raw_write(env, ri, value);
   3809 }
   3810 
   3811 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3812                         uint64_t value)
   3813 {
   3814     ARMCPU *cpu = env_archcpu(env);
   3815     CPUState *cs = CPU(cpu);
   3816 
   3817     /*
    3818      * A change in the VMID used by the stage 2 page tables invalidates
    3819      * the stage 2 and combined stage 1&2 TLBs (E10_1 and E10_0).
   3820      */
   3821     if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
   3822         tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
   3823     }
   3824     raw_write(env, ri, value);
   3825 }
   3826 
   3827 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
   3828     { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
   3829       .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
   3830       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
   3831                              offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
   3832     { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
   3833       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
   3834       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
   3835                              offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
   3836     { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
   3837       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
   3838       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
   3839                              offsetof(CPUARMState, cp15.dfar_ns) } },
   3840     { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
   3841       .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
   3842       .access = PL1_RW, .accessfn = access_tvm_trvm,
   3843       .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
   3844       .resetvalue = 0, },
   3845 };
   3846 
   3847 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
   3848     { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
   3849       .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
   3850       .access = PL1_RW, .accessfn = access_tvm_trvm,
   3851       .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
   3852     { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
   3853       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
   3854       .access = PL1_RW, .accessfn = access_tvm_trvm,
   3855       .writefn = vmsa_ttbr_write, .resetvalue = 0,
   3856       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
   3857                              offsetof(CPUARMState, cp15.ttbr0_ns) } },
   3858     { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
   3859       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
   3860       .access = PL1_RW, .accessfn = access_tvm_trvm,
   3861       .writefn = vmsa_ttbr_write, .resetvalue = 0,
   3862       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
   3863                              offsetof(CPUARMState, cp15.ttbr1_ns) } },
   3864     { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
   3865       .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
   3866       .access = PL1_RW, .accessfn = access_tvm_trvm,
   3867       .writefn = vmsa_tcr_el12_write,
   3868       .raw_writefn = raw_write,
   3869       .resetvalue = 0,
   3870       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
   3871     { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
   3872       .access = PL1_RW, .accessfn = access_tvm_trvm,
   3873       .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
   3874       .raw_writefn = raw_write,
   3875       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
   3876                              offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
   3877 };
   3878 
   3879 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
    3880  * the QEMU TLBs or adjusting cached masks.
   3881  */
   3882 static const ARMCPRegInfo ttbcr2_reginfo = {
   3883     .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
   3884     .access = PL1_RW, .accessfn = access_tvm_trvm,
   3885     .type = ARM_CP_ALIAS,
   3886     .bank_fieldoffsets = {
   3887         offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
   3888         offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
   3889     },
   3890 };
   3891 
   3892 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3893                                 uint64_t value)
   3894 {
   3895     env->cp15.c15_ticonfig = value & 0xe7;
   3896     /* The OS_TYPE bit in this register changes the reported CPUID! */
   3897     env->cp15.c0_cpuid = (value & (1 << 5)) ?
   3898         ARM_CPUID_TI915T : ARM_CPUID_TI925T;
   3899 }
   3900 
   3901 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3902                                 uint64_t value)
   3903 {
   3904     env->cp15.c15_threadid = value & 0xffff;
   3905 }
   3906 
   3907 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3908                            uint64_t value)
   3909 {
   3910     /* Wait-for-interrupt (deprecated) */
   3911     cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
   3912 }
   3913 
   3914 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3915                                   uint64_t value)
   3916 {
   3917     /* On OMAP there are registers indicating the max/min index of dcache lines
   3918      * containing a dirty line; cache flush operations have to reset these.
   3919      */
   3920     env->cp15.c15_i_max = 0x000;
   3921     env->cp15.c15_i_min = 0xff0;
   3922 }
   3923 
   3924 static const ARMCPRegInfo omap_cp_reginfo[] = {
   3925     { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
   3926       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
   3927       .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
   3928       .resetvalue = 0, },
   3929     { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
   3930       .access = PL1_RW, .type = ARM_CP_NOP },
   3931     { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
   3932       .access = PL1_RW,
   3933       .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
   3934       .writefn = omap_ticonfig_write },
   3935     { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
   3936       .access = PL1_RW,
   3937       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
   3938     { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
   3939       .access = PL1_RW, .resetvalue = 0xff0,
   3940       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
   3941     { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
   3942       .access = PL1_RW,
   3943       .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
   3944       .writefn = omap_threadid_write },
   3945     { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
   3946       .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
   3947       .type = ARM_CP_NO_RAW,
   3948       .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
   3949     /* TODO: Peripheral port remap register:
   3950      * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
   3951      * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
   3952      * when MMU is off.
   3953      */
   3954     { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
   3955       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
   3956       .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
   3957       .writefn = omap_cachemaint_write },
   3958     { .name = "C9", .cp = 15, .crn = 9,
   3959       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
   3960       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
   3961 };
   3962 
   3963 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
   3964                               uint64_t value)
   3965 {
   3966     env->cp15.c15_cpar = value & 0x3fff;
   3967 }
   3968 
   3969 static const ARMCPRegInfo xscale_cp_reginfo[] = {
   3970     { .name = "XSCALE_CPAR",
   3971       .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
   3972       .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
   3973       .writefn = xscale_cpar_write, },
   3974     { .name = "XSCALE_AUXCR",
   3975       .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
   3976       .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
   3977       .resetvalue = 0, },
   3978     /* XScale specific cache-lockdown: since we have no cache we NOP these
   3979      * and hope the guest does not really rely on cache behaviour.
   3980      */
   3981     { .name = "XSCALE_LOCK_ICACHE_LINE",
   3982       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
   3983       .access = PL1_W, .type = ARM_CP_NOP },
   3984     { .name = "XSCALE_UNLOCK_ICACHE",
   3985       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
   3986       .access = PL1_W, .type = ARM_CP_NOP },
   3987     { .name = "XSCALE_DCACHE_LOCK",
   3988       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
   3989       .access = PL1_RW, .type = ARM_CP_NOP },
   3990     { .name = "XSCALE_UNLOCK_DCACHE",
   3991       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
   3992       .access = PL1_W, .type = ARM_CP_NOP },
   3993 };
   3994 
   3995 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    3996     /* RAZ/WI the whole crn=15 space when we don't have a more specific
   3997      * implementation of this implementation-defined space.
   3998      * Ideally this should eventually disappear in favour of actually
   3999      * implementing the correct behaviour for all cores.
   4000      */
   4001     { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
   4002       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
   4003       .access = PL1_RW,
   4004       .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
   4005       .resetvalue = 0 },
   4006 };
   4007 
   4008 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
   4009     /* Cache status: RAZ because we have no cache so it's always clean */
   4010     { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
   4011       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
   4012       .resetvalue = 0 },
   4013 };
   4014 
   4015 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
   4016     /* We never have a block transfer operation in progress */
   4017     { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
   4018       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
   4019       .resetvalue = 0 },
   4020     /* The cache ops themselves: these all NOP for QEMU */
   4021     { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
   4022       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
   4023     { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
   4024       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
   4025     { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
   4026       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
   4027     { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
   4028       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
   4029     { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
   4030       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
   4031     { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
   4032       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
   4033 };
   4034 
   4035 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
   4036     /* The cache test-and-clean instructions always return (1 << 30)
   4037      * to indicate that there are no dirty cache lines.
   4038      */
   4039     { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
   4040       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
   4041       .resetvalue = (1 << 30) },
   4042     { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
   4043       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
   4044       .resetvalue = (1 << 30) },
   4045 };
   4046 
   4047 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
   4048     /* Ignore ReadBuffer accesses */
   4049     { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
   4050       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
   4051       .access = PL1_RW, .resetvalue = 0,
   4052       .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
   4053 };
   4054 
   4055 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4056 {
   4057     unsigned int cur_el = arm_current_el(env);
   4058 
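             /* An EL1 read of MIDR_EL1 returns the virtualized VPIDR_EL2 value. */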
   4059     if (arm_is_el2_enabled(env) && cur_el == 1) {
   4060         return env->cp15.vpidr_el2;
   4061     }
   4062     return raw_read(env, ri);
   4063 }
   4064 
   4065 static uint64_t mpidr_read_val(CPUARMState *env)
   4066 {
   4067     ARMCPU *cpu = env_archcpu(env);
   4068     uint64_t mpidr = cpu->mp_affinity;
   4069 
   4070     if (arm_feature(env, ARM_FEATURE_V7MP)) {
   4071         mpidr |= (1U << 31);
   4072         /* Cores which are uniprocessor (non-coherent)
   4073          * but still implement the MP extensions set
   4074          * bit 30. (For instance, Cortex-R5).
   4075          */
   4076         if (cpu->mp_is_up) {
   4077             mpidr |= (1u << 30);
   4078         }
   4079     }
   4080     return mpidr;
   4081 }
   4082 
   4083 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4084 {
   4085     unsigned int cur_el = arm_current_el(env);
   4086 
   4087     if (arm_is_el2_enabled(env) && cur_el == 1) {
   4088         return env->cp15.vmpidr_el2;
   4089     }
   4090     return mpidr_read_val(env);
   4091 }
   4092 
   4093 static const ARMCPRegInfo lpae_cp_reginfo[] = {
   4094     /* NOP AMAIR0/1 */
   4095     { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
   4096       .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
   4097       .access = PL1_RW, .accessfn = access_tvm_trvm,
   4098       .type = ARM_CP_CONST, .resetvalue = 0 },
   4099     /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
   4100     { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
   4101       .access = PL1_RW, .accessfn = access_tvm_trvm,
   4102       .type = ARM_CP_CONST, .resetvalue = 0 },
   4103     { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
   4104       .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
   4105       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
   4106                              offsetof(CPUARMState, cp15.par_ns)} },
   4107     { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
   4108       .access = PL1_RW, .accessfn = access_tvm_trvm,
   4109       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
   4110       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
   4111                              offsetof(CPUARMState, cp15.ttbr0_ns) },
   4112       .writefn = vmsa_ttbr_write, },
   4113     { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
   4114       .access = PL1_RW, .accessfn = access_tvm_trvm,
   4115       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
   4116       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
   4117                              offsetof(CPUARMState, cp15.ttbr1_ns) },
   4118       .writefn = vmsa_ttbr_write, },
   4119 };
   4120 
   4121 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4122 {
   4123     return vfp_get_fpcr(env);
   4124 }
   4125 
   4126 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4127                             uint64_t value)
   4128 {
   4129     vfp_set_fpcr(env, value);
   4130 }
   4131 
   4132 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4133 {
   4134     return vfp_get_fpsr(env);
   4135 }
   4136 
   4137 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4138                             uint64_t value)
   4139 {
   4140     vfp_set_fpsr(env, value);
   4141 }
   4142 
   4143 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
   4144                                        bool isread)
   4145 {
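             /* EL0 accesses trap unless SCTLR_EL1.UMA (User Mask Access) is set. */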
   4146     if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
   4147         return CP_ACCESS_TRAP;
   4148     }
   4149     return CP_ACCESS_OK;
   4150 }
   4151 
   4152 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4153                             uint64_t value)
   4154 {
   4155     env->daif = value & PSTATE_DAIF;
   4156 }
   4157 
   4158 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4159 {
   4160     return env->pstate & PSTATE_PAN;
   4161 }
   4162 
   4163 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4164                            uint64_t value)
   4165 {
   4166     env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
   4167 }
   4168 
   4169 static const ARMCPRegInfo pan_reginfo = {
   4170     .name = "PAN", .state = ARM_CP_STATE_AA64,
   4171     .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
   4172     .type = ARM_CP_NO_RAW, .access = PL1_RW,
   4173     .readfn = aa64_pan_read, .writefn = aa64_pan_write
   4174 };
   4175 
   4176 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4177 {
   4178     return env->pstate & PSTATE_UAO;
   4179 }
   4180 
   4181 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4182                            uint64_t value)
   4183 {
   4184     env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
   4185 }
   4186 
   4187 static const ARMCPRegInfo uao_reginfo = {
   4188     .name = "UAO", .state = ARM_CP_STATE_AA64,
   4189     .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
   4190     .type = ARM_CP_NO_RAW, .access = PL1_RW,
   4191     .readfn = aa64_uao_read, .writefn = aa64_uao_write
   4192 };
   4193 
   4194 static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4195 {
   4196     return env->pstate & PSTATE_DIT;
   4197 }
   4198 
   4199 static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4200                            uint64_t value)
   4201 {
   4202     env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
   4203 }
   4204 
   4205 static const ARMCPRegInfo dit_reginfo = {
   4206     .name = "DIT", .state = ARM_CP_STATE_AA64,
   4207     .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
   4208     .type = ARM_CP_NO_RAW, .access = PL0_RW,
   4209     .readfn = aa64_dit_read, .writefn = aa64_dit_write
   4210 };
   4211 
   4212 static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4213 {
   4214     return env->pstate & PSTATE_SSBS;
   4215 }
   4216 
   4217 static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4218                            uint64_t value)
   4219 {
   4220     env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
   4221 }
   4222 
   4223 static const ARMCPRegInfo ssbs_reginfo = {
   4224     .name = "SSBS", .state = ARM_CP_STATE_AA64,
   4225     .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
   4226     .type = ARM_CP_NO_RAW, .access = PL0_RW,
   4227     .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
   4228 };
   4229 
   4230 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
   4231                                               const ARMCPRegInfo *ri,
   4232                                               bool isread)
   4233 {
   4234     /* Cache invalidate/clean to Point of Coherency or Persistence...  */
   4235     switch (arm_current_el(env)) {
   4236     case 0:
   4237         /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
   4238         if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
   4239             return CP_ACCESS_TRAP;
   4240         }
   4241         /* fall through */
   4242     case 1:
   4243         /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set.  */
   4244         if (arm_hcr_el2_eff(env) & HCR_TPCP) {
   4245             return CP_ACCESS_TRAP_EL2;
   4246         }
   4247         break;
   4248     }
   4249     return CP_ACCESS_OK;
   4250 }
   4251 
   4252 static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
   4253                                               const ARMCPRegInfo *ri,
   4254                                               bool isread)
   4255 {
   4256     /* Cache invalidate/clean to Point of Unification... */
   4257     switch (arm_current_el(env)) {
   4258     case 0:
   4259         /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
   4260         if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
   4261             return CP_ACCESS_TRAP;
   4262         }
   4263         /* fall through */
   4264     case 1:
   4265         /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set.  */
   4266         if (arm_hcr_el2_eff(env) & HCR_TPU) {
   4267             return CP_ACCESS_TRAP_EL2;
   4268         }
   4269         break;
   4270     }
   4271     return CP_ACCESS_OK;
   4272 }
   4273 
   4274 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
   4275  * Page D4-1736 (DDI0487A.b)
   4276  */
   4277 
   4278 static int vae1_tlbmask(CPUARMState *env)
   4279 {
   4280     uint64_t hcr = arm_hcr_el2_eff(env);
   4281     uint16_t mask;
   4282 
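             /*
              * When HCR_EL2.{E2H,TGE} == '11' the EL2&0 translation regime is
              * in use, so flush its MMU indexes rather than the EL1&0 ones.
              */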
   4283     if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   4284         mask = ARMMMUIdxBit_E20_2 |
   4285                ARMMMUIdxBit_E20_2_PAN |
   4286                ARMMMUIdxBit_E20_0;
   4287     } else {
   4288         mask = ARMMMUIdxBit_E10_1 |
   4289                ARMMMUIdxBit_E10_1_PAN |
   4290                ARMMMUIdxBit_E10_0;
   4291     }
   4292     return mask;
   4293 }
   4294 
   4295 /* Return 56 if TBI is enabled, 64 otherwise. */
   4296 static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
   4297                               uint64_t addr)
   4298 {
   4299     uint64_t tcr = regime_tcr(env, mmu_idx);
   4300     int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
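             /*
              * Bit 55 of the VA selects the lower (TTBR0) or upper (TTBR1)
              * range, and hence which TBI field applies to this address.
              */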
   4301     int select = extract64(addr, 55, 1);
   4302 
   4303     return (tbi >> select) & 1 ? 56 : 64;
   4304 }
   4305 
   4306 static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
   4307 {
   4308     uint64_t hcr = arm_hcr_el2_eff(env);
   4309     ARMMMUIdx mmu_idx;
   4310 
   4311     /* Only the regime of the mmu_idx below is significant. */
   4312     if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   4313         mmu_idx = ARMMMUIdx_E20_0;
   4314     } else {
   4315         mmu_idx = ARMMMUIdx_E10_0;
   4316     }
   4317 
   4318     return tlbbits_for_regime(env, mmu_idx, addr);
   4319 }
   4320 
   4321 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4322                                       uint64_t value)
   4323 {
   4324     CPUState *cs = env_cpu(env);
   4325     int mask = vae1_tlbmask(env);
   4326 
   4327     tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
   4328 }
   4329 
   4330 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4331                                     uint64_t value)
   4332 {
   4333     CPUState *cs = env_cpu(env);
   4334     int mask = vae1_tlbmask(env);
   4335 
   4336     if (tlb_force_broadcast(env)) {
   4337         tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
   4338     } else {
   4339         tlb_flush_by_mmuidx(cs, mask);
   4340     }
   4341 }
   4342 
   4343 static int e2_tlbmask(CPUARMState *env)
   4344 {
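             /* Cover both the EL2 regime and the EL2&0 (HCR_EL2.E2H) regime. */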
   4345     return (ARMMMUIdxBit_E20_0 |
   4346             ARMMMUIdxBit_E20_2 |
   4347             ARMMMUIdxBit_E20_2_PAN |
   4348             ARMMMUIdxBit_E2);
   4349 }
   4350 
   4351 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4352                                   uint64_t value)
   4353 {
   4354     CPUState *cs = env_cpu(env);
   4355     int mask = alle1_tlbmask(env);
   4356 
   4357     tlb_flush_by_mmuidx(cs, mask);
   4358 }
   4359 
   4360 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4361                                   uint64_t value)
   4362 {
   4363     CPUState *cs = env_cpu(env);
   4364     int mask = e2_tlbmask(env);
   4365 
   4366     tlb_flush_by_mmuidx(cs, mask);
   4367 }
   4368 
   4369 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4370                                   uint64_t value)
   4371 {
   4372     ARMCPU *cpu = env_archcpu(env);
   4373     CPUState *cs = CPU(cpu);
   4374 
   4375     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
   4376 }
   4377 
   4378 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4379                                     uint64_t value)
   4380 {
   4381     CPUState *cs = env_cpu(env);
   4382     int mask = alle1_tlbmask(env);
   4383 
   4384     tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
   4385 }
   4386 
   4387 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4388                                     uint64_t value)
   4389 {
   4390     CPUState *cs = env_cpu(env);
   4391     int mask = e2_tlbmask(env);
   4392 
   4393     tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
   4394 }
   4395 
   4396 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4397                                     uint64_t value)
   4398 {
   4399     CPUState *cs = env_cpu(env);
   4400 
   4401     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
   4402 }
   4403 
   4404 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4405                                  uint64_t value)
   4406 {
   4407     /* Invalidate by VA, EL2
   4408      * Currently handles both VAE2 and VALE2, since we don't support
   4409      * flush-last-level-only.
   4410      */
   4411     CPUState *cs = env_cpu(env);
   4412     int mask = e2_tlbmask(env);
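             /*
              * Bits [43:0] of the value hold VA[55:12]; shift left to recover
              * the page address, sign-extending from bit 55.
              */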
   4413     uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4414 
   4415     tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
   4416 }
   4417 
   4418 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4419                                  uint64_t value)
   4420 {
   4421     /* Invalidate by VA, EL3
   4422      * Currently handles both VAE3 and VALE3, since we don't support
   4423      * flush-last-level-only.
   4424      */
   4425     ARMCPU *cpu = env_archcpu(env);
   4426     CPUState *cs = CPU(cpu);
   4427     uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4428 
   4429     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
   4430 }
   4431 
   4432 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4433                                    uint64_t value)
   4434 {
   4435     CPUState *cs = env_cpu(env);
   4436     int mask = vae1_tlbmask(env);
   4437     uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4438     int bits = vae1_tlbbits(env, pageaddr);
   4439 
   4440     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
   4441 }
   4442 
   4443 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4444                                  uint64_t value)
   4445 {
   4446     /* Invalidate by VA, EL1&0 (AArch64 version).
   4447      * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
   4448      * since we don't support flush-for-specific-ASID-only or
   4449      * flush-last-level-only.
   4450      */
   4451     CPUState *cs = env_cpu(env);
   4452     int mask = vae1_tlbmask(env);
   4453     uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4454     int bits = vae1_tlbbits(env, pageaddr);
   4455 
   4456     if (tlb_force_broadcast(env)) {
   4457         tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
   4458     } else {
   4459         tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
   4460     }
   4461 }
   4462 
   4463 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4464                                    uint64_t value)
   4465 {
   4466     CPUState *cs = env_cpu(env);
   4467     uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4468     int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr);
   4469 
   4470     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
   4471                                                   ARMMMUIdxBit_E2, bits);
   4472 }
   4473 
   4474 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4475                                    uint64_t value)
   4476 {
   4477     CPUState *cs = env_cpu(env);
   4478     uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4479     int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
   4480 
   4481     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
   4482                                                   ARMMMUIdxBit_E3, bits);
   4483 }
   4484 
   4485 static int ipas2e1_tlbmask(CPUARMState *env, int64_t value)
   4486 {
   4487     /*
   4488      * The MSB of value is the NS field, which only applies if SEL2
   4489      * is implemented and SCR_EL3.NS is not set (i.e. in secure mode).
   4490      */
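             /* value is signed, so "value >= 0" checks that bit 63 (NS) is clear. */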
   4491     return (value >= 0
   4492             && cpu_isar_feature(aa64_sel2, env_archcpu(env))
   4493             && arm_is_secure_below_el3(env)
   4494             ? ARMMMUIdxBit_Stage2_S
   4495             : ARMMMUIdxBit_Stage2);
   4496 }
   4497 
   4498 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4499                                     uint64_t value)
   4500 {
   4501     CPUState *cs = env_cpu(env);
   4502     int mask = ipas2e1_tlbmask(env, value);
   4503     uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4504 
   4505     if (tlb_force_broadcast(env)) {
   4506         tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
   4507     } else {
   4508         tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
   4509     }
   4510 }
   4511 
   4512 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4513                                       uint64_t value)
   4514 {
   4515     CPUState *cs = env_cpu(env);
   4516     int mask = ipas2e1_tlbmask(env, value);
   4517     uint64_t pageaddr = sextract64(value << 12, 0, 56);
   4518 
   4519     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
   4520 }
   4521 
   4522 #ifdef TARGET_AARCH64
   4523 typedef struct {
   4524     uint64_t base;
   4525     uint64_t length;
   4526 } TLBIRange;
   4527 
   4528 static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg)
   4529 {
   4530     /*
   4531      * Note that the TLBI range TG field encoding differs from both
   4532      * TG0 and TG1 encodings.
   4533      */
   4534     switch (tg) {
   4535     case 1:
   4536         return Gran4K;
   4537     case 2:
   4538         return Gran16K;
   4539     case 3:
   4540         return Gran64K;
   4541     default:
   4542         return GranInvalid;
   4543     }
   4544 }
   4545 
   4546 static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
   4547                                      uint64_t value)
   4548 {
   4549     unsigned int page_size_granule, page_shift, num, scale, exponent;
    4550     /* Extract one bit to represent the VA selector in use. */
   4551     uint64_t select = sextract64(value, 36, 1);
   4552     ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true);
   4553     TLBIRange ret = { };
   4554     ARMGranuleSize gran;
   4555 
   4556     page_size_granule = extract64(value, 46, 2);
   4557     gran = tlbi_range_tg_to_gran_size(page_size_granule);
   4558 
   4559     /* The granule encoded in value must match the granule in use. */
   4560     if (gran != param.gran) {
   4561         qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
   4562                       page_size_granule);
   4563         return ret;
   4564     }
   4565 
   4566     page_shift = arm_granule_bits(gran);
   4567     num = extract64(value, 39, 5);
   4568     scale = extract64(value, 44, 2);
   4569     exponent = (5 * scale) + 1;
   4570 
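             /*
              * Range length is (NUM + 1) * 2^(5 * SCALE + 1) granules; e.g.
              * NUM = 0, SCALE = 0 with a 4K granule covers 2 pages (0x2000).
              */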
   4571     ret.length = (num + 1) << (exponent + page_shift);
   4572 
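             /*
              * BaseADDR occupies bits [36:0]; sign-extend it when the upper
              * (TTBR1) VA range is selected so that it forms a high address.
              */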
   4573     if (param.select) {
   4574         ret.base = sextract64(value, 0, 37);
   4575     } else {
   4576         ret.base = extract64(value, 0, 37);
   4577     }
   4578     if (param.ds) {
   4579         /*
    4580          * With DS=1, BaseADDR is always shifted left by 16 so that it
    4581          * can address all 52 VA bits.  The input address is perforce
    4582          * aligned on a 64k boundary regardless of translation granule.
   4583          */
   4584         page_shift = 16;
   4585     }
   4586     ret.base <<= page_shift;
   4587 
   4588     return ret;
   4589 }
   4590 
   4591 static void do_rvae_write(CPUARMState *env, uint64_t value,
   4592                           int idxmap, bool synced)
   4593 {
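             /*
              * All mmu_idx values in idxmap share one translation regime, so
              * the lowest set index suffices to decode the range parameters.
              */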
   4594     ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
   4595     TLBIRange range;
   4596     int bits;
   4597 
   4598     range = tlbi_aa64_get_range(env, one_idx, value);
   4599     bits = tlbbits_for_regime(env, one_idx, range.base);
   4600 
   4601     if (synced) {
   4602         tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
   4603                                                   range.base,
   4604                                                   range.length,
   4605                                                   idxmap,
   4606                                                   bits);
   4607     } else {
   4608         tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
   4609                                   range.length, idxmap, bits);
   4610     }
   4611 }
   4612 
   4613 static void tlbi_aa64_rvae1_write(CPUARMState *env,
   4614                                   const ARMCPRegInfo *ri,
   4615                                   uint64_t value)
   4616 {
   4617     /*
   4618      * Invalidate by VA range, EL1&0.
   4619      * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
   4620      * since we don't support flush-for-specific-ASID-only or
   4621      * flush-last-level-only.
   4622      */
   4623 
   4624     do_rvae_write(env, value, vae1_tlbmask(env),
   4625                   tlb_force_broadcast(env));
   4626 }
   4627 
   4628 static void tlbi_aa64_rvae1is_write(CPUARMState *env,
   4629                                     const ARMCPRegInfo *ri,
   4630                                     uint64_t value)
   4631 {
   4632     /*
   4633      * Invalidate by VA range, Inner/Outer Shareable EL1&0.
   4634      * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
   4635      * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
   4636      * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
   4637      * shareable specific flushes.
   4638      */
   4639 
   4640     do_rvae_write(env, value, vae1_tlbmask(env), true);
   4641 }
   4642 
   4643 static int vae2_tlbmask(CPUARMState *env)
   4644 {
   4645     return ARMMMUIdxBit_E2;
   4646 }
   4647 
   4648 static void tlbi_aa64_rvae2_write(CPUARMState *env,
   4649                                   const ARMCPRegInfo *ri,
   4650                                   uint64_t value)
   4651 {
   4652     /*
   4653      * Invalidate by VA range, EL2.
   4654      * Currently handles all of RVAE2 and RVALE2,
   4655      * since we don't support flush-for-specific-ASID-only or
   4656      * flush-last-level-only.
   4657      */
   4658 
   4659     do_rvae_write(env, value, vae2_tlbmask(env),
   4660                   tlb_force_broadcast(env));
    4663 }
   4664 
   4665 static void tlbi_aa64_rvae2is_write(CPUARMState *env,
   4666                                     const ARMCPRegInfo *ri,
   4667                                     uint64_t value)
   4668 {
   4669     /*
   4670      * Invalidate by VA range, Inner/Outer Shareable, EL2.
   4671      * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
   4672      * since we don't support flush-for-specific-ASID-only,
   4673      * flush-last-level-only or inner/outer shareable specific flushes.
   4674      */
   4675 
   4676     do_rvae_write(env, value, vae2_tlbmask(env), true);
    4678 }
   4679 
   4680 static void tlbi_aa64_rvae3_write(CPUARMState *env,
   4681                                   const ARMCPRegInfo *ri,
   4682                                   uint64_t value)
   4683 {
   4684     /*
   4685      * Invalidate by VA range, EL3.
   4686      * Currently handles all of RVAE3 and RVALE3,
   4687      * since we don't support flush-for-specific-ASID-only or
   4688      * flush-last-level-only.
   4689      */
   4690 
   4691     do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
   4692 }
   4693 
   4694 static void tlbi_aa64_rvae3is_write(CPUARMState *env,
   4695                                     const ARMCPRegInfo *ri,
   4696                                     uint64_t value)
   4697 {
   4698     /*
   4699      * Invalidate by VA range, EL3, Inner/Outer Shareable.
   4700      * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
   4701      * since we don't support flush-for-specific-ASID-only,
   4702      * flush-last-level-only or inner/outer specific flushes.
   4703      */
   4704 
   4705     do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
   4706 }
   4707 
   4708 static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4709                                      uint64_t value)
   4710 {
   4711     do_rvae_write(env, value, ipas2e1_tlbmask(env, value),
   4712                   tlb_force_broadcast(env));
   4713 }
   4714 
   4715 static void tlbi_aa64_ripas2e1is_write(CPUARMState *env,
   4716                                        const ARMCPRegInfo *ri,
   4717                                        uint64_t value)
   4718 {
   4719     do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true);
   4720 }
   4721 #endif
   4722 
   4723 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
   4724                                       bool isread)
   4725 {
   4726     int cur_el = arm_current_el(env);
   4727 
   4728     if (cur_el < 2) {
   4729         uint64_t hcr = arm_hcr_el2_eff(env);
   4730 
   4731         if (cur_el == 0) {
   4732             if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   4733                 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
   4734                     return CP_ACCESS_TRAP_EL2;
   4735                 }
   4736             } else {
   4737                 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
   4738                     return CP_ACCESS_TRAP;
   4739                 }
   4740                 if (hcr & HCR_TDZ) {
   4741                     return CP_ACCESS_TRAP_EL2;
   4742                 }
   4743             }
   4744         } else if (hcr & HCR_TDZ) {
   4745             return CP_ACCESS_TRAP_EL2;
   4746         }
   4747     }
   4748     return CP_ACCESS_OK;
   4749 }
   4750 
   4751 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4752 {
   4753     ARMCPU *cpu = env_archcpu(env);
   4754     int dzp_bit = 1 << 4;
   4755 
   4756     /* DZP indicates whether DC ZVA access is allowed */
   4757     if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
   4758         dzp_bit = 0;
   4759     }
   4760     return cpu->dcz_blocksize | dzp_bit;
   4761 }
   4762 
   4763 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
   4764                                     bool isread)
   4765 {
   4766     if (!(env->pstate & PSTATE_SP)) {
   4767         /* Access to SP_EL0 is undefined if it's being used as
   4768          * the stack pointer.
   4769          */
   4770         return CP_ACCESS_TRAP_UNCATEGORIZED;
   4771     }
   4772     return CP_ACCESS_OK;
   4773 }
   4774 
   4775 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
   4776 {
   4777     return env->pstate & PSTATE_SP;
   4778 }
   4779 
   4780 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
   4781 {
   4782     update_spsel(env, val);
   4783 }
   4784 
   4785 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4786                         uint64_t value)
   4787 {
   4788     ARMCPU *cpu = env_archcpu(env);
   4789 
   4790     if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
   4791         /* M bit is RAZ/WI for PMSA with no MPU implemented */
   4792         value &= ~SCTLR_M;
   4793     }
   4794 
   4795     /* ??? Lots of these bits are not implemented.  */
   4796 
   4797     if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
   4798         if (ri->opc1 == 6) { /* SCTLR_EL3 */
   4799             value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
   4800         } else {
   4801             value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
   4802                        SCTLR_ATA0 | SCTLR_ATA);
   4803         }
   4804     }
   4805 
   4806     if (raw_read(env, ri) == value) {
   4807         /* Skip the TLB flush if nothing actually changed; Linux likes
   4808          * to do a lot of pointless SCTLR writes.
   4809          */
   4810         return;
   4811     }
   4812 
   4813     raw_write(env, ri, value);
   4814 
   4815     /* This may enable/disable the MMU, so do a TLB flush.  */
   4816     tlb_flush(CPU(cpu));
   4817 
   4818     if (ri->type & ARM_CP_SUPPRESS_TB_END) {
   4819         /*
   4820          * Normally we would always end the TB on an SCTLR write; see the
    4821          * comment in ARMCPRegInfo sctlr initialization below for why XScale
   4822          * is special.  Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
   4823          * of hflags from the translator, so do it here.
   4824          */
   4825         arm_rebuild_hflags(env);
   4826     }
   4827 }
   4828 
   4829 static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4830                            uint64_t value)
   4831 {
   4832     /*
   4833      * Some MDCR_EL3 bits affect whether PMU counters are running:
   4834      * if we are trying to change any of those then we must
   4835      * bracket this update with PMU start/finish calls.
   4836      */
   4837     bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS;
   4838 
   4839     if (pmu_op) {
   4840         pmu_op_start(env);
   4841     }
   4842     env->cp15.mdcr_el3 = value;
   4843     if (pmu_op) {
   4844         pmu_op_finish(env);
   4845     }
   4846 }
   4847 
   4848 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4849                        uint64_t value)
   4850 {
   4851     /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */
   4852     mdcr_el3_write(env, ri, value & SDCR_VALID_MASK);
   4853 }
   4854 
   4855 static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
   4856                            uint64_t value)
   4857 {
   4858     /*
   4859      * Some MDCR_EL2 bits affect whether PMU counters are running:
   4860      * if we are trying to change any of those then we must
   4861      * bracket this update with PMU start/finish calls.
   4862      */
   4863     bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS;
   4864 
   4865     if (pmu_op) {
   4866         pmu_op_start(env);
   4867     }
   4868     env->cp15.mdcr_el2 = value;
   4869     if (pmu_op) {
   4870         pmu_op_finish(env);
   4871     }
   4872 }
   4873 
   4874 static const ARMCPRegInfo v8_cp_reginfo[] = {
   4875     /* Minimal set of EL0-visible registers. This will need to be expanded
   4876      * significantly for system emulation of AArch64 CPUs.
   4877      */
   4878     { .name = "NZCV", .state = ARM_CP_STATE_AA64,
   4879       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
   4880       .access = PL0_RW, .type = ARM_CP_NZCV },
   4881     { .name = "DAIF", .state = ARM_CP_STATE_AA64,
   4882       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
   4883       .type = ARM_CP_NO_RAW,
   4884       .access = PL0_RW, .accessfn = aa64_daif_access,
   4885       .fieldoffset = offsetof(CPUARMState, daif),
   4886       .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
   4887     { .name = "FPCR", .state = ARM_CP_STATE_AA64,
   4888       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
   4889       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
   4890       .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
   4891     { .name = "FPSR", .state = ARM_CP_STATE_AA64,
   4892       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
   4893       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
   4894       .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
   4895     { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
   4896       .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
   4897       .access = PL0_R, .type = ARM_CP_NO_RAW,
   4898       .readfn = aa64_dczid_read },
   4899     { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
   4900       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
   4901       .access = PL0_W, .type = ARM_CP_DC_ZVA,
   4902 #ifndef CONFIG_USER_ONLY
   4903       /* Avoid overhead of an access check that always passes in user-mode */
   4904       .accessfn = aa64_zva_access,
   4905 #endif
   4906     },
   4907     { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
   4908       .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
   4909       .access = PL1_R, .type = ARM_CP_CURRENTEL },
   4910     /* Cache ops: all NOPs since we don't emulate caches */
   4911     { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
   4912       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
   4913       .access = PL1_W, .type = ARM_CP_NOP,
   4914       .accessfn = aa64_cacheop_pou_access },
   4915     { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
   4916       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
   4917       .access = PL1_W, .type = ARM_CP_NOP,
   4918       .accessfn = aa64_cacheop_pou_access },
   4919     { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
   4920       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
   4921       .access = PL0_W, .type = ARM_CP_NOP,
   4922       .accessfn = aa64_cacheop_pou_access },
   4923     { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
   4924       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
   4925       .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
   4926       .type = ARM_CP_NOP },
   4927     { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
   4928       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
   4929       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
   4930     { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
   4931       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
   4932       .access = PL0_W, .type = ARM_CP_NOP,
   4933       .accessfn = aa64_cacheop_poc_access },
   4934     { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
   4935       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
   4936       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
   4937     { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
   4938       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
   4939       .access = PL0_W, .type = ARM_CP_NOP,
   4940       .accessfn = aa64_cacheop_pou_access },
   4941     { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
   4942       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
   4943       .access = PL0_W, .type = ARM_CP_NOP,
   4944       .accessfn = aa64_cacheop_poc_access },
   4945     { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
   4946       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
   4947       .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
   4948     /* TLBI operations */
   4949     { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
   4950       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
   4951       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4952       .writefn = tlbi_aa64_vmalle1is_write },
   4953     { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
   4954       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
   4955       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4956       .writefn = tlbi_aa64_vae1is_write },
   4957     { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
   4958       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
   4959       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4960       .writefn = tlbi_aa64_vmalle1is_write },
   4961     { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
   4962       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
   4963       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4964       .writefn = tlbi_aa64_vae1is_write },
   4965     { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
   4966       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
   4967       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4968       .writefn = tlbi_aa64_vae1is_write },
   4969     { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
   4970       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
   4971       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4972       .writefn = tlbi_aa64_vae1is_write },
   4973     { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
   4974       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
   4975       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4976       .writefn = tlbi_aa64_vmalle1_write },
   4977     { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
   4978       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
   4979       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4980       .writefn = tlbi_aa64_vae1_write },
   4981     { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
   4982       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
   4983       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4984       .writefn = tlbi_aa64_vmalle1_write },
   4985     { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
   4986       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
   4987       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4988       .writefn = tlbi_aa64_vae1_write },
   4989     { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
   4990       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
   4991       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4992       .writefn = tlbi_aa64_vae1_write },
   4993     { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
   4994       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
   4995       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   4996       .writefn = tlbi_aa64_vae1_write },
   4997     { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
   4998       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
   4999       .access = PL2_W, .type = ARM_CP_NO_RAW,
   5000       .writefn = tlbi_aa64_ipas2e1is_write },
   5001     { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
   5002       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
   5003       .access = PL2_W, .type = ARM_CP_NO_RAW,
   5004       .writefn = tlbi_aa64_ipas2e1is_write },
   5005     { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
   5006       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
   5007       .access = PL2_W, .type = ARM_CP_NO_RAW,
   5008       .writefn = tlbi_aa64_alle1is_write },
   5009     { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
   5010       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
   5011       .access = PL2_W, .type = ARM_CP_NO_RAW,
   5012       .writefn = tlbi_aa64_alle1is_write },
   5013     { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
   5014       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
   5015       .access = PL2_W, .type = ARM_CP_NO_RAW,
   5016       .writefn = tlbi_aa64_ipas2e1_write },
   5017     { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
   5018       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
   5019       .access = PL2_W, .type = ARM_CP_NO_RAW,
   5020       .writefn = tlbi_aa64_ipas2e1_write },
   5021     { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
   5022       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
   5023       .access = PL2_W, .type = ARM_CP_NO_RAW,
   5024       .writefn = tlbi_aa64_alle1_write },
   5025     { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
   5026       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
   5027       .access = PL2_W, .type = ARM_CP_NO_RAW,
   5028       .writefn = tlbi_aa64_alle1is_write },
   5029 #ifndef CONFIG_USER_ONLY
   5030     /* 64 bit address translation operations */
   5031     { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
   5032       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
   5033       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   5034       .writefn = ats_write64 },
   5035     { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
   5036       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
   5037       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   5038       .writefn = ats_write64 },
   5039     { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
   5040       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
   5041       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   5042       .writefn = ats_write64 },
   5043     { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
   5044       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
   5045       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   5046       .writefn = ats_write64 },
   5047     { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
   5048       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
   5049       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   5050       .writefn = ats_write64 },
   5051     { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
   5052       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
   5053       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   5054       .writefn = ats_write64 },
   5055     { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
   5056       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
   5057       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   5058       .writefn = ats_write64 },
   5059     { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
   5060       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
   5061       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   5062       .writefn = ats_write64 },
   5063     /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
   5064     { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
   5065       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
   5066       .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   5067       .writefn = ats_write64 },
   5068     { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
   5069       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
   5070       .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   5071       .writefn = ats_write64 },
   5072     { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
   5073       .type = ARM_CP_ALIAS,
   5074       .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
   5075       .access = PL1_RW, .resetvalue = 0,
   5076       .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
   5077       .writefn = par_write },
   5078 #endif
   5079     /* TLB invalidate last level of translation table walk */
   5080     { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
   5081       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   5082       .writefn = tlbimva_is_write },
   5083     { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
   5084       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   5085       .writefn = tlbimvaa_is_write },
   5086     { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
   5087       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   5088       .writefn = tlbimva_write },
   5089     { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
   5090       .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
   5091       .writefn = tlbimvaa_write },
   5092     { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
   5093       .type = ARM_CP_NO_RAW, .access = PL2_W,
   5094       .writefn = tlbimva_hyp_write },
   5095     { .name = "TLBIMVALHIS",
   5096       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
   5097       .type = ARM_CP_NO_RAW, .access = PL2_W,
   5098       .writefn = tlbimva_hyp_is_write },
   5099     { .name = "TLBIIPAS2",
   5100       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
   5101       .type = ARM_CP_NO_RAW, .access = PL2_W,
   5102       .writefn = tlbiipas2_hyp_write },
   5103     { .name = "TLBIIPAS2IS",
   5104       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
   5105       .type = ARM_CP_NO_RAW, .access = PL2_W,
   5106       .writefn = tlbiipas2is_hyp_write },
   5107     { .name = "TLBIIPAS2L",
   5108       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
   5109       .type = ARM_CP_NO_RAW, .access = PL2_W,
   5110       .writefn = tlbiipas2_hyp_write },
   5111     { .name = "TLBIIPAS2LIS",
   5112       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
   5113       .type = ARM_CP_NO_RAW, .access = PL2_W,
   5114       .writefn = tlbiipas2is_hyp_write },
   5115     /* 32 bit cache operations */
   5116     { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
   5117       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
   5118     { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
   5119       .type = ARM_CP_NOP, .access = PL1_W },
   5120     { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
   5121       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
   5122     { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
   5123       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
   5124     { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
   5125       .type = ARM_CP_NOP, .access = PL1_W },
   5126     { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
   5127       .type = ARM_CP_NOP, .access = PL1_W },
   5128     { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
   5129       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
   5130     { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
   5131       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   5132     { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
   5133       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
   5134     { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
   5135       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   5136     { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
   5137       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
   5138     { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
   5139       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
   5140     { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
   5141       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   5142     /* MMU Domain access control / MPU write buffer control */
   5143     { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
   5144       .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
   5145       .writefn = dacr_write, .raw_writefn = raw_write,
   5146       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
   5147                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
   5148     { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
   5149       .type = ARM_CP_ALIAS,
   5150       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
   5151       .access = PL1_RW,
   5152       .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
   5153     { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
   5154       .type = ARM_CP_ALIAS,
   5155       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
   5156       .access = PL1_RW,
   5157       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
   5158     /* We rely on the access checks not allowing the guest to write to the
   5159      * state field when SPSel indicates that it's being used as the stack
   5160      * pointer.
   5161      */
   5162     { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
   5163       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
   5164       .access = PL1_RW, .accessfn = sp_el0_access,
   5165       .type = ARM_CP_ALIAS,
   5166       .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
   5167     { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
   5168       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
   5169       .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP,
   5170       .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
   5171     { .name = "SPSel", .state = ARM_CP_STATE_AA64,
   5172       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
   5173       .type = ARM_CP_NO_RAW,
   5174       .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
   5175     { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
   5176       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
   5177       .access = PL2_RW,
   5178       .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
   5179       .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
   5180     { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
   5181       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
   5182       .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
   5183       .writefn = dacr_write, .raw_writefn = raw_write,
   5184       .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
   5185     { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
   5186       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
   5187       .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
   5188       .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
   5189     { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
   5190       .type = ARM_CP_ALIAS,
   5191       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
   5192       .access = PL2_RW,
   5193       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
   5194     { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
   5195       .type = ARM_CP_ALIAS,
   5196       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
   5197       .access = PL2_RW,
   5198       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
   5199     { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
   5200       .type = ARM_CP_ALIAS,
   5201       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
   5202       .access = PL2_RW,
   5203       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
   5204     { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
   5205       .type = ARM_CP_ALIAS,
   5206       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
   5207       .access = PL2_RW,
   5208       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
   5209     { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
   5210       .type = ARM_CP_IO,
   5211       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
   5212       .resetvalue = 0,
   5213       .access = PL3_RW,
   5214       .writefn = mdcr_el3_write,
   5215       .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
   5216     { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO,
   5217       .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
   5218       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
   5219       .writefn = sdcr_write,
   5220       .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
   5221 };
   5222 
   5223 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
   5224 {
   5225     ARMCPU *cpu = env_archcpu(env);
   5226 
   5227     if (arm_feature(env, ARM_FEATURE_V8)) {
   5228         valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
   5229     } else {
   5230         valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
   5231     }
   5232 
   5233     if (arm_feature(env, ARM_FEATURE_EL3)) {
   5234         valid_mask &= ~HCR_HCD;
   5235     } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
   5236         /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
   5237          * However, if we're using the SMC PSCI conduit then QEMU is
   5238          * effectively acting like EL3 firmware and so the guest at
   5239          * EL2 should retain the ability to prevent EL1 from being
   5240          * able to make SMC calls into the ersatz firmware, so in
   5241          * that case HCR.TSC should be read/write.
   5242          */
   5243         valid_mask &= ~HCR_TSC;
   5244     }
   5245 
   5246     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
   5247         if (cpu_isar_feature(aa64_vh, cpu)) {
   5248             valid_mask |= HCR_E2H;
   5249         }
   5250         if (cpu_isar_feature(aa64_ras, cpu)) {
   5251             valid_mask |= HCR_TERR | HCR_TEA;
   5252         }
   5253         if (cpu_isar_feature(aa64_lor, cpu)) {
   5254             valid_mask |= HCR_TLOR;
   5255         }
   5256         if (cpu_isar_feature(aa64_pauth, cpu)) {
   5257             valid_mask |= HCR_API | HCR_APK;
   5258         }
   5259         if (cpu_isar_feature(aa64_mte, cpu)) {
   5260             valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
   5261         }
   5262         if (cpu_isar_feature(aa64_scxtnum, cpu)) {
   5263             valid_mask |= HCR_ENSCXT;
   5264         }
   5265         if (cpu_isar_feature(aa64_fwb, cpu)) {
   5266             valid_mask |= HCR_FWB;
   5267         }
   5268     }
   5269 
   5270     /* Clear RES0 bits.  */
   5271     value &= valid_mask;
   5272 
   5273     /*
   5274      * These bits change the MMU setup:
   5275      * HCR_VM enables stage 2 translation
   5276      * HCR_PTW forbids certain page-table setups
   5277      * HCR_DC disables stage1 and enables stage2 translation
   5278      * HCR_DCT enables tagging on (disabled) stage1 translation
   5279      * HCR_FWB changes the interpretation of stage2 descriptor bits
   5280      */
   5281     if ((env->cp15.hcr_el2 ^ value) &
   5282         (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB)) {
   5283         tlb_flush(CPU(cpu));
   5284     }
   5285     env->cp15.hcr_el2 = value;
   5286 
   5287     /*
   5288      * Updates to VI and VF require us to update the status of
   5289      * virtual interrupts, which are the logical OR of these bits
   5290      * and the state of the input lines from the GIC. (This requires
   5291      * that we have the iothread lock, which is done by marking the
   5292      * reginfo structs as ARM_CP_IO.)
   5293      * Note that if a write to HCR pends a VIRQ or VFIQ it is never
   5294      * possible for it to be taken immediately, because VIRQ and
   5295      * VFIQ are masked unless running at EL0 or EL1, and HCR
   5296      * can only be written at EL2.
   5297      */
   5298     g_assert(qemu_mutex_iothread_locked());
   5299     arm_cpu_update_virq(cpu);
   5300     arm_cpu_update_vfiq(cpu);
   5301     arm_cpu_update_vserr(cpu);
   5302 }
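
         /*
          * Illustrative note (not part of the build): the flush test above
          * uses the XOR-with-old-value idiom -- (old ^ new) has a bit set for
          * every bit that changed, so the TLB is flushed only when one of the
          * listed MMU-affecting bits actually flips:
          *
          *   uint64_t changed = env->cp15.hcr_el2 ^ value;
          *   if (changed & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB)) {
          *       tlb_flush(CPU(cpu));
          *   }
          */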
   5303 
   5304 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
   5305 {
   5306     do_hcr_write(env, value, 0);
   5307 }
   5308 
   5309 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
   5310                           uint64_t value)
   5311 {
   5312     /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
   5313     value = deposit64(env->cp15.hcr_el2, 32, 32, value);
   5314     do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
   5315 }
   5316 
   5317 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
   5318                          uint64_t value)
   5319 {
   5320     /* Handle HCR write, i.e. write to low half of HCR_EL2 */
   5321     value = deposit64(env->cp15.hcr_el2, 0, 32, value);
   5322     do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
   5323 }
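
         /*
          * Worked example (sketch only): the AArch32 view splits HCR_EL2 into
          * HCR (low half) and HCR2 (high half), each merged with deposit64()
          * before the common write path runs.  With hcr_el2 == 0x80000000:
          *
          *   hcr_writehigh(env, ri, 0x1);
          *     value = deposit64(0x0000000080000000, 32, 32, 0x1)
          *           = 0x0000000180000000;
          *     do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
          *
          * The valid_mask argument marks the half that was not rewritten as
          * already valid, so do_hcr_write() only revalidates the new half.
          */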
   5324 
   5325 /*
    5326  * Return the effective value of HCR_EL2 for the given security state.
   5327  * Bits that are not included here:
   5328  * RW       (read from SCR_EL3.RW as needed)
   5329  */
   5330 uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure)
   5331 {
   5332     uint64_t ret = env->cp15.hcr_el2;
   5333 
   5334     if (!arm_is_el2_enabled_secstate(env, secure)) {
   5335         /*
   5336          * "This register has no effect if EL2 is not enabled in the
   5337          * current Security state".  This is ARMv8.4-SecEL2 speak for
   5338          * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
   5339          *
   5340          * Prior to that, the language was "In an implementation that
   5341          * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
   5342          * as if this field is 0 for all purposes other than a direct
   5343          * read or write access of HCR_EL2".  With lots of enumeration
   5344          * on a per-field basis.  In current QEMU, this is condition
   5345          * is arm_is_secure_below_el3.
   5346          *
   5347          * Since the v8.4 language applies to the entire register, and
   5348          * appears to be backward compatible, use that.
   5349          */
   5350         return 0;
   5351     }
   5352 
   5353     /*
   5354      * For a cpu that supports both aarch64 and aarch32, we can set bits
   5355      * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
   5356      * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
   5357      */
   5358     if (!arm_el_is_aa64(env, 2)) {
   5359         uint64_t aa32_valid;
   5360 
   5361         /*
   5362          * These bits are up-to-date as of ARMv8.6.
   5363          * For HCR, it's easiest to list just the 2 bits that are invalid.
   5364          * For HCR2, list those that are valid.
   5365          */
   5366         aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
   5367         aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
   5368                        HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
   5369         ret &= aa32_valid;
   5370     }
   5371 
   5372     if (ret & HCR_TGE) {
   5373         /* These bits are up-to-date as of ARMv8.6.  */
   5374         if (ret & HCR_E2H) {
   5375             ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
   5376                      HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
   5377                      HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
   5378                      HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
   5379                      HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
   5380                      HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
   5381         } else {
   5382             ret |= HCR_FMO | HCR_IMO | HCR_AMO;
   5383         }
   5384         ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
   5385                  HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
   5386                  HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
   5387                  HCR_TLOR);
   5388     }
   5389 
   5390     return ret;
   5391 }
   5392 
   5393 uint64_t arm_hcr_el2_eff(CPUARMState *env)
   5394 {
   5395     return arm_hcr_el2_eff_secstate(env, arm_is_secure_below_el3(env));
   5396 }
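
         /*
          * Typical caller pattern (sketch): trap checks in this file test the
          * effective value rather than the raw register, e.g.
          *
          *   if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
          *       return CP_ACCESS_TRAP_EL2;
          *   }
          *
          * so that the disabled-EL2, AArch32 and TGE adjustments above are
          * applied consistently at every trap check.
          */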
   5397 
   5398 /*
   5399  * Corresponds to ARM pseudocode function ELIsInHost().
   5400  */
   5401 bool el_is_in_host(CPUARMState *env, int el)
   5402 {
   5403     uint64_t mask;
   5404 
   5405     /*
   5406      * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
   5407      * Perform the simplest bit tests first, and validate EL2 afterward.
   5408      */
   5409     if (el & 1) {
   5410         return false; /* EL1 or EL3 */
   5411     }
   5412 
   5413     /*
   5414      * Note that hcr_write() checks isar_feature_aa64_vh(),
   5415      * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
   5416      */
   5417     mask = el ? HCR_E2H : HCR_E2H | HCR_TGE;
   5418     if ((env->cp15.hcr_el2 & mask) != mask) {
   5419         return false;
   5420     }
   5421 
   5422     /* TGE and/or E2H set: double check those bits are currently legal. */
   5423     return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2);
   5424 }
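
         /*
          * Summary of the checks above (E = HCR_EL2.E2H, T = HCR_EL2.TGE),
          * assuming EL2 is enabled and using AArch64:
          *
          *   el == 0 : in host iff E && T
          *   el == 2 : in host iff E
          *   el == 1 or el == 3 : never in host
          */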
   5425 
   5426 static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
   5427                        uint64_t value)
   5428 {
   5429     uint64_t valid_mask = 0;
   5430 
   5431     /* No features adding bits to HCRX are implemented. */
   5432 
   5433     /* Clear RES0 bits.  */
   5434     env->cp15.hcrx_el2 = value & valid_mask;
   5435 }
   5436 
   5437 static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
   5438                                   bool isread)
   5439 {
   5440     if (arm_current_el(env) < 3
   5441         && arm_feature(env, ARM_FEATURE_EL3)
   5442         && !(env->cp15.scr_el3 & SCR_HXEN)) {
   5443         return CP_ACCESS_TRAP_EL3;
   5444     }
   5445     return CP_ACCESS_OK;
   5446 }
   5447 
   5448 static const ARMCPRegInfo hcrx_el2_reginfo = {
   5449     .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
   5450     .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
   5451     .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
   5452     .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
   5453 };
   5454 
   5455 /* Return the effective value of HCRX_EL2.  */
   5456 uint64_t arm_hcrx_el2_eff(CPUARMState *env)
   5457 {
   5458     /*
   5459      * The bits in this register behave as 0 for all purposes other than
   5460      * direct reads of the register if:
   5461      *   - EL2 is not enabled in the current security state,
   5462      *   - SCR_EL3.HXEn is 0.
   5463      */
   5464     if (!arm_is_el2_enabled(env)
   5465         || (arm_feature(env, ARM_FEATURE_EL3)
   5466             && !(env->cp15.scr_el3 & SCR_HXEN))) {
   5467         return 0;
   5468     }
   5469     return env->cp15.hcrx_el2;
   5470 }
   5471 
   5472 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
   5473                            uint64_t value)
   5474 {
   5475     /*
   5476      * For A-profile AArch32 EL3, if NSACR.CP10
   5477      * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
   5478      */
   5479     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
   5480         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
   5481         uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
   5482         value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
   5483     }
   5484     env->cp15.cptr_el[2] = value;
   5485 }
   5486 
   5487 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
   5488 {
   5489     /*
   5490      * For A-profile AArch32 EL3, if NSACR.CP10
   5491      * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
   5492      */
   5493     uint64_t value = env->cp15.cptr_el[2];
   5494 
   5495     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
   5496         !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
   5497         value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
   5498     }
   5499     return value;
   5500 }
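
         /*
          * Example of the behaviour above (sketch): with an AArch32 EL3,
          * NSACR.CP10 == 0 and the CPU in non-secure state,
          *
          *   cptr_el2_write(env, ri, 0);    // TCP11/TCP10 writes ignored
          *   v = cptr_el2_read(env, ri);    // v reads back with TCP11|TCP10 set
          *
          * i.e. those two bits are read-as-one, writes-ignored as described.
          */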
   5501 
   5502 static const ARMCPRegInfo el2_cp_reginfo[] = {
   5503     { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
   5504       .type = ARM_CP_IO,
   5505       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
   5506       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
   5507       .writefn = hcr_write },
   5508     { .name = "HCR", .state = ARM_CP_STATE_AA32,
   5509       .type = ARM_CP_ALIAS | ARM_CP_IO,
   5510       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
   5511       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
   5512       .writefn = hcr_writelow },
   5513     { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
   5514       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
   5515       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   5516     { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
   5517       .type = ARM_CP_ALIAS,
   5518       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
   5519       .access = PL2_RW,
   5520       .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
   5521     { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
   5522       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
   5523       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
   5524     { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
   5525       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
   5526       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
   5527     { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
   5528       .type = ARM_CP_ALIAS,
   5529       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
   5530       .access = PL2_RW,
   5531       .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
   5532     { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
   5533       .type = ARM_CP_ALIAS,
   5534       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
   5535       .access = PL2_RW,
   5536       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
   5537     { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
   5538       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
   5539       .access = PL2_RW, .writefn = vbar_write,
   5540       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
   5541       .resetvalue = 0 },
   5542     { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
   5543       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
   5544       .access = PL3_RW, .type = ARM_CP_ALIAS,
   5545       .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
   5546     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
   5547       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
   5548       .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
   5549       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
   5550       .readfn = cptr_el2_read, .writefn = cptr_el2_write },
   5551     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
   5552       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
   5553       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
   5554       .resetvalue = 0 },
   5555     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
   5556       .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
   5557       .access = PL2_RW, .type = ARM_CP_ALIAS,
   5558       .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
   5559     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
   5560       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
   5561       .access = PL2_RW, .type = ARM_CP_CONST,
   5562       .resetvalue = 0 },
   5563     /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
   5564     { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
   5565       .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
   5566       .access = PL2_RW, .type = ARM_CP_CONST,
   5567       .resetvalue = 0 },
   5568     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
   5569       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
   5570       .access = PL2_RW, .type = ARM_CP_CONST,
   5571       .resetvalue = 0 },
   5572     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
   5573       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
   5574       .access = PL2_RW, .type = ARM_CP_CONST,
   5575       .resetvalue = 0 },
   5576     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
   5577       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
   5578       .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
   5579       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
   5580     { .name = "VTCR", .state = ARM_CP_STATE_AA32,
   5581       .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
   5582       .type = ARM_CP_ALIAS,
   5583       .access = PL2_RW, .accessfn = access_el3_aa32ns,
   5584       .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
   5585     { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
   5586       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
   5587       .access = PL2_RW,
   5588       /* no .writefn needed as this can't cause an ASID change */
   5589       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
   5590     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
   5591       .cp = 15, .opc1 = 6, .crm = 2,
   5592       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
   5593       .access = PL2_RW, .accessfn = access_el3_aa32ns,
   5594       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
   5595       .writefn = vttbr_write },
   5596     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
   5597       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
   5598       .access = PL2_RW, .writefn = vttbr_write,
   5599       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
   5600     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
   5601       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
   5602       .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
   5603       .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
   5604     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
   5605       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
   5606       .access = PL2_RW, .resetvalue = 0,
   5607       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
   5608     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
   5609       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
   5610       .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
   5611       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
   5612     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
   5613       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
   5614       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
   5615     { .name = "TLBIALLNSNH",
   5616       .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
   5617       .type = ARM_CP_NO_RAW, .access = PL2_W,
   5618       .writefn = tlbiall_nsnh_write },
   5619     { .name = "TLBIALLNSNHIS",
   5620       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
   5621       .type = ARM_CP_NO_RAW, .access = PL2_W,
   5622       .writefn = tlbiall_nsnh_is_write },
   5623     { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
   5624       .type = ARM_CP_NO_RAW, .access = PL2_W,
   5625       .writefn = tlbiall_hyp_write },
   5626     { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
   5627       .type = ARM_CP_NO_RAW, .access = PL2_W,
   5628       .writefn = tlbiall_hyp_is_write },
   5629     { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
   5630       .type = ARM_CP_NO_RAW, .access = PL2_W,
   5631       .writefn = tlbimva_hyp_write },
   5632     { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
   5633       .type = ARM_CP_NO_RAW, .access = PL2_W,
   5634       .writefn = tlbimva_hyp_is_write },
   5635     { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
   5636       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
   5637       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   5638       .writefn = tlbi_aa64_alle2_write },
   5639     { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
   5640       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
   5641       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   5642       .writefn = tlbi_aa64_vae2_write },
   5643     { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
   5644       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
   5645       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   5646       .writefn = tlbi_aa64_vae2_write },
   5647     { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
   5648       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
   5649       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   5650       .writefn = tlbi_aa64_alle2is_write },
   5651     { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
   5652       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
   5653       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   5654       .writefn = tlbi_aa64_vae2is_write },
   5655     { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
   5656       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
   5657       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   5658       .writefn = tlbi_aa64_vae2is_write },
   5659 #ifndef CONFIG_USER_ONLY
   5660     /* Unlike the other EL2-related AT operations, these must
   5661      * UNDEF from EL3 if EL2 is not implemented, which is why we
   5662      * define them here rather than with the rest of the AT ops.
   5663      */
   5664     { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
   5665       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
   5666       .access = PL2_W, .accessfn = at_s1e2_access,
   5667       .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
   5668       .writefn = ats_write64 },
   5669     { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
   5670       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
   5671       .access = PL2_W, .accessfn = at_s1e2_access,
   5672       .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
   5673       .writefn = ats_write64 },
   5674     /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
   5675      * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
   5676      * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
   5677      * to behave as if SCR.NS was 1.
   5678      */
   5679     { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
   5680       .access = PL2_W,
   5681       .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
   5682     { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
   5683       .access = PL2_W,
   5684       .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
   5685     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
   5686       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
    5687       /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
   5688        * reset values as IMPDEF. We choose to reset to 3 to comply with
   5689        * both ARMv7 and ARMv8.
   5690        */
   5691       .access = PL2_RW, .resetvalue = 3,
   5692       .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
   5693     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
   5694       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
   5695       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
   5696       .writefn = gt_cntvoff_write,
   5697       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
   5698     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
   5699       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
   5700       .writefn = gt_cntvoff_write,
   5701       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
   5702     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
   5703       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
   5704       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
   5705       .type = ARM_CP_IO, .access = PL2_RW,
   5706       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
   5707     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
   5708       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
   5709       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
   5710       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
   5711     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
   5712       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
   5713       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
   5714       .resetfn = gt_hyp_timer_reset,
   5715       .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
   5716     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
   5717       .type = ARM_CP_IO,
   5718       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
   5719       .access = PL2_RW,
   5720       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
   5721       .resetvalue = 0,
   5722       .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
   5723 #endif
   5724     { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
   5725       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
   5726       .access = PL2_RW, .accessfn = access_el3_aa32ns,
   5727       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
   5728     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
   5729       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
   5730       .access = PL2_RW,
   5731       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
   5732     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
   5733       .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
   5734       .access = PL2_RW,
   5735       .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
   5736 };
   5737 
   5738 static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
   5739     { .name = "HCR2", .state = ARM_CP_STATE_AA32,
   5740       .type = ARM_CP_ALIAS | ARM_CP_IO,
   5741       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
   5742       .access = PL2_RW,
   5743       .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
   5744       .writefn = hcr_writehigh },
   5745 };
   5746 
   5747 static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
   5748                                   bool isread)
   5749 {
   5750     if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
   5751         return CP_ACCESS_OK;
   5752     }
   5753     return CP_ACCESS_TRAP_UNCATEGORIZED;
   5754 }
   5755 
   5756 static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
   5757     { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
   5758       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
   5759       .access = PL2_RW, .accessfn = sel2_access,
   5760       .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
   5761     { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
   5762       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
   5763       .access = PL2_RW, .accessfn = sel2_access,
   5764       .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
   5765 };
   5766 
   5767 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
   5768                                    bool isread)
   5769 {
   5770     /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
   5771      * At Secure EL1 it traps to EL3 or EL2.
   5772      */
   5773     if (arm_current_el(env) == 3) {
   5774         return CP_ACCESS_OK;
   5775     }
   5776     if (arm_is_secure_below_el3(env)) {
   5777         if (env->cp15.scr_el3 & SCR_EEL2) {
   5778             return CP_ACCESS_TRAP_EL2;
   5779         }
   5780         return CP_ACCESS_TRAP_EL3;
   5781     }
   5782     /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
   5783     if (isread) {
   5784         return CP_ACCESS_OK;
   5785     }
   5786     return CP_ACCESS_TRAP_UNCATEGORIZED;
   5787 }
   5788 
   5789 static const ARMCPRegInfo el3_cp_reginfo[] = {
   5790     { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
   5791       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
   5792       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
   5793       .resetfn = scr_reset, .writefn = scr_write },
   5794     { .name = "SCR",  .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
   5795       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
   5796       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
   5797       .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
   5798       .writefn = scr_write },
   5799     { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
   5800       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
   5801       .access = PL3_RW, .resetvalue = 0,
   5802       .fieldoffset = offsetof(CPUARMState, cp15.sder) },
   5803     { .name = "SDER",
   5804       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
   5805       .access = PL3_RW, .resetvalue = 0,
   5806       .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
   5807     { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
   5808       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
   5809       .writefn = vbar_write, .resetvalue = 0,
   5810       .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
   5811     { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
   5812       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
   5813       .access = PL3_RW, .resetvalue = 0,
   5814       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
   5815     { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
   5816       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
   5817       .access = PL3_RW,
   5818       /* no .writefn needed as this can't cause an ASID change */
   5819       .resetvalue = 0,
   5820       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
   5821     { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
   5822       .type = ARM_CP_ALIAS,
   5823       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
   5824       .access = PL3_RW,
   5825       .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
   5826     { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
   5827       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
   5828       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
   5829     { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
   5830       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
   5831       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
   5832     { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
   5833       .type = ARM_CP_ALIAS,
   5834       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
   5835       .access = PL3_RW,
   5836       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
   5837     { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
   5838       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
   5839       .access = PL3_RW, .writefn = vbar_write,
   5840       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
   5841       .resetvalue = 0 },
   5842     { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
   5843       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
   5844       .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
   5845       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
   5846     { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
   5847       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
   5848       .access = PL3_RW, .resetvalue = 0,
   5849       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
   5850     { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
   5851       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
   5852       .access = PL3_RW, .type = ARM_CP_CONST,
   5853       .resetvalue = 0 },
   5854     { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
   5855       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
   5856       .access = PL3_RW, .type = ARM_CP_CONST,
   5857       .resetvalue = 0 },
   5858     { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
   5859       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
   5860       .access = PL3_RW, .type = ARM_CP_CONST,
   5861       .resetvalue = 0 },
   5862     { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
   5863       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
   5864       .access = PL3_W, .type = ARM_CP_NO_RAW,
   5865       .writefn = tlbi_aa64_alle3is_write },
   5866     { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
   5867       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
   5868       .access = PL3_W, .type = ARM_CP_NO_RAW,
   5869       .writefn = tlbi_aa64_vae3is_write },
   5870     { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
   5871       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
   5872       .access = PL3_W, .type = ARM_CP_NO_RAW,
   5873       .writefn = tlbi_aa64_vae3is_write },
   5874     { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
   5875       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
   5876       .access = PL3_W, .type = ARM_CP_NO_RAW,
   5877       .writefn = tlbi_aa64_alle3_write },
   5878     { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
   5879       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
   5880       .access = PL3_W, .type = ARM_CP_NO_RAW,
   5881       .writefn = tlbi_aa64_vae3_write },
   5882     { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
   5883       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
   5884       .access = PL3_W, .type = ARM_CP_NO_RAW,
   5885       .writefn = tlbi_aa64_vae3_write },
   5886 };
   5887 
   5888 #ifndef CONFIG_USER_ONLY
   5889 /* Test if system register redirection is to occur in the current state.  */
   5890 static bool redirect_for_e2h(CPUARMState *env)
   5891 {
   5892     return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
   5893 }
   5894 
   5895 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
   5896 {
   5897     CPReadFn *readfn;
   5898 
   5899     if (redirect_for_e2h(env)) {
   5900         /* Switch to the saved EL2 version of the register.  */
   5901         ri = ri->opaque;
   5902         readfn = ri->readfn;
   5903     } else {
   5904         readfn = ri->orig_readfn;
   5905     }
   5906     if (readfn == NULL) {
   5907         readfn = raw_read;
   5908     }
   5909     return readfn(env, ri);
   5910 }
   5911 
   5912 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
   5913                           uint64_t value)
   5914 {
   5915     CPWriteFn *writefn;
   5916 
   5917     if (redirect_for_e2h(env)) {
   5918         /* Switch to the saved EL2 version of the register.  */
   5919         ri = ri->opaque;
   5920         writefn = ri->writefn;
   5921     } else {
   5922         writefn = ri->orig_writefn;
   5923     }
   5924     if (writefn == NULL) {
   5925         writefn = raw_write;
   5926     }
   5927     writefn(env, ri, value);
   5928 }
   5929 
   5930 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
   5931 {
   5932     struct E2HAlias {
   5933         uint32_t src_key, dst_key, new_key;
   5934         const char *src_name, *dst_name, *new_name;
   5935         bool (*feature)(const ARMISARegisters *id);
   5936     };
   5937 
   5938 #define K(op0, op1, crn, crm, op2) \
   5939     ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
   5940 
   5941     static const struct E2HAlias aliases[] = {
   5942         { K(3, 0,  1, 0, 0), K(3, 4,  1, 0, 0), K(3, 5, 1, 0, 0),
   5943           "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
   5944         { K(3, 0,  1, 0, 2), K(3, 4,  1, 1, 2), K(3, 5, 1, 0, 2),
   5945           "CPACR", "CPTR_EL2", "CPACR_EL12" },
   5946         { K(3, 0,  2, 0, 0), K(3, 4,  2, 0, 0), K(3, 5, 2, 0, 0),
   5947           "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
   5948         { K(3, 0,  2, 0, 1), K(3, 4,  2, 0, 1), K(3, 5, 2, 0, 1),
   5949           "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
   5950         { K(3, 0,  2, 0, 2), K(3, 4,  2, 0, 2), K(3, 5, 2, 0, 2),
   5951           "TCR_EL1", "TCR_EL2", "TCR_EL12" },
   5952         { K(3, 0,  4, 0, 0), K(3, 4,  4, 0, 0), K(3, 5, 4, 0, 0),
   5953           "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
   5954         { K(3, 0,  4, 0, 1), K(3, 4,  4, 0, 1), K(3, 5, 4, 0, 1),
   5955           "ELR_EL1", "ELR_EL2", "ELR_EL12" },
   5956         { K(3, 0,  5, 1, 0), K(3, 4,  5, 1, 0), K(3, 5, 5, 1, 0),
   5957           "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
   5958         { K(3, 0,  5, 1, 1), K(3, 4,  5, 1, 1), K(3, 5, 5, 1, 1),
   5959           "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
   5960         { K(3, 0,  5, 2, 0), K(3, 4,  5, 2, 0), K(3, 5, 5, 2, 0),
   5961           "ESR_EL1", "ESR_EL2", "ESR_EL12" },
   5962         { K(3, 0,  6, 0, 0), K(3, 4,  6, 0, 0), K(3, 5, 6, 0, 0),
   5963           "FAR_EL1", "FAR_EL2", "FAR_EL12" },
   5964         { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
   5965           "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
   5966         { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
   5967           "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
   5968         { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
   5969           "VBAR", "VBAR_EL2", "VBAR_EL12" },
   5970         { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
   5971           "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
   5972         { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
   5973           "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
   5974 
   5975         /*
   5976          * Note that redirection of ZCR is mentioned in the description
   5977          * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
   5978          * not in the summary table.
   5979          */
   5980         { K(3, 0,  1, 2, 0), K(3, 4,  1, 2, 0), K(3, 5, 1, 2, 0),
   5981           "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
   5982         { K(3, 0,  1, 2, 6), K(3, 4,  1, 2, 6), K(3, 5, 1, 2, 6),
   5983           "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme },
   5984 
   5985         { K(3, 0,  5, 6, 0), K(3, 4,  5, 6, 0), K(3, 5, 5, 6, 0),
   5986           "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
   5987 
   5988         { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
   5989           "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
   5990           isar_feature_aa64_scxtnum },
   5991 
   5992         /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
   5993         /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
   5994     };
   5995 #undef K
   5996 
   5997     size_t i;
   5998 
   5999     for (i = 0; i < ARRAY_SIZE(aliases); i++) {
   6000         const struct E2HAlias *a = &aliases[i];
   6001         ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
   6002         bool ok;
   6003 
   6004         if (a->feature && !a->feature(&cpu->isar)) {
   6005             continue;
   6006         }
   6007 
   6008         src_reg = g_hash_table_lookup(cpu->cp_regs,
   6009                                       (gpointer)(uintptr_t)a->src_key);
   6010         dst_reg = g_hash_table_lookup(cpu->cp_regs,
   6011                                       (gpointer)(uintptr_t)a->dst_key);
   6012         g_assert(src_reg != NULL);
   6013         g_assert(dst_reg != NULL);
   6014 
   6015         /* Cross-compare names to detect typos in the keys.  */
   6016         g_assert(strcmp(src_reg->name, a->src_name) == 0);
   6017         g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
   6018 
   6019         /* None of the core system registers use opaque; we will.  */
   6020         g_assert(src_reg->opaque == NULL);
   6021 
   6022         /* Create alias before redirection so we dup the right data. */
   6023         new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
   6024 
   6025         new_reg->name = a->new_name;
   6026         new_reg->type |= ARM_CP_ALIAS;
   6027         /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
   6028         new_reg->access &= PL2_RW | PL3_RW;
   6029 
   6030         ok = g_hash_table_insert(cpu->cp_regs,
   6031                                  (gpointer)(uintptr_t)a->new_key, new_reg);
   6032         g_assert(ok);
   6033 
   6034         src_reg->opaque = dst_reg;
   6035         src_reg->orig_readfn = src_reg->readfn ?: raw_read;
   6036         src_reg->orig_writefn = src_reg->writefn ?: raw_write;
   6037         if (!src_reg->raw_readfn) {
   6038             src_reg->raw_readfn = raw_read;
   6039         }
   6040         if (!src_reg->raw_writefn) {
   6041             src_reg->raw_writefn = raw_write;
   6042         }
   6043         src_reg->readfn = el2_e2h_read;
   6044         src_reg->writefn = el2_e2h_write;
   6045     }
   6046 }
   6047 #endif
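
         /*
          * Runtime effect of the redirection above (illustrative): with
          * HCR_EL2.E2H set at EL2, an MRS of the "TCR_EL1" encoding
          * (3, 0, 2, 0, 2) lands in el2_e2h_read(), which swaps ri for the
          * saved TCR_EL2 reginfo via ri->opaque and so returns
          * cp15.tcr_el[2]; the newly registered "TCR_EL12" encoding
          * (3, 5, 2, 0, 2) is what now reaches the real EL1 register
          * from EL2.
          */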
   6048 
   6049 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
   6050                                      bool isread)
   6051 {
   6052     int cur_el = arm_current_el(env);
   6053 
   6054     if (cur_el < 2) {
   6055         uint64_t hcr = arm_hcr_el2_eff(env);
   6056 
   6057         if (cur_el == 0) {
   6058             if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
   6059                 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
   6060                     return CP_ACCESS_TRAP_EL2;
   6061                 }
   6062             } else {
   6063                 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
   6064                     return CP_ACCESS_TRAP;
   6065                 }
   6066                 if (hcr & HCR_TID2) {
   6067                     return CP_ACCESS_TRAP_EL2;
   6068                 }
   6069             }
   6070         } else if (hcr & HCR_TID2) {
   6071             return CP_ACCESS_TRAP_EL2;
   6072         }
   6073     }
   6074 
   6079     return CP_ACCESS_OK;
   6080 }
   6081 
   6082 /*
   6083  * Check for traps to RAS registers, which are controlled
   6084  * by HCR_EL2.TERR and SCR_EL3.TERR.
   6085  */
   6086 static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
   6087                                   bool isread)
   6088 {
   6089     int el = arm_current_el(env);
   6090 
   6091     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
   6092         return CP_ACCESS_TRAP_EL2;
   6093     }
   6094     if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
   6095         return CP_ACCESS_TRAP_EL3;
   6096     }
   6097     return CP_ACCESS_OK;
   6098 }
   6099 
   6100 static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   6101 {
   6102     int el = arm_current_el(env);
   6103 
   6104     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
   6105         return env->cp15.vdisr_el2;
   6106     }
   6107     if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
   6108         return 0; /* RAZ/WI */
   6109     }
   6110     return env->cp15.disr_el1;
   6111 }
   6112 
   6113 static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
   6114 {
   6115     int el = arm_current_el(env);
   6116 
   6117     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
   6118         env->cp15.vdisr_el2 = val;
   6119         return;
   6120     }
   6121     if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
   6122         return; /* RAZ/WI */
   6123     }
   6124     env->cp15.disr_el1 = val;
   6125 }
   6126 
   6127 /*
    6128  * Minimal RAS implementation with no Error Records,
    6129  * which means that all of the Error Record registers:
   6130  *   ERXADDR_EL1
   6131  *   ERXCTLR_EL1
   6132  *   ERXFR_EL1
   6133  *   ERXMISC0_EL1
   6134  *   ERXMISC1_EL1
   6135  *   ERXMISC2_EL1
   6136  *   ERXMISC3_EL1
   6137  *   ERXPFGCDN_EL1  (RASv1p1)
   6138  *   ERXPFGCTL_EL1  (RASv1p1)
   6139  *   ERXPFGF_EL1    (RASv1p1)
   6140  *   ERXSTATUS_EL1
   6141  * and
   6142  *   ERRSELR_EL1
   6143  * may generate UNDEFINED, which is the effect we get by not
   6144  * listing them at all.
   6145  */
   6146 static const ARMCPRegInfo minimal_ras_reginfo[] = {
   6147     { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
   6148       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
   6149       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
   6150       .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
   6151     { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
   6152       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
   6153       .access = PL1_R, .accessfn = access_terr,
   6154       .type = ARM_CP_CONST, .resetvalue = 0 },
   6155     { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
   6156       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
   6157       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
   6158     { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
   6159       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
   6160       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
   6161 };
   6162 
   6163 /*
   6164  * Return the exception level to which exceptions should be taken
   6165  * via SVEAccessTrap.  This excludes the check for whether the exception
   6166  * should be routed through AArch64.AdvSIMDFPAccessTrap.  That can easily
   6167  * be found by testing 0 < fp_exception_el < sve_exception_el.
   6168  *
    6169  * Cf. the ARM pseudocode function CheckSVEEnabled.  Note that the
   6170  * pseudocode does *not* separate out the FP trap checks, but has them
   6171  * all in one function.
   6172  */
   6173 int sve_exception_el(CPUARMState *env, int el)
   6174 {
   6175 #ifndef CONFIG_USER_ONLY
   6176     if (el <= 1 && !el_is_in_host(env, el)) {
   6177         switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
   6178         case 1:
   6179             if (el != 0) {
   6180                 break;
   6181             }
   6182             /* fall through */
   6183         case 0:
   6184         case 2:
   6185             return 1;
   6186         }
   6187     }
   6188 
   6189     if (el <= 2 && arm_is_el2_enabled(env)) {
   6190         /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
   6191         if (env->cp15.hcr_el2 & HCR_E2H) {
   6192             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
   6193             case 1:
   6194                 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
   6195                     break;
   6196                 }
   6197                 /* fall through */
   6198             case 0:
   6199             case 2:
   6200                 return 2;
   6201             }
   6202         } else {
   6203             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
   6204                 return 2;
   6205             }
   6206         }
   6207     }
   6208 
   6209     /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
   6210     if (arm_feature(env, ARM_FEATURE_EL3)
   6211         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
   6212         return 3;
   6213     }
   6214 #endif
   6215     return 0;
   6216 }
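
         /*
          * Example (sketch): at EL0 with CPACR_EL1.ZEN == 1 (EL1-only
          * enable), the switch above falls through from case 1 into the
          * return, so the access takes an SVEAccessTrap to EL1; a result
          * of 0 means no SVEAccessTrap applies.
          */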
   6217 
   6218 /*
   6219  * Return the exception level to which exceptions should be taken for SME.
    6220  * Cf. the ARM pseudocode function CheckSMEAccess.
   6221  */
   6222 int sme_exception_el(CPUARMState *env, int el)
   6223 {
   6224 #ifndef CONFIG_USER_ONLY
   6225     if (el <= 1 && !el_is_in_host(env, el)) {
   6226         switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
   6227         case 1:
   6228             if (el != 0) {
   6229                 break;
   6230             }
   6231             /* fall through */
   6232         case 0:
   6233         case 2:
   6234             return 1;
   6235         }
   6236     }
   6237 
   6238     if (el <= 2 && arm_is_el2_enabled(env)) {
   6239         /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
   6240         if (env->cp15.hcr_el2 & HCR_E2H) {
   6241             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
   6242             case 1:
   6243                 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
   6244                     break;
   6245                 }
   6246                 /* fall through */
   6247             case 0:
   6248             case 2:
   6249                 return 2;
   6250             }
   6251         } else {
   6252             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
   6253                 return 2;
   6254             }
   6255         }
   6256     }
   6257 
   6258     /* CPTR_EL3.  Since ESM is negative we must check for EL3.  */
   6259     if (arm_feature(env, ARM_FEATURE_EL3)
   6260         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
   6261         return 3;
   6262     }
   6263 #endif
   6264     return 0;
   6265 }
   6266 
   6267 /* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
   6268 static bool sme_fa64(CPUARMState *env, int el)
   6269 {
   6270     if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
   6271         return false;
   6272     }
   6273 
   6274     if (el <= 1 && !el_is_in_host(env, el)) {
   6275         if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
   6276             return false;
   6277         }
   6278     }
   6279     if (el <= 2 && arm_is_el2_enabled(env)) {
   6280         if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
   6281             return false;
   6282         }
   6283     }
   6284     if (arm_feature(env, ARM_FEATURE_EL3)) {
   6285         if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
   6286             return false;
   6287         }
   6288     }
   6289 
   6290     return true;
   6291 }
   6292 
   6293 /*
   6294  * Given that SVE is enabled, return the vector length for EL.
   6295  */
   6296 uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
   6297 {
   6298     ARMCPU *cpu = env_archcpu(env);
   6299     uint64_t *cr = env->vfp.zcr_el;
   6300     uint32_t map = cpu->sve_vq.map;
   6301     uint32_t len = ARM_MAX_VQ - 1;
   6302 
   6303     if (sm) {
   6304         cr = env->vfp.smcr_el;
   6305         map = cpu->sme_vq.map;
   6306     }
   6307 
   6308     if (el <= 1 && !el_is_in_host(env, el)) {
   6309         len = MIN(len, 0xf & (uint32_t)cr[1]);
   6310     }
   6311     if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
   6312         len = MIN(len, 0xf & (uint32_t)cr[2]);
   6313     }
   6314     if (arm_feature(env, ARM_FEATURE_EL3)) {
   6315         len = MIN(len, 0xf & (uint32_t)cr[3]);
   6316     }
   6317 
   6318     map &= MAKE_64BIT_MASK(0, len + 1);
   6319     if (map != 0) {
   6320         return 31 - clz32(map);
   6321     }
   6322 
   6323     /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
   6324     assert(sm);
   6325     return ctz32(cpu->sme_vq.map);
   6326 }
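
         /*
          * Worked example (sketch): with EL2 present but no EL3,
          * ZCR_EL1.LEN == 3, ZCR_EL2.LEN == 7 and cpu->sve_vq.map == 0b1011
          * (VQ 1, 2 and 4 supported), an EL1 access computes
          * len = MIN(15, 3, 7) = 3, map &= 0b1111 leaves 0b1011, and
          * 31 - clz32(0b1011) = 3, i.e. VQ - 1 == 3: a 512-bit vector length.
          */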
   6327 
   6328 uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
   6329 {
   6330     return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
   6331 }
   6332 
   6333 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   6334                       uint64_t value)
   6335 {
   6336     int cur_el = arm_current_el(env);
   6337     int old_len = sve_vqm1_for_el(env, cur_el);
   6338     int new_len;
   6339 
   6340     /* Bits other than [3:0] are RAZ/WI.  */
   6341     QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
   6342     raw_write(env, ri, value & 0xf);
   6343 
   6344     /*
   6345      * Because we arrived here, we know both FP and SVE are enabled;
   6346      * otherwise we would have trapped access to the ZCR_ELn register.
   6347      */
   6348     new_len = sve_vqm1_for_el(env, cur_el);
   6349     if (new_len < old_len) {
   6350         aarch64_sve_narrow_vq(env, new_len + 1);
   6351     }
   6352 }
   6353 
   6354 static const ARMCPRegInfo zcr_reginfo[] = {
   6355     { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
   6356       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
   6357       .access = PL1_RW, .type = ARM_CP_SVE,
   6358       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
   6359       .writefn = zcr_write, .raw_writefn = raw_write },
   6360     { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
   6361       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
   6362       .access = PL2_RW, .type = ARM_CP_SVE,
   6363       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
   6364       .writefn = zcr_write, .raw_writefn = raw_write },
   6365     { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
   6366       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
   6367       .access = PL3_RW, .type = ARM_CP_SVE,
   6368       .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
   6369       .writefn = zcr_write, .raw_writefn = raw_write },
   6370 };
   6371 
   6372 #ifdef TARGET_AARCH64
   6373 static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
   6374                                     bool isread)
   6375 {
   6376     int el = arm_current_el(env);
   6377 
   6378     if (el == 0) {
   6379         uint64_t sctlr = arm_sctlr(env, el);
   6380         if (!(sctlr & SCTLR_EnTP2)) {
   6381             return CP_ACCESS_TRAP;
   6382         }
   6383     }
   6384     /* TODO: FEAT_FGT */
   6385     if (el < 3
   6386         && arm_feature(env, ARM_FEATURE_EL3)
   6387         && !(env->cp15.scr_el3 & SCR_ENTP2)) {
   6388         return CP_ACCESS_TRAP_EL3;
   6389     }
   6390     return CP_ACCESS_OK;
   6391 }
   6392 
   6393 static CPAccessResult access_esm(CPUARMState *env, const ARMCPRegInfo *ri,
   6394                                  bool isread)
   6395 {
   6396     /* TODO: FEAT_FGT for SMPRI_EL1 but not SMPRIMAP_EL2 */
   6397     if (arm_current_el(env) < 3
   6398         && arm_feature(env, ARM_FEATURE_EL3)
   6399         && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
   6400         return CP_ACCESS_TRAP_EL3;
   6401     }
   6402     return CP_ACCESS_OK;
   6403 }
   6404 
   6405 static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   6406                        uint64_t value)
   6407 {
   6408     helper_set_pstate_sm(env, FIELD_EX64(value, SVCR, SM));
   6409     helper_set_pstate_za(env, FIELD_EX64(value, SVCR, ZA));
   6410     arm_rebuild_hflags(env);
   6411 }
   6412 
   6413 static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   6414                        uint64_t value)
   6415 {
   6416     int cur_el = arm_current_el(env);
   6417     int old_len = sve_vqm1_for_el(env, cur_el);
   6418     int new_len;
   6419 
   6420     QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
   6421     value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
   6422     raw_write(env, ri, value);
   6423 
   6424     /*
   6425      * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
   6426      * when SVL is widened (old values kept, or zeros).  Choose to keep the
   6427      * current values for simplicity.  But for QEMU internals, we must still
   6428      * apply the narrower SVL to the Zregs and Pregs -- see the comment
   6429      * above aarch64_sve_narrow_vq.
   6430      */
   6431     new_len = sve_vqm1_for_el(env, cur_el);
   6432     if (new_len < old_len) {
   6433         aarch64_sve_narrow_vq(env, new_len + 1);
   6434     }
   6435 }
   6436 
   6437 static const ARMCPRegInfo sme_reginfo[] = {
   6438     { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
   6439       .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
   6440       .access = PL0_RW, .accessfn = access_tpidr2,
   6441       .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
   6442     { .name = "SVCR", .state = ARM_CP_STATE_AA64,
   6443       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
   6444       .access = PL0_RW, .type = ARM_CP_SME,
   6445       .fieldoffset = offsetof(CPUARMState, svcr),
   6446       .writefn = svcr_write, .raw_writefn = raw_write },
   6447     { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
   6448       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
   6449       .access = PL1_RW, .type = ARM_CP_SME,
   6450       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
   6451       .writefn = smcr_write, .raw_writefn = raw_write },
   6452     { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
   6453       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
   6454       .access = PL2_RW, .type = ARM_CP_SME,
   6455       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
   6456       .writefn = smcr_write, .raw_writefn = raw_write },
   6457     { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
   6458       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
   6459       .access = PL3_RW, .type = ARM_CP_SME,
   6460       .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
   6461       .writefn = smcr_write, .raw_writefn = raw_write },
   6462     { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
   6463       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
   6464       .access = PL1_R, .accessfn = access_aa64_tid1,
   6465       /*
   6466        * IMPLEMENTOR = 0 (software)
   6467        * REVISION    = 0 (implementation defined)
   6468        * SMPS        = 0 (no streaming execution priority in QEMU)
   6469        * AFFINITY    = 0 (streaming SVE mode not shared with other PEs)
   6470        */
   6471       .type = ARM_CP_CONST, .resetvalue = 0, },
   6472     /*
   6473      * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES0.
   6474      */
   6475     { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
   6476       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
   6477       .access = PL1_RW, .accessfn = access_esm,
   6478       .type = ARM_CP_CONST, .resetvalue = 0 },
   6479     { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
   6480       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
   6481       .access = PL2_RW, .accessfn = access_esm,
   6482       .type = ARM_CP_CONST, .resetvalue = 0 },
   6483 };
   6484 #endif /* TARGET_AARCH64 */
   6485 
   6486 static void define_pmu_regs(ARMCPU *cpu)
   6487 {
   6488     /*
   6489      * v7 performance monitor control register: same implementor
   6490      * field as main ID register, and we implement four counters in
   6491      * addition to the cycle count register.
   6492      */
   6493     unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
   6494     ARMCPRegInfo pmcr = {
   6495         .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
   6496         .access = PL0_RW,
   6497         .type = ARM_CP_IO | ARM_CP_ALIAS,
   6498         .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
   6499         .accessfn = pmreg_access, .writefn = pmcr_write,
   6500         .raw_writefn = raw_write,
   6501     };
   6502     ARMCPRegInfo pmcr64 = {
   6503         .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
   6504         .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
   6505         .access = PL0_RW, .accessfn = pmreg_access,
   6506         .type = ARM_CP_IO,
   6507         .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
   6508         .resetvalue = cpu->isar.reset_pmcr_el0,
   6509         .writefn = pmcr_write, .raw_writefn = raw_write,
   6510     };
   6511 
   6512     define_one_arm_cp_reg(cpu, &pmcr);
   6513     define_one_arm_cp_reg(cpu, &pmcr64);
   6514     for (i = 0; i < pmcrn; i++) {
   6515         char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
   6516         char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
   6517         char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
   6518         char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
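            /*
             * Event counter n is encoded with CRm = 0b10:n[4:3]
             * (0b11:n[4:3] for the TYPER registers) and opc2 = n[2:0];
             * e.g. for i = 10, .crm = 8 | (3 & (10 >> 3)) = 9 and
             * .opc2 = 10 & 7 = 2.
             */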
   6519         ARMCPRegInfo pmev_regs[] = {
   6520             { .name = pmevcntr_name, .cp = 15, .crn = 14,
   6521               .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
   6522               .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
   6523               .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
   6524               .accessfn = pmreg_access_xevcntr },
   6525             { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
   6526               .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
   6527               .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
   6528               .type = ARM_CP_IO,
   6529               .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
   6530               .raw_readfn = pmevcntr_rawread,
   6531               .raw_writefn = pmevcntr_rawwrite },
   6532             { .name = pmevtyper_name, .cp = 15, .crn = 14,
   6533               .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
   6534               .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
   6535               .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
   6536               .accessfn = pmreg_access },
   6537             { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
   6538               .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
   6539               .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
   6540               .type = ARM_CP_IO,
   6541               .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
   6542               .raw_writefn = pmevtyper_rawwrite },
   6543         };
   6544         define_arm_cp_regs(cpu, pmev_regs);
   6545         g_free(pmevcntr_name);
   6546         g_free(pmevcntr_el0_name);
   6547         g_free(pmevtyper_name);
   6548         g_free(pmevtyper_el0_name);
   6549     }
   6550     if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
   6551         ARMCPRegInfo v81_pmu_regs[] = {
   6552             { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
   6553               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
   6554               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   6555               .resetvalue = extract64(cpu->pmceid0, 32, 32) },
   6556             { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
   6557               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
   6558               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   6559               .resetvalue = extract64(cpu->pmceid1, 32, 32) },
   6560         };
   6561         define_arm_cp_regs(cpu, v81_pmu_regs);
   6562     }
   6563     if (cpu_isar_feature(any_pmuv3p4, cpu)) {
   6564         static const ARMCPRegInfo v84_pmmir = {
   6565             .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
   6566             .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
   6567             .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   6568             .resetvalue = 0
   6569         };
   6570         define_one_arm_cp_reg(cpu, &v84_pmmir);
   6571     }
   6572 }
   6573 
   6574 /* We don't know until after realize whether there's a GICv3
   6575  * attached, and that is what registers the gicv3 sysregs.
   6576  * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1 and
   6577  * ID_AA64PFR0_EL1 at runtime.
   6578  */
   6579 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
   6580 {
   6581     ARMCPU *cpu = env_archcpu(env);
   6582     uint64_t pfr1 = cpu->isar.id_pfr1;
   6583 
   6584     if (env->gicv3state) {
   6585         pfr1 |= 1 << 28;
   6586     }
   6587     return pfr1;
   6588 }
   6589 
   6590 #ifndef CONFIG_USER_ONLY
   6591 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
   6592 {
   6593     ARMCPU *cpu = env_archcpu(env);
   6594     uint64_t pfr0 = cpu->isar.id_aa64pfr0;
   6595 
   6596     if (env->gicv3state) {
   6597         pfr0 |= 1 << 24;
   6598     }
   6599     return pfr0;
   6600 }
   6601 #endif
   6602 
   6603 /* Shared logic between LORID and the rest of the LOR* registers.
   6604  * Secure state exclusion has already been dealt with.
   6605  */
   6606 static CPAccessResult access_lor_ns(CPUARMState *env,
   6607                                     const ARMCPRegInfo *ri, bool isread)
   6608 {
   6609     int el = arm_current_el(env);
   6610 
   6611     if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
   6612         return CP_ACCESS_TRAP_EL2;
   6613     }
   6614     if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
   6615         return CP_ACCESS_TRAP_EL3;
   6616     }
   6617     return CP_ACCESS_OK;
   6618 }
   6619 
   6620 static CPAccessResult access_lor_other(CPUARMState *env,
   6621                                        const ARMCPRegInfo *ri, bool isread)
   6622 {
   6623     if (arm_is_secure_below_el3(env)) {
   6624         /* Access denied in secure mode.  */
   6625         return CP_ACCESS_TRAP;
   6626     }
   6627     return access_lor_ns(env, ri, isread);
   6628 }
   6629 
   6630 /*
   6631  * A trivial implementation of ARMv8.1-LOR leaves all of these
   6632  * registers fixed at 0, which indicates that there are zero
   6633  * supported Limited Ordering regions.
   6634  */
   6635 static const ARMCPRegInfo lor_reginfo[] = {
   6636     { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
   6637       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
   6638       .access = PL1_RW, .accessfn = access_lor_other,
   6639       .type = ARM_CP_CONST, .resetvalue = 0 },
   6640     { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
   6641       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
   6642       .access = PL1_RW, .accessfn = access_lor_other,
   6643       .type = ARM_CP_CONST, .resetvalue = 0 },
   6644     { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
   6645       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
   6646       .access = PL1_RW, .accessfn = access_lor_other,
   6647       .type = ARM_CP_CONST, .resetvalue = 0 },
   6648     { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
   6649       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
   6650       .access = PL1_RW, .accessfn = access_lor_other,
   6651       .type = ARM_CP_CONST, .resetvalue = 0 },
   6652     { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
   6653       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
   6654       .access = PL1_R, .accessfn = access_lor_ns,
   6655       .type = ARM_CP_CONST, .resetvalue = 0 },
   6656 };
   6657 
   6658 #ifdef TARGET_AARCH64
   6659 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
   6660                                    bool isread)
   6661 {
   6662     int el = arm_current_el(env);
   6663 
   6664     if (el < 2 &&
   6665         arm_is_el2_enabled(env) &&
   6666         !(arm_hcr_el2_eff(env) & HCR_APK)) {
   6667         return CP_ACCESS_TRAP_EL2;
   6668     }
   6669     if (el < 3 &&
   6670         arm_feature(env, ARM_FEATURE_EL3) &&
   6671         !(env->cp15.scr_el3 & SCR_APK)) {
   6672         return CP_ACCESS_TRAP_EL3;
   6673     }
   6674     return CP_ACCESS_OK;
   6675 }
   6676 
   6677 static const ARMCPRegInfo pauth_reginfo[] = {
   6678     { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
   6679       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
   6680       .access = PL1_RW, .accessfn = access_pauth,
   6681       .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
   6682     { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
   6683       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
   6684       .access = PL1_RW, .accessfn = access_pauth,
   6685       .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
   6686     { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
   6687       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
   6688       .access = PL1_RW, .accessfn = access_pauth,
   6689       .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
   6690     { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
   6691       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
   6692       .access = PL1_RW, .accessfn = access_pauth,
   6693       .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
   6694     { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
   6695       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
   6696       .access = PL1_RW, .accessfn = access_pauth,
   6697       .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
   6698     { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
   6699       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
   6700       .access = PL1_RW, .accessfn = access_pauth,
   6701       .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
   6702     { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
   6703       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
   6704       .access = PL1_RW, .accessfn = access_pauth,
   6705       .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
   6706     { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
   6707       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
   6708       .access = PL1_RW, .accessfn = access_pauth,
   6709       .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
   6710     { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
   6711       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
   6712       .access = PL1_RW, .accessfn = access_pauth,
   6713       .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
   6714     { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
   6715       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
   6716       .access = PL1_RW, .accessfn = access_pauth,
   6717       .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
   6718 };
   6719 
   6720 static const ARMCPRegInfo tlbirange_reginfo[] = {
   6721     { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
   6722       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
   6723       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6724       .writefn = tlbi_aa64_rvae1is_write },
   6725     { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
   6726       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
   6727       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6728       .writefn = tlbi_aa64_rvae1is_write },
   6729     { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
   6730       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
   6731       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6732       .writefn = tlbi_aa64_rvae1is_write },
   6733     { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
   6734       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
   6735       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6736       .writefn = tlbi_aa64_rvae1is_write },
   6737     { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
   6738       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
   6739       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6740       .writefn = tlbi_aa64_rvae1is_write },
   6741     { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
   6742       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
   6743       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6744       .writefn = tlbi_aa64_rvae1is_write },
   6745     { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
   6746       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
   6747       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6748       .writefn = tlbi_aa64_rvae1is_write },
   6749     { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
   6750       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
   6751       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6752       .writefn = tlbi_aa64_rvae1is_write },
   6753     { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
   6754       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
   6755       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6756       .writefn = tlbi_aa64_rvae1_write },
   6757     { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
   6758       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
   6759       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6760       .writefn = tlbi_aa64_rvae1_write },
   6761     { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
   6762       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
   6763       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6764       .writefn = tlbi_aa64_rvae1_write },
   6765     { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
   6766       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
   6767       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6768       .writefn = tlbi_aa64_rvae1_write },
   6769     { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
   6770       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
   6771       .access = PL2_W, .type = ARM_CP_NO_RAW,
   6772       .writefn = tlbi_aa64_ripas2e1is_write },
   6773     { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
   6774       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
   6775       .access = PL2_W, .type = ARM_CP_NO_RAW,
   6776       .writefn = tlbi_aa64_ripas2e1is_write },
   6777     { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
   6778       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
   6779       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   6780       .writefn = tlbi_aa64_rvae2is_write },
   6781     { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
   6782       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
   6783       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   6784       .writefn = tlbi_aa64_rvae2is_write },
   6785     { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
   6786       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
   6787       .access = PL2_W, .type = ARM_CP_NO_RAW,
   6788       .writefn = tlbi_aa64_ripas2e1_write },
   6789     { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
   6790       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
   6791       .access = PL2_W, .type = ARM_CP_NO_RAW,
   6792       .writefn = tlbi_aa64_ripas2e1_write },
   6793     { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
   6794       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
   6795       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   6796       .writefn = tlbi_aa64_rvae2is_write },
   6797     { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
   6798       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
   6799       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   6800       .writefn = tlbi_aa64_rvae2is_write },
   6801     { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
   6802       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
   6803       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   6804       .writefn = tlbi_aa64_rvae2_write },
   6805     { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
   6806       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
   6807       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   6808       .writefn = tlbi_aa64_rvae2_write },
   6809     { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
   6810       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
   6811       .access = PL3_W, .type = ARM_CP_NO_RAW,
   6812       .writefn = tlbi_aa64_rvae3is_write },
   6813     { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
   6814       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
   6815       .access = PL3_W, .type = ARM_CP_NO_RAW,
   6816       .writefn = tlbi_aa64_rvae3is_write },
   6817     { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
   6818       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
   6819       .access = PL3_W, .type = ARM_CP_NO_RAW,
   6820       .writefn = tlbi_aa64_rvae3is_write },
   6821     { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
   6822       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
   6823       .access = PL3_W, .type = ARM_CP_NO_RAW,
   6824       .writefn = tlbi_aa64_rvae3is_write },
   6825     { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
   6826       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
   6827       .access = PL3_W, .type = ARM_CP_NO_RAW,
   6828       .writefn = tlbi_aa64_rvae3_write },
   6829     { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
   6830       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
   6831       .access = PL3_W, .type = ARM_CP_NO_RAW,
   6832       .writefn = tlbi_aa64_rvae3_write },
   6833 };
   6834 
   6835 static const ARMCPRegInfo tlbios_reginfo[] = {
   6836     { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
   6837       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
   6838       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6839       .writefn = tlbi_aa64_vmalle1is_write },
   6840     { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
   6841       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
   6842       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6843       .writefn = tlbi_aa64_vae1is_write },
   6844     { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
   6845       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
   6846       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6847       .writefn = tlbi_aa64_vmalle1is_write },
   6848     { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
   6849       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
   6850       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6851       .writefn = tlbi_aa64_vae1is_write },
   6852     { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
   6853       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
   6854       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6855       .writefn = tlbi_aa64_vae1is_write },
   6856     { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
   6857       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
   6858       .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
   6859       .writefn = tlbi_aa64_vae1is_write },
   6860     { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
   6861       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
   6862       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   6863       .writefn = tlbi_aa64_alle2is_write },
   6864     { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
   6865       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
   6866       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   6867       .writefn = tlbi_aa64_vae2is_write },
   6868     { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
   6869       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
   6870       .access = PL2_W, .type = ARM_CP_NO_RAW,
   6871       .writefn = tlbi_aa64_alle1is_write },
   6872     { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
   6873       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
   6874       .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
   6875       .writefn = tlbi_aa64_vae2is_write },
   6876     { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
   6877       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
   6878       .access = PL2_W, .type = ARM_CP_NO_RAW,
   6879       .writefn = tlbi_aa64_alle1is_write },
   6880     { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
   6881       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
   6882       .access = PL2_W, .type = ARM_CP_NOP },
   6883     { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
   6884       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
   6885       .access = PL2_W, .type = ARM_CP_NOP },
   6886     { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
   6887       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
   6888       .access = PL2_W, .type = ARM_CP_NOP },
   6889     { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
   6890       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
   6891       .access = PL2_W, .type = ARM_CP_NOP },
   6892     { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
   6893       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
   6894       .access = PL3_W, .type = ARM_CP_NO_RAW,
   6895       .writefn = tlbi_aa64_alle3is_write },
   6896     { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
   6897       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
   6898       .access = PL3_W, .type = ARM_CP_NO_RAW,
   6899       .writefn = tlbi_aa64_vae3is_write },
   6900     { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
   6901       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
   6902       .access = PL3_W, .type = ARM_CP_NO_RAW,
   6903       .writefn = tlbi_aa64_vae3is_write },
   6904 };
   6905 
   6906 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
   6907 {
   6908     Error *err = NULL;
   6909     uint64_t ret;
   6910 
   6911     /* Success sets NZCV = 0000.  */
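            /* QEMU stores Z inverted: env->ZF != 0 means the Z flag is clear. */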
   6912     env->NF = env->CF = env->VF = 0, env->ZF = 1;
   6913 
   6914     if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
   6915         /*
   6916          * ??? Failed for unknown reasons in the crypto subsystem.
   6917          * The best we can do is log the reason and return the
   6918          * timed-out indication to the guest.  There is no reason
   6919          * we know to expect this failure to be transitory, so the
   6920          * guest may well hang retrying the operation.
   6921          */
   6922         qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s\n",
   6923                       ri->name, error_get_pretty(err));
   6924         error_free(err);
   6925 
   6926         env->ZF = 0; /* NZCV = 0100 */
   6927         return 0;
   6928     }
   6929     return ret;
   6930 }
   6931 
   6932 /* We do not support re-seeding, so the two registers operate the same.  */
   6933 static const ARMCPRegInfo rndr_reginfo[] = {
   6934     { .name = "RNDR", .state = ARM_CP_STATE_AA64,
   6935       .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
   6936       .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
   6937       .access = PL0_R, .readfn = rndr_readfn },
   6938     { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
   6939       .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
   6940       .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
   6941       .access = PL0_R, .readfn = rndr_readfn },
   6942 };
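
        /*
         * Illustrative guest-side usage only (not QEMU code): read the
         * register and derive success from the Z flag, e.g.
         *
         *     uint64_t val;
         *     int ok;
         *     asm volatile("mrs %0, s3_3_c2_c4_0\n\t"  // RNDR
         *                  "cset %w1, ne"               // ok iff Z == 0
         *                  : "=r"(val), "=r"(ok) :: "cc");
         */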
   6943 
   6944 #ifndef CONFIG_USER_ONLY
   6945 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
   6946                           uint64_t value)
   6947 {
   6948     ARMCPU *cpu = env_archcpu(env);
   6949     /* CTR_EL0 System register -> DminLine, bits [19:16] */
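            /* DminLine is log2 of the line size in words: e.g. 4 -> 64 bytes. */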
   6950     uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
   6951     uint64_t vaddr_in = (uint64_t) value;
   6952     uint64_t vaddr = vaddr_in & ~(dline_size - 1);
   6953     void *haddr;
   6954     int mem_idx = cpu_mmu_index(env, false);
   6955 
   6956     /* This won't be crossing page boundaries */
   6957     haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
   6958     if (haddr) {
   6959 
   6960         ram_addr_t offset;
   6961         MemoryRegion *mr;
   6962 
   6963         /* RCU lock is already being held */
   6964         mr = memory_region_from_host(haddr, &offset);
   6965 
   6966         if (mr) {
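                    /*
                     * This pushes the line out to the backing store (e.g.
                     * an msync for file-backed RAM), which is what
                     * provides the persistence DC CVAP/CVADP promise.
                     */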
   6967             memory_region_writeback(mr, offset, dline_size);
   6968         }
   6969     }
   6970 }
   6971 
   6972 static const ARMCPRegInfo dcpop_reg[] = {
   6973     { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
   6974       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
   6975       .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
   6976       .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
   6977 };
   6978 
   6979 static const ARMCPRegInfo dcpodp_reg[] = {
   6980     { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
   6981       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
   6982       .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
   6983       .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
   6984 };
   6985 #endif /* CONFIG_USER_ONLY */
   6986 
   6987 static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
   6988                                        bool isread)
   6989 {
   6990     if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
   6991         return CP_ACCESS_TRAP_EL2;
   6992     }
   6993 
   6994     return CP_ACCESS_OK;
   6995 }
   6996 
   6997 static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
   6998                                  bool isread)
   6999 {
   7000     int el = arm_current_el(env);
   7001 
   7002     if (el < 2 && arm_is_el2_enabled(env)) {
   7003         uint64_t hcr = arm_hcr_el2_eff(env);
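                /*
                 * HCR_EL2.ATA == 0 traps these registers, except in the
                 * E2H && TGE regime.
                 */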
   7004         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
   7005             return CP_ACCESS_TRAP_EL2;
   7006         }
   7007     }
   7008     if (el < 3 &&
   7009         arm_feature(env, ARM_FEATURE_EL3) &&
   7010         !(env->cp15.scr_el3 & SCR_ATA)) {
   7011         return CP_ACCESS_TRAP_EL3;
   7012     }
   7013     return CP_ACCESS_OK;
   7014 }
   7015 
   7016 static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
   7017 {
   7018     return env->pstate & PSTATE_TCO;
   7019 }
   7020 
   7021 static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
   7022 {
   7023     env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
   7024 }
   7025 
   7026 static const ARMCPRegInfo mte_reginfo[] = {
   7027     { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
   7028       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
   7029       .access = PL1_RW, .accessfn = access_mte,
   7030       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
   7031     { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
   7032       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
   7033       .access = PL1_RW, .accessfn = access_mte,
   7034       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
   7035     { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
   7036       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
   7037       .access = PL2_RW, .accessfn = access_mte,
   7038       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
   7039     { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
   7040       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
   7041       .access = PL3_RW,
   7042       .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
   7043     { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
   7044       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
   7045       .access = PL1_RW, .accessfn = access_mte,
   7046       .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
   7047     { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
   7048       .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
   7049       .access = PL1_RW, .accessfn = access_mte,
   7050       .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
   7051     { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
   7052       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
   7053       .access = PL1_R, .accessfn = access_aa64_tid5,
   7054       .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
   7055     { .name = "TCO", .state = ARM_CP_STATE_AA64,
   7056       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
   7057       .type = ARM_CP_NO_RAW,
   7058       .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
   7059     { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
   7060       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
   7061       .type = ARM_CP_NOP, .access = PL1_W,
   7062       .accessfn = aa64_cacheop_poc_access },
   7063     { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
   7064       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
   7065       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   7066     { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
   7067       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
   7068       .type = ARM_CP_NOP, .access = PL1_W,
   7069       .accessfn = aa64_cacheop_poc_access },
   7070     { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
   7071       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
   7072       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   7073     { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
   7074       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
   7075       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   7076     { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
   7077       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
   7078       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   7079     { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
   7080       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
   7081       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   7082     { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
   7083       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
   7084       .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
   7085 };
   7086 
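        /*
         * RAZ/WI stand-in for TCO: ARM_CP_CONST reads as the (zero)
         * resetvalue and ignores writes. Used, as far as the registration
         * logic goes, when full MTE is not implemented.
         */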
   7087 static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
   7088     { .name = "TCO", .state = ARM_CP_STATE_AA64,
   7089       .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
   7090       .type = ARM_CP_CONST, .access = PL0_RW, },
   7091 };
   7092 
   7093 static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
   7094     { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
   7095       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
   7096       .type = ARM_CP_NOP, .access = PL0_W,
   7097       .accessfn = aa64_cacheop_poc_access },
   7098     { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
   7099       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
   7100       .type = ARM_CP_NOP, .access = PL0_W,
   7101       .accessfn = aa64_cacheop_poc_access },
   7102     { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
   7103       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
   7104       .type = ARM_CP_NOP, .access = PL0_W,
   7105       .accessfn = aa64_cacheop_poc_access },
   7106     { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
   7107       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
   7108       .type = ARM_CP_NOP, .access = PL0_W,
   7109       .accessfn = aa64_cacheop_poc_access },
   7110     { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
   7111       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
   7112       .type = ARM_CP_NOP, .access = PL0_W,
   7113       .accessfn = aa64_cacheop_poc_access },
   7114     { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
   7115       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
   7116       .type = ARM_CP_NOP, .access = PL0_W,
   7117       .accessfn = aa64_cacheop_poc_access },
   7118     { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
   7119       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
   7120       .type = ARM_CP_NOP, .access = PL0_W,
   7121       .accessfn = aa64_cacheop_poc_access },
   7122     { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
   7123       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
   7124       .type = ARM_CP_NOP, .access = PL0_W,
   7125       .accessfn = aa64_cacheop_poc_access },
   7126     { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
   7127       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
   7128       .access = PL0_W, .type = ARM_CP_DC_GVA,
   7129 #ifndef CONFIG_USER_ONLY
   7130       /* Avoid overhead of an access check that always passes in user-mode */
   7131       .accessfn = aa64_zva_access,
   7132 #endif
   7133     },
   7134     { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
   7135       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
   7136       .access = PL0_W, .type = ARM_CP_DC_GZVA,
   7137 #ifndef CONFIG_USER_ONLY
   7138       /* Avoid overhead of an access check that always passes in user-mode */
   7139       .accessfn = aa64_zva_access,
   7140 #endif
   7141     },
   7142 };
   7143 
   7144 static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
   7145                                      bool isread)
   7146 {
   7147     uint64_t hcr = arm_hcr_el2_eff(env);
   7148     int el = arm_current_el(env);
   7149 
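            /*
             * In priority order: TSCXT traps (SCTLR_EL1 for EL0, routed
             * to EL2 under TGE; SCTLR_EL2 below EL2), then a clear EnSCXT
             * bit in HCR_EL2 traps to EL2 and in SCR_EL3 traps to EL3.
             */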
   7150     if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
   7151         if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
   7152             if (hcr & HCR_TGE) {
   7153                 return CP_ACCESS_TRAP_EL2;
   7154             }
   7155             return CP_ACCESS_TRAP;
   7156         }
   7157     } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
   7158         return CP_ACCESS_TRAP_EL2;
   7159     }
   7160     if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
   7161         return CP_ACCESS_TRAP_EL2;
   7162     }
   7163     if (el < 3
   7164         && arm_feature(env, ARM_FEATURE_EL3)
   7165         && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
   7166         return CP_ACCESS_TRAP_EL3;
   7167     }
   7168     return CP_ACCESS_OK;
   7169 }
   7170 
   7171 static const ARMCPRegInfo scxtnum_reginfo[] = {
   7172     { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
   7173       .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
   7174       .access = PL0_RW, .accessfn = access_scxtnum,
   7175       .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
   7176     { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
   7177       .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
   7178       .access = PL1_RW, .accessfn = access_scxtnum,
   7179       .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
   7180     { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
   7181       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
   7182       .access = PL2_RW, .accessfn = access_scxtnum,
   7183       .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
   7184     { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
   7185       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
   7186       .access = PL3_RW,
   7187       .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
   7188 };
   7189 #endif /* TARGET_AARCH64 */
   7190 
   7191 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
   7192                                      bool isread)
   7193 {
   7194     int el = arm_current_el(env);
   7195 
   7196     if (el == 0) {
   7197         uint64_t sctlr = arm_sctlr(env, el);
   7198         if (!(sctlr & SCTLR_EnRCTX)) {
   7199             return CP_ACCESS_TRAP;
   7200         }
   7201     } else if (el == 1) {
   7202         uint64_t hcr = arm_hcr_el2_eff(env);
   7203         if (hcr & HCR_NV) {
   7204             return CP_ACCESS_TRAP_EL2;
   7205         }
   7206     }
   7207     return CP_ACCESS_OK;
   7208 }
   7209 
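        /*
         * QEMU performs no execution prediction, so there is no context
         * to restrict: the RCTX operations are NOPs and only the access
         * checks above are meaningful.
         */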
   7210 static const ARMCPRegInfo predinv_reginfo[] = {
   7211     { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
   7212       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
   7213       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
   7214     { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
   7215       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
   7216       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
   7217     { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
   7218       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
   7219       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
   7220     /*
   7221      * Note the AArch32 opcodes have a different OPC1.
   7222      */
   7223     { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
   7224       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
   7225       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
   7226     { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
   7227       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
   7228       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
   7229     { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
   7230       .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
   7231       .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
   7232 };
   7233 
   7234 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
   7235 {
   7236     /* Read the high 32 bits of the current CCSIDR */
   7237     return extract64(ccsidr_read(env, ri), 32, 32);
   7238 }
   7239 
   7240 static const ARMCPRegInfo ccsidr2_reginfo[] = {
   7241     { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
   7242       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
   7243       .access = PL1_R,
   7244       .accessfn = access_aa64_tid2,
   7245       .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
   7246 };
   7247 
   7248 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
   7249                                        bool isread)
   7250 {
   7251     if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
   7252         return CP_ACCESS_TRAP_EL2;
   7253     }
   7254 
   7255     return CP_ACCESS_OK;
   7256 }
   7257 
   7258 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
   7259                                        bool isread)
   7260 {
   7261     if (arm_feature(env, ARM_FEATURE_V8)) {
   7262         return access_aa64_tid3(env, ri, isread);
   7263     }
   7264 
   7265     return CP_ACCESS_OK;
   7266 }
   7267 
   7268 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
   7269                                      bool isread)
   7270 {
   7271     if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
   7272         return CP_ACCESS_TRAP_EL2;
   7273     }
   7274 
   7275     return CP_ACCESS_OK;
   7276 }
   7277 
   7278 static CPAccessResult access_joscr_jmcr(CPUARMState *env,
   7279                                         const ARMCPRegInfo *ri, bool isread)
   7280 {
   7281     /*
   7282      * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
   7283      * in v7A, not in v8A.
   7284      */
   7285     if (!arm_feature(env, ARM_FEATURE_V8) &&
   7286         arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
   7287         (env->cp15.hstr_el2 & HSTR_TJDBX)) {
   7288         return CP_ACCESS_TRAP_EL2;
   7289     }
   7290     return CP_ACCESS_OK;
   7291 }
   7292 
   7293 static const ARMCPRegInfo jazelle_regs[] = {
   7294     { .name = "JIDR",
   7295       .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
   7296       .access = PL1_R, .accessfn = access_jazelle,
   7297       .type = ARM_CP_CONST, .resetvalue = 0 },
   7298     { .name = "JOSCR",
   7299       .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
   7300       .accessfn = access_joscr_jmcr,
   7301       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   7302     { .name = "JMCR",
   7303       .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
   7304       .accessfn = access_joscr_jmcr,
   7305       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
   7306 };
   7307 
   7308 static const ARMCPRegInfo contextidr_el2 = {
   7309     .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
   7310     .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
   7311     .access = PL2_RW,
   7312     .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
   7313 };
   7314 
   7315 static const ARMCPRegInfo vhe_reginfo[] = {
   7316     { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
   7317       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
   7318       .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
   7319       .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
   7320 #ifndef CONFIG_USER_ONLY
   7321     { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
   7322       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
   7323       .fieldoffset =
   7324         offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
   7325       .type = ARM_CP_IO, .access = PL2_RW,
   7326       .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
   7327     { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
   7328       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
   7329       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
   7330       .resetfn = gt_hv_timer_reset,
   7331       .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
   7332     { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
   7333       .type = ARM_CP_IO,
   7334       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
   7335       .access = PL2_RW,
   7336       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
   7337       .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
   7338     { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
   7339       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
   7340       .type = ARM_CP_IO | ARM_CP_ALIAS,
   7341       .access = PL2_RW, .accessfn = e2h_access,
   7342       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
   7343       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
   7344     { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
   7345       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
   7346       .type = ARM_CP_IO | ARM_CP_ALIAS,
   7347       .access = PL2_RW, .accessfn = e2h_access,
   7348       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
   7349       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
   7350     { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
   7351       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
   7352       .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
   7353       .access = PL2_RW, .accessfn = e2h_access,
   7354       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
   7355     { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
   7356       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
   7357       .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
   7358       .access = PL2_RW, .accessfn = e2h_access,
   7359       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
   7360     { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
   7361       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
   7362       .type = ARM_CP_IO | ARM_CP_ALIAS,
   7363       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
   7364       .access = PL2_RW, .accessfn = e2h_access,
   7365       .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
   7366     { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
   7367       .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
   7368       .type = ARM_CP_IO | ARM_CP_ALIAS,
   7369       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
   7370       .access = PL2_RW, .accessfn = e2h_access,
   7371       .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
   7372 #endif
   7373 };
   7374 
   7375 #ifndef CONFIG_USER_ONLY
   7376 static const ARMCPRegInfo ats1e1_reginfo[] = {
   7377     { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
   7378       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
   7379       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   7380       .writefn = ats_write64 },
   7381     { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
   7382       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
   7383       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   7384       .writefn = ats_write64 },
   7385 };
   7386 
   7387 static const ARMCPRegInfo ats1cp_reginfo[] = {
   7388     { .name = "ATS1CPRP",
   7389       .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
   7390       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   7391       .writefn = ats_write },
   7392     { .name = "ATS1CPWP",
   7393       .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
   7394       .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
   7395       .writefn = ats_write },
   7396 };
   7397 #endif
   7398 
   7399 /*
   7400  * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
   7401  * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
   7402  * is non-zero, which is never the case for ARMv7, is optional in
   7403  * ARMv8, and is mandatory for ARMv8.2 and up.
   7404  * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
   7405  * implementation is RAZ/WI we can ignore this detail, as we
   7406  * do for ACTLR.
   7407  */
   7408 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
   7409     { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
   7410       .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
   7411       .access = PL1_RW, .accessfn = access_tacr,
   7412       .type = ARM_CP_CONST, .resetvalue = 0 },
   7413     { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
   7414       .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
   7415       .access = PL2_RW, .type = ARM_CP_CONST,
   7416       .resetvalue = 0 },
   7417 };
   7418 
   7419 void register_cp_regs_for_features(ARMCPU *cpu)
   7420 {
   7421     /* Register all the coprocessor registers based on feature bits */
   7422     CPUARMState *env = &cpu->env;
   7423     if (arm_feature(env, ARM_FEATURE_M)) {
   7424         /* M profile has no coprocessor registers */
   7425         return;
   7426     }
   7427 
   7428     define_arm_cp_regs(cpu, cp_reginfo);
   7429     if (!arm_feature(env, ARM_FEATURE_V8)) {
   7430         /* Must go early as it is full of wildcards that may be
   7431          * overridden by later definitions.
   7432          */
   7433         define_arm_cp_regs(cpu, not_v8_cp_reginfo);
   7434     }
   7435 
   7436     if (arm_feature(env, ARM_FEATURE_V6)) {
   7437         /* The ID registers all have impdef reset values */
   7438         ARMCPRegInfo v6_idregs[] = {
   7439             { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
   7440               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
   7441               .access = PL1_R, .type = ARM_CP_CONST,
   7442               .accessfn = access_aa32_tid3,
   7443               .resetvalue = cpu->isar.id_pfr0 },
   7444             /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
   7445              * the value of the GIC field until after we define these regs.
   7446              */
   7447             { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
   7448               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
   7449               .access = PL1_R, .type = ARM_CP_NO_RAW,
   7450               .accessfn = access_aa32_tid3,
   7451               .readfn = id_pfr1_read,
   7452               .writefn = arm_cp_write_ignore },
   7453             { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
   7454               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
   7455               .access = PL1_R, .type = ARM_CP_CONST,
   7456               .accessfn = access_aa32_tid3,
   7457               .resetvalue = cpu->isar.id_dfr0 },
   7458             { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
   7459               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
   7460               .access = PL1_R, .type = ARM_CP_CONST,
   7461               .accessfn = access_aa32_tid3,
   7462               .resetvalue = cpu->id_afr0 },
   7463             { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
   7464               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
   7465               .access = PL1_R, .type = ARM_CP_CONST,
   7466               .accessfn = access_aa32_tid3,
   7467               .resetvalue = cpu->isar.id_mmfr0 },
   7468             { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
   7469               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
   7470               .access = PL1_R, .type = ARM_CP_CONST,
   7471               .accessfn = access_aa32_tid3,
   7472               .resetvalue = cpu->isar.id_mmfr1 },
   7473             { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
   7474               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
   7475               .access = PL1_R, .type = ARM_CP_CONST,
   7476               .accessfn = access_aa32_tid3,
   7477               .resetvalue = cpu->isar.id_mmfr2 },
   7478             { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
   7479               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
   7480               .access = PL1_R, .type = ARM_CP_CONST,
   7481               .accessfn = access_aa32_tid3,
   7482               .resetvalue = cpu->isar.id_mmfr3 },
   7483             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
   7484               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
   7485               .access = PL1_R, .type = ARM_CP_CONST,
   7486               .accessfn = access_aa32_tid3,
   7487               .resetvalue = cpu->isar.id_isar0 },
   7488             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
   7489               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
   7490               .access = PL1_R, .type = ARM_CP_CONST,
   7491               .accessfn = access_aa32_tid3,
   7492               .resetvalue = cpu->isar.id_isar1 },
   7493             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
   7494               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
   7495               .access = PL1_R, .type = ARM_CP_CONST,
   7496               .accessfn = access_aa32_tid3,
   7497               .resetvalue = cpu->isar.id_isar2 },
   7498             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
   7499               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
   7500               .access = PL1_R, .type = ARM_CP_CONST,
   7501               .accessfn = access_aa32_tid3,
   7502               .resetvalue = cpu->isar.id_isar3 },
   7503             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
   7504               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
   7505               .access = PL1_R, .type = ARM_CP_CONST,
   7506               .accessfn = access_aa32_tid3,
   7507               .resetvalue = cpu->isar.id_isar4 },
   7508             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
   7509               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
   7510               .access = PL1_R, .type = ARM_CP_CONST,
   7511               .accessfn = access_aa32_tid3,
   7512               .resetvalue = cpu->isar.id_isar5 },
   7513             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
   7514               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
   7515               .access = PL1_R, .type = ARM_CP_CONST,
   7516               .accessfn = access_aa32_tid3,
   7517               .resetvalue = cpu->isar.id_mmfr4 },
   7518             { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
   7519               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
   7520               .access = PL1_R, .type = ARM_CP_CONST,
   7521               .accessfn = access_aa32_tid3,
   7522               .resetvalue = cpu->isar.id_isar6 },
   7523         };
   7524         define_arm_cp_regs(cpu, v6_idregs);
   7525         define_arm_cp_regs(cpu, v6_cp_reginfo);
   7526     } else {
   7527         define_arm_cp_regs(cpu, not_v6_cp_reginfo);
   7528     }
   7529     if (arm_feature(env, ARM_FEATURE_V6K)) {
   7530         define_arm_cp_regs(cpu, v6k_cp_reginfo);
   7531     }
   7532     if (arm_feature(env, ARM_FEATURE_V7MP) &&
   7533         !arm_feature(env, ARM_FEATURE_PMSA)) {
   7534         define_arm_cp_regs(cpu, v7mp_cp_reginfo);
   7535     }
   7536     if (arm_feature(env, ARM_FEATURE_V7VE)) {
   7537         define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
   7538     }
   7539     if (arm_feature(env, ARM_FEATURE_V7)) {
   7540         ARMCPRegInfo clidr = {
   7541             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
   7542             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
   7543             .access = PL1_R, .type = ARM_CP_CONST,
   7544             .accessfn = access_aa64_tid2,
   7545             .resetvalue = cpu->clidr
   7546         };
   7547         define_one_arm_cp_reg(cpu, &clidr);
   7548         define_arm_cp_regs(cpu, v7_cp_reginfo);
   7549         define_debug_regs(cpu);
   7550         define_pmu_regs(cpu);
   7551     } else {
   7552         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
   7553     }
   7554     if (arm_feature(env, ARM_FEATURE_V8)) {
   7555         /*
   7556          * v8 ID registers, which all have impdef reset values.
   7557          * Note that within the ID register ranges the unused slots
   7558          * must all be RAZ, not UNDEF; future architecture versions may
   7559          * define new registers here.
   7560          * ID registers which are AArch64 views of the AArch32 ID registers
   7561          * which already existed in v6 and v7 are handled elsewhere,
   7562          * in v6_idregs[].
   7563          */
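                /*
                 * Illustrative guest view (not code from this file): on a
                 * v8 CPU
                 *   MRS x0, S3_0_C0_C4_2   ; ID_AA64PFR2_EL1, unallocated
                 * must read as zero rather than UNDEFing, which is why the
                 * *_RESERVED entries below are ARM_CP_CONST with
                 * resetvalue 0.
                 */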
   7564         int i;
   7565         ARMCPRegInfo v8_idregs[] = {
   7566             /*
   7567              * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
   7568              * emulation because we don't know the right value for the
   7569              * GIC field until after we define these regs.
   7570              */
   7571             { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
   7572               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
   7573               .access = PL1_R,
   7574 #ifdef CONFIG_USER_ONLY
   7575               .type = ARM_CP_CONST,
   7576               .resetvalue = cpu->isar.id_aa64pfr0
   7577 #else
   7578               .type = ARM_CP_NO_RAW,
   7579               .accessfn = access_aa64_tid3,
   7580               .readfn = id_aa64pfr0_read,
   7581               .writefn = arm_cp_write_ignore
   7582 #endif
   7583             },
   7584             { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
   7585               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
   7586               .access = PL1_R, .type = ARM_CP_CONST,
   7587               .accessfn = access_aa64_tid3,
   7588               .resetvalue = cpu->isar.id_aa64pfr1 },
   7589             { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7590               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
   7591               .access = PL1_R, .type = ARM_CP_CONST,
   7592               .accessfn = access_aa64_tid3,
   7593               .resetvalue = 0 },
   7594             { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7595               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
   7596               .access = PL1_R, .type = ARM_CP_CONST,
   7597               .accessfn = access_aa64_tid3,
   7598               .resetvalue = 0 },
   7599             { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
   7600               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
   7601               .access = PL1_R, .type = ARM_CP_CONST,
   7602               .accessfn = access_aa64_tid3,
   7603               .resetvalue = cpu->isar.id_aa64zfr0 },
   7604             { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
   7605               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
   7606               .access = PL1_R, .type = ARM_CP_CONST,
   7607               .accessfn = access_aa64_tid3,
   7608               .resetvalue = cpu->isar.id_aa64smfr0 },
   7609             { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7610               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
   7611               .access = PL1_R, .type = ARM_CP_CONST,
   7612               .accessfn = access_aa64_tid3,
   7613               .resetvalue = 0 },
   7614             { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7615               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
   7616               .access = PL1_R, .type = ARM_CP_CONST,
   7617               .accessfn = access_aa64_tid3,
   7618               .resetvalue = 0 },
   7619             { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
   7620               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
   7621               .access = PL1_R, .type = ARM_CP_CONST,
   7622               .accessfn = access_aa64_tid3,
   7623               .resetvalue = cpu->isar.id_aa64dfr0 },
   7624             { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
   7625               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
   7626               .access = PL1_R, .type = ARM_CP_CONST,
   7627               .accessfn = access_aa64_tid3,
   7628               .resetvalue = cpu->isar.id_aa64dfr1 },
   7629             { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7630               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
   7631               .access = PL1_R, .type = ARM_CP_CONST,
   7632               .accessfn = access_aa64_tid3,
   7633               .resetvalue = 0 },
   7634             { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7635               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
   7636               .access = PL1_R, .type = ARM_CP_CONST,
   7637               .accessfn = access_aa64_tid3,
   7638               .resetvalue = 0 },
   7639             { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
   7640               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
   7641               .access = PL1_R, .type = ARM_CP_CONST,
   7642               .accessfn = access_aa64_tid3,
   7643               .resetvalue = cpu->id_aa64afr0 },
   7644             { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
   7645               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
   7646               .access = PL1_R, .type = ARM_CP_CONST,
   7647               .accessfn = access_aa64_tid3,
   7648               .resetvalue = cpu->id_aa64afr1 },
   7649             { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7650               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
   7651               .access = PL1_R, .type = ARM_CP_CONST,
   7652               .accessfn = access_aa64_tid3,
   7653               .resetvalue = 0 },
   7654             { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7655               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
   7656               .access = PL1_R, .type = ARM_CP_CONST,
   7657               .accessfn = access_aa64_tid3,
   7658               .resetvalue = 0 },
   7659             { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
   7660               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
   7661               .access = PL1_R, .type = ARM_CP_CONST,
   7662               .accessfn = access_aa64_tid3,
   7663               .resetvalue = cpu->isar.id_aa64isar0 },
   7664             { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
   7665               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
   7666               .access = PL1_R, .type = ARM_CP_CONST,
   7667               .accessfn = access_aa64_tid3,
   7668               .resetvalue = cpu->isar.id_aa64isar1 },
   7669             { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7670               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
   7671               .access = PL1_R, .type = ARM_CP_CONST,
   7672               .accessfn = access_aa64_tid3,
   7673               .resetvalue = 0 },
   7674             { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7675               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
   7676               .access = PL1_R, .type = ARM_CP_CONST,
   7677               .accessfn = access_aa64_tid3,
   7678               .resetvalue = 0 },
   7679             { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7680               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
   7681               .access = PL1_R, .type = ARM_CP_CONST,
   7682               .accessfn = access_aa64_tid3,
   7683               .resetvalue = 0 },
   7684             { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7685               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
   7686               .access = PL1_R, .type = ARM_CP_CONST,
   7687               .accessfn = access_aa64_tid3,
   7688               .resetvalue = 0 },
   7689             { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7690               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
   7691               .access = PL1_R, .type = ARM_CP_CONST,
   7692               .accessfn = access_aa64_tid3,
   7693               .resetvalue = 0 },
   7694             { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7695               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
   7696               .access = PL1_R, .type = ARM_CP_CONST,
   7697               .accessfn = access_aa64_tid3,
   7698               .resetvalue = 0 },
   7699             { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
   7700               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
   7701               .access = PL1_R, .type = ARM_CP_CONST,
   7702               .accessfn = access_aa64_tid3,
   7703               .resetvalue = cpu->isar.id_aa64mmfr0 },
   7704             { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
   7705               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
   7706               .access = PL1_R, .type = ARM_CP_CONST,
   7707               .accessfn = access_aa64_tid3,
   7708               .resetvalue = cpu->isar.id_aa64mmfr1 },
   7709             { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
   7710               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
   7711               .access = PL1_R, .type = ARM_CP_CONST,
   7712               .accessfn = access_aa64_tid3,
   7713               .resetvalue = cpu->isar.id_aa64mmfr2 },
   7714             { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7715               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
   7716               .access = PL1_R, .type = ARM_CP_CONST,
   7717               .accessfn = access_aa64_tid3,
   7718               .resetvalue = 0 },
   7719             { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7720               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
   7721               .access = PL1_R, .type = ARM_CP_CONST,
   7722               .accessfn = access_aa64_tid3,
   7723               .resetvalue = 0 },
   7724             { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7725               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
   7726               .access = PL1_R, .type = ARM_CP_CONST,
   7727               .accessfn = access_aa64_tid3,
   7728               .resetvalue = 0 },
   7729             { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7730               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
   7731               .access = PL1_R, .type = ARM_CP_CONST,
   7732               .accessfn = access_aa64_tid3,
   7733               .resetvalue = 0 },
   7734             { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
   7735               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
   7736               .access = PL1_R, .type = ARM_CP_CONST,
   7737               .accessfn = access_aa64_tid3,
   7738               .resetvalue = 0 },
   7739             { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
   7740               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
   7741               .access = PL1_R, .type = ARM_CP_CONST,
   7742               .accessfn = access_aa64_tid3,
   7743               .resetvalue = cpu->isar.mvfr0 },
   7744             { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
   7745               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
   7746               .access = PL1_R, .type = ARM_CP_CONST,
   7747               .accessfn = access_aa64_tid3,
   7748               .resetvalue = cpu->isar.mvfr1 },
   7749             { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
   7750               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
   7751               .access = PL1_R, .type = ARM_CP_CONST,
   7752               .accessfn = access_aa64_tid3,
   7753               .resetvalue = cpu->isar.mvfr2 },
   7754             /*
   7755              * "0, c0, c3, {0,1,2}" are the encodings corresponding to
   7756              * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding
   7757              * as RAZ, since it is in the "reserved for future ID
   7758              * registers, RAZ" part of the AArch32 encoding space.
   7759              */
   7760             { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32,
   7761               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
   7762               .access = PL1_R, .type = ARM_CP_CONST,
   7763               .accessfn = access_aa64_tid3,
   7764               .resetvalue = 0 },
   7765             { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32,
   7766               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
   7767               .access = PL1_R, .type = ARM_CP_CONST,
   7768               .accessfn = access_aa64_tid3,
   7769               .resetvalue = 0 },
   7770             { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32,
   7771               .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
   7772               .access = PL1_R, .type = ARM_CP_CONST,
   7773               .accessfn = access_aa64_tid3,
   7774               .resetvalue = 0 },
   7775             /*
   7776              * Other encodings in "0, c0, c3, ..." are STATE_BOTH because
   7777              * they're also RAZ for AArch64, and in v8 are gradually
   7778              * being filled in with the AArch64 view of the corresponding
   7779              * new AArch32 ID registers.
   7780              */
   7781             { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH,
   7782               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
   7783               .access = PL1_R, .type = ARM_CP_CONST,
   7784               .accessfn = access_aa64_tid3,
   7785               .resetvalue = 0 },
   7786             { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
   7787               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
   7788               .access = PL1_R, .type = ARM_CP_CONST,
   7789               .accessfn = access_aa64_tid3,
   7790               .resetvalue = cpu->isar.id_pfr2 },
   7791             { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
   7792               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
   7793               .access = PL1_R, .type = ARM_CP_CONST,
   7794               .accessfn = access_aa64_tid3,
   7795               .resetvalue = cpu->isar.id_dfr1 },
   7796             { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
   7797               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
   7798               .access = PL1_R, .type = ARM_CP_CONST,
   7799               .accessfn = access_aa64_tid3,
   7800               .resetvalue = cpu->isar.id_mmfr5 },
   7801             { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
   7802               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
   7803               .access = PL1_R, .type = ARM_CP_CONST,
   7804               .accessfn = access_aa64_tid3,
   7805               .resetvalue = 0 },
   7806             { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
   7807               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
   7808               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   7809               .resetvalue = extract64(cpu->pmceid0, 0, 32) },
   7810             { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
   7811               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
   7812               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   7813               .resetvalue = cpu->pmceid0 },
   7814             { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
   7815               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
   7816               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   7817               .resetvalue = extract64(cpu->pmceid1, 0, 32) },
   7818             { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
   7819               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
   7820               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
   7821               .resetvalue = cpu->pmceid1 },
   7822         };
   7823 #ifdef CONFIG_USER_ONLY
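                /*
                 * A sketch of the ARMCPRegUserSpaceInfo semantics as consumed
                 * by modify_arm_cp_regs(): for user-only builds, a matching
                 * register keeps only its .exported_bits, has .fixed_bits
                 * forced on, and .is_glob entries match the .name as a '*'
                 * glob pattern.
                 */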
   7824         static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
   7825             { .name = "ID_AA64PFR0_EL1",
   7826               .exported_bits = 0x000f000f00ff0000,
   7827               .fixed_bits    = 0x0000000000000011 },
   7828             { .name = "ID_AA64PFR1_EL1",
   7829               .exported_bits = 0x00000000000000f0 },
   7830             { .name = "ID_AA64PFR*_EL1_RESERVED",
   7831               .is_glob = true                     },
   7832             { .name = "ID_AA64ZFR0_EL1"           },
   7833             { .name = "ID_AA64MMFR0_EL1",
   7834               .fixed_bits    = 0x00000000ff000000 },
   7835             { .name = "ID_AA64MMFR1_EL1"          },
   7836             { .name = "ID_AA64MMFR*_EL1_RESERVED",
   7837               .is_glob = true                     },
   7838             { .name = "ID_AA64DFR0_EL1",
   7839               .fixed_bits    = 0x0000000000000006 },
   7840             { .name = "ID_AA64DFR1_EL1"           },
   7841             { .name = "ID_AA64DFR*_EL1_RESERVED",
   7842               .is_glob = true                     },
   7843             { .name = "ID_AA64AFR*",
   7844               .is_glob = true                     },
   7845             { .name = "ID_AA64ISAR0_EL1",
   7846               .exported_bits = 0x00fffffff0fffff0 },
   7847             { .name = "ID_AA64ISAR1_EL1",
   7848               .exported_bits = 0x000000f0ffffffff },
   7849             { .name = "ID_AA64ISAR*_EL1_RESERVED",
   7850               .is_glob = true                     },
   7851         };
   7852         modify_arm_cp_regs(v8_idregs, v8_user_idregs);
   7853 #endif
   7854         /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
   7855         if (!arm_feature(env, ARM_FEATURE_EL3) &&
   7856             !arm_feature(env, ARM_FEATURE_EL2)) {
   7857             ARMCPRegInfo rvbar = {
   7858                 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
   7859                 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
   7860                 .access = PL1_R,
   7861                 .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
   7862             };
   7863             define_one_arm_cp_reg(cpu, &rvbar);
   7864         }
   7865         define_arm_cp_regs(cpu, v8_idregs);
   7866         define_arm_cp_regs(cpu, v8_cp_reginfo);
   7867 
   7868         for (i = 4; i < 16; i++) {
   7869             /*
   7870              * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
   7871              * For pre-v8 cores there are RAZ patterns for these in
   7872              * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here.
   7873              * v8 extends the "must RAZ" part of the ID register space
   7874              * to also cover c0, 0, c{8-15}, {0-7}.
   7875              * These are STATE_AA32 because in the AArch64 sysreg space
   7876              * c4-c7 is where the AArch64 ID registers live (and we've
   7877              * already defined those in v8_idregs[]), and c8-c15 are not
   7878              * "must RAZ" for AArch64.
   7879              */
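                    /*
                     * The names generated below run "RES_0_C0_C4_X" through
                     * "RES_0_C0_C15_X", one entry per crm value, with opc2
                     * wildcarded via CP_ANY.
                     */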
   7880             g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i);
   7881             ARMCPRegInfo v8_aa32_raz_idregs = {
   7882                 .name = name,
   7883                 .state = ARM_CP_STATE_AA32,
   7884                 .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY,
   7885                 .access = PL1_R, .type = ARM_CP_CONST,
   7886                 .accessfn = access_aa64_tid3,
   7887                 .resetvalue = 0 };
   7888             define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs);
   7889         }
   7890     }
   7891 
   7892     /*
   7893      * Register the base EL2 cpregs.
   7894      * Pre-v8, these registers are implemented only as part of the
   7895      * Virtualization Extensions (EL2 present).  Beginning with v8,
   7896      * if EL2 is missing but EL3 is enabled, these mostly become
   7897      * RES0 from EL3, with some specific exceptions.
   7898      */
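            /*
             * The exceptions are encoded with the ARM_CP_EL3_NO_EL2_* type
             * flags used below and consumed in add_cpreg_to_hashtable():
             * _UNDEF drops the register entirely, _KEEP leaves it
             * functional, and _C_NZ turns it into a constant that keeps its
             * non-zero reset value (e.g. VPIDR_EL2, which resets to MIDR).
             */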
   7899     if (arm_feature(env, ARM_FEATURE_EL2)
   7900         || (arm_feature(env, ARM_FEATURE_EL3)
   7901             && arm_feature(env, ARM_FEATURE_V8))) {
   7902         uint64_t vmpidr_def = mpidr_read_val(env);
   7903         ARMCPRegInfo vpidr_regs[] = {
   7904             { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
   7905               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
   7906               .access = PL2_RW, .accessfn = access_el3_aa32ns,
   7907               .resetvalue = cpu->midr,
   7908               .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
   7909               .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
   7910             { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
   7911               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
   7912               .access = PL2_RW, .resetvalue = cpu->midr,
   7913               .type = ARM_CP_EL3_NO_EL2_C_NZ,
   7914               .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
   7915             { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
   7916               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
   7917               .access = PL2_RW, .accessfn = access_el3_aa32ns,
   7918               .resetvalue = vmpidr_def,
   7919               .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
   7920               .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
   7921             { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
   7922               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
   7923               .access = PL2_RW, .resetvalue = vmpidr_def,
   7924               .type = ARM_CP_EL3_NO_EL2_C_NZ,
   7925               .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
   7926         };
   7927         /*
   7928          * The only field of MDCR_EL2 that has a defined architectural reset
   7929          * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
   7930          */
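                /*
                 * pmu_num_counters() supplies PMCR_EL0.N here; that value
                 * lands in HPMN because (assuming the architectural layout)
                 * HPMN occupies the least significant bits of MDCR_EL2.
                 */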
   7931         ARMCPRegInfo mdcr_el2 = {
   7932             .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO,
   7933             .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
   7934             .writefn = mdcr_el2_write,
   7935             .access = PL2_RW, .resetvalue = pmu_num_counters(env),
   7936             .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
   7937         };
   7938         define_one_arm_cp_reg(cpu, &mdcr_el2);
   7939         define_arm_cp_regs(cpu, vpidr_regs);
   7940         define_arm_cp_regs(cpu, el2_cp_reginfo);
   7941         if (arm_feature(env, ARM_FEATURE_V8)) {
   7942             define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
   7943         }
   7944         if (cpu_isar_feature(aa64_sel2, cpu)) {
   7945             define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
   7946         }
   7947         /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
   7948         if (!arm_feature(env, ARM_FEATURE_EL3)) {
   7949             ARMCPRegInfo rvbar = {
   7950                 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
   7951                 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
   7952                 .access = PL2_R,
   7953                 .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
   7954             };
   7955             define_one_arm_cp_reg(cpu, &rvbar);
   7956         }
   7957     }
   7958 
   7959     /* Register the base EL3 cpregs. */
   7960     if (arm_feature(env, ARM_FEATURE_EL3)) {
   7961         define_arm_cp_regs(cpu, el3_cp_reginfo);
   7962         ARMCPRegInfo el3_regs[] = {
   7963             { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
   7964               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
   7965               .access = PL3_R,
   7966               .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
   7967             },
   7968             { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
   7969               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
   7970               .access = PL3_RW,
   7971               .raw_writefn = raw_write, .writefn = sctlr_write,
   7972               .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
   7973               .resetvalue = cpu->reset_sctlr },
   7974         };
   7975 
   7976         define_arm_cp_regs(cpu, el3_regs);
   7977     }
   7978     /* The behaviour of NSACR is sufficiently various that we don't
   7979      * try to describe it in a single reginfo:
   7980      *  if EL3 is 64 bit, then trap to EL3 from S EL1,
   7981      *     reads as constant 0xc00 from NS EL1 and NS EL2
   7982      *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
   7983      *  if v7 without EL3, register doesn't exist
   7984      *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
   7985      */
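            /*
             * Those cases map onto the reginfo variants below: 64-bit EL3
             * gets the constant 0xc00 with nsacr_access providing the S EL1
             * trap, 32-bit EL3 gets the PL3_RW | PL1_R variant, v8 without
             * EL3 gets a read-only constant, and v7 without EL3 defines
             * nothing, so accesses UNDEF.
             */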
   7986     if (arm_feature(env, ARM_FEATURE_EL3)) {
   7987         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
   7988             static const ARMCPRegInfo nsacr = {
   7989                 .name = "NSACR", .type = ARM_CP_CONST,
   7990                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
   7991                 .access = PL1_RW, .accessfn = nsacr_access,
   7992                 .resetvalue = 0xc00
   7993             };
   7994             define_one_arm_cp_reg(cpu, &nsacr);
   7995         } else {
   7996             static const ARMCPRegInfo nsacr = {
   7997                 .name = "NSACR",
   7998                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
   7999                 .access = PL3_RW | PL1_R,
   8000                 .resetvalue = 0,
   8001                 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
   8002             };
   8003             define_one_arm_cp_reg(cpu, &nsacr);
   8004         }
   8005     } else {
   8006         if (arm_feature(env, ARM_FEATURE_V8)) {
   8007             static const ARMCPRegInfo nsacr = {
   8008                 .name = "NSACR", .type = ARM_CP_CONST,
   8009                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
   8010                 .access = PL1_R,
   8011                 .resetvalue = 0xc00
   8012             };
   8013             define_one_arm_cp_reg(cpu, &nsacr);
   8014         }
   8015     }
   8016 
   8017     if (arm_feature(env, ARM_FEATURE_PMSA)) {
   8018         if (arm_feature(env, ARM_FEATURE_V6)) {
   8019             /* PMSAv6 not implemented */
   8020             assert(arm_feature(env, ARM_FEATURE_V7));
   8021             define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
   8022             define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
   8023         } else {
   8024             define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
   8025         }
   8026     } else {
   8027         define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
   8028         define_arm_cp_regs(cpu, vmsa_cp_reginfo);
   8029         /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
   8030         if (cpu_isar_feature(aa32_hpd, cpu)) {
   8031             define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
   8032         }
   8033     }
   8034     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
   8035         define_arm_cp_regs(cpu, t2ee_cp_reginfo);
   8036     }
   8037     if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
   8038         define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
   8039     }
   8040     if (arm_feature(env, ARM_FEATURE_VAPA)) {
   8041         define_arm_cp_regs(cpu, vapa_cp_reginfo);
   8042     }
   8043     if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
   8044         define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
   8045     }
   8046     if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
   8047         define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
   8048     }
   8049     if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
   8050         define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
   8051     }
   8052     if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
   8053         define_arm_cp_regs(cpu, omap_cp_reginfo);
   8054     }
   8055     if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
   8056         define_arm_cp_regs(cpu, strongarm_cp_reginfo);
   8057     }
   8058     if (arm_feature(env, ARM_FEATURE_XSCALE)) {
   8059         define_arm_cp_regs(cpu, xscale_cp_reginfo);
   8060     }
   8061     if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
   8062         define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
   8063     }
   8064     if (arm_feature(env, ARM_FEATURE_LPAE)) {
   8065         define_arm_cp_regs(cpu, lpae_cp_reginfo);
   8066     }
   8067     if (cpu_isar_feature(aa32_jazelle, cpu)) {
   8068         define_arm_cp_regs(cpu, jazelle_regs);
   8069     }
   8070     /* Slightly awkwardly, the OMAP and StrongARM cores need all of
   8071      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
   8072      * be read-only (ie write causes UNDEF exception).
   8073      */
   8074     {
   8075         ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
   8076             /* Pre-v8 MIDR space.
   8077              * Note that the MIDR isn't a simple constant register because
   8078              * of the TI925 behaviour where writes to another register can
   8079              * cause the MIDR value to change.
   8080              *
   8081              * Unimplemented registers in the c15 0 0 0 space default to
   8082              * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
   8083              * and friends override accordingly.
   8084              */
   8085             { .name = "MIDR",
   8086               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
   8087               .access = PL1_R, .resetvalue = cpu->midr,
   8088               .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
   8089               .readfn = midr_read,
   8090               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
   8091               .type = ARM_CP_OVERRIDE },
   8092             /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
   8093             { .name = "DUMMY",
   8094               .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
   8095               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
   8096             { .name = "DUMMY",
   8097               .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
   8098               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
   8099             { .name = "DUMMY",
   8100               .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
   8101               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
   8102             { .name = "DUMMY",
   8103               .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
   8104               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
   8105             { .name = "DUMMY",
   8106               .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
   8107               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
   8108         };
   8109         ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
   8110             { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
   8111               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
   8112               .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
   8113               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
   8114               .readfn = midr_read },
   8115             /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
   8116             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
   8117               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
   8118               .access = PL1_R, .resetvalue = cpu->midr },
   8119             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
   8120               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
   8121               .access = PL1_R, .resetvalue = cpu->midr },
   8122             { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
   8123               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
   8124               .access = PL1_R,
   8125               .accessfn = access_aa64_tid1,
   8126               .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
   8127         };
   8128         ARMCPRegInfo id_cp_reginfo[] = {
   8129             /* These are common to v8 and pre-v8 */
   8130             { .name = "CTR",
   8131               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
   8132               .access = PL1_R, .accessfn = ctr_el0_access,
   8133               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
   8134             { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
   8135               .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
   8136               .access = PL0_R, .accessfn = ctr_el0_access,
   8137               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
   8138             /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
   8139             { .name = "TCMTR",
   8140               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
   8141               .access = PL1_R,
   8142               .accessfn = access_aa32_tid1,
   8143               .type = ARM_CP_CONST, .resetvalue = 0 },
   8144         };
   8145         /* TLBTR is specific to VMSA */
   8146         ARMCPRegInfo id_tlbtr_reginfo = {
   8147               .name = "TLBTR",
   8148               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
   8149               .access = PL1_R,
   8150               .accessfn = access_aa32_tid1,
   8151               .type = ARM_CP_CONST, .resetvalue = 0,
   8152         };
   8153         /* MPUIR is specific to PMSA V6+ */
   8154         ARMCPRegInfo id_mpuir_reginfo = {
   8155               .name = "MPUIR",
   8156               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
   8157               .access = PL1_R, .type = ARM_CP_CONST,
   8158               .resetvalue = cpu->pmsav7_dregion << 8
   8159         };
   8160         static const ARMCPRegInfo crn0_wi_reginfo = {
   8161             .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
   8162             .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
   8163             .type = ARM_CP_NOP | ARM_CP_OVERRIDE
   8164         };
   8165 #ifdef CONFIG_USER_ONLY
   8166         static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
   8167             { .name = "MIDR_EL1",
   8168               .exported_bits = 0x00000000ffffffff },
   8169             { .name = "REVIDR_EL1"                },
   8170         };
   8171         modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
   8172 #endif
   8173         if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
   8174             arm_feature(env, ARM_FEATURE_STRONGARM)) {
   8175             size_t i;
   8176             /* Register the blanket "writes ignored" value first to cover the
   8177              * whole space. Then update the specific ID registers to allow write
   8178              * access, so that they ignore writes rather than causing them to
   8179              * UNDEF.
   8180              */
   8181             define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
   8182             for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
   8183                 id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
   8184             }
   8185             for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
   8186                 id_cp_reginfo[i].access = PL1_RW;
   8187             }
   8188             id_mpuir_reginfo.access = PL1_RW;
   8189             id_tlbtr_reginfo.access = PL1_RW;
   8190         }
   8191         if (arm_feature(env, ARM_FEATURE_V8)) {
   8192             define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
   8193         } else {
   8194             define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
   8195         }
   8196         define_arm_cp_regs(cpu, id_cp_reginfo);
   8197         if (!arm_feature(env, ARM_FEATURE_PMSA)) {
   8198             define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
   8199         } else if (arm_feature(env, ARM_FEATURE_V7)) {
   8200             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
   8201         }
   8202     }
   8203 
   8204     if (arm_feature(env, ARM_FEATURE_MPIDR)) {
   8205         ARMCPRegInfo mpidr_cp_reginfo[] = {
   8206             { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
   8207               .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
   8208               .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
   8209         };
   8210 #ifdef CONFIG_USER_ONLY
   8211         static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
   8212             { .name = "MPIDR_EL1",
   8213               .fixed_bits = 0x0000000080000000 },
   8214         };
   8215         modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
   8216 #endif
   8217         define_arm_cp_regs(cpu, mpidr_cp_reginfo);
   8218     }
   8219 
   8220     if (arm_feature(env, ARM_FEATURE_AUXCR)) {
   8221         ARMCPRegInfo auxcr_reginfo[] = {
   8222             { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
   8223               .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
   8224               .access = PL1_RW, .accessfn = access_tacr,
   8225               .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
   8226             { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
   8227               .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
   8228               .access = PL2_RW, .type = ARM_CP_CONST,
   8229               .resetvalue = 0 },
   8230             { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
   8231               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
   8232               .access = PL3_RW, .type = ARM_CP_CONST,
   8233               .resetvalue = 0 },
   8234         };
   8235         define_arm_cp_regs(cpu, auxcr_reginfo);
   8236         if (cpu_isar_feature(aa32_ac2, cpu)) {
   8237             define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
   8238         }
   8239     }
   8240 
   8241     if (arm_feature(env, ARM_FEATURE_CBAR)) {
   8242         /*
   8243          * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
   8244          * There are two flavours:
   8245          *  (1) older 32-bit only cores have a simple 32-bit CBAR
   8246          *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
   8247          *      32-bit register visible to AArch32 at a different encoding
   8248          *      to the "flavour 1" register and with the bits rearranged to
   8249          *      be able to squash a 64-bit address into the 32-bit view.
   8250          * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
   8251          * in future if we support AArch32-only configs of some of the
   8252          * AArch64 cores we might need to add a specific feature flag
   8253          * to indicate cores with "flavour 2" CBAR.
   8254          */
   8255         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
   8256             /* 32 bit view is [31:18] 0...0 [43:32]. */
   8257             uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
   8258                 | extract64(cpu->reset_cbar, 32, 12);
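                    /*
                     * Worked example with a hypothetical reset value: for
                     * cpu->reset_cbar == 0x8_4000_0000ULL, bits [31:18] give
                     * 0x40000000 and bits [43:32] give 0x008, so the 32-bit
                     * view reads as 0x40000008.
                     */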
   8259             ARMCPRegInfo cbar_reginfo[] = {
   8260                 { .name = "CBAR",
   8261                   .type = ARM_CP_CONST,
   8262                   .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
   8263                   .access = PL1_R, .resetvalue = cbar32 },
   8264                 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
   8265                   .type = ARM_CP_CONST,
   8266                   .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
   8267                   .access = PL1_R, .resetvalue = cpu->reset_cbar },
   8268             };
   8269             /* We don't implement an r/w 64-bit CBAR currently */
   8270             assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
   8271             define_arm_cp_regs(cpu, cbar_reginfo);
   8272         } else {
   8273             ARMCPRegInfo cbar = {
   8274                 .name = "CBAR",
   8275                 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
   8276                 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
   8277                 .fieldoffset = offsetof(CPUARMState,
   8278                                         cp15.c15_config_base_address)
   8279             };
   8280             if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
   8281                 cbar.access = PL1_R;
   8282                 cbar.fieldoffset = 0;
   8283                 cbar.type = ARM_CP_CONST;
   8284             }
   8285             define_one_arm_cp_reg(cpu, &cbar);
   8286         }
   8287     }
   8288 
   8289     if (arm_feature(env, ARM_FEATURE_VBAR)) {
   8290         static const ARMCPRegInfo vbar_cp_reginfo[] = {
   8291             { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
   8292               .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
   8293               .access = PL1_RW, .writefn = vbar_write,
   8294               .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
   8295                                      offsetof(CPUARMState, cp15.vbar_ns) },
   8296               .resetvalue = 0 },
   8297         };
   8298         define_arm_cp_regs(cpu, vbar_cp_reginfo);
   8299     }
   8300 
   8301     /* Generic registers whose values depend on the implementation */
   8302     {
   8303         ARMCPRegInfo sctlr = {
   8304             .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
   8305             .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
   8306             .access = PL1_RW, .accessfn = access_tvm_trvm,
   8307             .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
   8308                                    offsetof(CPUARMState, cp15.sctlr_ns) },
   8309             .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
   8310             .raw_writefn = raw_write,
   8311         };
   8312         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
   8313             /* Normally we would always end the TB on an SCTLR write, but Linux
   8314              * arch/arm/mach-pxa/sleep.S expects two instructions following
   8315              * an MMU enable to execute from cache.  Imitate this behaviour.
   8316              */
   8317             sctlr.type |= ARM_CP_SUPPRESS_TB_END;
   8318         }
   8319         define_one_arm_cp_reg(cpu, &sctlr);
   8320     }
   8321 
   8322     if (cpu_isar_feature(aa64_lor, cpu)) {
   8323         define_arm_cp_regs(cpu, lor_reginfo);
   8324     }
   8325     if (cpu_isar_feature(aa64_pan, cpu)) {
   8326         define_one_arm_cp_reg(cpu, &pan_reginfo);
   8327     }
   8328 #ifndef CONFIG_USER_ONLY
   8329     if (cpu_isar_feature(aa64_ats1e1, cpu)) {
   8330         define_arm_cp_regs(cpu, ats1e1_reginfo);
   8331     }
   8332     if (cpu_isar_feature(aa32_ats1e1, cpu)) {
   8333         define_arm_cp_regs(cpu, ats1cp_reginfo);
   8334     }
   8335 #endif
   8336     if (cpu_isar_feature(aa64_uao, cpu)) {
   8337         define_one_arm_cp_reg(cpu, &uao_reginfo);
   8338     }
   8339 
   8340     if (cpu_isar_feature(aa64_dit, cpu)) {
   8341         define_one_arm_cp_reg(cpu, &dit_reginfo);
   8342     }
   8343     if (cpu_isar_feature(aa64_ssbs, cpu)) {
   8344         define_one_arm_cp_reg(cpu, &ssbs_reginfo);
   8345     }
   8346     if (cpu_isar_feature(any_ras, cpu)) {
   8347         define_arm_cp_regs(cpu, minimal_ras_reginfo);
   8348     }
   8349 
   8350     if (cpu_isar_feature(aa64_vh, cpu) ||
   8351         cpu_isar_feature(aa64_debugv8p2, cpu)) {
   8352         define_one_arm_cp_reg(cpu, &contextidr_el2);
   8353     }
   8354     if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
   8355         define_arm_cp_regs(cpu, vhe_reginfo);
   8356     }
   8357 
   8358     if (cpu_isar_feature(aa64_sve, cpu)) {
   8359         define_arm_cp_regs(cpu, zcr_reginfo);
   8360     }
   8361 
   8362     if (cpu_isar_feature(aa64_hcx, cpu)) {
   8363         define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
   8364     }
   8365 
   8366 #ifdef TARGET_AARCH64
   8367     if (cpu_isar_feature(aa64_sme, cpu)) {
   8368         define_arm_cp_regs(cpu, sme_reginfo);
   8369     }
   8370     if (cpu_isar_feature(aa64_pauth, cpu)) {
   8371         define_arm_cp_regs(cpu, pauth_reginfo);
   8372     }
   8373     if (cpu_isar_feature(aa64_rndr, cpu)) {
   8374         define_arm_cp_regs(cpu, rndr_reginfo);
   8375     }
   8376     if (cpu_isar_feature(aa64_tlbirange, cpu)) {
   8377         define_arm_cp_regs(cpu, tlbirange_reginfo);
   8378     }
   8379     if (cpu_isar_feature(aa64_tlbios, cpu)) {
   8380         define_arm_cp_regs(cpu, tlbios_reginfo);
   8381     }
   8382 #ifndef CONFIG_USER_ONLY
   8383     /* Data Cache clean instructions up to PoP */
   8384     if (cpu_isar_feature(aa64_dcpop, cpu)) {
   8385         define_one_arm_cp_reg(cpu, dcpop_reg);
   8386 
   8387         if (cpu_isar_feature(aa64_dcpodp, cpu)) {
   8388             define_one_arm_cp_reg(cpu, dcpodp_reg);
   8389         }
   8390     }
   8391 #endif /*CONFIG_USER_ONLY*/
   8392 
   8393     /*
   8394      * If full MTE is enabled, add all of the system registers.
   8395      * If only "instructions available at EL0" are enabled,
   8396      * then define only a RAZ/WI version of PSTATE.TCO.
   8397      */
   8398     if (cpu_isar_feature(aa64_mte, cpu)) {
   8399         define_arm_cp_regs(cpu, mte_reginfo);
   8400         define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
   8401     } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
   8402         define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
   8403         define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
   8404     }
   8405 
   8406     if (cpu_isar_feature(aa64_scxtnum, cpu)) {
   8407         define_arm_cp_regs(cpu, scxtnum_reginfo);
   8408     }
   8409 #endif
   8410 
   8411     if (cpu_isar_feature(any_predinv, cpu)) {
   8412         define_arm_cp_regs(cpu, predinv_reginfo);
   8413     }
   8414 
   8415     if (cpu_isar_feature(any_ccidx, cpu)) {
   8416         define_arm_cp_regs(cpu, ccsidr2_reginfo);
   8417     }
   8418 
   8419 #ifndef CONFIG_USER_ONLY
   8420     /*
   8421      * Register redirections and aliases must be done last,
   8422      * after the registers from the other extensions have been defined.
   8423      */
   8424     if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
   8425         define_arm_vh_e2h_redirects_aliases(cpu);
   8426     }
   8427 #endif
   8428 }
   8429 
   8430 /* Sort alphabetically by type name, except for "any". */
   8431 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
   8432 {
   8433     ObjectClass *class_a = (ObjectClass *)a;
   8434     ObjectClass *class_b = (ObjectClass *)b;
   8435     const char *name_a, *name_b;
   8436 
   8437     name_a = object_class_get_name(class_a);
   8438     name_b = object_class_get_name(class_b);
   8439     if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
   8440         return 1;
   8441     } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
   8442         return -1;
   8443     } else {
   8444         return strcmp(name_a, name_b);
   8445     }
   8446 }
   8447 
   8448 static void arm_cpu_list_entry(gpointer data, gpointer user_data)
   8449 {
   8450     ObjectClass *oc = data;
   8451     CPUClass *cc = CPU_CLASS(oc);
   8452     const char *typename;
   8453     char *name;
   8454 
   8455     typename = object_class_get_name(oc);
   8456     name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
   8457     if (cc->deprecation_note) {
   8458         qemu_printf("  %s (deprecated)\n", name);
   8459     } else {
   8460         qemu_printf("  %s\n", name);
   8461     }
   8462     g_free(name);
   8463 }
   8464 
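        /*
         * Typically reached via the target's cpu_list hook for "-cpu help".
         * Sample output (CPU names depend on the build; "any", when
         * present, sorts last):
         *
         *   Available CPUs:
         *     cortex-a15
         *     cortex-a53
         *     ...
         */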
   8465 void arm_cpu_list(void)
   8466 {
   8467     GSList *list;
   8468 
   8469     list = object_class_get_list(TYPE_ARM_CPU, false);
   8470     list = g_slist_sort(list, arm_cpu_list_compare);
   8471     qemu_printf("Available CPUs:\n");
   8472     g_slist_foreach(list, arm_cpu_list_entry, NULL);
   8473     g_slist_free(list);
   8474 }
   8475 
   8476 static void arm_cpu_add_definition(gpointer data, gpointer user_data)
   8477 {
   8478     ObjectClass *oc = data;
   8479     CpuDefinitionInfoList **cpu_list = user_data;
   8480     CpuDefinitionInfo *info;
   8481     const char *typename;
   8482 
   8483     typename = object_class_get_name(oc);
   8484     info = g_malloc0(sizeof(*info));
   8485     info->name = g_strndup(typename,
   8486                            strlen(typename) - strlen("-" TYPE_ARM_CPU));
   8487     info->q_typename = g_strdup(typename);
   8488 
   8489     QAPI_LIST_PREPEND(*cpu_list, info);
   8490 }
   8491 
   8492 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
   8493 {
   8494     CpuDefinitionInfoList *cpu_list = NULL;
   8495     GSList *list;
   8496 
   8497     list = object_class_get_list(TYPE_ARM_CPU, false);
   8498     g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
   8499     g_slist_free(list);
   8500 
   8501     return cpu_list;
   8502 }
   8503 
   8504 /*
   8505  * Private utility function for define_one_arm_cp_reg_with_opaque():
   8506  * add a single reginfo struct to the hash table.
   8507  */
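        /*
         * Note that the caller expands wildcards (CP_ANY, ARM_CP_STATE_BOTH,
         * ARM_CP_SECSTATE_BOTH), so this function can run several times for a
         * single source ARMCPRegInfo, once per concrete (state, secstate,
         * crm, opc1, opc2) combination.
         */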
   8508 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
   8509                                    void *opaque, CPState state,
   8510                                    CPSecureState secstate,
   8511                                    int crm, int opc1, int opc2,
   8512                                    const char *name)
   8513 {
   8514     CPUARMState *env = &cpu->env;
   8515     uint32_t key;
   8516     ARMCPRegInfo *r2;
   8517     bool is64 = r->type & ARM_CP_64BIT;
   8518     bool ns = secstate & ARM_CP_SECSTATE_NS;
   8519     int cp = r->cp;
   8520     size_t name_len;
   8521     bool make_const;
   8522 
   8523     switch (state) {
   8524     case ARM_CP_STATE_AA32:
   8525         /* We assume it is a cp15 register if the .cp field is left unset. */
   8526         if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
   8527             cp = 15;
   8528         }
   8529         key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
   8530         break;
   8531     case ARM_CP_STATE_AA64:
   8532         /*
   8533          * To allow abbreviation of ARMCPRegInfo definitions, we treat
   8534          * cp == 0 as equivalent to the value for "standard guest-visible
   8535          * sysreg".  STATE_BOTH definitions are also always "standard sysreg"
   8536          * in their AArch64 view (the .cp value may be non-zero for the
   8537          * benefit of the AArch32 view).
   8538          */
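                /*
                 * For example, the ID_AA64PFR0_EL1 reginfo defined earlier
                 * leaves .cp at 0, so it is keyed here with
                 * cp == CP_REG_ARM64_SYSREG_CP and
                 * (opc0, opc1, crn, crm, opc2) == (3, 0, 0, 4, 0).
                 */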
   8539         if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
   8540             cp = CP_REG_ARM64_SYSREG_CP;
   8541         }
   8542         key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
   8543         break;
   8544     default:
   8545         g_assert_not_reached();
   8546     }
   8547 
   8548     /* Overriding of an existing definition must be explicitly requested. */
   8549     if (!(r->type & ARM_CP_OVERRIDE)) {
   8550         const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
   8551         if (oldreg) {
   8552             assert(oldreg->type & ARM_CP_OVERRIDE);
   8553         }
   8554     }
   8555 
   8556     /*
   8557      * Eliminate registers that are not present because the EL is missing.
   8558      * Doing this here makes it easier to put all registers for a given
   8559      * feature into the same ARMCPRegInfo array and define them all at once.
   8560      */
   8561     make_const = false;
   8562     if (arm_feature(env, ARM_FEATURE_EL3)) {
   8563         /*
   8564          * An EL2 register without EL2 but with EL3 is (usually) RES0.
   8565          * See rule RJFFP in section D1.1.3 of DDI0487H.a.
   8566          */
   8567         int min_el = ctz32(r->access) / 2;
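                /*
                 * ctz32(access) / 2 yields the lowest EL that can access the
                 * register: the access mask (see the PLx_* layout in
                 * cpregs.h) allots two adjacent bits per EL, EL0 in the
                 * least significant pair, so e.g. PL2_RW gives min_el == 2.
                 */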
   8568         if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
   8569             if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
   8570                 return;
   8571             }
   8572             make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
   8573         }
   8574     } else {
   8575         CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
   8576                                  ? PL2_RW : PL1_RW);
   8577         if ((r->access & max_el) == 0) {
   8578             return;
   8579         }
   8580     }
   8581 
   8582     /* Combine cpreg and name into one allocation. */
   8583     name_len = strlen(name) + 1;
   8584     r2 = g_malloc(sizeof(*r2) + name_len);
   8585     *r2 = *r;
   8586     r2->name = memcpy(r2 + 1, name, name_len);
   8587 
   8588     /*
   8589      * Update fields to match the instantiation, overwriting wildcards
   8590      * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
   8591      */
   8592     r2->cp = cp;
   8593     r2->crm = crm;
   8594     r2->opc1 = opc1;
   8595     r2->opc2 = opc2;
   8596     r2->state = state;
   8597     r2->secure = secstate;
   8598     if (opaque) {
   8599         r2->opaque = opaque;
   8600     }
   8601 
   8602     if (make_const) {
   8603         /* This should not have been a very special register to begin with. */
   8604         int old_special = r2->type & ARM_CP_SPECIAL_MASK;
   8605         assert(old_special == 0 || old_special == ARM_CP_NOP);
   8606         /*
   8607          * Set the special function to CONST, retaining the other flags.
   8608          * This is important for e.g. ARM_CP_SVE so that we still
   8609          * take the SVE trap if CPTR_EL3.EZ == 0.
   8610          */
   8611         r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
   8612         /*
   8613          * Usually, these registers become RES0, but there are a few
   8614          * special cases like VPIDR_EL2 which have a constant non-zero
   8615          * value with writes ignored.
   8616          */
   8617         if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
   8618             r2->resetvalue = 0;
   8619         }
   8620         /*
   8621          * ARM_CP_CONST has precedence, so removing the callbacks and
   8622          * offsets is not strictly necessary, but it is potentially
   8623          * less confusing to debug later.
   8624          */
   8625         r2->readfn = NULL;
   8626         r2->writefn = NULL;
   8627         r2->raw_readfn = NULL;
   8628         r2->raw_writefn = NULL;
   8629         r2->resetfn = NULL;
   8630         r2->fieldoffset = 0;
   8631         r2->bank_fieldoffsets[0] = 0;
   8632         r2->bank_fieldoffsets[1] = 0;
   8633     } else {
   8634         bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
   8635 
   8636         if (isbanked) {
   8637             /*
   8638              * Register is banked (using both entries in the array).
   8639              * Overwrite fieldoffset: the array is only used when defining
   8640              * banked registers; from here on only fieldoffset is used.
   8641              */
   8642             r2->fieldoffset = r->bank_fieldoffsets[ns];
   8643         }
   8644         if (state == ARM_CP_STATE_AA32) {
   8645             if (isbanked) {
   8646                 /*
   8647                  * If the register is banked then we don't need to migrate or
   8648                  * reset the 32-bit instance in certain cases:
   8649                  *
   8650                  * 1) If the register has both 32-bit and 64-bit instances
   8651                  *    then we can count on the 64-bit instance taking care
   8652                  *    of the non-secure bank.
   8653                  * 2) If ARMv8 is enabled then we can count on a 64-bit
   8654                  *    version taking care of the secure bank.  This requires
   8655                  *    that separate 32 and 64-bit definitions are provided.
   8656                  */
   8657                 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
   8658                     (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
   8659                     r2->type |= ARM_CP_ALIAS;
   8660                 }
   8661             } else if ((secstate != r->secure) && !ns) {
   8662                 /*
   8663                  * The register is not banked so we only want to allow
   8664                  * migration of the non-secure instance.
   8665                  */
   8666                 r2->type |= ARM_CP_ALIAS;
   8667             }
   8668 
   8669             if (HOST_BIG_ENDIAN &&
   8670                 r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
   8671                 r2->fieldoffset += sizeof(uint32_t);
   8672             }
   8673         }
   8674     }
   8675 
   8676     /*
   8677      * By convention, for wildcarded registers only the first
   8678      * entry is used for migration; the others are marked as
   8679      * ALIAS so we don't try to transfer the register
   8680      * multiple times. Special registers (i.e. NOP/WFI) are
   8681      * never migratable and not even raw-accessible.
   8682      */
   8683     if (r2->type & ARM_CP_SPECIAL_MASK) {
   8684         r2->type |= ARM_CP_NO_RAW;
   8685     }
   8686     if (((r->crm == CP_ANY) && crm != 0) ||
   8687         ((r->opc1 == CP_ANY) && opc1 != 0) ||
   8688         ((r->opc2 == CP_ANY) && opc2 != 0)) {
   8689         r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
   8690     }
   8691 
   8692     /*
   8693      * Check that raw accesses are either forbidden or handled. Note that
   8694      * we can't assert this earlier because the setup of fieldoffset for
   8695      * banked registers has to be done first.
   8696      */
   8697     if (!(r2->type & ARM_CP_NO_RAW)) {
   8698         assert(!raw_accessors_invalid(r2));
   8699     }
   8700 
   8701     g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
   8702 }
   8703 
   8704 
   8705 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
   8706                                        const ARMCPRegInfo *r, void *opaque)
   8707 {
   8708     /* Define implementations of coprocessor registers.
   8709      * We store these in a hashtable because typically
   8710      * there are fewer than 150 registers in a space which
   8711      * is 16*16*16*8*8 = 262144 in size.
   8712      * Wildcarding is supported for the crm, opc1 and opc2 fields.
   8713      * If a register is defined twice then the second definition is
   8714      * used, so this can be used to define some generic registers and
   8715      * then override them with implementation specific variations.
   8716      * At least one of the original and the second definition should
   8717      * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
   8718      * against accidental use.
   8719      *
   8720      * The state field defines whether the register is to be
   8721      * visible in the AArch32 or AArch64 execution state. If the
   8722      * state is set to ARM_CP_STATE_BOTH then we synthesise a
   8723      * reginfo structure for the AArch32 view, which sees the lower
   8724      * 32 bits of the 64 bit register.
   8725      *
   8726      * Only registers visible in AArch64 may set r->opc0; opc0 cannot
   8727      * be wildcarded. AArch64 registers are always considered to be 64
   8728      * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
   8729      * the register, if any.
   8730      */
   8731     int crm, opc1, opc2;
   8732     int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
   8733     int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
   8734     int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
   8735     int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
   8736     int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
   8737     int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
   8738     CPState state;
   8739 
   8740     /* 64 bit registers have only CRm and Opc1 fields */
   8741     assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
   8742     /* op0 only exists in the AArch64 encodings */
   8743     assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
   8744     /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
   8745     assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
   8746     /*
   8747      * This API is only for Arm's system coprocessors (14 and 15) or
   8748      * (M-profile or v7A-and-earlier only) for implementation defined
   8749      * coprocessors in the range 0..7.  Our decode assumes this, since
   8750      * 8..13 can be used for other insns including VFP and Neon. See
   8751      * valid_cp() in translate.c.  Assert here that we haven't tried
   8752      * to use an invalid coprocessor number.
   8753      */
   8754     switch (r->state) {
   8755     case ARM_CP_STATE_BOTH:
   8756         /* 0 has a special meaning, but otherwise the same rules as AA32. */
   8757         if (r->cp == 0) {
   8758             break;
   8759         }
   8760         /* fall through */
   8761     case ARM_CP_STATE_AA32:
   8762         if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
   8763             !arm_feature(&cpu->env, ARM_FEATURE_M)) {
   8764             assert(r->cp >= 14 && r->cp <= 15);
   8765         } else {
   8766             assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
   8767         }
   8768         break;
   8769     case ARM_CP_STATE_AA64:
   8770         assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
   8771         break;
   8772     default:
   8773         g_assert_not_reached();
   8774     }
   8775     /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
   8776      * encodes a minimum access level for the register. We roll this
   8777      * runtime check into our general permission check code, so check
   8778      * here that the reginfo's specified permissions are strict enough
   8779      * to encompass the generic architectural permission check.
   8780      */
   8781     if (r->state != ARM_CP_STATE_AA32) {
   8782         CPAccessRights mask;
   8783         switch (r->opc1) {
   8784         case 0:
   8785             /* min_EL EL1, but some accessible to EL0 via kernel ABI */
   8786             mask = PL0U_R | PL1_RW;
   8787             break;
   8788         case 1: case 2:
   8789             /* min_EL EL1 */
   8790             mask = PL1_RW;
   8791             break;
   8792         case 3:
   8793             /* min_EL EL0 */
   8794             mask = PL0_RW;
   8795             break;
   8796         case 4:
   8797         case 5:
   8798             /* min_EL EL2 */
   8799             mask = PL2_RW;
   8800             break;
   8801         case 6:
   8802             /* min_EL EL3 */
   8803             mask = PL3_RW;
   8804             break;
   8805         case 7:
   8806             /* min_EL EL1, secure mode only (we don't check the latter) */
   8807             mask = PL1_RW;
   8808             break;
   8809         default:
   8810             /* broken reginfo with out-of-range opc1 */
   8811             g_assert_not_reached();
   8812         }
   8813         /* assert our permissions are not too lax (stricter is fine) */
   8814         assert((r->access & ~mask) == 0);
   8815     }
   8816 
   8817     /* Check that the register definition has enough info to handle
   8818      * reads and writes if they are permitted.
   8819      */
   8820     if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
   8821         if (r->access & PL3_R) {
   8822             assert((r->fieldoffset ||
   8823                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
   8824                    r->readfn);
   8825         }
   8826         if (r->access & PL3_W) {
   8827             assert((r->fieldoffset ||
   8828                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
   8829                    r->writefn);
   8830         }
   8831     }
   8832 
   8833     for (crm = crmmin; crm <= crmmax; crm++) {
   8834         for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
   8835             for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
   8836                 for (state = ARM_CP_STATE_AA32;
   8837                      state <= ARM_CP_STATE_AA64; state++) {
   8838                     if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
   8839                         continue;
   8840                     }
   8841                     if (state == ARM_CP_STATE_AA32) {
   8842                         /* Under AArch32 CP registers can be common
   8843                          * (same for secure and non-secure world) or banked.
   8844                          */
   8845                         char *name;
   8846 
   8847                         switch (r->secure) {
   8848                         case ARM_CP_SECSTATE_S:
   8849                         case ARM_CP_SECSTATE_NS:
   8850                             add_cpreg_to_hashtable(cpu, r, opaque, state,
   8851                                                    r->secure, crm, opc1, opc2,
   8852                                                    r->name);
   8853                             break;
   8854                         case ARM_CP_SECSTATE_BOTH:
   8855                             name = g_strdup_printf("%s_S", r->name);
   8856                             add_cpreg_to_hashtable(cpu, r, opaque, state,
   8857                                                    ARM_CP_SECSTATE_S,
   8858                                                    crm, opc1, opc2, name);
   8859                             g_free(name);
   8860                             add_cpreg_to_hashtable(cpu, r, opaque, state,
   8861                                                    ARM_CP_SECSTATE_NS,
   8862                                                    crm, opc1, opc2, r->name);
   8863                             break;
   8864                         default:
   8865                             g_assert_not_reached();
   8866                         }
   8867                     } else {
   8868                         /* AArch64 registers get mapped to the non-secure
   8869                          * instance of the AArch32 view */
   8870                         add_cpreg_to_hashtable(cpu, r, opaque, state,
   8871                                                ARM_CP_SECSTATE_NS,
   8872                                                crm, opc1, opc2, r->name);
   8873                     }
   8874                 }
   8875             }
   8876         }
   8877     }
   8878 }
   8879 
   8880 /* Define a whole list of registers */
   8881 void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
   8882                                         void *opaque, size_t len)
   8883 {
   8884     size_t i;
   8885     for (i = 0; i < len; ++i) {
   8886         define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
   8887     }
   8888 }
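
/*
 * Illustrative sketch (hypothetical, compiled out): how a wildcarded
 * constant register might be declared and registered through the API
 * above.  "EXAMPLE_REG" and its encoding are invented for illustration;
 * the field values are chosen to satisfy the asserts in
 * define_one_arm_cp_reg_with_opaque().
 */
#if 0
static const ARMCPRegInfo example_regs[] = {
    { .name = "EXAMPLE_REG", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
};

static void example_register_cpregs(ARMCPU *cpu)
{
    /* Expands to one hashtable entry per opc2 value 0..7, per state;
     * only the opc2 == 0 entry remains migratable, the rest become
     * ARM_CP_ALIAS | ARM_CP_NO_GDB as described above.
     */
    define_arm_cp_regs_with_opaque_len(cpu, example_regs, NULL,
                                       ARRAY_SIZE(example_regs));
}
#endif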
   8889 
   8890 /*
   8891  * Modify ARMCPRegInfo for access from userspace.
   8892  *
   8893  * This is a data driven modification directed by
   8894  * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
   8895  * user-space cannot alter any values and dynamic values pertaining to
   8896  * execution state are hidden from user space view anyway.
   8897  */
   8898 void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
   8899                                  const ARMCPRegUserSpaceInfo *mods,
   8900                                  size_t mods_len)
   8901 {
   8902     for (size_t mi = 0; mi < mods_len; ++mi) {
   8903         const ARMCPRegUserSpaceInfo *m = mods + mi;
   8904         GPatternSpec *pat = NULL;
   8905 
   8906         if (m->is_glob) {
   8907             pat = g_pattern_spec_new(m->name);
   8908         }
   8909         for (size_t ri = 0; ri < regs_len; ++ri) {
   8910             ARMCPRegInfo *r = regs + ri;
   8911 
   8912             if (pat && g_pattern_match_string(pat, r->name)) {
   8913                 r->type = ARM_CP_CONST;
   8914                 r->access = PL0U_R;
   8915                 r->resetvalue = 0;
   8916                 /* continue */
   8917             } else if (strcmp(r->name, m->name) == 0) {
   8918                 r->type = ARM_CP_CONST;
   8919                 r->access = PL0U_R;
   8920                 r->resetvalue &= m->exported_bits;
   8921                 r->resetvalue |= m->fixed_bits;
   8922                 break;
   8923             }
   8924         }
   8925         if (pat) {
   8926             g_pattern_spec_free(pat);
   8927         }
   8928     }
   8929 }
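
/*
 * Illustrative sketch (hypothetical, compiled out): a mods table for the
 * function above.  A glob entry turns every matching register into a
 * constant-zero one; an exact-name entry can keep selected reset-value
 * bits and pin others.  The names and masks here are invented.
 */
#if 0
static const ARMCPRegUserSpaceInfo example_user_mods[] = {
    { .name = "EXAMPLE_*", .is_glob = true },
    { .name = "EXAMPLE_FEATURES",
      .exported_bits = 0xffull,    /* keep the low byte of resetvalue */
      .fixed_bits = 0x100ull },    /* force bit 8 to 1 */
};

static void example_apply_user_mods(ARMCPRegInfo *regs, size_t n_regs)
{
    modify_arm_cp_regs_with_len(regs, n_regs, example_user_mods,
                                ARRAY_SIZE(example_user_mods));
}
#endif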
   8930 
   8931 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
   8932 {
   8933     return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
   8934 }
   8935 
   8936 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
   8937                          uint64_t value)
   8938 {
   8939     /* Helper coprocessor write function for write-ignore registers */
   8940 }
   8941 
   8942 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
   8943 {
   8944     /* Helper coprocessor read function for read-as-zero registers */
   8945     return 0;
   8946 }
   8947 
   8948 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
   8949 {
   8950     /* Helper coprocessor reset function for do-nothing-on-reset registers */
   8951 }
   8952 
   8953 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
   8954 {
   8955     /* Return true if it is not valid for us to switch to
   8956      * this CPU mode (i.e. all the UNPREDICTABLE cases in
   8957      * the ARM ARM CPSRWriteByInstr pseudocode).
   8958      */
   8959 
   8960     /* Changes to or from Hyp via MSR and CPS are illegal. */
   8961     if (write_type == CPSRWriteByInstr &&
   8962         ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
   8963          mode == ARM_CPU_MODE_HYP)) {
   8964         return 1;
   8965     }
   8966 
   8967     switch (mode) {
   8968     case ARM_CPU_MODE_USR:
   8969         return 0;
   8970     case ARM_CPU_MODE_SYS:
   8971     case ARM_CPU_MODE_SVC:
   8972     case ARM_CPU_MODE_ABT:
   8973     case ARM_CPU_MODE_UND:
   8974     case ARM_CPU_MODE_IRQ:
   8975     case ARM_CPU_MODE_FIQ:
   8976         /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
   8977          * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
   8978          */
   8979         /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
   8980          * and CPS are treated as illegal mode changes.
   8981          */
   8982         if (write_type == CPSRWriteByInstr &&
   8983             (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
   8984             (arm_hcr_el2_eff(env) & HCR_TGE)) {
   8985             return 1;
   8986         }
   8987         return 0;
   8988     case ARM_CPU_MODE_HYP:
   8989         return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
   8990     case ARM_CPU_MODE_MON:
   8991         return arm_current_el(env) < 3;
   8992     default:
   8993         return 1;
   8994     }
   8995 }
   8996 
   8997 uint32_t cpsr_read(CPUARMState *env)
   8998 {
   8999     int ZF;
   9000     ZF = (env->ZF == 0);
   9001     return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
   9002         (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
   9003         | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
   9004         | ((env->condexec_bits & 0xfc) << 8)
   9005         | (env->GE << 16) | (env->daif & CPSR_AIF);
   9006 }
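
/*
 * For reference, the CPSR bit layout as reassembled by cpsr_read() and
 * consumed by cpsr_write() (a sketch derived from the code, not an
 * exhaustive architectural description):
 *   31 N   30 Z   29 C   28 V   27 Q   26:25 IT[1:0]   24 J
 *   23 SSBS   22 PAN   21 DIT   19:16 GE[3:0]   15:10 IT[7:2]
 *   9 E   8 A   7 I   6 F   5 T   4:0 M
 * NZCV, Q, GE, the IT bits, A/I/F and T live in dedicated CPUARMState
 * fields (NF/ZF/CF/VF, QF, GE, condexec_bits, daif, thumb): these are
 * the CACHED_CPSR_BITS; everything else is held in uncached_cpsr.
 */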
   9007 
   9008 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
   9009                 CPSRWriteType write_type)
   9010 {
   9011     uint32_t changed_daif;
   9012     bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
   9013         (mask & (CPSR_M | CPSR_E | CPSR_IL));
   9014 
   9015     if (mask & CPSR_NZCV) {
   9016         env->ZF = (~val) & CPSR_Z;
   9017         env->NF = val;
   9018         env->CF = (val >> 29) & 1;
   9019         env->VF = (val << 3) & 0x80000000;
   9020     }
   9021     if (mask & CPSR_Q)
   9022         env->QF = ((val & CPSR_Q) != 0);
   9023     if (mask & CPSR_T)
   9024         env->thumb = ((val & CPSR_T) != 0);
   9025     if (mask & CPSR_IT_0_1) {
   9026         env->condexec_bits &= ~3;
   9027         env->condexec_bits |= (val >> 25) & 3;
   9028     }
   9029     if (mask & CPSR_IT_2_7) {
   9030         env->condexec_bits &= 3;
   9031         env->condexec_bits |= (val >> 8) & 0xfc;
   9032     }
   9033     if (mask & CPSR_GE) {
   9034         env->GE = (val >> 16) & 0xf;
   9035     }
   9036 
   9037     /* In a V7 implementation that includes the security extensions but does
   9038      * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
   9039      * whether non-secure software is allowed to change the CPSR_F and CPSR_A
   9040      * bits respectively.
   9041      *
   9042      * In a V8 implementation, it is permitted for privileged software to
   9043      * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
   9044      */
   9045     if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
   9046         arm_feature(env, ARM_FEATURE_EL3) &&
   9047         !arm_feature(env, ARM_FEATURE_EL2) &&
   9048         !arm_is_secure(env)) {
   9049 
   9050         changed_daif = (env->daif ^ val) & mask;
   9051 
   9052         if (changed_daif & CPSR_A) {
   9053             /* Check to see if we are allowed to change the masking of async
   9054              * abort exceptions from a non-secure state.
   9055              */
   9056             if (!(env->cp15.scr_el3 & SCR_AW)) {
   9057                 qemu_log_mask(LOG_GUEST_ERROR,
   9058                               "Ignoring attempt to switch CPSR_A flag from "
   9059                               "non-secure world with SCR.AW bit clear\n");
   9060                 mask &= ~CPSR_A;
   9061             }
   9062         }
   9063 
   9064         if (changed_daif & CPSR_F) {
   9065             /* Check to see if we are allowed to change the masking of FIQ
   9066              * exceptions from a non-secure state.
   9067              */
   9068             if (!(env->cp15.scr_el3 & SCR_FW)) {
   9069                 qemu_log_mask(LOG_GUEST_ERROR,
   9070                               "Ignoring attempt to switch CPSR_F flag from "
   9071                               "non-secure world with SCR.FW bit clear\n");
   9072                 mask &= ~CPSR_F;
   9073             }
   9074 
   9075             /* Check whether non-maskable FIQ (NMFI) support is enabled.
   9076              * If this bit is set software is not allowed to mask
   9077              * FIQs, but is allowed to set CPSR_F to 0.
   9078              */
   9079             if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
   9080                 (val & CPSR_F)) {
   9081                 qemu_log_mask(LOG_GUEST_ERROR,
   9082                               "Ignoring attempt to enable CPSR_F flag "
   9083                               "(non-maskable FIQ [NMFI] support enabled)\n");
   9084                 mask &= ~CPSR_F;
   9085             }
   9086         }
   9087     }
   9088 
   9089     env->daif &= ~(CPSR_AIF & mask);
   9090     env->daif |= val & CPSR_AIF & mask;
   9091 
   9092     if (write_type != CPSRWriteRaw &&
   9093         ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
   9094         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
   9095             /* Note that we can only get here in USR mode if this is a
   9096              * gdb stub write; for this case we follow the architectural
   9097              * behaviour for guest writes in USR mode of ignoring an attempt
   9098              * to switch mode. (Those are caught by translate.c for writes
   9099              * triggered by guest instructions.)
   9100              */
   9101             mask &= ~CPSR_M;
   9102         } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
   9103             /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
   9104              * v7, and has defined behaviour in v8:
   9105              *  + leave CPSR.M untouched
   9106              *  + allow changes to the other CPSR fields
   9107              *  + set PSTATE.IL
   9108              * For user changes via the GDB stub, we don't set PSTATE.IL,
   9109              * as this would be unnecessarily harsh for a user error.
   9110              */
   9111             mask &= ~CPSR_M;
   9112             if (write_type != CPSRWriteByGDBStub &&
   9113                 arm_feature(env, ARM_FEATURE_V8)) {
   9114                 mask |= CPSR_IL;
   9115                 val |= CPSR_IL;
   9116             }
   9117             qemu_log_mask(LOG_GUEST_ERROR,
   9118                           "Illegal AArch32 mode switch attempt from %s to %s\n",
   9119                           aarch32_mode_name(env->uncached_cpsr),
   9120                           aarch32_mode_name(val));
   9121         } else {
   9122             qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
   9123                           write_type == CPSRWriteExceptionReturn ?
   9124                           "Exception return from AArch32" :
   9125                           "AArch32 mode switch from",
   9126                           aarch32_mode_name(env->uncached_cpsr),
   9127                           aarch32_mode_name(val), env->regs[15]);
   9128             switch_mode(env, val & CPSR_M);
   9129         }
   9130     }
   9131     mask &= ~CACHED_CPSR_BITS;
   9132     env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
   9133     if (rebuild_hflags) {
   9134         arm_rebuild_hflags(env);
   9135     }
   9136 }
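
/*
 * Usage sketch: a caller updating only the Q flag would pass a one-bit
 * mask, leaving every other CPSR field untouched:
 *
 *     cpsr_write(env, CPSR_Q, CPSR_Q, CPSRWriteRaw);   set Q
 *     cpsr_write(env, 0, CPSR_Q, CPSRWriteRaw);        clear Q
 *
 * As the code above shows, CPSRWriteRaw also bypasses the SCR.AW/FW
 * checks, the mode-switch handling and the hflags rebuild, so it is
 * only appropriate when no mode or state change can be implied.
 */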
   9137 
   9138 /* Sign/zero extend */
   9139 uint32_t HELPER(sxtb16)(uint32_t x)
   9140 {
   9141     uint32_t res;
   9142     res = (uint16_t)(int8_t)x;
   9143     res |= (uint32_t)(int8_t)(x >> 16) << 16;
   9144     return res;
   9145 }
   9146 
   9147 static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
   9148 {
   9149     /*
   9150      * Take a division-by-zero exception if necessary; otherwise return
   9151      * to get the usual non-trapping division behaviour (result of 0)
   9152      */
   9153     if (arm_feature(env, ARM_FEATURE_M)
   9154         && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
   9155         raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
   9156     }
   9157 }
   9158 
   9159 uint32_t HELPER(uxtb16)(uint32_t x)
   9160 {
   9161     uint32_t res;
   9162     res = (uint16_t)(uint8_t)x;
   9163     res |= (uint32_t)(uint8_t)(x >> 16) << 16;
   9164     return res;
   9165 }
   9166 
   9167 int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
   9168 {
   9169     if (den == 0) {
   9170         handle_possible_div0_trap(env, GETPC());
   9171         return 0;
   9172     }
   9173     if (num == INT_MIN && den == -1) {
   9174         return INT_MIN;
   9175     }
   9176     return num / den;
   9177 }
   9178 
   9179 uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
   9180 {
   9181     if (den == 0) {
   9182         handle_possible_div0_trap(env, GETPC());
   9183         return 0;
   9184     }
   9185     return num / den;
   9186 }
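
/*
 * Worked examples of the division semantics implemented above:
 *   sdiv:  7 / -2 = -3             (C division truncates toward zero)
 *   sdiv:  INT_MIN / -1 = INT_MIN  (the one signed-overflow case)
 *   sdiv/udiv:  x / 0 = 0, unless this is an M-profile core with
 *   CCR.DIV_0_TRP set, in which case a DIVBYZERO UsageFault is raised.
 */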
   9187 
   9188 uint32_t HELPER(rbit)(uint32_t x)
   9189 {
   9190     return revbit32(x);
   9191 }
   9192 
   9193 #ifdef CONFIG_USER_ONLY
   9194 
   9195 static void switch_mode(CPUARMState *env, int mode)
   9196 {
   9197     ARMCPU *cpu = env_archcpu(env);
   9198 
   9199     if (mode != ARM_CPU_MODE_USR) {
   9200         cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
   9201     }
   9202 }
   9203 
   9204 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
   9205                                  uint32_t cur_el, bool secure)
   9206 {
   9207     return 1;
   9208 }
   9209 
   9210 void aarch64_sync_64_to_32(CPUARMState *env)
   9211 {
   9212     g_assert_not_reached();
   9213 }
   9214 
   9215 #else
   9216 
   9217 static void switch_mode(CPUARMState *env, int mode)
   9218 {
   9219     int old_mode;
   9220     int i;
   9221 
   9222     old_mode = env->uncached_cpsr & CPSR_M;
   9223     if (mode == old_mode)
   9224         return;
   9225 
   9226     if (old_mode == ARM_CPU_MODE_FIQ) {
   9227         memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
   9228         memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
   9229     } else if (mode == ARM_CPU_MODE_FIQ) {
   9230         memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
   9231         memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
   9232     }
   9233 
   9234     i = bank_number(old_mode);
   9235     env->banked_r13[i] = env->regs[13];
   9236     env->banked_spsr[i] = env->spsr;
   9237 
   9238     i = bank_number(mode);
   9239     env->regs[13] = env->banked_r13[i];
   9240     env->spsr = env->banked_spsr[i];
   9241 
   9242     env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
   9243     env->regs[14] = env->banked_r14[r14_bank_number(mode)];
   9244 }
   9245 
   9246 /* Physical Interrupt Target EL Lookup Table
   9247  *
   9248  * [ From ARM ARM section G1.13.4 (Table G1-15) ]
   9249  *
   9250  * The multi-dimensional table below is used to look up the target
   9251  * exception level given numerous condition criteria.  Specifically, the
   9252  * target EL is based on SCR and HCR routing controls as well as the
   9253  * currently executing EL and secure state.
   9254  *
   9255  *    Dimensions:
   9256  *    target_el_table[2][2][2][2][2][4]
   9257  *                    |  |  |  |  |  +--- Current EL
   9258  *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
   9259  *                    |  |  |  +--------- HCR mask override
   9260  *                    |  |  +------------ SCR exec state control
   9261  *                    |  +--------------- SCR mask override
   9262  *                    +------------------ 32-bit(0)/64-bit(1) EL3
   9263  *
   9264  *    The table values are as such:
   9265  *    0-3 = EL0-EL3
   9266  *     -1 = Cannot occur
   9267  *
   9268  * The ARM ARM target EL table includes entries indicating that an "exception
   9269  * is not taken".  The two cases where this is applicable are:
   9270  *    1) An exception is taken from EL3 but the SCR does not have the exception
   9271  *    routed to EL3.
   9272  *    2) An exception is taken from EL2 but the HCR does not have the exception
   9273  *    routed to EL2.
   9274  * In these two cases, the table below contains a target of EL1.  This value is
   9275  * returned as it is expected that the consumer of the table data will check
   9276  * for "target EL >= current EL" to ensure the exception is not taken.
   9277  *
   9278  *            SCR     HCR
   9279  *         64  EA     AMO                 From
   9280  *        BIT IRQ     IMO      Non-secure         Secure
   9281  *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
   9282  */
   9283 static const int8_t target_el_table[2][2][2][2][2][4] = {
   9284     {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
   9285        {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
   9286       {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
   9287        {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
   9288      {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
   9289        {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
   9290       {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
   9291        {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
   9292     {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
   9293        {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
   9294       {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
   9295        {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
   9296      {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
   9297        {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
   9298       {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
   9299        {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
   9300 };
   9301 
   9302 /*
   9303  * Determine the target EL for physical exceptions
   9304  */
   9305 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
   9306                                  uint32_t cur_el, bool secure)
   9307 {
   9308     CPUARMState *env = cs->env_ptr;
   9309     bool rw;
   9310     bool scr;
   9311     bool hcr;
   9312     int target_el;
   9313     /* Is the highest EL AArch64? */
   9314     bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
   9315     uint64_t hcr_el2;
   9316 
   9317     if (arm_feature(env, ARM_FEATURE_EL3)) {
   9318         rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
   9319     } else {
   9320         /* Either EL2 is the highest EL (and so the EL2 register width
   9321          * is given by is64); or there is no EL2 or EL3, in which case
   9322          * the value of 'rw' does not affect the table lookup anyway.
   9323          */
   9324         rw = is64;
   9325     }
   9326 
   9327     hcr_el2 = arm_hcr_el2_eff(env);
   9328     switch (excp_idx) {
   9329     case EXCP_IRQ:
   9330         scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
   9331         hcr = hcr_el2 & HCR_IMO;
   9332         break;
   9333     case EXCP_FIQ:
   9334         scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
   9335         hcr = hcr_el2 & HCR_FMO;
   9336         break;
   9337     default:
   9338         scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
   9339         hcr = hcr_el2 & HCR_AMO;
   9340         break;
   9341     }
   9342 
   9343     /*
   9344      * For these purposes, TGE and AMO/IMO/FMO both force the
   9345      * interrupt to EL2.  Fold TGE into the bit extracted above.
   9346      */
   9347     hcr |= (hcr_el2 & HCR_TGE) != 0;
   9348 
   9349     /* Perform a table-lookup for the target EL given the current state */
   9350     target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
   9351 
   9352     assert(target_el > 0);
   9353 
   9354     return target_el;
   9355 }
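
/*
 * Worked example of the lookup above: an IRQ from non-secure EL0 on a
 * CPU with AArch64 EL3 (is64 = 1), with SCR_EL3.IRQ = 0 (scr = 0),
 * SCR_EL3.RW = 1 (rw = 1) and HCR_EL2.{IMO,TGE} = 0 (hcr = 0) indexes
 * target_el_table[1][0][1][0][0][0] == 1, so the IRQ is taken to EL1.
 * Setting HCR_EL2.IMO in the same state (hcr = 1) gives
 * target_el_table[1][0][1][1][0][0] == 2: the IRQ is routed to EL2.
 */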
   9356 
   9357 void arm_log_exception(CPUState *cs)
   9358 {
   9359     int idx = cs->exception_index;
   9360 
   9361     if (qemu_loglevel_mask(CPU_LOG_INT)) {
   9362         const char *exc = NULL;
   9363         static const char * const excnames[] = {
   9364             [EXCP_UDEF] = "Undefined Instruction",
   9365             [EXCP_SWI] = "SVC",
   9366             [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
   9367             [EXCP_DATA_ABORT] = "Data Abort",
   9368             [EXCP_IRQ] = "IRQ",
   9369             [EXCP_FIQ] = "FIQ",
   9370             [EXCP_BKPT] = "Breakpoint",
   9371             [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
   9372             [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
   9373             [EXCP_HVC] = "Hypervisor Call",
   9374             [EXCP_HYP_TRAP] = "Hypervisor Trap",
   9375             [EXCP_SMC] = "Secure Monitor Call",
   9376             [EXCP_VIRQ] = "Virtual IRQ",
   9377             [EXCP_VFIQ] = "Virtual FIQ",
   9378             [EXCP_SEMIHOST] = "Semihosting call",
   9379             [EXCP_NOCP] = "v7M NOCP UsageFault",
   9380             [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
   9381             [EXCP_STKOF] = "v8M STKOF UsageFault",
   9382             [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
   9383             [EXCP_LSERR] = "v8M LSERR UsageFault",
   9384             [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
   9385             [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
   9386             [EXCP_VSERR] = "Virtual SERR",
   9387         };
   9388 
   9389         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
   9390             exc = excnames[idx];
   9391         }
   9392         if (!exc) {
   9393             exc = "unknown";
   9394         }
   9395         qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
   9396                       idx, exc, cs->cpu_index);
   9397     }
   9398 }
   9399 
   9400 /*
   9401  * Function used to synchronize QEMU's AArch64 register set with AArch32
   9402  * register set.  This is necessary when switching between AArch32 and AArch64
   9403  * execution state.
   9404  */
   9405 void aarch64_sync_32_to_64(CPUARMState *env)
   9406 {
   9407     int i;
   9408     uint32_t mode = env->uncached_cpsr & CPSR_M;
   9409 
   9410     /* We can blanket copy R[0:7] to X[0:7] */
   9411     for (i = 0; i < 8; i++) {
   9412         env->xregs[i] = env->regs[i];
   9413     }
   9414 
   9415     /*
   9416      * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
   9417      * Otherwise, they come from the banked user regs.
   9418      */
   9419     if (mode == ARM_CPU_MODE_FIQ) {
   9420         for (i = 8; i < 13; i++) {
   9421             env->xregs[i] = env->usr_regs[i - 8];
   9422         }
   9423     } else {
   9424         for (i = 8; i < 13; i++) {
   9425             env->xregs[i] = env->regs[i];
   9426         }
   9427     }
   9428 
   9429     /*
   9430      * Registers x13-x23 are the various mode SP and LR registers. Registers
   9431      * r13 and r14 are only copied if we are in that mode, otherwise we copy
   9432      * from the mode banked register.
   9433      */
   9434     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
   9435         env->xregs[13] = env->regs[13];
   9436         env->xregs[14] = env->regs[14];
   9437     } else {
   9438         env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
   9439         /* HYP is an exception: lr is not banked there, so copy the live r14 */
   9440         if (mode == ARM_CPU_MODE_HYP) {
   9441             env->xregs[14] = env->regs[14];
   9442         } else {
   9443             env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
   9444         }
   9445     }
   9446 
   9447     if (mode == ARM_CPU_MODE_HYP) {
   9448         env->xregs[15] = env->regs[13];
   9449     } else {
   9450         env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
   9451     }
   9452 
   9453     if (mode == ARM_CPU_MODE_IRQ) {
   9454         env->xregs[16] = env->regs[14];
   9455         env->xregs[17] = env->regs[13];
   9456     } else {
   9457         env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
   9458         env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
   9459     }
   9460 
   9461     if (mode == ARM_CPU_MODE_SVC) {
   9462         env->xregs[18] = env->regs[14];
   9463         env->xregs[19] = env->regs[13];
   9464     } else {
   9465         env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
   9466         env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
   9467     }
   9468 
   9469     if (mode == ARM_CPU_MODE_ABT) {
   9470         env->xregs[20] = env->regs[14];
   9471         env->xregs[21] = env->regs[13];
   9472     } else {
   9473         env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
   9474         env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
   9475     }
   9476 
   9477     if (mode == ARM_CPU_MODE_UND) {
   9478         env->xregs[22] = env->regs[14];
   9479         env->xregs[23] = env->regs[13];
   9480     } else {
   9481         env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
   9482         env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
   9483     }
   9484 
   9485     /*
   9486      * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
   9487      * mode, then we can copy from r8-r14.  Otherwise, we copy from the
   9488      * FIQ bank for r8-r14.
   9489      */
   9490     if (mode == ARM_CPU_MODE_FIQ) {
   9491         for (i = 24; i < 31; i++) {
   9492             env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
   9493         }
   9494     } else {
   9495         for (i = 24; i < 29; i++) {
   9496             env->xregs[i] = env->fiq_regs[i - 24];
   9497         }
   9498         env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
   9499         env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
   9500     }
   9501 
   9502     env->pc = env->regs[15];
   9503 }
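
/*
 * For reference, the x <-> r mapping applied by this pair of sync
 * functions (summarised from the code above and below):
 *   x0-x7   <-> r0-r7             (common to all modes)
 *   x8-x12  <-> usr r8-r12        (FIQ keeps its own copies)
 *   x13,x14 <-> usr sp, usr lr    (HYP has no banked lr, so also uses x14)
 *   x15     <-> hyp sp
 *   x16,x17 <-> irq lr, irq sp
 *   x18,x19 <-> svc lr, svc sp
 *   x20,x21 <-> abt lr, abt sp
 *   x22,x23 <-> und lr, und sp
 *   x24-x30 <-> fiq r8-r12, fiq sp, fiq lr
 *   pc      <-> r15
 */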
   9504 
   9505 /*
   9506  * Function used to synchronize QEMU's AArch32 register set with the AArch64
   9507  * register set.  This is necessary when switching between AArch32 and AArch64
   9508  * execution state.
   9509  */
   9510 void aarch64_sync_64_to_32(CPUARMState *env)
   9511 {
   9512     int i;
   9513     uint32_t mode = env->uncached_cpsr & CPSR_M;
   9514 
   9515     /* We can blanket copy X[0:7] to R[0:7] */
   9516     for (i = 0; i < 8; i++) {
   9517         env->regs[i] = env->xregs[i];
   9518     }
   9519 
   9520     /*
   9521      * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
   9522      * Otherwise, we copy x8-x12 into the banked user regs.
   9523      */
   9524     if (mode == ARM_CPU_MODE_FIQ) {
   9525         for (i = 8; i < 13; i++) {
   9526             env->usr_regs[i - 8] = env->xregs[i];
   9527         }
   9528     } else {
   9529         for (i = 8; i < 13; i++) {
   9530             env->regs[i] = env->xregs[i];
   9531         }
   9532     }
   9533 
   9534     /*
   9535      * Registers r13 & r14 depend on the current mode.
   9536      * If we are in a given mode, we copy the corresponding x registers to r13
   9537      * and r14.  Otherwise, we copy the x register to the banked r13 and r14
   9538      * for the mode.
   9539      */
   9540     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
   9541         env->regs[13] = env->xregs[13];
   9542         env->regs[14] = env->xregs[14];
   9543     } else {
   9544         env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
   9545 
   9546         /*
   9547          * HYP is an exception in that it does not have its own banked r14 but
   9548          * shares the USR r14
   9549          */
   9550         if (mode == ARM_CPU_MODE_HYP) {
   9551             env->regs[14] = env->xregs[14];
   9552         } else {
   9553             env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
   9554         }
   9555     }
   9556 
   9557     if (mode == ARM_CPU_MODE_HYP) {
   9558         env->regs[13] = env->xregs[15];
   9559     } else {
   9560         env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
   9561     }
   9562 
   9563     if (mode == ARM_CPU_MODE_IRQ) {
   9564         env->regs[14] = env->xregs[16];
   9565         env->regs[13] = env->xregs[17];
   9566     } else {
   9567         env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
   9568         env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
   9569     }
   9570 
   9571     if (mode == ARM_CPU_MODE_SVC) {
   9572         env->regs[14] = env->xregs[18];
   9573         env->regs[13] = env->xregs[19];
   9574     } else {
   9575         env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
   9576         env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
   9577     }
   9578 
   9579     if (mode == ARM_CPU_MODE_ABT) {
   9580         env->regs[14] = env->xregs[20];
   9581         env->regs[13] = env->xregs[21];
   9582     } else {
   9583         env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
   9584         env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
   9585     }
   9586 
   9587     if (mode == ARM_CPU_MODE_UND) {
   9588         env->regs[14] = env->xregs[22];
   9589         env->regs[13] = env->xregs[23];
   9590     } else {
   9591         env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
   9592         env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
   9593     }
   9594 
   9595     /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
   9596      * mode, then we can copy to r8-r14.  Otherwise, we copy to the
   9597      * FIQ bank for r8-r14.
   9598      */
   9599     if (mode == ARM_CPU_MODE_FIQ) {
   9600         for (i = 24; i < 31; i++) {
   9601             env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
   9602         }
   9603     } else {
   9604         for (i = 24; i < 29; i++) {
   9605             env->fiq_regs[i - 24] = env->xregs[i];
   9606         }
   9607         env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
   9608         env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
   9609     }
   9610 
   9611     env->regs[15] = env->pc;
   9612 }
   9613 
   9614 static void take_aarch32_exception(CPUARMState *env, int new_mode,
   9615                                    uint32_t mask, uint32_t offset,
   9616                                    uint32_t newpc)
   9617 {
   9618     int new_el;
   9619 
   9620     /* Change the CPU state so as to actually take the exception. */
   9621     switch_mode(env, new_mode);
   9622 
   9623     /*
   9624      * For exceptions taken to AArch32 we must clear the SS bit in both
   9625      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
   9626      */
   9627     env->pstate &= ~PSTATE_SS;
   9628     env->spsr = cpsr_read(env);
   9629     /* Clear IT bits.  */
   9630     env->condexec_bits = 0;
   9631     /* Switch to the new mode, and to the correct instruction set.  */
   9632     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
   9633 
   9634     /* This must be after mode switching. */
   9635     new_el = arm_current_el(env);
   9636 
   9637     /* Set new mode endianness */
   9638     env->uncached_cpsr &= ~CPSR_E;
   9639     if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
   9640         env->uncached_cpsr |= CPSR_E;
   9641     }
   9642     /* J and IL must always be cleared for exception entry */
   9643     env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
   9644     env->daif |= mask;
   9645 
   9646     if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
   9647         if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
   9648             env->uncached_cpsr |= CPSR_SSBS;
   9649         } else {
   9650             env->uncached_cpsr &= ~CPSR_SSBS;
   9651         }
   9652     }
   9653 
   9654     if (new_mode == ARM_CPU_MODE_HYP) {
   9655         env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
   9656         env->elr_el[2] = env->regs[15];
   9657     } else {
   9658         /* CPSR.PAN is normally preserved unless...  */
   9659         if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
   9660             switch (new_el) {
   9661             case 3:
   9662                 if (!arm_is_secure_below_el3(env)) {
   9663                     /* ... the target is EL3, from non-secure state.  */
   9664                     env->uncached_cpsr &= ~CPSR_PAN;
   9665                     break;
   9666                 }
   9667                 /* ... the target is EL3, from secure state ... */
   9668                 /* fall through */
   9669             case 1:
   9670                 /* ... the target is EL1 and SCTLR.SPAN is 0.  */
   9671                 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
   9672                     env->uncached_cpsr |= CPSR_PAN;
   9673                 }
   9674                 break;
   9675             }
   9676         }
   9677         /*
   9678          * This is a lie, as there was no c1_sys on V4T/V5, but it does
   9679          * not matter in practice; we should really just guard the Thumb mode on V4.
   9680          */
   9681         if (arm_feature(env, ARM_FEATURE_V4T)) {
   9682             env->thumb =
   9683                 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
   9684         }
   9685         env->regs[14] = env->regs[15] + offset;
   9686     }
   9687     env->regs[15] = newpc;
   9688     arm_rebuild_hflags(env);
   9689 }
   9690 
   9691 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
   9692 {
   9693     /*
   9694      * Handle exception entry to Hyp mode; this is sufficiently
   9695      * different to entry to other AArch32 modes that we handle it
   9696      * separately here.
   9697      *
   9698      * The vector table entry used is always the 0x14 Hyp mode entry point,
   9699      * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
   9700      * The offset applied to the preferred return address is always zero
   9701      * (see DDI0487C.a section G1.12.3).
   9702      * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
   9703      */
   9704     uint32_t addr, mask;
   9705     ARMCPU *cpu = ARM_CPU(cs);
   9706     CPUARMState *env = &cpu->env;
   9707 
   9708     switch (cs->exception_index) {
   9709     case EXCP_UDEF:
   9710         addr = 0x04;
   9711         break;
   9712     case EXCP_SWI:
   9713         addr = 0x08;
   9714         break;
   9715     case EXCP_BKPT:
   9716         /* Fall through to prefetch abort.  */
   9717     case EXCP_PREFETCH_ABORT:
   9718         env->cp15.ifar_s = env->exception.vaddress;
   9719         qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
   9720                       (uint32_t)env->exception.vaddress);
   9721         addr = 0x0c;
   9722         break;
   9723     case EXCP_DATA_ABORT:
   9724         env->cp15.dfar_s = env->exception.vaddress;
   9725         qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
   9726                       (uint32_t)env->exception.vaddress);
   9727         addr = 0x10;
   9728         break;
   9729     case EXCP_IRQ:
   9730         addr = 0x18;
   9731         break;
   9732     case EXCP_FIQ:
   9733         addr = 0x1c;
   9734         break;
   9735     case EXCP_HVC:
   9736         addr = 0x08;
   9737         break;
   9738     case EXCP_HYP_TRAP:
   9739         addr = 0x14;
   9740         break;
   9741     default:
   9742         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
   9743     }
   9744 
   9745     if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
   9746         if (!arm_feature(env, ARM_FEATURE_V8)) {
   9747             /*
   9748              * QEMU syndrome values are v8-style. v7 has the IL bit
   9749              * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
   9750              * If this is a v7 CPU, squash the IL bit in those cases.
   9751              */
   9752             if (cs->exception_index == EXCP_PREFETCH_ABORT ||
   9753                 (cs->exception_index == EXCP_DATA_ABORT &&
   9754                  !(env->exception.syndrome & ARM_EL_ISV)) ||
   9755                 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
   9756                 env->exception.syndrome &= ~ARM_EL_IL;
   9757             }
   9758         }
   9759         env->cp15.esr_el[2] = env->exception.syndrome;
   9760     }
   9761 
   9762     if (arm_current_el(env) != 2 && addr < 0x14) {
   9763         addr = 0x14;
   9764     }
   9765 
   9766     mask = 0;
   9767     if (!(env->cp15.scr_el3 & SCR_EA)) {
   9768         mask |= CPSR_A;
   9769     }
   9770     if (!(env->cp15.scr_el3 & SCR_IRQ)) {
   9771         mask |= CPSR_I;
   9772     }
   9773     if (!(env->cp15.scr_el3 & SCR_FIQ)) {
   9774         mask |= CPSR_F;
   9775     }
   9776 
   9777     addr += env->cp15.hvbar;
   9778 
   9779     take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
   9780 }
   9781 
   9782 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
   9783 {
   9784     ARMCPU *cpu = ARM_CPU(cs);
   9785     CPUARMState *env = &cpu->env;
   9786     uint32_t addr;
   9787     uint32_t mask;
   9788     int new_mode;
   9789     uint32_t offset;
   9790     uint32_t moe;
   9791 
   9792     /* If this is a debug exception we must update the DBGDSCR.MOE bits */
   9793     switch (syn_get_ec(env->exception.syndrome)) {
   9794     case EC_BREAKPOINT:
   9795     case EC_BREAKPOINT_SAME_EL:
   9796         moe = 1;
   9797         break;
   9798     case EC_WATCHPOINT:
   9799     case EC_WATCHPOINT_SAME_EL:
   9800         moe = 10;
   9801         break;
   9802     case EC_AA32_BKPT:
   9803         moe = 3;
   9804         break;
   9805     case EC_VECTORCATCH:
   9806         moe = 5;
   9807         break;
   9808     default:
   9809         moe = 0;
   9810         break;
   9811     }
   9812 
   9813     if (moe) {
   9814         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
   9815     }
   9816 
   9817     if (env->exception.target_el == 2) {
   9818         arm_cpu_do_interrupt_aarch32_hyp(cs);
   9819         return;
   9820     }
   9821 
   9822     switch (cs->exception_index) {
   9823     case EXCP_UDEF:
   9824         new_mode = ARM_CPU_MODE_UND;
   9825         addr = 0x04;
   9826         mask = CPSR_I;
   9827         if (env->thumb)
   9828             offset = 2;
   9829         else
   9830             offset = 4;
   9831         break;
   9832     case EXCP_SWI:
   9833         new_mode = ARM_CPU_MODE_SVC;
   9834         addr = 0x08;
   9835         mask = CPSR_I;
   9836         /* The PC already points to the next instruction.  */
   9837         offset = 0;
   9838         break;
   9839     case EXCP_BKPT:
   9840         /* Fall through to prefetch abort.  */
   9841     case EXCP_PREFETCH_ABORT:
   9842         A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
   9843         A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
   9844         qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
   9845                       env->exception.fsr, (uint32_t)env->exception.vaddress);
   9846         new_mode = ARM_CPU_MODE_ABT;
   9847         addr = 0x0c;
   9848         mask = CPSR_A | CPSR_I;
   9849         offset = 4;
   9850         break;
   9851     case EXCP_DATA_ABORT:
   9852         A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
   9853         A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
   9854         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
   9855                       env->exception.fsr,
   9856                       (uint32_t)env->exception.vaddress);
   9857         new_mode = ARM_CPU_MODE_ABT;
   9858         addr = 0x10;
   9859         mask = CPSR_A | CPSR_I;
   9860         offset = 8;
   9861         break;
   9862     case EXCP_IRQ:
   9863         new_mode = ARM_CPU_MODE_IRQ;
   9864         addr = 0x18;
   9865         /* Disable IRQ and imprecise data aborts.  */
   9866         mask = CPSR_A | CPSR_I;
   9867         offset = 4;
   9868         if (env->cp15.scr_el3 & SCR_IRQ) {
   9869             /* IRQ routed to monitor mode */
   9870             new_mode = ARM_CPU_MODE_MON;
   9871             mask |= CPSR_F;
   9872         }
   9873         break;
   9874     case EXCP_FIQ:
   9875         new_mode = ARM_CPU_MODE_FIQ;
   9876         addr = 0x1c;
   9877         /* Disable FIQ, IRQ and imprecise data aborts.  */
   9878         mask = CPSR_A | CPSR_I | CPSR_F;
   9879         if (env->cp15.scr_el3 & SCR_FIQ) {
   9880             /* FIQ routed to monitor mode */
   9881             new_mode = ARM_CPU_MODE_MON;
   9882         }
   9883         offset = 4;
   9884         break;
   9885     case EXCP_VIRQ:
   9886         new_mode = ARM_CPU_MODE_IRQ;
   9887         addr = 0x18;
   9888         /* Disable IRQ and imprecise data aborts.  */
   9889         mask = CPSR_A | CPSR_I;
   9890         offset = 4;
   9891         break;
   9892     case EXCP_VFIQ:
   9893         new_mode = ARM_CPU_MODE_FIQ;
   9894         addr = 0x1c;
   9895         /* Disable FIQ, IRQ and imprecise data aborts.  */
   9896         mask = CPSR_A | CPSR_I | CPSR_F;
   9897         offset = 4;
   9898         break;
   9899     case EXCP_VSERR:
   9900         {
   9901             /*
   9902              * Note that this is reported as a data abort, but the DFAR
   9903              * has an UNKNOWN value.  Construct the SError syndrome from
   9904              * AET and ExT fields.
   9905              */
   9906             ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
   9907 
   9908             if (extended_addresses_enabled(env)) {
   9909                 env->exception.fsr = arm_fi_to_lfsc(&fi);
   9910             } else {
   9911                 env->exception.fsr = arm_fi_to_sfsc(&fi);
   9912             }
   9913             env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
   9914             A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
   9915             qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x\n",
   9916                           env->exception.fsr);
   9917 
   9918             new_mode = ARM_CPU_MODE_ABT;
   9919             addr = 0x10;
   9920             mask = CPSR_A | CPSR_I;
   9921             offset = 8;
   9922         }
   9923         break;
   9924     case EXCP_SMC:
   9925         new_mode = ARM_CPU_MODE_MON;
   9926         addr = 0x08;
   9927         mask = CPSR_A | CPSR_I | CPSR_F;
   9928         offset = 0;
   9929         break;
   9930     default:
   9931         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
   9932         return; /* Never happens.  Keep compiler happy.  */
   9933     }
   9934 
   9935     if (new_mode == ARM_CPU_MODE_MON) {
   9936         addr += env->cp15.mvbar;
   9937     } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
   9938         /* High vectors. When enabled, base address cannot be remapped. */
   9939         addr += 0xffff0000;
   9940     } else {
   9941         /* ARM v7 architectures provide a vector base address register to remap
   9942          * the interrupt vector table.
   9943          * This register is only honoured in non-monitor mode, and is banked.
   9944          * Note: only bits 31:5 are valid.
   9945          */
   9946         addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
   9947     }
   9948 
   9949     if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
   9950         env->cp15.scr_el3 &= ~SCR_NS;
   9951     }
   9952 
   9953     take_aarch32_exception(env, new_mode, mask, offset, addr);
   9954 }
   9955 
   9956 static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
   9957 {
   9958     /*
   9959      * Return the register number of the AArch64 view of the AArch32
   9960      * register @aarch32_reg. The CPUARMState CPSR is assumed to still
   9961      * be that of the AArch32 mode the exception came from.
   9962      */
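    /*
     * The mapping below is the architected AArch32-to-AArch64 register
     * mapping (e.g. LR_irq is X16, SP_irq is X17, SP_fiq is X29 and
     * LR_fiq is X30), which also matches the env->xregs[] layout used
     * by aarch64_sync_32_to_64().
     */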
   9963     int mode = env->uncached_cpsr & CPSR_M;
   9964 
   9965     switch (aarch32_reg) {
   9966     case 0 ... 7:
   9967         return aarch32_reg;
   9968     case 8 ... 12:
   9969         return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
   9970     case 13:
   9971         switch (mode) {
   9972         case ARM_CPU_MODE_USR:
   9973         case ARM_CPU_MODE_SYS:
   9974             return 13;
   9975         case ARM_CPU_MODE_HYP:
   9976             return 15;
   9977         case ARM_CPU_MODE_IRQ:
   9978             return 17;
   9979         case ARM_CPU_MODE_SVC:
   9980             return 19;
   9981         case ARM_CPU_MODE_ABT:
   9982             return 21;
   9983         case ARM_CPU_MODE_UND:
   9984             return 23;
   9985         case ARM_CPU_MODE_FIQ:
   9986             return 29;
   9987         default:
   9988             g_assert_not_reached();
   9989         }
   9990     case 14:
   9991         switch (mode) {
   9992         case ARM_CPU_MODE_USR:
   9993         case ARM_CPU_MODE_SYS:
   9994         case ARM_CPU_MODE_HYP:
   9995             return 14;
   9996         case ARM_CPU_MODE_IRQ:
   9997             return 16;
   9998         case ARM_CPU_MODE_SVC:
   9999             return 18;
  10000         case ARM_CPU_MODE_ABT:
  10001             return 20;
  10002         case ARM_CPU_MODE_UND:
  10003             return 22;
  10004         case ARM_CPU_MODE_FIQ:
  10005             return 30;
  10006         default:
  10007             g_assert_not_reached();
  10008         }
  10009     case 15:
  10010         return 31;
  10011     default:
  10012         g_assert_not_reached();
  10013     }
  10014 }
  10015 
  10016 static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
  10017 {
  10018     uint32_t ret = cpsr_read(env);
  10019 
  10020     /* Move DIT to the correct location for SPSR_ELx */
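    /* (CPSR.DIT is bit 21, while SPSR_ELx.DIT is bit 24.) */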
  10021     if (ret & CPSR_DIT) {
  10022         ret &= ~CPSR_DIT;
  10023         ret |= PSTATE_DIT;
  10024     }
  10025     /* Merge PSTATE.SS into SPSR_ELx */
  10026     ret |= env->pstate & PSTATE_SS;
  10027 
  10028     return ret;
  10029 }
  10030 
  10031 static bool syndrome_is_sync_extabt(uint32_t syndrome)
  10032 {
  10033     /* Return true if this syndrome value is a synchronous external abort */
  10034     switch (syn_get_ec(syndrome)) {
  10035     case EC_INSNABORT:
  10036     case EC_INSNABORT_SAME_EL:
  10037     case EC_DATAABORT:
  10038     case EC_DATAABORT_SAME_EL:
  10039         /* Look at fault status code for all the synchronous ext abort cases */
  10040         switch (syndrome & 0x3f) {
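        /*
         * FSC 0x10: sync external abort, not on a translation table walk;
         * 0x13: on a level -1 walk (FEAT_LPA2); 0x14..0x17: on a
         * level 0..3 walk.
         */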
  10041         case 0x10:
  10042         case 0x13:
  10043         case 0x14:
  10044         case 0x15:
  10045         case 0x16:
  10046         case 0x17:
  10047             return true;
  10048         default:
  10049             return false;
  10050         }
  10051     default:
  10052         return false;
  10053     }
  10054 }
  10055 
  10056 /* Handle exception entry to a target EL which is using AArch64 */
  10057 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
  10058 {
  10059     ARMCPU *cpu = ARM_CPU(cs);
  10060     CPUARMState *env = &cpu->env;
  10061     unsigned int new_el = env->exception.target_el;
  10062     target_ulong addr = env->cp15.vbar_el[new_el];
  10063     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
  10064     unsigned int old_mode;
  10065     unsigned int cur_el = arm_current_el(env);
  10066     int rt;
  10067 
    /*
     * Note that new_el can never be 0.  If cur_el is 0, then the
     * el0_a64 argument is is_a64(); otherwise el0_a64 is ignored.
     */
  10072     aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
  10073 
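    /*
     * Select the vector table entry group relative to VBAR_ELx:
     *   +0x000 current EL with SP_EL0,  +0x200 current EL with SP_ELx,
     *   +0x400 lower EL using AArch64,  +0x600 lower EL using AArch32.
     * Within each group the entries are: synchronous +0x0, IRQ +0x80,
     * FIQ +0x100, SError +0x180.
     */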
  10074     if (cur_el < new_el) {
  10075         /* Entry vector offset depends on whether the implemented EL
  10076          * immediately lower than the target level is using AArch32 or AArch64
  10077          */
  10078         bool is_aa64;
  10079         uint64_t hcr;
  10080 
  10081         switch (new_el) {
  10082         case 3:
  10083             is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
  10084             break;
  10085         case 2:
  10086             hcr = arm_hcr_el2_eff(env);
  10087             if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
  10088                 is_aa64 = (hcr & HCR_RW) != 0;
  10089                 break;
  10090             }
  10091             /* fall through */
  10092         case 1:
  10093             is_aa64 = is_a64(env);
  10094             break;
  10095         default:
  10096             g_assert_not_reached();
  10097         }
  10098 
  10099         if (is_aa64) {
  10100             addr += 0x400;
  10101         } else {
  10102             addr += 0x600;
  10103         }
  10104     } else if (pstate_read(env) & PSTATE_SP) {
  10105         addr += 0x200;
  10106     }
  10107 
  10108     switch (cs->exception_index) {
  10109     case EXCP_PREFETCH_ABORT:
  10110     case EXCP_DATA_ABORT:
  10111         /*
  10112          * FEAT_DoubleFault allows synchronous external aborts taken to EL3
  10113          * to be taken to the SError vector entrypoint.
  10114          */
  10115         if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
  10116             syndrome_is_sync_extabt(env->exception.syndrome)) {
  10117             addr += 0x180;
  10118         }
  10119         env->cp15.far_el[new_el] = env->exception.vaddress;
  10120         qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
  10121                       env->cp15.far_el[new_el]);
  10122         /* fall through */
  10123     case EXCP_BKPT:
  10124     case EXCP_UDEF:
  10125     case EXCP_SWI:
  10126     case EXCP_HVC:
  10127     case EXCP_HYP_TRAP:
  10128     case EXCP_SMC:
  10129         switch (syn_get_ec(env->exception.syndrome)) {
  10130         case EC_ADVSIMDFPACCESSTRAP:
  10131             /*
  10132              * QEMU internal FP/SIMD syndromes from AArch32 include the
  10133              * TA and coproc fields which are only exposed if the exception
  10134              * is taken to AArch32 Hyp mode. Mask them out to get a valid
  10135              * AArch64 format syndrome.
  10136              */
  10137             env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
  10138             break;
  10139         case EC_CP14RTTRAP:
  10140         case EC_CP15RTTRAP:
  10141         case EC_CP14DTTRAP:
  10142             /*
  10143              * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
  10144              * the raw register field from the insn; when taking this to
  10145              * AArch64 we must convert it to the AArch64 view of the register
  10146              * number. Notice that we read a 4-bit AArch32 register number and
  10147              * write back a 5-bit AArch64 one.
  10148              */
  10149             rt = extract32(env->exception.syndrome, 5, 4);
  10150             rt = aarch64_regnum(env, rt);
  10151             env->exception.syndrome = deposit32(env->exception.syndrome,
  10152                                                 5, 5, rt);
  10153             break;
  10154         case EC_CP15RRTTRAP:
  10155         case EC_CP14RRTTRAP:
  10156             /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
  10157             rt = extract32(env->exception.syndrome, 5, 4);
  10158             rt = aarch64_regnum(env, rt);
  10159             env->exception.syndrome = deposit32(env->exception.syndrome,
  10160                                                 5, 5, rt);
  10161             rt = extract32(env->exception.syndrome, 10, 4);
  10162             rt = aarch64_regnum(env, rt);
  10163             env->exception.syndrome = deposit32(env->exception.syndrome,
  10164                                                 10, 5, rt);
  10165             break;
  10166         }
  10167         env->cp15.esr_el[new_el] = env->exception.syndrome;
  10168         break;
  10169     case EXCP_IRQ:
  10170     case EXCP_VIRQ:
  10171         addr += 0x80;
  10172         break;
  10173     case EXCP_FIQ:
  10174     case EXCP_VFIQ:
  10175         addr += 0x100;
  10176         break;
  10177     case EXCP_VSERR:
  10178         addr += 0x180;
  10179         /* Construct the SError syndrome from IDS and ISS fields. */
  10180         env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
  10181         env->cp15.esr_el[new_el] = env->exception.syndrome;
  10182         break;
  10183     default:
  10184         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
  10185     }
  10186 
  10187     if (is_a64(env)) {
  10188         old_mode = pstate_read(env);
  10189         aarch64_save_sp(env, arm_current_el(env));
  10190         env->elr_el[new_el] = env->pc;
  10191     } else {
  10192         old_mode = cpsr_read_for_spsr_elx(env);
  10193         env->elr_el[new_el] = env->regs[15];
  10194 
  10195         aarch64_sync_32_to_64(env);
  10196 
  10197         env->condexec_bits = 0;
  10198     }
  10199     env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
  10200 
  10201     qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
  10202                   env->elr_el[new_el]);
  10203 
  10204     if (cpu_isar_feature(aa64_pan, cpu)) {
  10205         /* The value of PSTATE.PAN is normally preserved, except when ... */
  10206         new_mode |= old_mode & PSTATE_PAN;
  10207         switch (new_el) {
  10208         case 2:
  10209             /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
  10210             if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
  10211                 != (HCR_E2H | HCR_TGE)) {
  10212                 break;
  10213             }
  10214             /* fall through */
  10215         case 1:
  10216             /* ... the target is EL1 ... */
  10217             /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
  10218             if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
  10219                 new_mode |= PSTATE_PAN;
  10220             }
  10221             break;
  10222         }
  10223     }
  10224     if (cpu_isar_feature(aa64_mte, cpu)) {
  10225         new_mode |= PSTATE_TCO;
  10226     }
  10227 
  10228     if (cpu_isar_feature(aa64_ssbs, cpu)) {
  10229         if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
  10230             new_mode |= PSTATE_SSBS;
  10231         } else {
  10232             new_mode &= ~PSTATE_SSBS;
  10233         }
  10234     }
  10235 
  10236     pstate_write(env, PSTATE_DAIF | new_mode);
  10237     env->aarch64 = true;
  10238     aarch64_restore_sp(env, new_el);
  10239     helper_rebuild_hflags_a64(env, new_el);
  10240 
  10241     env->pc = addr;
  10242 
  10243     qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
  10244                   new_el, env->pc, pstate_read(env));
  10245 }
  10246 
  10247 /*
  10248  * Do semihosting call and set the appropriate return value. All the
  10249  * permission and validity checks have been done at translate time.
  10250  *
  10251  * We only see semihosting exceptions in TCG only as they are not
 * We only see semihosting exceptions in TCG, as they are not
 * trapped to the hypervisor when running under KVM.
  10254 #ifdef CONFIG_TCG
  10255 static void handle_semihosting(CPUState *cs)
  10256 {
  10257     ARMCPU *cpu = ARM_CPU(cs);
  10258     CPUARMState *env = &cpu->env;
  10259 
  10260     if (is_a64(env)) {
  10261         qemu_log_mask(CPU_LOG_INT,
  10262                       "...handling as semihosting call 0x%" PRIx64 "\n",
  10263                       env->xregs[0]);
  10264         do_common_semihosting(cs);
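        /* All A64 insns are 4 bytes; step past the semihosting HLT. */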
  10265         env->pc += 4;
  10266     } else {
  10267         qemu_log_mask(CPU_LOG_INT,
  10268                       "...handling as semihosting call 0x%x\n",
  10269                       env->regs[0]);
  10270         do_common_semihosting(cs);
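        /* Advance past the semihosting insn: 2 bytes in Thumb, else 4. */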
  10271         env->regs[15] += env->thumb ? 2 : 4;
  10272     }
  10273 }
  10274 #endif
  10275 
  10276 /* Handle a CPU exception for A and R profile CPUs.
  10277  * Do any appropriate logging, handle PSCI calls, and then hand off
  10278  * to the AArch64-entry or AArch32-entry function depending on the
  10279  * target exception level's register width.
  10280  *
  10281  * Note: this is used for both TCG (as the do_interrupt tcg op),
  10282  *       and KVM to re-inject guest debug exceptions, and to
  10283  *       inject a Synchronous-External-Abort.
  10284  */
  10285 void arm_cpu_do_interrupt(CPUState *cs)
  10286 {
  10287     ARMCPU *cpu = ARM_CPU(cs);
  10288     CPUARMState *env = &cpu->env;
  10289     unsigned int new_el = env->exception.target_el;
  10290 
  10291     assert(!arm_feature(env, ARM_FEATURE_M));
  10292 
  10293     arm_log_exception(cs);
  10294     qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
  10295                   new_el);
  10296     if (qemu_loglevel_mask(CPU_LOG_INT)
  10297         && !excp_is_internal(cs->exception_index)) {
  10298         qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
  10299                       syn_get_ec(env->exception.syndrome),
  10300                       env->exception.syndrome);
  10301     }
  10302 
  10303     if (arm_is_psci_call(cpu, cs->exception_index)) {
  10304         arm_handle_psci_call(cpu);
  10305         qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
  10306         return;
  10307     }
  10308 
  10309     /*
  10310      * Semihosting semantics depend on the register width of the code
  10311      * that caused the exception, not the target exception level, so
  10312      * must be handled here.
  10313      */
  10314 #ifdef CONFIG_TCG
  10315     if (cs->exception_index == EXCP_SEMIHOST) {
  10316         handle_semihosting(cs);
  10317         return;
  10318     }
  10319 #endif
  10320 
    /*
     * Hooks may change global state, so the BQL must be held; it also
     * needs to be held for any modification of cs->interrupt_request.
     */
  10325     g_assert(qemu_mutex_iothread_locked());
  10326 
  10327     arm_call_pre_el_change_hook(cpu);
  10328 
  10329     assert(!excp_is_internal(cs->exception_index));
  10330     if (arm_el_is_aa64(env, new_el)) {
  10331         arm_cpu_do_interrupt_aarch64(cs);
  10332     } else {
  10333         arm_cpu_do_interrupt_aarch32(cs);
  10334     }
  10335 
  10336     arm_call_el_change_hook(cpu);
  10337 
  10338     if (!kvm_enabled()) {
  10339         cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
  10340     }
  10341 }
  10342 #endif /* !CONFIG_USER_ONLY */
  10343 
  10344 uint64_t arm_sctlr(CPUARMState *env, int el)
  10345 {
  10346     /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
  10347     if (el == 0) {
  10348         ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
  10349         el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
  10350     }
  10351     return env->cp15.sctlr_el[el];
  10352 }
  10353 
  10354 int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
  10355 {
  10356     if (regime_has_2_ranges(mmu_idx)) {
  10357         return extract64(tcr, 37, 2);
  10358     } else if (regime_is_stage2(mmu_idx)) {
  10359         return 0; /* VTCR_EL2 */
  10360     } else {
  10361         /* Replicate the single TBI bit so we always have 2 bits.  */
  10362         return extract32(tcr, 20, 1) * 3;
  10363     }
  10364 }
  10365 
  10366 int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
  10367 {
  10368     if (regime_has_2_ranges(mmu_idx)) {
  10369         return extract64(tcr, 51, 2);
  10370     } else if (regime_is_stage2(mmu_idx)) {
  10371         return 0; /* VTCR_EL2 */
  10372     } else {
  10373         /* Replicate the single TBID bit so we always have 2 bits.  */
  10374         return extract32(tcr, 29, 1) * 3;
  10375     }
  10376 }
  10377 
  10378 static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
  10379 {
  10380     if (regime_has_2_ranges(mmu_idx)) {
  10381         return extract64(tcr, 57, 2);
  10382     } else {
  10383         /* Replicate the single TCMA bit so we always have 2 bits.  */
  10384         return extract32(tcr, 30, 1) * 3;
  10385     }
  10386 }
  10387 
  10388 static ARMGranuleSize tg0_to_gran_size(int tg)
  10389 {
  10390     switch (tg) {
  10391     case 0:
  10392         return Gran4K;
  10393     case 1:
  10394         return Gran64K;
  10395     case 2:
  10396         return Gran16K;
  10397     default:
  10398         return GranInvalid;
  10399     }
  10400 }
  10401 
  10402 static ARMGranuleSize tg1_to_gran_size(int tg)
  10403 {
  10404     switch (tg) {
  10405     case 1:
  10406         return Gran16K;
  10407     case 2:
  10408         return Gran4K;
  10409     case 3:
  10410         return Gran64K;
  10411     default:
  10412         return GranInvalid;
  10413     }
  10414 }
  10415 
  10416 static inline bool have4k(ARMCPU *cpu, bool stage2)
  10417 {
  10418     return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
  10419         : cpu_isar_feature(aa64_tgran4, cpu);
  10420 }
  10421 
  10422 static inline bool have16k(ARMCPU *cpu, bool stage2)
  10423 {
  10424     return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
  10425         : cpu_isar_feature(aa64_tgran16, cpu);
  10426 }
  10427 
  10428 static inline bool have64k(ARMCPU *cpu, bool stage2)
  10429 {
  10430     return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
  10431         : cpu_isar_feature(aa64_tgran64, cpu);
  10432 }
  10433 
  10434 static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
  10435                                          bool stage2)
  10436 {
  10437     switch (gran) {
  10438     case Gran4K:
  10439         if (have4k(cpu, stage2)) {
  10440             return gran;
  10441         }
  10442         break;
  10443     case Gran16K:
  10444         if (have16k(cpu, stage2)) {
  10445             return gran;
  10446         }
  10447         break;
  10448     case Gran64K:
  10449         if (have64k(cpu, stage2)) {
  10450             return gran;
  10451         }
  10452         break;
  10453     case GranInvalid:
  10454         break;
  10455     }
  10456     /*
  10457      * If the guest selects a granule size that isn't implemented,
  10458      * the architecture requires that we behave as if it selected one
  10459      * that is (with an IMPDEF choice of which one to pick). We choose
  10460      * to implement the smallest supported granule size.
  10461      */
  10462     if (have4k(cpu, stage2)) {
  10463         return Gran4K;
  10464     }
  10465     if (have16k(cpu, stage2)) {
  10466         return Gran16K;
  10467     }
  10468     assert(have64k(cpu, stage2));
  10469     return Gran64K;
  10470 }
  10471 
  10472 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
  10473                                    ARMMMUIdx mmu_idx, bool data)
  10474 {
  10475     uint64_t tcr = regime_tcr(env, mmu_idx);
  10476     bool epd, hpd, tsz_oob, ds, ha, hd;
  10477     int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
  10478     ARMGranuleSize gran;
  10479     ARMCPU *cpu = env_archcpu(env);
  10480     bool stage2 = regime_is_stage2(mmu_idx);
  10481 
  10482     if (!regime_has_2_ranges(mmu_idx)) {
  10483         select = 0;
  10484         tsz = extract32(tcr, 0, 6);
  10485         gran = tg0_to_gran_size(extract32(tcr, 14, 2));
  10486         if (stage2) {
  10487             /* VTCR_EL2 */
  10488             hpd = false;
  10489         } else {
  10490             hpd = extract32(tcr, 24, 1);
  10491         }
  10492         epd = false;
  10493         sh = extract32(tcr, 12, 2);
  10494         ps = extract32(tcr, 16, 3);
  10495         ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu);
  10496         hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu);
  10497         ds = extract64(tcr, 32, 1);
  10498     } else {
  10499         bool e0pd;
  10500 
  10501         /*
  10502          * Bit 55 is always between the two regions, and is canonical for
  10503          * determining if address tagging is enabled.
  10504          */
  10505         select = extract64(va, 55, 1);
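        /*
         * TTBR0 region fields: T0SZ[5:0], TG0[15:14], EPD0[7], SH0[13:12],
         * HPD0[41], E0PD0[55]; TTBR1 region fields: T1SZ[21:16],
         * TG1[31:30], EPD1[23], SH1[29:28], HPD1[42], E0PD1[56].
         */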
  10506         if (!select) {
  10507             tsz = extract32(tcr, 0, 6);
  10508             gran = tg0_to_gran_size(extract32(tcr, 14, 2));
  10509             epd = extract32(tcr, 7, 1);
  10510             sh = extract32(tcr, 12, 2);
  10511             hpd = extract64(tcr, 41, 1);
  10512             e0pd = extract64(tcr, 55, 1);
  10513         } else {
  10514             tsz = extract32(tcr, 16, 6);
  10515             gran = tg1_to_gran_size(extract32(tcr, 30, 2));
  10516             epd = extract32(tcr, 23, 1);
  10517             sh = extract32(tcr, 28, 2);
  10518             hpd = extract64(tcr, 42, 1);
  10519             e0pd = extract64(tcr, 56, 1);
  10520         }
  10521         ps = extract64(tcr, 32, 3);
  10522         ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu);
  10523         hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu);
  10524         ds = extract64(tcr, 59, 1);
  10525 
  10526         if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
  10527             regime_is_user(env, mmu_idx)) {
  10528             epd = true;
  10529         }
  10530     }
  10531 
  10532     gran = sanitize_gran_size(cpu, gran, stage2);
  10533 
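    /*
     * Without FEAT_ST the architectural maximum TxSZ is 39; with it,
     * TxSZ may be up to 48 (47 for a 64K granule).
     */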
  10534     if (cpu_isar_feature(aa64_st, cpu)) {
  10535         max_tsz = 48 - (gran == Gran64K);
  10536     } else {
  10537         max_tsz = 39;
  10538     }
  10539 
  10540     /*
  10541      * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
  10542      * adjust the effective value of DS, as documented.
  10543      */
  10544     min_tsz = 16;
  10545     if (gran == Gran64K) {
  10546         if (cpu_isar_feature(aa64_lva, cpu)) {
  10547             min_tsz = 12;
  10548         }
  10549         ds = false;
  10550     } else if (ds) {
  10551         if (regime_is_stage2(mmu_idx)) {
  10552             if (gran == Gran16K) {
  10553                 ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
  10554             } else {
  10555                 ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
  10556             }
  10557         } else {
  10558             if (gran == Gran16K) {
  10559                 ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
  10560             } else {
  10561                 ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
  10562             }
  10563         }
  10564         if (ds) {
  10565             min_tsz = 12;
  10566         }
  10567     }
  10568 
  10569     if (tsz > max_tsz) {
  10570         tsz = max_tsz;
  10571         tsz_oob = true;
  10572     } else if (tsz < min_tsz) {
  10573         tsz = min_tsz;
  10574         tsz_oob = true;
  10575     } else {
  10576         tsz_oob = false;
  10577     }
  10578 
  10579     /* Present TBI as a composite with TBID.  */
  10580     tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
  10581     if (!data) {
  10582         tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
  10583     }
  10584     tbi = (tbi >> select) & 1;
  10585 
  10586     return (ARMVAParameters) {
  10587         .tsz = tsz,
  10588         .ps = ps,
  10589         .sh = sh,
  10590         .select = select,
  10591         .tbi = tbi,
  10592         .epd = epd,
  10593         .hpd = hpd,
  10594         .tsz_oob = tsz_oob,
  10595         .ds = ds,
  10596         .ha = ha,
  10597         .hd = ha && hd,
  10598         .gran = gran,
  10599     };
  10600 }
  10601 
/* Note that signed overflow is undefined behaviour in C.  The following
   routines are careful to use unsigned types where modulo arithmetic is
   required; compilers may assume that signed arithmetic never overflows,
   so code that relies on it _will_ be miscompiled.  */
  10605 
  10606 /* Signed saturating arithmetic.  */
  10607 
  10608 /* Perform 16-bit signed saturating addition.  */
  10609 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
  10610 {
  10611     uint16_t res;
  10612 
  10613     res = a + b;
  10614     if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
  10619     }
  10620     return res;
  10621 }
  10622 
  10623 /* Perform 8-bit signed saturating addition.  */
  10624 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
  10625 {
  10626     uint8_t res;
  10627 
  10628     res = a + b;
  10629     if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
  10634     }
  10635     return res;
  10636 }
  10637 
  10638 /* Perform 16-bit signed saturating subtraction.  */
  10639 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
  10640 {
  10641     uint16_t res;
  10642 
  10643     res = a - b;
  10644     if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
  10649     }
  10650     return res;
  10651 }
  10652 
  10653 /* Perform 8-bit signed saturating subtraction.  */
  10654 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
  10655 {
  10656     uint8_t res;
  10657 
  10658     res = a - b;
  10659     if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
  10664     }
  10665     return res;
  10666 }
  10667 
  10668 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
  10669 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
  10670 #define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
  10671 #define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
  10672 #define PFX q
  10673 
  10674 #include "op_addsub.h"
  10675 
  10676 /* Unsigned saturating arithmetic.  */
  10677 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
  10678 {
  10679     uint16_t res;
  10680     res = a + b;
    if (res < a) {
        res = 0xffff;
    }
  10683     return res;
  10684 }
  10685 
  10686 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
  10687 {
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
  10692 }
  10693 
  10694 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
  10695 {
  10696     uint8_t res;
  10697     res = a + b;
    if (res < a) {
        res = 0xff;
    }
  10700     return res;
  10701 }
  10702 
  10703 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
  10704 {
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
  10709 }
  10710 
  10711 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
  10712 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
  10713 #define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
  10714 #define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
  10715 #define PFX uq
  10716 
  10717 #include "op_addsub.h"
  10718 
  10719 /* Signed modulo arithmetic.  */
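/* For SADD16/SSUB16/SADD8/SSUB8 the GE bits are set for each lane whose
   signed result is >= 0.  */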
  10720 #define SARITH16(a, b, n, op) do { \
  10721     int32_t sum; \
  10722     sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
  10723     RESULT(sum, n, 16); \
  10724     if (sum >= 0) \
  10725         ge |= 3 << (n * 2); \
  10726     } while(0)
  10727 
  10728 #define SARITH8(a, b, n, op) do { \
  10729     int32_t sum; \
  10730     sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
  10731     RESULT(sum, n, 8); \
  10732     if (sum >= 0) \
  10733         ge |= 1 << n; \
  10734     } while(0)
  10735 
  10736 
  10737 #define ADD16(a, b, n) SARITH16(a, b, n, +)
  10738 #define SUB16(a, b, n) SARITH16(a, b, n, -)
  10739 #define ADD8(a, b, n)  SARITH8(a, b, n, +)
  10740 #define SUB8(a, b, n)  SARITH8(a, b, n, -)
  10741 #define PFX s
  10742 #define ARITH_GE
  10743 
  10744 #include "op_addsub.h"
  10745 
  10746 /* Unsigned modulo arithmetic.  */
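/* For UADD16/UADD8 the GE bits record the carry out of each lane; for
   USUB16/USUB8 they record that no borrow occurred (i.e. a >= b).  */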
  10747 #define ADD16(a, b, n) do { \
  10748     uint32_t sum; \
  10749     sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
  10750     RESULT(sum, n, 16); \
  10751     if ((sum >> 16) == 1) \
  10752         ge |= 3 << (n * 2); \
  10753     } while(0)
  10754 
  10755 #define ADD8(a, b, n) do { \
  10756     uint32_t sum; \
  10757     sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
  10758     RESULT(sum, n, 8); \
  10759     if ((sum >> 8) == 1) \
  10760         ge |= 1 << n; \
  10761     } while(0)
  10762 
  10763 #define SUB16(a, b, n) do { \
  10764     uint32_t sum; \
  10765     sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
  10766     RESULT(sum, n, 16); \
  10767     if ((sum >> 16) == 0) \
  10768         ge |= 3 << (n * 2); \
  10769     } while(0)
  10770 
  10771 #define SUB8(a, b, n) do { \
  10772     uint32_t sum; \
  10773     sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
  10774     RESULT(sum, n, 8); \
  10775     if ((sum >> 8) == 0) \
  10776         ge |= 1 << n; \
  10777     } while(0)
  10778 
  10779 #define PFX u
  10780 #define ARITH_GE
  10781 
  10782 #include "op_addsub.h"
  10783 
  10784 /* Halved signed arithmetic.  */
  10785 #define ADD16(a, b, n) \
  10786   RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
  10787 #define SUB16(a, b, n) \
  10788   RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
  10789 #define ADD8(a, b, n) \
  10790   RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
  10791 #define SUB8(a, b, n) \
  10792   RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
  10793 #define PFX sh
  10794 
  10795 #include "op_addsub.h"
  10796 
  10797 /* Halved unsigned arithmetic.  */
  10798 #define ADD16(a, b, n) \
  10799   RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
  10800 #define SUB16(a, b, n) \
  10801   RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
  10802 #define ADD8(a, b, n) \
  10803   RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
  10804 #define SUB8(a, b, n) \
  10805   RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
  10806 #define PFX uh
  10807 
  10808 #include "op_addsub.h"
  10809 
  10810 static inline uint8_t do_usad(uint8_t a, uint8_t b)
  10811 {
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
  10816 }
  10817 
  10818 /* Unsigned sum of absolute byte differences.  */
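/* e.g. usad8(0x01020304, 0x04030201) = 3 + 1 + 1 + 3 = 8.  */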
  10819 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
  10820 {
  10821     uint32_t sum;
  10822     sum = do_usad(a, b);
  10823     sum += do_usad(a >> 8, b >> 8);
  10824     sum += do_usad(a >> 16, b >> 16);
  10825     sum += do_usad(a >> 24, b >> 24);
  10826     return sum;
  10827 }
  10828 
  10829 /* For ARMv6 SEL instruction.  */
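/* 'flags' is GE[3:0]: each set bit selects the corresponding byte from a,
   each clear bit the byte from b.  */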
  10830 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
  10831 {
  10832     uint32_t mask;
  10833 
  10834     mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
  10843     return (a & mask) | (b & ~mask);
  10844 }
  10845 
  10846 /* CRC helpers.
  10847  * The upper bytes of val (above the number specified by 'bytes') must have
  10848  * been zeroed out by the caller.
  10849  */
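/* These implement the CRC32B/H/W and CRC32CB/H/W instructions, with
 * 'bytes' being 1, 2 or 4 accordingly.
 */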
  10850 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
  10851 {
  10852     uint8_t buf[4];
  10853 
  10854     stl_le_p(buf, val);
  10855 
  10856     /* zlib crc32 converts the accumulator and output to one's complement.  */
  10857     return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
  10858 }
  10859 
  10860 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
  10861 {
  10862     uint8_t buf[4];
  10863 
  10864     stl_le_p(buf, val);
  10865 
  10866     /* Linux crc32c converts the output to one's complement.  */
  10867     return crc32c(acc, buf, bytes) ^ 0xffffffff;
  10868 }
  10869 
  10870 /* Return the exception level to which FP-disabled exceptions should
  10871  * be taken, or 0 if FP is enabled.
  10872  */
  10873 int fp_exception_el(CPUARMState *env, int cur_el)
  10874 {
  10875 #ifndef CONFIG_USER_ONLY
  10876     uint64_t hcr_el2;
  10877 
  10878     /* CPACR and the CPTR registers don't exist before v6, so FP is
  10879      * always accessible
  10880      */
  10881     if (!arm_feature(env, ARM_FEATURE_V6)) {
  10882         return 0;
  10883     }
  10884 
  10885     if (arm_feature(env, ARM_FEATURE_M)) {
  10886         /* CPACR can cause a NOCP UsageFault taken to current security state */
  10887         if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
  10888             return 1;
  10889         }
  10890 
  10891         if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
  10892             if (!extract32(env->v7m.nsacr, 10, 1)) {
  10893                 /* FP insns cause a NOCP UsageFault taken to Secure */
  10894                 return 3;
  10895             }
  10896         }
  10897 
  10898         return 0;
  10899     }
  10900 
  10901     hcr_el2 = arm_hcr_el2_eff(env);
  10902 
  10903     /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
  10904      * 0, 2 : trap EL0 and EL1/PL1 accesses
  10905      * 1    : trap only EL0 accesses
  10906      * 3    : trap no accesses
  10907      * This register is ignored if E2H+TGE are both set.
  10908      */
  10909     if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
  10910         int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
  10911 
  10912         switch (fpen) {
  10913         case 1:
  10914             if (cur_el != 0) {
  10915                 break;
  10916             }
  10917             /* fall through */
  10918         case 0:
  10919         case 2:
  10920             /* Trap from Secure PL0 or PL1 to Secure PL1. */
  10921             if (!arm_el_is_aa64(env, 3)
  10922                 && (cur_el == 3 || arm_is_secure_below_el3(env))) {
  10923                 return 3;
  10924             }
  10925             if (cur_el <= 1) {
  10926                 return 1;
  10927             }
  10928             break;
  10929         }
  10930     }
  10931 
  10932     /*
  10933      * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
  10934      * to control non-secure access to the FPU. It doesn't have any
  10935      * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
  10936      */
  10937     if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
  10938          cur_el <= 2 && !arm_is_secure_below_el3(env))) {
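        /* NSACR bit 10 is cp10, the FP/SIMD access enable. */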
  10939         if (!extract32(env->cp15.nsacr, 10, 1)) {
  10940             /* FP insns act as UNDEF */
  10941             return cur_el == 2 ? 2 : 1;
  10942         }
  10943     }
  10944 
  10945     /*
  10946      * CPTR_EL2 is present in v7VE or v8, and changes format
  10947      * with HCR_EL2.E2H (regardless of TGE).
  10948      */
  10949     if (cur_el <= 2) {
  10950         if (hcr_el2 & HCR_E2H) {
  10951             switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
  10952             case 1:
  10953                 if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
  10954                     break;
  10955                 }
  10956                 /* fall through */
  10957             case 0:
  10958             case 2:
  10959                 return 2;
  10960             }
  10961         } else if (arm_is_el2_enabled(env)) {
  10962             if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
  10963                 return 2;
  10964             }
  10965         }
  10966     }
  10967 
  10968     /* CPTR_EL3 : present in v8 */
  10969     if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
  10970         /* Trap all FP ops to EL3 */
  10971         return 3;
  10972     }
  10973 #endif
  10974     return 0;
  10975 }
  10976 
  10977 /* Return the exception level we're running at if this is our mmu_idx */
  10978 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
  10979 {
  10980     if (mmu_idx & ARM_MMU_IDX_M) {
  10981         return mmu_idx & ARM_MMU_IDX_M_PRIV;
  10982     }
  10983 
  10984     switch (mmu_idx) {
  10985     case ARMMMUIdx_E10_0:
  10986     case ARMMMUIdx_E20_0:
  10987         return 0;
  10988     case ARMMMUIdx_E10_1:
  10989     case ARMMMUIdx_E10_1_PAN:
  10990         return 1;
  10991     case ARMMMUIdx_E2:
  10992     case ARMMMUIdx_E20_2:
  10993     case ARMMMUIdx_E20_2_PAN:
  10994         return 2;
  10995     case ARMMMUIdx_E3:
  10996         return 3;
  10997     default:
  10998         g_assert_not_reached();
  10999     }
  11000 }
  11001 
  11002 #ifndef CONFIG_TCG
  11003 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
  11004 {
  11005     g_assert_not_reached();
  11006 }
  11007 #endif
  11008 
  11009 static bool arm_pan_enabled(CPUARMState *env)
  11010 {
  11011     if (is_a64(env)) {
  11012         return env->pstate & PSTATE_PAN;
  11013     } else {
  11014         return env->uncached_cpsr & CPSR_PAN;
  11015     }
  11016 }
  11017 
  11018 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
  11019 {
  11020     ARMMMUIdx idx;
  11021     uint64_t hcr;
  11022 
  11023     if (arm_feature(env, ARM_FEATURE_M)) {
  11024         return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
  11025     }
  11026 
  11027     /* See ARM pseudo-function ELIsInHost.  */
  11028     switch (el) {
  11029     case 0:
  11030         hcr = arm_hcr_el2_eff(env);
  11031         if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
  11032             idx = ARMMMUIdx_E20_0;
  11033         } else {
  11034             idx = ARMMMUIdx_E10_0;
  11035         }
  11036         break;
  11037     case 1:
  11038         if (arm_pan_enabled(env)) {
  11039             idx = ARMMMUIdx_E10_1_PAN;
  11040         } else {
  11041             idx = ARMMMUIdx_E10_1;
  11042         }
  11043         break;
  11044     case 2:
  11045         /* Note that TGE does not apply at EL2.  */
  11046         if (arm_hcr_el2_eff(env) & HCR_E2H) {
  11047             if (arm_pan_enabled(env)) {
  11048                 idx = ARMMMUIdx_E20_2_PAN;
  11049             } else {
  11050                 idx = ARMMMUIdx_E20_2;
  11051             }
  11052         } else {
  11053             idx = ARMMMUIdx_E2;
  11054         }
  11055         break;
  11056     case 3:
  11057         return ARMMMUIdx_E3;
  11058     default:
  11059         g_assert_not_reached();
  11060     }
  11061 
  11062     return idx;
  11063 }
  11064 
  11065 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
  11066 {
  11067     return arm_mmu_idx_el(env, arm_current_el(env));
  11068 }
  11069 
  11070 static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
  11071                                            ARMMMUIdx mmu_idx,
  11072                                            CPUARMTBFlags flags)
  11073 {
  11074     DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
  11075     DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
  11076 
  11077     if (arm_singlestep_active(env)) {
  11078         DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
  11079     }
  11080     return flags;
  11081 }
  11082 
  11083 static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
  11084                                               ARMMMUIdx mmu_idx,
  11085                                               CPUARMTBFlags flags)
  11086 {
  11087     bool sctlr_b = arm_sctlr_b(env);
  11088 
  11089     if (sctlr_b) {
  11090         DP_TBFLAG_A32(flags, SCTLR__B, 1);
  11091     }
  11092     if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
  11093         DP_TBFLAG_ANY(flags, BE_DATA, 1);
  11094     }
  11095     DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));
  11096 
  11097     return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
  11098 }
  11099 
  11100 static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
  11101                                         ARMMMUIdx mmu_idx)
  11102 {
  11103     CPUARMTBFlags flags = {};
  11104     uint32_t ccr = env->v7m.ccr[env->v7m.secure];
  11105 
  11106     /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
  11107     if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
  11108         DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
  11109     }
  11110 
  11111     if (arm_v7m_is_handler_mode(env)) {
  11112         DP_TBFLAG_M32(flags, HANDLER, 1);
  11113     }
  11114 
  11115     /*
  11116      * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
  11117      * is suppressing them because the requested execution priority
  11118      * is less than 0.
  11119      */
  11120     if (arm_feature(env, ARM_FEATURE_V8) &&
  11121         !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
  11122           (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
  11123         DP_TBFLAG_M32(flags, STACKCHECK, 1);
  11124     }
  11125 
  11126     if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
  11127         DP_TBFLAG_M32(flags, SECURE, 1);
  11128     }
  11129 
  11130     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
  11131 }
  11132 
  11133 static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
  11134                                         ARMMMUIdx mmu_idx)
  11135 {
  11136     CPUARMTBFlags flags = {};
  11137     int el = arm_current_el(env);
  11138 
  11139     if (arm_sctlr(env, el) & SCTLR_A) {
  11140         DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
  11141     }
  11142 
  11143     if (arm_el_is_aa64(env, 1)) {
  11144         DP_TBFLAG_A32(flags, VFPEN, 1);
  11145     }
  11146 
  11147     if (el < 2 && env->cp15.hstr_el2 &&
  11148         (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
  11149         DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
  11150     }
  11151 
  11152     if (env->uncached_cpsr & CPSR_IL) {
  11153         DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
  11154     }
  11155 
  11156     /*
  11157      * The SME exception we are testing for is raised via
  11158      * AArch64.CheckFPAdvSIMDEnabled(), as called from
  11159      * AArch32.CheckAdvSIMDOrFPEnabled().
  11160      */
  11161     if (el == 0
  11162         && FIELD_EX64(env->svcr, SVCR, SM)
  11163         && (!arm_is_el2_enabled(env)
  11164             || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
  11165         && arm_el_is_aa64(env, 1)
  11166         && !sme_fa64(env, el)) {
  11167         DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
  11168     }
  11169 
  11170     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
  11171 }
  11172 
  11173 static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
  11174                                         ARMMMUIdx mmu_idx)
  11175 {
  11176     CPUARMTBFlags flags = {};
  11177     ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
  11178     uint64_t tcr = regime_tcr(env, mmu_idx);
  11179     uint64_t sctlr;
  11180     int tbii, tbid;
  11181 
  11182     DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);
  11183 
  11184     /* Get control bits for tagged addresses.  */
  11185     tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
  11186     tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
  11187 
  11188     DP_TBFLAG_A64(flags, TBII, tbii);
  11189     DP_TBFLAG_A64(flags, TBID, tbid);
  11190 
  11191     if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
  11192         int sve_el = sve_exception_el(env, el);
  11193 
  11194         /*
         * If either FP or SVE is disabled, the translator does not need
         * the vector length.  If SVE EL > FP EL, the FP exception takes
         * precedence, and the translator does not need the SVE EL.  Save
         * potential re-translations by forcing the unneeded data to zero.
  11199          */
  11200         if (fp_el != 0) {
  11201             if (sve_el > fp_el) {
  11202                 sve_el = 0;
  11203             }
  11204         } else if (sve_el == 0) {
  11205             DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
  11206         }
  11207         DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
  11208     }
  11209     if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
  11210         int sme_el = sme_exception_el(env, el);
  11211         bool sm = FIELD_EX64(env->svcr, SVCR, SM);
  11212 
  11213         DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
  11214         if (sme_el == 0) {
  11215             /* Similarly, do not compute SVL if SME is disabled. */
  11216             int svl = sve_vqm1_for_el_sm(env, el, true);
  11217             DP_TBFLAG_A64(flags, SVL, svl);
  11218             if (sm) {
  11219                 /* If SVE is disabled, we will not have set VL above. */
  11220                 DP_TBFLAG_A64(flags, VL, svl);
  11221             }
  11222         }
  11223         if (sm) {
  11224             DP_TBFLAG_A64(flags, PSTATE_SM, 1);
  11225             DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
  11226         }
  11227         DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
  11228     }
  11229 
  11230     sctlr = regime_sctlr(env, stage1);
  11231 
  11232     if (sctlr & SCTLR_A) {
  11233         DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
  11234     }
  11235 
  11236     if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
  11237         DP_TBFLAG_ANY(flags, BE_DATA, 1);
  11238     }
  11239 
  11240     if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
  11241         /*
  11242          * In order to save space in flags, we record only whether
  11243          * pauth is "inactive", meaning all insns are implemented as
  11244          * a nop, or "active" when some action must be performed.
  11245          * The decision of which action to take is left to a helper.
  11246          */
  11247         if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
  11248             DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
  11249         }
  11250     }
  11251 
  11252     if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
  11253         /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
  11254         if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
  11255             DP_TBFLAG_A64(flags, BT, 1);
  11256         }
  11257     }
  11258 
  11259     /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
  11260     if (!(env->pstate & PSTATE_UAO)) {
  11261         switch (mmu_idx) {
  11262         case ARMMMUIdx_E10_1:
  11263         case ARMMMUIdx_E10_1_PAN:
  11264             /* TODO: ARMv8.3-NV */
  11265             DP_TBFLAG_A64(flags, UNPRIV, 1);
  11266             break;
  11267         case ARMMMUIdx_E20_2:
  11268         case ARMMMUIdx_E20_2_PAN:
  11269             /*
  11270              * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
  11271              * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
  11272              */
  11273             if (env->cp15.hcr_el2 & HCR_TGE) {
  11274                 DP_TBFLAG_A64(flags, UNPRIV, 1);
  11275             }
  11276             break;
  11277         default:
  11278             break;
  11279         }
  11280     }
  11281 
  11282     if (env->pstate & PSTATE_IL) {
  11283         DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
  11284     }
  11285 
  11286     if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
  11287         /*
  11288          * Set MTE_ACTIVE if any access may be Checked, and leave clear
  11289          * if all accesses must be Unchecked:
  11290          * 1) If no TBI, then there are no tags in the address to check,
  11291          * 2) If Tag Check Override, then all accesses are Unchecked,
  11292          * 3) If Tag Check Fail == 0, then Checked access have no effect,
  11293          * 4) If no Allocation Tag Access, then all accesses are Unchecked.
  11294          */
  11295         if (allocation_tag_access_enabled(env, el, sctlr)) {
  11296             DP_TBFLAG_A64(flags, ATA, 1);
  11297             if (tbid
  11298                 && !(env->pstate & PSTATE_TCO)
  11299                 && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
  11300                 DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
  11301             }
  11302         }
  11303         /* And again for unprivileged accesses, if required.  */
  11304         if (EX_TBFLAG_A64(flags, UNPRIV)
  11305             && tbid
  11306             && !(env->pstate & PSTATE_TCO)
  11307             && (sctlr & SCTLR_TCF0)
  11308             && allocation_tag_access_enabled(env, 0, sctlr)) {
  11309             DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
  11310         }
  11311         /* Cache TCMA as well as TBI. */
  11312         DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
  11313     }
  11314 
  11315     return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
  11316 }
  11317 
  11318 static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
  11319 {
  11320     int el = arm_current_el(env);
  11321     int fp_el = fp_exception_el(env, el);
  11322     ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
  11323 
  11324     if (is_a64(env)) {
  11325         return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
  11326     } else if (arm_feature(env, ARM_FEATURE_M)) {
  11327         return rebuild_hflags_m32(env, fp_el, mmu_idx);
  11328     } else {
  11329         return rebuild_hflags_a32(env, fp_el, mmu_idx);
  11330     }
  11331 }
  11332 
  11333 void arm_rebuild_hflags(CPUARMState *env)
  11334 {
  11335     env->hflags = rebuild_hflags_internal(env);
  11336 }
  11337 
  11338 /*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
  11341  */
  11342 void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
  11343 {
  11344     int el = arm_current_el(env);
  11345     int fp_el = fp_exception_el(env, el);
  11346     ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
  11347 
  11348     env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
  11349 }
  11350 
  11351 void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
  11352 {
  11353     int fp_el = fp_exception_el(env, el);
  11354     ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
  11355 
  11356     env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
  11357 }
  11358 
  11359 /*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
  11362  */
  11363 void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
  11364 {
  11365     int el = arm_current_el(env);
  11366     int fp_el = fp_exception_el(env, el);
  11367     ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
  11368     env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
  11369 }
  11370 
  11371 void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
  11372 {
  11373     int fp_el = fp_exception_el(env, el);
  11374     ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
  11375 
  11376     env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
  11377 }
  11378 
  11379 void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
  11380 {
  11381     int fp_el = fp_exception_el(env, el);
  11382     ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
  11383 
  11384     env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
  11385 }
  11386 
  11387 static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
  11388 {
  11389 #ifdef CONFIG_DEBUG_TCG
  11390     CPUARMTBFlags c = env->hflags;
  11391     CPUARMTBFlags r = rebuild_hflags_internal(env);
  11392 
  11393     if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
  11394         fprintf(stderr, "TCG hflags mismatch "
  11395                         "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx "))\n",
  11397                 c.flags, c.flags2, r.flags, r.flags2);
  11398         abort();
  11399     }
  11400 #endif
  11401 }
  11402 
  11403 static bool mve_no_pred(CPUARMState *env)
  11404 {
  11405     /*
  11406      * Return true if there is definitely no predication of MVE
  11407      * instructions by VPR or LTPSIZE. (Returning false even if there
  11408      * isn't any predication is OK; generated code will just be
  11409      * a little worse.)
  11410      * If the CPU does not implement MVE then this TB flag is always 0.
  11411      *
  11412      * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
  11413      * logic in gen_update_fp_context() needs to be updated to match.
  11414      *
  11415      * We do not include the effect of the ECI bits here -- they are
  11416      * tracked in other TB flags. This simplifies the logic for
  11417      * "when did we emit code that changes the MVE_NO_PRED TB flag
  11418      * and thus need to end the TB?".
  11419      */
  11420     if (cpu_isar_feature(aa32_mve, env_archcpu(env))) {
  11421         return false;
  11422     }
  11423     if (env->v7m.vpr) {
  11424         return false;
  11425     }
  11426     if (env->v7m.ltpsize < 4) {
  11427         return false;
  11428     }
  11429     return true;
  11430 }
  11431 
  11432 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
  11433                           target_ulong *cs_base, uint32_t *pflags)
  11434 {
  11435     CPUARMTBFlags flags;
  11436 
  11437     assert_hflags_rebuild_correctly(env);
  11438     flags = env->hflags;
  11439 
  11440     if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
  11441         *pc = env->pc;
  11442         if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
  11443             DP_TBFLAG_A64(flags, BTYPE, env->btype);
  11444         }
  11445     } else {
  11446         *pc = env->regs[15];
  11447 
  11448         if (arm_feature(env, ARM_FEATURE_M)) {
  11449             if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
  11450                 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
  11451                 != env->v7m.secure) {
  11452                 DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
  11453             }
  11454 
  11455             if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
  11456                 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
  11457                  (env->v7m.secure &&
  11458                   !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
  11459                 /*
  11460                  * ASPEN is set, but FPCA/SFPA indicate that there is no
  11461                  * active FP context; we must create a new FP context before
  11462                  * executing any FP insn.
  11463                  */
  11464                 DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
  11465             }
  11466 
  11467             bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
  11468             if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
  11469                 DP_TBFLAG_M32(flags, LSPACT, 1);
  11470             }
  11471 
  11472             if (mve_no_pred(env)) {
  11473                 DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
  11474             }
  11475         } else {
  11476             /*
  11477              * Note that XSCALE_CPAR shares bits with VECSTRIDE.
  11478              * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
  11479              */
  11480             if (arm_feature(env, ARM_FEATURE_XSCALE)) {
  11481                 DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
  11482             } else {
  11483                 DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
  11484                 DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
  11485             }
  11486             if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
  11487                 DP_TBFLAG_A32(flags, VFPEN, 1);
  11488             }
  11489         }
  11490 
  11491         DP_TBFLAG_AM32(flags, THUMB, env->thumb);
  11492         DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
  11493     }
  11494 
  11495     /*
  11496      * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
  11497      * states defined in the ARM ARM for software singlestep:
  11498      *  SS_ACTIVE   PSTATE.SS   State
  11499      *     0            x       Inactive (the TB flag for SS is always 0)
  11500      *     1            0       Active-pending
  11501      *     1            1       Active-not-pending
  11502      * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
  11503      */
  11504     if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
  11505         DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
  11506     }
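    /*
     * Worked example of the state machine above (illustrative): after
     * an exception return that leaves SS_ACTIVE set with PSTATE.SS == 1,
     * the next TB is translated as Active-not-pending, so exactly one
     * insn runs; executing that insn clears PSTATE.SS, the next lookup
     * here sees Active-pending, and the step exception is taken.
     */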
  11507 
  11508     *pflags = flags.flags;
  11509     *cs_base = flags.flags2;
  11510 }
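/*
 * Usage sketch, not upstream code: how the generic TCG layer consumes
 * the triple produced above.  tb_lookup() stands in for the actual
 * translation-cache probe; treat its name and signature as assumptions
 * for illustration only:
 *
 *     target_ulong pc, cs_base;
 *     uint32_t flags;
 *
 *     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 *     tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
 *
 * Both flags.flags and flags.flags2 take part in TB matching, so any
 * bit stored with DP_TBFLAG_* forces a distinct translation.
 */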
  11511 
  11512 #ifdef TARGET_AARCH64
  11513 /*
  11514  * The manual says that when SVE is enabled and VQ is widened the
  11515  * implementation is allowed to zero the previously inaccessible
  11516  * portion of the registers.  The corollary to that is that when
  11517  * SVE is enabled and VQ is narrowed we are also allowed to zero
  11518  * the now inaccessible portion of the registers.
  11519  *
  11520  * The intent of this is that no predicate bit beyond VQ is ever set.
  11521  * Which means that some operations on predicate registers themselves
  11522  * may operate on full uint64_t or even unrolled across the maximum
  11523  * uint64_t[4].  Performing 4 words of host arithmetic unconditionally
  11524  * may well be cheaper than conditionals to restrict the operation
  11525  * to the relevant portion of a uint16_t[16].
  11526  */
  11527 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
  11528 {
  11529     int i, j;
  11530     uint64_t pmask;
  11531 
  11532     assert(vq >= 1 && vq <= ARM_MAX_VQ);
  11533     assert(vq <= env_archcpu(env)->sve_max_vq);
  11534 
  11535     /* Zap the high bits of the zregs.  */
  11536     for (i = 0; i < 32; i++) {
  11537         memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
  11538     }
  11539 
  11540     /* Zap the high bits of the pregs and ffr.  */
  11541     pmask = 0;
  11542     if (vq & 3) {
  11543         pmask = ~(-1ULL << (16 * (vq & 3)));
  11544     }
  11545     for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
  11546         for (i = 0; i < 17; ++i) {
  11547             env->vfp.pregs[i].p[j] &= pmask;
  11548         }
  11549         pmask = 0;
  11550     }
  11551 }
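/*
 * Worked example (illustrative; assumes ARM_MAX_VQ == 16, i.e. 16
 * predicate bits per quantum and four quanta per uint64_t p[] word):
 * narrowing to vq == 6 starts at j == vq / 4 == 1 with
 * pmask == ~(-1ULL << 32) == 0x00000000ffffffffULL, which keeps the
 * bits for quanta 4 and 5 in p[1]; p[2] and p[3] are then cleared
 * outright on the remaining iterations, where pmask == 0.
 */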
  11552 
  11553 static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
  11554 {
  11555     int exc_el;
  11556 
  11557     if (sm) {
  11558         exc_el = sme_exception_el(env, el);
  11559     } else {
  11560         exc_el = sve_exception_el(env, el);
  11561     }
  11562     if (exc_el) {
  11563         return 0; /* disabled */
  11564     }
  11565     return sve_vqm1_for_el_sm(env, el, sm);
  11566 }
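/*
 * Note (illustrative, not upstream text): like ZCR_ELx.LEN this returns
 * "VQ minus 1", so 0 is ambiguous between "disabled" and "VQ == 1".
 * That is harmless for the single caller below, since narrowing to
 * new_len + 1 == 1 quantum is also the right state for a disabled EL.
 */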
  11567 
  11568 /*
  11569  * Notice a change in SVE vector size when changing EL.
  11570  */
  11571 void aarch64_sve_change_el(CPUARMState *env, int old_el,
  11572                            int new_el, bool el0_a64)
  11573 {
  11574     ARMCPU *cpu = env_archcpu(env);
  11575     int old_len, new_len;
  11576     bool old_a64, new_a64, sm;
  11577 
  11578     /* Nothing to do if no SVE.  */
  11579     if (!cpu_isar_feature(aa64_sve, cpu)) {
  11580         return;
  11581     }
  11582 
  11583     /* Nothing to do if FP is disabled in either EL.  */
  11584     if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
  11585         return;
  11586     }
  11587 
  11588     old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
  11589     new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
  11590 
  11591     /*
  11592      * Both AArch64.TakeException and AArch64.ExceptionReturn
  11593      * invoke ResetSVEState when taking an exception from, or
  11594      * returning to, AArch32 state when PSTATE.SM is enabled.
  11595      */
  11596     sm = FIELD_EX64(env->svcr, SVCR, SM);
  11597     if (old_a64 != new_a64 && sm) {
  11598         arm_reset_sve_state(env);
  11599         return;
  11600     }
  11601 
  11602     /*
  11603      * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
  11604      * at ELx, or not available because the EL is in AArch32 state, then
  11605      * for all purposes other than a direct read, the ZCR_ELx.LEN field
  11606      * has an effective value of 0".
  11607      *
  11608      * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
  11609      * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
  11610      * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
  11611      * we already have the correct register contents when encountering the
  11612      * vq0->vq0 transition between EL0->EL1.
  11613      */
  11614     old_len = new_len = 0;
  11615     if (old_a64) {
  11616         old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
  11617     }
  11618     if (new_a64) {
  11619         new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
  11620     }
  11621 
  11622     /* When changing vector length, clear inaccessible state.  */
  11623     if (new_len < old_len) {
  11624         aarch64_sve_narrow_vq(env, new_len + 1);
  11625     }
  11626 }
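/*
 * Worked trace of the comment above (illustrative): taking
 * EL2 (aa64, vq=4) -> EL0 (aa32), old_len is nonzero and new_len == 0
 * (new_a64 is false), so we narrow to new_len + 1 == 1 quantum at
 * once.  The later EL0 (aa32) -> EL1 (aa64, vq=0) change then sees
 * old_len == new_len == 0 and correctly does nothing: the registers
 * were already narrowed on entry to aa32.
 */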
  11627 #endif