qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

arm_gicv3_cpuif.c (92140B)


      1 /*
      2  * ARM Generic Interrupt Controller v3 (emulation)
      3  *
      4  * Copyright (c) 2016 Linaro Limited
      5  * Written by Peter Maydell
      6  *
      7  * This code is licensed under the GPL, version 2 or (at your option)
      8  * any later version.
      9  */
     10 
     11 /* This file contains the code for the system register interface
     12  * portions of the GICv3.
     13  */
     14 
     15 #include "qemu/osdep.h"
     16 #include "qemu/bitops.h"
     17 #include "qemu/log.h"
     18 #include "qemu/main-loop.h"
     19 #include "trace.h"
     20 #include "gicv3_internal.h"
     21 #include "hw/irq.h"
     22 #include "cpu.h"
     23 #include "target/arm/cpregs.h"
     24 
     25 /*
     26  * Special case return value from hppvi_index(); must be larger than
     27  * the architecturally maximum possible list register index (which is 15)
     28  */
     29 #define HPPVI_INDEX_VLPI 16
     30 
     31 static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
     32 {
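            /* Return the GICv3 CPU interface state stashed in the CPU env */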
     33     return env->gicv3state;
     34 }
     35 
     36 static bool gicv3_use_ns_bank(CPUARMState *env)
     37 {
     38     /* Return true if we should use the NonSecure bank for a banked GIC
     39      * CPU interface register. Note that this differs from the
     40      * access_secure_reg() function because GICv3 banked registers are
     41      * banked even for AArch64, unlike the other CPU system registers.
     42      */
     43     return !arm_is_secure_below_el3(env);
     44 }
     45 
     46 /* The minimum BPR for the virtual interface is a configurable property */
     47 static inline int icv_min_vbpr(GICv3CPUState *cs)
     48 {
     49     return 7 - cs->vprebits;
     50 }
     51 
     52 static inline int ich_num_aprs(GICv3CPUState *cs)
     53 {
     54     /* Return the number of virtual APR registers (1, 2, or 4) */
     55     int aprmax = 1 << (cs->vprebits - 5);
     56     assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
     57     return aprmax;
     58 }
     59 
     60 /* Simple accessor functions for LR fields */
     61 static uint32_t ich_lr_vintid(uint64_t lr)
     62 {
     63     return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
     64 }
     65 
     66 static uint32_t ich_lr_pintid(uint64_t lr)
     67 {
     68     return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
     69 }
     70 
     71 static uint32_t ich_lr_prio(uint64_t lr)
     72 {
     73     return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
     74 }
     75 
     76 static int ich_lr_state(uint64_t lr)
     77 {
     78     return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
     79 }
     80 
     81 static bool icv_access(CPUARMState *env, int hcr_flags)
     82 {
     83     /* Return true if this ICC_ register access should really be
     84      * directed to an ICV_ access. hcr_flags is a mask of
     85      * HCR_EL2 bits to check: we treat this as an ICV_ access
     86      * if we are in NS EL1 and at least one of the specified
     87      * HCR_EL2 bits is set.
     88      *
     89      * ICV registers fall into four categories:
     90      *  * access if NS EL1 and HCR_EL2.FMO == 1:
     91      *    all ICV regs with '0' in their name
     92      *  * access if NS EL1 and HCR_EL2.IMO == 1:
     93      *    all ICV regs with '1' in their name
     94      *  * access if NS EL1 and either IMO or FMO == 1:
     95      *    CTLR, DIR, PMR, RPR
     96      */
     97     uint64_t hcr_el2 = arm_hcr_el2_eff(env);
     98     bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO);
     99 
    100     return flagmatch && arm_current_el(env) == 1
    101         && !arm_is_secure_below_el3(env);
    102 }
    103 
    104 static int read_vbpr(GICv3CPUState *cs, int grp)
    105 {
    106     /* Read VBPR value out of the VMCR field (caller must handle
    107      * VCBPR effects if required)
    108      */
    109     if (grp == GICV3_G0) {
    110         return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
    111                      ICH_VMCR_EL2_VBPR0_LENGTH);
    112     } else {
    113         return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
    114                          ICH_VMCR_EL2_VBPR1_LENGTH);
    115     }
    116 }
    117 
    118 static void write_vbpr(GICv3CPUState *cs, int grp, int value)
    119 {
    120     /* Write new VBPR value (VBPR0 or VBPR1), handling the "writing a value
    121      * less than the minimum sets it to the minimum" semantics.
    122      */
    123     int min = icv_min_vbpr(cs);
    124 
    125     if (grp != GICV3_G0) {
    126         min++;
    127     }
    128 
    129     value = MAX(value, min);
    130 
    131     if (grp == GICV3_G0) {
    132         cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
    133                                      ICH_VMCR_EL2_VBPR0_LENGTH, value);
    134     } else {
    135         cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
    136                                      ICH_VMCR_EL2_VBPR1_LENGTH, value);
    137     }
    138 }
    139 
    140 static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
    141 {
    142     /* Return a mask word which clears the unimplemented priority bits
    143      * from a priority value for a virtual interrupt. (Not to be confused
    144      * with the group priority, whose mask depends on the value of VBPR
    145      * for the interrupt group.)
    146      */
    147     return ~0U << (8 - cs->vpribits);
    148 }
    149 
    150 static int ich_highest_active_virt_prio(GICv3CPUState *cs)
    151 {
    152     /* Calculate the current running priority based on the set bits
    153      * in the ICH Active Priority Registers.
    154      */
    155     int i;
    156     int aprmax = ich_num_aprs(cs);
    157 
    158     for (i = 0; i < aprmax; i++) {
    159         uint32_t apr = cs->ich_apr[GICV3_G0][i] |
    160             cs->ich_apr[GICV3_G1NS][i];
    161 
    162         if (!apr) {
    163             continue;
    164         }
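                /*
                 * The APR bit index is the interrupt's group priority
                 * shifted down by (8 - vprebits), i.e. icv_min_vbpr() + 1,
                 * so shifting it back up reconstructs the group priority
                 * as an 8-bit value.
                 */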
    165         return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
    166     }
    167     /* No current active interrupts: return idle priority */
    168     return 0xff;
    169 }
    170 
    171 static int hppvi_index(GICv3CPUState *cs)
    172 {
    173     /*
    174      * Return the list register index of the highest priority pending
    175      * virtual interrupt, as per the HighestPriorityVirtualInterrupt
    176      * pseudocode. If no pending virtual interrupts, return -1.
    177      * If the highest priority pending virtual interrupt is a vLPI,
    178      * return HPPVI_INDEX_VLPI.
    179      * (The pseudocode handles checking whether the vLPI is higher
    180      * priority than the highest priority list register at every
    181      * callsite of HighestPriorityVirtualInterrupt; we check it here.)
    182      */
    183     ARMCPU *cpu = ARM_CPU(cs->cpu);
    184     CPUARMState *env = &cpu->env;
    185     int idx = -1;
    186     int i;
    187     /* Note that a list register entry with a priority of 0xff will
    188      * never be reported by this function; this is the architecturally
    189      * correct behaviour.
    190      */
    191     int prio = 0xff;
    192 
    193     if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
    194         /* Both groups disabled, definitely nothing to do */
    195         return idx;
    196     }
    197 
    198     for (i = 0; i < cs->num_list_regs; i++) {
    199         uint64_t lr = cs->ich_lr_el2[i];
    200         int thisprio;
    201 
    202         if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
    203             /* Not Pending */
    204             continue;
    205         }
    206 
    207         /* Ignore interrupts if relevant group enable not set */
    208         if (lr & ICH_LR_EL2_GROUP) {
    209             if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
    210                 continue;
    211             }
    212         } else {
    213             if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
    214                 continue;
    215             }
    216         }
    217 
    218         thisprio = ich_lr_prio(lr);
    219 
    220         if (thisprio < prio) {
    221             prio = thisprio;
    222             idx = i;
    223         }
    224     }
    225 
    226     /*
    227      * "no pending vLPI" is indicated with prio = 0xff, which always
    228      * fails the priority check here. vLPIs are only considered
    229      * when we are in Non-Secure state.
    230      */
    231     if (cs->hppvlpi.prio < prio && !arm_is_secure(env)) {
    232         if (cs->hppvlpi.grp == GICV3_G0) {
    233             if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0) {
    234                 return HPPVI_INDEX_VLPI;
    235             }
    236         } else {
    237             if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1) {
    238                 return HPPVI_INDEX_VLPI;
    239             }
    240         }
    241     }
    242 
    243     return idx;
    244 }
    245 
    246 static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
    247 {
    248     /* Return a mask word which clears the subpriority bits from
    249      * a priority value for a virtual interrupt in the specified group.
    250      * This depends on the VBPR value.
    251      * If using VBPR0 then:
    252      *  a BPR of 0 means the group priority bits are [7:1];
    253      *  a BPR of 1 means they are [7:2], and so on down to
    254      *  a BPR of 7 meaning no group priority bits at all.
    255      * If using VBPR1 then:
    256      *  a BPR of 0 is impossible (the minimum value is 1)
    257      *  a BPR of 1 means the group priority bits are [7:1];
    258      *  a BPR of 2 means they are [7:2], and so on down to
    259      *  a BPR of 7 meaning the group priority is [7].
    260      *
    261      * Which BPR to use depends on the group of the interrupt and
    262      * the current ICH_VMCR_EL2.VCBPR settings.
    263      *
    264      * This corresponds to the VGroupBits() pseudocode.
    265      */
    266     int bpr;
    267 
    268     if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
    269         group = GICV3_G0;
    270     }
    271 
    272     bpr = read_vbpr(cs, group);
    273     if (group == GICV3_G1NS) {
    274         assert(bpr > 0);
    275         bpr--;
    276     }
    277 
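            /*
             * For example, an effective BPR of 2 at this point gives a mask
             * of 0xfffffff8, i.e. only priority bits [7:3] take part in
             * preemption comparisons.
             */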
    278     return ~0U << (bpr + 1);
    279 }
    280 
    281 static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
    282 {
    283     /* Return true if we can signal this virtual interrupt defined by
    284      * the given list register value; see the pseudocode functions
    285      * CanSignalVirtualInterrupt and CanSignalVirtualInt.
    286      * Compare also icc_hppi_can_preempt() which is the non-virtual
    287      * equivalent of these checks.
    288      */
    289     int grp;
    290     uint32_t mask, prio, rprio, vpmr;
    291 
    292     if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
    293         /* Virtual interface disabled */
    294         return false;
    295     }
    296 
    297     /* We don't need to check that this LR is in Pending state because
    298      * that has already been done in hppvi_index().
    299      */
    300 
    301     prio = ich_lr_prio(lr);
    302     vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
    303                      ICH_VMCR_EL2_VPMR_LENGTH);
    304 
    305     if (prio >= vpmr) {
    306         /* Priority mask masks this interrupt */
    307         return false;
    308     }
    309 
    310     rprio = ich_highest_active_virt_prio(cs);
    311     if (rprio == 0xff) {
    312         /* No running interrupt so we can preempt */
    313         return true;
    314     }
    315 
    316     grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
    317 
    318     mask = icv_gprio_mask(cs, grp);
    319 
    320     /* We only preempt a running interrupt if the pending interrupt's
    321      * group priority is sufficient (the subpriorities are not considered).
    322      */
    323     if ((prio & mask) < (rprio & mask)) {
    324         return true;
    325     }
    326 
    327     return false;
    328 }
    329 
    330 static bool icv_hppvlpi_can_preempt(GICv3CPUState *cs)
    331 {
    332     /*
    333      * Return true if we can signal the highest priority pending vLPI.
    334      * We can assume we're Non-secure because hppvi_index() already
    335      * tested for that.
    336      */
    337     uint32_t mask, rprio, vpmr;
    338 
    339     if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
    340         /* Virtual interface disabled */
    341         return false;
    342     }
    343 
    344     vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
    345                      ICH_VMCR_EL2_VPMR_LENGTH);
    346 
    347     if (cs->hppvlpi.prio >= vpmr) {
    348         /* Priority mask masks this interrupt */
    349         return false;
    350     }
    351 
    352     rprio = ich_highest_active_virt_prio(cs);
    353     if (rprio == 0xff) {
    354         /* No running interrupt so we can preempt */
    355         return true;
    356     }
    357 
    358     mask = icv_gprio_mask(cs, cs->hppvlpi.grp);
    359 
    360     /*
    361      * We only preempt a running interrupt if the pending interrupt's
    362      * group priority is sufficient (the subpriorities are not considered).
    363      */
    364     if ((cs->hppvlpi.prio & mask) < (rprio & mask)) {
    365         return true;
    366     }
    367 
    368     return false;
    369 }
    370 
    371 static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
    372                                                 uint32_t *misr)
    373 {
    374     /* Return a set of bits indicating the EOI maintenance interrupt status
    375      * for each list register. The EOI maintenance interrupt status is
    376      * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
    377      * (see the GICv3 spec for the ICH_EISR_EL2 register).
    378      * If misr is not NULL then we should also collect the information
    379      * about the MISR.EOI, MISR.NP and MISR.U bits.
    380      */
    381     uint32_t value = 0;
    382     int validcount = 0;
    383     bool seenpending = false;
    384     int i;
    385 
    386     for (i = 0; i < cs->num_list_regs; i++) {
    387         uint64_t lr = cs->ich_lr_el2[i];
    388 
    389         if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
    390             == ICH_LR_EL2_EOI) {
    391             value |= (1 << i);
    392         }
    393         if ((lr & ICH_LR_EL2_STATE_MASK)) {
    394             validcount++;
    395         }
    396         if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
    397             seenpending = true;
    398         }
    399     }
    400 
    401     if (misr) {
    402         if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
    403             *misr |= ICH_MISR_EL2_U;
    404         }
    405         if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
    406             *misr |= ICH_MISR_EL2_NP;
    407         }
    408         if (value) {
    409             *misr |= ICH_MISR_EL2_EOI;
    410         }
    411     }
    412     return value;
    413 }
    414 
    415 static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
    416 {
    417     /* Return a set of bits indicating the maintenance interrupt status
    418      * (as seen in the ICH_MISR_EL2 register).
    419      */
    420     uint32_t value = 0;
    421 
    422     /* Scan list registers and fill in the U, NP and EOI bits */
    423     eoi_maintenance_interrupt_state(cs, &value);
    424 
    425     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) &&
    426         (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) {
    427         value |= ICH_MISR_EL2_LRENP;
    428     }
    429 
    430     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
    431         (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
    432         value |= ICH_MISR_EL2_VGRP0E;
    433     }
    434 
    435     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
    436         !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
    437         value |= ICH_MISR_EL2_VGRP0D;
    438     }
    439     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
    440         (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
    441         value |= ICH_MISR_EL2_VGRP1E;
    442     }
    443 
    444     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
    445         !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
    446         value |= ICH_MISR_EL2_VGRP1D;
    447     }
    448 
    449     return value;
    450 }
    451 
    452 void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs)
    453 {
    454     /*
    455      * Tell the CPU about any pending virtual interrupts.
    456      * This should only be called for changes that affect the
    457      * vIRQ and vFIQ status and do not change the maintenance
    458      * interrupt status. This means that unlike gicv3_cpuif_virt_update()
    459      * this function won't recursively call back into the GIC code.
    460      * The main use of this is when the redistributor has changed the
    461      * highest priority pending virtual LPI.
    462      */
    463     int idx;
    464     int irqlevel = 0;
    465     int fiqlevel = 0;
    466 
    467     idx = hppvi_index(cs);
    468     trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx,
    469                                   cs->hppvlpi.irq, cs->hppvlpi.grp,
    470                                   cs->hppvlpi.prio);
    471     if (idx == HPPVI_INDEX_VLPI) {
    472         if (icv_hppvlpi_can_preempt(cs)) {
    473             if (cs->hppvlpi.grp == GICV3_G0) {
    474                 fiqlevel = 1;
    475             } else {
    476                 irqlevel = 1;
    477             }
    478         }
    479     } else if (idx >= 0) {
    480         uint64_t lr = cs->ich_lr_el2[idx];
    481 
    482         if (icv_hppi_can_preempt(cs, lr)) {
    483             /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
    484             if (lr & ICH_LR_EL2_GROUP) {
    485                 irqlevel = 1;
    486             } else {
    487                 fiqlevel = 1;
    488             }
    489         }
    490     }
    491 
    492     trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
    493     qemu_set_irq(cs->parent_vfiq, fiqlevel);
    494     qemu_set_irq(cs->parent_virq, irqlevel);
    495 }
    496 
    497 static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
    498 {
    499     /*
    500      * Tell the CPU about any pending virtual interrupts or
    501      * maintenance interrupts, following a change to the state
    502      * of the CPU interface relevant to virtual interrupts.
    503      *
    504      * CAUTION: this function will call qemu_set_irq() on the
    505      * CPU maintenance IRQ line, which is typically wired up
    506      * to the GIC as a per-CPU interrupt. This means that it
    507      * will recursively call back into the GIC code via
    508      * gicv3_redist_set_irq() and thus into the CPU interface code's
    509      * gicv3_cpuif_update(). It is therefore important that this
    510      * function is only called as the final action of a CPU interface
    511      * register write implementation, after all the GIC state
    512      * fields have been updated. gicv3_cpuif_update() also must
    513      * not cause this function to be called, but that happens
    514      * naturally as a result of there being no architectural
    515      * linkage between the physical and virtual GIC logic.
    516      */
    517     ARMCPU *cpu = ARM_CPU(cs->cpu);
    518     int maintlevel = 0;
    519 
    520     gicv3_cpuif_virt_irq_fiq_update(cs);
    521 
    522     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
    523         maintenance_interrupt_state(cs) != 0) {
    524         maintlevel = 1;
    525     }
    526 
    527     trace_gicv3_cpuif_virt_set_maint_irq(gicv3_redist_affid(cs), maintlevel);
    528     qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
    529 }
    530 
    531 static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
    532 {
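            /*
             * Read the virtual view of an active-priority register
             * (ICV_AP0R<n> or ICV_AP1R<n>): crm bit 0 selects the group,
             * opc2 selects the register index within the group.
             */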
    533     GICv3CPUState *cs = icc_cs_from_env(env);
    534     int regno = ri->opc2 & 3;
    535     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
    536     uint64_t value = cs->ich_apr[grp][regno];
    537 
    538     trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
    539     return value;
    540 }
    541 
    542 static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
    543                          uint64_t value)
    544 {
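            /*
             * Write the virtual view of an active-priority register; only
             * the low 32 bits are kept, and vIRQ/vFIQ are then re-evaluated.
             */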
    545     GICv3CPUState *cs = icc_cs_from_env(env);
    546     int regno = ri->opc2 & 3;
    547     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
    548 
    549     trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
    550 
    551     cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
    552 
    553     gicv3_cpuif_virt_irq_fiq_update(cs);
    554     return;
    555 }
    556 
    557 static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
    558 {
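            /*
             * Read ICV_BPR0 or ICV_BPR1 from the VBPR fields of ICH_VMCR_EL2,
             * applying the VCBPR "BPR1 reads as BPR0 + 1" behaviour.
             */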
    559     GICv3CPUState *cs = icc_cs_from_env(env);
    560     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
    561     uint64_t bpr;
    562     bool satinc = false;
    563 
    564     if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
    565         /* reads return bpr0 + 1 saturated to 7, writes ignored */
    566         grp = GICV3_G0;
    567         satinc = true;
    568     }
    569 
    570     bpr = read_vbpr(cs, grp);
    571 
    572     if (satinc) {
    573         bpr++;
    574         bpr = MIN(bpr, 7);
    575     }
    576 
    577     trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
    578 
    579     return bpr;
    580 }
    581 
    582 static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    583                           uint64_t value)
    584 {
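            /*
             * Write ICV_BPR0 or ICV_BPR1 into ICH_VMCR_EL2; BPR1 writes are
             * ignored when VCBPR is set.
             */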
    585     GICv3CPUState *cs = icc_cs_from_env(env);
    586     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
    587 
    588     trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
    589                               gicv3_redist_affid(cs), value);
    590 
    591     if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
    592         /* reads return bpr0 + 1 saturated to 7, writes ignored */
    593         return;
    594     }
    595 
    596     write_vbpr(cs, grp, value);
    597 
    598     gicv3_cpuif_virt_irq_fiq_update(cs);
    599 }
    600 
    601 static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
    602 {
    603     GICv3CPUState *cs = icc_cs_from_env(env);
    604     uint64_t value;
    605 
    606     value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
    607                       ICH_VMCR_EL2_VPMR_LENGTH);
    608 
    609     trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
    610     return value;
    611 }
    612 
    613 static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    614                           uint64_t value)
    615 {
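            /*
             * Write ICV_PMR_EL1: mask off the unimplemented priority bits and
             * store the value into the VPMR field of ICH_VMCR_EL2.
             */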
    616     GICv3CPUState *cs = icc_cs_from_env(env);
    617 
    618     trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);
    619 
    620     value &= icv_fullprio_mask(cs);
    621 
    622     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
    623                                  ICH_VMCR_EL2_VPMR_LENGTH, value);
    624 
    625     gicv3_cpuif_virt_irq_fiq_update(cs);
    626 }
    627 
    628 static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
    629 {
    630     GICv3CPUState *cs = icc_cs_from_env(env);
    631     int enbit;
    632     uint64_t value;
    633 
    634     enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
    635     value = extract64(cs->ich_vmcr_el2, enbit, 1);
    636 
    637     trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
    638                                 gicv3_redist_affid(cs), value);
    639     return value;
    640 }
    641 
    642 static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
    643                              uint64_t value)
    644 {
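            /*
             * Write ICV_IGRPEN0_EL1 or ICV_IGRPEN1_EL1: update the VENG0 or
             * VENG1 bit in ICH_VMCR_EL2 (opc2 bit 0 selects the group).
             */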
    645     GICv3CPUState *cs = icc_cs_from_env(env);
    646     int enbit;
    647 
    648     trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
    649                                  gicv3_redist_affid(cs), value);
    650 
    651     enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
    652 
    653     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
    654     gicv3_cpuif_virt_update(cs);
    655 }
    656 
    657 static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
    658 {
    659     GICv3CPUState *cs = icc_cs_from_env(env);
    660     uint64_t value;
    661 
    662     /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
    663      * should match the ones reported in ich_vtr_read().
    664      */
    665     value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
    666         ((cs->vpribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
    667 
    668     if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
    669         value |= ICC_CTLR_EL1_EOIMODE;
    670     }
    671 
    672     if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
    673         value |= ICC_CTLR_EL1_CBPR;
    674     }
    675 
    676     trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
    677     return value;
    678 }
    679 
    680 static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
    681                                uint64_t value)
    682 {
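            /*
             * Write ICV_CTLR_EL1: only the CBPR and EOIMODE bits are
             * writable, and they are stored as VCBPR and VEOIM in
             * ICH_VMCR_EL2.
             */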
    683     GICv3CPUState *cs = icc_cs_from_env(env);
    684 
    685     trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);
    686 
    687     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
    688                                  1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
    689     cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
    690                                  1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
    691 
    692     gicv3_cpuif_virt_irq_fiq_update(cs);
    693 }
    694 
    695 static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
    696 {
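            /* Read ICV_RPR_EL1: the current virtual running priority */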
    697     GICv3CPUState *cs = icc_cs_from_env(env);
    698     int prio = ich_highest_active_virt_prio(cs);
    699 
    700     trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
    701     return prio;
    702 }
    703 
    704 static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
    705 {
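            /*
             * Read ICV_HPPIR0_EL1 or ICV_HPPIR1_EL1: report the INTID of the
             * highest priority pending virtual interrupt if it is in this
             * register's group, otherwise the spurious INTID.
             */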
    706     GICv3CPUState *cs = icc_cs_from_env(env);
    707     int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    708     int idx = hppvi_index(cs);
    709     uint64_t value = INTID_SPURIOUS;
    710 
    711     if (idx == HPPVI_INDEX_VLPI) {
    712         if (cs->hppvlpi.grp == grp) {
    713             value = cs->hppvlpi.irq;
    714         }
    715     } else if (idx >= 0) {
    716         uint64_t lr = cs->ich_lr_el2[idx];
    717         int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
    718 
    719         if (grp == thisgrp) {
    720             value = ich_lr_vintid(lr);
    721         }
    722     }
    723 
    724     trace_gicv3_icv_hppir_read(ri->crm == 8 ? 0 : 1,
    725                                gicv3_redist_affid(cs), value);
    726     return value;
    727 }
    728 
    729 static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
    730 {
    731     /* Activate the interrupt in the specified list register
    732      * by moving it from Pending to Active state, and update the
    733      * Active Priority Registers.
    734      */
    735     uint32_t mask = icv_gprio_mask(cs, grp);
    736     int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
    737     int aprbit = prio >> (8 - cs->vprebits);
    738     int regno = aprbit / 32;
    739     int regbit = aprbit % 32;
    740 
    741     cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
    742     cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;
    743     cs->ich_apr[grp][regno] |= (1 << regbit);
    744 }
    745 
    746 static void icv_activate_vlpi(GICv3CPUState *cs)
    747 {
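            /*
             * Activate the highest priority pending vLPI: record its group
             * priority in the virtual APRs and tell the redistributor to
             * clear its pending state.
             */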
    748     uint32_t mask = icv_gprio_mask(cs, cs->hppvlpi.grp);
    749     int prio = cs->hppvlpi.prio & mask;
    750     int aprbit = prio >> (8 - cs->vprebits);
    751     int regno = aprbit / 32;
    752     int regbit = aprbit % 32;
    753 
    754     cs->ich_apr[cs->hppvlpi.grp][regno] |= (1 << regbit);
    755     gicv3_redist_vlpi_pending(cs, cs->hppvlpi.irq, 0);
    756 }
    757 
    758 static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
    759 {
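            /*
             * Read ICV_IAR0_EL1 or ICV_IAR1_EL1: acknowledge the highest
             * priority pending virtual interrupt for this group, activating
             * it if it is able to preempt, and return its INTID (or a
             * spurious/special INTID).
             */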
    760     GICv3CPUState *cs = icc_cs_from_env(env);
    761     int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    762     int idx = hppvi_index(cs);
    763     uint64_t intid = INTID_SPURIOUS;
    764 
    765     if (idx == HPPVI_INDEX_VLPI) {
    766         if (cs->hppvlpi.grp == grp && icv_hppvlpi_can_preempt(cs)) {
    767             intid = cs->hppvlpi.irq;
    768             icv_activate_vlpi(cs);
    769         }
    770     } else if (idx >= 0) {
    771         uint64_t lr = cs->ich_lr_el2[idx];
    772         int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
    773 
    774         if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
    775             intid = ich_lr_vintid(lr);
    776             if (!gicv3_intid_is_special(intid)) {
    777                 icv_activate_irq(cs, idx, grp);
    778             } else {
    779                 /* Interrupt goes from Pending to Invalid */
    780                 cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
    781                 /* We will now return the (bogus) ID from the list register,
    782                  * as per the pseudocode.
    783                  */
    784             }
    785         }
    786     }
    787 
    788     trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
    789                              gicv3_redist_affid(cs), intid);
    790 
    791     gicv3_cpuif_virt_update(cs);
    792 
    793     return intid;
    794 }
    795 
    796 static uint32_t icc_fullprio_mask(GICv3CPUState *cs)
    797 {
    798     /*
    799      * Return a mask word which clears the unimplemented priority bits
    800      * from a priority value for a physical interrupt. (Not to be confused
    801      * with the group priority, whose mask depends on the value of BPR
    802      * for the interrupt group.)
    803      */
    804     return ~0U << (8 - cs->pribits);
    805 }
    806 
    807 static inline int icc_min_bpr(GICv3CPUState *cs)
    808 {
    809     /* The minimum BPR for the physical interface. */
    810     return 7 - cs->prebits;
    811 }
    812 
    813 static inline int icc_min_bpr_ns(GICv3CPUState *cs)
    814 {
    815     return icc_min_bpr(cs) + 1;
    816 }
    817 
    818 static inline int icc_num_aprs(GICv3CPUState *cs)
    819 {
    820     /* Return the number of APR registers (1, 2, or 4) */
    821     int aprmax = 1 << MAX(cs->prebits - 5, 0);
    822     assert(aprmax <= ARRAY_SIZE(cs->icc_apr[0]));
    823     return aprmax;
    824 }
    825 
    826 static int icc_highest_active_prio(GICv3CPUState *cs)
    827 {
    828     /* Calculate the current running priority based on the set bits
    829      * in the Active Priority Registers.
    830      */
    831     int i;
    832 
    833     for (i = 0; i < icc_num_aprs(cs); i++) {
    834         uint32_t apr = cs->icc_apr[GICV3_G0][i] |
    835             cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];
    836 
    837         if (!apr) {
    838             continue;
    839         }
    840         return (i * 32 + ctz32(apr)) << (icc_min_bpr(cs) + 1);
    841     }
    842     /* No current active interrupts: return idle priority */
    843     return 0xff;
    844 }
    845 
    846 static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
    847 {
    848     /* Return a mask word which clears the subpriority bits from
    849      * a priority value for an interrupt in the specified group.
    850      * This depends on the BPR value. For CBPR0 (S or NS):
    851      *  a BPR of 0 means the group priority bits are [7:1];
    852      *  a BPR of 1 means they are [7:2], and so on down to
    853      *  a BPR of 7 meaning no group priority bits at all.
    854      * For CBPR1 NS:
    855      *  a BPR of 0 is impossible (the minimum value is 1)
    856      *  a BPR of 1 means the group priority bits are [7:1];
    857      *  a BPR of 2 means they are [7:2], and so on down to
    858      *  a BPR of 7 meaning the group priority is [7].
    859      *
    860      * Which BPR to use depends on the group of the interrupt and
    861      * the current ICC_CTLR.CBPR settings.
    862      *
    863      * This corresponds to the GroupBits() pseudocode.
    864      */
    865     int bpr;
    866 
    867     if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
    868         (group == GICV3_G1NS &&
    869          cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
    870         group = GICV3_G0;
    871     }
    872 
    873     bpr = cs->icc_bpr[group] & 7;
    874 
    875     if (group == GICV3_G1NS) {
    876         assert(bpr > 0);
    877         bpr--;
    878     }
    879 
    880     return ~0U << (bpr + 1);
    881 }
    882 
    883 static bool icc_no_enabled_hppi(GICv3CPUState *cs)
    884 {
    885     /* Return true if there is no pending interrupt, or the
    886      * highest priority pending interrupt is in a group which has been
    887      * disabled at the CPU interface by the ICC_IGRPEN* register enable bits.
    888      */
    889     return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
    890 }
    891 
    892 static bool icc_hppi_can_preempt(GICv3CPUState *cs)
    893 {
    894     /* Return true if we have a pending interrupt of sufficient
    895      * priority to preempt.
    896      */
    897     int rprio;
    898     uint32_t mask;
    899 
    900     if (icc_no_enabled_hppi(cs)) {
    901         return false;
    902     }
    903 
    904     if (cs->hppi.prio >= cs->icc_pmr_el1) {
    905         /* Priority mask masks this interrupt */
    906         return false;
    907     }
    908 
    909     rprio = icc_highest_active_prio(cs);
    910     if (rprio == 0xff) {
    911         /* No currently running interrupt so we can preempt */
    912         return true;
    913     }
    914 
    915     mask = icc_gprio_mask(cs, cs->hppi.grp);
    916 
    917     /* We only preempt a running interrupt if the pending interrupt's
    918      * group priority is sufficient (the subpriorities are not considered).
    919      */
    920     if ((cs->hppi.prio & mask) < (rprio & mask)) {
    921         return true;
    922     }
    923 
    924     return false;
    925 }
    926 
    927 void gicv3_cpuif_update(GICv3CPUState *cs)
    928 {
    929     /* Tell the CPU about its highest priority pending interrupt */
    930     int irqlevel = 0;
    931     int fiqlevel = 0;
    932     ARMCPU *cpu = ARM_CPU(cs->cpu);
    933     CPUARMState *env = &cpu->env;
    934 
    935     g_assert(qemu_mutex_iothread_locked());
    936 
    937     trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
    938                              cs->hppi.grp, cs->hppi.prio);
    939 
    940     if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
    941         /* If a Security-enabled GIC sends a G1S interrupt to a
    942          * Security-disabled CPU, we must treat it as if it were G0.
    943          */
    944         cs->hppi.grp = GICV3_G0;
    945     }
    946 
    947     if (icc_hppi_can_preempt(cs)) {
    948         /* We have an interrupt: should we signal it as IRQ or FIQ?
    949          * This is described in the GICv3 spec section 4.6.2.
    950          */
    951         bool isfiq;
    952 
    953         switch (cs->hppi.grp) {
    954         case GICV3_G0:
    955             isfiq = true;
    956             break;
    957         case GICV3_G1:
    958             isfiq = (!arm_is_secure(env) ||
    959                      (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3)));
    960             break;
    961         case GICV3_G1NS:
    962             isfiq = arm_is_secure(env);
    963             break;
    964         default:
    965             g_assert_not_reached();
    966         }
    967 
    968         if (isfiq) {
    969             fiqlevel = 1;
    970         } else {
    971             irqlevel = 1;
    972         }
    973     }
    974 
    975     trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
    976 
    977     qemu_set_irq(cs->parent_fiq, fiqlevel);
    978     qemu_set_irq(cs->parent_irq, irqlevel);
    979 }
    980 
    981 static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
    982 {
    983     GICv3CPUState *cs = icc_cs_from_env(env);
    984     uint32_t value = cs->icc_pmr_el1;
    985 
    986     if (icv_access(env, HCR_FMO | HCR_IMO)) {
    987         return icv_pmr_read(env, ri);
    988     }
    989 
    990     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
    991         (env->cp15.scr_el3 & SCR_FIQ)) {
    992         /* NS access and Group 0 is inaccessible to NS: return the
    993          * NS view of the current priority
    994          */
    995         if ((value & 0x80) == 0) {
    996             /* Secure priorities not visible to NS */
    997             value = 0;
    998         } else if (value != 0xff) {
    999             value = (value << 1) & 0xff;
   1000         }
   1001     }
   1002 
   1003     trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);
   1004 
   1005     return value;
   1006 }
   1007 
   1008 static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1009                           uint64_t value)
   1010 {
   1011     GICv3CPUState *cs = icc_cs_from_env(env);
   1012 
   1013     if (icv_access(env, HCR_FMO | HCR_IMO)) {
   1014         return icv_pmr_write(env, ri, value);
   1015     }
   1016 
   1017     trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);
   1018 
   1019     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
   1020         (env->cp15.scr_el3 & SCR_FIQ)) {
   1021         /* NS access and Group 0 is inaccessible to NS: convert the
   1022          * NS-written value to the Secure view of the priority
   1023          */
   1024         if (!(cs->icc_pmr_el1 & 0x80)) {
   1025             /* Current PMR in the secure range, don't allow NS to change it */
   1026             return;
   1027         }
   1028         value = (value >> 1) | 0x80;
   1029     }
   1030     value &= icc_fullprio_mask(cs);
   1031     cs->icc_pmr_el1 = value;
   1032     gicv3_cpuif_update(cs);
   1033 }
   1034 
   1035 static void icc_activate_irq(GICv3CPUState *cs, int irq)
   1036 {
   1037     /* Move the interrupt from the Pending state to Active, and update
   1038      * the Active Priority Registers
   1039      */
   1040     uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
   1041     int prio = cs->hppi.prio & mask;
   1042     int aprbit = prio >> (8 - cs->prebits);
   1043     int regno = aprbit / 32;
   1044     int regbit = aprbit % 32;
   1045 
   1046     cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);
   1047 
   1048     if (irq < GIC_INTERNAL) {
   1049         cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
   1050         cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
   1051         gicv3_redist_update(cs);
   1052     } else if (irq < GICV3_LPI_INTID_START) {
   1053         gicv3_gicd_active_set(cs->gic, irq);
   1054         gicv3_gicd_pending_clear(cs->gic, irq);
   1055         gicv3_update(cs->gic, irq, 1);
   1056     } else {
   1057         gicv3_redist_lpi_pending(cs, irq, 0);
   1058     }
   1059 }
   1060 
   1061 static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
   1062 {
   1063     /* Return the highest priority pending interrupt register value
   1064      * for group 0.
   1065      */
   1066     bool irq_is_secure;
   1067 
   1068     if (cs->hppi.prio == 0xff) {
   1069         return INTID_SPURIOUS;
   1070     }
   1071 
   1072     /* Check whether we can return the interrupt or if we should return
   1073      * a special identifier, as per the CheckGroup0ForSpecialIdentifiers
   1074      * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
   1075      * is always zero.)
   1076      */
   1077     irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
   1078                      (cs->hppi.grp != GICV3_G1NS));
   1079 
   1080     if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
   1081         return INTID_SPURIOUS;
   1082     }
   1083     if (irq_is_secure && !arm_is_secure(env)) {
   1084         /* Secure interrupts not visible to Nonsecure */
   1085         return INTID_SPURIOUS;
   1086     }
   1087 
   1088     if (cs->hppi.grp != GICV3_G0) {
   1089         /* Indicate to EL3 that there's a Group 1 interrupt for the other
   1090          * state pending.
   1091          */
   1092         return irq_is_secure ? INTID_SECURE : INTID_NONSECURE;
   1093     }
   1094 
   1095     return cs->hppi.irq;
   1096 }
   1097 
   1098 static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
   1099 {
   1100     /* Return the highest priority pending interrupt register value
   1101      * for group 1.
   1102      */
   1103     bool irq_is_secure;
   1104 
   1105     if (cs->hppi.prio == 0xff) {
   1106         return INTID_SPURIOUS;
   1107     }
   1108 
   1109     /* Check whether we can return the interrupt or if we should return
   1110      * a special identifier, as per the CheckGroup1ForSpecialIdentifiers
   1111      * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
   1112      * is always zero.)
   1113      */
   1114     irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
   1115                      (cs->hppi.grp != GICV3_G1NS));
   1116 
   1117     if (cs->hppi.grp == GICV3_G0) {
   1118         /* Group 0 interrupts not visible via HPPIR1 */
   1119         return INTID_SPURIOUS;
   1120     }
   1121     if (irq_is_secure) {
   1122         if (!arm_is_secure(env)) {
   1123             /* Secure interrupts not visible in Non-secure */
   1124             return INTID_SPURIOUS;
   1125         }
   1126     } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
   1127         /* Group 1 non-secure interrupts not visible in Secure EL1 */
   1128         return INTID_SPURIOUS;
   1129     }
   1130 
   1131     return cs->hppi.irq;
   1132 }
   1133 
   1134 static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1135 {
   1136     GICv3CPUState *cs = icc_cs_from_env(env);
   1137     uint64_t intid;
   1138 
   1139     if (icv_access(env, HCR_FMO)) {
   1140         return icv_iar_read(env, ri);
   1141     }
   1142 
   1143     if (!icc_hppi_can_preempt(cs)) {
   1144         intid = INTID_SPURIOUS;
   1145     } else {
   1146         intid = icc_hppir0_value(cs, env);
   1147     }
   1148 
   1149     if (!gicv3_intid_is_special(intid)) {
   1150         icc_activate_irq(cs, intid);
   1151     }
   1152 
   1153     trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
   1154     return intid;
   1155 }
   1156 
   1157 static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1158 {
   1159     GICv3CPUState *cs = icc_cs_from_env(env);
   1160     uint64_t intid;
   1161 
   1162     if (icv_access(env, HCR_IMO)) {
   1163         return icv_iar_read(env, ri);
   1164     }
   1165 
   1166     if (!icc_hppi_can_preempt(cs)) {
   1167         intid = INTID_SPURIOUS;
   1168     } else {
   1169         intid = icc_hppir1_value(cs, env);
   1170     }
   1171 
   1172     if (!gicv3_intid_is_special(intid)) {
   1173         icc_activate_irq(cs, intid);
   1174     }
   1175 
   1176     trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
   1177     return intid;
   1178 }
   1179 
   1180 static void icc_drop_prio(GICv3CPUState *cs, int grp)
   1181 {
   1182     /* Drop the priority of the currently active interrupt in
   1183      * the specified group.
   1184      *
   1185      * Note that we can guarantee (because of the requirement to nest
   1186      * ICC_IAR reads [which activate an interrupt and raise priority]
   1187      * with ICC_EOIR writes [which drop the priority for the interrupt])
   1188      * that the interrupt we're being called for is the highest priority
   1189      * active interrupt, meaning that it has the lowest set bit in the
   1190      * APR registers.
   1191      *
   1192      * If the guest does not honour the ordering constraints then the
   1193      * behaviour of the GIC is UNPREDICTABLE, which for us means that
   1194      * the values of the APR registers might become incorrect and the
   1195      * running priority will be wrong, so interrupts that should preempt
   1196      * might not do so, and interrupts that should not preempt might do so.
   1197      */
   1198     int i;
   1199 
   1200     for (i = 0; i < icc_num_aprs(cs); i++) {
   1201         uint64_t *papr = &cs->icc_apr[grp][i];
   1202 
   1203         if (!*papr) {
   1204             continue;
   1205         }
   1206         /* Clear the lowest set bit */
   1207         *papr &= *papr - 1;
   1208         break;
   1209     }
   1210 
   1211     /* running priority change means we need an update for this cpu i/f */
   1212     gicv3_cpuif_update(cs);
   1213 }
   1214 
   1215 static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
   1216 {
   1217     /* Return true if we should split priority drop and interrupt
   1218      * deactivation, ie whether the relevant EOIMode bit is set.
   1219      */
   1220     if (arm_is_el3_or_mon(env)) {
   1221         return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
   1222     }
   1223     if (arm_is_secure_below_el3(env)) {
   1224         return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
   1225     } else {
   1226         return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
   1227     }
   1228 }
   1229 
   1230 static int icc_highest_active_group(GICv3CPUState *cs)
   1231 {
   1232     /* Return the group with the highest priority active interrupt.
   1233      * We can do this by just comparing the APRs to see which one
   1234      * has the lowest set bit.
   1235      * (If more than one group is active at the same priority then
   1236      * we're in UNPREDICTABLE territory.)
   1237      */
   1238     int i;
   1239 
   1240     for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
   1241         int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
   1242         int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
   1243         int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);
   1244 
   1245         if (g1nsctz < g0ctz && g1nsctz < g1ctz) {
   1246             return GICV3_G1NS;
   1247         }
   1248         if (g1ctz < g0ctz) {
   1249             return GICV3_G1;
   1250         }
   1251         if (g0ctz < 32) {
   1252             return GICV3_G0;
   1253         }
   1254     }
   1255     /* No set active bits? UNPREDICTABLE; return -1 so the caller
   1256      * ignores the spurious EOI attempt.
   1257      */
   1258     return -1;
   1259 }
   1260 
   1261 static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
   1262 {
   1263     if (irq < GIC_INTERNAL) {
   1264         cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
   1265         gicv3_redist_update(cs);
   1266     } else {
   1267         gicv3_gicd_active_clear(cs->gic, irq);
   1268         gicv3_update(cs->gic, irq, 1);
   1269     }
   1270 }
   1271 
   1272 static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
   1273 {
   1274     /* Return true if we should split priority drop and interrupt
   1275      * deactivation, ie whether the virtual EOIMode bit is set.
   1276      */
   1277     return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
   1278 }
   1279 
   1280 static int icv_find_active(GICv3CPUState *cs, int irq)
   1281 {
   1282     /* Given an interrupt number for an active interrupt, return the index
   1283      * of the corresponding list register, or -1 if there is no match.
   1284      * Corresponds to FindActiveVirtualInterrupt pseudocode.
   1285      */
   1286     int i;
   1287 
   1288     for (i = 0; i < cs->num_list_regs; i++) {
   1289         uint64_t lr = cs->ich_lr_el2[i];
   1290 
   1291         if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) {
   1292             return i;
   1293         }
   1294     }
   1295 
   1296     return -1;
   1297 }
   1298 
   1299 static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
   1300 {
   1301     /* Deactivate the interrupt in the specified list register index */
   1302     uint64_t lr = cs->ich_lr_el2[idx];
   1303 
   1304     if (lr & ICH_LR_EL2_HW) {
   1305         /* Deactivate the associated physical interrupt */
   1306         int pirq = ich_lr_pintid(lr);
   1307 
   1308         if (pirq < INTID_SECURE) {
   1309             icc_deactivate_irq(cs, pirq);
   1310         }
   1311     }
   1312 
   1313     /* Clear the 'active' part of the state, so ActivePending->Pending
   1314      * and Active->Invalid.
   1315      */
   1316     lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT;
   1317     cs->ich_lr_el2[idx] = lr;
   1318 }
   1319 
   1320 static void icv_increment_eoicount(GICv3CPUState *cs)
   1321 {
   1322     /* Increment the EOICOUNT field in ICH_HCR_EL2 */
   1323     int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
   1324                              ICH_HCR_EL2_EOICOUNT_LENGTH);
   1325 
   1326     cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
   1327                                 ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
   1328 }
   1329 
   1330 static int icv_drop_prio(GICv3CPUState *cs)
   1331 {
   1332     /* Drop the priority of the currently active virtual interrupt
   1333      * (favouring group 0 if there is a set active bit at
   1334      * the same priority for both group 0 and group 1).
   1335      * Return the priority value for the bit we just cleared,
   1336      * or 0xff if no bits were set in the AP registers at all.
   1337      * Note that though the ich_apr[] are uint64_t only the low
   1338      * 32 bits are actually relevant.
   1339      */
   1340     int i;
   1341     int aprmax = ich_num_aprs(cs);
   1342 
   1343     for (i = 0; i < aprmax; i++) {
   1344         uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
   1345         uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
   1346         int apr0count, apr1count;
   1347 
   1348         if (!*papr0 && !*papr1) {
   1349             continue;
   1350         }
   1351 
   1352         /* We can't just use the bit-twiddling hack icc_drop_prio() does
   1353          * because we need to return the bit number we cleared so
   1354          * it can be compared against the list register's priority field.
   1355          */
   1356         apr0count = ctz32(*papr0);
   1357         apr1count = ctz32(*papr1);
   1358 
   1359         if (apr0count <= apr1count) {
   1360             *papr0 &= *papr0 - 1;
   1361             return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
   1362         } else {
   1363             *papr1 &= *papr1 - 1;
   1364             return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
   1365         }
   1366     }
   1367     return 0xff;
   1368 }
   1369 
   1370 static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1371                           uint64_t value)
   1372 {
   1373     /* Deactivate interrupt */
   1374     GICv3CPUState *cs = icc_cs_from_env(env);
   1375     int idx;
   1376     int irq = value & 0xffffff;
   1377 
   1378     trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);
   1379 
   1380     if (irq >= GICV3_MAXIRQ) {
   1381         /* Also catches special interrupt numbers and LPIs */
   1382         return;
   1383     }
   1384 
   1385     if (!icv_eoi_split(env, cs)) {
   1386         return;
   1387     }
   1388 
   1389     idx = icv_find_active(cs, irq);
   1390 
   1391     if (idx < 0) {
   1392         /* No list register matching this, so increment the EOI count
   1393          * (might trigger a maintenance interrupt)
   1394          */
   1395         icv_increment_eoicount(cs);
   1396     } else {
   1397         icv_deactivate_irq(cs, idx);
   1398     }
   1399 
   1400     gicv3_cpuif_virt_update(cs);
   1401 }
   1402 
   1403 static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1404                            uint64_t value)
   1405 {
   1406     /* End of Interrupt */
   1407     GICv3CPUState *cs = icc_cs_from_env(env);
   1408     int irq = value & 0xffffff;
   1409     int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
   1410     int idx, dropprio;
   1411 
   1412     trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
   1413                                gicv3_redist_affid(cs), value);
   1414 
   1415     if (gicv3_intid_is_special(irq)) {
   1416         return;
   1417     }
   1418 
   1419     /* We implement the IMPDEF choice of "drop priority before doing
   1420      * error checks" (because that lets us avoid scanning the AP
   1421      * registers twice).
   1422      */
   1423     dropprio = icv_drop_prio(cs);
   1424     if (dropprio == 0xff) {
   1425         /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
   1426          * whether the list registers are checked in this
   1427          * situation; we choose not to.
   1428          */
   1429         return;
   1430     }
   1431 
   1432     idx = icv_find_active(cs, irq);
   1433 
   1434     if (idx < 0) {
   1435         /* No valid list register corresponding to EOI ID */
   1436         icv_increment_eoicount(cs);
   1437     } else {
   1438         uint64_t lr = cs->ich_lr_el2[idx];
   1439         int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
   1440         int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
   1441 
   1442         if (thisgrp == grp && lr_gprio == dropprio) {
   1443             if (!icv_eoi_split(env, cs)) {
   1444                 /* Priority drop and deactivate not split: deactivate irq now */
   1445                 icv_deactivate_irq(cs, idx);
   1446             }
   1447         }
   1448     }
   1449 
   1450     gicv3_cpuif_virt_update(cs);
   1451 }
   1452 
   1453 static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1454                            uint64_t value)
   1455 {
   1456     /* End of Interrupt */
   1457     GICv3CPUState *cs = icc_cs_from_env(env);
   1458     int irq = value & 0xffffff;
   1459     int grp;
   1460     bool is_eoir0 = ri->crm == 8;
   1461 
   1462     if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) {
   1463         icv_eoir_write(env, ri, value);
   1464         return;
   1465     }
   1466 
   1467     trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
   1468                                gicv3_redist_affid(cs), value);
   1469 
   1470     if ((irq >= cs->gic->num_irq) &&
   1471         !(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) {
   1472         /* This handles two cases:
   1473          * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
   1474          * to the GICC_EOIR, the GIC ignores that write.
   1475          * 2. If software writes the number of a non-existent interrupt
   1476          * this must be a subcase of "value written does not match the last
   1477          * valid interrupt value read from the Interrupt Acknowledge
   1478          * register" and so this is UNPREDICTABLE. We choose to ignore it.
   1479          */
   1480         return;
   1481     }
   1482 
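            /* An EOI is only honoured when it is written to the EOIR register
             * matching the highest active group and from a Security state that
             * is allowed to handle that group; otherwise we ignore the write.
             */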
   1483     grp = icc_highest_active_group(cs);
   1484     switch (grp) {
   1485     case GICV3_G0:
   1486         if (!is_eoir0) {
   1487             return;
   1488         }
   1489         if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
   1490             && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
   1491             return;
   1492         }
   1493         break;
   1494     case GICV3_G1:
   1495         if (is_eoir0) {
   1496             return;
   1497         }
   1498         if (!arm_is_secure(env)) {
   1499             return;
   1500         }
   1501         break;
   1502     case GICV3_G1NS:
   1503         if (is_eoir0) {
   1504             return;
   1505         }
   1506         if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
   1507             return;
   1508         }
   1509         break;
   1510     default:
   1511         qemu_log_mask(LOG_GUEST_ERROR,
   1512                       "%s: IRQ %d isn't active\n", __func__, irq);
   1513         return;
   1514     }
   1515 
   1516     icc_drop_prio(cs, grp);
   1517 
   1518     if (!icc_eoi_split(env, cs)) {
   1519         /* Priority drop and deactivate not split: deactivate irq now */
   1520         icc_deactivate_irq(cs, irq);
   1521     }
   1522 }
   1523 
   1524 static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1525 {
   1526     GICv3CPUState *cs = icc_cs_from_env(env);
   1527     uint64_t value;
   1528 
   1529     if (icv_access(env, HCR_FMO)) {
   1530         return icv_hppir_read(env, ri);
   1531     }
   1532 
   1533     value = icc_hppir0_value(cs, env);
   1534     trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
   1535     return value;
   1536 }
   1537 
   1538 static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1539 {
   1540     GICv3CPUState *cs = icc_cs_from_env(env);
   1541     uint64_t value;
   1542 
   1543     if (icv_access(env, HCR_IMO)) {
   1544         return icv_hppir_read(env, ri);
   1545     }
   1546 
   1547     value = icc_hppir1_value(cs, env);
   1548     trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
   1549     return value;
   1550 }
   1551 
   1552 static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1553 {
   1554     GICv3CPUState *cs = icc_cs_from_env(env);
   1555     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
   1556     bool satinc = false;
   1557     uint64_t bpr;
   1558 
   1559     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
   1560         return icv_bpr_read(env, ri);
   1561     }
   1562 
   1563     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
   1564         grp = GICV3_G1NS;
   1565     }
   1566 
   1567     if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
   1568         (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
   1569         /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
   1570          * modify BPR0
   1571          */
   1572         grp = GICV3_G0;
   1573     }
   1574 
   1575     if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
   1576         (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
   1577         /* reads return bpr0 + 1 sat to 7, writes ignored */
   1578         grp = GICV3_G0;
   1579         satinc = true;
   1580     }
   1581 
   1582     bpr = cs->icc_bpr[grp];
   1583     if (satinc) {
   1584         bpr++;
   1585         bpr = MIN(bpr, 7);
   1586     }
   1587 
   1588     trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
   1589 
   1590     return bpr;
   1591 }
   1592 
   1593 static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1594                           uint64_t value)
   1595 {
   1596     GICv3CPUState *cs = icc_cs_from_env(env);
   1597     int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
   1598     uint64_t minval;
   1599 
   1600     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
   1601         icv_bpr_write(env, ri, value);
   1602         return;
   1603     }
   1604 
   1605     trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
   1606                               gicv3_redist_affid(cs), value);
   1607 
   1608     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
   1609         grp = GICV3_G1NS;
   1610     }
   1611 
   1612     if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
   1613         (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
   1614         /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
   1615          * modify BPR0
   1616          */
   1617         grp = GICV3_G0;
   1618     }
   1619 
   1620     if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
   1621         (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
   1622         /* reads return bpr0 + 1 sat to 7, writes ignored */
   1623         return;
   1624     }
   1625 
   1626     minval = (grp == GICV3_G1NS) ? icc_min_bpr_ns(cs) : icc_min_bpr(cs);
   1627     if (value < minval) {
   1628         value = minval;
   1629     }
   1630 
   1631     cs->icc_bpr[grp] = value & 7;
   1632     gicv3_cpuif_update(cs);
   1633 }
   1634 
   1635 static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1636 {
   1637     GICv3CPUState *cs = icc_cs_from_env(env);
   1638     uint64_t value;
   1639 
   1640     int regno = ri->opc2 & 3;
   1641     int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
   1642 
   1643     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
   1644         return icv_ap_read(env, ri);
   1645     }
   1646 
   1647     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
   1648         grp = GICV3_G1NS;
   1649     }
   1650 
   1651     value = cs->icc_apr[grp][regno];
   1652 
   1653     trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
   1654     return value;
   1655 }
   1656 
   1657 static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1658                          uint64_t value)
   1659 {
   1660     GICv3CPUState *cs = icc_cs_from_env(env);
   1661 
   1662     int regno = ri->opc2 & 3;
   1663     int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
   1664 
   1665     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
   1666         icv_ap_write(env, ri, value);
   1667         return;
   1668     }
   1669 
   1670     trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
   1671 
   1672     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
   1673         grp = GICV3_G1NS;
   1674     }
   1675 
   1676     /* It's not possible to claim that a Non-secure interrupt is active
   1677      * at a priority outside the Non-secure range (128..255), since this
   1678      * would otherwise allow malicious NS code to block delivery of S interrupts
   1679      * by writing a bad value to these registers.
   1680      */
   1681     if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
   1682         return;
   1683     }
   1684 
   1685     cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
   1686     gicv3_cpuif_update(cs);
   1687 }
   1688 
   1689 static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1690                           uint64_t value)
   1691 {
   1692     /* Deactivate interrupt */
   1693     GICv3CPUState *cs = icc_cs_from_env(env);
   1694     int irq = value & 0xffffff;
   1695     bool irq_is_secure, single_sec_state, irq_is_grp0;
   1696     bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;
   1697 
   1698     if (icv_access(env, HCR_FMO | HCR_IMO)) {
   1699         icv_dir_write(env, ri, value);
   1700         return;
   1701     }
   1702 
   1703     trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);
   1704 
   1705     if (irq >= cs->gic->num_irq) {
   1706         /* Also catches special interrupt numbers and LPIs */
   1707         return;
   1708     }
   1709 
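            /* When priority drop and interrupt deactivation are not split
             * (EOImode == 0), a write to DIR is UNPREDICTABLE; we choose to
             * ignore it.
             */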
   1710     if (!icc_eoi_split(env, cs)) {
   1711         return;
   1712     }
   1713 
   1714     int grp = gicv3_irq_group(cs->gic, cs, irq);
   1715 
   1716     single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
   1717     irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
   1718     irq_is_grp0 = grp == GICV3_G0;
   1719 
   1720     /* Check whether we're allowed to deactivate this interrupt based
   1721      * on its group and the current CPU state.
   1722      * These checks are laid out to correspond to the spec's pseudocode.
   1723      */
   1724     route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
   1725     route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
   1726     /* No need to include !IsSecure in route_*_to_el2 as it's only
   1727      * tested in cases where we know !IsSecure is true.
   1728      */
   1729     uint64_t hcr_el2 = arm_hcr_el2_eff(env);
   1730     route_fiq_to_el2 = hcr_el2 & HCR_FMO;
   1731     route_irq_to_el2 = hcr_el2 & HCR_IMO;
   1732 
   1733     switch (arm_current_el(env)) {
   1734     case 3:
   1735         break;
   1736     case 2:
   1737         if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
   1738             break;
   1739         }
   1740         if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
   1741             break;
   1742         }
   1743         return;
   1744     case 1:
   1745         if (!arm_is_secure_below_el3(env)) {
   1746             if (single_sec_state && irq_is_grp0 &&
   1747                 !route_fiq_to_el3 && !route_fiq_to_el2) {
   1748                 break;
   1749             }
   1750             if (!irq_is_secure && !irq_is_grp0 &&
   1751                 !route_irq_to_el3 && !route_irq_to_el2) {
   1752                 break;
   1753             }
   1754         } else {
   1755             if (irq_is_grp0 && !route_fiq_to_el3) {
   1756                 break;
   1757             }
   1758             if (!irq_is_grp0 &&
   1759                 (!irq_is_secure || !single_sec_state) &&
   1760                 !route_irq_to_el3) {
   1761                 break;
   1762             }
   1763         }
   1764         return;
   1765     default:
   1766         g_assert_not_reached();
   1767     }
   1768 
   1769     icc_deactivate_irq(cs, irq);
   1770 }
   1771 
   1772 static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1773 {
   1774     GICv3CPUState *cs = icc_cs_from_env(env);
   1775     int prio;
   1776 
   1777     if (icv_access(env, HCR_FMO | HCR_IMO)) {
   1778         return icv_rpr_read(env, ri);
   1779     }
   1780 
   1781     prio = icc_highest_active_prio(cs);
   1782 
   1783     if (arm_feature(env, ARM_FEATURE_EL3) &&
   1784         !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
   1785         /* NS GIC access and Group 0 is inaccessible to NS */
   1786         if ((prio & 0x80) == 0) {
   1787             /* NS mustn't see priorities in the Secure half of the range */
   1788             prio = 0;
   1789         } else if (prio != 0xff) {
   1790             /* Non-idle priority: show the Non-secure view of it */
   1791             prio = (prio << 1) & 0xff;
   1792         }
   1793     }
   1794 
   1795     trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
   1796     return prio;
   1797 }
   1798 
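        /* Common handling for writes to ICC_SGI0R, ICC_SGI1R and ICC_ASGI1R:
         * bits [55:48], [39:32] and [23:16] give Aff3.Aff2.Aff1 of the target
         * cluster, bits [15:0] are the Aff0 target list, bits [27:24] the SGI
         * INTID and bit 40 the IRM (route-to-all-but-self) flag.
         */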
   1799 static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
   1800                              uint64_t value, int grp, bool ns)
   1801 {
   1802     GICv3State *s = cs->gic;
   1803 
   1804     /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
   1805     uint64_t aff = extract64(value, 48, 8) << 16 |
   1806         extract64(value, 32, 8) << 8 |
   1807         extract64(value, 16, 8);
   1808     uint32_t targetlist = extract64(value, 0, 16);
   1809     uint32_t irq = extract64(value, 24, 4);
   1810     bool irm = extract64(value, 40, 1);
   1811     int i;
   1812 
   1813     if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
   1814         /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
   1815          * interrupts as Group 0 interrupts and must send Secure Group 0
   1816          * interrupts to the target CPUs.
   1817          */
   1818         grp = GICV3_G0;
   1819     }
   1820 
   1821     trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
   1822                                  aff, targetlist);
   1823 
   1824     for (i = 0; i < s->num_cpu; i++) {
   1825         GICv3CPUState *ocs = &s->cpu[i];
   1826 
   1827         if (irm) {
   1828             /* IRM == 1 : route to all CPUs except self */
   1829             if (cs == ocs) {
   1830                 continue;
   1831             }
   1832         } else {
   1833             /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
   1834              * where the corresponding bit is set in targetlist
   1835              */
   1836             int aff0;
   1837 
   1838             if (ocs->gicr_typer >> 40 != aff) {
   1839                 continue;
   1840             }
   1841             aff0 = extract64(ocs->gicr_typer, 32, 8);
   1842             if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
   1843                 continue;
   1844             }
   1845         }
   1846 
   1847         /* The redistributor will check against its own GICR_NSACR as needed */
   1848         gicv3_redist_send_sgi(ocs, grp, irq, ns);
   1849     }
   1850 }
   1851 
   1852 static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1853                            uint64_t value)
   1854 {
   1855     /* Generate Secure Group 0 SGI. */
   1856     GICv3CPUState *cs = icc_cs_from_env(env);
   1857     bool ns = !arm_is_secure(env);
   1858 
   1859     icc_generate_sgi(env, cs, value, GICV3_G0, ns);
   1860 }
   1861 
   1862 static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1863                            uint64_t value)
   1864 {
   1865     /* Generate Group 1 SGI for the current Security state */
   1866     GICv3CPUState *cs = icc_cs_from_env(env);
   1867     int grp;
   1868     bool ns = !arm_is_secure(env);
   1869 
   1870     grp = ns ? GICV3_G1NS : GICV3_G1;
   1871     icc_generate_sgi(env, cs, value, grp, ns);
   1872 }
   1873 
   1874 static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1875                              uint64_t value)
   1876 {
   1877     /* Generate Group 1 SGI for the Security state that is not
   1878      * the current state
   1879      */
   1880     GICv3CPUState *cs = icc_cs_from_env(env);
   1881     int grp;
   1882     bool ns = !arm_is_secure(env);
   1883 
   1884     grp = ns ? GICV3_G1 : GICV3_G1NS;
   1885     icc_generate_sgi(env, cs, value, grp, ns);
   1886 }
   1887 
   1888 static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1889 {
   1890     GICv3CPUState *cs = icc_cs_from_env(env);
   1891     int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
   1892     uint64_t value;
   1893 
   1894     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
   1895         return icv_igrpen_read(env, ri);
   1896     }
   1897 
   1898     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
   1899         grp = GICV3_G1NS;
   1900     }
   1901 
   1902     value = cs->icc_igrpen[grp];
   1903     trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
   1904                                 gicv3_redist_affid(cs), value);
   1905     return value;
   1906 }
   1907 
   1908 static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1909                              uint64_t value)
   1910 {
   1911     GICv3CPUState *cs = icc_cs_from_env(env);
   1912     int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
   1913 
   1914     if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
   1915         icv_igrpen_write(env, ri, value);
   1916         return;
   1917     }
   1918 
   1919     trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
   1920                                  gicv3_redist_affid(cs), value);
   1921 
   1922     if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
   1923         grp = GICV3_G1NS;
   1924     }
   1925 
   1926     cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
   1927     gicv3_cpuif_update(cs);
   1928 }
   1929 
   1930 static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1931 {
   1932     GICv3CPUState *cs = icc_cs_from_env(env);
   1933     uint64_t value;
   1934 
   1935     /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
   1936     value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
   1937     trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
   1938     return value;
   1939 }
   1940 
   1941 static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1942                                   uint64_t value)
   1943 {
   1944     GICv3CPUState *cs = icc_cs_from_env(env);
   1945 
   1946     trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);
   1947 
   1948     /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
   1949     cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
   1950     cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
   1951     gicv3_cpuif_update(cs);
   1952 }
   1953 
   1954 static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
   1955 {
   1956     GICv3CPUState *cs = icc_cs_from_env(env);
   1957     int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
   1958     uint64_t value;
   1959 
   1960     if (icv_access(env, HCR_FMO | HCR_IMO)) {
   1961         return icv_ctlr_read(env, ri);
   1962     }
   1963 
   1964     value = cs->icc_ctlr_el1[bank];
   1965     trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
   1966     return value;
   1967 }
   1968 
   1969 static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
   1970                                uint64_t value)
   1971 {
   1972     GICv3CPUState *cs = icc_cs_from_env(env);
   1973     int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
   1974     uint64_t mask;
   1975 
   1976     if (icv_access(env, HCR_FMO | HCR_IMO)) {
   1977         icv_ctlr_write(env, ri, value);
   1978         return;
   1979     }
   1980 
   1981     trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);
   1982 
   1983     /* Only CBPR and EOIMODE can be RW;
   1984      * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or
   1985      * the associated priority-based routing of them);
   1986      * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
   1987      */
   1988     if (arm_feature(env, ARM_FEATURE_EL3) &&
   1989         ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) {
   1990         mask = ICC_CTLR_EL1_EOIMODE;
   1991     } else {
   1992         mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE;
   1993     }
   1994 
   1995     cs->icc_ctlr_el1[bank] &= ~mask;
   1996     cs->icc_ctlr_el1[bank] |= (value & mask);
   1997     gicv3_cpuif_update(cs);
   1998 }
   1999 
   2000 
   2001 static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2002 {
   2003     GICv3CPUState *cs = icc_cs_from_env(env);
   2004     uint64_t value;
   2005 
   2006     value = cs->icc_ctlr_el3;
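            /* The CBPR_EL1{S,NS} and EOIMODE_EL1{S,NS} bits are read as aliases
             * of the corresponding bits in the banked ICC_CTLR_EL1 copies.
             */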
   2007     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
   2008         value |= ICC_CTLR_EL3_EOIMODE_EL1NS;
   2009     }
   2010     if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
   2011         value |= ICC_CTLR_EL3_CBPR_EL1NS;
   2012     }
   2013     if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE) {
   2014         value |= ICC_CTLR_EL3_EOIMODE_EL1S;
   2015     }
   2016     if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) {
   2017         value |= ICC_CTLR_EL3_CBPR_EL1S;
   2018     }
   2019 
   2020     trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
   2021     return value;
   2022 }
   2023 
   2024 static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2025                                uint64_t value)
   2026 {
   2027     GICv3CPUState *cs = icc_cs_from_env(env);
   2028     uint64_t mask;
   2029 
   2030     trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);
   2031 
   2032     /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */
   2033     cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
   2034     if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
   2035         cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
   2036     }
   2037     if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
   2038         cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
   2039     }
   2040 
   2041     cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
   2042     if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
   2043         cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
   2044     }
   2045     if (value & ICC_CTLR_EL3_CBPR_EL1S) {
   2046         cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
   2047     }
   2048 
   2049     /* The only bit stored in icc_ctlr_el3 which is writable is EOIMODE_EL3: */
   2050     mask = ICC_CTLR_EL3_EOIMODE_EL3;
   2051 
   2052     cs->icc_ctlr_el3 &= ~mask;
   2053     cs->icc_ctlr_el3 |= (value & mask);
   2054     gicv3_cpuif_update(cs);
   2055 }
   2056 
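        /* Access check for the CPU interface registers which trap to EL3 only
         * when both SCR_EL3.IRQ and SCR_EL3.FIQ are set, and which trap to EL2
         * for NS EL1 accesses when ICH_HCR_EL2.TC is set.
         */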
   2057 static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
   2058                                           const ARMCPRegInfo *ri, bool isread)
   2059 {
   2060     CPAccessResult r = CP_ACCESS_OK;
   2061     GICv3CPUState *cs = icc_cs_from_env(env);
   2062     int el = arm_current_el(env);
   2063 
   2064     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
   2065         el == 1 && !arm_is_secure_below_el3(env)) {
   2066         /* Takes priority over a possible EL3 trap */
   2067         return CP_ACCESS_TRAP_EL2;
   2068     }
   2069 
   2070     if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
   2071         switch (el) {
   2072         case 1:
   2073             /* Note that arm_hcr_el2_eff takes secure state into account.  */
   2074             if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) {
   2075                 r = CP_ACCESS_TRAP_EL3;
   2076             }
   2077             break;
   2078         case 2:
   2079             r = CP_ACCESS_TRAP_EL3;
   2080             break;
   2081         case 3:
   2082             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
   2083                 r = CP_ACCESS_TRAP_EL3;
   2084             }
   2085             break;
   2086         default:
   2087             g_assert_not_reached();
   2088         }
   2089     }
   2090 
   2091     if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
   2092         r = CP_ACCESS_TRAP;
   2093     }
   2094     return r;
   2095 }
   2096 
   2097 static CPAccessResult gicv3_dir_access(CPUARMState *env,
   2098                                        const ARMCPRegInfo *ri, bool isread)
   2099 {
   2100     GICv3CPUState *cs = icc_cs_from_env(env);
   2101 
   2102     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
   2103         arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
   2104         /* Takes priority over a possible EL3 trap */
   2105         return CP_ACCESS_TRAP_EL2;
   2106     }
   2107 
   2108     return gicv3_irqfiq_access(env, ri, isread);
   2109 }
   2110 
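        /* The SGI generation registers additionally trap to EL2 when the
         * effective HCR_EL2.IMO or HCR_EL2.FMO bit is set at EL1.
         */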
   2111 static CPAccessResult gicv3_sgi_access(CPUARMState *env,
   2112                                        const ARMCPRegInfo *ri, bool isread)
   2113 {
   2114     if (arm_current_el(env) == 1 &&
   2115         (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) {
   2116         /* Takes priority over a possible EL3 trap */
   2117         return CP_ACCESS_TRAP_EL2;
   2118     }
   2119 
   2120     return gicv3_irqfiq_access(env, ri, isread);
   2121 }
   2122 
   2123 static CPAccessResult gicv3_fiq_access(CPUARMState *env,
   2124                                        const ARMCPRegInfo *ri, bool isread)
   2125 {
   2126     CPAccessResult r = CP_ACCESS_OK;
   2127     GICv3CPUState *cs = icc_cs_from_env(env);
   2128     int el = arm_current_el(env);
   2129 
   2130     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
   2131         el == 1 && !arm_is_secure_below_el3(env)) {
   2132         /* Takes priority over a possible EL3 trap */
   2133         return CP_ACCESS_TRAP_EL2;
   2134     }
   2135 
   2136     if (env->cp15.scr_el3 & SCR_FIQ) {
   2137         switch (el) {
   2138         case 1:
   2139             if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) {
   2140                 r = CP_ACCESS_TRAP_EL3;
   2141             }
   2142             break;
   2143         case 2:
   2144             r = CP_ACCESS_TRAP_EL3;
   2145             break;
   2146         case 3:
   2147             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
   2148                 r = CP_ACCESS_TRAP_EL3;
   2149             }
   2150             break;
   2151         default:
   2152             g_assert_not_reached();
   2153         }
   2154     }
   2155 
   2156     if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
   2157         r = CP_ACCESS_TRAP;
   2158     }
   2159     return r;
   2160 }
   2161 
   2162 static CPAccessResult gicv3_irq_access(CPUARMState *env,
   2163                                        const ARMCPRegInfo *ri, bool isread)
   2164 {
   2165     CPAccessResult r = CP_ACCESS_OK;
   2166     GICv3CPUState *cs = icc_cs_from_env(env);
   2167     int el = arm_current_el(env);
   2168 
   2169     if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
   2170         el == 1 && !arm_is_secure_below_el3(env)) {
   2171         /* Takes priority over a possible EL3 trap */
   2172         return CP_ACCESS_TRAP_EL2;
   2173     }
   2174 
   2175     if (env->cp15.scr_el3 & SCR_IRQ) {
   2176         switch (el) {
   2177         case 1:
   2178             if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) {
   2179                 r = CP_ACCESS_TRAP_EL3;
   2180             }
   2181             break;
   2182         case 2:
   2183             r = CP_ACCESS_TRAP_EL3;
   2184             break;
   2185         case 3:
   2186             if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
   2187                 r = CP_ACCESS_TRAP_EL3;
   2188             }
   2189             break;
   2190         default:
   2191             g_assert_not_reached();
   2192         }
   2193     }
   2194 
   2195     if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
   2196         r = CP_ACCESS_TRAP;
   2197     }
   2198     return r;
   2199 }
   2200 
   2201 static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
   2202 {
   2203     GICv3CPUState *cs = icc_cs_from_env(env);
   2204 
   2205     cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
   2206         (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
   2207         ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
   2208     cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
   2209         (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
   2210         ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
   2211     cs->icc_pmr_el1 = 0;
   2212     cs->icc_bpr[GICV3_G0] = icc_min_bpr(cs);
   2213     cs->icc_bpr[GICV3_G1] = icc_min_bpr(cs);
   2214     cs->icc_bpr[GICV3_G1NS] = icc_min_bpr_ns(cs);
   2215     memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
   2216     memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
   2217     cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
   2218         (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
   2219         ((cs->pribits - 1) << ICC_CTLR_EL3_PRIBITS_SHIFT);
   2220 
   2221     memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
   2222     cs->ich_hcr_el2 = 0;
   2223     memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
   2224     cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
   2225         ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) |
   2226         (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
   2227 }
   2228 
   2229 static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
   2230     { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
   2231       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
   2232       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2233       .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
   2234       .readfn = icc_pmr_read,
   2235       .writefn = icc_pmr_write,
   2236       /* We hang the whole cpu interface reset routine off here
   2237        * rather than parcelling it out into one little function
   2238        * per register
   2239        */
   2240       .resetfn = icc_reset,
   2241     },
   2242     { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
   2243       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
   2244       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2245       .access = PL1_R, .accessfn = gicv3_fiq_access,
   2246       .readfn = icc_iar0_read,
   2247     },
   2248     { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
   2249       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
   2250       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2251       .access = PL1_W, .accessfn = gicv3_fiq_access,
   2252       .writefn = icc_eoir_write,
   2253     },
   2254     { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
   2255       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
   2256       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2257       .access = PL1_R, .accessfn = gicv3_fiq_access,
   2258       .readfn = icc_hppir0_read,
   2259     },
   2260     { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
   2261       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
   2262       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2263       .access = PL1_RW, .accessfn = gicv3_fiq_access,
   2264       .readfn = icc_bpr_read,
   2265       .writefn = icc_bpr_write,
   2266     },
   2267     { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
   2268       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
   2269       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2270       .access = PL1_RW, .accessfn = gicv3_fiq_access,
   2271       .readfn = icc_ap_read,
   2272       .writefn = icc_ap_write,
   2273     },
   2274     /* All the ICC_AP1R*_EL1 registers are banked */
   2275     { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
   2276       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
   2277       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2278       .access = PL1_RW, .accessfn = gicv3_irq_access,
   2279       .readfn = icc_ap_read,
   2280       .writefn = icc_ap_write,
   2281     },
   2282     { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
   2283       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
   2284       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2285       .access = PL1_W, .accessfn = gicv3_dir_access,
   2286       .writefn = icc_dir_write,
   2287     },
   2288     { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
   2289       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
   2290       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2291       .access = PL1_R, .accessfn = gicv3_irqfiq_access,
   2292       .readfn = icc_rpr_read,
   2293     },
   2294     { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
   2295       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
   2296       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2297       .access = PL1_W, .accessfn = gicv3_sgi_access,
   2298       .writefn = icc_sgi1r_write,
   2299     },
   2300     { .name = "ICC_SGI1R",
   2301       .cp = 15, .opc1 = 0, .crm = 12,
   2302       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
   2303       .access = PL1_W, .accessfn = gicv3_sgi_access,
   2304       .writefn = icc_sgi1r_write,
   2305     },
   2306     { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
   2307       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
   2308       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2309       .access = PL1_W, .accessfn = gicv3_sgi_access,
   2310       .writefn = icc_asgi1r_write,
   2311     },
   2312     { .name = "ICC_ASGI1R",
   2313       .cp = 15, .opc1 = 1, .crm = 12,
   2314       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
   2315       .access = PL1_W, .accessfn = gicv3_sgi_access,
   2316       .writefn = icc_asgi1r_write,
   2317     },
   2318     { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
   2319       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
   2320       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2321       .access = PL1_W, .accessfn = gicv3_sgi_access,
   2322       .writefn = icc_sgi0r_write,
   2323     },
   2324     { .name = "ICC_SGI0R",
   2325       .cp = 15, .opc1 = 2, .crm = 12,
   2326       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
   2327       .access = PL1_W, .accessfn = gicv3_sgi_access,
   2328       .writefn = icc_sgi0r_write,
   2329     },
   2330     { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
   2331       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
   2332       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2333       .access = PL1_R, .accessfn = gicv3_irq_access,
   2334       .readfn = icc_iar1_read,
   2335     },
   2336     { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH,
   2337       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
   2338       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2339       .access = PL1_W, .accessfn = gicv3_irq_access,
   2340       .writefn = icc_eoir_write,
   2341     },
   2342     { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH,
   2343       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
   2344       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2345       .access = PL1_R, .accessfn = gicv3_irq_access,
   2346       .readfn = icc_hppir1_read,
   2347     },
   2348     /* This register is banked */
   2349     { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH,
   2350       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
   2351       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2352       .access = PL1_RW, .accessfn = gicv3_irq_access,
   2353       .readfn = icc_bpr_read,
   2354       .writefn = icc_bpr_write,
   2355     },
   2356     /* This register is banked */
   2357     { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
   2358       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
   2359       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2360       .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
   2361       .readfn = icc_ctlr_el1_read,
   2362       .writefn = icc_ctlr_el1_write,
   2363     },
   2364     { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH,
   2365       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
   2366       .type = ARM_CP_NO_RAW | ARM_CP_CONST,
   2367       .access = PL1_RW,
   2368       /* We don't support IRQ/FIQ bypass and system registers are
   2369        * always enabled, so all our bits are RAZ/WI or RAO/WI.
   2370        * This register is banked but since it's constant we don't
   2371        * need to do anything special.
   2372        */
   2373       .resetvalue = 0x7,
   2374     },
   2375     { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH,
   2376       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
   2377       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2378       .access = PL1_RW, .accessfn = gicv3_fiq_access,
   2379       .readfn = icc_igrpen_read,
   2380       .writefn = icc_igrpen_write,
   2381     },
   2382     /* This register is banked */
   2383     { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH,
   2384       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
   2385       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2386       .access = PL1_RW, .accessfn = gicv3_irq_access,
   2387       .readfn = icc_igrpen_read,
   2388       .writefn = icc_igrpen_write,
   2389     },
   2390     { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH,
   2391       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5,
   2392       .type = ARM_CP_NO_RAW | ARM_CP_CONST,
   2393       .access = PL2_RW,
   2394       /* We don't support IRQ/FIQ bypass and system registers are
   2395        * always enabled, so all our bits are RAZ/WI or RAO/WI.
   2396        */
   2397       .resetvalue = 0xf,
   2398     },
   2399     { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH,
   2400       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
   2401       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2402       .access = PL3_RW,
   2403       .readfn = icc_ctlr_el3_read,
   2404       .writefn = icc_ctlr_el3_write,
   2405     },
   2406     { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH,
   2407       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5,
   2408       .type = ARM_CP_NO_RAW | ARM_CP_CONST,
   2409       .access = PL3_RW,
   2410       /* We don't support IRQ/FIQ bypass and system registers are
   2411        * always enabled, so all our bits are RAZ/WI or RAO/WI.
   2412        */
   2413       .resetvalue = 0xf,
   2414     },
   2415     { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH,
   2416       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7,
   2417       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2418       .access = PL3_RW,
   2419       .readfn = icc_igrpen1_el3_read,
   2420       .writefn = icc_igrpen1_el3_write,
   2421     },
   2422 };
   2423 
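        /* ICC_AP{0,1}R1_EL1: only registered when the CPU implements at least
         * 6 bits of preemption (see gicv3_init_cpuif()).
         */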
   2424 static const ARMCPRegInfo gicv3_cpuif_icc_apxr1_reginfo[] = {
   2425     { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
   2426       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
   2427       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2428       .access = PL1_RW, .accessfn = gicv3_fiq_access,
   2429       .readfn = icc_ap_read,
   2430       .writefn = icc_ap_write,
   2431     },
   2432     { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
   2433       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
   2434       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2435       .access = PL1_RW, .accessfn = gicv3_irq_access,
   2436       .readfn = icc_ap_read,
   2437       .writefn = icc_ap_write,
   2438     },
   2439 };
   2440 
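        /* ICC_AP{0,1}R{2,3}_EL1: only registered when the CPU implements
         * 7 bits of preemption.
         */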
   2441 static const ARMCPRegInfo gicv3_cpuif_icc_apxr23_reginfo[] = {
   2442     { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
   2443       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
   2444       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2445       .access = PL1_RW, .accessfn = gicv3_fiq_access,
   2446       .readfn = icc_ap_read,
   2447       .writefn = icc_ap_write,
   2448     },
   2449     { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
   2450       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
   2451       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2452       .access = PL1_RW, .accessfn = gicv3_fiq_access,
   2453       .readfn = icc_ap_read,
   2454       .writefn = icc_ap_write,
   2455     },
   2456     { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
   2457       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
   2458       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2459       .access = PL1_RW, .accessfn = gicv3_irq_access,
   2460       .readfn = icc_ap_read,
   2461       .writefn = icc_ap_write,
   2462     },
   2463     { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
   2464       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
   2465       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2466       .access = PL1_RW, .accessfn = gicv3_irq_access,
   2467       .readfn = icc_ap_read,
   2468       .writefn = icc_ap_write,
   2469     },
   2470 };
   2471 
   2472 static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2473 {
   2474     GICv3CPUState *cs = icc_cs_from_env(env);
   2475     int regno = ri->opc2 & 3;
   2476     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
   2477     uint64_t value;
   2478 
   2479     value = cs->ich_apr[grp][regno];
   2480     trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
   2481     return value;
   2482 }
   2483 
   2484 static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2485                          uint64_t value)
   2486 {
   2487     GICv3CPUState *cs = icc_cs_from_env(env);
   2488     int regno = ri->opc2 & 3;
   2489     int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
   2490 
   2491     trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
   2492 
   2493     cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
   2494     gicv3_cpuif_virt_irq_fiq_update(cs);
   2495 }
   2496 
   2497 static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2498 {
   2499     GICv3CPUState *cs = icc_cs_from_env(env);
   2500     uint64_t value = cs->ich_hcr_el2;
   2501 
   2502     trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
   2503     return value;
   2504 }
   2505 
   2506 static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2507                           uint64_t value)
   2508 {
   2509     GICv3CPUState *cs = icc_cs_from_env(env);
   2510 
   2511     trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);
   2512 
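            /* Keep only the bits we implement as writable; writes to any other
             * bits are ignored.
             */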
   2513     value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
   2514         ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
   2515         ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
   2516         ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
   2517         ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;
   2518 
   2519     cs->ich_hcr_el2 = value;
   2520     gicv3_cpuif_virt_update(cs);
   2521 }
   2522 
   2523 static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2524 {
   2525     GICv3CPUState *cs = icc_cs_from_env(env);
   2526     uint64_t value = cs->ich_vmcr_el2;
   2527 
   2528     trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
   2529     return value;
   2530 }
   2531 
   2532 static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2533                          uint64_t value)
   2534 {
   2535     GICv3CPUState *cs = icc_cs_from_env(env);
   2536 
   2537     trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);
   2538 
   2539     value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
   2540         ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
   2541         ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
   2542     value |= ICH_VMCR_EL2_VFIQEN;
   2543 
   2544     cs->ich_vmcr_el2 = value;
   2545     /* Enforce "writing BPRs to less than minimum sets them to the minimum"
   2546      * by reading and writing back the fields.
   2547      */
   2548     write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
   2549     write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));
   2550 
   2551     gicv3_cpuif_virt_update(cs);
   2552 }
   2553 
   2554 static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2555 {
   2556     GICv3CPUState *cs = icc_cs_from_env(env);
   2557     int regno = ri->opc2 | ((ri->crm & 1) << 3);
   2558     uint64_t value;
   2559 
   2560     /* This read function handles all of:
   2561      * 64-bit reads of the whole LR
   2562      * 32-bit reads of the low half of the LR
   2563      * 32-bit reads of the high half of the LR
   2564      */
   2565     if (ri->state == ARM_CP_STATE_AA32) {
   2566         if (ri->crm >= 14) {
   2567             value = extract64(cs->ich_lr_el2[regno], 32, 32);
   2568             trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
   2569         } else {
   2570             value = extract64(cs->ich_lr_el2[regno], 0, 32);
   2571             trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
   2572         }
   2573     } else {
   2574         value = cs->ich_lr_el2[regno];
   2575         trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
   2576     }
   2577 
   2578     return value;
   2579 }
   2580 
   2581 static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
   2582                          uint64_t value)
   2583 {
   2584     GICv3CPUState *cs = icc_cs_from_env(env);
   2585     int regno = ri->opc2 | ((ri->crm & 1) << 3);
   2586 
   2587     /* This write function handles all of:
   2588      * 64-bit writes to the whole LR
   2589      * 32-bit writes to the low half of the LR
   2590      * 32-bit writes to the high half of the LR
   2591      */
   2592     if (ri->state == ARM_CP_STATE_AA32) {
   2593         if (ri->crm >= 14) {
   2594             trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
   2595             value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
   2596         } else {
   2597             trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
   2598             value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
   2599         }
   2600     } else {
   2601         trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
   2602     }
   2603 
   2604     /* Enforce RES0 bits in priority field */
   2605     if (cs->vpribits < 8) {
   2606         value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
   2607                           8 - cs->vpribits, 0);
   2608     }
   2609 
   2610     cs->ich_lr_el2[regno] = value;
   2611     gicv3_cpuif_virt_update(cs);
   2612 }
   2613 
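        /* ICH_VTR_EL2: read-only description of the virtual interface, giving
         * the number of list registers and the virtual priority and preemption
         * bit counts.
         */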
   2614 static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2615 {
   2616     GICv3CPUState *cs = icc_cs_from_env(env);
   2617     uint64_t value;
   2618 
   2619     value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
   2620         | ICH_VTR_EL2_TDS | ICH_VTR_EL2_A3V
   2621         | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
   2622         | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
   2623         | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
   2624 
   2625     if (cs->gic->revision < 4) {
   2626         value |= ICH_VTR_EL2_NV4;
   2627     }
   2628 
   2629     trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
   2630     return value;
   2631 }
   2632 
   2633 static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2634 {
   2635     GICv3CPUState *cs = icc_cs_from_env(env);
   2636     uint64_t value = maintenance_interrupt_state(cs);
   2637 
   2638     trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
   2639     return value;
   2640 }
   2641 
   2642 static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2643 {
   2644     GICv3CPUState *cs = icc_cs_from_env(env);
   2645     uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);
   2646 
   2647     trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
   2648     return value;
   2649 }
   2650 
   2651 static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
   2652 {
   2653     GICv3CPUState *cs = icc_cs_from_env(env);
   2654     uint64_t value = 0;
   2655     int i;
   2656 
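            /* A list register is reported as empty if its State is Invalid and
             * it does not still need to raise an EOI maintenance interrupt
             * (i.e. HW is set or EOI is clear).
             */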
   2657     for (i = 0; i < cs->num_list_regs; i++) {
   2658         uint64_t lr = cs->ich_lr_el2[i];
   2659 
   2660         if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
   2661             ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
   2662             value |= (1 << i);
   2663         }
   2664     }
   2665 
   2666     trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
   2667     return value;
   2668 }
   2669 
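        /* The hypervisor view (ICH_*) registers: these are only registered for
         * CPUs which implement EL2 (see gicv3_init_cpuif()).
         */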
   2670 static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
   2671     { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
   2672       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
   2673       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2674       .access = PL2_RW,
   2675       .readfn = ich_ap_read,
   2676       .writefn = ich_ap_write,
   2677     },
   2678     { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
   2679       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
   2680       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2681       .access = PL2_RW,
   2682       .readfn = ich_ap_read,
   2683       .writefn = ich_ap_write,
   2684     },
   2685     { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
   2686       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
   2687       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2688       .access = PL2_RW,
   2689       .readfn = ich_hcr_read,
   2690       .writefn = ich_hcr_write,
   2691     },
   2692     { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
   2693       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
   2694       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2695       .access = PL2_R,
   2696       .readfn = ich_vtr_read,
   2697     },
   2698     { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
   2699       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
   2700       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2701       .access = PL2_R,
   2702       .readfn = ich_misr_read,
   2703     },
   2704     { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
   2705       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
   2706       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2707       .access = PL2_R,
   2708       .readfn = ich_eisr_read,
   2709     },
   2710     { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
   2711       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
   2712       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2713       .access = PL2_R,
   2714       .readfn = ich_elrsr_read,
   2715     },
   2716     { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
   2717       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
   2718       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2719       .access = PL2_RW,
   2720       .readfn = ich_vmcr_read,
   2721       .writefn = ich_vmcr_write,
   2722     },
   2723 };
   2724 
   2725 static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
   2726     { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
   2727       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
   2728       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2729       .access = PL2_RW,
   2730       .readfn = ich_ap_read,
   2731       .writefn = ich_ap_write,
   2732     },
   2733     { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
   2734       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
   2735       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2736       .access = PL2_RW,
   2737       .readfn = ich_ap_read,
   2738       .writefn = ich_ap_write,
   2739     },
   2740 };
   2741 
   2742 static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
   2743     { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
   2744       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
   2745       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2746       .access = PL2_RW,
   2747       .readfn = ich_ap_read,
   2748       .writefn = ich_ap_write,
   2749     },
   2750     { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
   2751       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
   2752       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2753       .access = PL2_RW,
   2754       .readfn = ich_ap_read,
   2755       .writefn = ich_ap_write,
   2756     },
   2757     { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
   2758       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
   2759       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2760       .access = PL2_RW,
   2761       .readfn = ich_ap_read,
   2762       .writefn = ich_ap_write,
   2763     },
   2764     { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
   2765       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
   2766       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2767       .access = PL2_RW,
   2768       .readfn = ich_ap_read,
   2769       .writefn = ich_ap_write,
   2770     },
   2771 };
   2772 
   2773 static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
   2774 {
   2775     GICv3CPUState *cs = opaque;
   2776 
   2777     gicv3_cpuif_update(cs);
   2778     /*
   2779      * Because vLPIs are only pending in NonSecure state,
   2780      * an EL change can change the VIRQ/VFIQ status (but
   2781      * cannot affect the maintenance interrupt state)
   2782      */
   2783     gicv3_cpuif_virt_irq_fiq_update(cs);
   2784 }
   2785 
   2786 void gicv3_init_cpuif(GICv3State *s)
   2787 {
   2788     /* Called from the GICv3 realize function; register our system
   2789      * registers with the CPU
   2790      */
   2791     int i;
   2792 
   2793     for (i = 0; i < s->num_cpu; i++) {
   2794         ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
   2795         GICv3CPUState *cs = &s->cpu[i];
   2796 
   2797         /*
   2798          * If the CPU doesn't define a GICv3 configuration, probably because
   2799          * in real hardware it doesn't have one, then we use default values
   2800          * matching those used by most Arm CPUs. This applies to:
   2801          *  cpu->gic_num_lrs
   2802          *  cpu->gic_vpribits
   2803          *  cpu->gic_vprebits
   2804          *  cpu->gic_pribits
   2805          */
   2806 
   2807         /* Note that we can't just use the GICv3CPUState as an opaque pointer
   2808          * in define_arm_cp_regs_with_opaque(), because when we're called back
   2809          * it might be with code translated by CPU 0 but run by CPU 1, in
   2810          * which case we'd get the wrong value.
   2811          * So instead we define the regs with no ri->opaque info, and
   2812          * get back to the GICv3CPUState from the CPUARMState.
   2813          */
   2814         define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
   2815 
   2816         /*
   2817          * The CPU implementation specifies the number of supported
   2818          * bits of physical priority. For backwards compatibility
   2819          * of migration, we have a compat property that forces use
   2820          * of 8 priority bits regardless of what the CPU really has.
   2821          */
   2822         if (s->force_8bit_prio) {
   2823             cs->pribits = 8;
   2824         } else {
   2825             cs->pribits = cpu->gic_pribits ?: 5;
   2826         }
   2827 
   2828         /*
   2829          * The GICv3 has separate ID register fields for virtual priority
   2830          * and preemption bit values, but only a single ID register field
   2831          * for the physical priority bits. The preemption bit count is
   2832          * always the same as the priority bit count, except that 8 bits
   2833          * of priority means 7 preemption bits. We precalculate the
   2834          * preemption bits because it simplifies the code and makes the
   2835          * parallels between the virtual and physical bits of the GIC
   2836          * a bit clearer.
   2837          */
   2838         cs->prebits = cs->pribits;
   2839         if (cs->prebits == 8) {
   2840             cs->prebits--;
   2841         }
   2842         /*
   2843          * Check that CPU code defining pribits didn't violate
   2844          * architectural constraints our implementation relies on.
   2845          */
   2846         g_assert(cs->pribits >= 4 && cs->pribits <= 8);
   2847 
   2848         /*
   2849          * gicv3_cpuif_reginfo[] defines ICC_AP*R0_EL1; add definitions
   2850          * for ICC_AP*R{1,2,3}_EL1 if the prebits value requires them.
   2851          */
   2852         if (cs->prebits >= 6) {
   2853             define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr1_reginfo);
   2854         }
   2855         if (cs->prebits == 7) {
   2856             define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr23_reginfo);
   2857         }
   2858 
   2859         if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
   2860             int j;
   2861 
   2862             cs->num_list_regs = cpu->gic_num_lrs ?: 4;
   2863             cs->vpribits = cpu->gic_vpribits ?: 5;
   2864             cs->vprebits = cpu->gic_vprebits ?: 5;
   2865 
   2866             /* Check against architectural constraints: getting these
   2867              * wrong would be a bug in the CPU code defining these,
   2868              * and the implementation relies on them holding.
   2869              */
   2870             g_assert(cs->vprebits <= cs->vpribits);
   2871             g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
   2872             g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);
   2873 
   2874             define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);
   2875 
   2876             for (j = 0; j < cs->num_list_regs; j++) {
   2877                 /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
   2878                  * are split into two cp15 regs, LR (the low part, with the
   2879                  * same encoding as the AArch64 LR) and LRC (the high part).
   2880                  */
   2881                 ARMCPRegInfo lr_regset[] = {
   2882                     { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
   2883                       .opc0 = 3, .opc1 = 4, .crn = 12,
   2884                       .crm = 12 + (j >> 3), .opc2 = j & 7,
   2885                       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2886                       .access = PL2_RW,
   2887                       .readfn = ich_lr_read,
   2888                       .writefn = ich_lr_write,
   2889                     },
   2890                     { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
   2891                       .cp = 15, .opc1 = 4, .crn = 12,
   2892                       .crm = 14 + (j >> 3), .opc2 = j & 7,
   2893                       .type = ARM_CP_IO | ARM_CP_NO_RAW,
   2894                       .access = PL2_RW,
   2895                       .readfn = ich_lr_read,
   2896                       .writefn = ich_lr_write,
   2897                     },
   2898                 };
   2899                 define_arm_cp_regs(cpu, lr_regset);
   2900             }
   2901             if (cs->vprebits >= 6) {
   2902                 define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
   2903             }
   2904             if (cs->vprebits == 7) {
   2905                 define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
   2906             }
   2907         }
   2908         arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
   2909     }
   2910 }