qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

spapr_xive.c (56358B)


      1 /*
      2  * QEMU PowerPC sPAPR XIVE interrupt controller model
      3  *
      4  * Copyright (c) 2017-2018, IBM Corporation.
      5  *
      6  * This code is licensed under the GPL version 2 or later. See the
      7  * COPYING file in the top-level directory.
      8  */
      9 
     10 #include "qemu/osdep.h"
     11 #include "qemu/log.h"
     12 #include "qemu/module.h"
     13 #include "qapi/error.h"
     14 #include "qemu/error-report.h"
     15 #include "target/ppc/cpu.h"
     16 #include "sysemu/cpus.h"
     17 #include "sysemu/reset.h"
     18 #include "migration/vmstate.h"
     19 #include "monitor/monitor.h"
     20 #include "hw/ppc/fdt.h"
     21 #include "hw/ppc/spapr.h"
     22 #include "hw/ppc/spapr_cpu_core.h"
     23 #include "hw/ppc/spapr_xive.h"
     24 #include "hw/ppc/xive.h"
     25 #include "hw/ppc/xive_regs.h"
     26 #include "hw/qdev-properties.h"
     27 #include "trace.h"
     28 
     29 /*
     30  * XIVE Virtualization Controller BAR and Thread Management BAR that we
     31  * use for the ESB pages and the TIMA pages
     32  */
     33 #define SPAPR_XIVE_VC_BASE   0x0006010000000000ull
     34 #define SPAPR_XIVE_TM_BASE   0x0006030203180000ull
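
        /*
         * Resulting guest physical layout, as mapped in
         * spapr_xive_realize(): the source ESB pages start at
         * SPAPR_XIVE_VC_BASE, the END ESB pages follow at
         * vc_base + xive_source_esb_len(), and the four TIMA pages
         * sit at SPAPR_XIVE_TM_BASE.
         */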
     35 
     36 /*
     37  * The allocation of VP blocks is a complex operation in OPAL and the
     38  * VP identifiers have a relation with the number of HW chips, the
     39  * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE
     40  * controller model does not have the same constraints and can use a
     41  * simple mapping scheme of the CPU vcpu_id.
     42  *
     43  * These identifiers are never returned to the OS.
     44  */
     45 
     46 #define SPAPR_XIVE_NVT_BASE 0x400
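
        /*
         * For example, the vCPU with vcpu_id 2 is assigned NVT index
         * SPAPR_XIVE_NVT_BASE + 2 = 0x402 in block SPAPR_XIVE_BLOCK_ID,
         * and spapr_xive_nvt_to_target(0, 0x402) maps it back to
         * target 2.
         */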
     47 
     48 /*
     49  * sPAPR NVT and END indexing helpers
     50  */
     51 static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
     52 {
     53     return nvt_idx - SPAPR_XIVE_NVT_BASE;
     54 }
     55 
     56 static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
     57                                   uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
     58 {
     59     assert(cpu);
     60 
     61     if (out_nvt_blk) {
     62         *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
     63     }
     64 
     65     if (out_nvt_idx) {
     66         *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
     67     }
     68 }
     69 
     70 static int spapr_xive_target_to_nvt(uint32_t target,
     71                                     uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
     72 {
     73     PowerPCCPU *cpu = spapr_find_cpu(target);
     74 
     75     if (!cpu) {
     76         return -1;
     77     }
     78 
     79     spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
     80     return 0;
     81 }
     82 
     83 /*
     84  * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8
     85  * priorities per CPU
     86  */
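
        /*
         * For example, target 4 at priority 5 uses END index
         * (4 << 3) + 5 = 0x25; conversely, server = 0x25 >> 3 = 4
         * and priority = 0x25 & 0x7 = 5.
         */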
     87 int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
     88                              uint32_t *out_server, uint8_t *out_prio)
     89 {
     90 
     91     assert(end_blk == SPAPR_XIVE_BLOCK_ID);
     92 
     93     if (out_server) {
     94         *out_server = end_idx >> 3;
     95     }
     96 
     97     if (out_prio) {
     98         *out_prio = end_idx & 0x7;
     99     }
    100     return 0;
    101 }
    102 
    103 static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
    104                                   uint8_t *out_end_blk, uint32_t *out_end_idx)
    105 {
    106     assert(cpu);
    107 
    108     if (out_end_blk) {
    109         *out_end_blk = SPAPR_XIVE_BLOCK_ID;
    110     }
    111 
    112     if (out_end_idx) {
    113         *out_end_idx = (cpu->vcpu_id << 3) + prio;
    114     }
    115 }
    116 
    117 static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
    118                                     uint8_t *out_end_blk, uint32_t *out_end_idx)
    119 {
    120     PowerPCCPU *cpu = spapr_find_cpu(target);
    121 
    122     if (!cpu) {
    123         return -1;
    124     }
    125 
    126     spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
    127     return 0;
    128 }
    129 
    130 /*
    131  * On sPAPR machines, use a simplified output for the XIVE END
    132  * structure, dumping only the information related to the OS EQ.
    133  */
    134 static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
    135                                           Monitor *mon)
    136 {
    137     uint64_t qaddr_base = xive_end_qaddr(end);
    138     uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    139     uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    140     uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    141     uint32_t qentries = 1 << (qsize + 10);
    142     uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    143     uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    144 
    145     monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
    146                    spapr_xive_nvt_to_target(0, nvt),
    147                    priority, qindex, qentries, qaddr_base, qgen);
    148 
    149     xive_end_queue_pic_print_info(end, 6, mon);
    150 }
    151 
    152 /*
    153  * kvm_irqchip_in_kernel() will cause the compiler to turn this
    154  * into a nop if CONFIG_KVM isn't defined.
    155  */
    156 #define spapr_xive_in_kernel(xive) \
    157     (kvm_irqchip_in_kernel() && (xive)->fd != -1)
    158 
    159 static void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
    160 {
    161     XiveSource *xsrc = &xive->source;
    162     int i;
    163 
    164     if (spapr_xive_in_kernel(xive)) {
    165         Error *local_err = NULL;
    166 
    167         kvmppc_xive_synchronize_state(xive, &local_err);
    168         if (local_err) {
    169             error_report_err(local_err);
    170             return;
    171         }
    172     }
    173 
    174     monitor_printf(mon, "  LISN         PQ    EISN     CPU/PRIO EQ\n");
    175 
    176     for (i = 0; i < xive->nr_irqs; i++) {
    177         uint8_t pq = xive_source_esb_get(xsrc, i);
    178         XiveEAS *eas = &xive->eat[i];
    179 
    180         if (!xive_eas_is_valid(eas)) {
    181             continue;
    182         }
    183 
    184         monitor_printf(mon, "  %08x %s %c%c%c %s %08x ", i,
    185                        xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
    186                        pq & XIVE_ESB_VAL_P ? 'P' : '-',
    187                        pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
    188                        xive_source_is_asserted(xsrc, i) ? 'A' : ' ',
    189                        xive_eas_is_masked(eas) ? "M" : " ",
    190                        (int) xive_get_field64(EAS_END_DATA, eas->w));
    191 
    192         if (!xive_eas_is_masked(eas)) {
    193             uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    194             XiveEND *end;
    195 
    196             assert(end_idx < xive->nr_ends);
    197             end = &xive->endt[end_idx];
    198 
    199             if (xive_end_is_valid(end)) {
    200                 spapr_xive_end_pic_print_info(xive, end, mon);
    201             }
    202         }
    203         monitor_printf(mon, "\n");
    204     }
    205 }
    206 
    207 void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
    208 {
    209     memory_region_set_enabled(&xive->source.esb_mmio, enable);
    210     memory_region_set_enabled(&xive->tm_mmio, enable);
    211 
    212     /* Disable the END ESBs until a guest OS makes use of them */
    213     memory_region_set_enabled(&xive->end_source.esb_mmio, false);
    214 }
    215 
    216 static void spapr_xive_tm_write(void *opaque, hwaddr offset,
    217                           uint64_t value, unsigned size)
    218 {
    219     XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;
    220 
    221     xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
    222 }
    223 
    224 static uint64_t spapr_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
    225 {
    226     XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;
    227 
    228     return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
    229 }
    230 
    231 const MemoryRegionOps spapr_xive_tm_ops = {
    232     .read = spapr_xive_tm_read,
    233     .write = spapr_xive_tm_write,
    234     .endianness = DEVICE_BIG_ENDIAN,
    235     .valid = {
    236         .min_access_size = 1,
    237         .max_access_size = 8,
    238     },
    239     .impl = {
    240         .min_access_size = 1,
    241         .max_access_size = 8,
    242     },
    243 };
    244 
    245 static void spapr_xive_end_reset(XiveEND *end)
    246 {
    247     memset(end, 0, sizeof(*end));
    248 
    249     /* switch off the escalation and notification ESBs */
    250     end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
    251 }
    252 
    253 static void spapr_xive_reset(void *dev)
    254 {
    255     SpaprXive *xive = SPAPR_XIVE(dev);
    256     int i;
    257 
    258     /*
    259      * The XiveSource has its own reset handler, which masks off all
    260      * IRQs (!P|Q)
    261      */
    262 
    263     /* Mask all valid EASs in the IRQ number space. */
    264     for (i = 0; i < xive->nr_irqs; i++) {
    265         XiveEAS *eas = &xive->eat[i];
    266         if (xive_eas_is_valid(eas)) {
    267             eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
    268         } else {
    269             eas->w = 0;
    270         }
    271     }
    272 
    273     /* Clear all ENDs */
    274     for (i = 0; i < xive->nr_ends; i++) {
    275         spapr_xive_end_reset(&xive->endt[i]);
    276     }
    277 }
    278 
    279 static void spapr_xive_instance_init(Object *obj)
    280 {
    281     SpaprXive *xive = SPAPR_XIVE(obj);
    282 
    283     object_initialize_child(obj, "source", &xive->source, TYPE_XIVE_SOURCE);
    284 
    285     object_initialize_child(obj, "end_source", &xive->end_source,
    286                             TYPE_XIVE_END_SOURCE);
    287 
    288     /* Not connected to the KVM XIVE device */
    289     xive->fd = -1;
    290 }
    291 
    292 static void spapr_xive_realize(DeviceState *dev, Error **errp)
    293 {
    294     SpaprXive *xive = SPAPR_XIVE(dev);
    295     SpaprXiveClass *sxc = SPAPR_XIVE_GET_CLASS(xive);
    296     XiveSource *xsrc = &xive->source;
    297     XiveENDSource *end_xsrc = &xive->end_source;
    298     Error *local_err = NULL;
    299 
    300     /* Set by spapr_irq_init() */
    301     g_assert(xive->nr_irqs);
    302     g_assert(xive->nr_ends);
    303 
    304     sxc->parent_realize(dev, &local_err);
    305     if (local_err) {
    306         error_propagate(errp, local_err);
    307         return;
    308     }
    309 
    310     /*
    311      * Initialize the internal sources, for IPIs and virtual devices.
    312      */
    313     object_property_set_int(OBJECT(xsrc), "nr-irqs", xive->nr_irqs,
    314                             &error_fatal);
    315     object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    316     if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
    317         return;
    318     }
    319     sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
    320 
    321     /*
    322      * Initialize the END ESB source
    323      */
    324     object_property_set_int(OBJECT(end_xsrc), "nr-ends", xive->nr_irqs,
    325                             &error_fatal);
    326     object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
    327                              &error_abort);
    328     if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
    329         return;
    330     }
    331     sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
    332 
    333     /* Set the mapping address of the END ESB pages after the source ESBs */
    334     xive->end_base = xive->vc_base + xive_source_esb_len(xsrc);
    335 
    336     /*
    337      * Allocate the routing tables
    338      */
    339     xive->eat = g_new0(XiveEAS, xive->nr_irqs);
    340     xive->endt = g_new0(XiveEND, xive->nr_ends);
    341 
    342     xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
    343                            xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));
    344 
    345     qemu_register_reset(spapr_xive_reset, dev);
    346 
    347     /* TIMA initialization */
    348     memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &spapr_xive_tm_ops,
    349                           xive, "xive.tima", 4ull << TM_SHIFT);
    350     sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);
    351 
    352     /*
    353      * Map all regions. These will be enabled or disabled at reset and
    354      * can also be overridden by KVM memory regions if active
    355      */
    356     sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
    357     sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
    358     sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
    359 }
    360 
    361 static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
    362                               uint32_t eas_idx, XiveEAS *eas)
    363 {
    364     SpaprXive *xive = SPAPR_XIVE(xrtr);
    365 
    366     if (eas_idx >= xive->nr_irqs) {
    367         return -1;
    368     }
    369 
    370     *eas = xive->eat[eas_idx];
    371     return 0;
    372 }
    373 
    374 static int spapr_xive_get_end(XiveRouter *xrtr,
    375                               uint8_t end_blk, uint32_t end_idx, XiveEND *end)
    376 {
    377     SpaprXive *xive = SPAPR_XIVE(xrtr);
    378 
    379     if (end_idx >= xive->nr_ends) {
    380         return -1;
    381     }
    382 
    383     memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
    384     return 0;
    385 }
    386 
    387 static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
    388                                 uint32_t end_idx, XiveEND *end,
    389                                 uint8_t word_number)
    390 {
    391     SpaprXive *xive = SPAPR_XIVE(xrtr);
    392 
    393     if (end_idx >= xive->nr_ends) {
    394         return -1;
    395     }
    396 
    397     memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
    398     return 0;
    399 }
    400 
    401 static int spapr_xive_get_nvt(XiveRouter *xrtr,
    402                               uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
    403 {
    404     uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
    405     PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
    406 
    407     if (!cpu) {
    408         /* TODO: should we assert() if we can't find an NVT? */
    409         return -1;
    410     }
    411 
    412     /*
    413      * sPAPR does not maintain an NVT table. Return that the NVT is
    414      * valid if we have found a matching CPU
    415      */
    416     nvt->w0 = cpu_to_be32(NVT_W0_VALID);
    417     return 0;
    418 }
    419 
    420 static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
    421                                 uint32_t nvt_idx, XiveNVT *nvt,
    422                                 uint8_t word_number)
    423 {
    424     /*
    425      * We don't need to write back to the NVTs because the sPAPR
    426      * machine should never hit a non-scheduled NVT. It should never
    427      * get called.
    428      */
    429     g_assert_not_reached();
    430 }
    431 
    432 static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
    433                                 uint8_t nvt_blk, uint32_t nvt_idx,
    434                                 bool cam_ignore, uint8_t priority,
    435                                 uint32_t logic_serv, XiveTCTXMatch *match)
    436 {
    437     CPUState *cs;
    438     int count = 0;
    439 
    440     CPU_FOREACH(cs) {
    441         PowerPCCPU *cpu = POWERPC_CPU(cs);
    442         XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
    443         int ring;
    444 
    445         /*
    446          * Skip partially initialized vCPUs. This can happen when
    447          * vCPUs are hotplugged.
    448          */
    449         if (!tctx) {
    450             continue;
    451         }
    452 
    453         /*
    454          * Check the thread context CAM lines and record matches.
    455          */
    456         ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx,
    457                                          cam_ignore, logic_serv);
    458         /*
    459          * Save the matching thread interrupt context and follow on to
    460          * check for duplicates which are invalid.
    461          */
    462         if (ring != -1) {
    463             if (match->tctx) {
    464                 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
    465                               "context NVT %x/%x\n", nvt_blk, nvt_idx);
    466                 return -1;
    467             }
    468 
    469             match->ring = ring;
    470             match->tctx = tctx;
    471             count++;
    472         }
    473     }
    474 
    475     return count;
    476 }
    477 
    478 static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr)
    479 {
    480     return SPAPR_XIVE_BLOCK_ID;
    481 }
    482 
    483 static int spapr_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
    484                              uint8_t *pq)
    485 {
    486     SpaprXive *xive = SPAPR_XIVE(xrtr);
    487 
    488     assert(SPAPR_XIVE_BLOCK_ID == blk);
    489 
    490     *pq = xive_source_esb_get(&xive->source, idx);
    491     return 0;
    492 }
    493 
    494 static int spapr_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
    495                              uint8_t *pq)
    496 {
    497     SpaprXive *xive = SPAPR_XIVE(xrtr);
    498 
    499     assert(SPAPR_XIVE_BLOCK_ID == blk);
    500 
    501     *pq = xive_source_esb_set(&xive->source, idx, *pq);
    502     return 0;
    503 }
    504 
    505 
    506 static const VMStateDescription vmstate_spapr_xive_end = {
    507     .name = TYPE_SPAPR_XIVE "/end",
    508     .version_id = 1,
    509     .minimum_version_id = 1,
    510     .fields = (VMStateField []) {
    511         VMSTATE_UINT32(w0, XiveEND),
    512         VMSTATE_UINT32(w1, XiveEND),
    513         VMSTATE_UINT32(w2, XiveEND),
    514         VMSTATE_UINT32(w3, XiveEND),
    515         VMSTATE_UINT32(w4, XiveEND),
    516         VMSTATE_UINT32(w5, XiveEND),
    517         VMSTATE_UINT32(w6, XiveEND),
    518         VMSTATE_UINT32(w7, XiveEND),
    519         VMSTATE_END_OF_LIST()
    520     },
    521 };
    522 
    523 static const VMStateDescription vmstate_spapr_xive_eas = {
    524     .name = TYPE_SPAPR_XIVE "/eas",
    525     .version_id = 1,
    526     .minimum_version_id = 1,
    527     .fields = (VMStateField []) {
    528         VMSTATE_UINT64(w, XiveEAS),
    529         VMSTATE_END_OF_LIST()
    530     },
    531 };
    532 
    533 static int vmstate_spapr_xive_pre_save(void *opaque)
    534 {
    535     SpaprXive *xive = SPAPR_XIVE(opaque);
    536 
    537     if (spapr_xive_in_kernel(xive)) {
    538         return kvmppc_xive_pre_save(xive);
    539     }
    540 
    541     return 0;
    542 }
    543 
    544 /*
    545  * Called by the sPAPR IRQ backend 'post_load' method at the machine
    546  * level.
    547  */
    548 static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
    549 {
    550     SpaprXive *xive = SPAPR_XIVE(intc);
    551 
    552     if (spapr_xive_in_kernel(xive)) {
    553         return kvmppc_xive_post_load(xive, version_id);
    554     }
    555 
    556     return 0;
    557 }
    558 
    559 static const VMStateDescription vmstate_spapr_xive = {
    560     .name = TYPE_SPAPR_XIVE,
    561     .version_id = 1,
    562     .minimum_version_id = 1,
    563     .pre_save = vmstate_spapr_xive_pre_save,
    564     .post_load = NULL, /* handled at the machine level */
    565     .fields = (VMStateField[]) {
    566         VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
    567         VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
    568                                      vmstate_spapr_xive_eas, XiveEAS),
    569         VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
    570                                              vmstate_spapr_xive_end, XiveEND),
    571         VMSTATE_END_OF_LIST()
    572     },
    573 };
    574 
    575 static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
    576                                 bool lsi, Error **errp)
    577 {
    578     SpaprXive *xive = SPAPR_XIVE(intc);
    579     XiveSource *xsrc = &xive->source;
    580 
    581     assert(lisn < xive->nr_irqs);
    582 
    583     trace_spapr_xive_claim_irq(lisn, lsi);
    584 
    585     if (xive_eas_is_valid(&xive->eat[lisn])) {
    586         error_setg(errp, "IRQ %d is not free", lisn);
    587         return -EBUSY;
    588     }
    589 
    590     /*
    591      * Set default values when allocating an IRQ number
    592      */
    593     xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED);
    594     if (lsi) {
    595         xive_source_irq_set_lsi(xsrc, lisn);
    596     }
    597 
    598     if (spapr_xive_in_kernel(xive)) {
    599         return kvmppc_xive_source_reset_one(xsrc, lisn, errp);
    600     }
    601 
    602     return 0;
    603 }
    604 
    605 static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
    606 {
    607     SpaprXive *xive = SPAPR_XIVE(intc);
    608     assert(lisn < xive->nr_irqs);
    609 
    610     trace_spapr_xive_free_irq(lisn);
    611 
    612     xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
    613 }
    614 
    615 static Property spapr_xive_properties[] = {
    616     DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
    617     DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
    618     DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
    619     DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
    620     DEFINE_PROP_UINT8("hv-prio", SpaprXive, hv_prio, 7),
    621     DEFINE_PROP_END_OF_LIST(),
    622 };
    623 
    624 static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
    625                                       PowerPCCPU *cpu, Error **errp)
    626 {
    627     SpaprXive *xive = SPAPR_XIVE(intc);
    628     Object *obj;
    629     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    630 
    631     obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(xive), errp);
    632     if (!obj) {
    633         return -1;
    634     }
    635 
    636     spapr_cpu->tctx = XIVE_TCTX(obj);
    637     return 0;
    638 }
    639 
    640 static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam)
    641 {
    642     uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam);
    643     memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
    644 }
    645 
    646 static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc,
    647                                      PowerPCCPU *cpu)
    648 {
    649     XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
    650     uint8_t  nvt_blk;
    651     uint32_t nvt_idx;
    652 
    653     xive_tctx_reset(tctx);
    654 
    655     /*
    656      * When a Virtual Processor is scheduled to run on a HW thread,
    657      * the hypervisor pushes its identifier in the OS CAM line.
    658      * Emulate the same behavior under QEMU.
    659      */
    660     spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx);
    661 
    662     xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx));
    663 }
    664 
    665 static void spapr_xive_cpu_intc_destroy(SpaprInterruptController *intc,
    666                                         PowerPCCPU *cpu)
    667 {
    668     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    669 
    670     xive_tctx_destroy(spapr_cpu->tctx);
    671     spapr_cpu->tctx = NULL;
    672 }
    673 
    674 static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
    675 {
    676     SpaprXive *xive = SPAPR_XIVE(intc);
    677 
    678     trace_spapr_xive_set_irq(irq, val);
    679 
    680     if (spapr_xive_in_kernel(xive)) {
    681         kvmppc_xive_source_set_irq(&xive->source, irq, val);
    682     } else {
    683         xive_source_set_irq(&xive->source, irq, val);
    684     }
    685 }
    686 
    687 static void spapr_xive_print_info(SpaprInterruptController *intc, Monitor *mon)
    688 {
    689     SpaprXive *xive = SPAPR_XIVE(intc);
    690     CPUState *cs;
    691 
    692     CPU_FOREACH(cs) {
    693         PowerPCCPU *cpu = POWERPC_CPU(cs);
    694 
    695         xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
    696     }
    697 
    698     spapr_xive_pic_print_info(xive, mon);
    699 }
    700 
    701 static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers,
    702                           void *fdt, uint32_t phandle)
    703 {
    704     SpaprXive *xive = SPAPR_XIVE(intc);
    705     int node;
    706     uint64_t timas[2 * 2];
    707     /* Interrupt number ranges for the IPIs */
    708     uint32_t lisn_ranges[] = {
    709         cpu_to_be32(SPAPR_IRQ_IPI),
    710         cpu_to_be32(SPAPR_IRQ_IPI + nr_servers),
    711     };
    712     /*
    713      * EQ size - the sizes of pages supported by the system: 4K, 64K,
    714      * 2M, 16M. We only advertise 64K for the moment.
    715      */
    716     uint32_t eq_sizes[] = {
    717         cpu_to_be32(16), /* 64K */
    718     };
    719     /*
    720      * QEMU/KVM only needs to define a single range to reserve the
    721      * escalation priority. A priority bitmask would have been more
    722      * appropriate.
    723      */
    724     uint32_t plat_res_int_priorities[] = {
    725         cpu_to_be32(xive->hv_prio),    /* start */
    726         cpu_to_be32(0xff - xive->hv_prio), /* count */
    727     };
    728 
    729      /* Thread Interrupt Management Area: User (ring 3) and OS (ring 2) */
    730     timas[0] = cpu_to_be64(xive->tm_base +
    731                            XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    732     timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    733     timas[2] = cpu_to_be64(xive->tm_base +
    734                            XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    735     timas[3] = cpu_to_be64(1ull << TM_SHIFT);
    736 
    737     _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));
    738 
    739     _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    740     _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));
    741 
    742     _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    743     _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
    744                      sizeof(eq_sizes)));
    745     _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
    746                      sizeof(lisn_ranges)));
    747 
    748     /* For Linux to link the LSIs to the interrupt controller. */
    749     _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    750     _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));
    751 
    752     /* For SLOF */
    753     _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    754     _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));
    755 
    756     /*
    757      * The "ibm,plat-res-int-priorities" property defines the priority
    758      * ranges reserved by the hypervisor
    759      */
    760     _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
    761                      plat_res_int_priorities, sizeof(plat_res_int_priorities)));
    762 }
    763 
    764 static int spapr_xive_activate(SpaprInterruptController *intc,
    765                                uint32_t nr_servers, Error **errp)
    766 {
    767     SpaprXive *xive = SPAPR_XIVE(intc);
    768 
    769     if (kvm_enabled()) {
    770         int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, nr_servers,
    771                                     errp);
    772         if (rc < 0) {
    773             return rc;
    774         }
    775     }
    776 
    777     /* Activate the XIVE MMIOs */
    778     spapr_xive_mmio_set_enabled(xive, true);
    779 
    780     return 0;
    781 }
    782 
    783 static void spapr_xive_deactivate(SpaprInterruptController *intc)
    784 {
    785     SpaprXive *xive = SPAPR_XIVE(intc);
    786 
    787     spapr_xive_mmio_set_enabled(xive, false);
    788 
    789     if (spapr_xive_in_kernel(xive)) {
    790         kvmppc_xive_disconnect(intc);
    791     }
    792 }
    793 
    794 static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr)
    795 {
    796     return spapr_xive_in_kernel(SPAPR_XIVE(xptr));
    797 }
    798 
    799 static void spapr_xive_class_init(ObjectClass *klass, void *data)
    800 {
    801     DeviceClass *dc = DEVICE_CLASS(klass);
    802     XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    803     SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
    804     XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    805     SpaprXiveClass *sxc = SPAPR_XIVE_CLASS(klass);
    806 
    807     dc->desc    = "sPAPR XIVE Interrupt Controller";
    808     device_class_set_props(dc, spapr_xive_properties);
    809     device_class_set_parent_realize(dc, spapr_xive_realize,
    810                                     &sxc->parent_realize);
    811     dc->vmsd    = &vmstate_spapr_xive;
    812 
    813     xrc->get_eas = spapr_xive_get_eas;
    814     xrc->get_pq  = spapr_xive_get_pq;
    815     xrc->set_pq  = spapr_xive_set_pq;
    816     xrc->get_end = spapr_xive_get_end;
    817     xrc->write_end = spapr_xive_write_end;
    818     xrc->get_nvt = spapr_xive_get_nvt;
    819     xrc->write_nvt = spapr_xive_write_nvt;
    820     xrc->get_block_id = spapr_xive_get_block_id;
    821 
    822     sicc->activate = spapr_xive_activate;
    823     sicc->deactivate = spapr_xive_deactivate;
    824     sicc->cpu_intc_create = spapr_xive_cpu_intc_create;
    825     sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset;
    826     sicc->cpu_intc_destroy = spapr_xive_cpu_intc_destroy;
    827     sicc->claim_irq = spapr_xive_claim_irq;
    828     sicc->free_irq = spapr_xive_free_irq;
    829     sicc->set_irq = spapr_xive_set_irq;
    830     sicc->print_info = spapr_xive_print_info;
    831     sicc->dt = spapr_xive_dt;
    832     sicc->post_load = spapr_xive_post_load;
    833 
    834     xpc->match_nvt  = spapr_xive_match_nvt;
    835     xpc->in_kernel  = spapr_xive_in_kernel_xptr;
    836 }
    837 
    838 static const TypeInfo spapr_xive_info = {
    839     .name = TYPE_SPAPR_XIVE,
    840     .parent = TYPE_XIVE_ROUTER,
    841     .instance_init = spapr_xive_instance_init,
    842     .instance_size = sizeof(SpaprXive),
    843     .class_init = spapr_xive_class_init,
    844     .class_size = sizeof(SpaprXiveClass),
    845     .interfaces = (InterfaceInfo[]) {
    846         { TYPE_SPAPR_INTC },
    847         { }
    848     },
    849 };
    850 
    851 static void spapr_xive_register_types(void)
    852 {
    853     type_register_static(&spapr_xive_info);
    854 }
    855 
    856 type_init(spapr_xive_register_types)
    857 
    858 /*
    859  * XIVE hcalls
    860  *
    861  * The terminology used by the XIVE hcalls is the following:
    862  *
    863  *   TARGET vCPU number
    864  *   EQ     Event Queue assigned by OS to receive event data
    865  *   ESB    page for source interrupt management
    866  *   LISN   Logical Interrupt Source Number identifying a source in the
    867  *          machine
    868  *   EISN   Effective Interrupt Source Number used by guest OS to
    869  *          identify source in the guest
    870  *
    871  * The EAS, END, NVT structures are not exposed.
    872  */
    873 
    874 /*
    875  * On POWER9, the KVM XIVE device uses priority 7 for the escalation
    876  * interrupts. So we only allow the guest to use priorities [0..6].
    877  */
    878 static bool spapr_xive_priority_is_reserved(SpaprXive *xive, uint8_t priority)
    879 {
    880     return priority >= xive->hv_prio;
    881 }
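
        /*
         * With the default "hv-prio" value of 7, the guest may use
         * priorities 0..6 and priorities 7 and above are reserved,
         * which matches the "ibm,plat-res-int-priorities" range
         * { start = 7, count = 0xf8 } advertised by spapr_xive_dt().
         */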
    882 
    883 /*
    884  * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical
    885  * real address of the MMIO page through which the Event State Buffer
    886  * entry associated with the value of the "lisn" parameter is managed.
    887  *
    888  * Parameters:
    889  * Input
    890  * - R4: "flags"
    891  *         Bits 0-63 reserved
    892  * - R5: "lisn" is per "interrupts", "interrupt-map", or
    893  *       "ibm,xive-lisn-ranges" properties, or as returned by the
    894  *       ibm,query-interrupt-source-number RTAS call, or as returned
    895  *       by the H_ALLOCATE_VAS_WINDOW hcall
    896  *
    897  * Output
    898  * - R4: "flags"
    899  *         Bits 0-59: Reserved
    900  *         Bit 60: H_INT_ESB must be used for Event State Buffer
    901  *                 management
    902  *         Bit 61: 1 == LSI  0 == MSI
    903  *         Bit 62: the full function page supports trigger
    904  *         Bit 63: Store EOI Supported
    905  * - R5: Logical Real address of full function Event State Buffer
    906  *       management page, -1 if H_INT_ESB hcall flag is set to 1.
    907  * - R6: Logical Real Address of trigger only Event State Buffer
    908  *       management page or -1.
    909  * - R7: Power of 2 page size for the ESB management pages returned in
    910  *       R5 and R6.
    911  */
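
        /*
         * A guest-side sketch of this hcall, assuming the Linux pseries
         * plpar_hcall() interface (illustration only, not part of this
         * file):
         *
         *     unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
         *     long rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf,
         *                           0, lisn);
         *     // on H_SUCCESS: retbuf[0] = flags, retbuf[1] = ESB
         *     // management page, retbuf[2] = trigger page,
         *     // retbuf[3] = log2 of the ESB page size
         */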
    912 
    913 #define SPAPR_XIVE_SRC_H_INT_ESB     PPC_BIT(60) /* ESB manage with H_INT_ESB */
    914 #define SPAPR_XIVE_SRC_LSI           PPC_BIT(61) /* Virtual LSI type */
    915 #define SPAPR_XIVE_SRC_TRIGGER       PPC_BIT(62) /* Trigger and management
    916                                                     on same page */
    917 #define SPAPR_XIVE_SRC_STORE_EOI     PPC_BIT(63) /* Store EOI support */
    918 
    919 static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
    920                                           SpaprMachineState *spapr,
    921                                           target_ulong opcode,
    922                                           target_ulong *args)
    923 {
    924     SpaprXive *xive = spapr->xive;
    925     XiveSource *xsrc = &xive->source;
    926     target_ulong flags  = args[0];
    927     target_ulong lisn   = args[1];
    928 
    929     trace_spapr_xive_get_source_info(flags, lisn);
    930 
    931     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
    932         return H_FUNCTION;
    933     }
    934 
    935     if (flags) {
    936         return H_PARAMETER;
    937     }
    938 
    939     if (lisn >= xive->nr_irqs) {
    940         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
    941                       lisn);
    942         return H_P2;
    943     }
    944 
    945     if (!xive_eas_is_valid(&xive->eat[lisn])) {
    946         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
    947                       lisn);
    948         return H_P2;
    949     }
    950 
    951     /*
    952      * All sources are emulated under the main XIVE object and share
    953      * the same characteristics.
    954      */
    955     args[0] = 0;
    956     if (!xive_source_esb_has_2page(xsrc)) {
    957         args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    958     }
    959     if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
    960         args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    961     }
    962 
    963     /*
    964      * Force the use of the H_INT_ESB hcall in case of an LSI
    965      * interrupt. This is necessary under KVM to re-trigger the
    966      * interrupt if the level is still asserted
    967      */
    968     if (xive_source_irq_is_lsi(xsrc, lisn)) {
    969         args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    970     }
    971 
    972     if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
    973         args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    974     } else {
    975         args[1] = -1;
    976     }
    977 
    978     if (xive_source_esb_has_2page(xsrc) &&
    979         !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
    980         args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    981     } else {
    982         args[2] = -1;
    983     }
    984 
    985     if (xive_source_esb_has_2page(xsrc)) {
    986         args[3] = xsrc->esb_shift - 1;
    987     } else {
    988         args[3] = xsrc->esb_shift;
    989     }
    990 
    991     return H_SUCCESS;
    992 }
    993 
    994 /*
    995  * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical
    996  * Interrupt Source to a target. The Logical Interrupt Source is
    997  * designated with the "lisn" parameter and the target is designated
    998  * with the "target" and "priority" parameters.  Upon return from the
    999  * hcall(), no additional interrupts will be directed to the old EQ.
   1000  *
   1001  * Parameters:
   1002  * Input:
   1003  * - R4: "flags"
   1004  *         Bits 0-61: Reserved
   1005  *         Bit 62: set the "eisn" in the EAS
   1006  *         Bit 63: masks the interrupt source in the hardware interrupt
   1007  *       control structure. An interrupt masked by this mechanism will
   1008  *       be dropped, but its source state bits will still be
   1009  *       set. There is no race-free way of unmasking and restoring the
   1010  *       source. Thus this should only be used in interrupts that are
   1011  *       also masked at the source, and only in cases where the
   1012  *       interrupt is not meant to be used for a long time, for
   1013  *       example because no valid target exists for it
   1014  * - R5: "lisn" is per "interrupts", "interrupt-map", or
   1015  *       "ibm,xive-lisn-ranges" properties, or as returned by the
   1016  *       ibm,query-interrupt-source-number RTAS call, or as returned by
   1017  *       the H_ALLOCATE_VAS_WINDOW hcall
   1018  * - R6: "target" is per "ibm,ppc-interrupt-server#s" or
   1019  *       "ibm,ppc-interrupt-gserver#s"
   1020  * - R7: "priority" is a valid priority not in
   1021  *       "ibm,plat-res-int-priorities"
   1022  * - R8: "eisn" is the guest EISN associated with the "lisn"
   1023  *
   1024  * Output:
   1025  * - None
   1026  */
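
        /*
         * For example, directing a LISN to target 1 at priority 5
         * selects END index (1 << 3) + 5 = 0xd; with
         * SPAPR_XIVE_SRC_SET_EISN, the "eisn" is stored in EAS_END_DATA
         * and becomes the event data pushed into the OS event queue.
         * A priority of 0xff resets the EAS to valid-but-masked.
         */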
   1027 
   1028 #define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
   1029 #define SPAPR_XIVE_SRC_MASK     PPC_BIT(63)
   1030 
   1031 static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
   1032                                             SpaprMachineState *spapr,
   1033                                             target_ulong opcode,
   1034                                             target_ulong *args)
   1035 {
   1036     SpaprXive *xive = spapr->xive;
   1037     XiveEAS eas, new_eas;
   1038     target_ulong flags    = args[0];
   1039     target_ulong lisn     = args[1];
   1040     target_ulong target   = args[2];
   1041     target_ulong priority = args[3];
   1042     target_ulong eisn     = args[4];
   1043     uint8_t end_blk;
   1044     uint32_t end_idx;
   1045 
   1046     trace_spapr_xive_set_source_config(flags, lisn, target, priority, eisn);
   1047 
   1048     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
   1049         return H_FUNCTION;
   1050     }
   1051 
   1052     if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
   1053         return H_PARAMETER;
   1054     }
   1055 
   1056     if (lisn >= xive->nr_irqs) {
   1057         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
   1058                       lisn);
   1059         return H_P2;
   1060     }
   1061 
   1062     eas = xive->eat[lisn];
   1063     if (!xive_eas_is_valid(&eas)) {
   1064         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
   1065                       lisn);
   1066         return H_P2;
   1067     }
   1068 
   1069     /* priority 0xff is used to reset the EAS */
   1070     if (priority == 0xff) {
   1071         new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
   1072         goto out;
   1073     }
   1074 
   1075     if (flags & SPAPR_XIVE_SRC_MASK) {
   1076         new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
   1077     } else {
   1078         new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
   1079     }
   1080 
   1081     if (spapr_xive_priority_is_reserved(xive, priority)) {
   1082         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
   1083                       " is reserved\n", priority);
   1084         return H_P4;
   1085     }
   1086 
   1087     /*
   1088      * Validate that "target" is part of the list of threads allocated
   1089      * to the partition. For that, find the END corresponding to the
   1090      * target.
   1091      */
   1092     if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
   1093         return H_P3;
   1094     }
   1095 
   1096     new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
   1097     new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);
   1098 
   1099     if (flags & SPAPR_XIVE_SRC_SET_EISN) {
   1100         new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
   1101     }
   1102 
   1103     if (spapr_xive_in_kernel(xive)) {
   1104         Error *local_err = NULL;
   1105 
   1106         kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
   1107         if (local_err) {
   1108             error_report_err(local_err);
   1109             return H_HARDWARE;
   1110         }
   1111     }
   1112 
   1113 out:
   1114     xive->eat[lisn] = new_eas;
   1115     return H_SUCCESS;
   1116 }
   1117 
   1118 /*
   1119  * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine to which
   1120  * target/priority pair is assigned to the specified Logical Interrupt
   1121  * Source.
   1122  *
   1123  * Parameters:
   1124  * Input:
   1125  * - R4: "flags"
   1126  *         Bits 0-63 Reserved
   1127  * - R5: "lisn" is per "interrupts", "interrupt-map", or
   1128  *       "ibm,xive-lisn-ranges" properties, or as returned by the
   1129  *       ibm,query-interrupt-source-number RTAS call, or as
   1130  *       returned by the H_ALLOCATE_VAS_WINDOW hcall
   1131  *
   1132  * Output:
   1133  * - R4: Target to which the specified Logical Interrupt Source is
   1134  *       assigned
   1135  * - R5: Priority to which the specified Logical Interrupt Source is
   1136  *       assigned
   1137  * - R6: EISN for the specified Logical Interrupt Source (this will be
   1138  *       equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG)
   1139  */
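
        /*
         * This is the inverse of H_INT_SET_SOURCE_CONFIG: the target is
         * recovered from the NVT fields of the END that the EAS points
         * to, the priority reads back as 0xff when the EAS is masked,
         * and the EISN is the value stored in EAS_END_DATA.
         */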
   1140 static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
   1141                                             SpaprMachineState *spapr,
   1142                                             target_ulong opcode,
   1143                                             target_ulong *args)
   1144 {
   1145     SpaprXive *xive = spapr->xive;
   1146     target_ulong flags = args[0];
   1147     target_ulong lisn = args[1];
   1148     XiveEAS eas;
   1149     XiveEND *end;
   1150     uint8_t nvt_blk;
   1151     uint32_t end_idx, nvt_idx;
   1152 
   1153     trace_spapr_xive_get_source_config(flags, lisn);
   1154 
   1155     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
   1156         return H_FUNCTION;
   1157     }
   1158 
   1159     if (flags) {
   1160         return H_PARAMETER;
   1161     }
   1162 
   1163     if (lisn >= xive->nr_irqs) {
   1164         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
   1165                       lisn);
   1166         return H_P2;
   1167     }
   1168 
   1169     eas = xive->eat[lisn];
   1170     if (!xive_eas_is_valid(&eas)) {
   1171         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
   1172                       lisn);
   1173         return H_P2;
   1174     }
   1175 
   1176     /* EAS_END_BLOCK is unused on sPAPR */
   1177     end_idx = xive_get_field64(EAS_END_INDEX, eas.w);
   1178 
   1179     assert(end_idx < xive->nr_ends);
   1180     end = &xive->endt[end_idx];
   1181 
   1182     nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
   1183     nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
   1184     args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
   1185 
   1186     if (xive_eas_is_masked(&eas)) {
   1187         args[1] = 0xff;
   1188     } else {
   1189         args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
   1190     }
   1191 
   1192     args[2] = xive_get_field64(EAS_END_DATA, eas.w);
   1193 
   1194     return H_SUCCESS;
   1195 }
   1196 
   1197 /*
   1198  * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real
   1199  * address of the notification management page associated with the
   1200  * specified target and priority.
   1201  *
   1202  * Parameters:
   1203  * Input:
   1204  * - R4: "flags"
   1205  *         Bits 0-63 Reserved
   1206  * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
   1207  *       "ibm,ppc-interrupt-gserver#s"
   1208  * - R6: "priority" is a valid priority not in
   1209  *       "ibm,plat-res-int-priorities"
   1210  *
   1211  * Output:
   1212  * - R4: Logical real address of notification page
   1213  * - R5: Power of 2 page size of the notification page
   1214  */
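
        /*
         * The reported page size is END_W0_QSIZE + 12: a 64K event
         * queue (QSIZE field 4) reports 16, and a target/priority pair
         * with no queue configured reports 0.
         */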
   1215 static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
   1216                                          SpaprMachineState *spapr,
   1217                                          target_ulong opcode,
   1218                                          target_ulong *args)
   1219 {
   1220     SpaprXive *xive = spapr->xive;
   1221     XiveENDSource *end_xsrc = &xive->end_source;
   1222     target_ulong flags = args[0];
   1223     target_ulong target = args[1];
   1224     target_ulong priority = args[2];
   1225     XiveEND *end;
   1226     uint8_t end_blk;
   1227     uint32_t end_idx;
   1228 
   1229     trace_spapr_xive_get_queue_info(flags, target, priority);
   1230 
   1231     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
   1232         return H_FUNCTION;
   1233     }
   1234 
   1235     if (flags) {
   1236         return H_PARAMETER;
   1237     }
   1238 
   1239     /*
   1240      * H_STATE should be returned if an H_INT_RESET is in progress.
   1241      * This is not needed when running the emulation under QEMU.
   1242      */
   1243 
   1244     if (spapr_xive_priority_is_reserved(xive, priority)) {
   1245         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
   1246                       " is reserved\n", priority);
   1247         return H_P3;
   1248     }
   1249 
   1250     /*
   1251      * Validate that "target" is part of the list of threads allocated
   1252      * to the partition. For that, find the END corresponding to the
   1253      * target.
   1254      */
   1255     if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
   1256         return H_P2;
   1257     }
   1258 
   1259     assert(end_idx < xive->nr_ends);
   1260     end = &xive->endt[end_idx];
   1261 
   1262     args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
   1263     if (xive_end_is_enqueue(end)) {
   1264         args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
   1265     } else {
   1266         args[1] = 0;
   1267     }
   1268 
   1269     return H_SUCCESS;
   1270 }
   1271 
   1272 /*
   1273  * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset an EQ for
   1274  * a given "target" and "priority".  It is also used to set the
   1275  * notification config associated with the EQ.  An EQ size of 0 is
   1276  * used to reset the EQ config for a given target and priority. If
   1277  * resetting the EQ config, the END associated with the given "target"
   1278  * and "priority" will be changed to disable queueing.
   1279  *
   1280  * Upon return from the hcall(), no additional interrupts will be
   1281  * directed to the old EQ (if one was set). The old EQ (if one was
   1282  * set) should be investigated for interrupts that occurred prior to
   1283  * or during the hcall().
   1284  *
   1285  * Parameters:
   1286  * Input:
   1287  * - R4: "flags"
   1288  *         Bits 0-62: Reserved
   1289  *         Bit 63: Unconditional Notify (n) per the XIVE spec
   1290  * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
   1291  *       "ibm,ppc-interrupt-gserver#s"
   1292  * - R6: "priority" is a valid priority not in
   1293  *       "ibm,plat-res-int-priorities"
   1294  * - R7: "eventQueue": The logical real address of the start of the EQ
   1295  * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes"
   1296  *
   1297  * Output:
   1298  * - None
   1299  */
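
        /*
         * The accepted "eventQueueSize" values are 12, 16, 21 and 24
         * (4K, 64K, 2M and 16M queues) and "eventQueue" must be
         * naturally aligned: a 64K queue at an address that is not a
         * multiple of 0x10000 fails with H_P4. A size of 0 resets the
         * END and disables queueing.
         */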
   1300 
   1301 #define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)
   1302 
   1303 static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
   1304                                            SpaprMachineState *spapr,
   1305                                            target_ulong opcode,
   1306                                            target_ulong *args)
   1307 {
   1308     SpaprXive *xive = spapr->xive;
   1309     target_ulong flags = args[0];
   1310     target_ulong target = args[1];
   1311     target_ulong priority = args[2];
   1312     target_ulong qpage = args[3];
   1313     target_ulong qsize = args[4];
   1314     XiveEND end;
   1315     uint8_t end_blk, nvt_blk;
   1316     uint32_t end_idx, nvt_idx;
   1317 
   1318     trace_spapr_xive_set_queue_config(flags, target, priority, qpage, qsize);
   1319 
   1320     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
   1321         return H_FUNCTION;
   1322     }
   1323 
   1324     if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
   1325         return H_PARAMETER;
   1326     }
   1327 
   1328     /*
   1329      * H_STATE should be returned if an H_INT_RESET is in progress.
   1330      * This is not needed when running the emulation under QEMU.
   1331      */
   1332 
   1333     if (spapr_xive_priority_is_reserved(xive, priority)) {
   1334         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
   1335                       " is reserved\n", priority);
   1336         return H_P3;
   1337     }
   1338 
   1339     /*
   1340      * Validate that "target" is part of the list of threads allocated
   1341      * to the partition. For that, find the END corresponding to the
   1342      * target.
   1343      */
   1344 
   1345     if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
   1346         return H_P2;
   1347     }
   1348 
   1349     assert(end_idx < xive->nr_ends);
   1350     memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));
   1351 
   1352     switch (qsize) {
   1353     case 12:
   1354     case 16:
   1355     case 21:
   1356     case 24:
   1357         if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
   1358             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
   1359                           " is not naturally aligned with %" HWADDR_PRIx "\n",
   1360                           qpage, (hwaddr)1 << qsize);
   1361             return H_P4;
   1362         }
   1363         end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
   1364         end.w3 = cpu_to_be32(qpage & 0xffffffff);
   1365         end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
   1366         end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
   1367         break;
   1368     case 0:
   1369         /* reset queue and disable queueing */
   1370         spapr_xive_end_reset(&end);
   1371         goto out;
   1372 
   1373     default:
   1374         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
   1375                       qsize);
   1376         return H_P5;
   1377     }
   1378 
   1379     if (qsize) {
   1380         hwaddr plen = 1 << qsize;
   1381         void *eq;
   1382 
   1383         /*
   1384          * Validate the guest EQ. We should also check that the queue
   1385          * has been zeroed by the OS.
   1386          */
   1387         eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
   1388                                MEMTXATTRS_UNSPECIFIED);
   1389         if (plen != 1 << qsize) {
   1390             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
   1391                           HWADDR_PRIx "\n", qpage);
   1392             return H_P4;
   1393         }
   1394         address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
   1395     }
   1396 
   1397     /* "target" should have been validated above */
   1398     if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
   1399         g_assert_not_reached();
   1400     }
   1401 
   1402     /*
   1403      * Ensure the priority and target are correctly set (they will not
   1404      * be right after allocation)
   1405      */
   1406     end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
   1407         xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
   1408     end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);
   1409 
   1410     if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
   1411         end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
   1412     } else {
   1413         end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
   1414     }
   1415 
   1416     /*
   1417      * The generation bit for the END starts at 1 and the END page
   1418      * offset counter starts at 0.
   1419      */
   1420     end.w1 = cpu_to_be32(END_W1_GENERATION) |
   1421         xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
   1422     end.w0 |= cpu_to_be32(END_W0_VALID);
   1423 
   1424     /*
   1425      * TODO: issue syncs required to ensure all in-flight interrupts
   1426      * are complete on the old END
   1427      */
   1428 
   1429 out:
   1430     if (spapr_xive_in_kernel(xive)) {
   1431         Error *local_err = NULL;
   1432 
   1433         kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
   1434         if (local_err) {
   1435             error_report_err(local_err);
   1436             return H_HARDWARE;
   1437         }
   1438     }
   1439 
   1440     /* Update END */
   1441     memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
   1442     return H_SUCCESS;
   1443 }
   1444 
   1445 /*
   1446  * The H_INT_GET_QUEUE_CONFIG hcall() is used to get an EQ for a given
   1447  * target and priority.
   1448  *
   1449  * Parameters:
   1450  * Input:
   1451  * - R4: "flags"
   1452  *         Bits 0-62: Reserved
   1453  *         Bit 63: Debug: Return debug data
   1454  * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
   1455  *       "ibm,ppc-interrupt-gserver#s"
   1456  * - R6: "priority" is a valid priority not in
   1457  *       "ibm,plat-res-int-priorities"
   1458  *
   1459  * Output:
   1460  * - R4: "flags":
   1461  *       Bits 0-61: Reserved
   1462  *       Bit 62: The value of Event Queue Generation Number (g) per
   1463  *              the XIVE spec if "Debug" = 1
   1464  *       Bit 63: The value of Unconditional Notify (n) per the XIVE spec
   1465  * - R5: The logical real address of the start of the EQ
   1466  * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes"
   1467  * - R7: The value of Event Queue Offset Counter per XIVE spec
   1468  *       if "Debug" = 1, else 0
   1469  *
   1470  */
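
        /*
         * When the Debug bit is set, bit 62 of the returned flags
         * carries the END generation bit and R7 returns the current
         * queue offset counter; both are 0 otherwise.
         */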
   1471 
   1472 #define SPAPR_XIVE_END_DEBUG     PPC_BIT(63)
   1473 
   1474 static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
   1475                                            SpaprMachineState *spapr,
   1476                                            target_ulong opcode,
   1477                                            target_ulong *args)
   1478 {
   1479     SpaprXive *xive = spapr->xive;
   1480     target_ulong flags = args[0];
   1481     target_ulong target = args[1];
   1482     target_ulong priority = args[2];
   1483     XiveEND *end;
   1484     uint8_t end_blk;
   1485     uint32_t end_idx;
   1486 
   1487     trace_spapr_xive_get_queue_config(flags, target, priority);
   1488 
   1489     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
   1490         return H_FUNCTION;
   1491     }
   1492 
   1493     if (flags & ~SPAPR_XIVE_END_DEBUG) {
   1494         return H_PARAMETER;
   1495     }
   1496 
   1497     /*
   1498      * H_STATE should be returned if a H_INT_RESET is in progress.
   1499      * This is not needed when running the emulation under QEMU
   1500      */
   1501 
   1502     if (spapr_xive_priority_is_reserved(xive, priority)) {
   1503         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
   1504                       " is reserved\n", priority);
   1505         return H_P3;
   1506     }
   1507 
   1508     /*
   1509      * Validate that "target" is part of the list of threads allocated
   1510      * to the partition. For that, find the END corresponding to the
   1511      * target.
   1512      */
   1513     if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
   1514         return H_P2;
   1515     }
   1516 
   1517     assert(end_idx < xive->nr_ends);
   1518     end = &xive->endt[end_idx];
   1519 
   1520     args[0] = 0;
   1521     if (xive_end_is_notify(end)) {
   1522         args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
   1523     }
   1524 
   1525     if (xive_end_is_enqueue(end)) {
   1526         args[1] = xive_end_qaddr(end);
   1527         args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
   1528     } else {
   1529         args[1] = 0;
   1530         args[2] = 0;
   1531     }
   1532 
   1533     if (spapr_xive_in_kernel(xive)) {
   1534         Error *local_err = NULL;
   1535 
   1536         kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
   1537         if (local_err) {
   1538             error_report_err(local_err);
   1539             return H_HARDWARE;
   1540         }
   1541     }
   1542 
    1543     /* TODO: do we need any locking on the END? */
   1544     if (flags & SPAPR_XIVE_END_DEBUG) {
   1545         /* Load the event queue generation number into the return flags */
    1546         args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 1;
   1547 
   1548         /* Load R7 with the event queue offset counter */
   1549         args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
   1550     } else {
   1551         args[3] = 0;
   1552     }
   1553 
   1554     return H_SUCCESS;
   1555 }
   1556 
   1557 /*
   1558  * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the
   1559  * reporting cache line pair for the calling thread.  The reporting
   1560  * cache lines will contain the OS interrupt context when the OS
   1561  * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS
   1562  * interrupt. The reporting cache lines can be reset by inputting -1
    1563  * in "reportingLine".  If the CI store byte is issued while no
    1564  * reporting cache lines are registered, the interrupt context data
    1565  * is not made accessible to the OS.
   1566  *
   1567  * Parameters:
   1568  * Input:
   1569  * - R4: "flags"
   1570  *         Bits 0-63: Reserved
   1571  * - R5: "reportingLine": The logical real address of the reporting cache
   1572  *       line pair
   1573  *
   1574  * Output:
   1575  * - None
   1576  */
   1577 static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
   1578                                                 SpaprMachineState *spapr,
   1579                                                 target_ulong opcode,
   1580                                                 target_ulong *args)
   1581 {
   1582     target_ulong flags   = args[0];
   1583 
   1584     trace_spapr_xive_set_os_reporting_line(flags);
   1585 
   1586     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
   1587         return H_FUNCTION;
   1588     }
   1589 
   1590     /*
   1591      * H_STATE should be returned if a H_INT_RESET is in progress.
   1592      * This is not needed when running the emulation under QEMU
   1593      */
   1594 
   1595     /* TODO: H_INT_SET_OS_REPORTING_LINE */
   1596     return H_FUNCTION;
   1597 }
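
         /*
          * For reference, the CI store byte mentioned above is the OS
          * acknowledge cycle. A hypothetical guest-side sketch, assuming
          * "tima" is a CI mapping of the OS TIMA page:
          *
          *   *((volatile uint8_t *)(tima + 0xC10)) = 0;  // ack OS interrupt
          *
          * With a reporting cache line pair registered, the OS interrupt
          * context is written to that pair; without one, it is not made
          * accessible to the OS, as described above.
          */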
   1598 
   1599 /*
   1600  * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical
   1601  * real address of the reporting cache line pair set for the input
   1602  * "target".  If no reporting cache line pair has been set, -1 is
   1603  * returned.
   1604  *
   1605  * Parameters:
   1606  * Input:
   1607  * - R4: "flags"
   1608  *         Bits 0-63: Reserved
   1609  * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
   1610  *       "ibm,ppc-interrupt-gserver#s"
   1611  * - R6: "reportingLine": The logical real address of the reporting
   1612  *        cache line pair
   1613  *
   1614  * Output:
   1615  * - R4: The logical real address of the reporting line if set, else -1
   1616  */
   1617 static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
   1618                                                 SpaprMachineState *spapr,
   1619                                                 target_ulong opcode,
   1620                                                 target_ulong *args)
   1621 {
   1622     target_ulong flags   = args[0];
   1623 
   1624     trace_spapr_xive_get_os_reporting_line(flags);
   1625 
   1626     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
   1627         return H_FUNCTION;
   1628     }
   1629 
   1630     /*
   1631      * H_STATE should be returned if a H_INT_RESET is in progress.
   1632      * This is not needed when running the emulation under QEMU
   1633      */
   1634 
   1635     /* TODO: H_INT_GET_OS_REPORTING_LINE */
   1636     return H_FUNCTION;
   1637 }
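
         /*
          * A hypothetical guest-side sketch of the hcall above; the
          * registered address (or -1) would come back in the first
          * return register:
          *
          *   unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
          *   long rc = plpar_hcall(H_INT_GET_OS_REPORTING_LINE, retbuf,
          *                         0, target, 0);  // flags, target, reportingLine
          *   // retbuf[0]: reporting line address, or -1 if none is set
          *
          * Note that both OS reporting line hcalls return H_FUNCTION
          * here, since neither is implemented by this model.
          */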
   1638 
   1639 /*
   1640  * The H_INT_ESB hcall() is used to issue a load or store to the ESB
   1641  * page for the input "lisn".  This hcall is only supported for LISNs
   1642  * that have the ESB hcall flag set to 1 when returned from hcall()
   1643  * H_INT_GET_SOURCE_INFO.
   1644  *
   1645  * Parameters:
   1646  * Input:
   1647  * - R4: "flags"
   1648  *         Bits 0-62: Reserved
    1649  *         Bit 63: Store: Store=1 for a store operation, else a load operation
   1650  * - R5: "lisn" is per "interrupts", "interrupt-map", or
   1651  *       "ibm,xive-lisn-ranges" properties, or as returned by the
   1652  *       ibm,query-interrupt-source-number RTAS call, or as
   1653  *       returned by the H_ALLOCATE_VAS_WINDOW hcall
   1654  * - R6: "esbOffset" is the offset into the ESB page for the load or
   1655  *       store operation
   1656  * - R7: "storeData" is the data to write for a store operation
   1657  *
   1658  * Output:
   1659  * - R4: The value of the load if load operation, else -1
   1660  */
   1661 
   1662 #define SPAPR_XIVE_ESB_STORE PPC_BIT(63)
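
         /*
          * Illustrative use (a hypothetical guest-side sketch): masking
          * a source by setting its PQ bits to 01 through the hcall
          * rather than through a direct MMIO to the ESB management page,
          * using the XIVE_ESB_SET_PQ_01 (0xd00) load offset:
          *
          *   unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
          *   long rc = plpar_hcall(H_INT_ESB, retbuf,
          *                         0,                   // flags: load
          *                         lisn,                // source to mask
          *                         XIVE_ESB_SET_PQ_01,  // esbOffset
          *                         0);                  // storeData (unused)
          *   // retbuf[0]: previous PQ state returned by the ESB load
          */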
   1663 
   1664 static target_ulong h_int_esb(PowerPCCPU *cpu,
   1665                               SpaprMachineState *spapr,
   1666                               target_ulong opcode,
   1667                               target_ulong *args)
   1668 {
   1669     SpaprXive *xive = spapr->xive;
   1670     XiveEAS eas;
   1671     target_ulong flags  = args[0];
   1672     target_ulong lisn   = args[1];
   1673     target_ulong offset = args[2];
   1674     target_ulong data   = args[3];
   1675     hwaddr mmio_addr;
   1676     XiveSource *xsrc = &xive->source;
   1677 
   1678     trace_spapr_xive_esb(flags, lisn, offset, data);
   1679 
   1680     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
   1681         return H_FUNCTION;
   1682     }
   1683 
   1684     if (flags & ~SPAPR_XIVE_ESB_STORE) {
   1685         return H_PARAMETER;
   1686     }
   1687 
   1688     if (lisn >= xive->nr_irqs) {
   1689         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
   1690                       lisn);
   1691         return H_P2;
   1692     }
   1693 
   1694     eas = xive->eat[lisn];
   1695     if (!xive_eas_is_valid(&eas)) {
   1696         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
   1697                       lisn);
   1698         return H_P2;
   1699     }
   1700 
    1701     if (offset >= (1ull << xsrc->esb_shift)) {
   1702         return H_P3;
   1703     }
   1704 
   1705     if (spapr_xive_in_kernel(xive)) {
   1706         args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
   1707                                      flags & SPAPR_XIVE_ESB_STORE);
   1708     } else {
   1709         mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;
   1710 
   1711         if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
   1712                           (flags & SPAPR_XIVE_ESB_STORE),
   1713                           MEMTXATTRS_UNSPECIFIED)) {
   1714             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
   1715                           HWADDR_PRIx "\n", mmio_addr);
   1716             return H_HARDWARE;
   1717         }
   1718         args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
   1719     }
   1720     return H_SUCCESS;
   1721 }
   1722 
   1723 /*
   1724  * The H_INT_SYNC hcall() is used to issue hardware syncs that will
    1725  * ensure any in-flight events for the input "lisn" are in the event
   1726  * queue.
   1727  *
   1728  * Parameters:
   1729  * Input:
   1730  * - R4: "flags"
   1731  *         Bits 0-63: Reserved
   1732  * - R5: "lisn" is per "interrupts", "interrupt-map", or
   1733  *       "ibm,xive-lisn-ranges" properties, or as returned by the
   1734  *       ibm,query-interrupt-source-number RTAS call, or as
   1735  *       returned by the H_ALLOCATE_VAS_WINDOW hcall
   1736  *
   1737  * Output:
   1738  * - None
   1739  */
   1740 static target_ulong h_int_sync(PowerPCCPU *cpu,
   1741                                SpaprMachineState *spapr,
   1742                                target_ulong opcode,
   1743                                target_ulong *args)
   1744 {
   1745     SpaprXive *xive = spapr->xive;
   1746     XiveEAS eas;
   1747     target_ulong flags = args[0];
   1748     target_ulong lisn = args[1];
   1749 
   1750     trace_spapr_xive_sync(flags, lisn);
   1751 
   1752     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
   1753         return H_FUNCTION;
   1754     }
   1755 
   1756     if (flags) {
   1757         return H_PARAMETER;
   1758     }
   1759 
   1760     if (lisn >= xive->nr_irqs) {
   1761         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
   1762                       lisn);
   1763         return H_P2;
   1764     }
   1765 
   1766     eas = xive->eat[lisn];
   1767     if (!xive_eas_is_valid(&eas)) {
   1768         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
   1769                       lisn);
   1770         return H_P2;
   1771     }
   1772 
   1773     /*
   1774      * H_STATE should be returned if a H_INT_RESET is in progress.
   1775      * This is not needed when running the emulation under QEMU
   1776      */
   1777 
   1778     /*
    1779      * This is not real hardware. Nothing needs to be done unless
    1780      * running under KVM
   1781      */
   1782 
   1783     if (spapr_xive_in_kernel(xive)) {
   1784         Error *local_err = NULL;
   1785 
   1786         kvmppc_xive_sync_source(xive, lisn, &local_err);
   1787         if (local_err) {
   1788             error_report_err(local_err);
   1789             return H_HARDWARE;
   1790         }
   1791     }
   1792     return H_SUCCESS;
   1793 }
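
         /*
          * A typical use (hypothetical guest-side sketch): after
          * reconfiguring a source, sync it so that any event already in
          * flight is guaranteed to have reached its event queue:
          *
          *   unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
          *   long rc = plpar_hcall(H_INT_SYNC, retbuf, 0, lisn);
          */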
   1794 
   1795 /*
   1796  * The H_INT_RESET hcall() is used to reset all of the partition's
   1797  * interrupt exploitation structures to their initial state.  This
   1798  * means losing all previously set interrupt state set via
   1799  * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG.
   1800  *
   1801  * Parameters:
   1802  * Input:
   1803  * - R4: "flags"
   1804  *         Bits 0-63: Reserved
   1805  *
   1806  * Output:
   1807  * - None
   1808  */
   1809 static target_ulong h_int_reset(PowerPCCPU *cpu,
   1810                                 SpaprMachineState *spapr,
   1811                                 target_ulong opcode,
   1812                                 target_ulong *args)
   1813 {
   1814     SpaprXive *xive = spapr->xive;
   1815     target_ulong flags   = args[0];
   1816 
   1817     trace_spapr_xive_reset(flags);
   1818 
   1819     if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
   1820         return H_FUNCTION;
   1821     }
   1822 
   1823     if (flags) {
   1824         return H_PARAMETER;
   1825     }
   1826 
   1827     device_cold_reset(DEVICE(xive));
   1828 
   1829     if (spapr_xive_in_kernel(xive)) {
   1830         Error *local_err = NULL;
   1831 
   1832         kvmppc_xive_reset(xive, &local_err);
   1833         if (local_err) {
   1834             error_report_err(local_err);
   1835             return H_HARDWARE;
   1836         }
   1837     }
   1838     return H_SUCCESS;
   1839 }
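
         /*
          * A hypothetical guest-side sketch: the reset takes no argument
          * besides the (reserved) flags, so a kexec or shutdown path
          * would simply issue
          *
          *   unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
          *   long rc = plpar_hcall(H_INT_RESET, retbuf, 0);
          *
          * after which all configuration set through the H_INT_SET_*
          * hcalls must be re-established.
          */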
   1840 
   1841 void spapr_xive_hcall_init(SpaprMachineState *spapr)
   1842 {
   1843     spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
   1844     spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
   1845     spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
   1846     spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
   1847     spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
   1848     spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
   1849     spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
   1850                              h_int_set_os_reporting_line);
   1851     spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
   1852                              h_int_get_os_reporting_line);
   1853     spapr_register_hypercall(H_INT_ESB, h_int_esb);
   1854     spapr_register_hypercall(H_INT_SYNC, h_int_sync);
   1855     spapr_register_hypercall(H_INT_RESET, h_int_reset);
   1856 }