qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git
Log | Files | Refs | Submodules | LICENSE

spapr_events.c (38959B)


      1 /*
      2  * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
      3  *
      4  * RTAS events handling
      5  *
      6  * Copyright (c) 2012 David Gibson, IBM Corporation.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a copy
      9  * of this software and associated documentation files (the "Software"), to deal
     10  * in the Software without restriction, including without limitation the rights
     11  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     12  * copies of the Software, and to permit persons to whom the Software is
     13  * furnished to do so, subject to the following conditions:
     14  *
     15  * The above copyright notice and this permission notice shall be included in
     16  * all copies or substantial portions of the Software.
     17  *
     18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
     21  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     22  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     23  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
     24  * THE SOFTWARE.
     25  *
     26  */
     27 
     28 #include "qemu/osdep.h"
     29 #include "qapi/error.h"
     30 #include "sysemu/device_tree.h"
     31 #include "sysemu/runstate.h"
     32 
     33 #include "hw/ppc/fdt.h"
     34 #include "hw/ppc/spapr.h"
     35 #include "hw/ppc/spapr_vio.h"
     36 #include "hw/pci/pci.h"
     37 #include "hw/irq.h"
     38 #include "hw/pci-host/spapr.h"
     39 #include "hw/ppc/spapr_drc.h"
     40 #include "qemu/help_option.h"
     41 #include "qemu/bcd.h"
     42 #include "qemu/main-loop.h"
     43 #include "hw/ppc/spapr_ovec.h"
     44 #include <libfdt.h>
     45 #include "migration/blocker.h"
     46 
/*
 * Layout of the 32-bit "summary" word at the head of an RTAS error log
 * (PAPR RTAS error/event log format).  Each *_MASK selects a bit-field;
 * the indented values beneath a mask are that field's defined encodings.
 */
#define RTAS_LOG_VERSION_MASK                   0xff000000
#define   RTAS_LOG_VERSION_6                    0x06000000
#define RTAS_LOG_SEVERITY_MASK                  0x00e00000
#define   RTAS_LOG_SEVERITY_ALREADY_REPORTED    0x00c00000
#define   RTAS_LOG_SEVERITY_FATAL               0x00a00000
#define   RTAS_LOG_SEVERITY_ERROR               0x00800000
#define   RTAS_LOG_SEVERITY_ERROR_SYNC          0x00600000
#define   RTAS_LOG_SEVERITY_WARNING             0x00400000
#define   RTAS_LOG_SEVERITY_EVENT               0x00200000
#define   RTAS_LOG_SEVERITY_NO_ERROR            0x00000000
#define RTAS_LOG_DISPOSITION_MASK               0x00180000
#define   RTAS_LOG_DISPOSITION_FULLY_RECOVERED  0x00000000
#define   RTAS_LOG_DISPOSITION_LIMITED_RECOVERY 0x00080000
#define   RTAS_LOG_DISPOSITION_NOT_RECOVERED    0x00100000
/* set when a version-6 extended log follows the fixed header */
#define RTAS_LOG_OPTIONAL_PART_PRESENT          0x00040000
#define RTAS_LOG_INITIATOR_MASK                 0x0000f000
#define   RTAS_LOG_INITIATOR_UNKNOWN            0x00000000
#define   RTAS_LOG_INITIATOR_CPU                0x00001000
#define   RTAS_LOG_INITIATOR_PCI                0x00002000
#define   RTAS_LOG_INITIATOR_MEMORY             0x00004000
#define   RTAS_LOG_INITIATOR_HOTPLUG            0x00006000
#define RTAS_LOG_TARGET_MASK                    0x00000f00
#define   RTAS_LOG_TARGET_UNKNOWN               0x00000000
#define   RTAS_LOG_TARGET_CPU                   0x00000100
#define   RTAS_LOG_TARGET_PCI                   0x00000200
#define   RTAS_LOG_TARGET_MEMORY                0x00000400
#define   RTAS_LOG_TARGET_HOTPLUG               0x00000600
#define RTAS_LOG_TYPE_MASK                      0x000000ff
#define   RTAS_LOG_TYPE_OTHER                   0x00000000
#define   RTAS_LOG_TYPE_RETRY                   0x00000001
#define   RTAS_LOG_TYPE_TCE_ERR                 0x00000002
#define   RTAS_LOG_TYPE_INTERN_DEV_FAIL         0x00000003
#define   RTAS_LOG_TYPE_TIMEOUT                 0x00000004
#define   RTAS_LOG_TYPE_DATA_PARITY             0x00000005
#define   RTAS_LOG_TYPE_ADDR_PARITY             0x00000006
#define   RTAS_LOG_TYPE_CACHE_PARITY            0x00000007
#define   RTAS_LOG_TYPE_ADDR_INVALID            0x00000008
#define   RTAS_LOG_TYPE_ECC_UNCORR              0x00000009
#define   RTAS_LOG_TYPE_ECC_CORR                0x0000000a
#define   RTAS_LOG_TYPE_EPOW                    0x00000040
#define   RTAS_LOG_TYPE_HOTPLUG                 0x000000e5
     88 
/*
 * Fixed header common to all RTAS error logs: the summary word (see
 * RTAS_LOG_* above) and the byte length of any extended log that
 * follows it.  All fields are big-endian in the guest-visible log.
 */
struct rtas_error_log {
    uint32_t summary;
    uint32_t extended_length;
} QEMU_PACKED;
     93 
/*
 * Header of a version-6 extended error log.  b0/b2 are flag bytes
 * whose bit meanings are given by the RTAS_LOG_V6_B* defines below.
 */
struct rtas_event_log_v6 {
    uint8_t b0;
#define RTAS_LOG_V6_B0_VALID                          0x80
#define RTAS_LOG_V6_B0_UNRECOVERABLE_ERROR            0x40
#define RTAS_LOG_V6_B0_RECOVERABLE_ERROR              0x20
#define RTAS_LOG_V6_B0_DEGRADED_OPERATION             0x10
#define RTAS_LOG_V6_B0_PREDICTIVE_ERROR               0x08
#define RTAS_LOG_V6_B0_NEW_LOG                        0x04
#define RTAS_LOG_V6_B0_BIGENDIAN                      0x02
    uint8_t _resv1;
    uint8_t b2;
#define RTAS_LOG_V6_B2_POWERPC_FORMAT                 0x80
#define RTAS_LOG_V6_B2_LOG_FORMAT_MASK                0x0f
#define   RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT    0x0e
    uint8_t _resv2[9];
    uint32_t company;   /* creator company id, big-endian */
#define RTAS_LOG_V6_COMPANY_IBM                 0x49424d00 /* IBM<null> */
} QEMU_PACKED;
    112 
/*
 * Common header that precedes every section of a version-6 extended
 * log.  section_id is a two-ASCII-character tag (e.g. "PH", "EP"),
 * section_length counts the whole section including this header.
 */
struct rtas_event_log_v6_section_header {
    uint16_t section_id;
    uint16_t section_length;
    uint8_t section_version;
    uint8_t section_subtype;
    uint16_t creator_component_id;
} QEMU_PACKED;
    120 
/*
 * "Main-A" (private header) section: creation timestamp, creator id
 * and the platform log id (plid) identifying this log instance.
 */
struct rtas_event_log_v6_maina {
#define RTAS_LOG_V6_SECTION_ID_MAINA                0x5048 /* PH */
    struct rtas_event_log_v6_section_header hdr;
    uint32_t creation_date; /* BCD: YYYYMMDD */
    uint32_t creation_time; /* BCD: HHMMSS00 */
    uint8_t _platform1[8];
    char creator_id;        /* 'H' == hypervisor, see spapr_init_maina() */
    uint8_t _resv1[2];
    uint8_t section_count;  /* number of sections in the whole log */
    uint8_t _resv2[4];
    uint8_t _platform2[8];
    uint32_t plid;          /* platform log id, unique per log */
    uint8_t _platform3[4];
} QEMU_PACKED;
    135 
/*
 * "Main-B" (user header) section: classifies the event's subsystem,
 * severity and subtype for the guest's error-log consumers.
 */
struct rtas_event_log_v6_mainb {
#define RTAS_LOG_V6_SECTION_ID_MAINB                0x5548 /* UH */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t subsystem_id;
    uint8_t _platform1;
    uint8_t event_severity;
    uint8_t event_subtype;
    uint8_t _platform2[4];
    uint8_t _resv1[2];
    uint16_t action_flags;
    uint8_t _resv2[4];
} QEMU_PACKED;
    148 
/*
 * EPOW (Environmental and Power Warning) section.  sensor_value holds
 * the requested action, event_modifier/extended_modifier qualify it.
 */
struct rtas_event_log_v6_epow {
#define RTAS_LOG_V6_SECTION_ID_EPOW                 0x4550 /* EP */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t sensor_value;
#define RTAS_LOG_V6_EPOW_ACTION_RESET                    0
#define RTAS_LOG_V6_EPOW_ACTION_WARN_COOLING             1
#define RTAS_LOG_V6_EPOW_ACTION_WARN_POWER               2
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN          3
#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_HALT              4
#define RTAS_LOG_V6_EPOW_ACTION_MAIN_ENCLOSURE           5
#define RTAS_LOG_V6_EPOW_ACTION_POWER_OFF                7
    uint8_t event_modifier;
#define RTAS_LOG_V6_EPOW_MODIFIER_NORMAL                 1
#define RTAS_LOG_V6_EPOW_MODIFIER_ON_UPS                 2
#define RTAS_LOG_V6_EPOW_MODIFIER_CRITICAL               3
#define RTAS_LOG_V6_EPOW_MODIFIER_TEMPERATURE            4
    uint8_t extended_modifier;  /* requires hdr.section_version >= 2 */
#define RTAS_LOG_V6_EPOW_XMODIFIER_SYSTEM_WIDE           0
#define RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC    1
    uint8_t _resv;
    uint64_t reason_code;
} QEMU_PACKED;
    171 
/* Complete version-6 EPOW log: v6 header + Main-A + Main-B + EPOW. */
struct epow_extended_log {
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_maina maina;
    struct rtas_event_log_v6_mainb mainb;
    struct rtas_event_log_v6_epow epow;
} QEMU_PACKED;
    178 
/*
 * Identifier carried in a hotplug event; which member is valid is
 * selected by the event's hotplug_identifier (RTAS_LOG_V6_HP_ID_*).
 */
union drc_identifier {
    uint32_t index;
    uint32_t count;
    struct {
        uint32_t count;
        uint32_t index;
    } count_indexed;
    char name[1];   /* variable-length DRC name; [1] is a placeholder */
} QEMU_PACKED;
    188 
/*
 * Hotplug section: resource type, add/remove action, and the DRC
 * identifier (interpreted according to hotplug_identifier).
 */
struct rtas_event_log_v6_hp {
#define RTAS_LOG_V6_SECTION_ID_HOTPLUG              0x4850 /* HP */
    struct rtas_event_log_v6_section_header hdr;
    uint8_t hotplug_type;
#define RTAS_LOG_V6_HP_TYPE_CPU                          1
#define RTAS_LOG_V6_HP_TYPE_MEMORY                       2
#define RTAS_LOG_V6_HP_TYPE_SLOT                         3
#define RTAS_LOG_V6_HP_TYPE_PHB                          4
#define RTAS_LOG_V6_HP_TYPE_PCI                          5
#define RTAS_LOG_V6_HP_TYPE_PMEM                         6
    uint8_t hotplug_action;
#define RTAS_LOG_V6_HP_ACTION_ADD                        1
#define RTAS_LOG_V6_HP_ACTION_REMOVE                     2
    uint8_t hotplug_identifier;
#define RTAS_LOG_V6_HP_ID_DRC_NAME                       1
#define RTAS_LOG_V6_HP_ID_DRC_INDEX                      2
#define RTAS_LOG_V6_HP_ID_DRC_COUNT                      3
#define RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED              4
    uint8_t reserved;
    union drc_identifier drc_id;
} QEMU_PACKED;
    210 
/* Complete version-6 hotplug log: v6 header + Main-A + Main-B + HP. */
struct hp_extended_log {
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_maina maina;
    struct rtas_event_log_v6_mainb mainb;
    struct rtas_event_log_v6_hp hp;
} QEMU_PACKED;
    217 
/*
 * Machine-check section.  The meaning of sub_err_type depends on
 * error_type (UE/SLB/ERAT/TLB each have their own sub-code namespace).
 */
struct rtas_event_log_v6_mc {
#define RTAS_LOG_V6_SECTION_ID_MC                   0x4D43 /* MC */
    struct rtas_event_log_v6_section_header hdr;
    uint32_t fru_id;
    uint32_t proc_id;
    uint8_t error_type;
#define RTAS_LOG_V6_MC_TYPE_UE                           0
#define RTAS_LOG_V6_MC_TYPE_SLB                          1
#define RTAS_LOG_V6_MC_TYPE_ERAT                         2
#define RTAS_LOG_V6_MC_TYPE_TLB                          4
#define RTAS_LOG_V6_MC_TYPE_D_CACHE                      5
#define RTAS_LOG_V6_MC_TYPE_I_CACHE                      7
    uint8_t sub_err_type;
#define RTAS_LOG_V6_MC_UE_INDETERMINATE                  0
#define RTAS_LOG_V6_MC_UE_IFETCH                         1
#define RTAS_LOG_V6_MC_UE_PAGE_TABLE_WALK_IFETCH         2
#define RTAS_LOG_V6_MC_UE_LOAD_STORE                     3
#define RTAS_LOG_V6_MC_UE_PAGE_TABLE_WALK_LOAD_STORE     4
#define RTAS_LOG_V6_MC_SLB_PARITY                        0
#define RTAS_LOG_V6_MC_SLB_MULTIHIT                      1
#define RTAS_LOG_V6_MC_SLB_INDETERMINATE                 2
#define RTAS_LOG_V6_MC_ERAT_PARITY                       1
#define RTAS_LOG_V6_MC_ERAT_MULTIHIT                     2
#define RTAS_LOG_V6_MC_ERAT_INDETERMINATE                3
#define RTAS_LOG_V6_MC_TLB_PARITY                        1
#define RTAS_LOG_V6_MC_TLB_MULTIHIT                      2
#define RTAS_LOG_V6_MC_TLB_INDETERMINATE                 3
/*
 * Per PAPR,
 * For UE error type, set bit 1 of sub_err_type to indicate effective addr is
 * provided. For other error types (SLB/ERAT/TLB), set bit 0 to indicate
 * same.
 */
#define RTAS_LOG_V6_MC_UE_EA_ADDR_PROVIDED               0x40
#define RTAS_LOG_V6_MC_EA_ADDR_PROVIDED                  0x80
    uint8_t reserved_1[6];
    uint64_t effective_address;
    uint64_t logical_address;
} QEMU_PACKED;
    257 
/* Complete version-6 machine-check log: v6 header + MC section only. */
struct mc_extended_log {
    struct rtas_event_log_v6 v6hdr;
    struct rtas_event_log_v6_mc mc;
} QEMU_PACKED;
    262 
/*
 * One row of the instruction-side machine-check decode table:
 * an SRR1 (mask, value) pattern and the log fields to emit for it.
 */
struct MC_ierror_table {
    unsigned long srr1_mask;
    unsigned long srr1_value;
    bool nip_valid; /* nip is a valid indicator of faulting address */
    uint8_t error_type;
    uint8_t error_subtype;
    unsigned int initiator;
    unsigned int severity;
};
    272 
/*
 * SRR1 patterns for instruction-fetch machine checks.  Presumably
 * scanned in order with first match winning by the (truncated here)
 * spapr_mce_get_elog_type() — TODO confirm against the consumer.
 */
static const struct MC_ierror_table mc_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000040000, true,
  RTAS_LOG_V6_MC_TYPE_UE, RTAS_LOG_V6_MC_UE_IFETCH,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000080000, true,
  RTAS_LOG_V6_MC_TYPE_SLB, RTAS_LOG_V6_MC_SLB_PARITY,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000000c0000, true,
  RTAS_LOG_V6_MC_TYPE_SLB, RTAS_LOG_V6_MC_SLB_MULTIHIT,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000100000, true,
  RTAS_LOG_V6_MC_TYPE_ERAT, RTAS_LOG_V6_MC_ERAT_MULTIHIT,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000140000, true,
  RTAS_LOG_V6_MC_TYPE_TLB, RTAS_LOG_V6_MC_TLB_MULTIHIT,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000180000, true,
  RTAS_LOG_V6_MC_TYPE_UE, RTAS_LOG_V6_MC_UE_PAGE_TABLE_WALK_IFETCH,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, } };
    292 
/*
 * One row of the data-side machine-check decode table: a DSISR bit
 * and the log fields to emit when that bit is set.
 */
struct MC_derror_table {
    unsigned long dsisr_value;
    bool dar_valid; /* dar is a valid indicator of faulting address */
    uint8_t error_type;
    uint8_t error_subtype;
    unsigned int initiator;
    unsigned int severity;
};
    301 
/*
 * DSISR bits for load/store machine checks.  Row order is significant
 * (note the "Before PARITY" entry below); presumably first match wins
 * in the consumer — TODO confirm in spapr_mce_get_elog_type().
 */
static const struct MC_derror_table mc_derror_table[] = {
{ 0x00008000, false,
  RTAS_LOG_V6_MC_TYPE_UE, RTAS_LOG_V6_MC_UE_LOAD_STORE,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00004000, true,
  RTAS_LOG_V6_MC_TYPE_UE, RTAS_LOG_V6_MC_UE_PAGE_TABLE_WALK_LOAD_STORE,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000800, true,
  RTAS_LOG_V6_MC_TYPE_ERAT, RTAS_LOG_V6_MC_ERAT_MULTIHIT,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000400, true,
  RTAS_LOG_V6_MC_TYPE_TLB, RTAS_LOG_V6_MC_TLB_MULTIHIT,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000080, true,
  RTAS_LOG_V6_MC_TYPE_SLB, RTAS_LOG_V6_MC_SLB_MULTIHIT,  /* Before PARITY */
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, },
{ 0x00000100, true,
  RTAS_LOG_V6_MC_TYPE_SLB, RTAS_LOG_V6_MC_SLB_PARITY,
  RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, } };
    321 
/* SRR1 bit 42 set => machine check on a load/store (DSISR/DAR relevant)
 * rather than an instruction fetch — per Power ISA; confirm for target. */
#define SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42))
    323 
    324 typedef enum EventClass {
    325     EVENT_CLASS_INTERNAL_ERRORS     = 0,
    326     EVENT_CLASS_EPOW                = 1,
    327     EVENT_CLASS_RESERVED            = 2,
    328     EVENT_CLASS_HOT_PLUG            = 3,
    329     EVENT_CLASS_IO                  = 4,
    330     EVENT_CLASS_MAX
    331 } EventClassIndex;
    332 #define EVENT_CLASS_MASK(index) (1 << (31 - index))
    333 
/*
 * Device-tree node name under /event-sources for each event class.
 * EVENT_CLASS_RESERVED deliberately has no entry (NULL, never enabled).
 */
static const char * const event_names[EVENT_CLASS_MAX] = {
    [EVENT_CLASS_INTERNAL_ERRORS]       = "internal-errors",
    [EVENT_CLASS_EPOW]                  = "epow-events",
    [EVENT_CLASS_HOT_PLUG]              = "hot-plug-events",
    [EVENT_CLASS_IO]                    = "ibm,io-events",
};
    340 
/*
 * Runtime state of one event source class: the irq used to signal the
 * guest, the class's bit in the event mask, and whether it is in use.
 */
struct SpaprEventSource {
    int irq;
    uint32_t mask;
    bool enabled;
};
    346 
    347 static SpaprEventSource *spapr_event_sources_new(void)
    348 {
    349     return g_new0(SpaprEventSource, EVENT_CLASS_MAX);
    350 }
    351 
    352 static void spapr_event_sources_register(SpaprEventSource *event_sources,
    353                                         EventClassIndex index, int irq)
    354 {
    355     /* we only support 1 irq per event class at the moment */
    356     g_assert(event_sources);
    357     g_assert(!event_sources[index].enabled);
    358     event_sources[index].irq = irq;
    359     event_sources[index].mask = EVENT_CLASS_MASK(index);
    360     event_sources[index].enabled = true;
    361 }
    362 
    363 static const SpaprEventSource *
    364 spapr_event_sources_get_source(SpaprEventSource *event_sources,
    365                                EventClassIndex index)
    366 {
    367     g_assert(index < EVENT_CLASS_MAX);
    368     g_assert(event_sources);
    369 
    370     return &event_sources[index];
    371 }
    372 
/*
 * Build the /event-sources node of the guest device tree: one subnode
 * with an "interrupts" property per enabled event class, plus the
 * aggregate "interrupt-ranges" property on the parent.
 */
void spapr_dt_events(SpaprMachineState *spapr, void *fdt)
{
    /* two cells (<irq, 1>) per class at most */
    uint32_t irq_ranges[EVENT_CLASS_MAX * 2];
    int i, count = 0, event_sources;
    SpaprEventSource *events = spapr->event_sources;

    g_assert(events);

    _FDT(event_sources = fdt_add_subnode(fdt, 0, "event-sources"));

    for (i = 0, count = 0; i < EVENT_CLASS_MAX; i++) {
        int node_offset;
        uint32_t interrupts[2];
        const SpaprEventSource *source =
            spapr_event_sources_get_source(events, i);
        const char *source_name = event_names[i];

        if (!source->enabled) {
            continue;
        }

        /*
         * interrupts[] is presumably filled in device-tree (big-endian)
         * cell format by spapr_dt_irq() — note the explicit
         * cpu_to_be32(1) used alongside interrupts[0] below.
         */
        spapr_dt_irq(interrupts, source->irq, false);

        _FDT(node_offset = fdt_add_subnode(fdt, event_sources, source_name));
        _FDT(fdt_setprop(fdt, node_offset, "interrupts", interrupts,
                         sizeof(interrupts)));

        /* accumulate an <irq, 1> pair for "interrupt-ranges" */
        irq_ranges[count++] = interrupts[0];
        irq_ranges[count++] = cpu_to_be32(1);
    }

    _FDT((fdt_setprop(fdt, event_sources, "interrupt-controller", NULL, 0)));
    _FDT((fdt_setprop_cell(fdt, event_sources, "#interrupt-cells", 2)));
    _FDT((fdt_setprop(fdt, event_sources, "interrupt-ranges",
                      irq_ranges, count * sizeof(uint32_t))));
}
    409 
/*
 * Map an RTAS log type to the event source that signals it.  Hotplug
 * events use the dedicated hot-plug source only when the guest
 * negotiated OV5_HP_EVT at CAS; otherwise they fall back to the legacy
 * EPOW source.  Returns NULL for log types with no source.
 */
static const SpaprEventSource *
rtas_event_log_to_source(SpaprMachineState *spapr, int log_type)
{
    const SpaprEventSource *source;

    g_assert(spapr->event_sources);

    switch (log_type) {
    case RTAS_LOG_TYPE_HOTPLUG:
        source = spapr_event_sources_get_source(spapr->event_sources,
                                                EVENT_CLASS_HOT_PLUG);
        if (spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT)) {
            g_assert(source->enabled);
            break;
        }
        /* fall through back to epow for legacy hotplug interrupt source */
    case RTAS_LOG_TYPE_EPOW:
        source = spapr_event_sources_get_source(spapr->event_sources,
                                                EVENT_CLASS_EPOW);
        break;
    default:
        source = NULL;
    }

    return source;
}
    436 
    437 static int rtas_event_log_to_irq(SpaprMachineState *spapr, int log_type)
    438 {
    439     const SpaprEventSource *source;
    440 
    441     source = rtas_event_log_to_source(spapr, log_type);
    442     g_assert(source);
    443     g_assert(source->enabled);
    444 
    445     return source->irq;
    446 }
    447 
    448 static uint32_t spapr_event_log_entry_type(SpaprEventLogEntry *entry)
    449 {
    450     return entry->summary & RTAS_LOG_TYPE_MASK;
    451 }
    452 
/*
 * Append @entry to the machine's pending-event list; presumably it is
 * consumed later by the RTAS event collection path (not in this chunk).
 * Ownership of @entry transfers to the queue.
 */
static void rtas_event_log_queue(SpaprMachineState *spapr,
                                 SpaprEventLogEntry *entry)
{
    QTAILQ_INSERT_TAIL(&spapr->pending_events, entry, next);
}
    458 
/*
 * Remove and return the first pending entry whose source class is
 * selected by @event_mask, or NULL if no entry matches.  The caller
 * takes ownership of the returned entry.
 */
static SpaprEventLogEntry *rtas_event_log_dequeue(SpaprMachineState *spapr,
                                                  uint32_t event_mask)
{
    SpaprEventLogEntry *entry = NULL;

    QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
        const SpaprEventSource *source =
            rtas_event_log_to_source(spapr,
                                     spapr_event_log_entry_type(entry));

        g_assert(source);
        if (source->mask & event_mask) {
            break;
        }
    }

    /* QTAILQ_FOREACH leaves entry == NULL when the list was exhausted */
    if (entry) {
        QTAILQ_REMOVE(&spapr->pending_events, entry, next);
    }

    return entry;
}
    481 
    482 static bool rtas_event_log_contains(SpaprMachineState *spapr, uint32_t event_mask)
    483 {
    484     SpaprEventLogEntry *entry = NULL;
    485 
    486     QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
    487         const SpaprEventSource *source =
    488             rtas_event_log_to_source(spapr,
    489                                      spapr_event_log_entry_type(entry));
    490 
    491         if (source->mask & event_mask) {
    492             return true;
    493         }
    494     }
    495 
    496     return false;
    497 }
    498 
/* Monotonic platform-log-id counter; gives each generated log a unique plid. */
static uint32_t next_plid;
    500 
/*
 * Fill in the common version-6 extended-log header: mark the log
 * valid/new/big-endian, select the PowerPC platform-event format, and
 * stamp IBM as the creator company.
 */
static void spapr_init_v6hdr(struct rtas_event_log_v6 *v6hdr)
{
    v6hdr->b0 = RTAS_LOG_V6_B0_VALID | RTAS_LOG_V6_B0_NEW_LOG
        | RTAS_LOG_V6_B0_BIGENDIAN;
    v6hdr->b2 = RTAS_LOG_V6_B2_POWERPC_FORMAT
        | RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT;
    v6hdr->company = cpu_to_be32(RTAS_LOG_V6_COMPANY_IBM);
}
    509 
    510 static void spapr_init_maina(SpaprMachineState *spapr,
    511                              struct rtas_event_log_v6_maina *maina,
    512                              int section_count)
    513 {
    514     struct tm tm;
    515     int year;
    516 
    517     maina->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINA);
    518     maina->hdr.section_length = cpu_to_be16(sizeof(*maina));
    519     /* FIXME: section version, subtype and creator id? */
    520     spapr_rtc_read(&spapr->rtc, &tm, NULL);
    521     year = tm.tm_year + 1900;
    522     maina->creation_date = cpu_to_be32((to_bcd(year / 100) << 24)
    523                                        | (to_bcd(year % 100) << 16)
    524                                        | (to_bcd(tm.tm_mon + 1) << 8)
    525                                        | to_bcd(tm.tm_mday));
    526     maina->creation_time = cpu_to_be32((to_bcd(tm.tm_hour) << 24)
    527                                        | (to_bcd(tm.tm_min) << 16)
    528                                        | (to_bcd(tm.tm_sec) << 8));
    529     maina->creator_id = 'H'; /* Hypervisor */
    530     maina->section_count = section_count;
    531     maina->plid = next_plid++;
    532 }
    533 
/*
 * Powerdown notifier: queue an informational EPOW log asking the guest
 * to perform a graceful system shutdown, then pulse the EPOW interrupt
 * so the guest collects it.  The queued entry (and its extended log)
 * are heap-allocated; ownership passes to the pending-event queue.
 */
static void spapr_powerdown_req(Notifier *n, void *opaque)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    SpaprEventLogEntry *entry;
    struct rtas_event_log_v6 *v6hdr;
    struct rtas_event_log_v6_maina *maina;
    struct rtas_event_log_v6_mainb *mainb;
    struct rtas_event_log_v6_epow *epow;
    struct epow_extended_log *new_epow;

    entry = g_new(SpaprEventLogEntry, 1);
    new_epow = g_malloc0(sizeof(*new_epow));
    entry->extended_log = new_epow;

    v6hdr = &new_epow->v6hdr;
    maina = &new_epow->maina;
    mainb = &new_epow->mainb;
    epow = &new_epow->epow;

    entry->summary = RTAS_LOG_VERSION_6
                       | RTAS_LOG_SEVERITY_EVENT
                       | RTAS_LOG_DISPOSITION_NOT_RECOVERED
                       | RTAS_LOG_OPTIONAL_PART_PRESENT
                       | RTAS_LOG_TYPE_EPOW;
    entry->extended_length = sizeof(*new_epow);

    spapr_init_v6hdr(v6hdr);
    spapr_init_maina(spapr, maina, 3 /* Main-A, Main-B and EPOW */);

    mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
    mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
    /* FIXME: section version, subtype and creator id? */
    mainb->subsystem_id = 0xa0; /* External environment */
    mainb->event_severity = 0x00; /* Informational / non-error */
    mainb->event_subtype = 0xd0; /* Normal shutdown */

    epow->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_EPOW);
    epow->hdr.section_length = cpu_to_be16(sizeof(*epow));
    epow->hdr.section_version = 2; /* includes extended modifier */
    /* FIXME: section subtype and creator id? */
    epow->sensor_value = RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN;
    epow->event_modifier = RTAS_LOG_V6_EPOW_MODIFIER_NORMAL;
    epow->extended_modifier = RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC;

    rtas_event_log_queue(spapr, entry);

    /* notify the guest that an EPOW event is pending */
    qemu_irq_pulse(spapr_qirq(spapr,
                   rtas_event_log_to_irq(spapr, RTAS_LOG_TYPE_EPOW)));
}
    583 
/*
 * Queue a hotplug event log and signal the guest.
 *
 * @hp_id:     how drc_id is to be interpreted (RTAS_LOG_V6_HP_ID_*)
 * @hp_action: RTAS_LOG_V6_HP_ACTION_ADD or _REMOVE
 * @drc_type:  DR connector type of the affected resource
 * @drc_id:    identifier (index, count, or count+index per @hp_id)
 *
 * The interrupt used depends on what the guest negotiated: the
 * dedicated hotplug source if available, else legacy EPOW (see
 * rtas_event_log_to_source()).
 */
static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
                                    SpaprDrcType drc_type,
                                    union drc_identifier *drc_id)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    SpaprEventLogEntry *entry;
    struct hp_extended_log *new_hp;
    struct rtas_event_log_v6 *v6hdr;
    struct rtas_event_log_v6_maina *maina;
    struct rtas_event_log_v6_mainb *mainb;
    struct rtas_event_log_v6_hp *hp;

    entry = g_new(SpaprEventLogEntry, 1);
    new_hp = g_new0(struct hp_extended_log, 1);
    entry->extended_log = new_hp;

    v6hdr = &new_hp->v6hdr;
    maina = &new_hp->maina;
    mainb = &new_hp->mainb;
    hp = &new_hp->hp;

    entry->summary = RTAS_LOG_VERSION_6
        | RTAS_LOG_SEVERITY_EVENT
        | RTAS_LOG_DISPOSITION_NOT_RECOVERED
        | RTAS_LOG_OPTIONAL_PART_PRESENT
        | RTAS_LOG_INITIATOR_HOTPLUG
        | RTAS_LOG_TYPE_HOTPLUG;
    entry->extended_length = sizeof(*new_hp);

    spapr_init_v6hdr(v6hdr);
    spapr_init_maina(spapr, maina, 3 /* Main-A, Main-B, HP */);

    mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
    mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
    mainb->subsystem_id = 0x80; /* External environment */
    mainb->event_severity = 0x00; /* Informational / non-error */
    mainb->event_subtype = 0x00; /* Normal shutdown */

    hp->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_HOTPLUG);
    hp->hdr.section_length = cpu_to_be16(sizeof(*hp));
    hp->hdr.section_version = 1; /* includes extended modifier */
    hp->hotplug_action = hp_action;
    hp->hotplug_identifier = hp_id;

    /* translate the DR connector type into the log's encoding */
    switch (drc_type) {
    case SPAPR_DR_CONNECTOR_TYPE_PCI:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_LMB:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_CPU:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_CPU;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_PHB:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PHB;
        break;
    case SPAPR_DR_CONNECTOR_TYPE_PMEM:
        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PMEM;
        break;
    default:
        /* we shouldn't be signaling hotplug events for resources
         * that don't support them
         */
        g_assert(false);
        return;
    }

    /* only the member selected by hp_id is filled in (big-endian) */
    if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT) {
        hp->drc_id.count = cpu_to_be32(drc_id->count);
    } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_INDEX) {
        hp->drc_id.index = cpu_to_be32(drc_id->index);
    } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED) {
        /* we should not be using count_indexed value unless the guest
         * supports dedicated hotplug event source
         */
        g_assert(spapr_memory_hot_unplug_supported(spapr));
        hp->drc_id.count_indexed.count =
            cpu_to_be32(drc_id->count_indexed.count);
        hp->drc_id.count_indexed.index =
            cpu_to_be32(drc_id->count_indexed.index);
    }

    rtas_event_log_queue(spapr, entry);

    qemu_irq_pulse(spapr_qirq(spapr,
                   rtas_event_log_to_irq(spapr, RTAS_LOG_TYPE_HOTPLUG)));
}
    672 
    673 void spapr_hotplug_req_add_by_index(SpaprDrc *drc)
    674 {
    675     SpaprDrcType drc_type = spapr_drc_type(drc);
    676     union drc_identifier drc_id;
    677 
    678     drc_id.index = spapr_drc_index(drc);
    679     spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
    680                             RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
    681 }
    682 
    683 void spapr_hotplug_req_remove_by_index(SpaprDrc *drc)
    684 {
    685     SpaprDrcType drc_type = spapr_drc_type(drc);
    686     union drc_identifier drc_id;
    687 
    688     drc_id.index = spapr_drc_index(drc);
    689     spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
    690                             RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
    691 }
    692 
    693 void spapr_hotplug_req_add_by_count(SpaprDrcType drc_type,
    694                                        uint32_t count)
    695 {
    696     union drc_identifier drc_id;
    697 
    698     drc_id.count = count;
    699     spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
    700                             RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
    701 }
    702 
    703 void spapr_hotplug_req_remove_by_count(SpaprDrcType drc_type,
    704                                           uint32_t count)
    705 {
    706     union drc_identifier drc_id;
    707 
    708     drc_id.count = count;
    709     spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
    710                             RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
    711 }
    712 
    713 void spapr_hotplug_req_add_by_count_indexed(SpaprDrcType drc_type,
    714                                             uint32_t count, uint32_t index)
    715 {
    716     union drc_identifier drc_id;
    717 
    718     drc_id.count_indexed.count = count;
    719     drc_id.count_indexed.index = index;
    720     spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
    721                             RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
    722 }
    723 
    724 void spapr_hotplug_req_remove_by_count_indexed(SpaprDrcType drc_type,
    725                                                uint32_t count, uint32_t index)
    726 {
    727     union drc_identifier drc_id;
    728 
    729     drc_id.count_indexed.count = count;
    730     drc_id.count_indexed.index = index;
    731     spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
    732                             RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
    733 }
    734 
    735 static void spapr_mc_set_ea_provided_flag(struct mc_extended_log *ext_elog)
    736 {
    737     switch (ext_elog->mc.error_type) {
    738     case RTAS_LOG_V6_MC_TYPE_UE:
    739         ext_elog->mc.sub_err_type |= RTAS_LOG_V6_MC_UE_EA_ADDR_PROVIDED;
    740         break;
    741     case RTAS_LOG_V6_MC_TYPE_SLB:
    742     case RTAS_LOG_V6_MC_TYPE_ERAT:
    743     case RTAS_LOG_V6_MC_TYPE_TLB:
    744         ext_elog->mc.sub_err_type |= RTAS_LOG_V6_MC_EA_ADDR_PROVIDED;
    745         break;
    746     default:
    747         break;
    748     }
    749 }
    750 
/*
 * Build the RTAS error log "summary" word for a machine check and fill
 * in the error type / subtype / effective address of @ext_elog, based
 * on the CPU's SRR1 (and DSISR for load/store-side errors).
 *
 * Classification uses mc_derror_table (data side, keyed on DSISR bits)
 * and mc_ierror_table (instruction side, keyed on SRR1 bits), defined
 * earlier in this file.  If no table entry matches, only the initiator
 * is recorded; the error type fields keep the caller's zero
 * initialization.
 */
static uint32_t spapr_mce_get_elog_type(PowerPCCPU *cpu, bool recovered,
                                        struct mc_extended_log *ext_elog)
{
    int i;
    CPUPPCState *env = &cpu->env;
    uint32_t summary;
    uint64_t dsisr = env->spr[SPR_DSISR];

    /* Version and disposition bits common to every machine check log. */
    summary = RTAS_LOG_VERSION_6 | RTAS_LOG_OPTIONAL_PART_PRESENT;
    if (recovered) {
        summary |= RTAS_LOG_DISPOSITION_FULLY_RECOVERED;
    } else {
        summary |= RTAS_LOG_DISPOSITION_NOT_RECOVERED;
    }

    if (SRR1_MC_LOADSTORE(env->spr[SPR_SRR1])) {
        /* Data-side (load/store) machine check: classify via DSISR bits.
         * The first matching table entry wins. */
        for (i = 0; i < ARRAY_SIZE(mc_derror_table); i++) {
            if (!(dsisr & mc_derror_table[i].dsisr_value)) {
                continue;
            }

            ext_elog->mc.error_type = mc_derror_table[i].error_type;
            ext_elog->mc.sub_err_type = mc_derror_table[i].error_subtype;
            if (mc_derror_table[i].dar_valid) {
                /* DAR holds the faulting effective address. */
                ext_elog->mc.effective_address = cpu_to_be64(env->spr[SPR_DAR]);
                spapr_mc_set_ea_provided_flag(ext_elog);
            }

            summary |= mc_derror_table[i].initiator
                        | mc_derror_table[i].severity;

            return summary;
        }
    } else {
        /* Instruction-side machine check: classify via masked SRR1 bits. */
        for (i = 0; i < ARRAY_SIZE(mc_ierror_table); i++) {
            if ((env->spr[SPR_SRR1] & mc_ierror_table[i].srr1_mask) !=
                    mc_ierror_table[i].srr1_value) {
                continue;
            }

            ext_elog->mc.error_type = mc_ierror_table[i].error_type;
            ext_elog->mc.sub_err_type = mc_ierror_table[i].error_subtype;
            if (mc_ierror_table[i].nip_valid) {
                /* The interrupted instruction address is the EA. */
                ext_elog->mc.effective_address = cpu_to_be64(env->nip);
                spapr_mc_set_ea_provided_flag(ext_elog);
            }

            summary |= mc_ierror_table[i].initiator
                        | mc_ierror_table[i].severity;

            return summary;
        }
    }

    /* No table entry matched: report only the initiating CPU. */
    summary |= RTAS_LOG_INITIATOR_CPU;
    return summary;
}
    808 
/*
 * Deliver a machine check to the guest via FWNMI: build an RTAS error
 * log (fixed header plus machine-check extended section), copy it into
 * the RTAS area in guest memory, take the "ibm,nmi-interlock"
 * interlock, and vector the CPU to the guest-registered machine check
 * handler.  If the RTAS base address cannot be found, delivery is
 * aborted (and the guest is panicked when the error was not recovered).
 */
static void spapr_mce_dispatch_elog(SpaprMachineState *spapr, PowerPCCPU *cpu,
                                    bool recovered)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint64_t rtas_addr;
    struct rtas_error_log log;
    struct mc_extended_log *ext_elog;
    uint32_t summary;

    /* Zero-allocated so unclassified error fields read as zero. */
    ext_elog = g_malloc0(sizeof(*ext_elog));
    summary = spapr_mce_get_elog_type(cpu, recovered, ext_elog);

    log.summary = cpu_to_be32(summary);
    log.extended_length = cpu_to_be32(sizeof(*ext_elog));

    spapr_init_v6hdr(&ext_elog->v6hdr);
    ext_elog->mc.hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MC);
    ext_elog->mc.hdr.section_length =
                    cpu_to_be16(sizeof(struct rtas_event_log_v6_mc));
    ext_elog->mc.hdr.section_version = 1;

    /* get rtas addr from fdt */
    rtas_addr = spapr_get_rtas_addr();
    if (!rtas_addr) {
        /* Cannot deliver the log; without recovery this is fatal. */
        if (!recovered) {
            error_report(
"FWNMI: Unable to deliver machine check to guest: rtas_addr not found.");
            qemu_system_guest_panicked(NULL);
        } else {
            warn_report(
"FWNMI: Unable to deliver machine check to guest: rtas_addr not found. "
"Machine check recovered.");
        }
        g_free(ext_elog);
        return;
    }

    /*
     * By taking the interlock, we assume that the MCE will be
     * delivered to the guest. CAUTION: don't add anything that could
     * prevent the MCE to be delivered after this line, otherwise the
     * guest won't be able to release the interlock and ultimately
     * hang/crash?
     */
    spapr->fwnmi_machine_check_interlock = cpu->vcpu_id;

    /* Layout in guest memory at rtas_addr + RTAS_ERROR_LOG_OFFSET:
     * saved r3, then the fixed header, then the extended log. */
    stq_be_phys(&address_space_memory, rtas_addr + RTAS_ERROR_LOG_OFFSET,
                env->gpr[3]);
    cpu_physical_memory_write(rtas_addr + RTAS_ERROR_LOG_OFFSET +
                              sizeof(env->gpr[3]), &log, sizeof(log));
    cpu_physical_memory_write(rtas_addr + RTAS_ERROR_LOG_OFFSET +
                              sizeof(env->gpr[3]) + sizeof(log), ext_elog,
                              sizeof(*ext_elog));
    g_free(ext_elog);

    /* r3 points the guest handler at the error log just written. */
    env->gpr[3] = rtas_addr + RTAS_ERROR_LOG_OFFSET;

    ppc_cpu_do_fwnmi_machine_check(cs, spapr->fwnmi_machine_check_addr);
}
    869 
/*
 * Entry point for reporting a machine check on @cpu to the guest.
 *
 * If the guest has not registered an FWNMI machine check handler
 * (fwnmi_machine_check_addr == -1), the error is delivered as an
 * ordinary architected machine check interrupt.  Otherwise this waits
 * for the FWNMI interlock (only one machine check may be in flight at a
 * time), detects the fatal nested case where the same vCPU faults again
 * before releasing the interlock, and then dispatches the error log.
 * Must be called with the iothread lock held (implied by the use of
 * qemu_cond_wait_iothread() — NOTE(review): confirm against callers).
 */
void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    CPUState *cs = CPU(cpu);
    int ret;

    if (spapr->fwnmi_machine_check_addr == -1) {
        /* Non-FWNMI case, deliver it like an architected CPU interrupt. */
        cs->exception_index = POWERPC_EXCP_MCHECK;
        ppc_cpu_do_interrupt(cs);
        return;
    }

    /* Wait for FWNMI interlock. */
    while (spapr->fwnmi_machine_check_interlock != -1) {
        /*
         * Check whether the same CPU got machine check error
         * while still handling the mc error (i.e., before
         * that CPU called "ibm,nmi-interlock")
         */
        if (spapr->fwnmi_machine_check_interlock == cpu->vcpu_id) {
            /* Nested machine check on one vCPU cannot be delivered. */
            if (!recovered) {
                error_report(
"FWNMI: Unable to deliver machine check to guest: nested machine check.");
                qemu_system_guest_panicked(NULL);
            } else {
                warn_report(
"FWNMI: Unable to deliver machine check to guest: nested machine check. "
"Machine check recovered.");
            }
            return;
        }
        qemu_cond_wait_iothread(&spapr->fwnmi_machine_check_interlock_cond);
        if (spapr->fwnmi_machine_check_addr == -1) {
            /*
             * If the machine was reset while waiting for the interlock,
             * abort the delivery. The machine check applies to a context
             * that no longer exists, so it wouldn't make sense to deliver
             * it now.
             */
            return;
        }
    }

    /*
     * Try to block migration while FWNMI is being handled, so the
     * machine check handler runs where the information passed to it
     * actually makes sense.  This shouldn't actually block migration,
     * only delay it slightly, assuming migration is retried.  If the
     * attempt to block fails, carry on.  Unfortunately, it always
     * fails when running with -only-migrate.  A proper interface to
     * delay migration completion for a bit could avoid that.
     */
    ret = migrate_add_blocker(spapr->fwnmi_migration_blocker, NULL);
    if (ret == -EBUSY) {
        warn_report("Received a fwnmi while migration was in progress");
    }

    spapr_mce_dispatch_elog(spapr, cpu, recovered);
}
    930 
/*
 * RTAS "check-exception" call: dequeue the oldest pending event log
 * entry matching the guest-supplied event mask and copy it (fixed
 * header followed by the extended log) into the guest buffer at @buf.
 * Returns RTAS_OUT_NO_ERRORS_FOUND when no matching event is queued.
 */
static void check_exception(PowerPCCPU *cpu, SpaprMachineState *spapr,
                            uint32_t token, uint32_t nargs,
                            target_ulong args,
                            uint32_t nret, target_ulong rets)
{
    uint32_t mask, buf, len, event_len;
    SpaprEventLogEntry *event;
    struct rtas_error_log header;
    int i;

    /* PAPR allows 6 or 7 inputs (the 7th being the optional
     * critical-extension argument — NOTE(review): presumed; confirm
     * against the PAPR+ check-exception definition). */
    if ((nargs < 6) || (nargs > 7) || nret != 1) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    mask = rtas_ld(args, 2);   /* event classes the guest wants */
    buf = rtas_ld(args, 4);    /* guest buffer address */
    len = rtas_ld(args, 5);    /* guest buffer length */

    event = rtas_event_log_dequeue(spapr, mask);
    if (!event) {
        goto out_no_events;
    }

    event_len = event->extended_length + sizeof(header);

    /* Clamp to the actual event size when the buffer is larger.
     * NOTE(review): the writes below use event->extended_length, not
     * len — if the guest buffer is SMALLER than the event, the copy
     * still writes the full event past buf+len. Confirm whether this
     * matches the intended PAPR behavior or needs bounding. */
    if (event_len < len) {
        len = event_len;
    }

    header.summary = cpu_to_be32(event->summary);
    header.extended_length = cpu_to_be32(event->extended_length);
    cpu_physical_memory_write(buf, &header, sizeof(header));
    cpu_physical_memory_write(buf + sizeof(header), event->extended_log,
                              event->extended_length);
    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    g_free(event->extended_log);
    g_free(event);

    /* according to PAPR+, the IRQ must be left asserted, or re-asserted, if
     * there are still pending events to be fetched via check-exception. We
     * do the latter here, since our code relies on edge-triggered
     * interrupts.
     */
    for (i = 0; i < EVENT_CLASS_MAX; i++) {
        if (rtas_event_log_contains(spapr, EVENT_CLASS_MASK(i))) {
            const SpaprEventSource *source =
                spapr_event_sources_get_source(spapr->event_sources, i);

            g_assert(source->enabled);
            qemu_irq_pulse(spapr_qirq(spapr, source->irq));
        }
    }

    return;

out_no_events:
    rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}
    990 
    991 static void event_scan(PowerPCCPU *cpu, SpaprMachineState *spapr,
    992                        uint32_t token, uint32_t nargs,
    993                        target_ulong args,
    994                        uint32_t nret, target_ulong rets)
    995 {
    996     int i;
    997     if (nargs != 4 || nret != 1) {
    998         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
    999         return;
   1000     }
   1001 
   1002     for (i = 0; i < EVENT_CLASS_MAX; i++) {
   1003         if (rtas_event_log_contains(spapr, EVENT_CLASS_MASK(i))) {
   1004             const SpaprEventSource *source =
   1005                 spapr_event_sources_get_source(spapr->event_sources, i);
   1006 
   1007             g_assert(source->enabled);
   1008             qemu_irq_pulse(spapr_qirq(spapr, source->irq));
   1009         }
   1010     }
   1011 
   1012     rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
   1013 }
   1014 
   1015 void spapr_clear_pending_events(SpaprMachineState *spapr)
   1016 {
   1017     SpaprEventLogEntry *entry = NULL, *next_entry;
   1018 
   1019     QTAILQ_FOREACH_SAFE(entry, &spapr->pending_events, next, next_entry) {
   1020         QTAILQ_REMOVE(&spapr->pending_events, entry, next);
   1021         g_free(entry->extended_log);
   1022         g_free(entry);
   1023     }
   1024 }
   1025 
   1026 void spapr_clear_pending_hotplug_events(SpaprMachineState *spapr)
   1027 {
   1028     SpaprEventLogEntry *entry = NULL, *next_entry;
   1029 
   1030     QTAILQ_FOREACH_SAFE(entry, &spapr->pending_events, next, next_entry) {
   1031         if (spapr_event_log_entry_type(entry) == RTAS_LOG_TYPE_HOTPLUG) {
   1032             QTAILQ_REMOVE(&spapr->pending_events, entry, next);
   1033             g_free(entry->extended_log);
   1034             g_free(entry);
   1035         }
   1036     }
   1037 }
   1038 
   1039 void spapr_events_init(SpaprMachineState *spapr)
   1040 {
   1041     int epow_irq = SPAPR_IRQ_EPOW;
   1042 
   1043     if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
   1044         epow_irq = spapr_irq_findone(spapr, &error_fatal);
   1045     }
   1046 
   1047     spapr_irq_claim(spapr, epow_irq, false, &error_fatal);
   1048 
   1049     QTAILQ_INIT(&spapr->pending_events);
   1050 
   1051     spapr->event_sources = spapr_event_sources_new();
   1052 
   1053     spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_EPOW,
   1054                                  epow_irq);
   1055 
   1056     /* NOTE: if machine supports modern/dedicated hotplug event source,
   1057      * we add it to the device-tree unconditionally. This means we may
   1058      * have cases where the source is enabled in QEMU, but unused by the
   1059      * guest because it does not support modern hotplug events, so we
   1060      * take care to rely on checking for negotiation of OV5_HP_EVT option
   1061      * before attempting to use it to signal events, rather than simply
   1062      * checking that it's enabled.
   1063      */
   1064     if (spapr->use_hotplug_event_source) {
   1065         int hp_irq = SPAPR_IRQ_HOTPLUG;
   1066 
   1067         if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
   1068             hp_irq = spapr_irq_findone(spapr, &error_fatal);
   1069         }
   1070 
   1071         spapr_irq_claim(spapr, hp_irq, false, &error_fatal);
   1072 
   1073         spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_HOT_PLUG,
   1074                                      hp_irq);
   1075     }
   1076 
   1077     spapr->epow_notifier.notify = spapr_powerdown_req;
   1078     qemu_register_powerdown_notifier(&spapr->epow_notifier);
   1079     spapr_rtas_register(RTAS_CHECK_EXCEPTION, "check-exception",
   1080                         check_exception);
   1081     spapr_rtas_register(RTAS_EVENT_SCAN, "event-scan", event_scan);
   1082 }