qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

xen_pt_config_init.c (67168B)


      1 /*
      2  * Copyright (c) 2007, Neocleus Corporation.
      3  * Copyright (c) 2007, Intel Corporation.
      4  *
      5  * This work is licensed under the terms of the GNU GPL, version 2.  See
      6  * the COPYING file in the top-level directory.
      7  *
      8  * Alex Novik <alex@neocleus.com>
      9  * Allen Kay <allen.m.kay@intel.com>
     10  * Guy Zana <guy@neocleus.com>
     11  *
     12  * This file implements direct PCI assignment to a HVM guest
     13  */
     14 
     15 #include "qemu/osdep.h"
     16 #include "qapi/error.h"
     17 #include "qemu/timer.h"
     18 #include "hw/xen/xen-legacy-backend.h"
     19 #include "xen_pt.h"
     20 
     21 #define XEN_PT_MERGE_VALUE(value, data, val_mask) \
     22     (((value) & (val_mask)) | ((data) & ~(val_mask)))
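
         /*
          * Worked example of XEN_PT_MERGE_VALUE: with value = 0x12, data = 0xAB
          * and val_mask = 0xF0, the bits selected by the mask come from 'value'
          * and the remaining bits come from 'data':
          *   (0x12 & 0xF0) | (0xAB & ~0xF0) == 0x10 | 0x0B == 0x1B
          */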
     23 
     24 #define XEN_PT_INVALID_REG          0xFFFFFFFF      /* invalid register value */
     25 
     26 /* prototype */
     27 
     28 static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
     29                                uint32_t real_offset, uint32_t *data);
     30 
     31 
     32 /* helper */
     33 
      34 /* A return value of 1 means the capability should NOT be exposed to the guest. */
     35 static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
     36 {
     37     switch (grp_id) {
     38     case PCI_CAP_ID_EXP:
      39         /* The PCI Express Capability Structure of the VF of the Intel 82599
      40          * 10GbE Controller looks trivial, e.g., the PCI Express Capabilities
      41          * Register is 0. We should not try to expose it to the guest.
      42          *
      43          * The datasheet is available at
      44          * http://download.intel.com/design/network/datashts/82599_datasheet.pdf
      45          *
      46          * See 'Table 9.7. VF PCIe Configuration Space' of the datasheet:
      47          * because the VF's PCI Express Capabilities Register reads as 0,
      48          * the Capability Version is also 0 and xen_pt_pcie_size_init()
      49          * would fail, so this capability is hidden from the guest rather
      50          * than passed through.
      51          */
     52         if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
     53             d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
     54             return 1;
     55         }
     56         break;
     57     }
     58     return 0;
     59 }
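
         /*
          * Usage sketch (illustrative only, with a hypothetical caller): when a
          * capability register group is about to be registered, it can be
          * skipped entirely if the host device asks for it to be hidden, e.g.
          *
          *     if (xen_pt_hide_dev_cap(&s->real_device, grp_id)) {
          *         continue;
          *     }
          */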
     60 
      61 /* find emulated register group entry */
     62 XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
     63 {
     64     XenPTRegGroup *entry = NULL;
     65 
     66     /* find register group entry */
     67     QLIST_FOREACH(entry, &s->reg_grps, entries) {
     68         /* check address */
     69         if ((entry->base_offset <= address)
     70             && ((entry->base_offset + entry->size) > address)) {
     71             return entry;
     72         }
     73     }
     74 
     75     /* group entry not found */
     76     return NULL;
     77 }
     78 
      79 /* find emulated register entry */
     80 XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
     81 {
     82     XenPTReg *reg_entry = NULL;
     83     XenPTRegInfo *reg = NULL;
     84     uint32_t real_offset = 0;
     85 
     86     /* find register entry */
     87     QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
     88         reg = reg_entry->reg;
     89         real_offset = reg_grp->base_offset + reg->offset;
     90         /* check address */
     91         if ((real_offset <= address)
     92             && ((real_offset + reg->size) > address)) {
     93             return reg_entry;
     94         }
     95     }
     96 
     97     return NULL;
     98 }
     99 
    100 static uint32_t get_throughable_mask(const XenPCIPassthroughState *s,
    101                                      XenPTRegInfo *reg, uint32_t valid_mask)
    102 {
    103     uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask);
    104 
    105     if (!s->permissive) {
    106         throughable_mask &= ~reg->res_mask;
    107     }
    108 
    109     return throughable_mask & valid_mask;
    110 }
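
         /*
          * Worked example of get_throughable_mask, using the Command register
          * entry from xen_pt_emu_reg_header0 below (emu_mask = 0x0743,
          * ro_mask = 0, res_mask = 0xF880): ~(0x0743 | 0) restricted to a
          * 16-bit valid_mask gives 0xF8BC, and clearing the reserved bits in
          * non-permissive mode leaves 0x003C, i.e. only Bus Master, Special
          * Cycles, Memory Write and Invalidate, and VGA Palette Snoop are
          * written straight through to the host device.
          */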
    111 
    112 /****************
    113  * general register functions
    114  */
    115 
    116 /* register initialization function */
    117 
    118 static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
    119                                   XenPTRegInfo *reg, uint32_t real_offset,
    120                                   uint32_t *data)
    121 {
    122     *data = reg->init_val;
    123     return 0;
    124 }
    125 
    126 /* Read register functions */
    127 
    128 static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
    129                                 uint8_t *value, uint8_t valid_mask)
    130 {
    131     XenPTRegInfo *reg = cfg_entry->reg;
    132     uint8_t valid_emu_mask = 0;
    133     uint8_t *data = cfg_entry->ptr.byte;
    134 
    135     /* emulate byte register */
    136     valid_emu_mask = reg->emu_mask & valid_mask;
    137     *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
    138 
    139     return 0;
    140 }
    141 static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
    142                                 uint16_t *value, uint16_t valid_mask)
    143 {
    144     XenPTRegInfo *reg = cfg_entry->reg;
    145     uint16_t valid_emu_mask = 0;
    146     uint16_t *data = cfg_entry->ptr.half_word;
    147 
    148     /* emulate word register */
    149     valid_emu_mask = reg->emu_mask & valid_mask;
    150     *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
    151 
    152     return 0;
    153 }
    154 static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
    155                                 uint32_t *value, uint32_t valid_mask)
    156 {
    157     XenPTRegInfo *reg = cfg_entry->reg;
    158     uint32_t valid_emu_mask = 0;
    159     uint32_t *data = cfg_entry->ptr.word;
    160 
    161     /* emulate long register */
    162     valid_emu_mask = reg->emu_mask & valid_mask;
    163     *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
    164 
    165     return 0;
    166 }
    167 
    168 /* Write register functions */
    169 
    170 static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
    171                                  uint8_t *val, uint8_t dev_value,
    172                                  uint8_t valid_mask)
    173 {
    174     XenPTRegInfo *reg = cfg_entry->reg;
    175     uint8_t writable_mask = 0;
    176     uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    177     uint8_t *data = cfg_entry->ptr.byte;
    178 
    179     /* modify emulate register */
    180     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    181     *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    182 
    183     /* create value for writing to I/O device register */
    184     *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
    185                               throughable_mask);
    186 
    187     return 0;
    188 }
    189 static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
    190                                  uint16_t *val, uint16_t dev_value,
    191                                  uint16_t valid_mask)
    192 {
    193     XenPTRegInfo *reg = cfg_entry->reg;
    194     uint16_t writable_mask = 0;
    195     uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    196     uint16_t *data = cfg_entry->ptr.half_word;
    197 
    198     /* modify emulate register */
    199     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    200     *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    201 
    202     /* create value for writing to I/O device register */
    203     *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
    204                               throughable_mask);
    205 
    206     return 0;
    207 }
    208 static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
    209                                  uint32_t *val, uint32_t dev_value,
    210                                  uint32_t valid_mask)
    211 {
    212     XenPTRegInfo *reg = cfg_entry->reg;
    213     uint32_t writable_mask = 0;
    214     uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    215     uint32_t *data = cfg_entry->ptr.word;
    216 
    217     /* modify emulate register */
    218     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    219     *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    220 
    221     /* create value for writing to I/O device register */
    222     *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
    223                               throughable_mask);
    224 
    225     return 0;
    226 }
    227 
    228 
     229 /* XenPTRegInfo declaration
     230  * - only for emulated registers (either partially or fully emulated).
     231  * - for pass-through registers that need special behavior (such as interacting
     232  *   with other components), set emu_mask to all 0 and specify the r/w functions.
     233  * - do NOT use all Fs for init_val, otherwise the entry will not be registered.
     234  */
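
         /*
          * Illustrative sketch only (SOME_REG_OFFSET and some_special_reg_write
          * are hypothetical, not taken from the tables below): a fully
          * pass-through 16-bit register with special write behavior would be
          * declared with emu_mask 0 and its own handler, e.g.
          *
          *     {
          *         .offset    = SOME_REG_OFFSET,
          *         .size      = 2,
          *         .emu_mask  = 0x0000,
          *         .init      = xen_pt_common_reg_init,
          *         .u.w.read  = xen_pt_word_reg_read,
          *         .u.w.write = some_special_reg_write,
          *     },
          */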
    235 
    236 /********************
    237  * Header Type0
    238  */
    239 
    240 static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
    241                                   XenPTRegInfo *reg, uint32_t real_offset,
    242                                   uint32_t *data)
    243 {
    244     *data = s->real_device.vendor_id;
    245     return 0;
    246 }
    247 static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
    248                                   XenPTRegInfo *reg, uint32_t real_offset,
    249                                   uint32_t *data)
    250 {
    251     *data = s->real_device.device_id;
    252     return 0;
    253 }
    254 static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
    255                                   XenPTRegInfo *reg, uint32_t real_offset,
    256                                   uint32_t *data)
    257 {
    258     XenPTRegGroup *reg_grp_entry = NULL;
    259     XenPTReg *reg_entry = NULL;
    260     uint32_t reg_field = 0;
    261 
    262     /* find Header register group */
    263     reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
    264     if (reg_grp_entry) {
    265         /* find Capabilities Pointer register */
    266         reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
    267         if (reg_entry) {
    268             /* check Capabilities Pointer register */
    269             if (*reg_entry->ptr.half_word) {
    270                 reg_field |= PCI_STATUS_CAP_LIST;
    271             } else {
    272                 reg_field &= ~PCI_STATUS_CAP_LIST;
    273             }
    274         } else {
    275             xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
    276                                      " for Capabilities Pointer register."
    277                                      " (%s)\n", __func__);
    278             return -1;
    279         }
    280     } else {
    281         xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
    282                                  " for Header. (%s)\n", __func__);
    283         return -1;
    284     }
    285 
    286     *data = reg_field;
    287     return 0;
    288 }
    289 static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
    290                                        XenPTRegInfo *reg, uint32_t real_offset,
    291                                        uint32_t *data)
    292 {
     293     /* emulate PCI_HEADER_TYPE with the Multi-Function bit (0x80) set */
    294     *data = reg->init_val | 0x80;
    295     return 0;
    296 }
    297 
    298 /* initialize Interrupt Pin register */
    299 static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
    300                                   XenPTRegInfo *reg, uint32_t real_offset,
    301                                   uint32_t *data)
    302 {
    303     if (s->real_device.irq) {
    304         *data = xen_pt_pci_read_intx(s);
    305     }
    306     return 0;
    307 }
    308 
    309 /* Command register */
    310 static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
    311                                 uint16_t *val, uint16_t dev_value,
    312                                 uint16_t valid_mask)
    313 {
    314     XenPTRegInfo *reg = cfg_entry->reg;
    315     uint16_t writable_mask = 0;
    316     uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    317     uint16_t *data = cfg_entry->ptr.half_word;
    318 
    319     /* modify emulate register */
    320     writable_mask = ~reg->ro_mask & valid_mask;
    321     *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    322 
    323     /* create value for writing to I/O device register */
    324     if (*val & PCI_COMMAND_INTX_DISABLE) {
    325         throughable_mask |= PCI_COMMAND_INTX_DISABLE;
    326     } else {
    327         if (s->machine_irq) {
    328             throughable_mask |= PCI_COMMAND_INTX_DISABLE;
    329         }
    330     }
    331 
    332     *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
    333 
    334     return 0;
    335 }
    336 
    337 /* BAR */
    338 #define XEN_PT_BAR_MEM_RO_MASK    0x0000000F  /* BAR ReadOnly mask(Memory) */
    339 #define XEN_PT_BAR_MEM_EMU_MASK   0xFFFFFFF0  /* BAR emul mask(Memory) */
    340 #define XEN_PT_BAR_IO_RO_MASK     0x00000003  /* BAR ReadOnly mask(I/O) */
    341 #define XEN_PT_BAR_IO_EMU_MASK    0xFFFFFFFC  /* BAR emul mask(I/O) */
    342 
    343 static bool is_64bit_bar(PCIIORegion *r)
    344 {
    345     return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64);
    346 }
    347 
    348 static uint64_t xen_pt_get_bar_size(PCIIORegion *r)
    349 {
    350     if (is_64bit_bar(r)) {
    351         uint64_t size64;
    352         size64 = (r + 1)->size;
    353         size64 <<= 32;
    354         size64 += r->size;
    355         return size64;
    356     }
    357     return r->size;
    358 }
    359 
    360 static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
    361                                          int index)
    362 {
    363     PCIDevice *d = PCI_DEVICE(s);
    364     XenPTRegion *region = NULL;
    365     PCIIORegion *r;
    366 
    367     /* check 64bit BAR */
    368     if ((0 < index) && (index < PCI_ROM_SLOT)) {
    369         int type = s->real_device.io_regions[index - 1].type;
    370 
    371         if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
    372             && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
    373             region = &s->bases[index - 1];
    374             if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
    375                 return XEN_PT_BAR_FLAG_UPPER;
    376             }
    377         }
    378     }
    379 
    380     /* check unused BAR */
    381     r = &d->io_regions[index];
    382     if (!xen_pt_get_bar_size(r)) {
    383         return XEN_PT_BAR_FLAG_UNUSED;
    384     }
    385 
    386     /* for ExpROM BAR */
    387     if (index == PCI_ROM_SLOT) {
    388         return XEN_PT_BAR_FLAG_MEM;
    389     }
    390 
    391     /* check BAR I/O indicator */
    392     if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
    393         return XEN_PT_BAR_FLAG_IO;
    394     } else {
    395         return XEN_PT_BAR_FLAG_MEM;
    396     }
    397 }
    398 
    399 static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
    400 {
    401     if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
    402         return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
    403     } else {
    404         return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
    405     }
    406 }
    407 
    408 static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
    409                                uint32_t real_offset, uint32_t *data)
    410 {
    411     uint32_t reg_field = 0;
    412     int index;
    413 
    414     index = xen_pt_bar_offset_to_index(reg->offset);
    415     if (index < 0 || index >= PCI_NUM_REGIONS) {
    416         XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
    417         return -1;
    418     }
    419 
    420     /* set BAR flag */
    421     s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index);
    422     if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
    423         reg_field = XEN_PT_INVALID_REG;
    424     }
    425 
    426     *data = reg_field;
    427     return 0;
    428 }
    429 static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
    430                                uint32_t *value, uint32_t valid_mask)
    431 {
    432     XenPTRegInfo *reg = cfg_entry->reg;
    433     uint32_t valid_emu_mask = 0;
    434     uint32_t bar_emu_mask = 0;
    435     int index;
    436 
    437     /* get BAR index */
    438     index = xen_pt_bar_offset_to_index(reg->offset);
    439     if (index < 0 || index >= PCI_NUM_REGIONS - 1) {
    440         XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
    441         return -1;
    442     }
    443 
    444     /* use fixed-up value from kernel sysfs */
    445     *value = base_address_with_flags(&s->real_device.io_regions[index]);
    446 
     447     /* set emulation mask depending on the BAR flag */
    448     switch (s->bases[index].bar_flag) {
    449     case XEN_PT_BAR_FLAG_MEM:
    450         bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
    451         break;
    452     case XEN_PT_BAR_FLAG_IO:
    453         bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
    454         break;
    455     case XEN_PT_BAR_FLAG_UPPER:
    456         bar_emu_mask = XEN_PT_BAR_ALLF;
    457         break;
    458     default:
    459         break;
    460     }
    461 
    462     /* emulate BAR */
    463     valid_emu_mask = bar_emu_mask & valid_mask;
    464     *value = XEN_PT_MERGE_VALUE(*value, *cfg_entry->ptr.word, ~valid_emu_mask);
    465 
    466     return 0;
    467 }
    468 static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
    469                                 uint32_t *val, uint32_t dev_value,
    470                                 uint32_t valid_mask)
    471 {
    472     XenPTRegInfo *reg = cfg_entry->reg;
    473     XenPTRegion *base = NULL;
    474     PCIDevice *d = PCI_DEVICE(s);
    475     const PCIIORegion *r;
    476     uint32_t writable_mask = 0;
    477     uint32_t bar_emu_mask = 0;
    478     uint32_t bar_ro_mask = 0;
    479     uint32_t r_size = 0;
    480     int index = 0;
    481     uint32_t *data = cfg_entry->ptr.word;
    482 
    483     index = xen_pt_bar_offset_to_index(reg->offset);
    484     if (index < 0 || index >= PCI_NUM_REGIONS) {
    485         XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
    486         return -1;
    487     }
    488 
    489     r = &d->io_regions[index];
    490     base = &s->bases[index];
    491     r_size = xen_pt_get_emul_size(base->bar_flag, r->size);
    492 
     493     /* set emulation mask and read-only mask values depending on the BAR flag */
    494     switch (s->bases[index].bar_flag) {
    495     case XEN_PT_BAR_FLAG_MEM:
    496         bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
    497         if (!r_size) {
    498             /* low 32 bits mask for 64 bit bars */
    499             bar_ro_mask = XEN_PT_BAR_ALLF;
    500         } else {
    501             bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
    502         }
    503         break;
    504     case XEN_PT_BAR_FLAG_IO:
    505         bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
    506         bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
    507         break;
    508     case XEN_PT_BAR_FLAG_UPPER:
    509         assert(index > 0);
    510         r_size = d->io_regions[index - 1].size >> 32;
    511         bar_emu_mask = XEN_PT_BAR_ALLF;
    512         bar_ro_mask = r_size ? r_size - 1 : 0;
    513         break;
    514     default:
    515         break;
    516     }
    517 
    518     /* modify emulate register */
    519     writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
    520     *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    521 
    522     /* check whether we need to update the virtual region address or not */
    523     switch (s->bases[index].bar_flag) {
    524     case XEN_PT_BAR_FLAG_UPPER:
    525     case XEN_PT_BAR_FLAG_MEM:
    526         /* nothing to do */
    527         break;
    528     case XEN_PT_BAR_FLAG_IO:
    529         /* nothing to do */
    530         break;
    531     default:
    532         break;
    533     }
    534 
    535     /* create value for writing to I/O device register */
    536     *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
    537 
    538     return 0;
    539 }
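
         /*
          * Note on the read-only masks above: keeping the low (r_size - 1) bits
          * of the emulated BAR read-only is what implements the standard PCI BAR
          * sizing protocol. When the guest writes 0xFFFFFFFF, only the address
          * bits above the region size stick, so the value it reads back encodes
          * the BAR size just as it would on real hardware.
          */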
    540 
    541 /* write Exp ROM BAR */
    542 static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
    543                                         XenPTReg *cfg_entry, uint32_t *val,
    544                                         uint32_t dev_value, uint32_t valid_mask)
    545 {
    546     XenPTRegInfo *reg = cfg_entry->reg;
    547     XenPTRegion *base = NULL;
    548     PCIDevice *d = PCI_DEVICE(s);
    549     uint32_t writable_mask = 0;
    550     uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    551     pcibus_t r_size = 0;
    552     uint32_t bar_ro_mask = 0;
    553     uint32_t *data = cfg_entry->ptr.word;
    554 
    555     r_size = d->io_regions[PCI_ROM_SLOT].size;
    556     base = &s->bases[PCI_ROM_SLOT];
    557     /* align memory type resource size */
    558     r_size = xen_pt_get_emul_size(base->bar_flag, r_size);
    559 
    560     /* set emulate mask and read-only mask */
    561     bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;
    562 
    563     /* modify emulate register */
    564     writable_mask = ~bar_ro_mask & valid_mask;
    565     *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    566 
    567     /* create value for writing to I/O device register */
    568     *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
    569 
    570     return 0;
    571 }
    572 
    573 static int xen_pt_intel_opregion_read(XenPCIPassthroughState *s,
    574                                       XenPTReg *cfg_entry,
    575                                       uint32_t *value, uint32_t valid_mask)
    576 {
    577     *value = igd_read_opregion(s);
    578     return 0;
    579 }
    580 
    581 static int xen_pt_intel_opregion_write(XenPCIPassthroughState *s,
    582                                        XenPTReg *cfg_entry, uint32_t *value,
    583                                        uint32_t dev_value, uint32_t valid_mask)
    584 {
    585     igd_write_opregion(s, *value);
    586     return 0;
    587 }
    588 
    589 /* Header Type0 reg static information table */
    590 static XenPTRegInfo xen_pt_emu_reg_header0[] = {
    591     /* Vendor ID reg */
    592     {
    593         .offset     = PCI_VENDOR_ID,
    594         .size       = 2,
    595         .init_val   = 0x0000,
    596         .ro_mask    = 0xFFFF,
    597         .emu_mask   = 0xFFFF,
    598         .init       = xen_pt_vendor_reg_init,
    599         .u.w.read   = xen_pt_word_reg_read,
    600         .u.w.write  = xen_pt_word_reg_write,
    601     },
    602     /* Device ID reg */
    603     {
    604         .offset     = PCI_DEVICE_ID,
    605         .size       = 2,
    606         .init_val   = 0x0000,
    607         .ro_mask    = 0xFFFF,
    608         .emu_mask   = 0xFFFF,
    609         .init       = xen_pt_device_reg_init,
    610         .u.w.read   = xen_pt_word_reg_read,
    611         .u.w.write  = xen_pt_word_reg_write,
    612     },
    613     /* Command reg */
    614     {
    615         .offset     = PCI_COMMAND,
    616         .size       = 2,
    617         .init_val   = 0x0000,
    618         .res_mask   = 0xF880,
    619         .emu_mask   = 0x0743,
    620         .init       = xen_pt_common_reg_init,
    621         .u.w.read   = xen_pt_word_reg_read,
    622         .u.w.write  = xen_pt_cmd_reg_write,
    623     },
    624     /* Capabilities Pointer reg */
    625     {
    626         .offset     = PCI_CAPABILITY_LIST,
    627         .size       = 1,
    628         .init_val   = 0x00,
    629         .ro_mask    = 0xFF,
    630         .emu_mask   = 0xFF,
    631         .init       = xen_pt_ptr_reg_init,
    632         .u.b.read   = xen_pt_byte_reg_read,
    633         .u.b.write  = xen_pt_byte_reg_write,
    634     },
    635     /* Status reg */
     636     /* use the emulated Cap Ptr value to initialize,
     637      * so it needs to be declared after the Cap Ptr reg
     638      */
    639     {
    640         .offset     = PCI_STATUS,
    641         .size       = 2,
    642         .init_val   = 0x0000,
    643         .res_mask   = 0x0007,
    644         .ro_mask    = 0x06F8,
    645         .rw1c_mask  = 0xF900,
    646         .emu_mask   = 0x0010,
    647         .init       = xen_pt_status_reg_init,
    648         .u.w.read   = xen_pt_word_reg_read,
    649         .u.w.write  = xen_pt_word_reg_write,
    650     },
    651     /* Cache Line Size reg */
    652     {
    653         .offset     = PCI_CACHE_LINE_SIZE,
    654         .size       = 1,
    655         .init_val   = 0x00,
    656         .ro_mask    = 0x00,
    657         .emu_mask   = 0xFF,
    658         .init       = xen_pt_common_reg_init,
    659         .u.b.read   = xen_pt_byte_reg_read,
    660         .u.b.write  = xen_pt_byte_reg_write,
    661     },
    662     /* Latency Timer reg */
    663     {
    664         .offset     = PCI_LATENCY_TIMER,
    665         .size       = 1,
    666         .init_val   = 0x00,
    667         .ro_mask    = 0x00,
    668         .emu_mask   = 0xFF,
    669         .init       = xen_pt_common_reg_init,
    670         .u.b.read   = xen_pt_byte_reg_read,
    671         .u.b.write  = xen_pt_byte_reg_write,
    672     },
    673     /* Header Type reg */
    674     {
    675         .offset     = PCI_HEADER_TYPE,
    676         .size       = 1,
    677         .init_val   = 0x00,
    678         .ro_mask    = 0xFF,
    679         .emu_mask   = 0x00,
    680         .init       = xen_pt_header_type_reg_init,
    681         .u.b.read   = xen_pt_byte_reg_read,
    682         .u.b.write  = xen_pt_byte_reg_write,
    683     },
    684     /* Interrupt Line reg */
    685     {
    686         .offset     = PCI_INTERRUPT_LINE,
    687         .size       = 1,
    688         .init_val   = 0x00,
    689         .ro_mask    = 0x00,
    690         .emu_mask   = 0xFF,
    691         .init       = xen_pt_common_reg_init,
    692         .u.b.read   = xen_pt_byte_reg_read,
    693         .u.b.write  = xen_pt_byte_reg_write,
    694     },
    695     /* Interrupt Pin reg */
    696     {
    697         .offset     = PCI_INTERRUPT_PIN,
    698         .size       = 1,
    699         .init_val   = 0x00,
    700         .ro_mask    = 0xFF,
    701         .emu_mask   = 0xFF,
    702         .init       = xen_pt_irqpin_reg_init,
    703         .u.b.read   = xen_pt_byte_reg_read,
    704         .u.b.write  = xen_pt_byte_reg_write,
    705     },
    706     /* BAR 0 reg */
     707     /* the BAR mask needs to be decided later, depending on IO/MEM type */
    708     {
    709         .offset     = PCI_BASE_ADDRESS_0,
    710         .size       = 4,
    711         .init_val   = 0x00000000,
    712         .init       = xen_pt_bar_reg_init,
    713         .u.dw.read  = xen_pt_bar_reg_read,
    714         .u.dw.write = xen_pt_bar_reg_write,
    715     },
    716     /* BAR 1 reg */
    717     {
    718         .offset     = PCI_BASE_ADDRESS_1,
    719         .size       = 4,
    720         .init_val   = 0x00000000,
    721         .init       = xen_pt_bar_reg_init,
    722         .u.dw.read  = xen_pt_bar_reg_read,
    723         .u.dw.write = xen_pt_bar_reg_write,
    724     },
    725     /* BAR 2 reg */
    726     {
    727         .offset     = PCI_BASE_ADDRESS_2,
    728         .size       = 4,
    729         .init_val   = 0x00000000,
    730         .init       = xen_pt_bar_reg_init,
    731         .u.dw.read  = xen_pt_bar_reg_read,
    732         .u.dw.write = xen_pt_bar_reg_write,
    733     },
    734     /* BAR 3 reg */
    735     {
    736         .offset     = PCI_BASE_ADDRESS_3,
    737         .size       = 4,
    738         .init_val   = 0x00000000,
    739         .init       = xen_pt_bar_reg_init,
    740         .u.dw.read  = xen_pt_bar_reg_read,
    741         .u.dw.write = xen_pt_bar_reg_write,
    742     },
    743     /* BAR 4 reg */
    744     {
    745         .offset     = PCI_BASE_ADDRESS_4,
    746         .size       = 4,
    747         .init_val   = 0x00000000,
    748         .init       = xen_pt_bar_reg_init,
    749         .u.dw.read  = xen_pt_bar_reg_read,
    750         .u.dw.write = xen_pt_bar_reg_write,
    751     },
    752     /* BAR 5 reg */
    753     {
    754         .offset     = PCI_BASE_ADDRESS_5,
    755         .size       = 4,
    756         .init_val   = 0x00000000,
    757         .init       = xen_pt_bar_reg_init,
    758         .u.dw.read  = xen_pt_bar_reg_read,
    759         .u.dw.write = xen_pt_bar_reg_write,
    760     },
    761     /* Expansion ROM BAR reg */
    762     {
    763         .offset     = PCI_ROM_ADDRESS,
    764         .size       = 4,
    765         .init_val   = 0x00000000,
    766         .ro_mask    = ~PCI_ROM_ADDRESS_MASK & ~PCI_ROM_ADDRESS_ENABLE,
    767         .emu_mask   = (uint32_t)PCI_ROM_ADDRESS_MASK,
    768         .init       = xen_pt_bar_reg_init,
    769         .u.dw.read  = xen_pt_long_reg_read,
    770         .u.dw.write = xen_pt_exp_rom_bar_reg_write,
    771     },
    772     {
    773         .size = 0,
    774     },
    775 };
    776 
    777 
    778 /*********************************
    779  * Vital Product Data Capability
    780  */
    781 
    782 /* Vital Product Data Capability Structure reg static information table */
    783 static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
    784     {
    785         .offset     = PCI_CAP_LIST_NEXT,
    786         .size       = 1,
    787         .init_val   = 0x00,
    788         .ro_mask    = 0xFF,
    789         .emu_mask   = 0xFF,
    790         .init       = xen_pt_ptr_reg_init,
    791         .u.b.read   = xen_pt_byte_reg_read,
    792         .u.b.write  = xen_pt_byte_reg_write,
    793     },
    794     {
    795         .offset     = PCI_VPD_ADDR,
    796         .size       = 2,
    797         .ro_mask    = 0x0003,
    798         .emu_mask   = 0x0003,
    799         .init       = xen_pt_common_reg_init,
    800         .u.w.read   = xen_pt_word_reg_read,
    801         .u.w.write  = xen_pt_word_reg_write,
    802     },
    803     {
    804         .size = 0,
    805     },
    806 };
    807 
    808 
    809 /**************************************
    810  * Vendor Specific Capability
    811  */
    812 
    813 /* Vendor Specific Capability Structure reg static information table */
    814 static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
    815     {
    816         .offset     = PCI_CAP_LIST_NEXT,
    817         .size       = 1,
    818         .init_val   = 0x00,
    819         .ro_mask    = 0xFF,
    820         .emu_mask   = 0xFF,
    821         .init       = xen_pt_ptr_reg_init,
    822         .u.b.read   = xen_pt_byte_reg_read,
    823         .u.b.write  = xen_pt_byte_reg_write,
    824     },
    825     {
    826         .size = 0,
    827     },
    828 };
    829 
    830 
    831 /*****************************
    832  * PCI Express Capability
    833  */
    834 
    835 static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
    836                                              uint32_t offset)
    837 {
    838     uint8_t flag;
    839     if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
    840         return 0;
    841     }
    842     return flag & PCI_EXP_FLAGS_VERS;
    843 }
    844 
    845 static inline uint8_t get_device_type(XenPCIPassthroughState *s,
    846                                       uint32_t offset)
    847 {
    848     uint8_t flag;
    849     if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
    850         return 0;
    851     }
    852     return (flag & PCI_EXP_FLAGS_TYPE) >> 4;
    853 }
    854 
    855 /* initialize Link Control register */
    856 static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
    857                                     XenPTRegInfo *reg, uint32_t real_offset,
    858                                     uint32_t *data)
    859 {
    860     uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    861     uint8_t dev_type = get_device_type(s, real_offset - reg->offset);
    862 
    863     /* no need to initialize in case of Root Complex Integrated Endpoint
    864      * with cap_ver 1.x
    865      */
     866     if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
     867         *data = XEN_PT_INVALID_REG;
     868         return 0;
     869     }
     870     *data = reg->init_val;
    871     return 0;
    872 }
    873 /* initialize Device Control 2 register */
    874 static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
    875                                     XenPTRegInfo *reg, uint32_t real_offset,
    876                                     uint32_t *data)
    877 {
    878     uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    879 
    880     /* no need to initialize in case of cap_ver 1.x */
     881     if (cap_ver == 1) {
     882         *data = XEN_PT_INVALID_REG;
     883         return 0;
     884     }
     885     *data = reg->init_val;
    886     return 0;
    887 }
    888 /* initialize Link Control 2 register */
    889 static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
    890                                      XenPTRegInfo *reg, uint32_t real_offset,
    891                                      uint32_t *data)
    892 {
    893     uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    894     uint32_t reg_field = 0;
    895 
    896     /* no need to initialize in case of cap_ver 1.x */
    897     if (cap_ver == 1) {
    898         reg_field = XEN_PT_INVALID_REG;
    899     } else {
    900         /* set Supported Link Speed */
    901         uint8_t lnkcap;
    902         int rc;
    903         rc = xen_host_pci_get_byte(&s->real_device,
    904                                    real_offset - reg->offset + PCI_EXP_LNKCAP,
    905                                    &lnkcap);
    906         if (rc) {
    907             return rc;
    908         }
    909         reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
    910     }
    911 
    912     *data = reg_field;
    913     return 0;
    914 }
    915 
    916 /* PCI Express Capability Structure reg static information table */
    917 static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
    918     /* Next Pointer reg */
    919     {
    920         .offset     = PCI_CAP_LIST_NEXT,
    921         .size       = 1,
    922         .init_val   = 0x00,
    923         .ro_mask    = 0xFF,
    924         .emu_mask   = 0xFF,
    925         .init       = xen_pt_ptr_reg_init,
    926         .u.b.read   = xen_pt_byte_reg_read,
    927         .u.b.write  = xen_pt_byte_reg_write,
    928     },
    929     /* Device Capabilities reg */
    930     {
    931         .offset     = PCI_EXP_DEVCAP,
    932         .size       = 4,
    933         .init_val   = 0x00000000,
    934         .ro_mask    = 0xFFFFFFFF,
    935         .emu_mask   = 0x10000000,
    936         .init       = xen_pt_common_reg_init,
    937         .u.dw.read  = xen_pt_long_reg_read,
    938         .u.dw.write = xen_pt_long_reg_write,
    939     },
    940     /* Device Control reg */
    941     {
    942         .offset     = PCI_EXP_DEVCTL,
    943         .size       = 2,
    944         .init_val   = 0x2810,
    945         .ro_mask    = 0x8400,
    946         .emu_mask   = 0xFFFF,
    947         .init       = xen_pt_common_reg_init,
    948         .u.w.read   = xen_pt_word_reg_read,
    949         .u.w.write  = xen_pt_word_reg_write,
    950     },
    951     /* Device Status reg */
    952     {
    953         .offset     = PCI_EXP_DEVSTA,
    954         .size       = 2,
    955         .res_mask   = 0xFFC0,
    956         .ro_mask    = 0x0030,
    957         .rw1c_mask  = 0x000F,
    958         .init       = xen_pt_common_reg_init,
    959         .u.w.read   = xen_pt_word_reg_read,
    960         .u.w.write  = xen_pt_word_reg_write,
    961     },
    962     /* Link Control reg */
    963     {
    964         .offset     = PCI_EXP_LNKCTL,
    965         .size       = 2,
    966         .init_val   = 0x0000,
    967         .ro_mask    = 0xFC34,
    968         .emu_mask   = 0xFFFF,
    969         .init       = xen_pt_linkctrl_reg_init,
    970         .u.w.read   = xen_pt_word_reg_read,
    971         .u.w.write  = xen_pt_word_reg_write,
    972     },
    973     /* Link Status reg */
    974     {
    975         .offset     = PCI_EXP_LNKSTA,
    976         .size       = 2,
    977         .ro_mask    = 0x3FFF,
    978         .rw1c_mask  = 0xC000,
    979         .init       = xen_pt_common_reg_init,
    980         .u.w.read   = xen_pt_word_reg_read,
    981         .u.w.write  = xen_pt_word_reg_write,
    982     },
    983     /* Device Control 2 reg */
    984     {
    985         .offset     = 0x28,
    986         .size       = 2,
    987         .init_val   = 0x0000,
    988         .ro_mask    = 0xFFA0,
    989         .emu_mask   = 0xFFBF,
    990         .init       = xen_pt_devctrl2_reg_init,
    991         .u.w.read   = xen_pt_word_reg_read,
    992         .u.w.write  = xen_pt_word_reg_write,
    993     },
    994     /* Link Control 2 reg */
    995     {
    996         .offset     = 0x30,
    997         .size       = 2,
    998         .init_val   = 0x0000,
    999         .ro_mask    = 0xE040,
   1000         .emu_mask   = 0xFFFF,
   1001         .init       = xen_pt_linkctrl2_reg_init,
   1002         .u.w.read   = xen_pt_word_reg_read,
   1003         .u.w.write  = xen_pt_word_reg_write,
   1004     },
   1005     {
   1006         .size = 0,
   1007     },
   1008 };
   1009 
   1010 
   1011 /*********************************
   1012  * Power Management Capability
   1013  */
   1014 
   1015 /* Power Management Capability reg static information table */
   1016 static XenPTRegInfo xen_pt_emu_reg_pm[] = {
   1017     /* Next Pointer reg */
   1018     {
   1019         .offset     = PCI_CAP_LIST_NEXT,
   1020         .size       = 1,
   1021         .init_val   = 0x00,
   1022         .ro_mask    = 0xFF,
   1023         .emu_mask   = 0xFF,
   1024         .init       = xen_pt_ptr_reg_init,
   1025         .u.b.read   = xen_pt_byte_reg_read,
   1026         .u.b.write  = xen_pt_byte_reg_write,
   1027     },
   1028     /* Power Management Capabilities reg */
   1029     {
   1030         .offset     = PCI_CAP_FLAGS,
   1031         .size       = 2,
   1032         .init_val   = 0x0000,
   1033         .ro_mask    = 0xFFFF,
   1034         .emu_mask   = 0xF9C8,
   1035         .init       = xen_pt_common_reg_init,
   1036         .u.w.read   = xen_pt_word_reg_read,
   1037         .u.w.write  = xen_pt_word_reg_write,
   1038     },
   1039     /* PCI Power Management Control/Status reg */
   1040     {
   1041         .offset     = PCI_PM_CTRL,
   1042         .size       = 2,
   1043         .init_val   = 0x0008,
   1044         .res_mask   = 0x00F0,
   1045         .ro_mask    = 0x610C,
   1046         .rw1c_mask  = 0x8000,
   1047         .emu_mask   = 0x810B,
   1048         .init       = xen_pt_common_reg_init,
   1049         .u.w.read   = xen_pt_word_reg_read,
   1050         .u.w.write  = xen_pt_word_reg_write,
   1051     },
   1052     {
   1053         .size = 0,
   1054     },
   1055 };
   1056 
   1057 
   1058 /********************************
   1059  * MSI Capability
   1060  */
   1061 
   1062 /* Helper */
   1063 #define xen_pt_msi_check_type(offset, flags, what) \
   1064         ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
   1065                       PCI_MSI_##what##_64 : PCI_MSI_##what##_32))
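
         /*
          * Worked example of xen_pt_msi_check_type: for a function whose MSI
          * capability advertises PCI_MSI_FLAGS_64BIT, the Message Data field
          * lives at PCI_MSI_DATA_64, so the check is true for offset
          * PCI_MSI_DATA_64 and false for PCI_MSI_DATA_32 (and vice versa for a
          * 32-bit-only capability).
          */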
   1066 
   1067 /* Message Control register */
   1068 static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
   1069                                    XenPTRegInfo *reg, uint32_t real_offset,
   1070                                    uint32_t *data)
   1071 {
   1072     XenPTMSI *msi = s->msi;
   1073     uint16_t reg_field;
   1074     int rc;
   1075 
   1076     /* use I/O device register's value as initial value */
   1077     rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
   1078     if (rc) {
   1079         return rc;
   1080     }
   1081     if (reg_field & PCI_MSI_FLAGS_ENABLE) {
   1082         XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
   1083         xen_host_pci_set_word(&s->real_device, real_offset,
   1084                               reg_field & ~PCI_MSI_FLAGS_ENABLE);
   1085     }
   1086     msi->flags |= reg_field;
   1087     msi->ctrl_offset = real_offset;
   1088     msi->initialized = false;
   1089     msi->mapped = false;
   1090 
   1091     *data = reg->init_val;
   1092     return 0;
   1093 }
   1094 static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
   1095                                     XenPTReg *cfg_entry, uint16_t *val,
   1096                                     uint16_t dev_value, uint16_t valid_mask)
   1097 {
   1098     XenPTRegInfo *reg = cfg_entry->reg;
   1099     XenPTMSI *msi = s->msi;
   1100     uint16_t writable_mask = 0;
   1101     uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
   1102     uint16_t *data = cfg_entry->ptr.half_word;
   1103 
   1104     /* Currently no support for multi-vector */
   1105     if (*val & PCI_MSI_FLAGS_QSIZE) {
   1106         XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
   1107     }
   1108 
   1109     /* modify emulate register */
   1110     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
   1111     *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
   1112     msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE;
   1113 
   1114     /* create value for writing to I/O device register */
   1115     *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
   1116 
   1117     /* update MSI */
   1118     if (*val & PCI_MSI_FLAGS_ENABLE) {
   1119         /* setup MSI pirq for the first time */
   1120         if (!msi->initialized) {
   1121             /* Init physical one */
   1122             XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val);
   1123             if (xen_pt_msi_setup(s)) {
    1124                 /* We do not broadcast the error to the framework code, so
    1125                  * that MSI errors are contained in the MSI emulation code and
    1126                  * QEMU can go on running.
    1127                  * The guest's MSI simply will not work.
    1128                  */
   1129                 *val &= ~PCI_MSI_FLAGS_ENABLE;
   1130                 XEN_PT_WARN(&s->dev, "Can not map MSI (register: %x)!\n", *val);
   1131                 return 0;
   1132             }
   1133             if (xen_pt_msi_update(s)) {
   1134                 *val &= ~PCI_MSI_FLAGS_ENABLE;
   1135                 XEN_PT_WARN(&s->dev, "Can not bind MSI (register: %x)!\n", *val);
   1136                 return 0;
   1137             }
   1138             msi->initialized = true;
   1139             msi->mapped = true;
   1140         }
   1141         msi->flags |= PCI_MSI_FLAGS_ENABLE;
   1142     } else if (msi->mapped) {
   1143         xen_pt_msi_disable(s);
   1144     }
   1145 
   1146     return 0;
   1147 }
   1148 
   1149 /* initialize Message Upper Address register */
   1150 static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
   1151                                      XenPTRegInfo *reg, uint32_t real_offset,
   1152                                      uint32_t *data)
   1153 {
   1154     /* no need to initialize in case of 32 bit type */
   1155     if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
   1156         *data = XEN_PT_INVALID_REG;
   1157     } else {
   1158         *data = reg->init_val;
   1159     }
   1160 
   1161     return 0;
   1162 }
   1163 /* this function will be called twice (for 32 bit and 64 bit type) */
   1164 /* initialize Message Data register */
   1165 static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
   1166                                    XenPTRegInfo *reg, uint32_t real_offset,
   1167                                    uint32_t *data)
   1168 {
   1169     uint32_t flags = s->msi->flags;
   1170     uint32_t offset = reg->offset;
   1171 
    1172     /* check whether the offset matches the type or not */
   1173     if (xen_pt_msi_check_type(offset, flags, DATA)) {
   1174         *data = reg->init_val;
   1175     } else {
   1176         *data = XEN_PT_INVALID_REG;
   1177     }
   1178     return 0;
   1179 }
   1180 
   1181 /* this function will be called twice (for 32 bit and 64 bit type) */
   1182 /* initialize Mask register */
   1183 static int xen_pt_mask_reg_init(XenPCIPassthroughState *s,
   1184                                 XenPTRegInfo *reg, uint32_t real_offset,
   1185                                 uint32_t *data)
   1186 {
   1187     uint32_t flags = s->msi->flags;
   1188 
    1189     /* check whether the offset matches the type or not */
   1190     if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
   1191         *data = XEN_PT_INVALID_REG;
   1192     } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) {
   1193         *data = reg->init_val;
   1194     } else {
   1195         *data = XEN_PT_INVALID_REG;
   1196     }
   1197     return 0;
   1198 }
   1199 
   1200 /* this function will be called twice (for 32 bit and 64 bit type) */
   1201 /* initialize Pending register */
   1202 static int xen_pt_pending_reg_init(XenPCIPassthroughState *s,
   1203                                    XenPTRegInfo *reg, uint32_t real_offset,
   1204                                    uint32_t *data)
   1205 {
   1206     uint32_t flags = s->msi->flags;
   1207 
    1208     /* check whether the offset matches the type or not */
   1209     if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
   1210         *data = XEN_PT_INVALID_REG;
   1211     } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) {
   1212         *data = reg->init_val;
   1213     } else {
   1214         *data = XEN_PT_INVALID_REG;
   1215     }
   1216     return 0;
   1217 }
   1218 
   1219 /* write Message Address register */
   1220 static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
   1221                                       XenPTReg *cfg_entry, uint32_t *val,
   1222                                       uint32_t dev_value, uint32_t valid_mask)
   1223 {
   1224     XenPTRegInfo *reg = cfg_entry->reg;
   1225     uint32_t writable_mask = 0;
   1226     uint32_t old_addr = *cfg_entry->ptr.word;
   1227     uint32_t *data = cfg_entry->ptr.word;
   1228 
   1229     /* modify emulate register */
   1230     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
   1231     *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
   1232     s->msi->addr_lo = *data;
   1233 
   1234     /* create value for writing to I/O device register */
   1235     *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
   1236 
   1237     /* update MSI */
   1238     if (*data != old_addr) {
   1239         if (s->msi->mapped) {
   1240             xen_pt_msi_update(s);
   1241         }
   1242     }
   1243 
   1244     return 0;
   1245 }
   1246 /* write Message Upper Address register */
   1247 static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
   1248                                       XenPTReg *cfg_entry, uint32_t *val,
   1249                                       uint32_t dev_value, uint32_t valid_mask)
   1250 {
   1251     XenPTRegInfo *reg = cfg_entry->reg;
   1252     uint32_t writable_mask = 0;
   1253     uint32_t old_addr = *cfg_entry->ptr.word;
   1254     uint32_t *data = cfg_entry->ptr.word;
   1255 
   1256     /* check whether the type is 64 bit or not */
   1257     if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
   1258         XEN_PT_ERR(&s->dev,
   1259                    "Can't write to the upper address without 64 bit support\n");
   1260         return -1;
   1261     }
   1262 
   1263     /* modify emulate register */
   1264     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
   1265     *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
   1266     /* update the msi_info too */
   1267     s->msi->addr_hi = *data;
   1268 
   1269     /* create value for writing to I/O device register */
   1270     *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
   1271 
   1272     /* update MSI */
   1273     if (*data != old_addr) {
   1274         if (s->msi->mapped) {
   1275             xen_pt_msi_update(s);
   1276         }
   1277     }
   1278 
   1279     return 0;
   1280 }
   1281 
   1282 
   1283 /* this function will be called twice (for 32 bit and 64 bit type) */
   1284 /* write Message Data register */
   1285 static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
   1286                                     XenPTReg *cfg_entry, uint16_t *val,
   1287                                     uint16_t dev_value, uint16_t valid_mask)
   1288 {
   1289     XenPTRegInfo *reg = cfg_entry->reg;
   1290     XenPTMSI *msi = s->msi;
   1291     uint16_t writable_mask = 0;
   1292     uint16_t old_data = *cfg_entry->ptr.half_word;
   1293     uint32_t offset = reg->offset;
   1294     uint16_t *data = cfg_entry->ptr.half_word;
   1295 
    1296     /* check whether the offset matches the type or not */
   1297     if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) {
   1298         /* exit I/O emulator */
   1299         XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
   1300         return -1;
   1301     }
   1302 
   1303     /* modify emulate register */
   1304     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
   1305     *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
   1306     /* update the msi_info too */
   1307     msi->data = *data;
   1308 
   1309     /* create value for writing to I/O device register */
   1310     *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
   1311 
   1312     /* update MSI */
   1313     if (*data != old_data) {
   1314         if (msi->mapped) {
   1315             xen_pt_msi_update(s);
   1316         }
   1317     }
   1318 
   1319     return 0;
   1320 }
   1321 
   1322 static int xen_pt_mask_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
   1323                                  uint32_t *val, uint32_t dev_value,
   1324                                  uint32_t valid_mask)
   1325 {
   1326     int rc;
   1327 
   1328     rc = xen_pt_long_reg_write(s, cfg_entry, val, dev_value, valid_mask);
   1329     if (rc) {
   1330         return rc;
   1331     }
   1332 
   1333     s->msi->mask = *val;
   1334 
   1335     return 0;
   1336 }
   1337 
   1338 /* MSI Capability Structure reg static information table */
   1339 static XenPTRegInfo xen_pt_emu_reg_msi[] = {
   1340     /* Next Pointer reg */
   1341     {
   1342         .offset     = PCI_CAP_LIST_NEXT,
   1343         .size       = 1,
   1344         .init_val   = 0x00,
   1345         .ro_mask    = 0xFF,
   1346         .emu_mask   = 0xFF,
   1347         .init       = xen_pt_ptr_reg_init,
   1348         .u.b.read   = xen_pt_byte_reg_read,
   1349         .u.b.write  = xen_pt_byte_reg_write,
   1350     },
   1351     /* Message Control reg */
   1352     {
   1353         .offset     = PCI_MSI_FLAGS,
   1354         .size       = 2,
   1355         .init_val   = 0x0000,
   1356         .res_mask   = 0xFE00,
   1357         .ro_mask    = 0x018E,
   1358         .emu_mask   = 0x017E,
   1359         .init       = xen_pt_msgctrl_reg_init,
   1360         .u.w.read   = xen_pt_word_reg_read,
   1361         .u.w.write  = xen_pt_msgctrl_reg_write,
   1362     },
   1363     /* Message Address reg */
   1364     {
   1365         .offset     = PCI_MSI_ADDRESS_LO,
   1366         .size       = 4,
   1367         .init_val   = 0x00000000,
   1368         .ro_mask    = 0x00000003,
   1369         .emu_mask   = 0xFFFFFFFF,
   1370         .init       = xen_pt_common_reg_init,
   1371         .u.dw.read  = xen_pt_long_reg_read,
   1372         .u.dw.write = xen_pt_msgaddr32_reg_write,
   1373     },
   1374     /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
   1375     {
   1376         .offset     = PCI_MSI_ADDRESS_HI,
   1377         .size       = 4,
   1378         .init_val   = 0x00000000,
   1379         .ro_mask    = 0x00000000,
   1380         .emu_mask   = 0xFFFFFFFF,
   1381         .init       = xen_pt_msgaddr64_reg_init,
   1382         .u.dw.read  = xen_pt_long_reg_read,
   1383         .u.dw.write = xen_pt_msgaddr64_reg_write,
   1384     },
   1385     /* Message Data reg (16 bits of data for 32-bit devices) */
   1386     {
   1387         .offset     = PCI_MSI_DATA_32,
   1388         .size       = 2,
   1389         .init_val   = 0x0000,
   1390         .ro_mask    = 0x0000,
   1391         .emu_mask   = 0xFFFF,
   1392         .init       = xen_pt_msgdata_reg_init,
   1393         .u.w.read   = xen_pt_word_reg_read,
   1394         .u.w.write  = xen_pt_msgdata_reg_write,
   1395     },
   1396     /* Message Data reg (16 bits of data for 64-bit devices) */
   1397     {
   1398         .offset     = PCI_MSI_DATA_64,
   1399         .size       = 2,
   1400         .init_val   = 0x0000,
   1401         .ro_mask    = 0x0000,
   1402         .emu_mask   = 0xFFFF,
   1403         .init       = xen_pt_msgdata_reg_init,
   1404         .u.w.read   = xen_pt_word_reg_read,
   1405         .u.w.write  = xen_pt_msgdata_reg_write,
   1406     },
   1407     /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
   1408     {
   1409         .offset     = PCI_MSI_MASK_32,
   1410         .size       = 4,
   1411         .init_val   = 0x00000000,
   1412         .ro_mask    = 0xFFFFFFFF,
   1413         .emu_mask   = 0xFFFFFFFF,
   1414         .init       = xen_pt_mask_reg_init,
   1415         .u.dw.read  = xen_pt_long_reg_read,
   1416         .u.dw.write = xen_pt_mask_reg_write,
   1417     },
   1418     /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
   1419     {
   1420         .offset     = PCI_MSI_MASK_64,
   1421         .size       = 4,
   1422         .init_val   = 0x00000000,
   1423         .ro_mask    = 0xFFFFFFFF,
   1424         .emu_mask   = 0xFFFFFFFF,
   1425         .init       = xen_pt_mask_reg_init,
   1426         .u.dw.read  = xen_pt_long_reg_read,
   1427         .u.dw.write = xen_pt_mask_reg_write,
   1428     },
   1429     /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
   1430     {
   1431         .offset     = PCI_MSI_MASK_32 + 4,
   1432         .size       = 4,
   1433         .init_val   = 0x00000000,
   1434         .ro_mask    = 0xFFFFFFFF,
   1435         .emu_mask   = 0x00000000,
   1436         .init       = xen_pt_pending_reg_init,
   1437         .u.dw.read  = xen_pt_long_reg_read,
   1438         .u.dw.write = xen_pt_long_reg_write,
   1439     },
   1440     /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
   1441     {
   1442         .offset     = PCI_MSI_MASK_64 + 4,
   1443         .size       = 4,
   1444         .init_val   = 0x00000000,
   1445         .ro_mask    = 0xFFFFFFFF,
   1446         .emu_mask   = 0x00000000,
   1447         .init       = xen_pt_pending_reg_init,
   1448         .u.dw.read  = xen_pt_long_reg_read,
   1449         .u.dw.write = xen_pt_long_reg_write,
   1450     },
   1451     {
   1452         .size = 0,
   1453     },
   1454 };
   1455 
   1456 
   1457 /**************************************
   1458  * MSI-X Capability
   1459  */
   1460 
   1461 /* Message Control register for MSI-X */
   1462 static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
   1463                                     XenPTRegInfo *reg, uint32_t real_offset,
   1464                                     uint32_t *data)
   1465 {
   1466     uint16_t reg_field;
   1467     int rc;
   1468 
   1469     /* use I/O device register's value as initial value */
   1470     rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
   1471     if (rc) {
   1472         return rc;
   1473     }
   1474     if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
   1475         XEN_PT_LOG(&s->dev, "MSIX already enabled, disabling it first\n");
   1476         xen_host_pci_set_word(&s->real_device, real_offset,
   1477                               reg_field & ~PCI_MSIX_FLAGS_ENABLE);
   1478     }
   1479 
   1480     s->msix->ctrl_offset = real_offset;
   1481 
   1482     *data = reg->init_val;
   1483     return 0;
   1484 }
   1485 static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
   1486                                      XenPTReg *cfg_entry, uint16_t *val,
   1487                                      uint16_t dev_value, uint16_t valid_mask)
   1488 {
   1489     XenPTRegInfo *reg = cfg_entry->reg;
   1490     uint16_t writable_mask = 0;
   1491     uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
   1492     int debug_msix_enabled_old;
   1493     uint16_t *data = cfg_entry->ptr.half_word;
   1494 
   1495     /* modify emulate register */
   1496     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
   1497     *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
   1498 
   1499     /* create value for writing to I/O device register */
   1500     *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
   1501 
   1502     /* update MSI-X */
   1503     if ((*val & PCI_MSIX_FLAGS_ENABLE)
   1504         && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
   1505         xen_pt_msix_update(s);
   1506     } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) {
   1507         xen_pt_msix_disable(s);
   1508     }
   1509 
   1510     s->msix->maskall = *val & PCI_MSIX_FLAGS_MASKALL;
   1511 
   1512     debug_msix_enabled_old = s->msix->enabled;
   1513     s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
   1514     if (s->msix->enabled != debug_msix_enabled_old) {
   1515         XEN_PT_LOG(&s->dev, "%s MSI-X\n",
   1516                    s->msix->enabled ? "enable" : "disable");
   1517     }
   1518 
   1519     return 0;
   1520 }
   1521 
   1522 /* MSI-X Capability Structure reg static information table */
   1523 static XenPTRegInfo xen_pt_emu_reg_msix[] = {
   1524     /* Next Pointer reg */
   1525     {
   1526         .offset     = PCI_CAP_LIST_NEXT,
   1527         .size       = 1,
   1528         .init_val   = 0x00,
   1529         .ro_mask    = 0xFF,
   1530         .emu_mask   = 0xFF,
   1531         .init       = xen_pt_ptr_reg_init,
   1532         .u.b.read   = xen_pt_byte_reg_read,
   1533         .u.b.write  = xen_pt_byte_reg_write,
   1534     },
   1535     /* Message Control reg */
   1536     {
   1537         .offset     = PCI_MSI_FLAGS,
   1538         .size       = 2,
   1539         .init_val   = 0x0000,
   1540         .res_mask   = 0x3800,
   1541         .ro_mask    = 0x07FF,
   1542         .emu_mask   = 0x0000,
   1543         .init       = xen_pt_msixctrl_reg_init,
   1544         .u.w.read   = xen_pt_word_reg_read,
   1545         .u.w.write  = xen_pt_msixctrl_reg_write,
   1546     },
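            /*
             * Note: the Table Offset/BIR and PBA Offset/BIR dwords at offsets 4
             * and 8 have no per-register entries here; they fall within the
             * group's 0x0C size, and the MSI-X table/PBA themselves are handled
             * separately (see xen_pt_msix_size_init() below).
             */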
   1547     {
   1548         .size = 0,
   1549     },
   1550 };
   1551 
   1552 static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = {
   1553     /* Intel IGFX OpRegion reg */
   1554     {
   1555         .offset     = 0x0,
   1556         .size       = 4,
   1557         .init_val   = 0,
   1558         .emu_mask   = 0xFFFFFFFF,
   1559         .u.dw.read   = xen_pt_intel_opregion_read,
   1560         .u.dw.write  = xen_pt_intel_opregion_write,
   1561     },
   1562     {
   1563         .size = 0,
   1564     },
   1565 };
   1566 
   1567 /****************************
   1568  * Capabilities
   1569  */
   1570 
   1571 /* capability structure register group size functions */
   1572 
   1573 static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
   1574                                     const XenPTRegGroupInfo *grp_reg,
   1575                                     uint32_t base_offset, uint8_t *size)
   1576 {
   1577     *size = grp_reg->grp_size;
   1578     return 0;
   1579 }
   1580 /* get Vendor Specific Capability Structure register group size */
   1581 static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
   1582                                    const XenPTRegGroupInfo *grp_reg,
   1583                                    uint32_t base_offset, uint8_t *size)
   1584 {
   1585     return xen_host_pci_get_byte(&s->real_device, base_offset + 0x02, size);
   1586 }
   1587 /* get PCI Express Capability Structure register group size */
   1588 static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
   1589                                  const XenPTRegGroupInfo *grp_reg,
   1590                                  uint32_t base_offset, uint8_t *size)
   1591 {
   1592     PCIDevice *d = PCI_DEVICE(s);
   1593     uint8_t version = get_capability_version(s, base_offset);
   1594     uint8_t type = get_device_type(s, base_offset);
   1595     uint8_t pcie_size = 0;
   1596 
   1597 
   1598     /* calculate size depending on the capability version and device/port type */
   1599     /* in case of PCI Express Base Specification Rev 1.x */
   1600     if (version == 1) {
   1601         /* The PCI Express Capabilities, Device Capabilities, and Device
   1602          * Status/Control registers are required for all PCI Express devices.
   1603          * The Link Capabilities and Link Status/Control are required for all
   1604          * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
   1605          * are not required to implement registers other than those listed
   1606          * above and terminate the capability structure.
   1607          */
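                /*
                 * Hence for v1, 0x14 covers up through the Link Control/Status
                 * registers, while 0x0C (Root Complex Integrated Endpoints have
                 * no link) stops after the Device Control/Status registers.
                 */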
   1608         switch (type) {
   1609         case PCI_EXP_TYPE_ENDPOINT:
   1610         case PCI_EXP_TYPE_LEG_END:
   1611             pcie_size = 0x14;
   1612             break;
   1613         case PCI_EXP_TYPE_RC_END:
   1614             /* has no link */
   1615             pcie_size = 0x0C;
   1616             break;
   1617             /* only EndPoint passthrough is supported */
   1618         case PCI_EXP_TYPE_ROOT_PORT:
   1619         case PCI_EXP_TYPE_UPSTREAM:
   1620         case PCI_EXP_TYPE_DOWNSTREAM:
   1621         case PCI_EXP_TYPE_PCI_BRIDGE:
   1622         case PCI_EXP_TYPE_PCIE_BRIDGE:
   1623         case PCI_EXP_TYPE_RC_EC:
   1624         default:
   1625             XEN_PT_ERR(d, "Unsupported device/port type 0x%x.\n", type);
   1626             return -1;
   1627         }
   1628     }
   1629     /* in case of PCI Express Base Specification Rev 2.0 */
   1630     else if (version == 2) {
   1631         switch (type) {
   1632         case PCI_EXP_TYPE_ENDPOINT:
   1633         case PCI_EXP_TYPE_LEG_END:
   1634         case PCI_EXP_TYPE_RC_END:
   1635             /* For Functions that do not implement the registers,
   1636              * these spaces must be hardwired to 0b.
   1637              */
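                    /* 0x3C spans the whole v2 structure, up through the Slot
                     * Control 2 / Slot Status 2 registers. */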
   1638             pcie_size = 0x3C;
   1639             break;
   1640             /* only EndPoint passthrough is supported */
   1641         case PCI_EXP_TYPE_ROOT_PORT:
   1642         case PCI_EXP_TYPE_UPSTREAM:
   1643         case PCI_EXP_TYPE_DOWNSTREAM:
   1644         case PCI_EXP_TYPE_PCI_BRIDGE:
   1645         case PCI_EXP_TYPE_PCIE_BRIDGE:
   1646         case PCI_EXP_TYPE_RC_EC:
   1647         default:
   1648             XEN_PT_ERR(d, "Unsupported device/port type 0x%x.\n", type);
   1649             return -1;
   1650         }
   1651     } else {
   1652         XEN_PT_ERR(d, "Unsupported capability version 0x%x.\n", version);
   1653         return -1;
   1654     }
   1655 
   1656     *size = pcie_size;
   1657     return 0;
   1658 }
   1659 /* get MSI Capability Structure register group size */
   1660 static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
   1661                                 const XenPTRegGroupInfo *grp_reg,
   1662                                 uint32_t base_offset, uint8_t *size)
   1663 {
   1664     uint16_t msg_ctrl = 0;
   1665     uint8_t msi_size = 0xa;
   1666     int rc;
   1667 
   1668     rc = xen_host_pci_get_word(&s->real_device, base_offset + PCI_MSI_FLAGS,
   1669                                &msg_ctrl);
   1670     if (rc) {
   1671         return rc;
   1672     }
   1673     /* check for 64-bit addressing and per-vector masking capabilities */
   1674     if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
   1675         msi_size += 4;
   1676     }
   1677     if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
   1678         msi_size += 10;
   1679     }
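            /*
             * For reference, the resulting sizes match the standard MSI
             * capability layouts: 0x0a (32-bit), 0x0e (64-bit), 0x14 (32-bit
             * with per-vector masking), 0x18 (64-bit with per-vector masking).
             */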
   1680 
   1681     s->msi = g_new0(XenPTMSI, 1);
   1682     s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
   1683 
   1684     *size = msi_size;
   1685     return 0;
   1686 }
   1687 /* get MSI-X Capability Structure register group size */
   1688 static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
   1689                                  const XenPTRegGroupInfo *grp_reg,
   1690                                  uint32_t base_offset, uint8_t *size)
   1691 {
   1692     int rc = 0;
   1693 
   1694     rc = xen_pt_msix_init(s, base_offset);
   1695 
   1696     if (rc < 0) {
   1697         XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n");
   1698         return rc;
   1699     }
   1700 
   1701     *size = grp_reg->grp_size;
   1702     return 0;
   1703 }
   1704 
   1705 
   1706 static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
   1707     /* Header Type0 reg group */
   1708     {
   1709         .grp_id      = 0xFF,
   1710         .grp_type    = XEN_PT_GRP_TYPE_EMU,
   1711         .grp_size    = 0x40,
   1712         .size_init   = xen_pt_reg_grp_size_init,
   1713         .emu_regs = xen_pt_emu_reg_header0,
   1714     },
   1715     /* PCI PowerManagement Capability reg group */
   1716     {
   1717         .grp_id      = PCI_CAP_ID_PM,
   1718         .grp_type    = XEN_PT_GRP_TYPE_EMU,
   1719         .grp_size    = PCI_PM_SIZEOF,
   1720         .size_init   = xen_pt_reg_grp_size_init,
   1721         .emu_regs = xen_pt_emu_reg_pm,
   1722     },
   1723     /* AGP Capability Structure reg group */
   1724     {
   1725         .grp_id     = PCI_CAP_ID_AGP,
   1726         .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
   1727         .grp_size   = 0x30,
   1728         .size_init  = xen_pt_reg_grp_size_init,
   1729     },
   1730     /* Vital Product Data Capability Structure reg group */
   1731     {
   1732         .grp_id      = PCI_CAP_ID_VPD,
   1733         .grp_type    = XEN_PT_GRP_TYPE_EMU,
   1734         .grp_size    = 0x08,
   1735         .size_init   = xen_pt_reg_grp_size_init,
   1736         .emu_regs = xen_pt_emu_reg_vpd,
   1737     },
   1738     /* Slot Identification reg group */
   1739     {
   1740         .grp_id     = PCI_CAP_ID_SLOTID,
   1741         .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
   1742         .grp_size   = 0x04,
   1743         .size_init  = xen_pt_reg_grp_size_init,
   1744     },
   1745     /* MSI Capability Structure reg group */
   1746     {
   1747         .grp_id      = PCI_CAP_ID_MSI,
   1748         .grp_type    = XEN_PT_GRP_TYPE_EMU,
   1749         .grp_size    = 0xFF,
   1750         .size_init   = xen_pt_msi_size_init,
   1751         .emu_regs = xen_pt_emu_reg_msi,
   1752     },
   1753     /* PCI-X Capabilities List Item reg group */
   1754     {
   1755         .grp_id     = PCI_CAP_ID_PCIX,
   1756         .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
   1757         .grp_size   = 0x18,
   1758         .size_init  = xen_pt_reg_grp_size_init,
   1759     },
   1760     /* Vendor Specific Capability Structure reg group */
   1761     {
   1762         .grp_id      = PCI_CAP_ID_VNDR,
   1763         .grp_type    = XEN_PT_GRP_TYPE_EMU,
   1764         .grp_size    = 0xFF,
   1765         .size_init   = xen_pt_vendor_size_init,
   1766         .emu_regs = xen_pt_emu_reg_vendor,
   1767     },
   1768     /* SHPC Capability List Item reg group */
   1769     {
   1770         .grp_id     = PCI_CAP_ID_SHPC,
   1771         .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
   1772         .grp_size   = 0x08,
   1773         .size_init  = xen_pt_reg_grp_size_init,
   1774     },
   1775     /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
   1776     {
   1777         .grp_id     = PCI_CAP_ID_SSVID,
   1778         .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
   1779         .grp_size   = 0x08,
   1780         .size_init  = xen_pt_reg_grp_size_init,
   1781     },
   1782     /* AGP 8x Capability Structure reg group */
   1783     {
   1784         .grp_id     = PCI_CAP_ID_AGP3,
   1785         .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
   1786         .grp_size   = 0x30,
   1787         .size_init  = xen_pt_reg_grp_size_init,
   1788     },
   1789     /* PCI Express Capability Structure reg group */
   1790     {
   1791         .grp_id      = PCI_CAP_ID_EXP,
   1792         .grp_type    = XEN_PT_GRP_TYPE_EMU,
   1793         .grp_size    = 0xFF,
   1794         .size_init   = xen_pt_pcie_size_init,
   1795         .emu_regs = xen_pt_emu_reg_pcie,
   1796     },
   1797     /* MSI-X Capability Structure reg group */
   1798     {
   1799         .grp_id      = PCI_CAP_ID_MSIX,
   1800         .grp_type    = XEN_PT_GRP_TYPE_EMU,
   1801         .grp_size    = 0x0C,
   1802         .size_init   = xen_pt_msix_size_init,
   1803         .emu_regs = xen_pt_emu_reg_msix,
   1804     },
   1805     /* Intel IGD Opregion group */
   1806     {
   1807         .grp_id      = XEN_PCI_INTEL_OPREGION,
   1808         .grp_type    = XEN_PT_GRP_TYPE_EMU,
   1809         .grp_size    = 0x4,
   1810         .size_init   = xen_pt_reg_grp_size_init,
   1811         .emu_regs    = xen_pt_emu_reg_igd_opregion,
   1812     },
   1813     {
   1814         .grp_size = 0,
   1815     },
   1816 };
   1817 
   1818 /* initialize Capabilities Pointer or Next Pointer register */
   1819 static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
   1820                                XenPTRegInfo *reg, uint32_t real_offset,
   1821                                uint32_t *data)
   1822 {
   1823     int i, rc;
   1824     uint8_t reg_field;
   1825     uint8_t cap_id = 0;
   1826 
   1827     rc = xen_host_pci_get_byte(&s->real_device, real_offset, &reg_field);
   1828     if (rc) {
   1829         return rc;
   1830     }
   1831     /* find the offset of the next capability that we emulate; skip hidden or hardwired ones */
   1832     while (reg_field) {
   1833         for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
   1834             if (xen_pt_hide_dev_cap(&s->real_device,
   1835                                     xen_pt_emu_reg_grps[i].grp_id)) {
   1836                 continue;
   1837             }
   1838 
   1839             rc = xen_host_pci_get_byte(&s->real_device,
   1840                                        reg_field + PCI_CAP_LIST_ID, &cap_id);
   1841             if (rc) {
   1842                 XEN_PT_ERR(&s->dev, "Failed to read capability @0x%x (rc:%d)\n",
   1843                            reg_field + PCI_CAP_LIST_ID, rc);
   1844                 return rc;
   1845             }
   1846             if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
   1847                 if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
   1848                     goto out;
   1849                 }
   1850                 /* this capability is hardwired to 0, skip it and look for the next one */
   1851                 break;
   1852             }
   1853         }
   1854 
   1855         /* next capability */
   1856         rc = xen_host_pci_get_byte(&s->real_device,
   1857                                    reg_field + PCI_CAP_LIST_NEXT, &reg_field);
   1858         if (rc) {
   1859             return rc;
   1860         }
   1861     }
   1862 
   1863 out:
   1864     *data = reg_field;
   1865     return 0;
   1866 }
   1867 
   1868 
   1869 /*************
   1870  * Main
   1871  */
   1872 
   1873 static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
   1874 {
   1875     uint8_t id;
   1876     unsigned max_cap = XEN_PCI_CAP_MAX;
   1877     uint8_t pos = PCI_CAPABILITY_LIST;
   1878     uint8_t status = 0;
   1879 
   1880     if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
   1881         return 0;
   1882     }
   1883     if ((status & PCI_STATUS_CAP_LIST) == 0) {
   1884         return 0;
   1885     }
   1886 
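            /* bound the walk so a malformed (looping) capability chain cannot
             * keep us here forever */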
   1887     while (max_cap--) {
   1888         if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
   1889             break;
   1890         }
   1891         if (pos < PCI_CONFIG_HEADER_SIZE) {
   1892             break;
   1893         }
   1894 
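                /* capability pointers are dword-aligned; the bottom two bits are
                 * reserved, so mask them off */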
   1895         pos &= ~3;
   1896         if (xen_host_pci_get_byte(&s->real_device,
   1897                                   pos + PCI_CAP_LIST_ID, &id)) {
   1898             break;
   1899         }
   1900 
   1901         if (id == 0xff) {
   1902             break;
   1903         }
   1904         if (id == cap) {
   1905             return pos;
   1906         }
   1907 
   1908         pos += PCI_CAP_LIST_NEXT;
   1909     }
   1910     return 0;
   1911 }
   1912 
   1913 static void xen_pt_config_reg_init(XenPCIPassthroughState *s,
   1914                                    XenPTRegGroup *reg_grp, XenPTRegInfo *reg,
   1915                                    Error **errp)
   1916 {
   1917     XenPTReg *reg_entry;
   1918     uint32_t data = 0;
   1919     int rc = 0;
   1920 
   1921     reg_entry = g_new0(XenPTReg, 1);
   1922     reg_entry->reg = reg;
   1923 
   1924     if (reg->init) {
   1925         uint32_t host_mask, size_mask;
   1926         unsigned int offset;
   1927         uint32_t val;
   1928 
   1929         /* initialize emulate register */
   1930         rc = reg->init(s, reg_entry->reg,
   1931                        reg_grp->base_offset + reg->offset, &data);
   1932         if (rc < 0) {
   1933             g_free(reg_entry);
   1934             error_setg(errp, "Init emulate register fail");
   1935             return;
   1936         }
   1937         if (data == XEN_PT_INVALID_REG) {
   1938             /* free unused BAR register entry */
   1939             g_free(reg_entry);
   1940             return;
   1941         }
   1942         /* Sync up the data to dev.config */
   1943         offset = reg_grp->base_offset + reg->offset;
   1944         size_mask = 0xFFFFFFFF >> ((4 - reg->size) << 3);
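                /* e.g. a 2-byte register yields size_mask == 0x0000FFFF */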
   1945 
   1946         switch (reg->size) {
   1947         case 1: rc = xen_host_pci_get_byte(&s->real_device, offset, (uint8_t *)&val);
   1948                 break;
   1949         case 2: rc = xen_host_pci_get_word(&s->real_device, offset, (uint16_t *)&val);
   1950                 break;
   1951         case 4: rc = xen_host_pci_get_long(&s->real_device, offset, &val);
   1952                 break;
   1953         default: abort();
   1954         }
   1955         if (rc) {
   1956             /* Serious issues when we cannot read the host values! */
   1957             g_free(reg_entry);
   1958             error_setg(errp, "Cannot read host values");
   1959             return;
   1960         }
   1961         /* Bits set in emu_mask are the ones we emulate. dev.config shall
   1962          * contain the guest's emulated view - therefore we flip the mask
   1963          * to mask out the host values (which dev.config initially holds). */
   1964         host_mask = size_mask & ~reg->emu_mask;
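                /*
                 * Worked example with hypothetical values: emu_mask 0x000F on a
                 * 2-byte register gives host_mask == 0xFFF0, so the merge below
                 * keeps the low nibble from the emulated init value and takes
                 * everything else from the host device.
                 */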
   1965 
   1966         if ((data & host_mask) != (val & host_mask)) {
   1967             uint32_t new_val;
   1968             /*
   1969              * Merge the emulated bits (data) with the host bits (val)
   1970              * and mask out the bits past size to enable restoration
   1971              * of the proper value for logging below.
   1972              */
   1973             new_val = XEN_PT_MERGE_VALUE(val, data, host_mask) & size_mask;
   1974             /* Leave the host and emulated values past the size intact. We do not
   1975              * care about them, as we write at reg->size granularity, but this way
   1976              * the logging below shows the proper value. */
   1977             new_val |= ((val | data)) & ~size_mask;
   1978             XEN_PT_LOG(&s->dev,"Offset 0x%04x mismatch! Emulated=0x%04x, host=0x%04x, syncing to 0x%04x.\n",
   1979                        offset, data, val, new_val);
   1980             val = new_val;
   1981         } else
   1982             val = data;
   1983 
   1984         if (val & ~size_mask) {
   1985             error_setg(errp, "Offset 0x%04x:0x%04x expands past"
   1986                     " register size (%d)", offset, val, reg->size);
   1987             g_free(reg_entry);
   1988             return;
   1989         }
   1990         /* This could be just pci_set_long as we don't modify the bits
   1991          * past reg->size, but in case this routine runs in parallel or the
   1992          * init value is larger, we do not want to overwrite registers. */
   1993         switch (reg->size) {
   1994         case 1: pci_set_byte(s->dev.config + offset, (uint8_t)val);
   1995                 break;
   1996         case 2: pci_set_word(s->dev.config + offset, (uint16_t)val);
   1997                 break;
   1998         case 4: pci_set_long(s->dev.config + offset, val);
   1999                 break;
   2000         default: abort();
   2001         }
   2002         /* point the register entry at its backing storage in dev.config */
   2003         reg_entry->ptr.byte = s->dev.config + offset;
   2004 
   2005     }
   2006     /* add the register entry to the list */
   2007     QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);
   2008 }
   2009 
   2010 void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp)
   2011 {
   2012     ERRP_GUARD();
   2013     int i, rc;
   2014 
   2015     QLIST_INIT(&s->reg_grps);
   2016 
   2017     for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
   2018         uint32_t reg_grp_offset = 0;
   2019         XenPTRegGroup *reg_grp_entry = NULL;
   2020 
   2021         if (xen_pt_emu_reg_grps[i].grp_id != 0xFF
   2022             && xen_pt_emu_reg_grps[i].grp_id != XEN_PCI_INTEL_OPREGION) {
   2023             if (xen_pt_hide_dev_cap(&s->real_device,
   2024                                     xen_pt_emu_reg_grps[i].grp_id)) {
   2025                 continue;
   2026             }
   2027 
   2028             reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);
   2029 
   2030             if (!reg_grp_offset) {
   2031                 continue;
   2032             }
   2033         }
   2034 
   2035         if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) {
   2036             if (!is_igd_vga_passthrough(&s->real_device) ||
   2037                 s->real_device.vendor_id != PCI_VENDOR_ID_INTEL) {
   2038                 continue;
   2039             }
   2040             /*
   2041              * By default we trap up to 0x40 in the config space.
   2042              * If an Intel device is passed through we need to trap up to 0xfc,
   2043              * therefore the size should be 0xff.
   2044              */
   2045             reg_grp_offset = XEN_PCI_INTEL_OPREGION;
   2046         }
   2047 
   2048         reg_grp_entry = g_new0(XenPTRegGroup, 1);
   2049         QLIST_INIT(&reg_grp_entry->reg_tbl_list);
   2050         QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);
   2051 
   2052         reg_grp_entry->base_offset = reg_grp_offset;
   2053         reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
   2054         if (xen_pt_emu_reg_grps[i].size_init) {
   2055             /* get register group size */
   2056             rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
   2057                                                   reg_grp_offset,
   2058                                                   &reg_grp_entry->size);
   2059             if (rc < 0) {
   2060                 error_setg(errp, "Failed to initialize %d/%zu, type = 0x%x,"
   2061                            " rc: %d", i, ARRAY_SIZE(xen_pt_emu_reg_grps),
   2062                            xen_pt_emu_reg_grps[i].grp_type, rc);
   2063                 xen_pt_config_delete(s);
   2064                 return;
   2065             }
   2066         }
   2067 
   2068         if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
   2069             if (xen_pt_emu_reg_grps[i].emu_regs) {
   2070                 int j = 0;
   2071                 XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;
   2072 
   2073                 /* initialize capability register */
   2074                 for (j = 0; regs->size != 0; j++, regs++) {
   2075                     xen_pt_config_reg_init(s, reg_grp_entry, regs, errp);
   2076                     if (*errp) {
   2077                         error_append_hint(errp, "Failed to init register %d"
   2078                                           " offsets 0x%x in grp_type = 0x%x (%d/%zu)",
   2079                                           j,
   2080                                           regs->offset,
   2081                                           xen_pt_emu_reg_grps[i].grp_type,
   2082                                           i, ARRAY_SIZE(xen_pt_emu_reg_grps));
   2083                         xen_pt_config_delete(s);
   2084                         return;
   2085                     }
   2086                 }
   2087             }
   2088         }
   2089     }
   2090 }
   2091 
   2092 /* delete all emulated registers */
   2093 void xen_pt_config_delete(XenPCIPassthroughState *s)
   2094 {
   2095     struct XenPTRegGroup *reg_group, *next_grp;
   2096     struct XenPTReg *reg, *next_reg;
   2097 
   2098     /* free MSI/MSI-X info table */
   2099     if (s->msix) {
   2100         xen_pt_msix_unmap(s);
   2101     }
   2102     g_free(s->msi);
   2103 
   2104     /* free all register group entries */
   2105     QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) {
   2106         /* free all register entries */
   2107         QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) {
   2108             QLIST_REMOVE(reg, entries);
   2109             g_free(reg);
   2110         }
   2111 
   2112         QLIST_REMOVE(reg_group, entries);
   2113         g_free(reg_group);
   2114     }
   2115 }