qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

vhost-user-gpu.c (17832B)


      1 /*
      2  * vhost-user GPU Device
      3  *
      4  * Copyright Red Hat, Inc. 2018
      5  *
      6  * Authors:
      7  *     Marc-André Lureau <marcandre.lureau@redhat.com>
      8  *
      9  * This work is licensed under the terms of the GNU GPL, version 2 or later.
     10  * See the COPYING file in the top-level directory.
     11  */
     12 
     13 #include "qemu/osdep.h"
     14 #include "qemu/sockets.h"
     15 #include "hw/qdev-properties.h"
     16 #include "hw/virtio/virtio-gpu.h"
     17 #include "chardev/char-fe.h"
     18 #include "qapi/error.h"
     19 #include "migration/blocker.h"
     20 
     21 typedef enum VhostUserGpuRequest {
     22     VHOST_USER_GPU_NONE = 0,
     23     VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
     24     VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
     25     VHOST_USER_GPU_GET_DISPLAY_INFO,
     26     VHOST_USER_GPU_CURSOR_POS,
     27     VHOST_USER_GPU_CURSOR_POS_HIDE,
     28     VHOST_USER_GPU_CURSOR_UPDATE,
     29     VHOST_USER_GPU_SCANOUT,
     30     VHOST_USER_GPU_UPDATE,
     31     VHOST_USER_GPU_DMABUF_SCANOUT,
     32     VHOST_USER_GPU_DMABUF_UPDATE,
     33 } VhostUserGpuRequest;
     34 
     35 typedef struct VhostUserGpuDisplayInfoReply {
     36     struct virtio_gpu_resp_display_info info;
     37 } VhostUserGpuDisplayInfoReply;
     38 
     39 typedef struct VhostUserGpuCursorPos {
     40     uint32_t scanout_id;
     41     uint32_t x;
     42     uint32_t y;
     43 } QEMU_PACKED VhostUserGpuCursorPos;
     44 
     45 typedef struct VhostUserGpuCursorUpdate {
     46     VhostUserGpuCursorPos pos;
     47     uint32_t hot_x;
     48     uint32_t hot_y;
     49     uint32_t data[64 * 64];
     50 } QEMU_PACKED VhostUserGpuCursorUpdate;
     51 
     52 typedef struct VhostUserGpuScanout {
     53     uint32_t scanout_id;
     54     uint32_t width;
     55     uint32_t height;
     56 } QEMU_PACKED VhostUserGpuScanout;
     57 
     58 typedef struct VhostUserGpuUpdate {
     59     uint32_t scanout_id;
     60     uint32_t x;
     61     uint32_t y;
     62     uint32_t width;
     63     uint32_t height;
     64     uint8_t data[];
     65 } QEMU_PACKED VhostUserGpuUpdate;
     66 
     67 typedef struct VhostUserGpuDMABUFScanout {
     68     uint32_t scanout_id;
     69     uint32_t x;
     70     uint32_t y;
     71     uint32_t width;
     72     uint32_t height;
     73     uint32_t fd_width;
     74     uint32_t fd_height;
     75     uint32_t fd_stride;
     76     uint32_t fd_flags;
     77     int fd_drm_fourcc;
     78 } QEMU_PACKED VhostUserGpuDMABUFScanout;
     79 
     80 typedef struct VhostUserGpuMsg {
     81     uint32_t request; /* VhostUserGpuRequest */
     82     uint32_t flags;
     83     uint32_t size; /* the following payload size */
     84     union {
     85         VhostUserGpuCursorPos cursor_pos;
     86         VhostUserGpuCursorUpdate cursor_update;
     87         VhostUserGpuScanout scanout;
     88         VhostUserGpuUpdate update;
     89         VhostUserGpuDMABUFScanout dmabuf_scanout;
     90         struct virtio_gpu_resp_display_info display_info;
     91         uint64_t u64;
     92     } payload;
     93 } QEMU_PACKED VhostUserGpuMsg;
     94 
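        /*
         * On the wire, a message is a 12-byte header (request, flags, size)
         * followed by "size" bytes of payload.
         */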
     95 static VhostUserGpuMsg m __attribute__ ((unused));
     96 #define VHOST_USER_GPU_HDR_SIZE \
     97     (sizeof(m.request) + sizeof(m.size) + sizeof(m.flags))
     98 
     99 #define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4
    100 
    101 static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);
    102 
    103 static void
    104 vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg)
    105 {
    106     VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos;
    107     struct virtio_gpu_scanout *s;
    108 
    109     if (pos->scanout_id >= g->parent_obj.conf.max_outputs) {
    110         return;
    111     }
    112     s = &g->parent_obj.scanout[pos->scanout_id];
    113 
    114     if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) {
    115         VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update;
    116         if (!s->current_cursor) {
    117             s->current_cursor = cursor_alloc(64, 64);
    118         }
    119 
    120         s->current_cursor->hot_x = up->hot_x;
    121         s->current_cursor->hot_y = up->hot_y;
    122 
    123         memcpy(s->current_cursor->data, up->data,
    124                64 * 64 * sizeof(uint32_t));
    125 
    126         dpy_cursor_define(s->con, s->current_cursor);
    127     }
    128 
    129     dpy_mouse_set(s->con, pos->x, pos->y,
    130                   msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE);
    131 }
    132 
    133 static void
    134 vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg)
    135 {
    136     qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg,
    137                       VHOST_USER_GPU_HDR_SIZE + msg->size);
    138 }
    139 
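        /*
         * Reply to a pending DMABUF_UPDATE so the backend knows the flush
         * is finished and can keep sending updates.
         */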
    140 static void
    141 vhost_user_gpu_unblock(VhostUserGPU *g)
    142 {
    143     VhostUserGpuMsg msg = {
    144         .request = VHOST_USER_GPU_DMABUF_UPDATE,
    145         .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
    146     };
    147 
    148     vhost_user_gpu_send_msg(g, &msg);
    149 }
    150 
    151 static void
    152 vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
    153 {
    154     QemuConsole *con = NULL;
    155     struct virtio_gpu_scanout *s;
    156 
    157     switch (msg->request) {
    158     case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: {
    159         VhostUserGpuMsg reply = {
    160             .request = msg->request,
    161             .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
    162             .size = sizeof(uint64_t),
    163         };
    164 
    165         vhost_user_gpu_send_msg(g, &reply);
    166         break;
    167     }
    168     case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: {
    169         break;
    170     }
    171     case VHOST_USER_GPU_GET_DISPLAY_INFO: {
    172         struct virtio_gpu_resp_display_info display_info = { {} };
    173         VhostUserGpuMsg reply = {
    174             .request = msg->request,
    175             .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
    176             .size = sizeof(struct virtio_gpu_resp_display_info),
    177         };
    178 
    179         display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    180         virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    181         memcpy(&reply.payload.display_info, &display_info,
    182                sizeof(display_info));
    183         vhost_user_gpu_send_msg(g, &reply);
    184         break;
    185     }
    186     case VHOST_USER_GPU_SCANOUT: {
    187         VhostUserGpuScanout *m = &msg->payload.scanout;
    188 
    189         if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
    190             return;
    191         }
    192 
    193         g->parent_obj.enable = 1;
    194         s = &g->parent_obj.scanout[m->scanout_id];
    195         con = s->con;
    196 
    197         if (m->width == 0) {
    198             dpy_gfx_replace_surface(con, NULL);
    199         } else {
    200             s->ds = qemu_create_displaysurface(m->width, m->height);
    201             /* replace surface on next update */
    202         }
    203 
    204         break;
    205     }
    206     case VHOST_USER_GPU_DMABUF_SCANOUT: {
    207         VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout;
    208         int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
    209         QemuDmaBuf *dmabuf;
    210 
    211         if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
    212             error_report("invalid scanout: %d", m->scanout_id);
    213             if (fd >= 0) {
    214                 close(fd);
    215             }
    216             break;
    217         }
    218 
    219         g->parent_obj.enable = 1;
    220         con = g->parent_obj.scanout[m->scanout_id].con;
    221         dmabuf = &g->dmabuf[m->scanout_id];
    222         if (dmabuf->fd >= 0) {
    223             close(dmabuf->fd);
    224             dmabuf->fd = -1;
    225         }
    226         dpy_gl_release_dmabuf(con, dmabuf);
    227         if (fd == -1) {
    228             dpy_gl_scanout_disable(con);
    229             break;
    230         }
    231         *dmabuf = (QemuDmaBuf) {
    232             .fd = fd,
    233             .width = m->fd_width,
    234             .height = m->fd_height,
    235             .stride = m->fd_stride,
    236             .fourcc = m->fd_drm_fourcc,
    237             .y0_top = m->fd_flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
    238         };
    239         dpy_gl_scanout_dmabuf(con, dmabuf);
    240         break;
    241     }
    242     case VHOST_USER_GPU_DMABUF_UPDATE: {
    243         VhostUserGpuUpdate *m = &msg->payload.update;
    244 
    245         if (m->scanout_id >= g->parent_obj.conf.max_outputs ||
    246             !g->parent_obj.scanout[m->scanout_id].con) {
    247             error_report("invalid scanout update: %d", m->scanout_id);
    248             vhost_user_gpu_unblock(g);
    249             break;
    250         }
    251 
    252         con = g->parent_obj.scanout[m->scanout_id].con;
    253         if (!console_has_gl(con)) {
    254             error_report("console doesn't support GL!");
    255             vhost_user_gpu_unblock(g);
    256             break;
    257         }
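                /*
                 * The backend now blocks until we reply; the reply is sent
                 * from vhost_user_gpu_gl_flushed() once the flush is done.
                 */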
    258         g->backend_blocked = true;
    259         dpy_gl_update(con, m->x, m->y, m->width, m->height);
    260         break;
    261     }
    262     case VHOST_USER_GPU_UPDATE: {
    263         VhostUserGpuUpdate *m = &msg->payload.update;
    264 
    265         if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
    266             break;
    267         }
    268         s = &g->parent_obj.scanout[m->scanout_id];
    269         con = s->con;
    270         pixman_image_t *image =
    271             pixman_image_create_bits(PIXMAN_x8r8g8b8,
    272                                      m->width,
    273                                      m->height,
    274                                      (uint32_t *)m->data,
    275                                      m->width * 4);
    276 
    277         pixman_image_composite(PIXMAN_OP_SRC,
    278                                image, NULL, s->ds->image,
    279                                0, 0, 0, 0, m->x, m->y, m->width, m->height);
    280 
    281         pixman_image_unref(image);
    282         if (qemu_console_surface(con) != s->ds) {
    283             dpy_gfx_replace_surface(con, s->ds);
    284         } else {
    285             dpy_gfx_update(con, m->x, m->y, m->width, m->height);
    286         }
    287         break;
    288     }
    289     default:
    290         g_warning("unhandled message %d %d", msg->request, msg->size);
    291     }
    292 
    293     if (con && qemu_console_is_gl_blocked(con)) {
    294         vhost_user_gpu_update_blocked(g, true);
    295     }
    296 }
    297 
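        /*
         * Read one message from the backend: the three header fields, then
         * "size" bytes of payload, and dispatch it to the cursor or display
         * handler.
         */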
    298 static void
    299 vhost_user_gpu_chr_read(void *opaque)
    300 {
    301     VhostUserGPU *g = opaque;
    302     VhostUserGpuMsg *msg = NULL;
    303     VhostUserGpuRequest request;
    304     uint32_t size, flags;
    305     int r;
    306 
    307     r = qemu_chr_fe_read_all(&g->vhost_chr,
    308                              (uint8_t *)&request, sizeof(uint32_t));
    309     if (r != sizeof(uint32_t)) {
    310         error_report("failed to read msg header: %d, %d", r, errno);
    311         goto end;
    312     }
    313 
    314     r = qemu_chr_fe_read_all(&g->vhost_chr,
    315                              (uint8_t *)&flags, sizeof(uint32_t));
    316     if (r != sizeof(uint32_t)) {
    317         error_report("failed to read msg flags");
    318         goto end;
    319     }
    320 
    321     r = qemu_chr_fe_read_all(&g->vhost_chr,
    322                              (uint8_t *)&size, sizeof(uint32_t));
    323     if (r != sizeof(uint32_t)) {
    324         error_report("failed to read msg size");
    325         goto end;
    326     }
    327 
    328     msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size);
    329 
    330     r = qemu_chr_fe_read_all(&g->vhost_chr,
    331                              (uint8_t *)&msg->payload, size);
    332     if (r != size) {
    333         error_report("failed to read msg payload %d != %d", r, size);
    334         goto end;
    335     }
    336 
    337     msg->request = request;
    338     msg->flags = flags;
    339     msg->size = size;
    340 
    341     if (request == VHOST_USER_GPU_CURSOR_UPDATE ||
    342         request == VHOST_USER_GPU_CURSOR_POS ||
    343         request == VHOST_USER_GPU_CURSOR_POS_HIDE) {
    344         vhost_user_gpu_handle_cursor(g, msg);
    345     } else {
    346         vhost_user_gpu_handle_display(g, msg);
    347     }
    348 
    349 end:
    350     g_free(msg);
    351 }
    352 
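        /*
         * Suspend (blocked) or resume reading from the backend socket by
         * installing or removing the fd read handler.
         */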
    353 static void
    354 vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked)
    355 {
    356     qemu_set_fd_handler(g->vhost_gpu_fd,
    357                         blocked ? NULL : vhost_user_gpu_chr_read, NULL, g);
    358 }
    359 
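        /*
         * Called once the console has finished flushing: send the deferred
         * DMABUF_UPDATE reply and resume reading backend messages.
         */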
    360 static void
    361 vhost_user_gpu_gl_flushed(VirtIOGPUBase *b)
    362 {
    363     VhostUserGPU *g = VHOST_USER_GPU(b);
    364 
    365     if (g->backend_blocked) {
    366         vhost_user_gpu_unblock(VHOST_USER_GPU(g));
    367         g->backend_blocked = false;
    368     }
    369 
    370     vhost_user_gpu_update_blocked(VHOST_USER_GPU(g), false);
    371 }
    372 
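        /*
         * Create a socket pair: one end is wrapped in a chardev and read by
         * vhost_user_gpu_chr_read(), the other is handed to the vhost-user
         * backend so it can send cursor and display requests back here.
         */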
    373 static bool
    374 vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp)
    375 {
    376     Chardev *chr;
    377     int sv[2];
    378 
    379     if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
    380         error_setg_errno(errp, errno, "socketpair() failed");
    381         return false;
    382     }
    383 
    384     chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET));
    385     if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) {
    386         error_setg(errp, "Failed to make socket chardev");
    387         goto err;
    388     }
    389     if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) {
    390         goto err;
    391     }
    392     if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) {
    393         error_setg(errp, "Failed to set vhost-user-gpu socket");
    394         qemu_chr_fe_deinit(&g->vhost_chr, false);
    395         goto err;
    396     }
    397 
    398     g->vhost_gpu_fd = sv[0];
    399     vhost_user_gpu_update_blocked(g, false);
    400     close(sv[1]);
    401     return true;
    402 
    403 err:
    404     close(sv[0]);
    405     close(sv[1]);
    406     if (chr) {
    407         object_unref(OBJECT(chr));
    408     }
    409     return false;
    410 }
    411 
    412 static void
    413 vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data)
    414 {
    415     VhostUserGPU *g = VHOST_USER_GPU(vdev);
    416     VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    417     struct virtio_gpu_config *vgconfig =
    418         (struct virtio_gpu_config *)config_data;
    419     Error *local_err = NULL;
    420     int ret;
    421 
    422     memset(config_data, 0, sizeof(struct virtio_gpu_config));
    423 
    424     ret = vhost_dev_get_config(&g->vhost->dev,
    425                                config_data, sizeof(struct virtio_gpu_config),
    426                                &local_err);
    427     if (ret) {
    428         error_report_err(local_err);
    429         return;
    430     }
    431 
    432     /* those fields are managed by qemu */
    433     vgconfig->num_scanouts = b->virtio_config.num_scanouts;
    434     vgconfig->events_read = b->virtio_config.events_read;
    435     vgconfig->events_clear = b->virtio_config.events_clear;
    436 }
    437 
    438 static void
    439 vhost_user_gpu_set_config(VirtIODevice *vdev,
    440                           const uint8_t *config_data)
    441 {
    442     VhostUserGPU *g = VHOST_USER_GPU(vdev);
    443     VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    444     const struct virtio_gpu_config *vgconfig =
    445         (const struct virtio_gpu_config *)config_data;
    446     int ret;
    447 
    448     if (vgconfig->events_clear) {
    449         b->virtio_config.events_read &= ~vgconfig->events_clear;
    450     }
    451 
    452     ret = vhost_dev_set_config(&g->vhost->dev, config_data,
    453                                0, sizeof(struct virtio_gpu_config),
    454                                VHOST_SET_CONFIG_TYPE_MASTER);
    455     if (ret) {
    456         error_report("vhost-user-gpu: set device config space failed");
    457         return;
    458     }
    459 }
    460 
    461 static void
    462 vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
    463 {
    464     VhostUserGPU *g = VHOST_USER_GPU(vdev);
    465     Error *err = NULL;
    466 
    467     if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) {
    468         if (!vhost_user_gpu_do_set_socket(g, &err)) {
    469             error_report_err(err);
    470             return;
    471         }
    472         vhost_user_backend_start(g->vhost);
    473     } else {
    474         /* unblock any wait and stop processing */
    475         if (g->vhost_gpu_fd != -1) {
    476             vhost_user_gpu_update_blocked(g, true);
    477             qemu_chr_fe_deinit(&g->vhost_chr, true);
    478             g->vhost_gpu_fd = -1;
    479         }
    480         vhost_user_backend_stop(g->vhost);
    481     }
    482 }
    483 
    484 static bool
    485 vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
    486 {
    487     VhostUserGPU *g = VHOST_USER_GPU(vdev);
    488 
    489     return vhost_virtqueue_pending(&g->vhost->dev, idx);
    490 }
    491 
    492 static void
    493 vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
    494 {
    495     VhostUserGPU *g = VHOST_USER_GPU(vdev);
    496 
    497     vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
    498 }
    499 
    500 static void
    501 vhost_user_gpu_instance_init(Object *obj)
    502 {
    503     VhostUserGPU *g = VHOST_USER_GPU(obj);
    504 
    505     g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
    506     object_property_add_alias(obj, "chardev",
    507                               OBJECT(g->vhost), "chardev");
    508 }
    509 
    510 static void
    511 vhost_user_gpu_instance_finalize(Object *obj)
    512 {
    513     VhostUserGPU *g = VHOST_USER_GPU(obj);
    514 
    515     object_unref(OBJECT(g->vhost));
    516 }
    517 
    518 static void
    519 vhost_user_gpu_reset(VirtIODevice *vdev)
    520 {
    521     VhostUserGPU *g = VHOST_USER_GPU(vdev);
    522 
    523     virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
    524 
    525     vhost_user_backend_stop(g->vhost);
    526 }
    527 
    528 static int
    529 vhost_user_gpu_config_change(struct vhost_dev *dev)
    530 {
    531     error_report("vhost-user-gpu: unhandled backend config change");
    532     return -1;
    533 }
    534 
    535 static const VhostDevConfigOps config_ops = {
    536     .vhost_dev_config_notifier = vhost_user_gpu_config_change,
    537 };
    538 
    539 static void
    540 vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
    541 {
    542     VhostUserGPU *g = VHOST_USER_GPU(qdev);
    543     VirtIODevice *vdev = VIRTIO_DEVICE(g);
    544 
    545     vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops);
    546     if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) {
    547         return;
    548     }
    549 
    550     /* existing backend may send DMABUF, so let's add that requirement */
    551     g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED;
    552     if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) {
    553         g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED;
    554     }
    555     if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_EDID)) {
    556         g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_EDID_ENABLED;
    557     } else {
    558         error_report("EDID requested but the backend doesn't support it.");
    559         g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_EDID_ENABLED);
    560     }
    561 
    562     if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) {
    563         return;
    564     }
    565 
    566     g->vhost_gpu_fd = -1;
    567 }
    568 
    569 static struct vhost_dev *vhost_user_gpu_get_vhost(VirtIODevice *vdev)
    570 {
    571     VhostUserGPU *g = VHOST_USER_GPU(vdev);
    572     return &g->vhost->dev;
    573 }
    574 
    575 static Property vhost_user_gpu_properties[] = {
    576     VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf),
    577     DEFINE_PROP_END_OF_LIST(),
    578 };
    579 
    580 static void
    581 vhost_user_gpu_class_init(ObjectClass *klass, void *data)
    582 {
    583     DeviceClass *dc = DEVICE_CLASS(klass);
    584     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    585     VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);
    586 
    587     vgc->gl_flushed = vhost_user_gpu_gl_flushed;
    588 
    589     vdc->realize = vhost_user_gpu_device_realize;
    590     vdc->reset = vhost_user_gpu_reset;
    591     vdc->set_status   = vhost_user_gpu_set_status;
    592     vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask;
    593     vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending;
    594     vdc->get_config = vhost_user_gpu_get_config;
    595     vdc->set_config = vhost_user_gpu_set_config;
    596     vdc->get_vhost = vhost_user_gpu_get_vhost;
    597 
    598     device_class_set_props(dc, vhost_user_gpu_properties);
    599 }
    600 
    601 static const TypeInfo vhost_user_gpu_info = {
    602     .name = TYPE_VHOST_USER_GPU,
    603     .parent = TYPE_VIRTIO_GPU_BASE,
    604     .instance_size = sizeof(VhostUserGPU),
    605     .instance_init = vhost_user_gpu_instance_init,
    606     .instance_finalize = vhost_user_gpu_instance_finalize,
    607     .class_init = vhost_user_gpu_class_init,
    608 };
    609 module_obj(TYPE_VHOST_USER_GPU);
    610 module_kconfig(VHOST_USER_GPU);
    611 
    612 static void vhost_user_gpu_register_types(void)
    613 {
    614     type_register_static(&vhost_user_gpu_info);
    615 }
    616 
    617 type_init(vhost_user_gpu_register_types)
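
The device model above only relays work between the guest and an external GPU process; the display/cursor protocol parsed in vhost_user_gpu_chr_read() is documented in docs/interop/vhost-user-gpu.rst in the QEMU tree. As a rough illustration of that framing, here is a backend-side sketch (not part of this file): send_cursor_pos() and write_all() are hypothetical helpers, the struct simply mirrors the header layout defined above, and "sock" is assumed to be the fd the backend received via VHOST_USER_GPU_SET_SOCKET.

    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>

    /* 12-byte header, as read field by field in vhost_user_gpu_chr_read() */
    struct gpu_msg_hdr {
        uint32_t request;
        uint32_t flags;
        uint32_t size;
    } __attribute__((packed));

    /* assumed helper: loop until the whole buffer has been written */
    static int write_all(int fd, const void *data, size_t len)
    {
        const uint8_t *p = data;
        while (len > 0) {
            ssize_t n = write(fd, p, len);
            if (n <= 0) {
                return -1;
            }
            p += n;
            len -= n;
        }
        return 0;
    }

    /* frame a VHOST_USER_GPU_CURSOR_POS message: header, then payload */
    static int send_cursor_pos(int sock, uint32_t scanout_id, uint32_t x, uint32_t y)
    {
        struct gpu_msg_hdr hdr = {
            .request = 4,               /* VHOST_USER_GPU_CURSOR_POS */
            .flags = 0,
            .size = 3 * sizeof(uint32_t),
        };
        uint32_t payload[3] = { scanout_id, x, y };
        uint8_t buf[sizeof(hdr) + sizeof(payload)];

        memcpy(buf, &hdr, sizeof(hdr));
        memcpy(buf + sizeof(hdr), payload, sizeof(payload));
        return write_all(sock, buf, sizeof(buf));
    }

On the QEMU side the device is normally used through its PCI wrapper, e.g. "-chardev socket,id=vgpu,path=/tmp/vgpu.sock -device vhost-user-gpu-pci,chardev=vgpu"; the chardev property is the alias registered in vhost_user_gpu_instance_init() above.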