qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

virgl.c (17382B)


/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <virglrenderer.h>
#include "virgl.h"

#include <epoxy/gl.h>

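/*
 * Fetch the cursor image for @resource_id from virglrenderer and copy the
 * expected 64x64 ARGB pixels into @data; the renderer allocates the buffer,
 * so it is freed here after the copy.
 */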
void
vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
                            gpointer data)
{
    uint32_t width, height;
    uint32_t *cursor;

    cursor = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    g_return_if_fail(cursor != NULL);
    g_return_if_fail(width == 64);
    g_return_if_fail(height == 64);

    memcpy(data, cursor, 64 * 64 * sizeof(uint32_t));
    free(cursor);
}

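/*
 * Control-queue handlers. Each one starts with VUGPU_FILL_CMD(), which
 * copies the request out of the command's out_sg and returns early on a
 * short read. CTX_CREATE / CTX_DESTROY map directly to virglrenderer
 * context creation and destruction for the guest-supplied ctx_id.
 */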
static void
virgl_cmd_context_create(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VUGPU_FILL_CMD(cc);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void
virgl_cmd_context_destroy(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VUGPU_FILL_CMD(cd);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

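/*
 * Resource creation. 2D resources are created as a fixed 2D render-target
 * texture (target 2 and bind (1 << 1) follow virglrenderer's gallium-style
 * enums, i.e. a 2D texture with a render-target binding) with the Y_0_TOP
 * flag so the origin matches the guest's expectation; 3D resources pass the
 * guest-supplied arguments straight through.
 */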
static void
virgl_cmd_create_resource_2d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c2d);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_create_resource_3d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c3d);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

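/*
 * VIRTIO_GPU_CMD_RESOURCE_UNREF: release any guest backing pages still
 * attached to the resource before dropping virglrenderer's reference, so
 * the mapped iovecs are unmapped and freed.
 */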
static void
virgl_cmd_resource_unref(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(unref);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        vg_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);
}

/* Not yet(?) defined in standard-headers, remove when possible */
#ifndef VIRTIO_GPU_CAPSET_VIRGL2
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#endif

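/*
 * Capability sets. The device exposes capset index 0 as
 * VIRTIO_GPU_CAPSET_VIRGL and index 1 as VIRTIO_GPU_CAPSET_VIRGL2;
 * vg_virgl_get_num_capsets() reports 2 only when the installed
 * virglrenderer actually knows about the VIRGL2 set.
 */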
static void
virgl_cmd_get_capset_info(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VUGPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

uint32_t
vg_virgl_get_num_capsets(void)
{
    uint32_t capset2_max_ver, capset2_max_size;
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}

static void
virgl_cmd_get_capset(VuGpu *g,
                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VUGPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    resp = g_malloc0(sizeof(*resp) + max_size);

    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

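/*
 * VIRTIO_GPU_CMD_SUBMIT_3D: copy the command stream out of the request's
 * out_sg iovecs (skipping the virtio_gpu_cmd_submit header) and hand it to
 * virglrenderer; the size is converted from bytes to dwords.
 */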
static void
virgl_cmd_submit_3d(VuGpu *g,
                    struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VUGPU_FILL_CMD(cs);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        g_critical("%s: size mismatch (%zd/%d)", __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

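/*
 * Transfer commands. The 2D variant is expressed as a degenerate 3D box
 * (z = 0, depth = 1) on context 0; the 3D variants forward the guest's
 * level, strides and box unchanged. Data moves between the guest backing
 * iovecs attached to the resource and the host-side texture.
 */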
static void
virgl_cmd_transfer_to_host_2d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VUGPU_FILL_CMD(t2d);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_to_host_3d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VUGPU_FILL_CMD(t3d);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VuGpu *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VUGPU_FILL_CMD(tf3d);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

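/*
 * Backing store management: map the guest memory entries that follow the
 * request into an iovec array and attach it to the renderer resource, or
 * detach and unmap it again. On attach failure the freshly created mapping
 * is cleaned up immediately.
 */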
static void
virgl_resource_attach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VUGPU_FILL_CMD(att_rb);

    ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                       res_iovs, att_rb.nr_entries);
    if (ret != 0) {
        vg_cleanup_mapping_iov(g, res_iovs, att_rb.nr_entries);
    }
}

static void
virgl_resource_detach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(detach_rb);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    vg_cleanup_mapping_iov(g, res_iovs, num_iovs);
}

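/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: instead of displaying locally, export the
 * resource's GL texture as a dma-buf fd and pass it to the QEMU frontend
 * with a VHOST_USER_GPU_DMABUF_SCANOUT message; the fd is closed here once
 * it has been sent. A resource_id of 0 (or an empty rect) disables the
 * scanout.
 */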
static void
virgl_cmd_set_scanout(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VUGPU_FILL_CMD(ss);

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            g_critical("%s: illegal resource specified %d\n",
                       __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        int fd = -1;
        if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) < 0) {
            g_critical("%s: failed to get fd for texture\n", __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        assert(fd >= 0);
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
            .payload.dmabuf_scanout.x =  ss.r.x,
            .payload.dmabuf_scanout.y =  ss.r.y,
            .payload.dmabuf_scanout.width = ss.r.width,
            .payload.dmabuf_scanout.height = ss.r.height,
            .payload.dmabuf_scanout.fd_width = info.width,
            .payload.dmabuf_scanout.fd_height = info.height,
            .payload.dmabuf_scanout.fd_stride = info.stride,
            .payload.dmabuf_scanout.fd_flags = info.flags,
            .payload.dmabuf_scanout.fd_drm_fourcc = info.drm_fourcc
        };
        vg_send_msg(g, &msg, fd);
        close(fd);
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
        };
        g_debug("disable scanout");
        vg_send_msg(g, &msg, -1);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}

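/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: after flushing GL, send a
 * VHOST_USER_GPU_DMABUF_UPDATE for every scanout currently showing the
 * flushed resource and wait for the frontend to acknowledge each update.
 */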
static void
virgl_cmd_resource_flush(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VUGPU_FILL_CMD(rf);

    glFlush();
    if (!rf.resource_id) {
        g_debug("bad resource id for flush..?");
        return;
    }
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_UPDATE,
            .size = sizeof(VhostUserGpuUpdate),
            .payload.update.scanout_id = i,
            .payload.update.x = rf.r.x,
            .payload.update.y = rf.r.y,
            .payload.update.width = rf.r.width,
            .payload.update.height = rf.r.height
        };
        vg_send_msg(g, &msg, -1);
        vg_wait_ok(g);
    }
}

static void
virgl_cmd_ctx_attach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VUGPU_FILL_CMD(att_res);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void
virgl_cmd_ctx_detach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VUGPU_FILL_CMD(det_res);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

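/*
 * Main control-queue dispatcher. After the handler runs, a command that is
 * still in VG_CMD_STATE_NEW is completed here: errors get an error response,
 * unfenced commands get VIRTIO_GPU_RESP_OK_NODATA right away, and fenced
 * commands only get their response later, from virgl_write_fence(), once
 * virglrenderer signals the fence.
 */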
void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
{
    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(g, cmd);
        break;
    default:
        g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->state != VG_CMD_STATE_NEW) {
        return;
    }

    if (cmd->error) {
        g_warning("%s: ctrl 0x%x, error 0x%x\n", __func__,
                  cmd->cmd_hdr.type, cmd->error);
        vg_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    g_debug("Creating fence id:%" PRId64 " type:%d",
            cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

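/*
 * virglrenderer fence callback: complete every queued fenced command whose
 * fence_id has been reached, not just the head of the queue.
 */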
static void
virgl_write_fence(void *opaque, uint32_t fence)
{
    VuGpu *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        g_debug("FENCE %" PRIu64, cmd->cmd_hdr.fence_id);
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        free(cmd);
        g->inflight--;
    }
}

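/*
 * With virgl_renderer_callbacks version 2 the renderer asks for a DRM fd
 * via get_drm_fd(), which lets the backend pin rendering to the render
 * node it was started with; with version 1 only the fence callback is
 * available and the default node is used.
 */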
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
static int
virgl_get_drm_fd(void *opaque)
{
    VuGpu *g = opaque;

    return g->drm_rnode_fd;
}
#endif

static struct virgl_renderer_callbacks virgl_cbs = {
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) &&    \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
    .get_drm_fd  = virgl_get_drm_fd,
    .version     = 2,
#else
    .version     = 1,
#endif
    .write_fence = virgl_write_fence,
};

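/*
 * Renderer setup: initialise virglrenderer with EGL and thread sync, then
 * watch its poll fd (when one is provided) from the GLib main loop so
 * fence completions are processed via virgl_renderer_poll().
 */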
static void
vg_virgl_poll(VuDev *dev, int condition, void *data)
{
    virgl_renderer_poll();
}

bool
vg_virgl_init(VuGpu *g)
{
    int ret;

    if (g->drm_rnode_fd && virgl_cbs.version == 1) {
        g_warning("virgl will use the default rendernode");
    }

    ret = virgl_renderer_init(g,
                              VIRGL_RENDERER_USE_EGL |
                              VIRGL_RENDERER_THREAD_SYNC,
                              &virgl_cbs);
    if (ret != 0) {
        return false;
    }

    ret = virgl_renderer_get_poll_fd();
    if (ret != -1) {
        g->renderer_source =
            vug_source_new(&g->dev, ret, G_IO_IN, vg_virgl_poll, g);
    }

    return true;
}
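
/*
 * Usage sketch (editor's note, not part of the upstream file): this is the
 * virgl half of the contrib vhost-user-gpu backend, which talks to QEMU
 * over a vhost-user socket. A typical setup, assuming the stock option
 * names, looks roughly like:
 *
 *   vhost-user-gpu --virgl --render-node=/dev/dri/renderD128 \
 *                  --socket-path=/tmp/vgpu.sock
 *   qemu-system-x86_64 ... \
 *       -chardev socket,id=vgpu,path=/tmp/vgpu.sock \
 *       -device vhost-user-gpu-pci,chardev=vgpu
 *
 * Option spellings are from memory and may differ between versions; check
 * vhost-user-gpu --help and the QEMU vhost-user-gpu documentation for the
 * exact flags.
 */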