qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

vhost-backend.c (12754B)


/*
 * vhost-backend
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "standard-headers/linux/vhost_types.h"

#include "hw/virtio/vhost-vdpa.h"
#ifdef CONFIG_VHOST_KERNEL
#include <linux/vhost.h>
#include <sys/ioctl.h>

static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
                             void *arg)
{
    int fd = (uintptr_t) dev->opaque;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

static int vhost_kernel_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    dev->opaque = opaque;

    return 0;
}

static int vhost_kernel_cleanup(struct vhost_dev *dev)
{
    int fd = (uintptr_t) dev->opaque;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    return close(fd) < 0 ? -errno : 0;
}

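/*
 * Context for the helpers above: for the kernel backend, dev->opaque is the
 * file descriptor of an already-open vhost character device, so every backend
 * operation reduces to an ioctl() on that fd, with failures mapped to
 * negative errno values.  As a rough standalone sketch (not part of this
 * file; the device path and the feature mask are illustrative assumptions),
 * the ioctl sequence these wrappers drive looks like:
 *
 *     int fd = open("/dev/vhost-net", O_RDWR);      // or another vhost device
 *     uint64_t features;
 *
 *     ioctl(fd, VHOST_SET_OWNER, NULL);             // vhost_kernel_set_owner()
 *     ioctl(fd, VHOST_GET_FEATURES, &features);     // vhost_kernel_get_features()
 *     features &= WANTED_FEATURES;                  // hypothetical mask
 *     ioctl(fd, VHOST_SET_FEATURES, &features);     // vhost_kernel_set_features()
 *     ...                                           // ring and backend setup
 *     close(fd);                                    // vhost_kernel_cleanup()
 */
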
static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
{
    int limit = 64;
    char *s;

    if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
                            &s, NULL, NULL)) {
        uint64_t val = g_ascii_strtoull(s, NULL, 10);
        if (val < INT_MAX && val > 0) {
            g_free(s);
            return val;
        }
        error_report("ignoring invalid max_mem_regions value in vhost module:"
                     " %s", s);
    }
    g_free(s);
    return limit;
}

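/*
 * The fallback of 64 regions above matches the vhost module's usual default
 * for max_mem_regions.  Purely as an illustration (the number is made up),
 * a host configured with
 *
 *     modprobe vhost max_mem_regions=509
 *
 * would make this function return 509 instead of 64.
 */
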
static int vhost_kernel_net_set_backend(struct vhost_dev *dev,
                                        struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_NET_SET_BACKEND, file);
}

static int vhost_kernel_scsi_set_endpoint(struct vhost_dev *dev,
                                          struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_SET_ENDPOINT, target);
}

static int vhost_kernel_scsi_clear_endpoint(struct vhost_dev *dev,
                                            struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_CLEAR_ENDPOINT, target);
}

static int vhost_kernel_scsi_get_abi_version(struct vhost_dev *dev, int *version)
{
    return vhost_kernel_call(dev, VHOST_SCSI_GET_ABI_VERSION, version);
}

static int vhost_kernel_set_log_base(struct vhost_dev *dev, uint64_t base,
                                     struct vhost_log *log)
{
    return vhost_kernel_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_kernel_set_mem_table(struct vhost_dev *dev,
                                      struct vhost_memory *mem)
{
    return vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem);
}

static int vhost_kernel_set_vring_addr(struct vhost_dev *dev,
                                       struct vhost_vring_addr *addr)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_kernel_set_vring_endian(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ENDIAN, ring);
}

static int vhost_kernel_set_vring_num(struct vhost_dev *dev,
                                      struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_kernel_set_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_kernel_get_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_GET_VRING_BASE, ring);
}

static int vhost_kernel_set_vring_kick(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_kernel_set_vring_call(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_kernel_set_vring_err(struct vhost_dev *dev,
                                      struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ERR, file);
}

static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
                                                   struct vhost_vring_state *s)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s);
}

static int vhost_kernel_set_features(struct vhost_dev *dev,
                                     uint64_t features)
{
    return vhost_kernel_call(dev, VHOST_SET_FEATURES, &features);
}

static int vhost_kernel_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2;
    int r;

    if (vhost_kernel_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return 0;
    }

    features &= f;
    r = vhost_kernel_call(dev, VHOST_SET_BACKEND_FEATURES,
                              &features);
    if (r) {
        return 0;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_kernel_get_features(struct vhost_dev *dev,
                                     uint64_t *features)
{
    return vhost_kernel_call(dev, VHOST_GET_FEATURES, features);
}

static int vhost_kernel_set_owner(struct vhost_dev *dev)
{
    return vhost_kernel_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_kernel_reset_device(struct vhost_dev *dev)
{
    return vhost_kernel_call(dev, VHOST_RESET_OWNER, NULL);
}

static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx - dev->vq_index;
}

static int vhost_kernel_vsock_set_guest_cid(struct vhost_dev *dev,
                                            uint64_t guest_cid)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_GUEST_CID, &guest_cid);
}

static int vhost_kernel_vsock_set_running(struct vhost_dev *dev, int start)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_RUNNING, &start);
}

static void vhost_kernel_iotlb_read(void *opaque)
{
    struct vhost_dev *dev = opaque;
    ssize_t len;

    if (dev->backend_cap &
        (0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) {
        struct vhost_msg_v2 msg;

        while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
            if (len < sizeof msg) {
                error_report("Wrong vhost message len: %d", (int)len);
                break;
            }
            if (msg.type != VHOST_IOTLB_MSG_V2) {
                error_report("Unknown vhost iotlb message type");
                break;
            }

            vhost_backend_handle_iotlb_msg(dev, &msg.iotlb);
        }
    } else {
        struct vhost_msg msg;

        while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
            if (len < sizeof msg) {
                error_report("Wrong vhost message len: %d", (int)len);
                break;
            }
            if (msg.type != VHOST_IOTLB_MSG) {
                error_report("Unknown vhost iotlb message type");
                break;
            }

            vhost_backend_handle_iotlb_msg(dev, &msg.iotlb);
        }
    }
}

static int vhost_kernel_send_device_iotlb_msg(struct vhost_dev *dev,
                                              struct vhost_iotlb_msg *imsg)
{
    if (dev->backend_cap & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) {
        struct vhost_msg_v2 msg = {};

        msg.type = VHOST_IOTLB_MSG_V2;
        msg.iotlb = *imsg;

        if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
            error_report("Fail to update device iotlb");
            return -EFAULT;
        }
    } else {
        struct vhost_msg msg = {};

        msg.type = VHOST_IOTLB_MSG;
        msg.iotlb = *imsg;

        if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
            error_report("Fail to update device iotlb");
            return -EFAULT;
        }
    }

    return 0;
}

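/*
 * The same vhost fd doubles as a message channel for the device IOTLB: the
 * kernel queues vhost_msg/vhost_msg_v2 records that vhost_kernel_iotlb_read()
 * picks up, and QEMU answers by write()ing records of the same shape via
 * vhost_kernel_send_device_iotlb_msg().  A sketch of one round trip in the
 * V2 format (all field values below are made up for illustration):
 *
 *     kernel -> QEMU (read side): a miss for a guest IOVA
 *         { .type = VHOST_IOTLB_MSG_V2,
 *           .iotlb = { .type = VHOST_IOTLB_MISS,
 *                      .iova = 0x100000, .perm = VHOST_ACCESS_RW } }
 *
 *     QEMU -> kernel (write side): the mapping that resolves it
 *         { .type = VHOST_IOTLB_MSG_V2,
 *           .iotlb = { .type = VHOST_IOTLB_UPDATE,
 *                      .iova = 0x100000, .uaddr = host_va, .size = 0x1000,
 *                      .perm = VHOST_ACCESS_RW } }
 */
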
static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
                                           int enabled)
{
    if (enabled)
        qemu_set_fd_handler((uintptr_t)dev->opaque,
                            vhost_kernel_iotlb_read, NULL, dev);
    else
        qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
}

const VhostOps kernel_ops = {
        .backend_type = VHOST_BACKEND_TYPE_KERNEL,
        .vhost_backend_init = vhost_kernel_init,
        .vhost_backend_cleanup = vhost_kernel_cleanup,
        .vhost_backend_memslots_limit = vhost_kernel_memslots_limit,
        .vhost_net_set_backend = vhost_kernel_net_set_backend,
        .vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint,
        .vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint,
        .vhost_scsi_get_abi_version = vhost_kernel_scsi_get_abi_version,
        .vhost_set_log_base = vhost_kernel_set_log_base,
        .vhost_set_mem_table = vhost_kernel_set_mem_table,
        .vhost_set_vring_addr = vhost_kernel_set_vring_addr,
        .vhost_set_vring_endian = vhost_kernel_set_vring_endian,
        .vhost_set_vring_num = vhost_kernel_set_vring_num,
        .vhost_set_vring_base = vhost_kernel_set_vring_base,
        .vhost_get_vring_base = vhost_kernel_get_vring_base,
        .vhost_set_vring_kick = vhost_kernel_set_vring_kick,
        .vhost_set_vring_call = vhost_kernel_set_vring_call,
        .vhost_set_vring_err = vhost_kernel_set_vring_err,
        .vhost_set_vring_busyloop_timeout =
                                vhost_kernel_set_vring_busyloop_timeout,
        .vhost_set_features = vhost_kernel_set_features,
        .vhost_get_features = vhost_kernel_get_features,
        .vhost_set_backend_cap = vhost_kernel_set_backend_cap,
        .vhost_set_owner = vhost_kernel_set_owner,
        .vhost_reset_device = vhost_kernel_reset_device,
        .vhost_get_vq_index = vhost_kernel_get_vq_index,
        .vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid,
        .vhost_vsock_set_running = vhost_kernel_vsock_set_running,
        .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
};
#endif

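/*
 * How this table gets used: a device that wants the in-kernel backend ends up
 * with dev->vhost_ops pointing at kernel_ops, selected by its backend type
 * (the selector is not part of this listing).  Sketch of the idea:
 *
 *     switch (backend_type) {
 *     case VHOST_BACKEND_TYPE_KERNEL:
 *         dev->vhost_ops = &kernel_ops;
 *         break;
 *     // ... other backends (user, vdpa) pick their own ops tables
 *     }
 */
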
int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
                                             uint64_t iova, uint64_t uaddr,
                                             uint64_t len,
                                             IOMMUAccessFlags perm)
{
    struct vhost_iotlb_msg imsg;

    imsg.iova =  iova;
    imsg.uaddr = uaddr;
    imsg.size = len;
    imsg.type = VHOST_IOTLB_UPDATE;

    switch (perm) {
    case IOMMU_RO:
        imsg.perm = VHOST_ACCESS_RO;
        break;
    case IOMMU_WO:
        imsg.perm = VHOST_ACCESS_WO;
        break;
    case IOMMU_RW:
        imsg.perm = VHOST_ACCESS_RW;
        break;
    default:
        return -EINVAL;
    }

    if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg)
        return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);

    return -ENODEV;
}

int vhost_backend_invalidate_device_iotlb(struct vhost_dev *dev,
                                                 uint64_t iova, uint64_t len)
{
    struct vhost_iotlb_msg imsg;

    imsg.iova = iova;
    imsg.size = len;
    imsg.type = VHOST_IOTLB_INVALIDATE;

    if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg)
        return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);

    return -ENODEV;
}

int vhost_backend_handle_iotlb_msg(struct vhost_dev *dev,
                                          struct vhost_iotlb_msg *imsg)
{
    int ret = 0;

    if (unlikely(!dev->vdev)) {
        error_report("Unexpected IOTLB message when virtio device is stopped");
        return -EINVAL;
    }

    switch (imsg->type) {
    case VHOST_IOTLB_MISS:
        ret = vhost_device_iotlb_miss(dev, imsg->iova,
                                      imsg->perm != VHOST_ACCESS_RO);
        break;
    case VHOST_IOTLB_ACCESS_FAIL:
        /* FIXME: report device iotlb error */
        error_report("Access failure IOTLB message type not supported");
        ret = -ENOTSUP;
        break;
    case VHOST_IOTLB_UPDATE:
    case VHOST_IOTLB_INVALIDATE:
    default:
        error_report("Unexpected IOTLB message type");
        ret = -EINVAL;
        break;
    }

    return ret;
}
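
/*
 * Taken together with the kernel backend above: a VHOST_IOTLB_MISS read from
 * the backend lands in vhost_backend_handle_iotlb_msg(), which asks
 * vhost_device_iotlb_miss() (defined elsewhere, in the generic vhost code) to
 * translate the faulting IOVA and, when the translation succeeds, to push the
 * mapping back to the backend through vhost_backend_update_device_iotlb().
 */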