qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

virtio-blk.c (44491B)


      1 /*
      2  * Virtio Block Device
      3  *
      4  * Copyright IBM, Corp. 2007
      5  *
      6  * Authors:
      7  *  Anthony Liguori   <aliguori@us.ibm.com>
      8  *
      9  * This work is licensed under the terms of the GNU GPL, version 2.  See
     10  * the COPYING file in the top-level directory.
     11  *
     12  */
     13 
     14 #include "qemu/osdep.h"
     15 #include "qapi/error.h"
     16 #include "qemu/iov.h"
     17 #include "qemu/module.h"
     18 #include "qemu/error-report.h"
     19 #include "qemu/main-loop.h"
     20 #include "trace.h"
     21 #include "hw/block/block.h"
     22 #include "hw/qdev-properties.h"
     23 #include "sysemu/blockdev.h"
     24 #include "sysemu/block-ram-registrar.h"
     25 #include "sysemu/sysemu.h"
     26 #include "sysemu/runstate.h"
     27 #include "hw/virtio/virtio-blk.h"
     28 #include "dataplane/virtio-blk.h"
     29 #include "scsi/constants.h"
     30 #ifdef __linux__
     31 # include <scsi/sg.h>
     32 #endif
     33 #include "hw/virtio/virtio-bus.h"
     34 #include "migration/qemu-file-types.h"
     35 #include "hw/virtio/virtio-access.h"
     36 #include "hw/virtio/virtio-blk-common.h"
     37 #include "qemu/coroutine.h"
     38 
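        /*
         * A virtio-blk request occupies one virtqueue element and, per the
         * virtio specification, is laid out as:
         *
         *   struct virtio_blk_outhdr out;   driver->device: type, ioprio, sector
         *   u8 data[];                      optional payload, direction per type
         *   struct virtio_blk_inhdr in;     device->driver: status byte
         *
         * virtio_blk_handle_request() peels the outhdr off the front of the
         * out iovecs and the inhdr off the back of the in iovecs.
         */
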
     39 static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
     40                                     VirtIOBlockReq *req)
     41 {
     42     req->dev = s;
     43     req->vq = vq;
     44     req->qiov.size = 0;
     45     req->in_len = 0;
     46     req->next = NULL;
     47     req->mr_next = NULL;
     48 }
     49 
     50 static void virtio_blk_free_request(VirtIOBlockReq *req)
     51 {
     52     g_free(req);
     53 }
     54 
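        /*
         * Complete a request: store the status byte in the guest-visible
         * inhdr, undo any iovec trimming done while the headers were parsed,
         * push the element back onto the virtqueue and notify the guest via
         * the dataplane notifier or the regular virtio interrupt path.
         */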
     55 static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
     56 {
     57     VirtIOBlock *s = req->dev;
     58     VirtIODevice *vdev = VIRTIO_DEVICE(s);
     59 
     60     trace_virtio_blk_req_complete(vdev, req, status);
     61 
     62     stb_p(&req->in->status, status);
     63     iov_discard_undo(&req->inhdr_undo);
     64     iov_discard_undo(&req->outhdr_undo);
     65     virtqueue_push(req->vq, &req->elem, req->in_len);
     66     if (s->dataplane_started && !s->dataplane_disabled) {
     67         virtio_blk_data_plane_notify(s->dataplane, req->vq);
     68     } else {
     69         virtio_notify(vdev, req->vq);
     70     }
     71 }
     72 
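        /*
         * Apply the configured rerror/werror policy to a failed request.
         * BLOCK_ERROR_ACTION_STOP parks the request on s->rq so that it can be
         * retried when the VM resumes; BLOCK_ERROR_ACTION_REPORT completes it
         * with VIRTIO_BLK_S_IOERR. Returns nonzero unless the action is
         * BLOCK_ERROR_ACTION_IGNORE, i.e. unless the caller should complete
         * the request normally.
         */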
     73 static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
     74     bool is_read, bool acct_failed)
     75 {
     76     VirtIOBlock *s = req->dev;
     77     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
     78 
     79     if (action == BLOCK_ERROR_ACTION_STOP) {
     80         /* Break the link as the next request is going to be parsed from the
     81          * ring again. Otherwise we may end up doing a double completion! */
     82         req->mr_next = NULL;
     83         req->next = s->rq;
     84         s->rq = req;
     85     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
     86         virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
     87         if (acct_failed) {
     88             block_acct_failed(blk_get_stats(s->blk), &req->acct);
     89         }
     90         virtio_blk_free_request(req);
     91     }
     92 
     93     blk_error_action(s->blk, action, is_read, error);
     94     return action != BLOCK_ERROR_ACTION_IGNORE;
     95 }
     96 
     97 static void virtio_blk_rw_complete(void *opaque, int ret)
     98 {
     99     VirtIOBlockReq *next = opaque;
    100     VirtIOBlock *s = next->dev;
    101     VirtIODevice *vdev = VIRTIO_DEVICE(s);
    102 
    103     aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    104     while (next) {
    105         VirtIOBlockReq *req = next;
    106         next = req->mr_next;
    107         trace_virtio_blk_rw_complete(vdev, req, ret);
    108 
    109         if (req->qiov.nalloc != -1) {
    110             /* If nalloc != -1, req->qiov is a local copy of the original
    111              * external iovec. It was allocated in submit_requests() so
    112              * that requests could be merged. */
    113             qemu_iovec_destroy(&req->qiov);
    114         }
    115 
    116         if (ret) {
    117             int p = virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type);
    118             bool is_read = !(p & VIRTIO_BLK_T_OUT);
    119             /* Note that memory may be dirtied on read failure.  If the
    120              * virtio request is not completed here, as is the case for
    121              * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
    122              * correctly during live migration.  While this is ugly,
    123              * it is acceptable because the device is free to write to
    124              * the memory until the request is completed (which will
    125              * happen on the other side of the migration).
    126              */
    127             if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
    128                 continue;
    129             }
    130         }
    131 
    132         virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    133         block_acct_done(blk_get_stats(s->blk), &req->acct);
    134         virtio_blk_free_request(req);
    135     }
    136     aio_context_release(blk_get_aio_context(s->conf.conf.blk));
    137 }
    138 
    139 static void virtio_blk_flush_complete(void *opaque, int ret)
    140 {
    141     VirtIOBlockReq *req = opaque;
    142     VirtIOBlock *s = req->dev;
    143 
    144     aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    145     if (ret) {
    146         if (virtio_blk_handle_rw_error(req, -ret, 0, true)) {
    147             goto out;
    148         }
    149     }
    150 
    151     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    152     block_acct_done(blk_get_stats(s->blk), &req->acct);
    153     virtio_blk_free_request(req);
    154 
    155 out:
    156     aio_context_release(blk_get_aio_context(s->conf.conf.blk));
    157 }
    158 
    159 static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
    160 {
    161     VirtIOBlockReq *req = opaque;
    162     VirtIOBlock *s = req->dev;
    163     bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
    164                             ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;
    165 
    166     aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    167     if (ret) {
    168         if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
    169             goto out;
    170         }
    171     }
    172 
    173     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    174     if (is_write_zeroes) {
    175         block_acct_done(blk_get_stats(s->blk), &req->acct);
    176     }
    177     virtio_blk_free_request(req);
    178 
    179 out:
    180     aio_context_release(blk_get_aio_context(s->conf.conf.blk));
    181 }
    182 
    183 #ifdef __linux__
    184 
    185 typedef struct {
    186     VirtIOBlockReq *req;
    187     struct sg_io_hdr hdr;
    188 } VirtIOBlockIoctlReq;
    189 
    190 static void virtio_blk_ioctl_complete(void *opaque, int status)
    191 {
    192     VirtIOBlockIoctlReq *ioctl_req = opaque;
    193     VirtIOBlockReq *req = ioctl_req->req;
    194     VirtIOBlock *s = req->dev;
    195     VirtIODevice *vdev = VIRTIO_DEVICE(s);
    196     struct virtio_scsi_inhdr *scsi;
    197     struct sg_io_hdr *hdr;
    198 
    199     scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;
    200 
    201     if (status) {
    202         status = VIRTIO_BLK_S_UNSUPP;
    203         virtio_stl_p(vdev, &scsi->errors, 255);
    204         goto out;
    205     }
    206 
    207     hdr = &ioctl_req->hdr;
    208     /*
    209      * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
    210      * clear the masked_status field [hence status gets cleared too, see
    211      * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
    212      * status has occurred.  However they do set DRIVER_SENSE in driver_status
    213      * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
    214      */
    215     if (hdr->status == 0 && hdr->sb_len_wr > 0) {
    216         hdr->status = CHECK_CONDITION;
    217     }
    218 
    219     virtio_stl_p(vdev, &scsi->errors,
    220                  hdr->status | (hdr->msg_status << 8) |
    221                  (hdr->host_status << 16) | (hdr->driver_status << 24));
    222     virtio_stl_p(vdev, &scsi->residual, hdr->resid);
    223     virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
    224     virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);
    225 
    226 out:
    227     aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    228     virtio_blk_req_complete(req, status);
    229     virtio_blk_free_request(req);
    230     aio_context_release(blk_get_aio_context(s->conf.conf.blk));
    231     g_free(ioctl_req);
    232 }
    233 
    234 #endif
    235 
    236 static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
    237 {
    238     VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));
    239 
    240     if (req) {
    241         virtio_blk_init_request(s, vq, req);
    242     }
    243     return req;
    244 }
    245 
    246 static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
    247 {
    248     int status = VIRTIO_BLK_S_OK;
    249     struct virtio_scsi_inhdr *scsi = NULL;
    250     VirtIOBlock *blk = req->dev;
    251     VirtIODevice *vdev = VIRTIO_DEVICE(blk);
    252     VirtQueueElement *elem = &req->elem;
    253 
    254 #ifdef __linux__
    255     int i;
    256     VirtIOBlockIoctlReq *ioctl_req;
    257     BlockAIOCB *acb;
    258 #endif
    259 
    260     /*
    261      * We require at least one output segment each for the virtio_blk_outhdr
    262      * and the SCSI command block.
    263      *
    264      * We also require at least the virtio_blk_inhdr, the virtio_scsi_inhdr
    265      * and the sense buffer pointer in the input segments.
    266      */
    267     if (elem->out_num < 2 || elem->in_num < 3) {
    268         status = VIRTIO_BLK_S_IOERR;
    269         goto fail;
    270     }
    271 
    272     /*
    273      * The scsi inhdr is placed in the second-to-last input segment, just
    274      * before the regular inhdr.
    275      */
    276     scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;
    277 
    278     if (!virtio_has_feature(blk->host_features, VIRTIO_BLK_F_SCSI)) {
    279         status = VIRTIO_BLK_S_UNSUPP;
    280         goto fail;
    281     }
    282 
    283     /*
    284      * No support for bidirectional commands yet.
    285      */
    286     if (elem->out_num > 2 && elem->in_num > 3) {
    287         status = VIRTIO_BLK_S_UNSUPP;
    288         goto fail;
    289     }
    290 
    291 #ifdef __linux__
    292     ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
    293     ioctl_req->req = req;
    294     ioctl_req->hdr.interface_id = 'S';
    295     ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
    296     ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
    297     ioctl_req->hdr.dxfer_len = 0;
    298 
    299     if (elem->out_num > 2) {
    300         /*
    301          * If there are more than the minimally required two output segments,
    302          * the write payload starts at the third iovec.
    303          */
    304         ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
    305         ioctl_req->hdr.iovec_count = elem->out_num - 2;
    306 
    307         for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
    308             ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
    309         }
    310 
    311         ioctl_req->hdr.dxferp = elem->out_sg + 2;
    312 
    313     } else if (elem->in_num > 3) {
    314         /*
    315          * If we have more than three input segments, the guest actually
    316          * wants to read data.
    317          */
    318         ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
    319         ioctl_req->hdr.iovec_count = elem->in_num - 3;
    320         for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
    321             ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
    322         }
    323 
    324         ioctl_req->hdr.dxferp = elem->in_sg;
    325     } else {
    326         /*
    327          * Some SCSI commands don't actually transfer any data.
    328          */
    329         ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE;
    330     }
    331 
    332     ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
    333     ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;
    334 
    335     acb = blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
    336                         virtio_blk_ioctl_complete, ioctl_req);
    337     if (!acb) {
    338         g_free(ioctl_req);
    339         status = VIRTIO_BLK_S_UNSUPP;
    340         goto fail;
    341     }
    342     return -EINPROGRESS;
    343 #else
    344     abort();
    345 #endif
    346 
    347 fail:
    348     /* Just put anything nonzero so that the ioctl fails in the guest.  */
    349     if (scsi) {
    350         virtio_stl_p(vdev, &scsi->errors, 255);
    351     }
    352     return status;
    353 }
    354 
    355 static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
    356 {
    357     int status;
    358 
    359     status = virtio_blk_handle_scsi_req(req);
    360     if (status != -EINPROGRESS) {
    361         virtio_blk_req_complete(req, status);
    362         virtio_blk_free_request(req);
    363     }
    364 }
    365 
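        /*
         * Issue mrb->reqs[start .. start + num_reqs - 1] as a single
         * preadv/pwritev. For a merged batch (num_reqs > 1) the individual
         * qiovs are concatenated into a fresh qiov of niov vectors, and the
         * requests are chained through mr_next so virtio_blk_rw_complete()
         * can finish all of them from one callback.
         */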
    366 static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb,
    367                                    int start, int num_reqs, int niov)
    368 {
    369     BlockBackend *blk = s->blk;
    370     QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
    371     int64_t sector_num = mrb->reqs[start]->sector_num;
    372     bool is_write = mrb->is_write;
    373     BdrvRequestFlags flags = 0;
    374 
    375     if (num_reqs > 1) {
    376         int i;
    377         struct iovec *tmp_iov = qiov->iov;
    378         int tmp_niov = qiov->niov;
    379 
    380         /* mrb->reqs[start]->qiov was initialized from an external iovec, so
    381          * we can't modify it here. Initialize a local copy and then add the
    382          * external iovecs. */
    383         qemu_iovec_init(qiov, niov);
    384 
    385         for (i = 0; i < tmp_niov; i++) {
    386             qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
    387         }
    388 
    389         for (i = start + 1; i < start + num_reqs; i++) {
    390             qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
    391                               mrb->reqs[i]->qiov.size);
    392             mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
    393         }
    394 
    395         trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev),
    396                                          mrb, start, num_reqs,
    397                                          sector_num << BDRV_SECTOR_BITS,
    398                                          qiov->size, is_write);
    399         block_acct_merge_done(blk_get_stats(blk),
    400                               is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
    401                               num_reqs - 1);
    402     }
    403 
    404     if (blk_ram_registrar_ok(&s->blk_ram_registrar)) {
    405         flags |= BDRV_REQ_REGISTERED_BUF;
    406     }
    407 
    408     if (is_write) {
    409         blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov,
    410                         flags, virtio_blk_rw_complete,
    411                         mrb->reqs[start]);
    412     } else {
    413         blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov,
    414                        flags, virtio_blk_rw_complete,
    415                        mrb->reqs[start]);
    416     }
    417 }
    418 
    419 static int multireq_compare(const void *a, const void *b)
    420 {
    421     const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
    422                          *req2 = *(VirtIOBlockReq **)b;
    423 
    424     /*
    425      * Note that we can't simply subtract req2->sector_num from
    426      * req1->sector_num, as the difference could overflow the return value.
    427      */
    428     if (req1->sector_num > req2->sector_num) {
    429         return 1;
    430     } else if (req1->sector_num < req2->sector_num) {
    431         return -1;
    432     } else {
    433         return 0;
    434     }
    435 }
    436 
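        /*
         * Flush a MultiReqBuffer: sort the pending requests by sector, then
         * greedily submit runs of sequential requests, starting a new run
         * whenever a request is not contiguous or merging it would exceed the
         * backend's iovec or transfer-size limits.
         */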
    437 static void virtio_blk_submit_multireq(VirtIOBlock *s, MultiReqBuffer *mrb)
    438 {
    439     int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
    440     uint32_t max_transfer;
    441     int64_t sector_num = 0;
    442 
    443     if (mrb->num_reqs == 1) {
    444         submit_requests(s, mrb, 0, 1, -1);
    445         mrb->num_reqs = 0;
    446         return;
    447     }
    448 
    449     max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);
    450 
    451     qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
    452           &multireq_compare);
    453 
    454     for (i = 0; i < mrb->num_reqs; i++) {
    455         VirtIOBlockReq *req = mrb->reqs[i];
    456         if (num_reqs > 0) {
    457             /*
    458              * NOTE: We cannot merge the requests in the following situations:
    459              * 1. the requests are not sequential
    460              * 2. merging would exceed the maximum number of IOVs
    461              * 3. merging would exceed the maximum transfer length of the backend
    462              */
    463             if (sector_num + nb_sectors != req->sector_num ||
    464                 niov > blk_get_max_iov(s->blk) - req->qiov.niov ||
    465                 req->qiov.size > max_transfer ||
    466                 nb_sectors > (max_transfer -
    467                               req->qiov.size) / BDRV_SECTOR_SIZE) {
    468                 submit_requests(s, mrb, start, num_reqs, niov);
    469                 num_reqs = 0;
    470             }
    471         }
    472 
    473         if (num_reqs == 0) {
    474             sector_num = req->sector_num;
    475             nb_sectors = niov = 0;
    476             start = i;
    477         }
    478 
    479         nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
    480         niov += req->qiov.niov;
    481         num_reqs++;
    482     }
    483 
    484     submit_requests(s, mrb, start, num_reqs, niov);
    485     mrb->num_reqs = 0;
    486 }
    487 
    488 static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
    489 {
    490     VirtIOBlock *s = req->dev;
    491 
    492     block_acct_start(blk_get_stats(s->blk), &req->acct, 0,
    493                      BLOCK_ACCT_FLUSH);
    494 
    495     /*
    496      * Make sure all outstanding writes are posted to the backing device.
    497      */
    498     if (mrb->is_write && mrb->num_reqs > 0) {
    499         virtio_blk_submit_multireq(s, mrb);
    500     }
    501     blk_aio_flush(s->blk, virtio_blk_flush_complete, req);
    502 }
    503 
    504 static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
    505                                      uint64_t sector, size_t size)
    506 {
    507     uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
    508     uint64_t total_sectors;
    509 
    510     if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
    511         return false;
    512     }
    513     if (sector & dev->sector_mask) {
    514         return false;
    515     }
    516     if (size % dev->conf.conf.logical_block_size) {
    517         return false;
    518     }
    519     blk_get_geometry(dev->blk, &total_sectors);
    520     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
    521         return false;
    522     }
    523     return true;
    524 }
    525 
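        /*
         * Validate and issue a discard or write-zeroes request whose header
         * has already been copied out of the guest iovecs. On failure the
         * returned virtio status byte is reported by the caller; on success
         * the request completes asynchronously in
         * virtio_blk_discard_write_zeroes_complete().
         */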
    526 static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req,
    527     struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes)
    528 {
    529     VirtIOBlock *s = req->dev;
    530     VirtIODevice *vdev = VIRTIO_DEVICE(s);
    531     uint64_t sector;
    532     uint32_t num_sectors, flags, max_sectors;
    533     uint8_t err_status;
    534     int bytes;
    535 
    536     sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
    537     num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
    538     flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
    539     max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors :
    540                   s->conf.max_discard_sectors;
    541 
    542     /*
    543      * max_sectors is at most BDRV_REQUEST_MAX_SECTORS; this check
    544      * ensures that "num_sectors << BDRV_SECTOR_BITS" fits in the
    545      * integer variable.
    546      */
    547     if (unlikely(num_sectors > max_sectors)) {
    548         err_status = VIRTIO_BLK_S_IOERR;
    549         goto err;
    550     }
    551 
    552     bytes = num_sectors << BDRV_SECTOR_BITS;
    553 
    554     if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) {
    555         err_status = VIRTIO_BLK_S_IOERR;
    556         goto err;
    557     }
    558 
    559     /*
    560      * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
    561      * and write zeroes commands if any unknown flag is set.
    562      */
    563     if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
    564         err_status = VIRTIO_BLK_S_UNSUPP;
    565         goto err;
    566     }
    567 
    568     if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */
    569         int blk_aio_flags = 0;
    570 
    571         if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
    572             blk_aio_flags |= BDRV_REQ_MAY_UNMAP;
    573         }
    574 
    575         block_acct_start(blk_get_stats(s->blk), &req->acct, bytes,
    576                          BLOCK_ACCT_WRITE);
    577 
    578         blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS,
    579                               bytes, blk_aio_flags,
    580                               virtio_blk_discard_write_zeroes_complete, req);
    581     } else { /* VIRTIO_BLK_T_DISCARD */
    582         /*
    583          * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
    584          * discard commands if the unmap flag is set.
    585          */
    586         if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
    587             err_status = VIRTIO_BLK_S_UNSUPP;
    588             goto err;
    589         }
    590 
    591         blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes,
    592                          virtio_blk_discard_write_zeroes_complete, req);
    593     }
    594 
    595     return VIRTIO_BLK_S_OK;
    596 
    597 err:
    598     if (is_write_zeroes) {
    599         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
    600     }
    601     return err_status;
    602 }
    603 
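        /*
         * Parse and dispatch one request. Returns 0 on success (including
         * requests completed with an error status) and -1 on a fatal protocol
         * violation, in which case virtio_error() has already marked the
         * device broken and the caller must stop processing the queue.
         */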
    604 static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
    605 {
    606     uint32_t type;
    607     struct iovec *in_iov = req->elem.in_sg;
    608     struct iovec *out_iov = req->elem.out_sg;
    609     unsigned in_num = req->elem.in_num;
    610     unsigned out_num = req->elem.out_num;
    611     VirtIOBlock *s = req->dev;
    612     VirtIODevice *vdev = VIRTIO_DEVICE(s);
    613 
    614     if (req->elem.out_num < 1 || req->elem.in_num < 1) {
    615         virtio_error(vdev, "virtio-blk missing headers");
    616         return -1;
    617     }
    618 
    619     if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
    620                             sizeof(req->out)) != sizeof(req->out))) {
    621         virtio_error(vdev, "virtio-blk request outhdr too short");
    622         return -1;
    623     }
    624 
    625     iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out),
    626                                &req->outhdr_undo);
    627 
    628     if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
    629         virtio_error(vdev, "virtio-blk request inhdr too short");
    630         iov_discard_undo(&req->outhdr_undo);
    631         return -1;
    632     }
    633 
    634     /* We always touch the last byte, so just see how big in_iov is.  */
    635     req->in_len = iov_size(in_iov, in_num);
    636     req->in = (void *)in_iov[in_num - 1].iov_base
    637               + in_iov[in_num - 1].iov_len
    638               - sizeof(struct virtio_blk_inhdr);
    639     iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr),
    640                               &req->inhdr_undo);
    641 
    642     type = virtio_ldl_p(vdev, &req->out.type);
    643 
    644     /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
    645      * is an optional flag. Although a guest should not send this flag when
    646      * it was not negotiated, we ignored it in the past, so keep ignoring it. */
    647     switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
    648     case VIRTIO_BLK_T_IN:
    649     {
    650         bool is_write = type & VIRTIO_BLK_T_OUT;
    651         req->sector_num = virtio_ldq_p(vdev, &req->out.sector);
    652 
    653         if (is_write) {
    654             qemu_iovec_init_external(&req->qiov, out_iov, out_num);
    655             trace_virtio_blk_handle_write(vdev, req, req->sector_num,
    656                                           req->qiov.size / BDRV_SECTOR_SIZE);
    657         } else {
    658             qemu_iovec_init_external(&req->qiov, in_iov, in_num);
    659             trace_virtio_blk_handle_read(vdev, req, req->sector_num,
    660                                          req->qiov.size / BDRV_SECTOR_SIZE);
    661         }
    662 
    663         if (!virtio_blk_sect_range_ok(s, req->sector_num, req->qiov.size)) {
    664             virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
    665             block_acct_invalid(blk_get_stats(s->blk),
    666                                is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
    667             virtio_blk_free_request(req);
    668             return 0;
    669         }
    670 
    671         block_acct_start(blk_get_stats(s->blk), &req->acct, req->qiov.size,
    672                          is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
    673 
    674         /* Submit now if merging would exceed the maximum number of requests
    675          * or the I/O direction changes. */
    676         if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
    677                                   is_write != mrb->is_write ||
    678                                   !s->conf.request_merging)) {
    679             virtio_blk_submit_multireq(s, mrb);
    680         }
    681 
    682         assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
    683         mrb->reqs[mrb->num_reqs++] = req;
    684         mrb->is_write = is_write;
    685         break;
    686     }
    687     case VIRTIO_BLK_T_FLUSH:
    688         virtio_blk_handle_flush(req, mrb);
    689         break;
    690     case VIRTIO_BLK_T_SCSI_CMD:
    691         virtio_blk_handle_scsi(req);
    692         break;
    693     case VIRTIO_BLK_T_GET_ID:
    694     {
    695         /*
    696          * NB: per the existing serial-number string convention, the string
    697          * is terminated by '\0' only when it is shorter than the buffer.
    698          */
    699         const char *serial = s->conf.serial ? s->conf.serial : "";
    700         size_t size = MIN(strlen(serial) + 1,
    701                           MIN(iov_size(in_iov, in_num),
    702                               VIRTIO_BLK_ID_BYTES));
    703         iov_from_buf(in_iov, in_num, 0, serial, size);
    704         virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    705         virtio_blk_free_request(req);
    706         break;
    707     }
    708     /*
    709      * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with
    710      * the VIRTIO_BLK_T_OUT flag set. The switch statement masks this flag,
    711      * so the case labels must mask it too; below we check whether it was set.
    712      */
    713     case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT:
    714     case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT:
    715     {
    716         struct virtio_blk_discard_write_zeroes dwz_hdr;
    717         size_t out_len = iov_size(out_iov, out_num);
    718         bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) ==
    719                                VIRTIO_BLK_T_WRITE_ZEROES;
    720         uint8_t err_status;
    721 
    722         /*
    723          * Unsupported if VIRTIO_BLK_T_OUT is not set or the payload is
    724          * larger than a single discard/write-zeroes header.
    725          */
    726         if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
    727                      out_len > sizeof(dwz_hdr))) {
    728             virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
    729             virtio_blk_free_request(req);
    730             return 0;
    731         }
    732 
    733         if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
    734                                 sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
    735             iov_discard_undo(&req->inhdr_undo);
    736             iov_discard_undo(&req->outhdr_undo);
    737             virtio_error(vdev, "virtio-blk discard/write_zeroes header"
    738                          " too short");
    739             return -1;
    740         }
    741 
    742         err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr,
    743                                                             is_write_zeroes);
    744         if (err_status != VIRTIO_BLK_S_OK) {
    745             virtio_blk_req_complete(req, err_status);
    746             virtio_blk_free_request(req);
    747         }
    748 
    749         break;
    750     }
    751     default:
    752         virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
    753         virtio_blk_free_request(req);
    754     }
    755     return 0;
    756 }
    757 
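        /*
         * Drain a virtqueue. Guest notifications are temporarily disabled
         * while requests are popped (if they were enabled) to reduce
         * notification vmexits, and blk_io_plug()/blk_io_unplug() bracket the
         * loop so the block layer can batch the submissions.
         */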
    758 void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
    759 {
    760     VirtIOBlockReq *req;
    761     MultiReqBuffer mrb = {};
    762     bool suppress_notifications = virtio_queue_get_notification(vq);
    763 
    764     aio_context_acquire(blk_get_aio_context(s->blk));
    765     blk_io_plug(s->blk);
    766 
    767     do {
    768         if (suppress_notifications) {
    769             virtio_queue_set_notification(vq, 0);
    770         }
    771 
    772         while ((req = virtio_blk_get_request(s, vq))) {
    773             if (virtio_blk_handle_request(req, &mrb)) {
    774                 virtqueue_detach_element(req->vq, &req->elem, 0);
    775                 virtio_blk_free_request(req);
    776                 break;
    777             }
    778         }
    779 
    780         if (suppress_notifications) {
    781             virtio_queue_set_notification(vq, 1);
    782         }
    783     } while (!virtio_queue_empty(vq));
    784 
    785     if (mrb.num_reqs) {
    786         virtio_blk_submit_multireq(s, &mrb);
    787     }
    788 
    789     blk_io_unplug(s->blk);
    790     aio_context_release(blk_get_aio_context(s->blk));
    791 }
    792 
    793 static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
    794 {
    795     VirtIOBlock *s = (VirtIOBlock *)vdev;
    796 
    797     if (s->dataplane && !s->dataplane_started) {
    798         /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK, so
    799          * start dataplane here instead of waiting for .set_status().
    800          */
    801         virtio_device_start_ioeventfd(vdev);
    802         if (!s->dataplane_disabled) {
    803             return;
    804         }
    805     }
    806     virtio_blk_handle_vq(s, vq);
    807 }
    808 
    809 void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh)
    810 {
    811     VirtIOBlockReq *req = s->rq;
    812     MultiReqBuffer mrb = {};
    813 
    814     s->rq = NULL;
    815 
    816     aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    817     while (req) {
    818         VirtIOBlockReq *next = req->next;
    819         if (virtio_blk_handle_request(req, &mrb)) {
    820             /* Device is now broken and won't do any processing until it gets
    821              * reset. Already queued requests will be lost: let's purge them.
    822              */
    823             while (req) {
    824                 next = req->next;
    825                 virtqueue_detach_element(req->vq, &req->elem, 0);
    826                 virtio_blk_free_request(req);
    827                 req = next;
    828             }
    829             break;
    830         }
    831         req = next;
    832     }
    833 
    834     if (mrb.num_reqs) {
    835         virtio_blk_submit_multireq(s, &mrb);
    836     }
    837     if (is_bh) {
    838         blk_dec_in_flight(s->conf.conf.blk);
    839     }
    840     aio_context_release(blk_get_aio_context(s->conf.conf.blk));
    841 }
    842 
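        /*
         * Bottom half scheduled by virtio_blk_dma_restart_cb(): replays the
         * requests parked on s->rq (e.g. by a rerror/werror=stop policy) once
         * the VM is running again.
         */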
    843 static void virtio_blk_dma_restart_bh(void *opaque)
    844 {
    845     VirtIOBlock *s = opaque;
    846 
    847     qemu_bh_delete(s->bh);
    848     s->bh = NULL;
    849 
    850     virtio_blk_process_queued_requests(s, true);
    851 }
    852 
    853 static void virtio_blk_dma_restart_cb(void *opaque, bool running,
    854                                       RunState state)
    855 {
    856     VirtIOBlock *s = opaque;
    857     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
    858     VirtioBusState *bus = VIRTIO_BUS(qbus);
    859 
    860     if (!running) {
    861         return;
    862     }
    863 
    864     /*
    865      * If ioeventfd is enabled, don't schedule the BH here as queued
    866      * requests will be processed while starting the data plane.
    867      */
    868     if (!s->bh && !virtio_bus_ioeventfd_enabled(bus)) {
    869         s->bh = aio_bh_new(blk_get_aio_context(s->conf.conf.blk),
    870                            virtio_blk_dma_restart_bh, s);
    871         blk_inc_in_flight(s->conf.conf.blk);
    872         qemu_bh_schedule(s->bh);
    873     }
    874 }
    875 
    876 static void virtio_blk_reset(VirtIODevice *vdev)
    877 {
    878     VirtIOBlock *s = VIRTIO_BLK(vdev);
    879     AioContext *ctx;
    880     VirtIOBlockReq *req;
    881 
    882     ctx = blk_get_aio_context(s->blk);
    883     aio_context_acquire(ctx);
    884     blk_drain(s->blk);
    885 
    886     /* We drop queued requests after blk_drain() because blk_drain() itself can
    887      * produce them. */
    888     while (s->rq) {
    889         req = s->rq;
    890         s->rq = req->next;
    891         virtqueue_detach_element(req->vq, &req->elem, 0);
    892         virtio_blk_free_request(req);
    893     }
    894 
    895     aio_context_release(ctx);
    896 
    897     assert(!s->dataplane_started);
    898     blk_set_enable_write_cache(s->blk, s->original_wce);
    899 }
    900 
    901 /* coalesce internal state, copy to pci i/o region 0
    902  */
    903 static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
    904 {
    905     VirtIOBlock *s = VIRTIO_BLK(vdev);
    906     BlockConf *conf = &s->conf.conf;
    907     struct virtio_blk_config blkcfg;
    908     uint64_t capacity;
    909     int64_t length;
    910     int blk_size = conf->logical_block_size;
    911 
    912     blk_get_geometry(s->blk, &capacity);
    913     memset(&blkcfg, 0, sizeof(blkcfg));
    914     virtio_stq_p(vdev, &blkcfg.capacity, capacity);
    915     virtio_stl_p(vdev, &blkcfg.seg_max,
    916                  s->conf.seg_max_adjust ? s->conf.queue_size - 2 : 128 - 2);
    917     virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
    918     virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
    919     virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
    920     virtio_stl_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
    921     blkcfg.geometry.heads = conf->heads;
    922     /*
    923      * We must ensure that the block device capacity is a multiple of
    924      * the logical block size. If that is not the case, let's use
    925      * sector_mask to adapt the geometry to present a correct picture.
    926      * For those devices where the capacity is fine for the given geometry
    927      * we don't touch the sector value of the geometry, since some devices
    928      * (like s390 dasd) need a specific value. Here the capacity is already
    929      * cyls*heads*secs*blk_size and the sector value is not the block size
    930      * divided by 512 - instead it is the number of blk_size blocks
    931      * per track (cylinder).
    932      */
    933     length = blk_getlength(s->blk);
    934     if (length > 0 && length / conf->heads / conf->secs % blk_size) {
    935         blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
    936     } else {
    937         blkcfg.geometry.sectors = conf->secs;
    938     }
    939     blkcfg.size_max = 0;
    940     blkcfg.physical_block_exp = get_physical_block_exp(conf);
    941     blkcfg.alignment_offset = 0;
    942     blkcfg.wce = blk_enable_write_cache(s->blk);
    943     virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
    944     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
    945         uint32_t discard_granularity = conf->discard_granularity;
    946         if (discard_granularity == -1 || !s->conf.report_discard_granularity) {
    947             discard_granularity = blk_size;
    948         }
    949         virtio_stl_p(vdev, &blkcfg.max_discard_sectors,
    950                      s->conf.max_discard_sectors);
    951         virtio_stl_p(vdev, &blkcfg.discard_sector_alignment,
    952                      discard_granularity >> BDRV_SECTOR_BITS);
    953         /*
    954          * We support only one segment per request since multiple segments
    955          * are not widely used and there are no userspace APIs that allow
    956          * applications to submit multiple segments in a single call.
    957          */
    958         virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1);
    959     }
    960     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) {
    961         virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors,
    962                      s->conf.max_write_zeroes_sectors);
    963         blkcfg.write_zeroes_may_unmap = 1;
    964         virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1);
    965     }
    966     memcpy(config, &blkcfg, s->config_size);
    967 }
    968 
    969 static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
    970 {
    971     VirtIOBlock *s = VIRTIO_BLK(vdev);
    972     struct virtio_blk_config blkcfg;
    973 
    974     memcpy(&blkcfg, config, s->config_size);
    975 
    976     aio_context_acquire(blk_get_aio_context(s->blk));
    977     blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
    978     aio_context_release(blk_get_aio_context(s->blk));
    979 }
    980 
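        /*
         * Feature negotiation: merge in the user-controllable host_features,
         * add the features this device always offers, and derive WCE/RO/MQ
         * from the backend and the configuration. SCSI passthrough is refused
         * with virtio 1.0 because VIRTIO_BLK_F_SCSI is a legacy-only feature.
         */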
    981 static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
    982                                         Error **errp)
    983 {
    984     VirtIOBlock *s = VIRTIO_BLK(vdev);
    985 
    986     /* First, sync all features potentially supported by virtio-blk */
    987     features |= s->host_features;
    988 
    989     virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
    990     virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
    991     virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
    992     virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
    993     if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
    994         if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_SCSI)) {
    995             error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
    996             return 0;
    997         }
    998     } else {
    999         virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT);
   1000         virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
   1001     }
   1002 
   1003     if (blk_enable_write_cache(s->blk) ||
   1004         (s->conf.x_enable_wce_if_config_wce &&
   1005          virtio_has_feature(features, VIRTIO_BLK_F_CONFIG_WCE))) {
   1006         virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
   1007     }
   1008     if (!blk_is_writable(s->blk)) {
   1009         virtio_add_feature(&features, VIRTIO_BLK_F_RO);
   1010     }
   1011     if (s->conf.num_queues > 1) {
   1012         virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
   1013     }
   1014 
   1015     return features;
   1016 }
   1017 
   1018 static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
   1019 {
   1020     VirtIOBlock *s = VIRTIO_BLK(vdev);
   1021 
   1022     if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
   1023         assert(!s->dataplane_started);
   1024     }
   1025 
   1026     if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
   1027         return;
   1028     }
   1029 
   1030     /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
   1031      * cache flushes.  Thus, the "auto writethrough" behavior is never
   1032      * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
   1033      * Leaving it enabled would break the following sequence:
   1034      *
   1035      *     Guest started with "-drive cache=writethrough"
   1036      *     Guest sets status to 0
   1037      *     Guest sets DRIVER bit in status field
   1038      *     Guest reads host features (WCE=0, CONFIG_WCE=1)
   1039      *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
   1040      *     Guest writes 1 to the WCE configuration field (writeback mode)
   1041      *     Guest sets DRIVER_OK bit in status field
   1042      *
   1043      * s->blk would erroneously be placed in writethrough mode.
   1044      */
   1045     if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
   1046         aio_context_acquire(blk_get_aio_context(s->blk));
   1047         blk_set_enable_write_cache(s->blk,
   1048                                    virtio_vdev_has_feature(vdev,
   1049                                                            VIRTIO_BLK_F_WCE));
   1050         aio_context_release(blk_get_aio_context(s->blk));
   1051     }
   1052 }
   1053 
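        /*
         * Stream format for in-flight requests: each queued request is
         * preceded by a sentinel byte of 1, followed by the virtqueue index
         * (only when num_queues > 1, which keeps the single-queue format
         * unchanged) and the virtqueue element; a sentinel byte of 0
         * terminates the list. virtio_blk_load_device() is the inverse.
         */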
   1054 static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
   1055 {
   1056     VirtIOBlock *s = VIRTIO_BLK(vdev);
   1057     VirtIOBlockReq *req = s->rq;
   1058 
   1059     while (req) {
   1060         qemu_put_sbyte(f, 1);
   1061 
   1062         if (s->conf.num_queues > 1) {
   1063             qemu_put_be32(f, virtio_get_queue_index(req->vq));
   1064         }
   1065 
   1066         qemu_put_virtqueue_element(vdev, f, &req->elem);
   1067         req = req->next;
   1068     }
   1069     qemu_put_sbyte(f, 0);
   1070 }
   1071 
   1072 static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
   1073                                   int version_id)
   1074 {
   1075     VirtIOBlock *s = VIRTIO_BLK(vdev);
   1076 
   1077     while (qemu_get_sbyte(f)) {
   1078         unsigned nvqs = s->conf.num_queues;
   1079         unsigned vq_idx = 0;
   1080         VirtIOBlockReq *req;
   1081 
   1082         if (nvqs > 1) {
   1083             vq_idx = qemu_get_be32(f);
   1084 
   1085             if (vq_idx >= nvqs) {
   1086                 error_report("Invalid virtqueue index in request list: %#x",
   1087                              vq_idx);
   1088                 return -EINVAL;
   1089             }
   1090         }
   1091 
   1092         req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
   1093         virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);
   1094         req->next = s->rq;
   1095         s->rq = req;
   1096     }
   1097 
   1098     return 0;
   1099 }
   1100 
   1101 static void virtio_resize_cb(void *opaque)
   1102 {
   1103     VirtIODevice *vdev = opaque;
   1104 
   1105     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
   1106     virtio_notify_config(vdev);
   1107 }
   1108 
   1109 static void virtio_blk_resize(void *opaque)
   1110 {
   1111     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
   1112 
   1113     /*
   1114      * virtio_notify_config() needs to acquire the global mutex,
   1115      * so it can't be called from an iothread. Instead, schedule
   1116      * it to be run in the main context BH.
   1117      */
   1118     aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
   1119 }
   1120 
   1121 static const BlockDevOps virtio_block_ops = {
   1122     .resize_cb = virtio_blk_resize,
   1123 };
   1124 
   1125 static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
   1126 {
   1127     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
   1128     VirtIOBlock *s = VIRTIO_BLK(dev);
   1129     VirtIOBlkConf *conf = &s->conf;
   1130     Error *err = NULL;
   1131     unsigned i;
   1132 
   1133     if (!conf->conf.blk) {
   1134         error_setg(errp, "drive property not set");
   1135         return;
   1136     }
   1137     if (!blk_is_inserted(conf->conf.blk)) {
   1138         error_setg(errp, "Device needs media, but drive is empty");
   1139         return;
   1140     }
   1141     if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) {
   1142         conf->num_queues = 1;
   1143     }
   1144     if (!conf->num_queues) {
   1145         error_setg(errp, "num-queues property must be larger than 0");
   1146         return;
   1147     }
   1148     if (conf->queue_size <= 2) {
   1149         error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
   1150                    "must be > 2", conf->queue_size);
   1151         return;
   1152     }
   1153     if (!is_power_of_2(conf->queue_size) ||
   1154         conf->queue_size > VIRTQUEUE_MAX_SIZE) {
   1155         error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
   1156                    "must be a power of 2 (max %d)",
   1157                    conf->queue_size, VIRTQUEUE_MAX_SIZE);
   1158         return;
   1159     }
   1160 
   1161     if (!blkconf_apply_backend_options(&conf->conf,
   1162                                        !blk_supports_write_perm(conf->conf.blk),
   1163                                        true, errp)) {
   1164         return;
   1165     }
   1166     s->original_wce = blk_enable_write_cache(conf->conf.blk);
   1167     if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
   1168         return;
   1169     }
   1170 
   1171     if (!blkconf_blocksizes(&conf->conf, errp)) {
   1172         return;
   1173     }
   1174 
   1175     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
   1176         (!conf->max_discard_sectors ||
   1177          conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
   1178         error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")"
   1179                    ", must be between 1 and %d",
   1180                    conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS);
   1181         return;
   1182     }
   1183 
   1184     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) &&
   1185         (!conf->max_write_zeroes_sectors ||
   1186          conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) {
   1187         error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32
   1188                    "), must be between 1 and %d",
   1189                    conf->max_write_zeroes_sectors,
   1190                    (int)BDRV_REQUEST_MAX_SECTORS);
   1191         return;
   1192     }
   1193 
   1194     s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
   1195                                             s->host_features);
   1196     virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);
   1197 
   1198     s->blk = conf->conf.blk;
   1199     s->rq = NULL;
   1200     s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
   1201 
   1202     for (i = 0; i < conf->num_queues; i++) {
   1203         virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
   1204     }
   1205     qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2);
   1206     virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
   1207     if (err != NULL) {
   1208         error_propagate(errp, err);
   1209         for (i = 0; i < conf->num_queues; i++) {
   1210             virtio_del_queue(vdev, i);
   1211         }
   1212         virtio_cleanup(vdev);
   1213         return;
   1214     }
   1215 
   1216     s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
   1217     blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
   1218     blk_set_dev_ops(s->blk, &virtio_block_ops, s);
   1219 
   1220     blk_iostatus_enable(s->blk);
   1221 
   1222     add_boot_device_lchs(dev, "/disk@0,0",
   1223                          conf->conf.lcyls,
   1224                          conf->conf.lheads,
   1225                          conf->conf.lsecs);
   1226 }
   1227 
   1228 static void virtio_blk_device_unrealize(DeviceState *dev)
   1229 {
   1230     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
   1231     VirtIOBlock *s = VIRTIO_BLK(dev);
   1232     VirtIOBlkConf *conf = &s->conf;
   1233     unsigned i;
   1234 
   1235     blk_drain(s->blk);
   1236     del_boot_device_lchs(dev, "/disk@0,0");
   1237     virtio_blk_data_plane_destroy(s->dataplane);
   1238     s->dataplane = NULL;
   1239     for (i = 0; i < conf->num_queues; i++) {
   1240         virtio_del_queue(vdev, i);
   1241     }
   1242     qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2);
   1243     blk_ram_registrar_destroy(&s->blk_ram_registrar);
   1244     qemu_del_vm_change_state_handler(s->change);
   1245     blockdev_mark_auto_del(s->blk);
   1246     virtio_cleanup(vdev);
   1247 }
   1248 
   1249 static void virtio_blk_instance_init(Object *obj)
   1250 {
   1251     VirtIOBlock *s = VIRTIO_BLK(obj);
   1252 
   1253     device_add_bootindex_property(obj, &s->conf.conf.bootindex,
   1254                                   "bootindex", "/disk@0,0",
   1255                                   DEVICE(obj));
   1256 }
   1257 
   1258 static const VMStateDescription vmstate_virtio_blk = {
   1259     .name = "virtio-blk",
   1260     .minimum_version_id = 2,
   1261     .version_id = 2,
   1262     .fields = (VMStateField[]) {
   1263         VMSTATE_VIRTIO_DEVICE,
   1264         VMSTATE_END_OF_LIST()
   1265     },
   1266 };
   1267 
   1268 static Property virtio_blk_properties[] = {
   1269     DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
   1270     DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf),
   1271     DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
   1272     DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial),
   1273     DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features,
   1274                       VIRTIO_BLK_F_CONFIG_WCE, true),
   1275 #ifdef __linux__
   1276     DEFINE_PROP_BIT64("scsi", VirtIOBlock, host_features,
   1277                       VIRTIO_BLK_F_SCSI, false),
   1278 #endif
   1279     DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
   1280                     true),
   1281     DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues,
   1282                        VIRTIO_BLK_AUTO_NUM_QUEUES),
   1283     DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256),
   1284     DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true),
   1285     DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
   1286                      IOThread *),
   1287     DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
   1288                       VIRTIO_BLK_F_DISCARD, true),
   1289     DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock,
   1290                      conf.report_discard_granularity, true),
   1291     DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features,
   1292                       VIRTIO_BLK_F_WRITE_ZEROES, true),
   1293     DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock,
   1294                        conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS),
   1295     DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock,
   1296                        conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS),
   1297     DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock,
   1298                      conf.x_enable_wce_if_config_wce, true),
   1299     DEFINE_PROP_END_OF_LIST(),
   1300 };
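
        /*
         * A minimal usage sketch (an assumption for illustration: the PCI
         * transport virtio-blk-pci and a raw image disk.img): the properties
         * above surface as -device options, e.g.
         *
         *   qemu-system-x86_64 \
         *       -drive if=none,id=drive0,file=disk.img,format=raw \
         *       -device virtio-blk-pci,drive=drive0,num-queues=4,queue-size=256
         */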
   1301 
   1302 static void virtio_blk_class_init(ObjectClass *klass, void *data)
   1303 {
   1304     DeviceClass *dc = DEVICE_CLASS(klass);
   1305     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
   1306 
   1307     device_class_set_props(dc, virtio_blk_properties);
   1308     dc->vmsd = &vmstate_virtio_blk;
   1309     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
   1310     vdc->realize = virtio_blk_device_realize;
   1311     vdc->unrealize = virtio_blk_device_unrealize;
   1312     vdc->get_config = virtio_blk_update_config;
   1313     vdc->set_config = virtio_blk_set_config;
   1314     vdc->get_features = virtio_blk_get_features;
   1315     vdc->set_status = virtio_blk_set_status;
   1316     vdc->reset = virtio_blk_reset;
   1317     vdc->save = virtio_blk_save_device;
   1318     vdc->load = virtio_blk_load_device;
   1319     vdc->start_ioeventfd = virtio_blk_data_plane_start;
   1320     vdc->stop_ioeventfd = virtio_blk_data_plane_stop;
   1321 }
   1322 
   1323 static const TypeInfo virtio_blk_info = {
   1324     .name = TYPE_VIRTIO_BLK,
   1325     .parent = TYPE_VIRTIO_DEVICE,
   1326     .instance_size = sizeof(VirtIOBlock),
   1327     .instance_init = virtio_blk_instance_init,
   1328     .class_init = virtio_blk_class_init,
   1329 };
   1330 
   1331 static void virtio_register_types(void)
   1332 {
   1333     type_register_static(&virtio_blk_info);
   1334 }
   1335 
   1336 type_init(virtio_register_types)