qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git
Log | Files | Refs | Submodules | LICENSE

test-block-iothread.c (27344B)


      1 /*
      2  * Block tests for iothreads
      3  *
      4  * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a copy
      7  * of this software and associated documentation files (the "Software"), to deal
      8  * in the Software without restriction, including without limitation the rights
      9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     10  * copies of the Software, and to permit persons to whom the Software is
     11  * furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
     19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
     22  * THE SOFTWARE.
     23  */
     24 
     25 #include "qemu/osdep.h"
     26 #include "block/block.h"
     27 #include "block/blockjob_int.h"
     28 #include "sysemu/block-backend.h"
     29 #include "qapi/error.h"
     30 #include "qapi/qmp/qdict.h"
     31 #include "qemu/main-loop.h"
     32 #include "iothread.h"
     33 
/* Test driver read callback: reports success without transferring any data. */
static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            BdrvRequestFlags flags)
{
    return 0;
}
     41 
/* Test driver write callback: reports success without transferring any data. */
static int coroutine_fn bdrv_test_co_pwritev(BlockDriverState *bs,
                                             int64_t offset, int64_t bytes,
                                             QEMUIOVector *qiov,
                                             BdrvRequestFlags flags)
{
    return 0;
}
     49 
/* Test driver discard callback: always succeeds, discards nothing. */
static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
                                              int64_t offset, int64_t bytes)
{
    return 0;
}
     55 
/* Test driver truncate callback: always succeeds without resizing anything. */
static int coroutine_fn
bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
                      PreallocMode prealloc, BdrvRequestFlags flags,
                      Error **errp)
{
    return 0;
}
     63 
/*
 * Test driver block-status callback: claims the whole queried range in one
 * go (*pnum = count) and returns 0, i.e. "unallocated" status flags.
 */
static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
                                                  bool want_zero,
                                                  int64_t offset, int64_t count,
                                                  int64_t *pnum, int64_t *map,
                                                  BlockDriverState **file)
{
    *pnum = count;
    return 0;
}
     73 
/*
 * Minimal in-memory block driver whose callbacks all succeed immediately,
 * so the tests below can exercise the generic block layer and AioContext
 * handling without touching real storage.
 */
static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = 1,

    .bdrv_co_preadv         = bdrv_test_co_preadv,
    .bdrv_co_pwritev        = bdrv_test_co_pwritev,
    .bdrv_co_pdiscard       = bdrv_test_co_pdiscard,
    .bdrv_co_truncate       = bdrv_test_co_truncate,
    .bdrv_co_block_status   = bdrv_test_co_block_status,
};
     84 
/* Synchronous bdrv_pread(): success path plus early -EIO on a bad offset. */
static void test_sync_op_pread(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = bdrv_pread(c, 0, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pread(c, -2, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, -EIO);
}
     98 
/* Synchronous bdrv_pwrite(): success path plus early -EIO on a bad offset. */
static void test_sync_op_pwrite(BdrvChild *c)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Success */
    ret = bdrv_pwrite(c, 0, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pwrite(c, -2, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, -EIO);
}
    112 
/* Same as test_sync_op_pread(), but through the BlockBackend layer. */
static void test_sync_op_blk_pread(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = blk_pread(blk, 0, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pread(blk, -2, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, -EIO);
}
    126 
/* Same as test_sync_op_pwrite(), but through the BlockBackend layer. */
static void test_sync_op_blk_pwrite(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Success */
    ret = blk_pwrite(blk, 0, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pwrite(blk, -2, sizeof(buf), buf, 0);
    g_assert_cmpint(ret, ==, -EIO);
}
    140 
/* Vectored BlockBackend read: success path plus early -EIO on a bad offset. */
static void test_sync_op_blk_preadv(BlockBackend *blk)
{
    uint8_t buf[512];
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
    int ret;

    /* Success */
    ret = blk_preadv(blk, 0, sizeof(buf), &qiov, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_preadv(blk, -2, sizeof(buf), &qiov, 0);
    g_assert_cmpint(ret, ==, -EIO);
}
    155 
/* Vectored BlockBackend write: success path plus early -EIO on a bad offset. */
static void test_sync_op_blk_pwritev(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
    int ret;

    /* Success */
    ret = blk_pwritev(blk, 0, sizeof(buf), &qiov, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pwritev(blk, -2, sizeof(buf), &qiov, 0);
    g_assert_cmpint(ret, ==, -EIO);
}
    170 
/* blk_preadv_part() (read into a sub-range of the qiov, here offset 0). */
static void test_sync_op_blk_preadv_part(BlockBackend *blk)
{
    uint8_t buf[512];
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
    int ret;

    /* Success */
    ret = blk_preadv_part(blk, 0, sizeof(buf), &qiov, 0, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_preadv_part(blk, -2, sizeof(buf), &qiov, 0, 0);
    g_assert_cmpint(ret, ==, -EIO);
}
    185 
/* blk_pwritev_part() (write from a sub-range of the qiov, here offset 0). */
static void test_sync_op_blk_pwritev_part(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
    int ret;

    /* Success */
    ret = blk_pwritev_part(blk, 0, sizeof(buf), &qiov, 0, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pwritev_part(blk, -2, sizeof(buf), &qiov, 0, 0);
    g_assert_cmpint(ret, ==, -EIO);
}
    200 
/*
 * Compressed write: the test driver has no compressed-write callback, so the
 * request reaches the driver and fails late with -ENOTSUP; a negative offset
 * is rejected early with -EIO before the driver is consulted.
 */
static void test_sync_op_blk_pwrite_compressed(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Late error: Not supported */
    ret = blk_pwrite_compressed(blk, 0, sizeof(buf), buf);
    g_assert_cmpint(ret, ==, -ENOTSUP);

    /* Early error: Negative offset */
    ret = blk_pwrite_compressed(blk, -2, sizeof(buf), buf);
    g_assert_cmpint(ret, ==, -EIO);
}
    214 
/* blk_pwrite_zeroes(): success path plus early -EIO on a bad offset. */
static void test_sync_op_blk_pwrite_zeroes(BlockBackend *blk)
{
    int ret;

    /* Success */
    ret = blk_pwrite_zeroes(blk, 0, 512, 0);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pwrite_zeroes(blk, -2, 512, 0);
    g_assert_cmpint(ret, ==, -EIO);
}
    227 
/* bdrv_load_vmstate() on a driver without snapshot support fails -ENOTSUP. */
static void test_sync_op_load_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
    237 
/* bdrv_save_vmstate() on a driver without snapshot support fails -ENOTSUP. */
static void test_sync_op_save_vmstate(BdrvChild *c)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
    247 
/*
 * bdrv_pdiscard(): exercised with BDRV_O_UNMAP set (request reaches the
 * driver), with it cleared (early no-op success), and with a negative
 * offset (early -EIO).  The open_flags toggling is restored before return.
 */
static void test_sync_op_pdiscard(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    c->bs->open_flags |= BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: UNMAP not supported */
    c->bs->open_flags &= ~BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pdiscard(c, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}
    266 
/* blk_pdiscard() without BDRV_O_UNMAP: early success, early -EIO on offset. */
static void test_sync_op_blk_pdiscard(BlockBackend *blk)
{
    int ret;

    /* Early success: UNMAP not supported */
    ret = blk_pdiscard(blk, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pdiscard(blk, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}
    279 
/*
 * bdrv_truncate(): success, early -EINVAL on a negative size, and -EACCES
 * when the image is temporarily flipped to read-only.  BDRV_O_RDWR is
 * restored before returning so later tests see a writable image.
 */
static void test_sync_op_truncate(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_truncate(c, -2, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);

    /* Error: Read-only image */
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EACCES);

    c->bs->open_flags |= BDRV_O_RDWR;
}
    300 
/* blk_truncate(): success path plus early -EINVAL on a negative size. */
static void test_sync_op_blk_truncate(BlockBackend *blk)
{
    int ret;

    /* Normal success path */
    ret = blk_truncate(blk, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_truncate(blk, -2, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);
}
    313 
    314 static void test_sync_op_block_status(BdrvChild *c)
    315 {
    316     int ret;
    317     int64_t n;
    318 
    319     /* Normal success path */
    320     ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    321     g_assert_cmpint(ret, ==, 0);
    322 
    323     /* Early success: No driver support */
    324     bdrv_test.bdrv_co_block_status = NULL;
    325     ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    326     g_assert_cmpint(ret, ==, 1);
    327 
    328     /* Early success: bytes = 0 */
    329     ret = bdrv_is_allocated(c->bs, 0, 0, &n);
    330     g_assert_cmpint(ret, ==, 0);
    331 
    332     /* Early success: Offset > image size*/
    333     ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
    334     g_assert_cmpint(ret, ==, 0);
    335 }
    336 
/*
 * bdrv_flush(): succeeds on a writable image and also (early, without
 * reaching the driver) on a read-only one.  BDRV_O_RDWR is restored.
 */
static void test_sync_op_flush(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    c->bs->open_flags |= BDRV_O_RDWR;
}
    353 
/* Same as test_sync_op_flush(), but through the BlockBackend layer. */
static void test_sync_op_blk_flush(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    int ret;

    /* Normal success path */
    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    bs->open_flags &= ~BDRV_O_RDWR;

    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    bs->open_flags |= BDRV_O_RDWR;
}
    371 
/* bdrv_check() on a driver without a check callback fails with -ENOTSUP. */
static void test_sync_op_check(BdrvChild *c)
{
    BdrvCheckResult result;
    int ret;

    /* Error: Driver does not implement check */
    ret = bdrv_check(c->bs, &result, 0);
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
    381 
/* bdrv_activate() on an already-active image is an early no-op success. */
static void test_sync_op_activate(BdrvChild *c)
{
    /* Early success: Image is not inactive */
    bdrv_activate(c->bs, NULL);
}
    387 
    388 
/* One synchronous-operation test case; either callback may be NULL. */
typedef struct SyncOpTest {
    const char *name;               /* g_test path under which it registers */
    void (*fn)(BdrvChild *c);       /* node-level variant (bdrv_* API) */
    void (*blkfn)(BlockBackend *blk); /* BlockBackend-level variant (blk_* API) */
} SyncOpTest;
    394 
    395 const SyncOpTest sync_op_tests[] = {
    396     {
    397         .name   = "/sync-op/pread",
    398         .fn     = test_sync_op_pread,
    399         .blkfn  = test_sync_op_blk_pread,
    400     }, {
    401         .name   = "/sync-op/pwrite",
    402         .fn     = test_sync_op_pwrite,
    403         .blkfn  = test_sync_op_blk_pwrite,
    404     }, {
    405         .name   = "/sync-op/preadv",
    406         .fn     = NULL,
    407         .blkfn  = test_sync_op_blk_preadv,
    408     }, {
    409         .name   = "/sync-op/pwritev",
    410         .fn     = NULL,
    411         .blkfn  = test_sync_op_blk_pwritev,
    412     }, {
    413         .name   = "/sync-op/preadv_part",
    414         .fn     = NULL,
    415         .blkfn  = test_sync_op_blk_preadv_part,
    416     }, {
    417         .name   = "/sync-op/pwritev_part",
    418         .fn     = NULL,
    419         .blkfn  = test_sync_op_blk_pwritev_part,
    420     }, {
    421         .name   = "/sync-op/pwrite_compressed",
    422         .fn     = NULL,
    423         .blkfn  = test_sync_op_blk_pwrite_compressed,
    424     }, {
    425         .name   = "/sync-op/pwrite_zeroes",
    426         .fn     = NULL,
    427         .blkfn  = test_sync_op_blk_pwrite_zeroes,
    428     }, {
    429         .name   = "/sync-op/load_vmstate",
    430         .fn     = test_sync_op_load_vmstate,
    431     }, {
    432         .name   = "/sync-op/save_vmstate",
    433         .fn     = test_sync_op_save_vmstate,
    434     }, {
    435         .name   = "/sync-op/pdiscard",
    436         .fn     = test_sync_op_pdiscard,
    437         .blkfn  = test_sync_op_blk_pdiscard,
    438     }, {
    439         .name   = "/sync-op/truncate",
    440         .fn     = test_sync_op_truncate,
    441         .blkfn  = test_sync_op_blk_truncate,
    442     }, {
    443         .name   = "/sync-op/block_status",
    444         .fn     = test_sync_op_block_status,
    445     }, {
    446         .name   = "/sync-op/flush",
    447         .fn     = test_sync_op_flush,
    448         .blkfn  = test_sync_op_blk_flush,
    449     }, {
    450         .name   = "/sync-op/check",
    451         .fn     = test_sync_op_check,
    452     }, {
    453         .name   = "/sync-op/activate",
    454         .fn     = test_sync_op_activate,
    455     },
    456 };
    457 
/* Test synchronous operations that run in a different iothread, so we have to
 * poll for the coroutine there to return. */
static void test_sync_op(const void *opaque)
{
    const SyncOpTest *t = opaque;
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    BdrvChild *c;

    /* Build a BlockBackend on top of a fresh test node in the main context */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
    blk_insert_bs(blk, bs, &error_abort);
    /* The only parent of bs is the child edge blk just created */
    c = QLIST_FIRST(&bs->parents);

    /* Move the whole tree into the iothread and run the test case there */
    blk_set_aio_context(blk, ctx, &error_abort);
    aio_context_acquire(ctx);
    if (t->fn) {
        t->fn(c);
    }
    if (t->blkfn) {
        t->blkfn(blk);
    }
    /* Move back to the main context while still holding ctx */
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}
    489 
/* Block job used by test_attach_blockjob(). */
typedef struct TestBlockJob {
    BlockJob common;
    bool should_complete;   /* set by .complete to stop the run loop */
    int n;                  /* iteration counter, polled by the test */
} TestBlockJob;
    495 
/* .prepare runs during completion; it must execute in the main context. */
static int test_job_prepare(Job *job)
{
    g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    return 0;
}
    501 
/*
 * Job main loop: spins (bumping s->n each iteration) until .complete sets
 * should_complete, asserting on every iteration that the coroutine runs in
 * the job's current AioContext — including across context switches.
 */
static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        s->n++;
        g_assert(qemu_get_current_aio_context() == job->aio_context);

        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that the
         * drain involved in AioContext switches has to wait for this activity
         * to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    g_assert(qemu_get_current_aio_context() == job->aio_context);
    return 0;
}
    523 
/* .complete callback: signal test_job_run()'s loop to finish. */
static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}
    529 
    530 BlockJobDriver test_job_driver = {
    531     .job_driver = {
    532         .instance_size  = sizeof(TestBlockJob),
    533         .free           = block_job_free,
    534         .user_resume    = block_job_user_resume,
    535         .run            = test_job_run,
    536         .complete       = test_job_complete,
    537         .prepare        = test_job_prepare,
    538     },
    539 };
    540 
/*
 * Start a block job on a node and repeatedly move the node between the main
 * context and an iothread while the job runs.  After each switch, wait until
 * the job has made progress (tjob->n incremented) in the new context; the
 * assertions inside test_job_run() verify it follows the node's context.
 */
static void test_attach_blockjob(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    TestBlockJob *tjob;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    tjob = block_job_create("job0", &test_job_driver, NULL, bs,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    job_start(&tjob->common.job);

    /* Let the job run at least once in the main context */
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Move to the iothread and wait for progress there */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Move back to the main context (ctx must be held for the switch) */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* And once more into the iothread before completing the job */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    WITH_JOB_LOCK_GUARD() {
        job_complete_sync_locked(&tjob->common.job, &error_abort);
    }
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}
    595 
/*
 * Test that changing the AioContext for one node in a tree (here through blk)
 * changes all other nodes as well:
 *
 *  blk
 *   |
 *   |  bs_verify [blkverify]
 *   |   /               \
 *   |  /                 \
 *  bs_a [bdrv_test]    bs_b [bdrv_test]
 *
 */
static void test_propagate_basic(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_verify;
    QDict *options;

    /*
     * Create bs_a and its BlockBackend.  We cannot take the RESIZE
     * permission because blkverify will not share it on the test
     * image.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs_a, &error_abort);

    /* Create bs_b */
    bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_a and bs_b */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_a");
    qdict_put_str(options, "raw", "bs_b");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Switch the AioContext: must propagate from blk to every node */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);

    /* Switch the AioContext back (holding ctx for the cross-context switch) */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);

    bdrv_unref(bs_verify);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
    blk_unref(blk);
}
    660 
/*
 * Test that diamonds in the graph don't lead to endless recursion:
 *
 *              blk
 *               |
 *      bs_verify [blkverify]
 *       /              \
 *      /                \
 *   bs_b [raw]         bs_c[raw]
 *      \                /
 *       \              /
 *       bs_a [bdrv_test]
 */
static void test_propagate_diamond(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
    QDict *options;

    /* Create bs_a */
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);

    /* Create bs_b and bc_c, both raw filters over bs_a */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_b");
    bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_c");
    bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_b");
    qdict_put_str(options, "raw", "bs_c");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    /*
     * Do not take the RESIZE permission: This would require the same
     * from bs_c and thus from bs_a; however, blkverify will not share
     * it on bs_b, and thus it will not be available for bs_a.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    blk_insert_bs(blk, bs_verify, &error_abort);

    /* Switch the AioContext: every node of the diamond must follow */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);
    g_assert(bdrv_get_aio_context(bs_c) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_c) == main_ctx);

    blk_unref(blk);
    bdrv_unref(bs_verify);
    bdrv_unref(bs_c);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
}
    740 
/*
 * Test AioContext propagation across a mirror job: switching either side
 * (source or target) must move the job and all involved nodes, and must be
 * refused when a BlockBackend that disallows context changes is attached.
 */
static void test_propagate_mirror(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockDriverState *src, *target, *filter;
    BlockBackend *blk;
    Job *job;
    Error *local_err = NULL;

    /* Create src and target*/
    src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);

    /* Start a mirror job */
    mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
                 MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
                 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                 &error_abort);
    WITH_JOB_LOCK_GUARD() {
        job = job_get_locked("job0");
    }
    filter = bdrv_find_node("filter_node");

    /* Change the AioContext of src: target, filter and job must follow */
    bdrv_try_change_aio_context(src, ctx, NULL, &error_abort);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);
    g_assert(job->aio_context == ctx);

    /* Change the AioContext of target: src and filter must follow back */
    aio_context_acquire(ctx);
    bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
    aio_context_release(ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* With a BlockBackend on src, changing target must fail */
    blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    blk_insert_bs(blk, src, &error_abort);

    bdrv_try_change_aio_context(target, ctx, NULL, &local_err);
    error_free_or_abort(&local_err);

    /* Everything stays in the main context after the refused switch */
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* ...unless we explicitly allow it */
    aio_context_acquire(ctx);
    blk_set_allow_aio_context_change(blk, true);
    bdrv_try_change_aio_context(target, ctx, NULL, &error_abort);
    aio_context_release(ctx);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    job_cancel_sync_all();

    /* Move everything back to the main context before tearing down */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
    aio_context_release(ctx);

    blk_unref(blk);
    bdrv_unref(src);
    bdrv_unref(target);
}
    816 
/*
 * Attach a filter node on top of a node that already lives in an iothread
 * context: the new node must pick up that context, and switching the
 * BlockBackend back must move both nodes.
 */
static void test_attach_second_node(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockBackend *blk;
    BlockDriverState *bs, *filter;
    QDict *options;

    /* blk (and hence bs) starts out in the iothread context */
    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "base");

    /* The new filter over "base" must inherit the iothread context */
    filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    bdrv_unref(filter);
    bdrv_unref(bs);
    blk_unref(blk);
}
    850 
/*
 * A BlockBackend keeps its iothread AioContext when its node is removed
 * (the node falls back to the main context), and re-attaching the node
 * moves it into the BlockBackend's context again.
 */
static void test_attach_preserve_blk_ctx(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;

    /* Add node to BlockBackend that has an iothread context assigned */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    /* Remove the node again: blk keeps ctx, bs returns to the main context */
    aio_context_acquire(ctx);
    blk_remove_bs(blk);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());

    /* Re-attach the node: it follows blk back into the iothread context */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);
    bdrv_unref(bs);
    blk_unref(blk);
}
    885 
/* Register all test cases with GLib's test framework and run them. */
int main(int argc, char **argv)
{
    int i;

    bdrv_init();
    qemu_init_main_loop(&error_abort);

    g_test_init(&argc, &argv, NULL);

    /* One data-driven test per entry in the sync-op table */
    for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
        const SyncOpTest *t = &sync_op_tests[i];
        g_test_add_data_func(t->name, t, test_sync_op);
    }

    g_test_add_func("/attach/blockjob", test_attach_blockjob);
    g_test_add_func("/attach/second_node", test_attach_second_node);
    g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx);
    g_test_add_func("/propagate/basic", test_propagate_basic);
    g_test_add_func("/propagate/diamond", test_propagate_diamond);
    g_test_add_func("/propagate/mirror", test_propagate_mirror);

    return g_test_run();
}