qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git
Log | Files | Refs | Submodules | LICENSE

vhost-user-test.c (32278B)


      1 /*
      2  * QTest testcase for the vhost-user
      3  *
      4  * Copyright (c) 2014 Virtual Open Systems Sarl.
      5  *
      6  * This work is licensed under the terms of the GNU GPL, version 2 or later.
      7  * See the COPYING file in the top-level directory.
      8  *
      9  */
     10 
     11 #include "qemu/osdep.h"
     12 
     13 #include "libqtest-single.h"
     14 #include "qapi/error.h"
     15 #include "qapi/qmp/qdict.h"
     16 #include "qemu/config-file.h"
     17 #include "qemu/option.h"
     18 #include "qemu/range.h"
     19 #include "qemu/sockets.h"
     20 #include "chardev/char-fe.h"
     21 #include "qemu/memfd.h"
     22 #include "qemu/module.h"
     23 #include "sysemu/sysemu.h"
     24 #include "libqos/libqos.h"
     25 #include "libqos/pci-pc.h"
     26 #include "libqos/virtio-pci.h"
     27 
     28 #include "libqos/malloc-pc.h"
     29 #include "libqos/qgraph_internal.h"
     30 #include "hw/virtio/virtio-net.h"
     31 
     32 #include "standard-headers/linux/vhost_types.h"
     33 #include "standard-headers/linux/virtio_ids.h"
     34 #include "standard-headers/linux/virtio_net.h"
     35 #include "standard-headers/linux/virtio_gpio.h"
     36 
     37 #ifdef CONFIG_LINUX
     38 #include <sys/vfs.h>
     39 #endif
     40 
     41 
/*
 * Command-line fragments appended to the QEMU invocation under test.
 * The %d/%s placeholders are filled in by g_string_append_printf()
 * callers (size in MiB, paths, chardev ids).
 */
#define QEMU_CMD_MEM    " -m %d -object memory-backend-file,id=mem,size=%dM," \
                        "mem-path=%s,share=on -numa node,memdev=mem"
/* NOTE(review): the trailing comma before " -numa" looks stray —
 * confirm QEMU's -object option parser tolerates it. */
#define QEMU_CMD_MEMFD  " -m %d -object memory-backend-memfd,id=mem,size=%dM," \
                        " -numa node,memdev=mem"
#define QEMU_CMD_CHR    " -chardev socket,id=%s,path=%s%s"
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce=on"

/* statfs(2) f_type value identifying a hugetlbfs mount */
#define HUGETLBFS_MAGIC       0x958458f6

/*********** FROM hw/virtio/vhost-user.c *************************************/

/* Limits mirrored from the vhost-user backend implementation */
#define VHOST_MEMORY_MAX_NREGIONS    8
#define VHOST_MAX_VIRTQUEUES    0x100

/* Feature bits negotiated over the vhost-user socket */
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VIRTIO_F_VERSION_1 32

/* Protocol feature bits used by these tests */
#define VHOST_USER_PROTOCOL_F_MQ 0
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN   6
#define VHOST_USER_PROTOCOL_F_CONFIG 9

/* Each byte of the migration dirty log covers 8 pages of this size */
#define VHOST_LOG_PAGE 0x1000
/*
 * Request identifiers of the vhost-user protocol, mirrored from
 * hw/virtio/vhost-user.c.  Only the subset the tests handle is listed;
 * note the numbering gap between SET_VRING_ENABLE (18) and
 * GET_CONFIG (24) — intermediate requests are not used here.
 */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_MAX
} VhostUserRequest;
     90 
/* One shared-memory region announced via VHOST_USER_SET_MEM_TABLE */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;   /* offset of the region within the passed fd */
} VhostUserMemoryRegion;

/* Payload of VHOST_USER_SET_MEM_TABLE */
typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;

/* Payload of VHOST_USER_SET_LOG_BASE */
typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

/* On-the-wire vhost-user message: fixed header followed by a payload
 * whose meaning depends on the request. */
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
    } payload;
} QEMU_PACKED VhostUserMsg;

/* Dummy instance used only so sizeof() can name the header fields */
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
/*****************************************************************************/
    137 
/*
 * State machine for the "bad flags" disconnect test: chr_read moves
 * BAD -> END when it deliberately replies with zeroed features, and
 * chr_event moves END -> OK once the resulting CLOSED event arrives.
 */
enum {
    TEST_FLAGS_OK,
    TEST_FLAGS_DISCONNECT,
    TEST_FLAGS_BAD,
    TEST_FLAGS_END,
};

/* Device flavours exercised by these tests */
enum {
    VHOST_USER_NET,
    VHOST_USER_GPIO,
};
    149 
/*
 * Mock vhost-user backend state, shared between the chardev main-loop
 * thread and the test thread.  Mutable fields are protected by
 * data_mutex and waiters are woken through data_cond.
 */
typedef struct TestServer {
    gchar *socket_path;  /* vhost-user unix socket path */
    gchar *mig_path;     /* migration stream unix socket path */
    gchar *chr_name;     /* chardev id used on the QEMU command line */
    gchar *tmpfs;        /* private temp dir holding the paths above */
    CharBackend chr;
    int fds_num;         /* number of region fds received, 0 until SET_MEM_TABLE */
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    VhostUserMemory memory;  /* last SET_MEM_TABLE payload */
    GMainContext *context;
    GMainLoop *loop;
    GThread *thread;     /* runs the GMainLoop so the chardev can operate */
    GMutex data_mutex;
    GCond data_cond;
    int log_fd;          /* dirty-log fd, -1 until SET_LOG_BASE */
    uint64_t rings;      /* bitmask of vrings with a base set */
    bool test_fail;      /* drop the next connection attempt, then clear */
    int test_flags;      /* TEST_FLAGS_* state machine */
    int queues;          /* queue count reported via GET_QUEUE_NUM */
    struct vhost_user_ops *vu_ops;
} TestServer;
    171 
/* Per-device-type hooks so net and gpio tests share the mock backend */
struct vhost_user_ops {
    /* Device types. */
    int type;
    /* Append device-specific QEMU command-line options. */
    void (*append_opts)(TestServer *s, GString *cmd_line,
            const char *chr_opts);

    /* VHOST-USER commands. */
    /* Mandatory (asserted in chr_read): features for GET_FEATURES. */
    uint64_t (*get_features)(TestServer *s);
    /* Optional: react to SET_FEATURES from QEMU. */
    void (*set_features)(TestServer *s, CharBackend *chr,
                         VhostUserMsg *msg);
    /* Optional: reply to GET_PROTOCOL_FEATURES. */
    void (*get_protocol_features)(TestServer *s,
                                  CharBackend *chr, VhostUserMsg *msg);
};
    185 
/* Forward declarations; definitions appear further down the file. */
static const char *init_hugepagefs(void);
static TestServer *test_server_new(const gchar *name,
        struct vhost_user_ops *ops);
static void test_server_free(TestServer *server);
static void test_server_listen(TestServer *server);

/* How guest RAM should be backed: memfd, a file, or autodetected */
enum test_memfd {
    TEST_MEMFD_AUTO,
    TEST_MEMFD_YES,
    TEST_MEMFD_NO,
};
    197 
    198 static void append_vhost_net_opts(TestServer *s, GString *cmd_line,
    199                              const char *chr_opts)
    200 {
    201     g_string_append_printf(cmd_line, QEMU_CMD_CHR QEMU_CMD_NETDEV,
    202                            s->chr_name, s->socket_path,
    203                            chr_opts, s->chr_name);
    204 }
    205 
    206 /*
    207  * For GPIO there are no other magic devices we need to add (like
    208  * block or netdev) so all we need to worry about is the vhost-user
    209  * chardev socket.
    210  */
    211 static void append_vhost_gpio_opts(TestServer *s, GString *cmd_line,
    212                              const char *chr_opts)
    213 {
    214     g_string_append_printf(cmd_line, QEMU_CMD_CHR,
    215                            s->chr_name, s->socket_path,
    216                            chr_opts);
    217 }
    218 
    219 static void append_mem_opts(TestServer *server, GString *cmd_line,
    220                             int size, enum test_memfd memfd)
    221 {
    222     if (memfd == TEST_MEMFD_AUTO) {
    223         memfd = qemu_memfd_check(MFD_ALLOW_SEALING) ? TEST_MEMFD_YES
    224                                                     : TEST_MEMFD_NO;
    225     }
    226 
    227     if (memfd == TEST_MEMFD_YES) {
    228         g_string_append_printf(cmd_line, QEMU_CMD_MEMFD, size, size);
    229     } else {
    230         const char *root = init_hugepagefs() ? : server->tmpfs;
    231 
    232         g_string_append_printf(cmd_line, QEMU_CMD_MEM, size, size, root);
    233     }
    234 }
    235 
/*
 * Wait (up to 5 seconds) for chr_read to have received SET_MEM_TABLE
 * and its accompanying region fds, then sanity-check the counts.
 *
 * Returns true if a region with guest physical address 0x0 was
 * announced; otherwise marks the test skipped and returns false.
 */
static bool wait_for_fds(TestServer *s)
{
    gint64 end_time;
    bool got_region;
    int i;

    g_mutex_lock(&s->data_mutex);

    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (!s->fds_num) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(s->fds_num);
            break;
        }
    }

    /* check for sanity */
    g_assert_cmpint(s->fds_num, >, 0);
    g_assert_cmpint(s->fds_num, ==, s->memory.nregions);

    g_mutex_unlock(&s->data_mutex);

    /* NOTE(review): s->memory is scanned below without the mutex held;
     * presumably stable once fds_num != 0 — confirm. */
    got_region = false;
    for (i = 0; i < s->memory.nregions; ++i) {
        VhostUserMemoryRegion *reg = &s->memory.regions[i];
        if (reg->guest_phys_addr == 0) {
            got_region = true;
            break;
        }
    }
    if (!got_region) {
        g_test_skip("No memory at address 0x0");
    }
    return got_region;
}
    272 
    273 static void read_guest_mem_server(QTestState *qts, TestServer *s)
    274 {
    275     uint8_t *guest_mem;
    276     int i, j;
    277     size_t size;
    278 
    279     g_mutex_lock(&s->data_mutex);
    280 
    281     /* iterate all regions */
    282     for (i = 0; i < s->fds_num; i++) {
    283 
    284         /* We'll check only the region statring at 0x0*/
    285         if (s->memory.regions[i].guest_phys_addr != 0x0) {
    286             continue;
    287         }
    288 
    289         g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);
    290 
    291         size = s->memory.regions[i].memory_size +
    292             s->memory.regions[i].mmap_offset;
    293 
    294         guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
    295                          MAP_SHARED, s->fds[i], 0);
    296 
    297         g_assert(guest_mem != MAP_FAILED);
    298         guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));
    299 
    300         for (j = 0; j < 1024; j++) {
    301             uint32_t a = qtest_readb(qts, s->memory.regions[i].guest_phys_addr + j);
    302             uint32_t b = guest_mem[j];
    303 
    304             g_assert_cmpint(a, ==, b);
    305         }
    306 
    307         munmap(guest_mem, s->memory.regions[i].memory_size);
    308     }
    309 
    310     g_mutex_unlock(&s->data_mutex);
    311 }
    312 
    313 static void *thread_function(void *data)
    314 {
    315     GMainLoop *loop = data;
    316     g_main_loop_run(loop);
    317     return NULL;
    318 }
    319 
/*
 * Chardev "can read" callback: always accept exactly one message
 * header; chr_read pulls any payload off the socket itself.
 */
static int chr_can_read(void *opaque)
{
    return VHOST_USER_HDR_SIZE;
}
    324 
    325 static void chr_read(void *opaque, const uint8_t *buf, int size)
    326 {
    327     g_autoptr(GError) err = NULL;
    328     TestServer *s = opaque;
    329     CharBackend *chr = &s->chr;
    330     VhostUserMsg msg;
    331     uint8_t *p = (uint8_t *) &msg;
    332     int fd = -1;
    333 
    334     if (s->test_fail) {
    335         qemu_chr_fe_disconnect(chr);
    336         /* now switch to non-failure */
    337         s->test_fail = false;
    338     }
    339 
    340     if (size != VHOST_USER_HDR_SIZE) {
    341         qos_printf("%s: Wrong message size received %d\n", __func__, size);
    342         return;
    343     }
    344 
    345     g_mutex_lock(&s->data_mutex);
    346     memcpy(p, buf, VHOST_USER_HDR_SIZE);
    347 
    348     if (msg.size) {
    349         p += VHOST_USER_HDR_SIZE;
    350         size = qemu_chr_fe_read_all(chr, p, msg.size);
    351         if (size != msg.size) {
    352             qos_printf("%s: Wrong message size received %d != %d\n",
    353                        __func__, size, msg.size);
    354             return;
    355         }
    356     }
    357 
    358     switch (msg.request) {
    359     case VHOST_USER_GET_FEATURES:
    360         /* Mandatory for tests to define get_features */
    361         g_assert(s->vu_ops->get_features);
    362 
    363         /* send back features to qemu */
    364         msg.flags |= VHOST_USER_REPLY_MASK;
    365         msg.size = sizeof(m.payload.u64);
    366 
    367         if (s->test_flags >= TEST_FLAGS_BAD) {
    368             msg.payload.u64 = 0;
    369             s->test_flags = TEST_FLAGS_END;
    370         } else {
    371             msg.payload.u64 = s->vu_ops->get_features(s);
    372         }
    373 
    374         qemu_chr_fe_write_all(chr, (uint8_t *) &msg,
    375                               VHOST_USER_HDR_SIZE + msg.size);
    376         break;
    377 
    378     case VHOST_USER_SET_FEATURES:
    379         if (s->vu_ops->set_features) {
    380             s->vu_ops->set_features(s, chr, &msg);
    381         }
    382         break;
    383 
    384     case VHOST_USER_SET_OWNER:
    385         /*
    386          * We don't need to do anything here, the remote is just
    387          * letting us know it is in charge. Just log it.
    388          */
    389         qos_printf("set_owner: start of session\n");
    390         break;
    391 
    392     case VHOST_USER_GET_PROTOCOL_FEATURES:
    393         if (s->vu_ops->get_protocol_features) {
    394             s->vu_ops->get_protocol_features(s, chr, &msg);
    395         }
    396         break;
    397 
    398     case VHOST_USER_GET_CONFIG:
    399         /*
    400          * Treat GET_CONFIG as a NOP and just reply and let the guest
    401          * consider we have updated its memory. Tests currently don't
    402          * require working configs.
    403          */
    404         msg.flags |= VHOST_USER_REPLY_MASK;
    405         p = (uint8_t *) &msg;
    406         qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
    407         break;
    408 
    409     case VHOST_USER_SET_PROTOCOL_FEATURES:
    410         /*
    411          * We did set VHOST_USER_F_PROTOCOL_FEATURES so its valid for
    412          * the remote end to send this. There is no handshake reply so
    413          * just log the details for debugging.
    414          */
    415         qos_printf("set_protocol_features: 0x%"PRIx64 "\n", msg.payload.u64);
    416         break;
    417 
    418         /*
    419          * A real vhost-user backend would actually set the size and
    420          * address of the vrings but we can simply report them.
    421          */
    422     case VHOST_USER_SET_VRING_NUM:
    423         qos_printf("set_vring_num: %d/%d\n",
    424                    msg.payload.state.index, msg.payload.state.num);
    425         break;
    426     case VHOST_USER_SET_VRING_ADDR:
    427         qos_printf("set_vring_addr: 0x%"PRIx64"/0x%"PRIx64"/0x%"PRIx64"\n",
    428                    msg.payload.addr.avail_user_addr,
    429                    msg.payload.addr.desc_user_addr,
    430                    msg.payload.addr.used_user_addr);
    431         break;
    432 
    433     case VHOST_USER_GET_VRING_BASE:
    434         /* send back vring base to qemu */
    435         msg.flags |= VHOST_USER_REPLY_MASK;
    436         msg.size = sizeof(m.payload.state);
    437         msg.payload.state.num = 0;
    438         p = (uint8_t *) &msg;
    439         qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
    440 
    441         assert(msg.payload.state.index < s->queues * 2);
    442         s->rings &= ~(0x1ULL << msg.payload.state.index);
    443         g_cond_broadcast(&s->data_cond);
    444         break;
    445 
    446     case VHOST_USER_SET_MEM_TABLE:
    447         /* received the mem table */
    448         memcpy(&s->memory, &msg.payload.memory, sizeof(msg.payload.memory));
    449         s->fds_num = qemu_chr_fe_get_msgfds(chr, s->fds,
    450                                             G_N_ELEMENTS(s->fds));
    451 
    452         /* signal the test that it can continue */
    453         g_cond_broadcast(&s->data_cond);
    454         break;
    455 
    456     case VHOST_USER_SET_VRING_KICK:
    457     case VHOST_USER_SET_VRING_CALL:
    458         /* consume the fd */
    459         qemu_chr_fe_get_msgfds(chr, &fd, 1);
    460         /*
    461          * This is a non-blocking eventfd.
    462          * The receive function forces it to be blocking,
    463          * so revert it back to non-blocking.
    464          */
    465         g_unix_set_fd_nonblocking(fd, true, &err);
    466         g_assert_no_error(err);
    467         break;
    468 
    469     case VHOST_USER_SET_LOG_BASE:
    470         if (s->log_fd != -1) {
    471             close(s->log_fd);
    472             s->log_fd = -1;
    473         }
    474         qemu_chr_fe_get_msgfds(chr, &s->log_fd, 1);
    475         msg.flags |= VHOST_USER_REPLY_MASK;
    476         msg.size = 0;
    477         p = (uint8_t *) &msg;
    478         qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE);
    479 
    480         g_cond_broadcast(&s->data_cond);
    481         break;
    482 
    483     case VHOST_USER_SET_VRING_BASE:
    484         assert(msg.payload.state.index < s->queues * 2);
    485         s->rings |= 0x1ULL << msg.payload.state.index;
    486         g_cond_broadcast(&s->data_cond);
    487         break;
    488 
    489     case VHOST_USER_GET_QUEUE_NUM:
    490         msg.flags |= VHOST_USER_REPLY_MASK;
    491         msg.size = sizeof(m.payload.u64);
    492         msg.payload.u64 = s->queues;
    493         p = (uint8_t *) &msg;
    494         qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
    495         break;
    496 
    497     case VHOST_USER_SET_VRING_ENABLE:
    498         /*
    499          * Another case we ignore as we don't need to respond. With a
    500          * fully functioning vhost-user we would enable/disable the
    501          * vring monitoring.
    502          */
    503         qos_printf("set_vring(%d)=%s\n", msg.payload.state.index,
    504                    msg.payload.state.num ? "enabled" : "disabled");
    505         break;
    506 
    507     default:
    508         qos_printf("vhost-user: un-handled message: %d\n", msg.request);
    509         break;
    510     }
    511 
    512     g_mutex_unlock(&s->data_mutex);
    513 }
    514 
    515 static const char *init_hugepagefs(void)
    516 {
    517 #ifdef CONFIG_LINUX
    518     static const char *hugepagefs;
    519     const char *path = getenv("QTEST_HUGETLBFS_PATH");
    520     struct statfs fs;
    521     int ret;
    522 
    523     if (hugepagefs) {
    524         return hugepagefs;
    525     }
    526     if (!path) {
    527         return NULL;
    528     }
    529 
    530     if (access(path, R_OK | W_OK | X_OK)) {
    531         qos_printf("access on path (%s): %s", path, strerror(errno));
    532         g_test_fail();
    533         return NULL;
    534     }
    535 
    536     do {
    537         ret = statfs(path, &fs);
    538     } while (ret != 0 && errno == EINTR);
    539 
    540     if (ret != 0) {
    541         qos_printf("statfs on path (%s): %s", path, strerror(errno));
    542         g_test_fail();
    543         return NULL;
    544     }
    545 
    546     if (fs.f_type != HUGETLBFS_MAGIC) {
    547         qos_printf("Warning: path not on HugeTLBFS: %s", path);
    548         g_test_fail();
    549         return NULL;
    550     }
    551 
    552     hugepagefs = path;
    553     return hugepagefs;
    554 #else
    555     return NULL;
    556 #endif
    557 }
    558 
/*
 * Allocate and initialise a TestServer named @name, with @ops
 * supplying the device-specific behaviour.  Spawns a dedicated
 * GMainContext/GMainLoop thread so the chardev can operate
 * independently of the test thread, and creates a private temporary
 * directory for the socket and migration paths.
 */
static TestServer *test_server_new(const gchar *name,
        struct vhost_user_ops *ops)
{
    TestServer *server = g_new0(TestServer, 1);
    g_autofree const char *tmpfs = NULL;
    GError *err = NULL;

    server->context = g_main_context_new();
    server->loop = g_main_loop_new(server->context, FALSE);

    /* run the main loop thread so the chardev may operate */
    server->thread = g_thread_new(NULL, thread_function, server->loop);

    tmpfs = g_dir_make_tmp("vhost-test-XXXXXX", &err);
    if (!tmpfs) {
        g_test_message("Can't create temporary directory in %s: %s",
                       g_get_tmp_dir(), err->message);
        g_error_free(err);
    }
    g_assert(tmpfs);

    /* tmpfs is auto-freed on return; keep our own copy in the server */
    server->tmpfs = g_strdup(tmpfs);
    server->socket_path = g_strdup_printf("%s/%s.sock", tmpfs, name);
    server->mig_path = g_strdup_printf("%s/%s.mig", tmpfs, name);
    server->chr_name = g_strdup_printf("chr-%s", name);

    g_mutex_init(&server->data_mutex);
    g_cond_init(&server->data_cond);

    server->log_fd = -1;   /* no dirty-log fd received yet */
    server->queues = 1;    /* single queue unless a test overrides */
    server->vu_ops = ops;

    return server;
}
    594 
    595 static void chr_event(void *opaque, QEMUChrEvent event)
    596 {
    597     TestServer *s = opaque;
    598 
    599     if (s->test_flags == TEST_FLAGS_END &&
    600         event == CHR_EVENT_CLOSED) {
    601         s->test_flags = TEST_FLAGS_OK;
    602     }
    603 }
    604 
    605 static void test_server_create_chr(TestServer *server, const gchar *opt)
    606 {
    607     g_autofree gchar *chr_path = g_strdup_printf("unix:%s%s",
    608                                                  server->socket_path, opt);
    609     Chardev *chr;
    610 
    611     chr = qemu_chr_new(server->chr_name, chr_path, server->context);
    612     g_assert(chr);
    613 
    614     qemu_chr_fe_init(&server->chr, chr, &error_abort);
    615     qemu_chr_fe_set_handlers(&server->chr, chr_can_read, chr_read,
    616                              chr_event, NULL, server, server->context, true);
    617 }
    618 
/* Server-side chardev: listen without blocking for a peer to connect. */
static void test_server_listen(TestServer *server)
{
    test_server_create_chr(server, ",server=on,wait=off");
}
    623 
/*
 * Tear down a TestServer: stop the helper main-loop thread, drain
 * pending sources, remove the socket/migration files and temp dir,
 * close all received fds, and release the glib resources.
 */
static void test_server_free(TestServer *server)
{
    int i, ret;

    /* finish the helper thread and dispatch pending sources */
    g_main_loop_quit(server->loop);
    g_thread_join(server->thread);
    while (g_main_context_pending(NULL)) {
        g_main_context_iteration(NULL, TRUE);
    }

    unlink(server->socket_path);
    g_free(server->socket_path);

    unlink(server->mig_path);
    g_free(server->mig_path);

    /* succeeds only once the files above have been unlinked */
    ret = rmdir(server->tmpfs);
    if (ret != 0) {
        g_test_message("unable to rmdir: path (%s): %s",
                       server->tmpfs, strerror(errno));
    }
    g_free(server->tmpfs);

    qemu_chr_fe_deinit(&server->chr, true);

    /* region fds received via SET_MEM_TABLE */
    for (i = 0; i < server->fds_num; i++) {
        close(server->fds[i]);
    }

    if (server->log_fd != -1) {
        close(server->log_fd);
    }

    g_free(server->chr_name);

    g_main_loop_unref(server->loop);
    g_main_context_unref(server->context);
    g_cond_clear(&server->data_cond);
    g_mutex_clear(&server->data_mutex);
    g_free(server);
}
    666 
    667 static void wait_for_log_fd(TestServer *s)
    668 {
    669     gint64 end_time;
    670 
    671     g_mutex_lock(&s->data_mutex);
    672     end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    673     while (s->log_fd == -1) {
    674         if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
    675             /* timeout has passed */
    676             g_assert(s->log_fd != -1);
    677             break;
    678         }
    679     }
    680 
    681     g_mutex_unlock(&s->data_mutex);
    682 }
    683 
    684 static void write_guest_mem(TestServer *s, uint32_t seed)
    685 {
    686     uint32_t *guest_mem;
    687     int i, j;
    688     size_t size;
    689 
    690     /* iterate all regions */
    691     for (i = 0; i < s->fds_num; i++) {
    692 
    693         /* We'll write only the region statring at 0x0 */
    694         if (s->memory.regions[i].guest_phys_addr != 0x0) {
    695             continue;
    696         }
    697 
    698         g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);
    699 
    700         size = s->memory.regions[i].memory_size +
    701             s->memory.regions[i].mmap_offset;
    702 
    703         guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
    704                          MAP_SHARED, s->fds[i], 0);
    705 
    706         g_assert(guest_mem != MAP_FAILED);
    707         guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));
    708 
    709         for (j = 0; j < 256; j++) {
    710             guest_mem[j] = seed + j;
    711         }
    712 
    713         munmap(guest_mem, s->memory.regions[i].memory_size);
    714         break;
    715     }
    716 }
    717 
    718 static guint64 get_log_size(TestServer *s)
    719 {
    720     guint64 log_size = 0;
    721     int i;
    722 
    723     for (i = 0; i < s->memory.nregions; ++i) {
    724         VhostUserMemoryRegion *reg = &s->memory.regions[i];
    725         guint64 last = range_get_last(reg->guest_phys_addr,
    726                                        reg->memory_size);
    727         log_size = MAX(log_size, last / (8 * VHOST_LOG_PAGE) + 1);
    728     }
    729 
    730     return log_size;
    731 }
    732 
/* GSource wrapper pairing the migration source and destination servers
 * so a main-loop check can watch both. */
typedef struct TestMigrateSource {
    GSource source;
    TestServer *src;
    TestServer *dest;
} TestMigrateSource;
    738 
    739 static gboolean
    740 test_migrate_source_check(GSource *source)
    741 {
    742     TestMigrateSource *t = (TestMigrateSource *)source;
    743     gboolean overlap = t->src->rings && t->dest->rings;
    744 
    745     g_assert(!overlap);
    746 
    747     return FALSE;
    748 }
    749 
/* Only .check is needed — the assertion runs on every loop iteration. */
GSourceFuncs test_migrate_source_funcs = {
    .check = test_migrate_source_check,
};
    753 
    754 static void vhost_user_test_cleanup(void *s)
    755 {
    756     TestServer *server = s;
    757 
    758     qos_invalidate_command_line();
    759     test_server_free(server);
    760 }
    761 
    762 static void *vhost_user_test_setup(GString *cmd_line, void *arg)
    763 {
    764     TestServer *server = test_server_new("vhost-user-test", arg);
    765     test_server_listen(server);
    766 
    767     append_mem_opts(server, cmd_line, 256, TEST_MEMFD_AUTO);
    768     server->vu_ops->append_opts(server, cmd_line, "");
    769 
    770     g_test_queue_destroy(vhost_user_test_cleanup, server);
    771 
    772     return server;
    773 }
    774 
    775 static void *vhost_user_test_setup_memfd(GString *cmd_line, void *arg)
    776 {
    777     TestServer *server = test_server_new("vhost-user-test", arg);
    778     test_server_listen(server);
    779 
    780     append_mem_opts(server, cmd_line, 256, TEST_MEMFD_YES);
    781     server->vu_ops->append_opts(server, cmd_line, "");
    782 
    783     g_test_queue_destroy(vhost_user_test_cleanup, server);
    784 
    785     return server;
    786 }
    787 
    788 static void test_read_guest_mem(void *obj, void *arg, QGuestAllocator *alloc)
    789 {
    790     TestServer *server = arg;
    791 
    792     if (!wait_for_fds(server)) {
    793         return;
    794     }
    795 
    796     read_guest_mem_server(global_qtest, server);
    797 }
    798 
/*
 * Migration test: start a destination QEMU with the same command line
 * plus "-incoming", throttle the migration, dirty guest memory and the
 * vhost dirty log mid-flight, then let it finish and verify the
 * destination backend sees the migrated memory.
 */
static void test_migrate(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    TestServer *dest;
    GString *dest_cmdline;
    char *uri;
    QTestState *to;
    GSource *source;
    QDict *rsp;
    guint8 *log;
    guint64 size;

    if (!wait_for_fds(s)) {
        return;
    }

    dest = test_server_new("dest", s->vu_ops);
    dest_cmdline = g_string_new(qos_get_current_command_line());
    uri = g_strdup_printf("%s%s", "unix:", dest->mig_path);

    /* 256M of guest RAM => one dirty-log bit per VHOST_LOG_PAGE */
    size = get_log_size(s);
    g_assert_cmpint(size, ==, (256 * 1024 * 1024) / (VHOST_LOG_PAGE * 8));

    test_server_listen(dest);
    g_string_append_printf(dest_cmdline, " -incoming %s", uri);
    append_mem_opts(dest, dest_cmdline, 256, TEST_MEMFD_AUTO);
    dest->vu_ops->append_opts(dest, dest_cmdline, "");
    to = qtest_init(dest_cmdline->str);

    /* This would be where you call qos_allocate_objects(to, NULL), if you want
     * to talk to the QVirtioNet object on the destination.
     */

    /* watchdog: assert src and dest never both have active rings */
    source = g_source_new(&test_migrate_source_funcs,
                          sizeof(TestMigrateSource));
    ((TestMigrateSource *)source)->src = s;
    ((TestMigrateSource *)source)->dest = dest;
    g_source_attach(source, s->context);

    /* slow down migration to have time to fiddle with log */
    /* TODO: qtest could learn to break on some places */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 10 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    rsp = qmp("{ 'execute': 'migrate', 'arguments': { 'uri': %s } }", uri);
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    wait_for_log_fd(s);

    log = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, s->log_fd, 0);
    g_assert(log != MAP_FAILED);

    /* modify first page */
    write_guest_mem(s, 0x42);
    log[0] = 1;
    munmap(log, size);

    /* speed things up */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 0 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    qmp_eventwait("STOP");
    qtest_qmp_eventwait(to, "RESUME");

    g_assert(wait_for_fds(dest));
    read_guest_mem_server(to, dest);

    g_source_destroy(source);
    g_source_unref(source);

    qtest_quit(to);
    test_server_free(dest);
    g_free(uri);
    g_string_free(dest_cmdline, true);
}
    879 
    880 static void wait_for_rings_started(TestServer *s, size_t count)
    881 {
    882     gint64 end_time;
    883 
    884     g_mutex_lock(&s->data_mutex);
    885     end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    886     while (ctpop64(s->rings) != count) {
    887         if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
    888             /* timeout has passed */
    889             g_assert_cmpint(ctpop64(s->rings), ==, count);
    890             break;
    891         }
    892     }
    893 
    894     g_mutex_unlock(&s->data_mutex);
    895 }
    896 
/*
 * Create the server's chardev and connect it to QEMU's socket.
 * ",reconnect=1" asks the chardev layer to retry the connection
 * (after a 1s delay) if it drops — needed by the reconnect tests.
 */
static inline void test_server_connect(TestServer *server)
{
    test_server_create_chr(server, ",reconnect=1");
}
    901 
    902 static gboolean
    903 reconnect_cb(gpointer user_data)
    904 {
    905     TestServer *s = user_data;
    906 
    907     qemu_chr_fe_disconnect(&s->chr);
    908 
    909     return FALSE;
    910 }
    911 
    912 static gpointer
    913 connect_thread(gpointer data)
    914 {
    915     TestServer *s = data;
    916 
    917     /* wait for qemu to start before first try, to avoid extra warnings */
    918     g_usleep(G_USEC_PER_SEC);
    919     test_server_connect(s);
    920 
    921     return NULL;
    922 }
    923 
    924 static void *vhost_user_test_setup_reconnect(GString *cmd_line, void *arg)
    925 {
    926     TestServer *s = test_server_new("reconnect", arg);
    927 
    928     g_thread_new("connect", connect_thread, s);
    929     append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    930     s->vu_ops->append_opts(s, cmd_line, ",server=on");
    931 
    932     g_test_queue_destroy(vhost_user_test_cleanup, s);
    933 
    934     return s;
    935 }
    936 
    937 static void test_reconnect(void *obj, void *arg, QGuestAllocator *alloc)
    938 {
    939     TestServer *s = arg;
    940     GSource *src;
    941 
    942     if (!wait_for_fds(s)) {
    943         return;
    944     }
    945 
    946     wait_for_rings_started(s, 2);
    947 
    948     /* reconnect */
    949     s->fds_num = 0;
    950     s->rings = 0;
    951     src = g_idle_source_new();
    952     g_source_set_callback(src, reconnect_cb, s, NULL);
    953     g_source_attach(src, s->context);
    954     g_source_unref(src);
    955     g_assert(wait_for_fds(s));
    956     wait_for_rings_started(s, 2);
    957 }
    958 
    959 static void *vhost_user_test_setup_connect_fail(GString *cmd_line, void *arg)
    960 {
    961     TestServer *s = test_server_new("connect-fail", arg);
    962 
    963     s->test_fail = true;
    964 
    965     g_thread_new("connect", connect_thread, s);
    966     append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    967     s->vu_ops->append_opts(s, cmd_line, ",server=on");
    968 
    969     g_test_queue_destroy(vhost_user_test_cleanup, s);
    970 
    971     return s;
    972 }
    973 
    974 static void *vhost_user_test_setup_flags_mismatch(GString *cmd_line, void *arg)
    975 {
    976     TestServer *s = test_server_new("flags-mismatch", arg);
    977 
    978     s->test_flags = TEST_FLAGS_DISCONNECT;
    979 
    980     g_thread_new("connect", connect_thread, s);
    981     append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    982     s->vu_ops->append_opts(s, cmd_line, ",server=on");
    983 
    984     g_test_queue_destroy(vhost_user_test_cleanup, s);
    985 
    986     return s;
    987 }
    988 
    989 static void test_vhost_user_started(void *obj, void *arg, QGuestAllocator *alloc)
    990 {
    991     TestServer *s = arg;
    992 
    993     if (!wait_for_fds(s)) {
    994         return;
    995     }
    996     wait_for_rings_started(s, 2);
    997 }
    998 
    999 static void *vhost_user_test_setup_multiqueue(GString *cmd_line, void *arg)
   1000 {
   1001     TestServer *s = vhost_user_test_setup(cmd_line, arg);
   1002 
   1003     s->queues = 2;
   1004     g_string_append_printf(cmd_line,
   1005                            " -set netdev.hs0.queues=%d"
   1006                            " -global virtio-net-pci.vectors=%d",
   1007                            s->queues, s->queues * 2 + 2);
   1008 
   1009     return s;
   1010 }
   1011 
/* Each queue pair contributes two vrings (tx + rx), so expect queues * 2. */
static void test_multiqueue(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;

    wait_for_rings_started(s, s->queues * 2);
}
   1018 
   1019 
   1020 static uint64_t vu_net_get_features(TestServer *s)
   1021 {
   1022     uint64_t features = 0x1ULL << VHOST_F_LOG_ALL |
   1023         0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
   1024 
   1025     if (s->queues > 1) {
   1026         features |= 0x1ULL << VIRTIO_NET_F_MQ;
   1027     }
   1028 
   1029     return features;
   1030 }
   1031 
   1032 static void vu_net_set_features(TestServer *s, CharBackend *chr,
   1033                                 VhostUserMsg *msg)
   1034 {
   1035     g_assert(msg->payload.u64 & (0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES));
   1036     if (s->test_flags == TEST_FLAGS_DISCONNECT) {
   1037         qemu_chr_fe_disconnect(chr);
   1038         s->test_flags = TEST_FLAGS_BAD;
   1039     }
   1040 }
   1041 
   1042 static void vu_net_get_protocol_features(TestServer *s, CharBackend *chr,
   1043         VhostUserMsg *msg)
   1044 {
   1045     /* send back features to qemu */
   1046     msg->flags |= VHOST_USER_REPLY_MASK;
   1047     msg->size = sizeof(m.payload.u64);
   1048     msg->payload.u64 = 1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD;
   1049     msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_CROSS_ENDIAN;
   1050     if (s->queues > 1) {
   1051         msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_MQ;
   1052     }
   1053     qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
   1054 }
   1055 
/* Each VHOST-USER device should have its ops structure defined. */
static struct vhost_user_ops g_vu_net_ops = {
    .type = VHOST_USER_NET,

    /* adds -netdev/-device command line options for a net device */
    .append_opts = append_vhost_net_opts,

    .get_features = vu_net_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_net_get_protocol_features,
};
   1066 
/*
 * Register all vhost-user-net qgraph tests.  Each test runs in a
 * subprocess with the net ops structure as its per-test argument.
 */
static void register_vhost_user_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_net_ops,
    };

    /* needed so test_server_create_chr can parse chardev options */
    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("vhost-user/read-guest-mem/memfile",
                 "virtio-net",
                 test_read_guest_mem, &opts);

    /* memfd variant only when sealing is available on this host */
    if (qemu_memfd_check(MFD_ALLOW_SEALING)) {
        opts.before = vhost_user_test_setup_memfd;
        qos_add_test("vhost-user/read-guest-mem/memfd",
                     "virtio-net",
                     test_read_guest_mem, &opts);
    }

    /* NOTE(review): migrate reuses whichever .before the branch above left */
    qos_add_test("vhost-user/migrate",
                 "virtio-net",
                 test_migrate, &opts);

    opts.before = vhost_user_test_setup_reconnect;
    qos_add_test("vhost-user/reconnect", "virtio-net",
                 test_reconnect, &opts);

    opts.before = vhost_user_test_setup_connect_fail;
    qos_add_test("vhost-user/connect-fail", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_flags_mismatch;
    qos_add_test("vhost-user/flags-mismatch", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_multiqueue;
    opts.edge.extra_device_opts = "mq=on";
    qos_add_test("vhost-user/multiqueue",
                 "virtio-net",
                 test_multiqueue, &opts);
}
libqos_init(register_vhost_user_test);
   1111 
   1112 static uint64_t vu_gpio_get_features(TestServer *s)
   1113 {
   1114     return 0x1ULL << VIRTIO_F_VERSION_1 |
   1115         0x1ULL << VIRTIO_GPIO_F_IRQ |
   1116         0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
   1117 }
   1118 
   1119 /*
   1120  * This stub can't handle all the message types but we should reply
   1121  * that we support VHOST_USER_PROTOCOL_F_CONFIG as gpio would use it
   1122  * talking to a read vhost-user daemon.
   1123  */
   1124 static void vu_gpio_get_protocol_features(TestServer *s, CharBackend *chr,
   1125                                           VhostUserMsg *msg)
   1126 {
   1127     /* send back features to qemu */
   1128     msg->flags |= VHOST_USER_REPLY_MASK;
   1129     msg->size = sizeof(m.payload.u64);
   1130     msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
   1131 
   1132     qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
   1133 }
   1134 
/* Ops structure for the vhost-user-gpio stub device. */
static struct vhost_user_ops g_vu_gpio_ops = {
    .type = VHOST_USER_GPIO,

    /* adds -device command line options for a gpio device */
    .append_opts = append_vhost_gpio_opts,

    .get_features = vu_gpio_get_features,
    /* deliberately shared with the net stub: it only checks/drops the link */
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_gpio_get_protocol_features,
};
   1144 
/* Register the (single) vhost-user-gpio qgraph test. */
static void register_vhost_gpio_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_gpio_ops,
    };

    /* needed so test_server_create_chr can parse chardev options */
    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("read-guest-mem/memfile",
                 "vhost-user-gpio", test_read_guest_mem, &opts);
}
libqos_init(register_vhost_gpio_test);