qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

aio.h (25931B)


/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct Coroutine;
struct ThreadPool;
struct LinuxAioState;
struct LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here.  This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;
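
/*
 * Example: a minimal sketch of the shape of an FDMonOps backend.  The
 * fdmon_example_* names are hypothetical; only the callback contract, and
 * the use of aio_poll_disabled() for ->need_wait() when readiness cannot be
 * polled from userspace, come from the documentation above.
 *
 *     static void fdmon_example_update(AioContext *ctx, AioHandler *old_node,
 *                                      AioHandler *new_node)
 *     {
 *         // add, remove or modify a monitored fd; ctx->list_lock is held
 *     }
 *
 *     static int fdmon_example_wait(AioContext *ctx,
 *                                   AioHandlerList *ready_list,
 *                                   int64_t timeout)
 *     {
 *         // block for up to @timeout ns, move ready handlers onto @ready_list
 *         return 0; // number of ready file descriptors
 *     }
 *
 *     static const FDMonOps fdmon_example_ops = {
 *         .update    = fdmon_example_update,
 *         .wait      = fdmon_example_wait,
 *         .need_wait = aio_poll_disabled, // no userspace readiness polling
 *     };
 */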

/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access.  */
    QemuRecMutex lock;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext.  However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped.  If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock to protect concurrent addition and deletion of QEMUBHs and
     * AioHandlers, and to ensure that no callbacks are removed while we're
     * walking and dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    int thread_pool_min;
    int thread_pool_max;
    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /*
     * State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    /*
     * State for Linux io_uring.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* AIO engine parameters */
    int64_t aio_max_batch;  /* maximum number of requests in a batch */

    /*
     * List of handlers participating in userspace polling.  Protected by
     * ctx->list_lock.  Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented.  aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContexts provide a mini event loop that can be waited on synchronously.
 * They also provide bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(Error **errp);
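
/*
 * Example: a minimal usage sketch.  Passing &error_abort (from qapi/error.h)
 * instead of propagating the error is an assumption made for brevity.
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *     // ... attach handlers, bottom halves or timers here ...
 *     aio_context_unref(ctx); // drop the initial reference when done
 */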

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);
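
/*
 * Example: the ownership pattern described above, sketched for a thread that
 * runs the loop itself ("done" is a hypothetical termination condition):
 *
 *     aio_context_acquire(ctx);
 *     while (!done) {
 *         aio_poll(ctx, true); // blocking poll while holding ownership
 *     }
 *     aio_context_release(ctx);
 */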

/**
 * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
 * run only once and as soon as possible.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                                  const char *name);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 *
 * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
 * name string.
 */
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))
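
/*
 * Example: a usage sketch with a hypothetical callback.  A one-shot bottom
 * half cleans up after itself, so no delete call is needed:
 *
 *     static void my_oneshot_cb(void *opaque)
 *     {
 *         // runs exactly once, as soon as the event loop gets to it
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, my_oneshot_cb, NULL);
 */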

/**
 * aio_bh_new_full: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name);

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * A convenience wrapper for aio_bh_new_full() that uses cb as the name
 * string.
 */
#define aio_bh_new(ctx, cb, opaque) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)))
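
/*
 * Example: the lifecycle of a reusable bottom half; my_bh_cb and my_state
 * are hypothetical names:
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, my_state);
 *     qemu_bh_schedule(bh); // my_bh_cb(my_state) runs once, soon
 *     // ... schedule again as often as needed ...
 *     qemu_bh_delete(bh);   // cancel if pending, then free (asynchronously)
 */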

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that multiple calls to aio_bh_poll() cannot run
 * concurrently.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * Deletion is asynchronous: the bottom half is only freed once the event
 * loop has finished with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing AIO work.  This can issue new pending
 * AIO as a result of executing I/O completion or bh callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);
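
/*
 * Example: a common sketch is to drain ready work without blocking, then
 * wait for the next event:
 *
 *     while (aio_poll(ctx, false)) {
 *         // keep going while progress is being made
 *     }
 *     aio_poll(ctx, true); // now block until something completes
 */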

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque);
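
/*
 * Example: watching a socket for readability ("sockfd" and my_read_cb are
 * hypothetical).  NULL callbacks are simply not registered, and passing all
 * NULLs removes the handler:
 *
 *     static void my_read_cb(void *opaque)
 *     {
 *         int fd = *(int *)opaque;
 *         // read from fd until it would block
 *     }
 *
 *     aio_set_fd_handler(ctx, sockfd, true, my_read_cb,
 *                        NULL, NULL, NULL, &sockfd);
 *     // ...
 *     aio_set_fd_handler(ctx, sockfd, true, NULL, NULL, NULL, NULL, NULL);
 */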

/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler.  Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready);
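
/*
 * Example: a sketch of wiring up an EventNotifier (my_notifier_cb is
 * hypothetical; is_external is false for internal event sources):
 *
 *     static void my_notifier_cb(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);
 *         // handle the event
 *     }
 *
 *     EventNotifier e;
 *     event_notifier_init(&e, 0);
 *     aio_set_event_notifier(ctx, &e, false, my_notifier_cb, NULL, NULL);
 */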

/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
struct LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use those unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}
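
/*
 * Example: arming a timer on @ctx with the allocating variant (my_timer_cb
 * is hypothetical and the 100ms deadline is arbitrary):
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         // fires on the context's event loop once the deadline passes
 *     }
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, NULL);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     // ...
 *     timer_free(t); // also deletes the timer if it is still pending
 */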

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    qatomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = qatomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}
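
/*
 * Example: the disable/enable counter nests, so calls must be balanced:
 *
 *     aio_disable_external(ctx);
 *     // no is_external handlers run in here
 *     aio_enable_external(ctx); // kicks the loop once the count drops to 0
 */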

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check if the node's is_external flag is okay to be polled by the ctx at this
 * moment. True means green light.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, struct Coroutine *co);

/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx. If the coroutine is already
 * running in new_ctx, do nothing.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(struct Coroutine *co);
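
/*
 * Example: a sketch of handing a yielded coroutine to another context, or
 * resuming it where it last ran ("my_co" and "iothread_ctx" are
 * hypothetical):
 *
 *     aio_co_schedule(iothread_ctx, my_co); // resumes my_co in iothread_ctx
 *
 *     aio_co_wake(my_co); // resumes my_co on its last AioContext
 */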

/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, struct Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from the main thread or with the "big QEMU lock" taken it
 * will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

void qemu_set_current_aio_context(AioContext *ctx);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);
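
/*
 * Example: illustrative values only.  Enable adaptive busy polling up to
 * 32 microseconds, or pass 0 for @max_ns to turn polling off:
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 2, &error_abort);
 *     aio_context_set_poll_params(ctx, 0, 0, 0, &error_abort);
 */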

/**
 * aio_context_set_aio_params:
 * @ctx: the aio context
 * @max_batch: maximum number of requests in a batch, 0 means that the
 *             engine will use its default
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp);

/**
 * aio_context_set_thread_pool_params:
 * @ctx: the aio context
 * @min: min number of threads to have readily available in the thread pool
 * @max: max number of threads the thread pool can contain
 */
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp);
#endif