qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

cpus.c (18854B)


      1 /*
      2  * QEMU System Emulator
      3  *
      4  * Copyright (c) 2003-2008 Fabrice Bellard
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a copy
      7  * of this software and associated documentation files (the "Software"), to deal
      8  * in the Software without restriction, including without limitation the rights
      9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     10  * copies of the Software, and to permit persons to whom the Software is
     11  * furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
     19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
     22  * THE SOFTWARE.
     23  */
     24 
     25 #include "qemu/osdep.h"
     26 #include "monitor/monitor.h"
     27 #include "qemu/coroutine-tls.h"
     28 #include "qapi/error.h"
     29 #include "qapi/qapi-commands-machine.h"
     30 #include "qapi/qapi-commands-misc.h"
     31 #include "qapi/qapi-events-run-state.h"
     32 #include "qapi/qmp/qerror.h"
     33 #include "exec/gdbstub.h"
     34 #include "sysemu/hw_accel.h"
     35 #include "exec/cpu-common.h"
     36 #include "qemu/thread.h"
     37 #include "qemu/plugin.h"
     38 #include "sysemu/cpus.h"
     39 #include "qemu/guest-random.h"
     40 #include "hw/nmi.h"
     41 #include "sysemu/replay.h"
     42 #include "sysemu/runstate.h"
     43 #include "sysemu/cpu-timers.h"
     44 #include "sysemu/whpx.h"
     45 #include "hw/boards.h"
     46 #include "hw/hw.h"
     47 #include "trace.h"
     48 
     49 #ifdef CONFIG_LINUX
     50 
     51 #include <sys/prctl.h>
     52 
     53 #ifndef PR_MCE_KILL
     54 #define PR_MCE_KILL 33
     55 #endif
     56 
     57 #ifndef PR_MCE_KILL_SET
     58 #define PR_MCE_KILL_SET 1
     59 #endif
     60 
     61 #ifndef PR_MCE_KILL_EARLY
     62 #define PR_MCE_KILL_EARLY 1
     63 #endif
     64 
     65 #endif /* CONFIG_LINUX */
     66 
     67 static QemuMutex qemu_global_mutex;
     68 
     69 /*
     70  * The chosen accelerator is supposed to register this.
     71  */
     72 static const AccelOpsClass *cpus_accel;
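        /*
         * Illustrative sketch (not part of the original file): an accelerator is
         * expected to register an AccelOpsClass here via cpus_register_accel(),
         * defined further below.  Names prefixed my_ are made up for the example:
         *
         *     ops->create_vcpu_thread = my_accel_start_vcpu_thread;
         *     cpus_register_accel(ops);        // create_vcpu_thread is mandatory
         *
         * Every other hook is optional; callers in this file check each one for
         * NULL before invoking it.
         */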
     73 
     74 bool cpu_is_stopped(CPUState *cpu)
     75 {
     76     return cpu->stopped || !runstate_is_running();
     77 }
     78 
     79 bool cpu_work_list_empty(CPUState *cpu)
     80 {
     81     return QSIMPLEQ_EMPTY_ATOMIC(&cpu->work_list);
     82 }
     83 
     84 bool cpu_thread_is_idle(CPUState *cpu)
     85 {
     86     if (cpu->stop || !cpu_work_list_empty(cpu)) {
     87         return false;
     88     }
     89     if (cpu_is_stopped(cpu)) {
     90         return true;
     91     }
     92     if (!cpu->halted || cpu_has_work(cpu)) {
     93         return false;
     94     }
     95     if (cpus_accel->cpu_thread_is_idle) {
     96         return cpus_accel->cpu_thread_is_idle(cpu);
     97     }
     98     return true;
     99 }
    100 
    101 bool all_cpu_threads_idle(void)
    102 {
    103     CPUState *cpu;
    104 
    105     CPU_FOREACH(cpu) {
    106         if (!cpu_thread_is_idle(cpu)) {
    107             return false;
    108         }
    109     }
    110     return true;
    111 }
    112 
    113 /***********************************************************/
    114 void hw_error(const char *fmt, ...)
    115 {
    116     va_list ap;
    117     CPUState *cpu;
    118 
    119     va_start(ap, fmt);
    120     fprintf(stderr, "qemu: hardware error: ");
    121     vfprintf(stderr, fmt, ap);
    122     fprintf(stderr, "\n");
    123     CPU_FOREACH(cpu) {
    124         fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
    125         cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
    126     }
    127     va_end(ap);
    128     abort();
    129 }
    130 
    131 void cpu_synchronize_all_states(void)
    132 {
    133     CPUState *cpu;
    134 
    135     CPU_FOREACH(cpu) {
    136         cpu_synchronize_state(cpu);
    137     }
    138 }
    139 
    140 void cpu_synchronize_all_post_reset(void)
    141 {
    142     CPUState *cpu;
    143 
    144     CPU_FOREACH(cpu) {
    145         cpu_synchronize_post_reset(cpu);
    146     }
    147 }
    148 
    149 void cpu_synchronize_all_post_init(void)
    150 {
    151     CPUState *cpu;
    152 
    153     CPU_FOREACH(cpu) {
    154         cpu_synchronize_post_init(cpu);
    155     }
    156 }
    157 
    158 void cpu_synchronize_all_pre_loadvm(void)
    159 {
    160     CPUState *cpu;
    161 
    162     CPU_FOREACH(cpu) {
    163         cpu_synchronize_pre_loadvm(cpu);
    164     }
    165 }
    166 
    167 void cpu_synchronize_state(CPUState *cpu)
    168 {
    169     if (cpus_accel->synchronize_state) {
    170         cpus_accel->synchronize_state(cpu);
    171     }
    172 }
    173 
    174 void cpu_synchronize_post_reset(CPUState *cpu)
    175 {
    176     if (cpus_accel->synchronize_post_reset) {
    177         cpus_accel->synchronize_post_reset(cpu);
    178     }
    179 }
    180 
    181 void cpu_synchronize_post_init(CPUState *cpu)
    182 {
    183     if (cpus_accel->synchronize_post_init) {
    184         cpus_accel->synchronize_post_init(cpu);
    185     }
    186 }
    187 
    188 void cpu_synchronize_pre_loadvm(CPUState *cpu)
    189 {
    190     if (cpus_accel->synchronize_pre_loadvm) {
    191         cpus_accel->synchronize_pre_loadvm(cpu);
    192     }
    193 }
    194 
    195 bool cpus_are_resettable(void)
    196 {
    197     if (cpus_accel->cpus_are_resettable) {
    198         return cpus_accel->cpus_are_resettable();
    199     }
    200     return true;
    201 }
    202 
    203 int64_t cpus_get_virtual_clock(void)
    204 {
    205     /*
    206      * XXX
    207      *
    208      * need to check that cpus_accel is not NULL, because qcow2 calls
    209      * qemu_get_clock_ns(CLOCK_VIRTUAL) without any accel initialized and
    210      * with ticks disabled in some io-tests:
    211      * 030 040 041 060 099 120 127 140 156 161 172 181 191 192 195 203 229 249 256 267
    212      *
    213      * is this expected?
    214      *
    215      * XXX
    216      */
    217     if (cpus_accel && cpus_accel->get_virtual_clock) {
    218         return cpus_accel->get_virtual_clock();
    219     }
    220     return cpu_get_clock();
    221 }
    222 
    223 /*
    224  * Return the time elapsed in the VM between vm_start and vm_stop.  Unless
    225  * icount is active, cpus_get_elapsed_ticks() uses units of the host CPU cycle
    226  * counter.
    227  */
    228 int64_t cpus_get_elapsed_ticks(void)
    229 {
    230     if (cpus_accel->get_elapsed_ticks) {
    231         return cpus_accel->get_elapsed_ticks();
    232     }
    233     return cpu_get_ticks();
    234 }
    235 
    236 static void generic_handle_interrupt(CPUState *cpu, int mask)
    237 {
    238     cpu->interrupt_request |= mask;
    239 
    240     if (!qemu_cpu_is_self(cpu)) {
    241         qemu_cpu_kick(cpu);
    242     }
    243 }
    244 
    245 void cpu_interrupt(CPUState *cpu, int mask)
    246 {
    247     if (cpus_accel->handle_interrupt) {
    248         cpus_accel->handle_interrupt(cpu, mask);
    249     } else {
    250         generic_handle_interrupt(cpu, mask);
    251     }
    252 }
    253 
    254 static int do_vm_stop(RunState state, bool send_stop)
    255 {
    256     int ret = 0;
    257 
    258     if (runstate_is_running()) {
    259         runstate_set(state);
    260         cpu_disable_ticks();
    261         pause_all_vcpus();
    262         vm_state_notify(0, state);
    263         if (send_stop) {
    264             qapi_event_send_stop();
    265         }
    266     }
    267 
    268     bdrv_drain_all();
    269     ret = bdrv_flush_all();
    270     trace_vm_stop_flush_all(ret);
    271 
    272     return ret;
    273 }
    274 
    275 /* Special vm_stop() variant for terminating the process.  Historically clients
    276  * did not expect a QMP STOP event and so we need to retain compatibility.
    277  */
    278 int vm_shutdown(void)
    279 {
    280     return do_vm_stop(RUN_STATE_SHUTDOWN, false);
    281 }
    282 
    283 bool cpu_can_run(CPUState *cpu)
    284 {
    285     if (cpu->stop) {
    286         return false;
    287     }
    288     if (cpu_is_stopped(cpu)) {
    289         return false;
    290     }
    291     return true;
    292 }
    293 
    294 void cpu_handle_guest_debug(CPUState *cpu)
    295 {
    296     if (replay_running_debug()) {
    297         if (!cpu->singlestep_enabled) {
    298             /*
    299              * Report the breakpoint and single-step
    300              * to skip it
    301              */
    302             replay_breakpoint();
    303             cpu_single_step(cpu, SSTEP_ENABLE);
    304         } else {
    305             cpu_single_step(cpu, 0);
    306         }
    307     } else {
    308         gdb_set_stop_cpu(cpu);
    309         qemu_system_debug_request();
    310         cpu->stopped = true;
    311     }
    312 }
    313 
    314 #ifdef CONFIG_LINUX
    315 static void sigbus_reraise(void)
    316 {
    317     sigset_t set;
    318     struct sigaction action;
    319 
    320     memset(&action, 0, sizeof(action));
    321     action.sa_handler = SIG_DFL;
    322     if (!sigaction(SIGBUS, &action, NULL)) {
    323         raise(SIGBUS);
    324         sigemptyset(&set);
    325         sigaddset(&set, SIGBUS);
    326         pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    327     }
    328     perror("Failed to re-raise SIGBUS!");
    329     abort();
    330 }
    331 
    332 static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
    333 {
    334     if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
    335         sigbus_reraise();
    336     }
    337 
    338     if (current_cpu) {
    339         /* Called asynchronously in VCPU thread.  */
    340         if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
    341             sigbus_reraise();
    342         }
    343     } else {
    344         /* Called synchronously (via signalfd) in main thread.  */
    345         if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
    346             sigbus_reraise();
    347         }
    348     }
    349 }
    350 
    351 static void qemu_init_sigbus(void)
    352 {
    353     struct sigaction action;
    354 
    355     /*
    356      * ALERT: when modifying this, take care that SIGBUS forwarding in
    357      * qemu_prealloc_mem() will continue working as expected.
    358      */
    359     memset(&action, 0, sizeof(action));
    360     action.sa_flags = SA_SIGINFO;
    361     action.sa_sigaction = sigbus_handler;
    362     sigaction(SIGBUS, &action, NULL);
    363 
    364     prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
    365 }
    366 #else /* !CONFIG_LINUX */
    367 static void qemu_init_sigbus(void)
    368 {
    369 }
    370 #endif /* !CONFIG_LINUX */
    371 
    372 static QemuThread io_thread;
    373 
    374 /* cpu creation */
    375 static QemuCond qemu_cpu_cond;
    376 /* system init */
    377 static QemuCond qemu_pause_cond;
    378 
    379 void qemu_init_cpu_loop(void)
    380 {
    381     qemu_init_sigbus();
    382     qemu_cond_init(&qemu_cpu_cond);
    383     qemu_cond_init(&qemu_pause_cond);
    384     qemu_mutex_init(&qemu_global_mutex);
    385 
    386     qemu_thread_get_self(&io_thread);
    387 }
    388 
    389 void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
    390 {
    391     do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
    392 }
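        /*
         * Illustrative usage sketch (an assumption about callers, not code from
         * this file): queue work to execute in the context of a given vCPU's
         * thread and wait for it to finish.  Because the BQL (qemu_global_mutex)
         * is passed to do_run_on_cpu(), it is dropped while waiting so the target
         * thread can make progress.  do_nothing is a made-up example function:
         *
         *     static void do_nothing(CPUState *cpu, run_on_cpu_data arg)
         *     {
         *         // called from cpu's thread
         *     }
         *     run_on_cpu(cpu, do_nothing, RUN_ON_CPU_NULL);
         */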
    393 
    394 static void qemu_cpu_stop(CPUState *cpu, bool exit)
    395 {
    396     g_assert(qemu_cpu_is_self(cpu));
    397     cpu->stop = false;
    398     cpu->stopped = true;
    399     if (exit) {
    400         cpu_exit(cpu);
    401     }
    402     qemu_cond_broadcast(&qemu_pause_cond);
    403 }
    404 
    405 void qemu_wait_io_event_common(CPUState *cpu)
    406 {
    407     qatomic_mb_set(&cpu->thread_kicked, false);
    408     if (cpu->stop) {
    409         qemu_cpu_stop(cpu, false);
    410     }
    411     process_queued_cpu_work(cpu);
    412 }
    413 
    414 void qemu_wait_io_event(CPUState *cpu)
    415 {
    416     bool slept = false;
    417 
    418     while (cpu_thread_is_idle(cpu)) {
    419         if (!slept) {
    420             slept = true;
    421             qemu_plugin_vcpu_idle_cb(cpu);
    422         }
    423         qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    424     }
    425     if (slept) {
    426         qemu_plugin_vcpu_resume_cb(cpu);
    427     }
    428 
    429 #ifdef _WIN32
    430     /* Eat dummy APC queued by cpus_kick_thread. */
    431     if (hax_enabled()) {
    432         SleepEx(0, TRUE);
    433     }
    434 #endif
    435     qemu_wait_io_event_common(cpu);
    436 }
    437 
    438 void cpus_kick_thread(CPUState *cpu)
    439 {
    440     if (cpu->thread_kicked) {
    441         return;
    442     }
    443     cpu->thread_kicked = true;
    444 
    445 #ifndef _WIN32
    446     int err = pthread_kill(cpu->thread->thread, SIG_IPI);
    447     if (err && err != ESRCH) {
    448         fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
    449         exit(1);
    450     }
    451 #else
    452     qemu_sem_post(&cpu->sem);
    453 #endif
    454 }
    455 
    456 void qemu_cpu_kick(CPUState *cpu)
    457 {
    458     qemu_cond_broadcast(cpu->halt_cond);
    459     if (cpus_accel->kick_vcpu_thread) {
    460         cpus_accel->kick_vcpu_thread(cpu);
    461     } else { /* default */
    462         cpus_kick_thread(cpu);
    463     }
    464 }
    465 
    466 void qemu_cpu_kick_self(void)
    467 {
    468     assert(current_cpu);
    469     cpus_kick_thread(current_cpu);
    470 }
    471 
    472 bool qemu_cpu_is_self(CPUState *cpu)
    473 {
    474     return qemu_thread_is_self(cpu->thread);
    475 }
    476 
    477 bool qemu_in_vcpu_thread(void)
    478 {
    479     return current_cpu && qemu_cpu_is_self(current_cpu);
    480 }
    481 
    482 QEMU_DEFINE_STATIC_CO_TLS(bool, iothread_locked)
    483 
    484 bool qemu_mutex_iothread_locked(void)
    485 {
    486     return get_iothread_locked();
    487 }
    488 
    489 bool qemu_in_main_thread(void)
    490 {
    491     return qemu_mutex_iothread_locked();
    492 }
    493 
    494 /*
    495  * The BQL is taken from so many places that it is worth profiling the
    496  * callers directly, instead of funneling them all through a single function.
    497  */
    498 void qemu_mutex_lock_iothread_impl(const char *file, int line)
    499 {
    500     QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func);
    501 
    502     g_assert(!qemu_mutex_iothread_locked());
    503     bql_lock(&qemu_global_mutex, file, line);
    504     set_iothread_locked(true);
    505 }
    506 
    507 void qemu_mutex_unlock_iothread(void)
    508 {
    509     g_assert(qemu_mutex_iothread_locked());
    510     set_iothread_locked(false);
    511     qemu_mutex_unlock(&qemu_global_mutex);
    512 }
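        /*
         * Illustrative sketch of how the two helpers above are typically used
         * (an assumption about callers, not code from this file):
         *
         *     qemu_mutex_lock_iothread();
         *     // ... touch device or CPU state under the BQL ...
         *     qemu_mutex_unlock_iothread();
         *
         * The lock is not recursive: qemu_mutex_lock_iothread_impl() asserts the
         * BQL is not already held, so callers unsure of their context can test
         * qemu_mutex_iothread_locked() first.  qemu_mutex_lock_iothread() is a
         * macro forwarding __FILE__ and __LINE__ to the _impl function above,
         * which is what lets BQL acquisition be profiled per call site.
         */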
    513 
    514 void qemu_cond_wait_iothread(QemuCond *cond)
    515 {
    516     qemu_cond_wait(cond, &qemu_global_mutex);
    517 }
    518 
    519 void qemu_cond_timedwait_iothread(QemuCond *cond, int ms)
    520 {
    521     qemu_cond_timedwait(cond, &qemu_global_mutex, ms);
    522 }
    523 
    524 /* signal CPU creation */
    525 void cpu_thread_signal_created(CPUState *cpu)
    526 {
    527     cpu->created = true;
    528     qemu_cond_signal(&qemu_cpu_cond);
    529 }
    530 
    531 /* signal CPU destruction */
    532 void cpu_thread_signal_destroyed(CPUState *cpu)
    533 {
    534     cpu->created = false;
    535     qemu_cond_signal(&qemu_cpu_cond);
    536 }
    537 
    538 
    539 static bool all_vcpus_paused(void)
    540 {
    541     CPUState *cpu;
    542 
    543     CPU_FOREACH(cpu) {
    544         if (!cpu->stopped) {
    545             return false;
    546         }
    547     }
    548 
    549     return true;
    550 }
    551 
    552 void pause_all_vcpus(void)
    553 {
    554     CPUState *cpu;
    555 
    556     qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    557     CPU_FOREACH(cpu) {
    558         if (qemu_cpu_is_self(cpu)) {
    559             qemu_cpu_stop(cpu, true);
    560         } else {
    561             cpu->stop = true;
    562             qemu_cpu_kick(cpu);
    563         }
    564     }
    565 
    566     /* We need to drop the replay_lock so any vCPU threads woken up
    567      * can finish their replay tasks
    568      */
    569     replay_mutex_unlock();
    570 
    571     while (!all_vcpus_paused()) {
    572         qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
    573         CPU_FOREACH(cpu) {
    574             qemu_cpu_kick(cpu);
    575         }
    576     }
    577 
    578     qemu_mutex_unlock_iothread();
    579     replay_mutex_lock();
    580     qemu_mutex_lock_iothread();
    581 }
    582 
    583 void cpu_resume(CPUState *cpu)
    584 {
    585     cpu->stop = false;
    586     cpu->stopped = false;
    587     qemu_cpu_kick(cpu);
    588 }
    589 
    590 void resume_all_vcpus(void)
    591 {
    592     CPUState *cpu;
    593 
    594     if (!runstate_is_running()) {
    595         return;
    596     }
    597 
    598     qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    599     CPU_FOREACH(cpu) {
    600         cpu_resume(cpu);
    601     }
    602 }
    603 
    604 void cpu_remove_sync(CPUState *cpu)
    605 {
    606     cpu->stop = true;
    607     cpu->unplug = true;
    608     qemu_cpu_kick(cpu);
    609     qemu_mutex_unlock_iothread();
    610     qemu_thread_join(cpu->thread);
    611     qemu_mutex_lock_iothread();
    612 }
    613 
    614 void cpus_register_accel(const AccelOpsClass *ops)
    615 {
    616     assert(ops != NULL);
    617     assert(ops->create_vcpu_thread != NULL); /* mandatory */
    618     cpus_accel = ops;
    619 }
    620 
    621 const AccelOpsClass *cpus_get_accel(void)
    622 {
    623     /* broken if we call this early */
    624     assert(cpus_accel);
    625     return cpus_accel;
    626 }
    627 
    628 void qemu_init_vcpu(CPUState *cpu)
    629 {
    630     MachineState *ms = MACHINE(qdev_get_machine());
    631 
    632     cpu->nr_cores = ms->smp.cores;
    633     cpu->nr_threads =  ms->smp.threads;
    634     cpu->stopped = true;
    635     cpu->random_seed = qemu_guest_random_seed_thread_part1();
    636 
    637     if (!cpu->as) {
    638         /* If the target cpu hasn't set up any address spaces itself,
    639          * give it the default one.
    640          */
    641         cpu->num_ases = 1;
    642         cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
    643     }
    644 
    645     /* accelerators all implement the AccelOpsClass */
    646     g_assert(cpus_accel != NULL && cpus_accel->create_vcpu_thread != NULL);
    647     cpus_accel->create_vcpu_thread(cpu);
    648 
    649     while (!cpu->created) {
    650         qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    651     }
    652 }
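        /*
         * Rough lifecycle sketch, pieced together from the helpers in this file
         * (the exact thread body is accelerator-specific): create_vcpu_thread()
         * spawns the per-vCPU thread, which is expected to take the BQL, call
         * cpu_thread_signal_created() so the wait loop above returns, and then
         * alternate between running guest code and qemu_wait_io_event().  On
         * unplug the thread calls cpu_thread_signal_destroyed() before exiting,
         * and cpu_remove_sync() joins it.
         */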
    653 
    654 void cpu_stop_current(void)
    655 {
    656     if (current_cpu) {
    657         current_cpu->stop = true;
    658         cpu_exit(current_cpu);
    659     }
    660 }
    661 
    662 int vm_stop(RunState state)
    663 {
    664     if (qemu_in_vcpu_thread()) {
    665         qemu_system_vmstop_request_prepare();
    666         qemu_system_vmstop_request(state);
    667         /*
    668          * FIXME: should not return to device code in case
    669          * vm_stop() has been requested.
    670          */
    671         cpu_stop_current();
    672         return 0;
    673     }
    674 
    675     return do_vm_stop(state, true);
    676 }
    677 
    678 /**
    679  * Prepare for (re)starting the VM.
    680  * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
    681  * running or in case of an error condition), 0 otherwise.
    682  */
    683 int vm_prepare_start(bool step_pending)
    684 {
    685     RunState requested;
    686 
    687     qemu_vmstop_requested(&requested);
    688     if (runstate_is_running() && requested == RUN_STATE__MAX) {
    689         return -1;
    690     }
    691 
    692     /* Ensure that a STOP/RESUME pair of events is emitted if a
    693      * vmstop request was pending.  The BLOCK_IO_ERROR event, for
    694      * example, according to documentation is always followed by
    695      * the STOP event.
    696      */
    697     if (runstate_is_running()) {
    698         qapi_event_send_stop();
    699         qapi_event_send_resume();
    700         return -1;
    701     }
    702 
    703     /*
    704      * WHPX accelerator needs to know whether we are going to step
    705      * any CPUs, before starting the first one.
    706      */
    707     if (cpus_accel->synchronize_pre_resume) {
    708         cpus_accel->synchronize_pre_resume(step_pending);
    709     }
    710 
    711     /* We are sending this now, but the CPUs will be resumed shortly afterwards */
    712     qapi_event_send_resume();
    713 
    714     cpu_enable_ticks();
    715     runstate_set(RUN_STATE_RUNNING);
    716     vm_state_notify(1, RUN_STATE_RUNNING);
    717     return 0;
    718 }
    719 
    720 void vm_start(void)
    721 {
    722     if (!vm_prepare_start(false)) {
    723         resume_all_vcpus();
    724     }
    725 }
    726 
    727 /* Does a state transition even if the VM is already stopped;
    728    the current state is forgotten forever. */
    729 int vm_stop_force_state(RunState state)
    730 {
    731     if (runstate_is_running()) {
    732         return vm_stop(state);
    733     } else {
    734         int ret;
    735         runstate_set(state);
    736 
    737         bdrv_drain_all();
    738         /* Make sure to return an error if the flush in a previous vm_stop()
    739          * failed. */
    740         ret = bdrv_flush_all();
    741         trace_vm_stop_flush_all(ret);
    742         return ret;
    743     }
    744 }
    745 
    746 void qmp_memsave(int64_t addr, int64_t size, const char *filename,
    747                  bool has_cpu, int64_t cpu_index, Error **errp)
    748 {
    749     FILE *f;
    750     uint32_t l;
    751     CPUState *cpu;
    752     uint8_t buf[1024];
    753     int64_t orig_addr = addr, orig_size = size;
    754 
    755     if (!has_cpu) {
    756         cpu_index = 0;
    757     }
    758 
    759     cpu = qemu_get_cpu(cpu_index);
    760     if (cpu == NULL) {
    761         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
    762                    "a CPU number");
    763         return;
    764     }
    765 
    766     f = fopen(filename, "wb");
    767     if (!f) {
    768         error_setg_file_open(errp, errno, filename);
    769         return;
    770     }
    771 
    772     while (size != 0) {
    773         l = sizeof(buf);
    774         if (l > size)
    775             l = size;
    776         if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
    777             error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
    778                              " specified", orig_addr, orig_size);
    779             goto exit;
    780         }
    781         if (fwrite(buf, 1, l, f) != l) {
    782             error_setg(errp, QERR_IO_ERROR);
    783             goto exit;
    784         }
    785         addr += l;
    786         size -= l;
    787     }
    788 
    789 exit:
    790     fclose(f);
    791 }
    792 
    793 void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
    794                   Error **errp)
    795 {
    796     FILE *f;
    797     uint32_t l;
    798     uint8_t buf[1024];
    799 
    800     f = fopen(filename, "wb");
    801     if (!f) {
    802         error_setg_file_open(errp, errno, filename);
    803         return;
    804     }
    805 
    806     while (size != 0) {
    807         l = sizeof(buf);
    808         if (l > size)
    809             l = size;
    810         cpu_physical_memory_read(addr, buf, l);
    811         if (fwrite(buf, 1, l, f) != l) {
    812             error_setg(errp, QERR_IO_ERROR);
    813             goto exit;
    814         }
    815         addr += l;
    816         size -= l;
    817     }
    818 
    819 exit:
    820     fclose(f);
    821 }
    822 
    823 void qmp_inject_nmi(Error **errp)
    824 {
    825     nmi_monitor_handle(monitor_get_cpu_index(monitor_cur()), errp);
    826 }
    827