qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

tcg-accel-ops-rr.c (8666B)


      1 /*
      2  * QEMU TCG Single Threaded vCPUs implementation
      3  *
      4  * Copyright (c) 2003-2008 Fabrice Bellard
      5  * Copyright (c) 2014 Red Hat Inc.
      6  *
      7  * Permission is hereby granted, free of charge, to any person obtaining a copy
      8  * of this software and associated documentation files (the "Software"), to deal
      9  * in the Software without restriction, including without limitation the rights
     10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     11  * copies of the Software, and to permit persons to whom the Software is
     12  * furnished to do so, subject to the following conditions:
     13  *
     14  * The above copyright notice and this permission notice shall be included in
     15  * all copies or substantial portions of the Software.
     16  *
     17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
     20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
     23  * THE SOFTWARE.
     24  */
     25 
     26 #include "qemu/osdep.h"
     27 #include "sysemu/tcg.h"
     28 #include "sysemu/replay.h"
     29 #include "sysemu/cpu-timers.h"
     30 #include "qemu/main-loop.h"
     31 #include "qemu/notify.h"
     32 #include "qemu/guest-random.h"
     33 #include "exec/exec-all.h"
     34 
     35 #include "tcg-accel-ops.h"
     36 #include "tcg-accel-ops-rr.h"
     37 #include "tcg-accel-ops-icount.h"
     38 
     39 /* Kick all RR vCPUs */
     40 void rr_kick_vcpu_thread(CPUState *unused)
     41 {
     42     CPUState *cpu;
     43 
     44     CPU_FOREACH(cpu) {
     45         cpu_exit(cpu);
     46     }
     47 }
     48 
     49 /*
     50  * TCG vCPU kick timer
     51  *
     52  * The kick timer is responsible for moving single threaded vCPU
     53  * emulation on to the next vCPU. If more than one vCPU is running, a
     54  * timer event forces a cpu_exit() so the next vCPU can get
     55  * scheduled.
     56  *
     57  * The timer is removed while all vCPUs are idle and restarted once
     58  * any of them becomes runnable again.
     59  */
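        /*
         * TCG_KICK_PERIOD (defined outside this file, presumably in
         * tcg-accel-ops-rr.h) sets the length of each vCPU's time slice;
         * rr_start_kick_timer() below only creates the timer when
         * CPU_NEXT(first_cpu) is non-NULL, i.e. when there is more than
         * one vCPU to rotate between.
         */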
     60 
     61 static QEMUTimer *rr_kick_vcpu_timer;
     62 static CPUState *rr_current_cpu;
     63 
     64 static inline int64_t rr_next_kick_time(void)
     65 {
     66     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
     67 }
     68 
     69 /* Kick the currently round-robin scheduled vCPU to next */
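        /*
         * The do/while below re-reads rr_current_cpu after the kick: if the
         * scheduler hands over to another vCPU concurrently, we loop and
         * kick that one as well, so the exit request cannot be lost in the
         * hand-over window.
         */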
     70 static void rr_kick_next_cpu(void)
     71 {
     72     CPUState *cpu;
     73     do {
     74         cpu = qatomic_mb_read(&rr_current_cpu);
     75         if (cpu) {
     76             cpu_exit(cpu);
     77         }
     78     } while (cpu != qatomic_mb_read(&rr_current_cpu));
     79 }
     80 
     81 static void rr_kick_thread(void *opaque)
     82 {
     83     timer_mod(rr_kick_vcpu_timer, rr_next_kick_time());
     84     rr_kick_next_cpu();
     85 }
     86 
     87 static void rr_start_kick_timer(void)
     88 {
     89     if (!rr_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
     90         rr_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
     91                                            rr_kick_thread, NULL);
     92     }
     93     if (rr_kick_vcpu_timer && !timer_pending(rr_kick_vcpu_timer)) {
     94         timer_mod(rr_kick_vcpu_timer, rr_next_kick_time());
     95     }
     96 }
     97 
     98 static void rr_stop_kick_timer(void)
     99 {
    100     if (rr_kick_vcpu_timer && timer_pending(rr_kick_vcpu_timer)) {
    101         timer_del(rr_kick_vcpu_timer);
    102     }
    103 }
    104 
    105 static void rr_wait_io_event(void)
    106 {
    107     CPUState *cpu;
    108 
    109     while (all_cpu_threads_idle()) {
    110         rr_stop_kick_timer();
    111         qemu_cond_wait_iothread(first_cpu->halt_cond);
    112     }
    113 
    114     rr_start_kick_timer();
    115 
    116     CPU_FOREACH(cpu) {
    117         qemu_wait_io_event_common(cpu);
    118     }
    119 }
    120 
    121 /*
    122  * Destroy any remaining vCPUs which have been unplugged and have
    123  * finished running
    124  */
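        /*
         * Note that the break below reclaims at most one such vCPU per
         * call; any others are picked up on later iterations of the main
         * loop.
         */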
    125 static void rr_deal_with_unplugged_cpus(void)
    126 {
    127     CPUState *cpu;
    128 
    129     CPU_FOREACH(cpu) {
    130         if (cpu->unplug && !cpu_can_run(cpu)) {
    131             tcg_cpus_destroy(cpu);
    132             break;
    133         }
    134     }
    135 }
    136 
    137 static void rr_force_rcu(Notifier *notify, void *data)
    138 {
    139     rr_kick_next_cpu();
    140 }
    141 
    142 /*
    143  * In the single-threaded case each vCPU is simulated in turn. If
    144  * there is more than a single vCPU we create a simple timer to kick
    145  * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
    146  * This is done explicitly rather than relying on side-effects
    147  * elsewhere.
    148  */
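        /*
         * (When MTTCG is in use each vCPU gets its own host thread instead;
         * that path lives in tcg-accel-ops-mttcg.c and never reaches this
         * function.)
         */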
    149 
    150 static void *rr_cpu_thread_fn(void *arg)
    151 {
    152     Notifier force_rcu;
    153     CPUState *cpu = arg;
    154 
    155     assert(tcg_enabled());
    156     rcu_register_thread();
    157     force_rcu.notify = rr_force_rcu;
    158     rcu_add_force_rcu_notifier(&force_rcu);
    159     tcg_register_thread();
    160 
    161     qemu_mutex_lock_iothread();
    162     qemu_thread_get_self(cpu->thread);
    163 
    164     cpu->thread_id = qemu_get_thread_id();
    165     cpu->can_do_io = 1;
    166     cpu_thread_signal_created(cpu);
    167     qemu_guest_random_seed_thread_part2(cpu->random_seed);
    168 
    169     /* wait for initial kick-off after machine start */
    170     while (first_cpu->stopped) {
    171         qemu_cond_wait_iothread(first_cpu->halt_cond);
    172 
    173         /* process any pending work */
    174         CPU_FOREACH(cpu) {
    175             current_cpu = cpu;
    176             qemu_wait_io_event_common(cpu);
    177         }
    178     }
    179 
    180     rr_start_kick_timer();
    181 
    182     cpu = first_cpu;
    183 
    184     /* process any pending work */
    185     cpu->exit_request = 1;
    186 
    187     while (1) {
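                /*
                 * The replay mutex must be taken without the BQL held, so
                 * drop the BQL, acquire the replay lock, then re-acquire
                 * the BQL.
                 */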
    188         qemu_mutex_unlock_iothread();
    189         replay_mutex_lock();
    190         qemu_mutex_lock_iothread();
    191 
    192         if (icount_enabled()) {
    193             /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    194             icount_account_warp_timer();
    195             /*
    196              * Run the timers here.  This is much more efficient than
    197              * waking up the I/O thread and waiting for completion.
    198              */
    199             icount_handle_deadline();
    200         }
    201 
    202         replay_mutex_unlock();
    203 
    204         if (!cpu) {
    205             cpu = first_cpu;
    206         }
    207 
    208         while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
    209 
    210             qatomic_mb_set(&rr_current_cpu, cpu);
    211             current_cpu = cpu;
    212 
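                    /*
                     * Keep QEMU_CLOCK_VIRTUAL frozen while single-stepping
                     * with SSTEP_NOTIMER set, so timer callbacks don't fire
                     * between guest instructions.
                     */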
    213             qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
    214                               (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
    215 
    216             if (cpu_can_run(cpu)) {
    217                 int r;
    218 
    219                 qemu_mutex_unlock_iothread();
    220                 if (icount_enabled()) {
    221                     icount_prepare_for_run(cpu);
    222                 }
    223                 r = tcg_cpus_exec(cpu);
    224                 if (icount_enabled()) {
    225                     icount_process_data(cpu);
    226                 }
    227                 qemu_mutex_lock_iothread();
    228 
    229                 if (r == EXCP_DEBUG) {
    230                     cpu_handle_guest_debug(cpu);
    231                     break;
    232                 } else if (r == EXCP_ATOMIC) {
    233                     qemu_mutex_unlock_iothread();
    234                     cpu_exec_step_atomic(cpu);
    235                     qemu_mutex_lock_iothread();
    236                     break;
    237                 }
    238             } else if (cpu->stop) {
    239                 if (cpu->unplug) {
    240                     cpu = CPU_NEXT(cpu);
    241                 }
    242                 break;
    243             }
    244 
    245             cpu = CPU_NEXT(cpu);
    246         } /* while (cpu && !cpu->exit_request).. */
    247 
    248         /* Does not need qatomic_mb_set because a spurious wakeup is okay.  */
    249         qatomic_set(&rr_current_cpu, NULL);
    250 
    251         if (cpu && cpu->exit_request) {
    252             qatomic_mb_set(&cpu->exit_request, 0);
    253         }
    254 
    255         if (icount_enabled() && all_cpu_threads_idle()) {
    256             /*
    257              * When all cpus are sleeping (e.g. in WFI), to avoid a deadlock
    258              * in the main_loop, wake it up in order to start the warp timer.
    259              */
    260             qemu_notify_event();
    261         }
    262 
    263         rr_wait_io_event();
    264         rr_deal_with_unplugged_cpus();
    265     }
    266 
    267     rcu_remove_force_rcu_notifier(&force_rcu);
    268     rcu_unregister_thread();
    269     return NULL;
    270 }
    271 
    272 void rr_start_vcpu_thread(CPUState *cpu)
    273 {
    274     char thread_name[VCPU_THREAD_NAME_SIZE];
    275     static QemuCond *single_tcg_halt_cond;
    276     static QemuThread *single_tcg_cpu_thread;
    277 
    278     g_assert(tcg_enabled());
    279     tcg_cpu_init_cflags(cpu, false);
    280 
    281     if (!single_tcg_cpu_thread) {
    282         cpu->thread = g_new0(QemuThread, 1);
    283         cpu->halt_cond = g_new0(QemuCond, 1);
    284         qemu_cond_init(cpu->halt_cond);
    285 
    286         /* share a single thread for all cpus with TCG */
    287         snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
    288         qemu_thread_create(cpu->thread, thread_name,
    289                            rr_cpu_thread_fn,
    290                            cpu, QEMU_THREAD_JOINABLE);
    291 
    292         single_tcg_halt_cond = cpu->halt_cond;
    293         single_tcg_cpu_thread = cpu->thread;
    294 #ifdef _WIN32
    295         cpu->hThread = qemu_thread_get_handle(cpu->thread);
    296 #endif
    297     } else {
    298         /* we share the thread */
    299         cpu->thread = single_tcg_cpu_thread;
    300         cpu->halt_cond = single_tcg_halt_cond;
    301         cpu->thread_id = first_cpu->thread_id;
    302         cpu->can_do_io = 1;
    303         cpu->created = true;
    304     }
    305 }
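
For context, rr_start_vcpu_thread() and rr_kick_vcpu_thread() are the only entry points this file exports; they are the round-robin half of the TCG accelerator ops. Below is a rough sketch of how they are plausibly wired up in tcg-accel-ops.c when MTTCG is not in use. It is a sketch only: the exact code in this tree may differ, and mttcg_start_vcpu_thread/mttcg_kick_vcpu_thread are assumed here as the multi-threaded counterparts.

    static void tcg_accel_ops_init(AccelOpsClass *ops)
    {
        if (qemu_tcg_mttcg_enabled()) {
            /* MTTCG: one host thread per vCPU */
            ops->create_vcpu_thread = mttcg_start_vcpu_thread;
            ops->kick_vcpu_thread = mttcg_kick_vcpu_thread;
        } else {
            /* round-robin: all vCPUs share one host thread (this file) */
            ops->create_vcpu_thread = rr_start_vcpu_thread;
            ops->kick_vcpu_thread = rr_kick_vcpu_thread;
        }
    }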