qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

user-exec.c (16750B)


/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or an unrelated bug within qemu, but we can't tell from here.)
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}
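
/*
 * Illustrative sketch (not part of the original file): roughly how a host
 * SIGSEGV handler might use adjust_signal_pc() together with
 * handle_sigsegv_accerr_write() below.  The handler name, the
 * host_signal_* accessors that dig the pc and write flag out of the
 * ucontext, and the variables cpu, old_set, guest_addr and maperr are
 * assumptions standing in for values taken from the signal context:
 *
 *     static void sketch_sigsegv_handler(int sig, siginfo_t *info, void *uc)
 *     {
 *         uintptr_t host_pc = host_signal_pc(uc);       // assumed accessor
 *         bool is_write = host_signal_write(info, uc);  // assumed accessor
 *         MMUAccessType t = adjust_signal_pc(&host_pc, is_write);
 *
 *         if (t == MMU_DATA_STORE &&
 *             handle_sigsegv_accerr_write(cpu, old_set,
 *                                         host_pc, guest_addr)) {
 *             return;  // page unprotected; re-run the faulting store
 *         }
 *         cpu_loop_exit_sigsegv(cpu, guest_addr, t, maperr, host_pc);
 *     }
 */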

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access. If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}

static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}
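
/*
 * Illustrative sketch (not part of the original file): how a target helper
 * might use probe_access_flags() with nonfault=true to test a page without
 * raising SIGSEGV.  MMU_USER_IDX and the surrounding context are
 * assumptions; the mmu_idx argument is unused by the user-only lookup above:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_STORE,
 *                                    MMU_USER_IDX, true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         // not mapped or not writable: fall back to a slow path
 *     } else {
 *         stl_le_p(host, val);  // direct store through the host pointer
 *     }
 */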

tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

void page_reset_target_data(target_ulong start, target_ulong end)
{
#ifdef TARGET_PAGE_DATA_SIZE
    target_ulong addr, len;

    /*
     * This function should never be called with addresses outside the
     * guest address space.  If this assert fires, it probably indicates
     * a missing call to h2g_valid.
     */
    assert(end - 1 <= GUEST_ADDR_MAX);
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        g_free(p->target_data);
        p->target_data = NULL;
    }
#endif
}

#ifdef TARGET_PAGE_DATA_SIZE
void *page_get_target_data(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    void *ret = p->target_data;

    if (!ret) {
        ret = g_malloc0(TARGET_PAGE_DATA_SIZE);
        p->target_data = ret;
    }
    return ret;
}
#endif
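
/*
 * Note (not part of the original file): TARGET_PAGE_DATA_SIZE is only
 * defined by targets that keep per-page metadata alongside guest memory
 * (in upstream QEMU of this era, Arm's MTE allocation tags).  For other
 * targets, page_reset_target_data() compiles to a no-op and
 * page_get_target_data() is not built at all.
 */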

/* The softmmu versions of these helpers are in cputlb.c.  */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}
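
/*
 * Illustrative sketch (not part of the original file): a MemOpIdx packs a
 * MemOp together with an MMU index, so a caller of the cpu_*_mmu helpers
 * below would build one with make_memop_idx().  MMU_USER_IDX, the
 * per-target user-mode index, is shown here as an assumption:
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
 *     uint32_t val = cpu_ldl_be_mmu(env, addr, oi, GETPC());
 *
 * With CONFIG_DEBUG_TCG, validate_memop() above would assert if this oi
 * were handed to a wrong-size or wrong-endian helper, e.g. cpu_ldl_le_mmu().
 */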

void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}

void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}

static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}
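
/*
 * Note (not part of the original file): the cpu_*_code loaders above set
 * helper_retaddr to the sentinel value 1 rather than a real return address.
 * That is exactly the value adjust_signal_pc() matches in its "case 1",
 * so a host fault during one of these reads is classified as
 * MMU_INST_FETCH instead of a data access, and the unwinder is not run.
 */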

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}
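
/*
 * Illustrative sketch (not part of the original file): the templates
 * included below expand ATOMIC_NAME into helpers such as
 * cpu_atomic_cmpxchgl_le_mmu.  A hypothetical caller mirrors the
 * load/store helpers above; MMU_USER_IDX is again an assumption:
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, MMU_USER_IDX);
 *     uint32_t old = cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv,
 *                                               oi, GETPC());
 *
 * An insufficiently aligned addr raises SIGBUS when the guest requires
 * alignment, or exits to a serialized slow path via cpu_loop_exit_atomic()
 * when only qemu's host atomics require it, per atomic_mmu_lookup() above.
 */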

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif