qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

cpu_ldst.h (19194B)


      1 /*
      2  *  Software MMU support
      3  *
      4  * This library is free software; you can redistribute it and/or
      5  * modify it under the terms of the GNU Lesser General Public
      6  * License as published by the Free Software Foundation; either
      7  * version 2.1 of the License, or (at your option) any later version.
      8  *
      9  * This library is distributed in the hope that it will be useful,
     10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
     11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     12  * Lesser General Public License for more details.
     13  *
     14  * You should have received a copy of the GNU Lesser General Public
     15  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
     16  *
     17  */
     18 
     19 /*
     20  * Generate inline load/store functions for all MMU modes (typically
     21  * at least _user and _kernel) as well as _data versions, for all data
     22  * sizes.
     23  *
     24  * Used by target op helpers.
     25  *
     26  * The syntax for the accessors is:
     27  *
     28  * load:  cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
     29  *        cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
     30  *        cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
     31  *        cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
     32  *
     33  * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
     34  *        cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
     35  *        cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
     36  *        cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
     37  *
     38  * sign is:
     39  * (empty): for 32 and 64 bit sizes
     40  *   u    : unsigned
     41  *   s    : signed
     42  *
     43  * size is:
     44  *   b: 8 bits
     45  *   w: 16 bits
     46  *   l: 32 bits
     47  *   q: 64 bits
     48  *
     49  * end is:
     50  * (empty): for target native endian, or for 8 bit access
     51  *     _be: for forced big endian
     52  *     _le: for forced little endian
     53  *
     54  * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
     55  * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
     56  * the index to use; the "data" and "code" suffixes take the index from
     57  * cpu_mmu_index().
     58  *
     59  * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
     60  * MemOp including alignment requirements.  The alignment will be enforced.
     61  */
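/*
 * As a concrete illustration of the naming scheme above (a sketch only:
 * the helper name and its behaviour are hypothetical, and GETPC() is
 * assumed to be available as usual in target op helpers):
 *
 *     void helper_swap_words(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();  // host return address for unwinding
 *
 *         // 32-bit loads/stores with explicit endianness, using the
 *         // default mmu index from cpu_mmu_index()
 *         uint32_t le = cpu_ldl_le_data_ra(env, addr, ra);
 *         uint32_t be = cpu_ldl_be_data_ra(env, addr + 4, ra);
 *         cpu_stl_be_data_ra(env, addr, le, ra);
 *         cpu_stl_le_data_ra(env, addr + 4, be, ra);
 *     }
 */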
     62 #ifndef CPU_LDST_H
     63 #define CPU_LDST_H
     64 
     65 #include "exec/memopidx.h"
     66 #include "qemu/int128.h"
     67 #include "cpu.h"
     68 
     69 #if defined(CONFIG_USER_ONLY)
      70 /* sparc32plus has a 64-bit long but a 32-bit address space;
      71  * this can produce bad results with g2h() and h2g().
      72  */
     73 #if TARGET_VIRT_ADDR_SPACE_BITS <= 32
     74 typedef uint32_t abi_ptr;
     75 #define TARGET_ABI_FMT_ptr "%x"
     76 #else
     77 typedef uint64_t abi_ptr;
     78 #define TARGET_ABI_FMT_ptr "%"PRIx64
     79 #endif
     80 
     81 #ifndef TARGET_TAGGED_ADDRESSES
     82 static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
     83 {
     84     return x;
     85 }
     86 #endif
     87 
     88 /* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
     89 static inline void *g2h_untagged(abi_ptr x)
     90 {
     91     return (void *)((uintptr_t)(x) + guest_base);
     92 }
     93 
     94 static inline void *g2h(CPUState *cs, abi_ptr x)
     95 {
     96     return g2h_untagged(cpu_untagged_addr(cs, x));
     97 }
     98 
     99 static inline bool guest_addr_valid_untagged(abi_ulong x)
    100 {
    101     return x <= GUEST_ADDR_MAX;
    102 }
    103 
    104 static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
    105 {
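    /*
     * Overflow-safe check that [start, start + len) fits in the guest
     * address space: start + len is never computed directly, so the
     * comparison cannot wrap.
     */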
    106     return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
    107 }
    108 
    109 #define h2g_valid(x) \
    110     (HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
    111      (uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)
    112 
    113 #define h2g_nocheck(x) ({ \
    114     uintptr_t __ret = (uintptr_t)(x) - guest_base; \
    115     (abi_ptr)__ret; \
    116 })
    117 
    118 #define h2g(x) ({ \
    119     /* Check if given address fits target address space */ \
    120     assert(h2g_valid(x)); \
    121     h2g_nocheck(x); \
    122 })
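/*
 * A minimal sketch (not code from this header) of the round trip these
 * helpers provide in user-only mode: g2h_untagged() adds guest_base to a
 * guest address, h2g() subtracts it again and asserts that the result
 * fits the guest address space.
 *
 *     void *host = g2h_untagged(guest_addr);   // guest -> host
 *     abi_ptr back = h2g(host);                // host -> guest (checked)
 *     assert(back == guest_addr);
 */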
    123 #else
    124 typedef target_ulong abi_ptr;
    125 #define TARGET_ABI_FMT_ptr TARGET_FMT_lx
    126 #endif
    127 
    128 uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
    129 int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
    130 uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
    131 int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
    132 uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
    133 uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
    134 uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
    135 int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
    136 uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
    137 uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);
    138 
    139 uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
    140 int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
    141 uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
    142 int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
    143 uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
    144 uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
    145 uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
    146 int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
    147 uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
    148 uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
    149 
    150 void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
    151 void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
    152 void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
    153 void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
    154 void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
    155 void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
    156 void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
    157 
    158 void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
    159                      uint32_t val, uintptr_t ra);
    160 void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
    161                         uint32_t val, uintptr_t ra);
    162 void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
    163                         uint32_t val, uintptr_t ra);
    164 void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
    165                         uint64_t val, uintptr_t ra);
    166 void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
    167                         uint32_t val, uintptr_t ra);
    168 void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
    169                         uint32_t val, uintptr_t ra);
    170 void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
    171                         uint64_t val, uintptr_t ra);
    172 
    173 uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
    174                             int mmu_idx, uintptr_t ra);
    175 int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
    176                        int mmu_idx, uintptr_t ra);
    177 uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
    178                                int mmu_idx, uintptr_t ra);
    179 int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
    180                           int mmu_idx, uintptr_t ra);
    181 uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
    182                               int mmu_idx, uintptr_t ra);
    183 uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
    184                               int mmu_idx, uintptr_t ra);
    185 uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
    186                                int mmu_idx, uintptr_t ra);
    187 int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
    188                           int mmu_idx, uintptr_t ra);
    189 uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
    190                               int mmu_idx, uintptr_t ra);
    191 uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
    192                               int mmu_idx, uintptr_t ra);
    193 
    194 void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
    195                        int mmu_idx, uintptr_t ra);
    196 void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
    197                           int mmu_idx, uintptr_t ra);
    198 void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
    199                           int mmu_idx, uintptr_t ra);
    200 void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
    201                           int mmu_idx, uintptr_t ra);
    202 void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
    203                           int mmu_idx, uintptr_t ra);
    204 void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
    205                           int mmu_idx, uintptr_t ra);
    206 void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
    207                           int mmu_idx, uintptr_t ra);
    208 
    209 uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
    210 uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr ptr,
    211                         MemOpIdx oi, uintptr_t ra);
    212 uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr ptr,
    213                         MemOpIdx oi, uintptr_t ra);
    214 uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr ptr,
    215                         MemOpIdx oi, uintptr_t ra);
    216 uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr ptr,
    217                         MemOpIdx oi, uintptr_t ra);
    218 uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr ptr,
    219                         MemOpIdx oi, uintptr_t ra);
    220 uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr ptr,
    221                         MemOpIdx oi, uintptr_t ra);
    222 
    223 void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
    224                  MemOpIdx oi, uintptr_t ra);
    225 void cpu_stw_be_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
    226                     MemOpIdx oi, uintptr_t ra);
    227 void cpu_stl_be_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
    228                     MemOpIdx oi, uintptr_t ra);
    229 void cpu_stq_be_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
    230                     MemOpIdx oi, uintptr_t ra);
    231 void cpu_stw_le_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
    232                     MemOpIdx oi, uintptr_t ra);
    233 void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
    234                     MemOpIdx oi, uintptr_t ra);
    235 void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
    236                     MemOpIdx oi, uintptr_t ra);
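/*
 * Sketch of how the MemOpIdx argument for the _mmu accessors is typically
 * built (make_memop_idx() and the MO_* flags come from "exec/memopidx.h"
 * and "exec/memop.h"; the surrounding code is an assumption):
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEUL | MO_ALIGN, mmu_idx);
 *     uint32_t val = cpu_ldl_be_mmu(env, addr, oi, GETPC());
 *
 * Here MO_ALIGN asks for the 4-byte alignment of addr to be enforced, as
 * described in the header comment above.
 */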
    237 
    238 uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
    239                                  uint32_t cmpv, uint32_t newv,
    240                                  MemOpIdx oi, uintptr_t retaddr);
    241 uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
    242                                     uint32_t cmpv, uint32_t newv,
    243                                     MemOpIdx oi, uintptr_t retaddr);
    244 uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
    245                                     uint32_t cmpv, uint32_t newv,
    246                                     MemOpIdx oi, uintptr_t retaddr);
    247 uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
    248                                     uint64_t cmpv, uint64_t newv,
    249                                     MemOpIdx oi, uintptr_t retaddr);
    250 uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
    251                                     uint32_t cmpv, uint32_t newv,
    252                                     MemOpIdx oi, uintptr_t retaddr);
    253 uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
    254                                     uint32_t cmpv, uint32_t newv,
    255                                     MemOpIdx oi, uintptr_t retaddr);
    256 uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
    257                                     uint64_t cmpv, uint64_t newv,
    258                                     MemOpIdx oi, uintptr_t retaddr);
    259 
    260 #define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
    261 TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu            \
    262     (CPUArchState *env, target_ulong addr, TYPE val,  \
    263      MemOpIdx oi, uintptr_t retaddr);
    264 
    265 #ifdef CONFIG_ATOMIC64
    266 #define GEN_ATOMIC_HELPER_ALL(NAME)          \
    267     GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    268     GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    269     GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    270     GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    271     GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    272     GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    273     GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
    274 #else
    275 #define GEN_ATOMIC_HELPER_ALL(NAME)          \
    276     GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    277     GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    278     GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    279     GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    280     GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
    281 #endif
    282 
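/*
 * For reference, GEN_ATOMIC_HELPER(fetch_add, uint32_t, l_le) expands to
 *
 *     uint32_t cpu_atomic_fetch_addl_le_mmu(CPUArchState *env,
 *                                           target_ulong addr, uint32_t val,
 *                                           MemOpIdx oi, uintptr_t retaddr);
 *
 * so each GEN_ATOMIC_HELPER_ALL(NAME) below declares one such prototype
 * per size/endianness combination (b, w_le, w_be, l_le, l_be, plus the
 * q_le/q_be variants when CONFIG_ATOMIC64 is defined).
 */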
    283 GEN_ATOMIC_HELPER_ALL(fetch_add)
    284 GEN_ATOMIC_HELPER_ALL(fetch_sub)
    285 GEN_ATOMIC_HELPER_ALL(fetch_and)
    286 GEN_ATOMIC_HELPER_ALL(fetch_or)
    287 GEN_ATOMIC_HELPER_ALL(fetch_xor)
    288 GEN_ATOMIC_HELPER_ALL(fetch_smin)
    289 GEN_ATOMIC_HELPER_ALL(fetch_umin)
    290 GEN_ATOMIC_HELPER_ALL(fetch_smax)
    291 GEN_ATOMIC_HELPER_ALL(fetch_umax)
    292 
    293 GEN_ATOMIC_HELPER_ALL(add_fetch)
    294 GEN_ATOMIC_HELPER_ALL(sub_fetch)
    295 GEN_ATOMIC_HELPER_ALL(and_fetch)
    296 GEN_ATOMIC_HELPER_ALL(or_fetch)
    297 GEN_ATOMIC_HELPER_ALL(xor_fetch)
    298 GEN_ATOMIC_HELPER_ALL(smin_fetch)
    299 GEN_ATOMIC_HELPER_ALL(umin_fetch)
    300 GEN_ATOMIC_HELPER_ALL(smax_fetch)
    301 GEN_ATOMIC_HELPER_ALL(umax_fetch)
    302 
    303 GEN_ATOMIC_HELPER_ALL(xchg)
    304 
    305 #undef GEN_ATOMIC_HELPER_ALL
    306 #undef GEN_ATOMIC_HELPER
    307 
    308 Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
    309                                   Int128 cmpv, Int128 newv,
    310                                   MemOpIdx oi, uintptr_t retaddr);
    311 Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
    312                                   Int128 cmpv, Int128 newv,
    313                                   MemOpIdx oi, uintptr_t retaddr);
    314 
    315 Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
    316                              MemOpIdx oi, uintptr_t retaddr);
    317 Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
    318                              MemOpIdx oi, uintptr_t retaddr);
    319 void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
    320                            MemOpIdx oi, uintptr_t retaddr);
    321 void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
    322                            MemOpIdx oi, uintptr_t retaddr);
    323 
    324 #if defined(CONFIG_USER_ONLY)
    325 
    326 extern __thread uintptr_t helper_retaddr;
    327 
    328 static inline void set_helper_retaddr(uintptr_t ra)
    329 {
    330     helper_retaddr = ra;
    331     /*
    332      * Ensure that this write is visible to the SIGSEGV handler that
    333      * may be invoked due to a subsequent invalid memory operation.
    334      */
    335     signal_barrier();
    336 }
    337 
    338 static inline void clear_helper_retaddr(void)
    339 {
    340     /*
    341      * Ensure that previous memory operations have succeeded before
    342      * removing the data visible to the signal handler.
    343      */
    344     signal_barrier();
    345     helper_retaddr = 0;
    346 }
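/*
 * Typical usage (a sketch of the usual convention, not a definitive
 * recipe): bracket direct host-memory accesses in a user-only helper so
 * that the SIGSEGV handler can attribute a fault to the guest insn;
 * env_cpu() and GETPC() are assumed available as in other op helpers.
 *
 *     set_helper_retaddr(GETPC());
 *     memset(g2h(env_cpu(env), addr), 0, len);  // may fault
 *     clear_helper_retaddr();
 */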
    347 
    348 #else
    349 
    350 /* Needed for TCG_OVERSIZED_GUEST */
    351 #include "tcg/tcg.h"
    352 
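/*
 * Read the addr_write comparator of a TLB entry.  Another thread may be
 * flushing or refilling the TLB concurrently, so an atomic read is used
 * where possible; with TCG_OVERSIZED_GUEST the field is wider than the
 * host word and a plain load is the best that can be done.
 */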
    353 static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
    354 {
    355 #if TCG_OVERSIZED_GUEST
    356     return entry->addr_write;
    357 #else
    358     return qatomic_read(&entry->addr_write);
    359 #endif
    360 }
    361 
    362 /* Find the TLB index corresponding to the mmu_idx + address pair.  */
    363 static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
    364                                   target_ulong addr)
    365 {
    366     uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
    367 
    368     return (addr >> TARGET_PAGE_BITS) & size_mask;
    369 }
    370 
    371 /* Find the TLB entry corresponding to the mmu_idx + address pair.  */
    372 static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
    373                                      target_ulong addr)
    374 {
    375     return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
    376 }
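/*
 * A sketch (assumed, in the style of accel/tcg/cputlb.c) of how these are
 * combined to probe the softmmu TLB for a store:
 *
 *     uintptr_t index = tlb_index(env, mmu_idx, addr);   // slot in the table
 *     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
 *     if (tlb_hit(tlb_addr_write(entry), addr)) {
 *         // hit: entry->addend converts addr to a host address
 *     } else {
 *         // miss: a TLB fill (and possibly a guest fault) is needed
 *     }
 */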
    377 
    378 #endif /* defined(CONFIG_USER_ONLY) */
    379 
    380 #if TARGET_BIG_ENDIAN
    381 # define cpu_lduw_data        cpu_lduw_be_data
    382 # define cpu_ldsw_data        cpu_ldsw_be_data
    383 # define cpu_ldl_data         cpu_ldl_be_data
    384 # define cpu_ldq_data         cpu_ldq_be_data
    385 # define cpu_lduw_data_ra     cpu_lduw_be_data_ra
    386 # define cpu_ldsw_data_ra     cpu_ldsw_be_data_ra
    387 # define cpu_ldl_data_ra      cpu_ldl_be_data_ra
    388 # define cpu_ldq_data_ra      cpu_ldq_be_data_ra
    389 # define cpu_lduw_mmuidx_ra   cpu_lduw_be_mmuidx_ra
    390 # define cpu_ldsw_mmuidx_ra   cpu_ldsw_be_mmuidx_ra
    391 # define cpu_ldl_mmuidx_ra    cpu_ldl_be_mmuidx_ra
    392 # define cpu_ldq_mmuidx_ra    cpu_ldq_be_mmuidx_ra
    393 # define cpu_ldw_mmu          cpu_ldw_be_mmu
    394 # define cpu_ldl_mmu          cpu_ldl_be_mmu
    395 # define cpu_ldq_mmu          cpu_ldq_be_mmu
    396 # define cpu_stw_data         cpu_stw_be_data
    397 # define cpu_stl_data         cpu_stl_be_data
    398 # define cpu_stq_data         cpu_stq_be_data
    399 # define cpu_stw_data_ra      cpu_stw_be_data_ra
    400 # define cpu_stl_data_ra      cpu_stl_be_data_ra
    401 # define cpu_stq_data_ra      cpu_stq_be_data_ra
    402 # define cpu_stw_mmuidx_ra    cpu_stw_be_mmuidx_ra
    403 # define cpu_stl_mmuidx_ra    cpu_stl_be_mmuidx_ra
    404 # define cpu_stq_mmuidx_ra    cpu_stq_be_mmuidx_ra
    405 # define cpu_stw_mmu          cpu_stw_be_mmu
    406 # define cpu_stl_mmu          cpu_stl_be_mmu
    407 # define cpu_stq_mmu          cpu_stq_be_mmu
    408 #else
    409 # define cpu_lduw_data        cpu_lduw_le_data
    410 # define cpu_ldsw_data        cpu_ldsw_le_data
    411 # define cpu_ldl_data         cpu_ldl_le_data
    412 # define cpu_ldq_data         cpu_ldq_le_data
    413 # define cpu_lduw_data_ra     cpu_lduw_le_data_ra
    414 # define cpu_ldsw_data_ra     cpu_ldsw_le_data_ra
    415 # define cpu_ldl_data_ra      cpu_ldl_le_data_ra
    416 # define cpu_ldq_data_ra      cpu_ldq_le_data_ra
    417 # define cpu_lduw_mmuidx_ra   cpu_lduw_le_mmuidx_ra
    418 # define cpu_ldsw_mmuidx_ra   cpu_ldsw_le_mmuidx_ra
    419 # define cpu_ldl_mmuidx_ra    cpu_ldl_le_mmuidx_ra
    420 # define cpu_ldq_mmuidx_ra    cpu_ldq_le_mmuidx_ra
    421 # define cpu_ldw_mmu          cpu_ldw_le_mmu
    422 # define cpu_ldl_mmu          cpu_ldl_le_mmu
    423 # define cpu_ldq_mmu          cpu_ldq_le_mmu
    424 # define cpu_stw_data         cpu_stw_le_data
    425 # define cpu_stl_data         cpu_stl_le_data
    426 # define cpu_stq_data         cpu_stq_le_data
    427 # define cpu_stw_data_ra      cpu_stw_le_data_ra
    428 # define cpu_stl_data_ra      cpu_stl_le_data_ra
    429 # define cpu_stq_data_ra      cpu_stq_le_data_ra
    430 # define cpu_stw_mmuidx_ra    cpu_stw_le_mmuidx_ra
    431 # define cpu_stl_mmuidx_ra    cpu_stl_le_mmuidx_ra
    432 # define cpu_stq_mmuidx_ra    cpu_stq_le_mmuidx_ra
    433 # define cpu_stw_mmu          cpu_stw_le_mmu
    434 # define cpu_stl_mmu          cpu_stl_le_mmu
    435 # define cpu_stq_mmu          cpu_stq_le_mmu
    436 #endif
    437 
    438 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
    439 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
    440 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
    441 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);
    442 
    443 static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
    444 {
    445     return (int8_t)cpu_ldub_code(env, addr);
    446 }
    447 
    448 static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
    449 {
    450     return (int16_t)cpu_lduw_code(env, addr);
    451 }
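/*
 * For example (a hypothetical fetch during translation, not part of this
 * header), a target decoder would read instruction bytes with the _code
 * accessors, which take their MMU index from cpu_mmu_index() as noted in
 * the header comment:
 *
 *     uint32_t insn = cpu_ldl_code(env, pc);
 *     int imm8 = cpu_ldsb_code(env, pc + 4);
 */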
    452 
    453 /**
    454  * tlb_vaddr_to_host:
    455  * @env: CPUArchState
    456  * @addr: guest virtual address to look up
     457  * @access_type: 0 for read, 1 for write, 2 for execute (MMUAccessType values)
    458  * @mmu_idx: MMU index to use for lookup
    459  *
     460  * Look up the specified guest virtual address in the TCG softmmu TLB.
    461  * If we can translate a host virtual address suitable for direct RAM
    462  * access, without causing a guest exception, then return it.
    463  * Otherwise (TLB entry is for an I/O access, guest software
    464  * TLB fill required, etc) return NULL.
    465  */
    466 #ifdef CONFIG_USER_ONLY
    467 static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
    468                                       MMUAccessType access_type, int mmu_idx)
    469 {
    470     return g2h(env_cpu(env), addr);
    471 }
    472 #else
    473 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
    474                         MMUAccessType access_type, int mmu_idx);
    475 #endif
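/*
 * Sketch of the intended calling pattern (illustrative only):
 *
 *     void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
 *     if (host) {
 *         // fast path: direct RAM access through the host pointer
 *     } else {
 *         // slow path: fall back to cpu_ldl_mmuidx_ra() and friends,
 *         // which may perform I/O or raise a guest exception
 *     }
 */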
    476 
    477 #endif /* CPU_LDST_H */