qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git
Log | Files | Refs | Submodules | LICENSE

tcg.h (38524B)


      1 /*
      2  * Tiny Code Generator for QEMU
      3  *
      4  * Copyright (c) 2008 Fabrice Bellard
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a copy
      7  * of this software and associated documentation files (the "Software"), to deal
      8  * in the Software without restriction, including without limitation the rights
      9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     10  * copies of the Software, and to permit persons to whom the Software is
     11  * furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
     19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
     22  * THE SOFTWARE.
     23  */
     24 
     25 #ifndef TCG_H
     26 #define TCG_H
     27 
     28 #include "cpu.h"
     29 #include "exec/memop.h"
     30 #include "exec/memopidx.h"
     31 #include "qemu/bitops.h"
     32 #include "qemu/plugin.h"
     33 #include "qemu/queue.h"
     34 #include "tcg/tcg-mo.h"
     35 #include "tcg-target.h"
     36 #include "tcg/tcg-cond.h"
     37 
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

/* A 64-bit argument occupies two host words on a 32-bit host
   (see the Call op parameter count comment below). */
#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 7
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
     54 
     55 #define CPU_TEMP_BUF_NLONGS 128
     56 #define TCG_STATIC_FRAME_SIZE  (CPU_TEMP_BUF_NLONGS * sizeof(long))
     57 
     58 /* Default target word size to pointer size.  */
     59 #ifndef TCG_TARGET_REG_BITS
     60 # if UINTPTR_MAX == UINT32_MAX
     61 #  define TCG_TARGET_REG_BITS 32
     62 # elif UINTPTR_MAX == UINT64_MAX
     63 #  define TCG_TARGET_REG_BITS 64
     64 # else
     65 #  error Unknown pointer size for tcg target
     66 # endif
     67 #endif
     68 
     69 #if TCG_TARGET_REG_BITS == 32
     70 typedef int32_t tcg_target_long;
     71 typedef uint32_t tcg_target_ulong;
     72 #define TCG_PRIlx PRIx32
     73 #define TCG_PRIld PRId32
     74 #elif TCG_TARGET_REG_BITS == 64
     75 typedef int64_t tcg_target_long;
     76 typedef uint64_t tcg_target_ulong;
     77 #define TCG_PRIlx PRIx64
     78 #define TCG_PRIld PRId64
     79 #else
     80 #error unsupported
     81 #endif
     82 
     83 /* Oversized TCG guests make things like MTTCG hard
     84  * as we can't use atomics for cputlb updates.
     85  */
     86 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
     87 #define TCG_OVERSIZED_GUEST 1
     88 #else
     89 #define TCG_OVERSIZED_GUEST 0
     90 #endif
     91 
     92 #if TCG_TARGET_NB_REGS <= 32
     93 typedef uint32_t TCGRegSet;
     94 #elif TCG_TARGET_NB_REGS <= 64
     95 typedef uint64_t TCGRegSet;
     96 #else
     97 #error unsupported
     98 #endif
     99 
    100 #if TCG_TARGET_REG_BITS == 32
    101 /* Turn some undef macros into false macros.  */
    102 #define TCG_TARGET_HAS_extrl_i64_i32    0
    103 #define TCG_TARGET_HAS_extrh_i64_i32    0
    104 #define TCG_TARGET_HAS_div_i64          0
    105 #define TCG_TARGET_HAS_rem_i64          0
    106 #define TCG_TARGET_HAS_div2_i64         0
    107 #define TCG_TARGET_HAS_rot_i64          0
    108 #define TCG_TARGET_HAS_ext8s_i64        0
    109 #define TCG_TARGET_HAS_ext16s_i64       0
    110 #define TCG_TARGET_HAS_ext32s_i64       0
    111 #define TCG_TARGET_HAS_ext8u_i64        0
    112 #define TCG_TARGET_HAS_ext16u_i64       0
    113 #define TCG_TARGET_HAS_ext32u_i64       0
    114 #define TCG_TARGET_HAS_bswap16_i64      0
    115 #define TCG_TARGET_HAS_bswap32_i64      0
    116 #define TCG_TARGET_HAS_bswap64_i64      0
    117 #define TCG_TARGET_HAS_neg_i64          0
    118 #define TCG_TARGET_HAS_not_i64          0
    119 #define TCG_TARGET_HAS_andc_i64         0
    120 #define TCG_TARGET_HAS_orc_i64          0
    121 #define TCG_TARGET_HAS_eqv_i64          0
    122 #define TCG_TARGET_HAS_nand_i64         0
    123 #define TCG_TARGET_HAS_nor_i64          0
    124 #define TCG_TARGET_HAS_clz_i64          0
    125 #define TCG_TARGET_HAS_ctz_i64          0
    126 #define TCG_TARGET_HAS_ctpop_i64        0
    127 #define TCG_TARGET_HAS_deposit_i64      0
    128 #define TCG_TARGET_HAS_extract_i64      0
    129 #define TCG_TARGET_HAS_sextract_i64     0
    130 #define TCG_TARGET_HAS_extract2_i64     0
    131 #define TCG_TARGET_HAS_movcond_i64      0
    132 #define TCG_TARGET_HAS_add2_i64         0
    133 #define TCG_TARGET_HAS_sub2_i64         0
    134 #define TCG_TARGET_HAS_mulu2_i64        0
    135 #define TCG_TARGET_HAS_muls2_i64        0
    136 #define TCG_TARGET_HAS_muluh_i64        0
    137 #define TCG_TARGET_HAS_mulsh_i64        0
    138 /* Turn some undef macros into true macros.  */
    139 #define TCG_TARGET_HAS_add2_i32         1
    140 #define TCG_TARGET_HAS_sub2_i32         1
    141 #endif
    142 
    143 #ifndef TCG_TARGET_deposit_i32_valid
    144 #define TCG_TARGET_deposit_i32_valid(ofs, len) 1
    145 #endif
    146 #ifndef TCG_TARGET_deposit_i64_valid
    147 #define TCG_TARGET_deposit_i64_valid(ofs, len) 1
    148 #endif
    149 #ifndef TCG_TARGET_extract_i32_valid
    150 #define TCG_TARGET_extract_i32_valid(ofs, len) 1
    151 #endif
    152 #ifndef TCG_TARGET_extract_i64_valid
    153 #define TCG_TARGET_extract_i64_valid(ofs, len) 1
    154 #endif
    155 
    156 /* Only one of DIV or DIV2 should be defined.  */
    157 #if defined(TCG_TARGET_HAS_div_i32)
    158 #define TCG_TARGET_HAS_div2_i32         0
    159 #elif defined(TCG_TARGET_HAS_div2_i32)
    160 #define TCG_TARGET_HAS_div_i32          0
    161 #define TCG_TARGET_HAS_rem_i32          0
    162 #endif
    163 #if defined(TCG_TARGET_HAS_div_i64)
    164 #define TCG_TARGET_HAS_div2_i64         0
    165 #elif defined(TCG_TARGET_HAS_div2_i64)
    166 #define TCG_TARGET_HAS_div_i64          0
    167 #define TCG_TARGET_HAS_rem_i64          0
    168 #endif
    169 
    170 /* For 32-bit targets, some sort of unsigned widening multiply is required.  */
    171 #if TCG_TARGET_REG_BITS == 32 \
    172     && !(defined(TCG_TARGET_HAS_mulu2_i32) \
    173          || defined(TCG_TARGET_HAS_muluh_i32))
    174 # error "Missing unsigned widening multiply"
    175 #endif
    176 
    177 #if !defined(TCG_TARGET_HAS_v64) \
    178     && !defined(TCG_TARGET_HAS_v128) \
    179     && !defined(TCG_TARGET_HAS_v256)
    180 #define TCG_TARGET_MAYBE_vec            0
    181 #define TCG_TARGET_HAS_abs_vec          0
    182 #define TCG_TARGET_HAS_neg_vec          0
    183 #define TCG_TARGET_HAS_not_vec          0
    184 #define TCG_TARGET_HAS_andc_vec         0
    185 #define TCG_TARGET_HAS_orc_vec          0
    186 #define TCG_TARGET_HAS_nand_vec         0
    187 #define TCG_TARGET_HAS_nor_vec          0
    188 #define TCG_TARGET_HAS_eqv_vec          0
    189 #define TCG_TARGET_HAS_roti_vec         0
    190 #define TCG_TARGET_HAS_rots_vec         0
    191 #define TCG_TARGET_HAS_rotv_vec         0
    192 #define TCG_TARGET_HAS_shi_vec          0
    193 #define TCG_TARGET_HAS_shs_vec          0
    194 #define TCG_TARGET_HAS_shv_vec          0
    195 #define TCG_TARGET_HAS_mul_vec          0
    196 #define TCG_TARGET_HAS_sat_vec          0
    197 #define TCG_TARGET_HAS_minmax_vec       0
    198 #define TCG_TARGET_HAS_bitsel_vec       0
    199 #define TCG_TARGET_HAS_cmpsel_vec       0
    200 #else
    201 #define TCG_TARGET_MAYBE_vec            1
    202 #endif
    203 #ifndef TCG_TARGET_HAS_v64
    204 #define TCG_TARGET_HAS_v64              0
    205 #endif
    206 #ifndef TCG_TARGET_HAS_v128
    207 #define TCG_TARGET_HAS_v128             0
    208 #endif
    209 #ifndef TCG_TARGET_HAS_v256
    210 #define TCG_TARGET_HAS_v256             0
    211 #endif
    212 
    213 #ifndef TARGET_INSN_START_EXTRA_WORDS
    214 # define TARGET_INSN_START_WORDS 1
    215 #else
    216 # define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
    217 #endif
    218 
/*
 * One INDEX_op_* enumerator per opcode listed in tcg/tcg-opc.h,
 * generated via the DEF() x-macro.  NB_OPS is the total count.
 */
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg/tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;
    225 
/* Set, clear and test one register bit within a TCGRegSet mask. */
#define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
#define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)
    229 
/*
 * tcg_insn_unit is the unit in which the backend emits host code:
 * TCG_TARGET_INSN_UNIT_SIZE bytes wide, declared by the target port.
 */
#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#endif
    243 
    244 
/*
 * tcg_debug_assert() is a real assert() under CONFIG_DEBUG_TCG (or when
 * running static analysis); otherwise it merely tells the optimizer the
 * condition holds, via __builtin_unreachable().
 */
#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
# define tcg_debug_assert(X) do { assert(X); } while (0)
#else
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#endif
    251 
/*
 * A pending relocation against a not-yet-resolved label: once the label
 * value is known, the instruction at @ptr is patched using the
 * target-specific relocation @type and @addend.
 */
typedef struct TCGRelocation TCGRelocation;
struct TCGRelocation {
    QSIMPLEQ_ENTRY(TCGRelocation) next;
    tcg_insn_unit *ptr;   /* location in the code buffer to patch */
    intptr_t addend;
    int type;             /* target-specific relocation kind */
};
    259 
/*
 * A branch target within the TB being generated.  Until the label's
 * value is known, branches to it are queued on @relocs and patched
 * once @has_value becomes true.
 */
typedef struct TCGLabel TCGLabel;
struct TCGLabel {
    unsigned present : 1;    /* label has been emitted into the op stream */
    unsigned has_value : 1;  /* u.value / u.value_ptr is valid */
    unsigned id : 14;        /* label id */
    unsigned refs : 16;      /* number of references to this label */
    union {
        uintptr_t value;
        const tcg_insn_unit *value_ptr;
    } u;
    QSIMPLEQ_HEAD(, TCGRelocation) relocs;  /* pending relocations */
    QSIMPLEQ_ENTRY(TCGLabel) next;
};
    273 
/* One chunk of the per-context scratch allocation pool (see tcg_malloc). */
typedef struct TCGPool {
    struct TCGPool *next;
    int size;                               /* usable bytes in data[] */
    uint8_t data[] __attribute__ ((aligned));
} TCGPool;
    279 
    280 #define TCG_POOL_CHUNK_SIZE 32768
    281 
    282 #define TCG_MAX_TEMPS 512
    283 #define TCG_MAX_INSNS 512
    284 
    285 /* when the size of the arguments of a called function is smaller than
    286    this value, they are statically allocated in the TB stack frame */
    287 #define TCG_STATIC_CALL_ARGS_SIZE 128
    288 
/* Operand types known to the code generator. */
typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,

    TCG_TYPE_V64,
    TCG_TYPE_V128,
    TCG_TYPE_V256,

    TCG_TYPE_COUNT, /* number of different types */

    /* An alias for the size of the host register.  */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif

    /* An alias for the size of the target "long", aka register.  */
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
#else
    TCG_TYPE_TL = TCG_TYPE_I32,
#endif
} TCGType;
    320 
    321 /**
    322  * get_alignment_bits
    323  * @memop: MemOp value
    324  *
    325  * Extract the alignment size from the memop.
    326  */
    327 static inline unsigned get_alignment_bits(MemOp memop)
    328 {
    329     unsigned a = memop & MO_AMASK;
    330 
    331     if (a == MO_UNALN) {
    332         /* No alignment required.  */
    333         a = 0;
    334     } else if (a == MO_ALIGN) {
    335         /* A natural alignment requirement.  */
    336         a = memop & MO_SIZE;
    337     } else {
    338         /* A specific alignment requirement.  */
    339         a = a >> MO_ASHIFT;
    340     }
    341 #if defined(CONFIG_SOFTMMU)
    342     /* The requested alignment cannot overlap the TLB flags.  */
    343     tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
    344 #endif
    345     return a;
    346 }
    347 
    348 typedef tcg_target_ulong TCGArg;
    349 
    350 /* Define type and accessor macros for TCG variables.
    351 
    352    TCG variables are the inputs and outputs of TCG ops, as described
    353    in tcg/README. Target CPU front-end code uses these types to deal
    354    with TCG variables as it emits TCG code via the tcg_gen_* functions.
    355    They come in several flavours:
    356     * TCGv_i32 : 32 bit integer type
    357     * TCGv_i64 : 64 bit integer type
    358     * TCGv_ptr : a host pointer type
    359     * TCGv_vec : a host vector type; the exact size is not exposed
    360                  to the CPU front-end code.
    361     * TCGv : an integer type the same size as target_ulong
    362              (an alias for either TCGv_i32 or TCGv_i64)
    363    The compiler's type checking will complain if you mix them
    364    up and pass the wrong sized TCGv to a function.
    365 
    366    Users of tcg_gen_* don't need to know about any of the internal
    367    details of these, and should treat them as opaque types.
    368    You won't be able to look inside them in a debugger either.
    369 
    370    Internal implementation details follow:
    371 
    372    Note that there is no definition of the structs TCGv_i32_d etc anywhere.
    373    This is deliberate, because the values we store in variables of type
    374    TCGv_i32 are not really pointers-to-structures. They're just small
    375    integers, but keeping them in pointer types like this means that the
    376    compiler will complain if you accidentally pass a TCGv_i32 to a
    377    function which takes a TCGv_i64, and so on. Only the internals of
    378    TCG need to care about the actual contents of the types.  */
    379 
    380 typedef struct TCGv_i32_d *TCGv_i32;
    381 typedef struct TCGv_i64_d *TCGv_i64;
    382 typedef struct TCGv_ptr_d *TCGv_ptr;
    383 typedef struct TCGv_vec_d *TCGv_vec;
    384 typedef TCGv_ptr TCGv_env;
    385 #if TARGET_LONG_BITS == 32
    386 #define TCGv TCGv_i32
    387 #elif TARGET_LONG_BITS == 64
    388 #define TCGv TCGv_i64
    389 #else
    390 #error Unhandled TARGET_LONG_BITS value
    391 #endif
    392 
    393 /* call flags */
    394 /* Helper does not read globals (either directly or through an exception). It
    395    implies TCG_CALL_NO_WRITE_GLOBALS. */
    396 #define TCG_CALL_NO_READ_GLOBALS    0x0001
    397 /* Helper does not write globals */
    398 #define TCG_CALL_NO_WRITE_GLOBALS   0x0002
    399 /* Helper can be safely suppressed if the return value is not used. */
    400 #define TCG_CALL_NO_SIDE_EFFECTS    0x0004
    401 /* Helper is G_NORETURN.  */
    402 #define TCG_CALL_NO_RETURN          0x0008
    403 
    404 /* convenience version of most used call flags */
    405 #define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
    406 #define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
    407 #define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
    408 #define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
    409 #define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)
    410 
    411 /* Used to align parameters.  See the comment before tcgv_i32_temp.  */
    412 #define TCG_CALL_DUMMY_ARG      ((TCGArg)0)
    413 
    414 /*
    415  * Flags for the bswap opcodes.
    416  * If IZ, the input is zero-extended, otherwise unknown.
    417  * If OZ or OS, the output is zero- or sign-extended respectively,
    418  * otherwise the high bits are undefined.
    419  */
    420 enum {
    421     TCG_BSWAP_IZ = 1,
    422     TCG_BSWAP_OZ = 2,
    423     TCG_BSWAP_OS = 4,
    424 };
    425 
/* Current location/state of a temporary's value during allocation. */
typedef enum TCGTempVal {
    TEMP_VAL_DEAD,   /* no live value */
    TEMP_VAL_REG,    /* value held in a host register */
    TEMP_VAL_MEM,    /* value held in its backing memory slot */
    TEMP_VAL_CONST,  /* value is a known constant */
} TCGTempVal;
    432 
    433 typedef enum TCGTempKind {
    434     /* Temp is dead at the end of all basic blocks. */
    435     TEMP_NORMAL,
    436     /* Temp is live across conditional branch, but dead otherwise. */
    437     TEMP_EBB,
    438     /* Temp is saved across basic blocks but dead at the end of TBs. */
    439     TEMP_LOCAL,
    440     /* Temp is saved across both basic blocks and translation blocks. */
    441     TEMP_GLOBAL,
    442     /* Temp is in a fixed register. */
    443     TEMP_FIXED,
    444     /* Temp is a fixed constant. */
    445     TEMP_CONST,
    446 } TCGTempKind;
    447 
/*
 * Description of one TCG value: a global, a local/normal temporary,
 * or a constant.  See TCGTempKind and TCGTempVal for the enumerated
 * states packed into the bitfields below.
 */
typedef struct TCGTemp {
    TCGReg reg:8;                  /* host register, when val_type is TEMP_VAL_REG */
    TCGTempVal val_type:8;         /* where the value currently lives */
    TCGType base_type:8;           /* type as originally allocated */
    TCGType type:8;                /* current type */
    TCGTempKind kind:3;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;   /* memory slot matches current value */
    unsigned int mem_allocated:1;  /* a memory slot has been assigned */
    unsigned int temp_allocated:1; /* temp is in use, not on a free list */

    int64_t val;                   /* constant value (TEMP_CONST / TEMP_VAL_CONST) */
    struct TCGTemp *mem_base;      /* base temp of the memory slot */
    intptr_t mem_offset;           /* offset from mem_base */
    const char *name;              /* diagnostic name, e.g. for globals */

    /* Pass-specific information that can be stored for a temporary.
       One word worth of integer data, and one pointer to data
       allocated separately.  */
    uintptr_t state;
    void *state_ptr;
} TCGTemp;
    471 
    472 typedef struct TCGContext TCGContext;
    473 
/* Bitmap with one bit per possible temporary (TCG_MAX_TEMPS bits). */
typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;
    477 
/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
#define DEAD_ARG  4
#define SYNC_ARG  1
typedef uint16_t TCGLifeData;
    485 
/* The layout here is designed to avoid a bitfield crossing of
   a 32-bit boundary, which would cause GCC to add extra padding.  */
typedef struct TCGOp {
    TCGOpcode opc   : 8;        /*  8 */

    /* Parameters for this opcode.  See the TCGOP_CALLI/TCGOP_VECL
       family of accessors below for their per-opcode meaning.  */
    unsigned param1 : 4;        /* 12 */
    unsigned param2 : 4;        /* 16 */

    /* Lifetime data of the operands.  */
    unsigned life   : 16;       /* 32 */

    /* Next and previous opcodes.  */
    QTAILQ_ENTRY(TCGOp) link;

    /* Arguments for the opcode.  */
    TCGArg args[MAX_OPC_PARAM];

    /* Register preferences for the output(s).  */
    TCGRegSet output_pref[2];
} TCGOp;
    507 
/* For call ops, param1/param2 hold the in/out argument counts... */
#define TCGOP_CALLI(X)    (X)->param1
#define TCGOP_CALLO(X)    (X)->param2

/* ...for vector ops they hold the vector length and element size. */
#define TCGOP_VECL(X)     (X)->param1
#define TCGOP_VECE(X)     (X)->param2

/* Make sure operands fit in the bitfields above.  */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
    516 
/* Translation statistics, accumulated under CONFIG_PROFILER. */
typedef struct TCGProfile {
    int64_t cpu_exec_time;
    int64_t tb_count1;
    int64_t tb_count;
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int temp_count_max;
    int64_t temp_count;
    int64_t del_op_count;
    int64_t code_in_len;
    int64_t code_out_len;
    int64_t search_out_len;
    int64_t interm_time;
    int64_t code_time;
    int64_t la_time;
    int64_t opt_time;
    int64_t restore_count;
    int64_t restore_time;
    int64_t table_op_count[NB_OPS]; /* per-opcode counters */
} TCGProfile;
    537 
/*
 * State of the code generator: temp/label bookkeeping, the op list for
 * the TB currently being translated, and the output code buffer
 * pointers.  Accessed through the __thread tcg_ctx pointer below.
 */
struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;
    int nb_labels;
    int nb_globals;
    int nb_temps;
    int nb_indirects;
    int nb_ops;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */

    TCGRegSet reserved_regs;
    uint32_t tb_cflags; /* cflags of the current TB */
    intptr_t current_frame_offset;
    intptr_t frame_start;
    intptr_t frame_end;
    TCGTemp *frame_temp;

    tcg_insn_unit *code_ptr;

#ifdef CONFIG_PROFILER
    TCGProfile prof;
#endif

#ifdef CONFIG_DEBUG_TCG
    int temps_in_use;
    int goto_tb_issue_mask;
    const TCGOpcode *vecop_list;
#endif

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    void *code_gen_buffer;
    size_t code_gen_buffer_size;
    void *code_gen_ptr;
    void *data_gen_ptr;

    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;

    /* Track which vCPU triggers events */
    CPUState *cpu;                      /* *_trans */

    /* These structures are private to tcg-target.c.inc.  */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    struct TCGLabelPoolData *pool_labels;
#endif

    TCGLabel *exitreq_label;

#ifdef CONFIG_PLUGIN
    /*
     * We keep one plugin_tb struct per TCGContext. Note that on every TB
     * translation we clear but do not free its contents; this way we
     * avoid a lot of malloc/free churn, since after a few TB's it's
     * unlikely that we'll need to allocate either more instructions or more
     * space for instructions (for variable-instruction-length ISAs).
     */
    struct qemu_plugin_tb *plugin_tb;

    /* descriptor of the instruction being translated */
    struct qemu_plugin_insn *plugin_insn;
#endif

    GHashTable *const_table[TCG_TYPE_COUNT];
    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    QTAILQ_HEAD(, TCGOp) ops, free_ops;
    QSIMPLEQ_HEAD(, TCGLabel) labels;

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];

    /* Exit to translator on overflow. */
    sigjmp_buf jmp_trans;
};
    628 
    629 static inline bool temp_readonly(TCGTemp *ts)
    630 {
    631     return ts->kind >= TEMP_FIXED;
    632 }
    633 
    634 extern __thread TCGContext *tcg_ctx;
    635 extern const void *tcg_code_gen_epilogue;
    636 extern uintptr_t tcg_splitwx_diff;
    637 extern TCGv_env cpu_env;
    638 
    639 bool in_code_gen_buffer(const void *p);
    640 
#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw);
void *tcg_splitwx_to_rw(const void *rx);
#else
/* Translate a writable code pointer to its executable alias;
   the two views differ by tcg_splitwx_diff bytes.  NULL maps to NULL. */
static inline const void *tcg_splitwx_to_rx(void *rw)
{
    return rw ? rw + tcg_splitwx_diff : NULL;
}

/* Inverse mapping: executable alias back to the writable pointer. */
static inline void *tcg_splitwx_to_rw(const void *rx)
{
    return rx ? (void *)rx - tcg_splitwx_diff : NULL;
}
#endif
    655 
    656 static inline size_t temp_idx(TCGTemp *ts)
    657 {
    658     ptrdiff_t n = ts - tcg_ctx->temps;
    659     tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
    660     return n;
    661 }
    662 
    663 static inline TCGArg temp_arg(TCGTemp *ts)
    664 {
    665     return (uintptr_t)ts;
    666 }
    667 
    668 static inline TCGTemp *arg_temp(TCGArg a)
    669 {
    670     return (TCGTemp *)(uintptr_t)a;
    671 }
    672 
/* Using the offset of a temporary, relative to TCGContext, rather than
   its index means that we don't use 0.  That leaves offset 0 free for
   a NULL representation without having to leave index 0 unused.  */
static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    /* A TCGv_i32 is really the byte offset of the temp within tcg_ctx. */
    uintptr_t o = (uintptr_t)v;
    TCGTemp *t = (void *)tcg_ctx + o;
    /* Verify the offset lands exactly on a live temps[] entry. */
    tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
    return t;
}
    683 
/* All TCGv flavours share the same encoding; these delegate to
   tcgv_i32_temp after a representation-only cast. */
static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_vec_temp(TCGv_vec v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}
    698 
    699 static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
    700 {
    701     return temp_arg(tcgv_i32_temp(v));
    702 }
    703 
    704 static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
    705 {
    706     return temp_arg(tcgv_i64_temp(v));
    707 }
    708 
    709 static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
    710 {
    711     return temp_arg(tcgv_ptr_temp(v));
    712 }
    713 
    714 static inline TCGArg tcgv_vec_arg(TCGv_vec v)
    715 {
    716     return temp_arg(tcgv_vec_temp(v));
    717 }
    718 
/* Encode a TCGTemp as a TCGv handle: its byte offset from tcg_ctx
   (see the comment before tcgv_i32_temp above). */
static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
{
    (void)temp_idx(t); /* trigger embedded assert */
    return (TCGv_i32)((void *)t - (void *)tcg_ctx);
}

static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
{
    return (TCGv_i64)temp_tcgv_i32(t);
}

static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
{
    return (TCGv_ptr)temp_tcgv_i32(t);
}

static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
{
    return (TCGv_vec)temp_tcgv_i32(t);
}
    739 
#if TCG_TARGET_REG_BITS == 32
/* On a 32-bit host a 64-bit value occupies two adjacent temps;
   these return the low and high halves respectively. */
static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t));
}

static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
}
#endif
    751 
/* Return raw argument @arg of opcode @op. */
static inline TCGArg tcg_get_insn_param(TCGOp *op, int arg)
{
    return op->args[arg];
}

/* Store @v as raw argument @arg of opcode @op. */
static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
{
    op->args[arg] = v;
}
    761 
/* Read insn_start parameter @arg; a target_ulong occupies two op
   arguments when the target word is wider than the host register. */
static inline target_ulong tcg_get_insn_start_param(TCGOp *op, int arg)
{
#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    return tcg_get_insn_param(op, arg);
#else
    return tcg_get_insn_param(op, arg * 2) |
           ((uint64_t)tcg_get_insn_param(op, arg * 2 + 1) << 32);
#endif
}

/* Store @v into insn_start parameter @arg; mirror of the getter above. */
static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v)
{
#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    tcg_set_insn_param(op, arg, v);
#else
    tcg_set_insn_param(op, arg * 2, v);
    tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
#endif
}
    781 
    782 /* The last op that was emitted.  */
    783 static inline TCGOp *tcg_last_op(void)
    784 {
    785     return QTAILQ_LAST(&tcg_ctx->ops);
    786 }
    787 
    788 /* Test for whether to terminate the TB for using too many opcodes.  */
    789 static inline bool tcg_op_buf_full(void)
    790 {
    791     /* This is not a hard limit, it merely stops translation when
    792      * we have produced "enough" opcodes.  We want to limit TB size
    793      * such that a RISC host can reasonably use a 16-bit signed
    794      * branch within the TB.  We also need to be mindful of the
    795      * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
    796      * and TCGContext.gen_insn_end_off[].
    797      */
    798     return tcg_ctx->nb_ops >= 4000;
    799 }
    800 
/* pool based memory allocation */

/* user-mode: mmap_lock must be held for tcg_malloc_internal. */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);

void tcg_region_reset_all(void);

/* Used and total size of the generated-code buffer. */
size_t tcg_code_size(void);
size_t tcg_code_capacity(void);

/* TB registry: insert, remove, look up by host code pointer, iterate. */
void tcg_tb_insert(TranslationBlock *tb);
void tcg_tb_remove(TranslationBlock *tb);
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
size_t tcg_nb_tbs(void);
    818 
    819 /* user-mode: Called with mmap_lock held.  */
    820 static inline void *tcg_malloc(int size)
    821 {
    822     TCGContext *s = tcg_ctx;
    823     uint8_t *ptr, *ptr_end;
    824 
    825     /* ??? This is a weak placeholder for minimum malloc alignment.  */
    826     size = QEMU_ALIGN_UP(size, 8);
    827 
    828     ptr = s->pool_cur;
    829     ptr_end = ptr + size;
    830     if (unlikely(ptr_end > s->pool_end)) {
    831         return tcg_malloc_internal(tcg_ctx, size);
    832     } else {
    833         s->pool_cur = ptr_end;
    834         return ptr;
    835     }
    836 }
    837 
void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
void tcg_register_thread(void);
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

/* Internal constructors; front ends use the typed inline wrappers. */
TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
                                     intptr_t, const char *);
TCGTemp *tcg_temp_new_internal(TCGType, bool);
void tcg_temp_free_internal(TCGTemp *);
TCGv_vec tcg_temp_new_vec(TCGType type);
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
    853 
    854 static inline void tcg_temp_free_i32(TCGv_i32 arg)
    855 {
    856     tcg_temp_free_internal(tcgv_i32_temp(arg));
    857 }
    858 
    859 static inline void tcg_temp_free_i64(TCGv_i64 arg)
    860 {
    861     tcg_temp_free_internal(tcgv_i64_temp(arg));
    862 }
    863 
    864 static inline void tcg_temp_free_ptr(TCGv_ptr arg)
    865 {
    866     tcg_temp_free_internal(tcgv_ptr_temp(arg));
    867 }
    868 
    869 static inline void tcg_temp_free_vec(TCGv_vec arg)
    870 {
    871     tcg_temp_free_internal(tcgv_vec_temp(arg));
    872 }
    873 
    874 static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
    875                                               const char *name)
    876 {
    877     TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    878     return temp_tcgv_i32(t);
    879 }
    880 
    881 static inline TCGv_i32 tcg_temp_new_i32(void)
    882 {
    883     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
    884     return temp_tcgv_i32(t);
    885 }
    886 
    887 static inline TCGv_i32 tcg_temp_local_new_i32(void)
    888 {
    889     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
    890     return temp_tcgv_i32(t);
    891 }
    892 
    893 static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
    894                                               const char *name)
    895 {
    896     TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    897     return temp_tcgv_i64(t);
    898 }
    899 
    900 static inline TCGv_i64 tcg_temp_new_i64(void)
    901 {
    902     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
    903     return temp_tcgv_i64(t);
    904 }
    905 
    906 static inline TCGv_i64 tcg_temp_local_new_i64(void)
    907 {
    908     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
    909     return temp_tcgv_i64(t);
    910 }
    911 
    912 static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
    913                                               const char *name)
    914 {
    915     TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_PTR, reg, offset, name);
    916     return temp_tcgv_ptr(t);
    917 }
    918 
    919 static inline TCGv_ptr tcg_temp_new_ptr(void)
    920 {
    921     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
    922     return temp_tcgv_ptr(t);
    923 }
    924 
    925 static inline TCGv_ptr tcg_temp_local_new_ptr(void)
    926 {
    927     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
    928     return temp_tcgv_ptr(t);
    929 }
    930 
#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
/* Non-debug builds: leak checking compiles away to nothing. */
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif

/* Profiling / statistics reporting. */
int64_t tcg_cpu_exec_time(void);
void tcg_dump_info(GString *buf);
void tcg_dump_op_count(GString *buf);
    947 
#define TCG_CT_CONST  1 /* any constant of register size */

/* Register/constant constraint for one operand of an opcode. */
typedef struct TCGArgConstraint {
    unsigned ct : 16;
    unsigned alias_index : 4;
    unsigned sort_index : 4;
    bool oalias : 1;
    bool ialias : 1;
    bool newreg : 1;
    TCGRegSet regs;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16

/* Bits for TCGOpDef->flags, 8 bits available, all used.  */
enum {
    /* Instruction exits the translation block.  */
    TCG_OPF_BB_EXIT      = 0x01,
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x02,
    /* Instruction clobbers call registers and potentially updates globals.  */
    TCG_OPF_CALL_CLOBBER = 0x04,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x08,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x10,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x20,
    /* Instruction operands are vectors.  */
    TCG_OPF_VECTOR       = 0x40,
    /* Instruction is a conditional branch. */
    TCG_OPF_COND_BRANCH  = 0x80
};

/* Per-opcode metadata: name, operand counts, flags and constraints. */
typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
} TCGOpDef;

extern TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;

/* Backend-supplied constraint strings for one opcode. */
typedef struct TCGTargetOpDef {
    TCGOpcode op;
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;
    998 
/* Report an internal TCG fatal error with source location and terminate. */
#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)
   1004 
bool tcg_op_supported(TCGOpcode op);

void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);

/* Append to, remove from, or insert into the current op stream. */
TCGOp *tcg_emit_op(TCGOpcode opc);
void tcg_op_remove(TCGContext *s, TCGOp *op);
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);

/**
 * tcg_remove_ops_after:
 * @op: target operation
 *
 * Discard any opcodes emitted since @op.  Expected usage is to save
 * a starting point with tcg_last_op(), speculatively emit opcodes,
 * then decide whether or not to keep those opcodes after the fact.
 */
void tcg_remove_ops_after(TCGOp *op);

void tcg_optimize(TCGContext *s);

/* Allocate a new temporary and initialize it with a constant. */
TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);
TCGv_vec tcg_const_zeros_vec(TCGType);
TCGv_vec tcg_const_ones_vec(TCGType);
TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
   1035 
   1036 /*
   1037  * Locate or create a read-only temporary that is a constant.
   1038  * This kind of temporary need not be freed, but for convenience
   1039  * will be silently ignored by tcg_temp_free_*.
   1040  */
   1041 TCGTemp *tcg_constant_internal(TCGType type, int64_t val);
   1042 
   1043 static inline TCGv_i32 tcg_constant_i32(int32_t val)
   1044 {
   1045     return temp_tcgv_i32(tcg_constant_internal(TCG_TYPE_I32, val));
   1046 }
   1047 
   1048 static inline TCGv_i64 tcg_constant_i64(int64_t val)
   1049 {
   1050     return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val));
   1051 }
   1052 
TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val);
TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);

/* Pointer-typed constants forward to the host-word-sized integer variant. */
#if UINTPTR_MAX == UINT32_MAX
# define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
# define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
# define tcg_constant_ptr(x)     ((TCGv_ptr)tcg_constant_i32((intptr_t)(x)))
#else
# define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
# define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
# define tcg_constant_ptr(x)     ((TCGv_ptr)tcg_constant_i64((intptr_t)(x)))
#endif

TCGLabel *gen_new_label(void);
   1067 
   1068 /**
   1069  * label_arg
   1070  * @l: label
   1071  *
   1072  * Encode a label for storage in the TCG opcode stream.
   1073  */
   1074 
   1075 static inline TCGArg label_arg(TCGLabel *l)
   1076 {
   1077     return (uintptr_t)l;
   1078 }
   1079 
   1080 /**
   1081  * arg_label
   1082  * @i: value
   1083  *
   1084  * The opposite of label_arg.  Retrieve a label from the
   1085  * encoding of the TCG opcode stream.
   1086  */
   1087 
   1088 static inline TCGLabel *arg_label(TCGArg i)
   1089 {
   1090     return (TCGLabel *)(uintptr_t)i;
   1091 }
   1092 
   1093 /**
   1094  * tcg_ptr_byte_diff
   1095  * @a, @b: addresses to be differenced
   1096  *
   1097  * There are many places within the TCG backends where we need a byte
   1098  * difference between two pointers.  While this can be accomplished
   1099  * with local casting, it's easy to get wrong -- especially if one is
   1100  * concerned with the signedness of the result.
   1101  *
   1102  * This version relies on GCC's void pointer arithmetic to get the
   1103  * correct result.
   1104  */
   1105 
   1106 static inline ptrdiff_t tcg_ptr_byte_diff(const void *a, const void *b)
   1107 {
   1108     return a - b;
   1109 }
   1110 
   1111 /**
   1112  * tcg_pcrel_diff
   1113  * @s: the tcg context
   1114  * @target: address of the target
   1115  *
   1116  * Produce a pc-relative difference, from the current code_ptr
   1117  * to the destination address.
   1118  */
   1119 
   1120 static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, const void *target)
   1121 {
   1122     return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_ptr));
   1123 }
   1124 
   1125 /**
   1126  * tcg_tbrel_diff
   1127  * @s: the tcg context
   1128  * @target: address of the target
   1129  *
   1130  * Produce a difference, from the beginning of the current TB code
   1131  * to the destination address.
   1132  */
   1133 static inline ptrdiff_t tcg_tbrel_diff(TCGContext *s, const void *target)
   1134 {
   1135     return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_buf));
   1136 }
   1137 
   1138 /**
   1139  * tcg_current_code_size
   1140  * @s: the tcg context
   1141  *
   1142  * Compute the current code size within the translation block.
   1143  * This is used to fill in qemu's data structures for goto_tb.
   1144  */
   1145 
   1146 static inline size_t tcg_current_code_size(TCGContext *s)
   1147 {
   1148     return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
   1149 }
   1150 
   1151 /**
   1152  * tcg_qemu_tb_exec:
   1153  * @env: pointer to CPUArchState for the CPU
   1154  * @tb_ptr: address of generated code for the TB to execute
   1155  *
   1156  * Start executing code from a given translation block.
   1157  * Where translation blocks have been linked, execution
   1158  * may proceed from the given TB into successive ones.
   1159  * Control eventually returns only when some action is needed
   1160  * from the top-level loop: either control must pass to a TB
   1161  * which has not yet been directly linked, or an asynchronous
   1162  * event such as an interrupt needs handling.
   1163  *
   1164  * Return: The return value is the value passed to the corresponding
   1165  * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
   1166  * The value is either zero or a 4-byte aligned pointer to that TB combined
   1167  * with additional information in its two least significant bits. The
   1168  * additional information is encoded as follows:
   1169  *  0, 1: the link between this TB and the next is via the specified
   1170  *        TB index (0 or 1). That is, we left the TB via (the equivalent
   1171  *        of) "goto_tb <index>". The main loop uses this to determine
   1172  *        how to link the TB just executed to the next.
   1173  *  2:    we are using instruction counting code generation, and we
   1174  *        did not start executing this TB because the instruction counter
   1175  *        would hit zero midway through it. In this case the pointer
   1176  *        returned is the TB we were about to execute, and the caller must
   1177  *        arrange to execute the remaining count of instructions.
   1178  *  3:    we stopped because the CPU's exit_request flag was set
   1179  *        (usually meaning that there is an interrupt that needs to be
   1180  *        handled). The pointer returned is the TB we were about to execute
   1181  *        when we noticed the pending exit request.
   1182  *
   1183  * If the bottom two bits indicate an exit-via-index then the CPU
   1184  * state is correctly synchronised and ready for execution of the next
   1185  * TB (and in particular the guest PC is the address to execute next).
   1186  * Otherwise, we gave up on execution of this TB before it started, and
   1187  * the caller must fix up the CPU state by calling the CPU's
   1188  * synchronize_from_tb() method with the TB pointer we return (falling
   1189  * back to calling the CPU's set_pc method with tb->pb if no
   1190  * synchronize_from_tb() method exists).
   1191  *
   1192  * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
   1193  * to this default (which just calls the prologue.code emitted by
   1194  * tcg_target_qemu_prologue()).
   1195  */
   1196 #define TB_EXIT_MASK      3
   1197 #define TB_EXIT_IDX0      0
   1198 #define TB_EXIT_IDX1      1
   1199 #define TB_EXIT_IDXMAX    1
   1200 #define TB_EXIT_REQUESTED 3
   1201 
   1202 #ifdef CONFIG_TCG_INTERPRETER
   1203 uintptr_t tcg_qemu_tb_exec(CPUArchState *env, const void *tb_ptr);
   1204 #else
   1205 typedef uintptr_t tcg_prologue_fn(CPUArchState *env, const void *tb_ptr);
   1206 extern tcg_prologue_fn *tcg_qemu_tb_exec;
   1207 #endif
   1208 
void tcg_register_jit(const void *buf, size_t buf_size);

#if TCG_TARGET_MAYBE_vec
/* Return zero if the tuple (opc, type, vece) is unsupportable;
   return > 0 if it is directly supportable;
   return < 0 if we must call tcg_expand_vec_op.  */
int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
#else
/* Host has no vector support: nothing is supportable. */
static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
{
    return 0;
}
#endif
   1222 
/* Expand the tuple (opc, type, vece) on the given arguments.  */
void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);

/* Replicate a constant C according to the log2 of the element size.  */
uint64_t dup_const(unsigned vece, uint64_t c);

/* The macro below shadows the function above: when VECE is a
   compile-time constant the replication folds to a constant expression;
   otherwise the last arm calls the out-of-line function (the macro name
   is not re-expanded inside its own definition). */
#define dup_const(VECE, C)                                         \
    (__builtin_constant_p(VECE)                                    \
     ? (  (VECE) == MO_8  ? 0x0101010101010101ull * (uint8_t)(C)   \
        : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C)  \
        : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C)  \
        : (VECE) == MO_64 ? (uint64_t)(C)                          \
        : (qemu_build_not_reached_always(), 0))                    \
     : dup_const(VECE, C))

/* As above, but replicating across a target-long-sized value. */
#if TARGET_LONG_BITS == 64
# define dup_const_tl  dup_const
#else
# define dup_const_tl(VECE, C)                                     \
    (__builtin_constant_p(VECE)                                    \
     ? (  (VECE) == MO_8  ? 0x01010101ul * (uint8_t)(C)            \
        : (VECE) == MO_16 ? 0x00010001ul * (uint16_t)(C)           \
        : (VECE) == MO_32 ? 0x00000001ul * (uint32_t)(C)           \
        : (qemu_build_not_reached_always(), 0))                    \
     :  (target_long)dup_const(VECE, C))
#endif
   1249 
#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode);
#else
/* Non-debug builds: vecop-list assertions compile away. */
static inline void tcg_assert_listed_vecop(TCGOpcode op) { }
#endif

/* Install @n as the context's vecop list and return the previous one
   (debug builds only; otherwise a no-op returning NULL). */
static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
{
#ifdef CONFIG_DEBUG_TCG
    const TCGOpcode *o = tcg_ctx->vecop_list;
    tcg_ctx->vecop_list = n;
    return o;
#else
    return NULL;
#endif
}

bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned);
   1268 
   1269 #endif /* TCG_H */