qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git
Log | Files | Refs | Submodules | LICENSE

translate.c (54818B)


      1 /*
      2  *  Xilinx MicroBlaze emulation for qemu: main translation routines.
      3  *
      4  *  Copyright (c) 2009 Edgar E. Iglesias.
      5  *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
      6  *
      7  * This library is free software; you can redistribute it and/or
      8  * modify it under the terms of the GNU Lesser General Public
      9  * License as published by the Free Software Foundation; either
     10  * version 2.1 of the License, or (at your option) any later version.
     11  *
     12  * This library is distributed in the hope that it will be useful,
     13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
     14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     15  * Lesser General Public License for more details.
     16  *
     17  * You should have received a copy of the GNU Lesser General Public
     18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
     19  */
     20 
     21 #include "qemu/osdep.h"
     22 #include "cpu.h"
     23 #include "disas/disas.h"
     24 #include "exec/exec-all.h"
     25 #include "tcg/tcg-op.h"
     26 #include "exec/helper-proto.h"
     27 #include "exec/cpu_ldst.h"
     28 #include "exec/helper-gen.h"
     29 #include "exec/translator.h"
     30 #include "qemu/qemu-print.h"
     31 
     32 #include "exec/log.h"
     33 
     34 #define EXTRACT_FIELD(src, start, end) \
     35             (((src) >> start) & ((1 << (end - start + 1)) - 1))
     36 
     37 /* is_jmp field values */
     38 #define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
     39 #define DISAS_EXIT    DISAS_TARGET_1 /* all cpu state modified dynamically */
     40 
     41 /* cpu state besides pc was modified dynamically; update pc to next */
     42 #define DISAS_EXIT_NEXT DISAS_TARGET_2
     43 /* cpu state besides pc was modified dynamically; update pc to btarget */
     44 #define DISAS_EXIT_JUMP DISAS_TARGET_3
     45 
/* TCG globals backing the guest CPU state used by this translator.  */
static TCGv_i32 cpu_R[32];      /* general purpose registers r0..r31 */
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;      /* MSR carry bit, tracked separately */
static TCGv_i32 cpu_imm;        /* high half staged by the "imm" insn */
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;    /* branch target */
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;       /* lwx reservation address */
static TCGv_i32 cpu_res_val;    /* lwx reservation value */
     56 
     57 #include "exec/gen-icount.h"
     58 
/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* TCG op of the current insn_start.  */
    TCGOp *insn_start;

    /* Lazily allocated scratch temp standing in for r0;
       see reg_for_read / reg_for_write.  */
    TCGv_i32 r0;
    bool r0_set;            /* true while r0 is known to hold zero */

    /* Decoder.  */
    uint32_t ext_imm;       /* upper 16 bits staged by a preceding "imm" */
    unsigned int tb_flags;  /* translation-time copy of the iflags */
    /* Flags to merge into tb_flags (presumably after the current insn
       completes — set by trans_imm; consumer not in this chunk).  */
    unsigned int tb_flags_to_set;
    int mem_index;          /* MMU index for memory accesses */

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;
     82 
     83 static int typeb_imm(DisasContext *dc, int x)
     84 {
     85     if (dc->tb_flags & IMM_FLAG) {
     86         return deposit32(dc->ext_imm, 0, 16, x);
     87     }
     88     return x;
     89 }
     90 
     91 /* Include the auto-generated decoder.  */
     92 #include "decode-insns.c.inc"
     93 
/* Write back cpu_iflags only if the translator's copy has diverged
   from the flags the TB was translated with.  */
static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}
    101 
/* Emit a call to the raise-exception helper; the TB ends here
   (DISAS_NORETURN), so nothing may be emitted after this.  */
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->base.is_jmp = DISAS_NORETURN;
}
    110 
/* As gen_raise_exception, but first sync iflags and pc so the helper
   observes the state of the faulting insn.  */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}
    117 
/* Raise a hardware exception with the given ESR exception class,
   storing esr_ec into env->esr before the helper call.  */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_const_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
    tcg_temp_free_i32(tmp);

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
    126 
/*
 * Jump to a fixed destination: chain directly to the next TB when
 * translator_use_goto_tb allows it, otherwise update pc and do a
 * dynamic TB lookup.  Ends the TB.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
    139 
/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    /* Trap only if MSR[EE] is set and the core is configured to do so.  */
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}
    152 
/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    /* Only applies when translating with the user MMU index.  */
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}
    166 
    167 /*
    168  * Return true, and log an error, if the current insn is
    169  * within a delay slot.
    170  */
    171 static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
    172 {
    173     if (dc->tb_flags & D_FLAG) {
    174         qemu_log_mask(LOG_GUEST_ERROR,
    175                       "Invalid insn in delay slot: %s at %08x\n",
    176                       insn_type, (uint32_t)dc->base.pc_next);
    177         return true;
    178     }
    179     return false;
    180 }
    181 
/*
 * Return a temp holding the value of register "reg" for reading.
 * r0 always reads as zero; a scratch temp is (re)loaded with 0 on
 * demand and remembered via r0_set until something writes to it.
 */
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}
    196 
    197 static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
    198 {
    199     if (likely(reg != 0)) {
    200         return cpu_R[reg];
    201     }
    202     if (dc->r0 == NULL) {
    203         dc->r0 = tcg_temp_new_i32();
    204     }
    205     return dc->r0;
    206 }
    207 
/*
 * Emit a type-A (rd, ra, rb) insn via "fn".  side_effects indicates the
 * op does more than write rd (e.g. touches carry), in which case it must
 * be emitted even when the destination is r0.
 */
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    /* A pure op targeting r0 is a nop.  */
    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}
    223 
    224 static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
    225                       void (*fn)(TCGv_i32, TCGv_i32))
    226 {
    227     TCGv_i32 rd, ra;
    228 
    229     if (arg->rd == 0 && !side_effects) {
    230         return true;
    231     }
    232 
    233     rd = reg_for_write(dc, arg->rd);
    234     ra = reg_for_read(dc, arg->ra);
    235     fn(rd, ra);
    236     return true;
    237 }
    238 
/*
 * Emit a type-B insn whose generator takes the immediate as a C value
 * (tcg_gen_*i_* style).  The decoder has already merged any "imm" prefix
 * into arg->imm.
 */
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    /* A pure op targeting r0 is a nop.  */
    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}
    253 
/*
 * Emit a type-B insn whose generator requires the immediate in a TCG
 * temp (reusing a type-A style generator such as gen_add).
 */
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    /* A pure op targeting r0 is a nop.  */
    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_const_i32(arg->imm);

    fn(rd, ra, imm);

    tcg_temp_free_i32(imm);
    return true;
}
    272 
/* Wrap a three-register (rd, ra, rb) insn.  SE: insn has side effects.  */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

/* As DO_TYPEA, but the insn only exists when dc->cfg->CFG is nonzero.  */
#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

/* Wrap a two-register (rd, ra) insn.  */
#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

/* As DO_TYPEA0, gated on dc->cfg->CFG.  */
#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

/* Wrap a register+immediate insn; generator takes a C immediate.  */
#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

/* As DO_TYPEBI, gated on dc->cfg->CFG.  */
#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

/* Wrap a register+immediate insn; generator takes a TCGv immediate.  */
#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Forward an op to a helper that additionally takes cpu_env.  */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }
    308 
/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);

    /* 64-bit add of (ina + inb); the high word is the carry out.  */
    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);

    tcg_temp_free_i32(zero);
}
    318 
/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* tmp:carry = ina + C_in, then out:carry = tmp + carry + inb.
       The intermediate carries cannot overflow jointly, so the final
       cpu_msr_c is the true carry out of (ina + inb + C_in).  */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(zero);
}
    331 
/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

/* Adds: the "k" (keep) forms do not write the carry, so the pure
   register-only ones have no side effects.  */
DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)
    348 
/* andni: and with the complement of the immediate.  */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
    358 
    359 static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
    360 {
    361     TCGv_i32 tmp = tcg_temp_new_i32();
    362     tcg_gen_andi_i32(tmp, inb, 31);
    363     tcg_gen_sar_i32(out, ina, tmp);
    364     tcg_temp_free_i32(tmp);
    365 }
    366 
    367 static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
    368 {
    369     TCGv_i32 tmp = tcg_temp_new_i32();
    370     tcg_gen_andi_i32(tmp, inb, 31);
    371     tcg_gen_shr_i32(out, ina, tmp);
    372     tcg_temp_free_i32(tmp);
    373 }
    374 
    375 static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
    376 {
    377     TCGv_i32 tmp = tcg_temp_new_i32();
    378     tcg_gen_andi_i32(tmp, inb, 31);
    379     tcg_gen_shl_i32(out, ina, tmp);
    380     tcg_temp_free_i32(tmp);
    381 }
    382 
/* bsefi: extract imm_w bits starting at bit imm_s of ina into out.  */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior.  */
        /* The destination is left unmodified in that case.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}
    397 
/*
 * bsifi: insert bits [imm_s, imm_w] of ina into out at position imm_s,
 * preserving the destination's other bits (read-modify-write of rd).
 */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior.  */
        /* The destination is left unmodified in that case.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}
    413 
/* All barrel shift forms require the optional barrel shifter.  */
DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
    424 
/* Count leading zeros; an all-zero input yields 32.  */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
    431 
/* Signed compare: out = inb - ina, with bit 31 forced to (inb < ina).  */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}
    441 
/* Unsigned compare: out = inb - ina, with bit 31 forced to (inb <u ina).  */
static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
    454 
/* FPU ops go through helpers (they can raise FP exceptions, hence
   side_effects = true).  */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

/* Conversions and sqrt require the extended FPU (use_fpu >= 2).  */
DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
    486 
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
/* MicroBlaze idiv computes rb / ra (divisor in ra).  */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
    500 
/*
 * imm: stage the upper 16 bits of the next insn's immediate, both at
 * translation time (ext_imm, flagged by IMM_FLAG) and at runtime
 * (cpu_imm).  Not permitted inside a delay slot.
 */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
    511 
/* Signed multiply, high 32 bits of the product (low half discarded).  */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* Unsigned multiply, high 32 bits of the product.  */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* Signed x unsigned multiply, high 32 bits of the product.  */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* High-half forms need the full multiplier (use_hw_mul >= 2).  */
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
    538 
DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

/* pcmpeq/pcmpne produce a 0/1 flag rather than a difference.  */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
    555 
/* No input carry, but output carry. */
/* rsub computes inb - ina; MSR[C] is the not-borrow, i.e. inb >=u ina.  */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}
    562 
/* Input and output carry. */
/* inb - ina - !C computed as inb + ~ina + C, as in gen_addc.  */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(tmp);
}
    576 
/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}
    582 
/* Input carry, no output carry. */
/* inb + ~ina + C, carry discarded.  */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);

    tcg_temp_free_i32(nota);
}
    594 
/* Reverse subtracts; "k" forms keep (do not write) the carry.  */
DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

/* Sign extensions.  */
DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)
    607 
/* Shift right arithmetic by one; the shifted-out bit becomes MSR[C].  */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}
    613 
/* Shift right through carry: old MSR[C] enters bit 31, bit 0 exits
   into MSR[C].  extract2 yields the low word of (tmp:ina) >> 1.  */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Save the incoming carry before clobbering it.  */
    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);

    tcg_temp_free_i32(tmp);
}
    624 
/* Shift right logical by one; the shifted-out bit becomes MSR[C].  */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}
    630 
DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

/* Swap the two halfwords (a rotate by 16 in either direction).  */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)
    642 
    643 static bool trans_wdic(DisasContext *dc, arg_wdic *a)
    644 {
    645     /* Cache operations are nops: only check for supervisor mode.  */
    646     trap_userspace(dc, true);
    647     return true;
    648 }
    649 
/* Exclusive or.  */
DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
    652 
/*
 * Compute a type-A load/store address: ra + rb, with r0 reading as 0.
 * Returns a newly allocated temp; the caller (do_load/do_store) frees it.
 */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    /* Accesses involving r1 (ABI stack pointer) may be range-checked.  */
    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
    676 
/*
 * Compute a type-B load/store address: ra + imm, with r0 reading as 0.
 * Returns a newly allocated temp; the caller frees it.
 */
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    /* Accesses involving r1 (ABI stack pointer) may be range-checked.  */
    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
    696 
#ifndef CONFIG_USER_ONLY
/*
 * Compute the extended (up to 64-bit physical) address for the *ea
 * insns: ra supplies the high 32 bits, rb the low 32 bits; addresses
 * are truncated to cfg->addr_size bits.  Caller frees the temp.
 */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        /* 32-bit address: the high word is zero or irrelevant.  */
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            /* ret = (ra << 32) | rb  */
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
    724 
#ifndef CONFIG_USER_ONLY
/*
 * Record ESR details of this access (ESS flag, destination register,
 * store vs load, word vs halfword) in insn_start parameter 1 alongside
 * the iflags — presumably consumed by the unaligned-access exception
 * path to reconstruct ESR; the consumer is not in this file chunk.
 */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
#endif
    739 
/*
 * Emit a load of the size/endianness given by "mop" into register rd.
 * "rev" selects the byte-reversed insn forms.  Consumes "addr" (the
 * temp is freed here).  Always returns true (insn fully decoded).
 */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
    779 
/* lbu: load byte unsigned, register + register addressing.  */
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

/* lbur: reversed form; for bytes only the address is adjusted.  */
static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

/* lbuea: extended-address form.  Privileged; bypasses the MMU
   (MMU_NOMMU_IDX) and is a nop in user-only builds.  */
static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

/* lbui: immediate-offset form.  */
static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
    810 
/* lhu: load halfword unsigned, register + register addressing.  */
static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

/* lhur: byte-reversed halfword load.  */
static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

/* lhuea: extended-address form; privileged, MMU bypassed.  */
static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

/* lhui: immediate-offset form.  */
static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}
    841 
/* lw: load word, register + register addressing.  */
static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

/* lwr: byte-reversed word load.  */
static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

/* lwea: extended-address form; privileged, MMU bypassed.  */
static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

/* lwi: immediate-offset form.  */
static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
    872 
/* lwx: load word exclusive.  Records the reservation address and value
   (presumably checked by the paired store-exclusive, not in this chunk).  */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);
    tcg_temp_free(addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
    892 
/*
 * Common store generation for sb/sh/sw and variants.
 * RD is the source register, ADDR the (owned, freed here) address temp,
 * MOP the access size/endianness, MEM_INDEX the mmu translation regime,
 * and REV selects the byte-reversed variants (sbr/shr/swr).
 * Always returns true (instruction handled).
 */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            /* Flip the sub-word address bits: 3 for bytes, 2 for halves. */
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        /* Pre-record ESR data in case the access faults on alignment. */
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
    932 
    933 static bool trans_sb(DisasContext *dc, arg_typea *arg)
    934 {
    935     TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    936     return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
    937 }
    938 
    939 static bool trans_sbr(DisasContext *dc, arg_typea *arg)
    940 {
    941     TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    942     return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
    943 }
    944 
/* sbea: store byte, extended address.  Privileged. */
static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    /* User-only always runs in user mode, so the trap above fired. */
    return true;
#else
    /* Extended accesses use the untranslated (no-MMU) address space. */
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}
    957 
    958 static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
    959 {
    960     TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    961     return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
    962 }
    963 
    964 static bool trans_sh(DisasContext *dc, arg_typea *arg)
    965 {
    966     TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    967     return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
    968 }
    969 
    970 static bool trans_shr(DisasContext *dc, arg_typea *arg)
    971 {
    972     TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    973     return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
    974 }
    975 
/* shea: store half-word, extended address.  Privileged. */
static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    /* User-only always runs in user mode, so the trap above fired. */
    return true;
#else
    /* Extended accesses use the untranslated (no-MMU) address space. */
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}
    988 
    989 static bool trans_shi(DisasContext *dc, arg_typeb *arg)
    990 {
    991     TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    992     return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
    993 }
    994 
    995 static bool trans_sw(DisasContext *dc, arg_typea *arg)
    996 {
    997     TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    998     return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
    999 }
   1000 
   1001 static bool trans_swr(DisasContext *dc, arg_typea *arg)
   1002 {
   1003     TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
   1004     return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
   1005 }
   1006 
/* swea: store word, extended address.  Privileged. */
static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    /* User-only always runs in user mode, so the trap above fired. */
    return true;
#else
    /* Extended accesses use the untranslated (no-MMU) address space. */
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}
   1019 
   1020 static bool trans_swi(DisasContext *dc, arg_typeb *arg)
   1021 {
   1022     TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
   1023     return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
   1024 }
   1025 
/*
 * swx: store word conditional, paired with lwx.
 * Succeeds (MSR[C] = 0) only if the address matches the reservation and
 * the reserved location still holds the value loaded by lwx; otherwise
 * fails (MSR[C] = 1).  The reservation is cleared in either case.
 */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
    tcg_temp_free(addr);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    /* Atomically store the new value iff the old value still matches. */
    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
    tcg_temp_free_i32(tval);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
   1075 
/*
 * Mark the next instruction as executing in a delay slot.
 * For type-B (immediate-form) branches, a pending imm prefix must be
 * carried across into the delay slot via BIMM_FLAG.
 */
static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}
   1083 
/*
 * Common unconditional branch generation.
 * DEST_RB >= 0 selects a register destination, otherwise DEST_IMM is
 * used (dest_rb == 0 also takes the constant path, since r0 reads as
 * zero).  ABS makes the target absolute rather than pc-relative, DELAY
 * requests a delay slot, and LINK (if non-zero) names the register
 * receiving the return address.
 */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        /* Register target: not known at translation time. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
   1112 
/*
 * Expand both the register form (NAME) and the immediate form (NAMEI)
 * of an unconditional branch in terms of do_branch.
 */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK)                               \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg)          \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); }  \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg)         \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

/*      reg-form / imm-form   delay  abs    link */
DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
   1125 
/*
 * Conditional branch generation: branch if (rA COND 0).
 * DEST_RB >= 0 selects a register destination, otherwise DEST_IMM is a
 * pc-relative offset; DELAY requests a delay slot.
 */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot.  */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        /* Register target: not known at translation time. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget.  */
    /* Not-taken falls through to the insn after the (optional) delay slot. */
    zero = tcg_const_i32(0);
    next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(next);

    return true;
}
   1163 
/*
 * Expand the four conditional-branch forms for each condition:
 * register / immediate target, each with and without a delay slot.
 */
#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
   1180 
/* brk: break to register address.  Privileged; sets MSR[BIP]. */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    /* Link the return address into rd (writes to r0 are discarded). */
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    /* Any lwx reservation is lost across a break. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}
   1200 
/*
 * brki: break to immediate vector.
 * Userspace may only target the syscall (0x8) and debug (0x18) vectors;
 * anything else is privileged.
 */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    /* Link the return address into rd (writes to r0 are discarded). */
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Any lwx reservation is lost across a break. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        /* All vectors except the debug trap set Break-In-Progress. */
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        /* Save UM/VM into UMS/VMS and drop to privileged real mode. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
   1247 
/*
 * mbar: memory barrier.  The immediate selects the barrier flavour;
 * bit 4 additionally requests sleep (privileged), implemented by
 * halting the CPU via EXCP_HLT.
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier.  */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        TCGv_i32 tmp_1;

        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction.  */
            return true;
        }

        t_sync_flags(dc);

        /* Set cs->halted = 1; offsets hop from env back to CPUState. */
        tmp_1 = tcg_const_i32(1);
        tcg_gen_st_i32(tmp_1, cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));
        tcg_temp_free_i32(tmp_1);

        /* Resume at the following instruction once woken. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}
   1298 
/*
 * Common return instruction (rtsd/rtid/rtbd/rted) generation.
 * TO_SET carries the DRT*_FLAG identifying which MSR-restore epilogue
 * the delay-slot completion code must run; it is 0 for plain rtsd,
 * and the non-zero variants are privileged.
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    /* All return instructions have a mandatory delay slot. */
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
   1316 
/* Expand each return variant with its delay-slot MSR-restore flag. */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
   1325 
   1326 static bool trans_zero(DisasContext *dc, arg_zero *arg)
   1327 {
   1328     /* If opcode_0_illegal, trap.  */
   1329     if (dc->cfg->opcode_0_illegal) {
   1330         trap_illegal(dc, true);
   1331         return true;
   1332     }
   1333     /*
   1334      * Otherwise, this is "add r0, r0, r0".
   1335      * Continue to trans_add so that MSR[C] gets cleared.
   1336      */
   1337     return false;
   1338 }
   1339 
/*
 * Read the architectural MSR value into D: the carry flag is kept
 * separately in cpu_msr_c and must be merged back into both MSR[C]
 * and its copy MSR[CC].
 */
static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
    tcg_temp_free_i32(t);
}
   1350 
/*
 * Common implementation of msrclr/msrset: read the old MSR into rd,
 * then clear (SET == false) or set (SET == true) the bits selected by
 * the immediate mask.  Only MSR[C] may be touched from userspace.
 */
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    /* rd receives the pre-modification MSR (r0 writes are discarded). */
    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        /* MSR changed: return to the main loop after this insn. */
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}
   1387 
   1388 static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
   1389 {
   1390     return do_msrclrset(dc, arg, false);
   1391 }
   1392 
   1393 static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
   1394 {
   1395     return do_msrclrset(dc, arg, true);
   1396 }
   1397 
/*
 * mts: move rA to special register.  Privileged.
 * arg->e marks the extended encoding, only valid for TLBLO (0x1003).
 */
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    /* User-only always runs in user mode, so the trap above fired. */
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C.  */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        /* MMU registers are written through a helper. */
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    /* Special-register state changed: return to the main loop. */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
   1458 
/*
 * mfs: move special register to rD.
 * arg->e marks the extended encoding, used for the high halves of
 * 64-bit registers (EAR) and the extended PVR/TLB registers.
 */
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            /* Extended read returns the high 32 bits of the 64-bit EAR. */
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
                tcg_temp_free_i64(t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        /* Fold cpu_msr_c back into the architectural MSR value. */
        msr_read(dc, dest);
        break;
    case SR_EAR:
        /* Non-extended read returns the low 32 bits of the 64-bit EAR. */
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
            tcg_temp_free_i64(t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        /* MMU registers are read through a helper. */
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        /* PVR0-12 live in the CPU config, outside CPUMBState. */
        tcg_gen_ld_i32(dest, cpu_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
   1552 
/*
 * rtid epilogue: re-enable interrupts and restore VM/UM from the saved
 * VMS/UMS copies (which sit one bit above, hence the shift right by 1).
 */
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}
   1565 
/*
 * rtbd epilogue: clear Break-In-Progress and restore VM/UM from the
 * saved VMS/UMS copies (one bit above, hence the shift right by 1).
 */
static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}
   1577 
/*
 * rted epilogue: re-enable exceptions, clear Exception-In-Progress, and
 * restore VM/UM from the saved VMS/UMS copies (one bit above, hence the
 * shift right by 1).
 */
static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}
   1590 
   1591 /* Insns connected to FSL or AXI stream attached devices.  */
   1592 static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
   1593 {
   1594     TCGv_i32 t_id, t_ctrl;
   1595 
   1596     if (trap_userspace(dc, true)) {
   1597         return true;
   1598     }
   1599 
   1600     t_id = tcg_temp_new_i32();
   1601     if (rb) {
   1602         tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
   1603     } else {
   1604         tcg_gen_movi_i32(t_id, imm);
   1605     }
   1606 
   1607     t_ctrl = tcg_const_i32(ctrl);
   1608     gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
   1609     tcg_temp_free_i32(t_id);
   1610     tcg_temp_free_i32(t_ctrl);
   1611     return true;
   1612 }
   1613 
   1614 static bool trans_get(DisasContext *dc, arg_get *arg)
   1615 {
   1616     return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
   1617 }
   1618 
   1619 static bool trans_getd(DisasContext *dc, arg_getd *arg)
   1620 {
   1621     return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
   1622 }
   1623 
   1624 static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
   1625 {
   1626     TCGv_i32 t_id, t_ctrl;
   1627 
   1628     if (trap_userspace(dc, true)) {
   1629         return true;
   1630     }
   1631 
   1632     t_id = tcg_temp_new_i32();
   1633     if (rb) {
   1634         tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
   1635     } else {
   1636         tcg_gen_movi_i32(t_id, imm);
   1637     }
   1638 
   1639     t_ctrl = tcg_const_i32(ctrl);
   1640     gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
   1641     tcg_temp_free_i32(t_id);
   1642     tcg_temp_free_i32(t_ctrl);
   1643     return true;
   1644 }
   1645 
   1646 static bool trans_put(DisasContext *dc, arg_put *arg)
   1647 {
   1648     return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
   1649 }
   1650 
   1651 static bool trans_putd(DisasContext *dc, arg_putd *arg)
   1652 {
   1653     return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
   1654 }
   1655 
/* Translator hook: initialize per-TB translation state. */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    /* cs_base carries any imm-prefix value live at TB entry. */
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    /* If we enter mid delay slot, the branch is unconditionally pending. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Cap the TB so it cannot cross a page boundary. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
   1674 
/* Translator hook: nothing to do at TB start for MicroBlaze. */
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
   1678 
/*
 * Translator hook: record pc and tb_flags for this insn (masking the
 * transient MSR bits), and remember the op for later back-patching.
 */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}
   1686 
/*
 * Translator hook: decode and translate one instruction, then perform
 * the delay-slot bookkeeping — completing any pending branch and any
 * rtid/rtbd/rted MSR restore once the slot insn has been emitted.
 */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    /* Drop any temp created by reg_for_read/write standing in for r0. */
    if (dc->r0) {
        tcg_temp_free_i32(dc->r0);
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    /* Age the one-insn flags and latch those set by this insn. */
    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
   1765 
/*
 * TranslatorOps.tb_stop hook: finish the translation block by emitting
 * the code that updates pc and either chains to the next TB or returns
 * to the main loop, according to how translation ended (dc->base.is_jmp).
 */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    /* Write back any lazily-tracked iflags state before leaving the TB. */
    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        /* Hit the insn limit: chain directly to the next sequential TB. */
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        /* pc already updated dynamically; fall through to the exit below. */
        break;
    case DISAS_EXIT_NEXT:
        /* Non-pc CPU state changed; resume at the next sequential insn. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        /* Non-pc CPU state changed in a delay slot; resume at btarget. */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        /* Direct jumps can use goto_tb only when CF_NO_GOTO_TB is clear. */
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                /* Not-taken path chains to the fall-through address. */
                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        /* Single-stepping: trap to the debugger instead of exiting. */
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
   1835 
   1836 static void mb_tr_disas_log(const DisasContextBase *dcb,
   1837                             CPUState *cs, FILE *logfile)
   1838 {
   1839     fprintf(logfile, "IN: %s\n", lookup_symbol(dcb->pc_first));
   1840     target_disas(logfile, cs, dcb->pc_first, dcb->tb->size);
   1841 }
   1842 
/* Translator callbacks for MicroBlaze, consumed by translator_loop(). */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};
   1851 
   1852 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
   1853                            target_ulong pc, void *host_pc)
   1854 {
   1855     DisasContext dc;
   1856     translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
   1857 }
   1858 
   1859 void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
   1860 {
   1861     MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
   1862     CPUMBState *env = &cpu->env;
   1863     uint32_t iflags;
   1864     int i;
   1865 
   1866     qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
   1867                  env->pc, env->msr,
   1868                  (env->msr & MSR_UM) ? "user" : "kernel",
   1869                  (env->msr & MSR_UMS) ? "user" : "kernel",
   1870                  (bool)(env->msr & MSR_EIP),
   1871                  (bool)(env->msr & MSR_IE));
   1872 
   1873     iflags = env->iflags;
   1874     qemu_fprintf(f, "iflags: 0x%08x", iflags);
   1875     if (iflags & IMM_FLAG) {
   1876         qemu_fprintf(f, " IMM(0x%08x)", env->imm);
   1877     }
   1878     if (iflags & BIMM_FLAG) {
   1879         qemu_fprintf(f, " BIMM");
   1880     }
   1881     if (iflags & D_FLAG) {
   1882         qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
   1883     }
   1884     if (iflags & DRTI_FLAG) {
   1885         qemu_fprintf(f, " DRTI");
   1886     }
   1887     if (iflags & DRTE_FLAG) {
   1888         qemu_fprintf(f, " DRTE");
   1889     }
   1890     if (iflags & DRTB_FLAG) {
   1891         qemu_fprintf(f, " DRTB");
   1892     }
   1893     if (iflags & ESR_ESS_FLAG) {
   1894         qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
   1895     }
   1896 
   1897     qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
   1898                  "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
   1899                  env->esr, env->fsr, env->btr, env->edr,
   1900                  env->ear, env->slr, env->shr);
   1901 
   1902     for (i = 0; i < 32; i++) {
   1903         qemu_fprintf(f, "r%2.2d=%08x%c",
   1904                      i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
   1905     }
   1906     qemu_fprintf(f, "\n");
   1907 }
   1908 
/*
 * One-time translator initialization: create the TCG globals that
 * mirror the CPUMBState fields (GPRs r1-r31 plus the special
 * registers), and the target-sized reservation address.
 */
void mb_tcg_init(void)
{
/* R(X): GPR rX backed by regs[X]; SP(X): special global cpu_X backed by X. */
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    /* Register each table entry as a 32-bit TCG global at its env offset. */
    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
          tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    /* res_addr is target_ulong-sized, hence the untyped tcg_global_mem_new. */
    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}