qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git
Log | Files | Refs | Submodules | LICENSE

trans_rvf.c.inc (15025B)


      1 /*
      2  * RISC-V translation routines for the RV64F Standard Extension.
      3  *
      4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
      5  * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
      6  *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
      7  *
      8  * This program is free software; you can redistribute it and/or modify it
      9  * under the terms and conditions of the GNU General Public License,
     10  * version 2 or later, as published by the Free Software Foundation.
     11  *
     12  * This program is distributed in the hope it will be useful, but WITHOUT
     13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
     14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
     15  * more details.
     16  *
     17  * You should have received a copy of the GNU General Public License along with
     18  * this program.  If not, see <http://www.gnu.org/licenses/>.
     19  */
     20 
     21 #define REQUIRE_FPU do {\
     22     if (ctx->mstatus_fs == 0) \
     23         if (!ctx->cfg_ptr->ext_zfinx) \
     24             return false; \
     25 } while (0)
     26 
/*
 * Accept the instruction when Zfinx is enabled; otherwise fall back to
 * requiring the RVF extension (REQUIRE_EXT returns false from the
 * enclosing trans_* function when RVF is absent).
 */
#define REQUIRE_ZFINX_OR_F(ctx) do {\
    if (!ctx->cfg_ptr->ext_zfinx) { \
        REQUIRE_EXT(ctx, RVF); \
    } \
} while (0)
     32 
     33 static bool trans_flw(DisasContext *ctx, arg_flw *a)
     34 {
     35     TCGv_i64 dest;
     36     TCGv addr;
     37 
     38     REQUIRE_FPU;
     39     REQUIRE_EXT(ctx, RVF);
     40 
     41     addr = get_address(ctx, a->rs1, a->imm);
     42     dest = cpu_fpr[a->rd];
     43     tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL);
     44     gen_nanbox_s(dest, dest);
     45 
     46     mark_fs_dirty(ctx);
     47     return true;
     48 }
     49 
     50 static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
     51 {
     52     TCGv addr;
     53 
     54     REQUIRE_FPU;
     55     REQUIRE_EXT(ctx, RVF);
     56 
     57     addr = get_address(ctx, a->rs1, a->imm);
     58     tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL);
     59     return true;
     60 }
     61 
     62 static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a)
     63 {
     64     REQUIRE_FPU;
     65     REQUIRE_ZFINX_OR_F(ctx);
     66 
     67     TCGv_i64 dest = dest_fpr(ctx, a->rd);
     68     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
     69     TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
     70     TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
     71 
     72     gen_set_rm(ctx, a->rm);
     73     gen_helper_fmadd_s(dest, cpu_env, src1, src2, src3);
     74     gen_set_fpr_hs(ctx, a->rd, dest);
     75     mark_fs_dirty(ctx);
     76     return true;
     77 }
     78 
     79 static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a)
     80 {
     81     REQUIRE_FPU;
     82     REQUIRE_ZFINX_OR_F(ctx);
     83 
     84     TCGv_i64 dest = dest_fpr(ctx, a->rd);
     85     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
     86     TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
     87     TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
     88 
     89     gen_set_rm(ctx, a->rm);
     90     gen_helper_fmsub_s(dest, cpu_env, src1, src2, src3);
     91     gen_set_fpr_hs(ctx, a->rd, dest);
     92     mark_fs_dirty(ctx);
     93     return true;
     94 }
     95 
     96 static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a)
     97 {
     98     REQUIRE_FPU;
     99     REQUIRE_ZFINX_OR_F(ctx);
    100 
    101     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    102     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    103     TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    104     TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
    105 
    106     gen_set_rm(ctx, a->rm);
    107     gen_helper_fnmsub_s(dest, cpu_env, src1, src2, src3);
    108     gen_set_fpr_hs(ctx, a->rd, dest);
    109     mark_fs_dirty(ctx);
    110     return true;
    111 }
    112 
    113 static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a)
    114 {
    115     REQUIRE_FPU;
    116     REQUIRE_ZFINX_OR_F(ctx);
    117 
    118     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    119     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    120     TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    121     TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
    122 
    123     gen_set_rm(ctx, a->rm);
    124     gen_helper_fnmadd_s(dest, cpu_env, src1, src2, src3);
    125     gen_set_fpr_hs(ctx, a->rd, dest);
    126     mark_fs_dirty(ctx);
    127     return true;
    128 }
    129 
    130 static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a)
    131 {
    132     REQUIRE_FPU;
    133     REQUIRE_ZFINX_OR_F(ctx);
    134 
    135     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    136     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    137     TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    138 
    139     gen_set_rm(ctx, a->rm);
    140     gen_helper_fadd_s(dest, cpu_env, src1, src2);
    141     gen_set_fpr_hs(ctx, a->rd, dest);
    142     mark_fs_dirty(ctx);
    143     return true;
    144 }
    145 
    146 static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
    147 {
    148     REQUIRE_FPU;
    149     REQUIRE_ZFINX_OR_F(ctx);
    150 
    151     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    152     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    153     TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    154 
    155     gen_set_rm(ctx, a->rm);
    156     gen_helper_fsub_s(dest, cpu_env, src1, src2);
    157     gen_set_fpr_hs(ctx, a->rd, dest);
    158     mark_fs_dirty(ctx);
    159     return true;
    160 }
    161 
    162 static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
    163 {
    164     REQUIRE_FPU;
    165     REQUIRE_ZFINX_OR_F(ctx);
    166 
    167     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    168     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    169     TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    170 
    171     gen_set_rm(ctx, a->rm);
    172     gen_helper_fmul_s(dest, cpu_env, src1, src2);
    173     gen_set_fpr_hs(ctx, a->rd, dest);
    174     mark_fs_dirty(ctx);
    175     return true;
    176 }
    177 
    178 static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
    179 {
    180     REQUIRE_FPU;
    181     REQUIRE_ZFINX_OR_F(ctx);
    182 
    183     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    184     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    185     TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    186 
    187     gen_set_rm(ctx, a->rm);
    188     gen_helper_fdiv_s(dest, cpu_env, src1, src2);
    189     gen_set_fpr_hs(ctx, a->rd, dest);
    190     mark_fs_dirty(ctx);
    191     return true;
    192 }
    193 
    194 static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
    195 {
    196     REQUIRE_FPU;
    197     REQUIRE_ZFINX_OR_F(ctx);
    198 
    199     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    200     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    201 
    202     gen_set_rm(ctx, a->rm);
    203     gen_helper_fsqrt_s(dest, cpu_env, src1);
    204     gen_set_fpr_hs(ctx, a->rd, dest);
    205     mark_fs_dirty(ctx);
    206     return true;
    207 }
    208 
/*
 * FSGNJ.S: rd = rs1's magnitude with rs2's sign bit.
 * The rs1 == rs2 encoding is the canonical FMOV.S.
 */
static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a)
{
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);

    if (a->rs1 == a->rs2) { /* FMOV */
        if (!ctx->cfg_ptr->ext_zfinx) {
            /* Normal 'F': canonicalize a mis-nanboxed input while moving. */
            gen_check_nanbox_s(dest, src1);
        } else {
            /* Zfinx keeps results sign-extended in the GPR, not nanboxed. */
            tcg_gen_ext32s_i64(dest, src1);
        }
    } else { /* FSGNJ */
        TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);

        if (!ctx->cfg_ptr->ext_zfinx) {
            TCGv_i64 rs1 = tcg_temp_new_i64();
            TCGv_i64 rs2 = tcg_temp_new_i64();
            gen_check_nanbox_s(rs1, src1);
            gen_check_nanbox_s(rs2, src2);

            /* This formulation retains the nanboxing of rs2 in normal 'F'. */
            /* Copy rs1 bits [30:0] into rs2; rs2's bit 31 and upper half stay. */
            tcg_gen_deposit_i64(dest, rs2, rs1, 0, 31);

            tcg_temp_free_i64(rs1);
            tcg_temp_free_i64(rs2);
        } else {
            tcg_gen_deposit_i64(dest, src2, src1, 0, 31);
            /* Zfinx: sign-extend the 32-bit result instead of nanboxing. */
            tcg_gen_ext32s_i64(dest, dest);
        }
    }
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}
    246 
/*
 * FSGNJN.S: rd = rs1's magnitude with the *inverse* of rs2's sign bit.
 * The rs1 == rs2 encoding is the canonical FNEG.S.
 */
static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
{
    TCGv_i64 rs1, rs2, mask;

    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);

    rs1 = tcg_temp_new_i64();
    if (!ctx->cfg_ptr->ext_zfinx) {
        /* Normal 'F': canonicalize a mis-nanboxed input first. */
        gen_check_nanbox_s(rs1, src1);
    } else {
        tcg_gen_mov_i64(rs1, src1);
    }
    if (a->rs1 == a->rs2) { /* FNEG */
        /* Flip only the single-precision sign bit (bit 31). */
        tcg_gen_xori_i64(dest, rs1, MAKE_64BIT_MASK(31, 1));
    } else {
        TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
        rs2 = tcg_temp_new_i64();
        if (!ctx->cfg_ptr->ext_zfinx) {
            gen_check_nanbox_s(rs2, src2);
        } else {
            tcg_gen_mov_i64(rs2, src2);
        }

        /*
         * Replace bit 31 in rs1 with inverse in rs2.
         * This formulation retains the nanboxing of rs1.
         */
        /* mask = all bits set except bit 31 */
        mask = tcg_constant_i64(~MAKE_64BIT_MASK(31, 1));
        /* nor(rs2, mask) isolates the inverted sign bit of rs2 */
        tcg_gen_nor_i64(rs2, rs2, mask);
        tcg_gen_and_i64(dest, mask, rs1);
        tcg_gen_or_i64(dest, dest, rs2);

        tcg_temp_free_i64(rs2);
    }
    /* sign-extended instead of nanboxing for result if zfinx is enabled */
    if (ctx->cfg_ptr->ext_zfinx) {
        tcg_gen_ext32s_i64(dest, dest);
    }
    gen_set_fpr_hs(ctx, a->rd, dest);
    tcg_temp_free_i64(rs1);
    mark_fs_dirty(ctx);
    return true;
}
    294 
/*
 * FSGNJX.S: rd = rs1 with its sign bit XORed with rs2's sign bit.
 * The rs1 == rs2 encoding is the canonical FABS.S.
 */
static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a)
{
    TCGv_i64 rs1, rs2;

    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv_i64 dest = dest_fpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    rs1 = tcg_temp_new_i64();

    if (!ctx->cfg_ptr->ext_zfinx) {
        /* Normal 'F': canonicalize a mis-nanboxed input first. */
        gen_check_nanbox_s(rs1, src1);
    } else {
        tcg_gen_mov_i64(rs1, src1);
    }

    if (a->rs1 == a->rs2) { /* FABS */
        /* Clear only the single-precision sign bit (bit 31). */
        tcg_gen_andi_i64(dest, rs1, ~MAKE_64BIT_MASK(31, 1));
    } else {
        TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
        rs2 = tcg_temp_new_i64();

        if (!ctx->cfg_ptr->ext_zfinx) {
            gen_check_nanbox_s(rs2, src2);
        } else {
            tcg_gen_mov_i64(rs2, src2);
        }

        /*
         * Xor bit 31 in rs1 with that in rs2.
         * This formulation retains the nanboxing of rs1.
         */
        tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(31, 1));
        tcg_gen_xor_i64(dest, rs1, dest);

        tcg_temp_free_i64(rs2);
    }
    /* sign-extended instead of nanboxing for result if zfinx is enabled */
    if (ctx->cfg_ptr->ext_zfinx) {
        tcg_gen_ext32s_i64(dest, dest);
    }
    tcg_temp_free_i64(rs1);
    gen_set_fpr_hs(ctx, a->rd, dest);
    mark_fs_dirty(ctx);
    return true;
}
    342 
    343 static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
    344 {
    345     REQUIRE_FPU;
    346     REQUIRE_ZFINX_OR_F(ctx);
    347 
    348     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    349     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    350     TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    351 
    352     gen_helper_fmin_s(dest, cpu_env, src1, src2);
    353     gen_set_fpr_hs(ctx, a->rd, dest);
    354     mark_fs_dirty(ctx);
    355     return true;
    356 }
    357 
    358 static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
    359 {
    360     REQUIRE_FPU;
    361     REQUIRE_ZFINX_OR_F(ctx);
    362 
    363     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    364     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    365     TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    366 
    367     gen_helper_fmax_s(dest, cpu_env, src1, src2);
    368     gen_set_fpr_hs(ctx, a->rd, dest);
    369     mark_fs_dirty(ctx);
    370     return true;
    371 }
    372 
    373 static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a)
    374 {
    375     REQUIRE_FPU;
    376     REQUIRE_ZFINX_OR_F(ctx);
    377 
    378     TCGv dest = dest_gpr(ctx, a->rd);
    379     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    380 
    381     gen_set_rm(ctx, a->rm);
    382     gen_helper_fcvt_w_s(dest, cpu_env, src1);
    383     gen_set_gpr(ctx, a->rd, dest);
    384     return true;
    385 }
    386 
    387 static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a)
    388 {
    389     REQUIRE_FPU;
    390     REQUIRE_ZFINX_OR_F(ctx);
    391 
    392     TCGv dest = dest_gpr(ctx, a->rd);
    393     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    394 
    395     gen_set_rm(ctx, a->rm);
    396     gen_helper_fcvt_wu_s(dest, cpu_env, src1);
    397     gen_set_gpr(ctx, a->rd, dest);
    398     return true;
    399 }
    400 
/* FMV.X.W: copy the raw 32-bit FP bit pattern of rs1 into GPR rd. */
static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a)
{
    /* NOTE: This was FMV.X.S in an earlier version of the ISA spec! */
    REQUIRE_FPU;
    REQUIRE_ZFINX_OR_F(ctx);

    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
#if defined(TARGET_RISCV64)
    /* RV64: the 32-bit pattern is sign-extended into the 64-bit GPR. */
    tcg_gen_ext32s_tl(dest, src1);
#else
    /* RV32: take the low 32 bits of the 64-bit FP register. */
    tcg_gen_extrl_i64_i32(dest, src1);
#endif

    gen_set_gpr(ctx, a->rd, dest);
    return true;
}
    418 
    419 static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a)
    420 {
    421     REQUIRE_FPU;
    422     REQUIRE_ZFINX_OR_F(ctx);
    423 
    424     TCGv dest = dest_gpr(ctx, a->rd);
    425     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    426     TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    427 
    428     gen_helper_feq_s(dest, cpu_env, src1, src2);
    429     gen_set_gpr(ctx, a->rd, dest);
    430     return true;
    431 }
    432 
    433 static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a)
    434 {
    435     REQUIRE_FPU;
    436     REQUIRE_ZFINX_OR_F(ctx);
    437 
    438     TCGv dest = dest_gpr(ctx, a->rd);
    439     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    440     TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    441 
    442     gen_helper_flt_s(dest, cpu_env, src1, src2);
    443     gen_set_gpr(ctx, a->rd, dest);
    444     return true;
    445 }
    446 
    447 static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a)
    448 {
    449     REQUIRE_FPU;
    450     REQUIRE_ZFINX_OR_F(ctx);
    451 
    452     TCGv dest = dest_gpr(ctx, a->rd);
    453     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    454     TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
    455 
    456     gen_helper_fle_s(dest, cpu_env, src1, src2);
    457     gen_set_gpr(ctx, a->rd, dest);
    458     return true;
    459 }
    460 
    461 static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
    462 {
    463     REQUIRE_FPU;
    464     REQUIRE_ZFINX_OR_F(ctx);
    465 
    466     TCGv dest = dest_gpr(ctx, a->rd);
    467     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    468 
    469     gen_helper_fclass_s(dest, cpu_env, src1);
    470     gen_set_gpr(ctx, a->rd, dest);
    471     return true;
    472 }
    473 
    474 static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a)
    475 {
    476     REQUIRE_FPU;
    477     REQUIRE_ZFINX_OR_F(ctx);
    478 
    479     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    480     TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
    481 
    482     gen_set_rm(ctx, a->rm);
    483     gen_helper_fcvt_s_w(dest, cpu_env, src);
    484     gen_set_fpr_hs(ctx, a->rd, dest);
    485     mark_fs_dirty(ctx);
    486     return true;
    487 }
    488 
    489 static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a)
    490 {
    491     REQUIRE_FPU;
    492     REQUIRE_ZFINX_OR_F(ctx);
    493 
    494     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    495     TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
    496 
    497     gen_set_rm(ctx, a->rm);
    498     gen_helper_fcvt_s_wu(dest, cpu_env, src);
    499     gen_set_fpr_hs(ctx, a->rd, dest);
    500     mark_fs_dirty(ctx);
    501     return true;
    502 }
    503 
    504 static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a)
    505 {
    506     /* NOTE: This was FMV.S.X in an earlier version of the ISA spec! */
    507     REQUIRE_FPU;
    508     REQUIRE_ZFINX_OR_F(ctx);
    509 
    510     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    511     TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
    512 
    513     tcg_gen_extu_tl_i64(dest, src);
    514     gen_nanbox_s(dest, dest);
    515     gen_set_fpr_hs(ctx, a->rd, dest);
    516     mark_fs_dirty(ctx);
    517     return true;
    518 }
    519 
    520 static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a)
    521 {
    522     REQUIRE_64BIT(ctx);
    523     REQUIRE_FPU;
    524     REQUIRE_ZFINX_OR_F(ctx);
    525 
    526     TCGv dest = dest_gpr(ctx, a->rd);
    527     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    528 
    529     gen_set_rm(ctx, a->rm);
    530     gen_helper_fcvt_l_s(dest, cpu_env, src1);
    531     gen_set_gpr(ctx, a->rd, dest);
    532     return true;
    533 }
    534 
    535 static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a)
    536 {
    537     REQUIRE_64BIT(ctx);
    538     REQUIRE_FPU;
    539     REQUIRE_ZFINX_OR_F(ctx);
    540 
    541     TCGv dest = dest_gpr(ctx, a->rd);
    542     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
    543 
    544     gen_set_rm(ctx, a->rm);
    545     gen_helper_fcvt_lu_s(dest, cpu_env, src1);
    546     gen_set_gpr(ctx, a->rd, dest);
    547     return true;
    548 }
    549 
    550 static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a)
    551 {
    552     REQUIRE_64BIT(ctx);
    553     REQUIRE_FPU;
    554     REQUIRE_ZFINX_OR_F(ctx);
    555 
    556     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    557     TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
    558 
    559     gen_set_rm(ctx, a->rm);
    560     gen_helper_fcvt_s_l(dest, cpu_env, src);
    561     gen_set_fpr_hs(ctx, a->rd, dest);
    562     mark_fs_dirty(ctx);
    563     return true;
    564 }
    565 
    566 static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a)
    567 {
    568     REQUIRE_64BIT(ctx);
    569     REQUIRE_FPU;
    570     REQUIRE_ZFINX_OR_F(ctx);
    571 
    572     TCGv_i64 dest = dest_fpr(ctx, a->rd);
    573     TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
    574 
    575     gen_set_rm(ctx, a->rm);
    576     gen_helper_fcvt_s_lu(dest, cpu_env, src);
    577     gen_set_fpr_hs(ctx, a->rd, dest);
    578     mark_fs_dirty(ctx);
    579     return true;
    580 }