qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

trans_rvk.c.inc (11518B)


/*
 * RISC-V translation routines for the Zk[nd,ne,nh,sed,sh] Standard Extension.
 *
 * Copyright (c) 2021 Ruibo Lu, luruibo2000@163.com
 * Copyright (c) 2021 Zewen Ye, lustrew@foxmail.com
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

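/*
 * Each REQUIRE_* macro returns false from the calling trans_* function
 * when the corresponding extension is missing from the CPU config, which
 * makes the instruction decode as illegal.
 */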
#define REQUIRE_ZKND(ctx) do {                  \
    if (!ctx->cfg_ptr->ext_zknd) {              \
        return false;                           \
    }                                           \
} while (0)

#define REQUIRE_ZKNE(ctx) do {                  \
    if (!ctx->cfg_ptr->ext_zkne) {              \
        return false;                           \
    }                                           \
} while (0)

#define REQUIRE_ZKNH(ctx) do {                  \
    if (!ctx->cfg_ptr->ext_zknh) {              \
        return false;                           \
    }                                           \
} while (0)

#define REQUIRE_ZKSED(ctx) do {                 \
    if (!ctx->cfg_ptr->ext_zksed) {             \
        return false;                           \
    }                                           \
} while (0)

#define REQUIRE_ZKSH(ctx) do {                  \
    if (!ctx->cfg_ptr->ext_zksh) {              \
        return false;                           \
    }                                           \
} while (0)

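/*
 * Common translator for the 32-bit AES and SM4 instructions: all of them
 * take rs1, rs2 and a byte-select immediate (decoded into a->shamt), and
 * the actual transformation is done entirely in a helper.
 */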
static bool gen_aes32_sm4(DisasContext *ctx, arg_k_aes *a,
                          void (*func)(TCGv, TCGv, TCGv, TCGv))
{
    TCGv shamt = tcg_constant_tl(a->shamt);
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);

    func(dest, src1, src2, shamt);
    gen_set_gpr(ctx, a->rd, dest);
    return true;
}

static bool trans_aes32esmi(DisasContext *ctx, arg_aes32esmi *a)
{
    REQUIRE_32BIT(ctx);
    REQUIRE_ZKNE(ctx);
    return gen_aes32_sm4(ctx, a, gen_helper_aes32esmi);
}

static bool trans_aes32esi(DisasContext *ctx, arg_aes32esi *a)
{
    REQUIRE_32BIT(ctx);
    REQUIRE_ZKNE(ctx);
    return gen_aes32_sm4(ctx, a, gen_helper_aes32esi);
}

static bool trans_aes32dsmi(DisasContext *ctx, arg_aes32dsmi *a)
{
    REQUIRE_32BIT(ctx);
    REQUIRE_ZKND(ctx);
    return gen_aes32_sm4(ctx, a, gen_helper_aes32dsmi);
}

static bool trans_aes32dsi(DisasContext *ctx, arg_aes32dsi *a)
{
    REQUIRE_32BIT(ctx);
    REQUIRE_ZKND(ctx);
    return gen_aes32_sm4(ctx, a, gen_helper_aes32dsi);
}

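/*
 * The RV64 AES instructions operate on full 64-bit registers, so they can
 * reuse the generic gen_arith/gen_unary plumbing and do all of the work in
 * their helpers.
 */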
static bool trans_aes64es(DisasContext *ctx, arg_aes64es *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_ZKNE(ctx);
    return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64es, NULL);
}

static bool trans_aes64esm(DisasContext *ctx, arg_aes64esm *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_ZKNE(ctx);
    return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64esm, NULL);
}

static bool trans_aes64ds(DisasContext *ctx, arg_aes64ds *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_ZKND(ctx);
    return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64ds, NULL);
}

static bool trans_aes64dsm(DisasContext *ctx, arg_aes64dsm *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_ZKND(ctx);
    return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64dsm, NULL);
}

static bool trans_aes64ks2(DisasContext *ctx, arg_aes64ks2 *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_EITHER_EXT(ctx, zknd, zkne);
    return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64ks2, NULL);
}

static bool trans_aes64ks1i(DisasContext *ctx, arg_aes64ks1i *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_EITHER_EXT(ctx, zknd, zkne);

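    /* Per the scalar crypto spec, round-number values 0xB-0xF are reserved. */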
    if (a->imm > 0xA) {
        return false;
    }

    return gen_arith_imm_tl(ctx, a, EXT_NONE, gen_helper_aes64ks1i, NULL);
}

static bool trans_aes64im(DisasContext *ctx, arg_aes64im *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_ZKND(ctx);
    return gen_unary(ctx, a, EXT_NONE, gen_helper_aes64im);
}

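/*
 * SHA-256 sigma/sum functions on the low 32 bits of rs1:
 * dest = ROTR(x, num1) ^ ROTR(x, num2) ^ func(x, num3), where func is a
 * logical shift for the sigma instructions and a rotate for the sums.
 */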
static bool gen_sha256(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
                       void (*func)(TCGv_i32, TCGv_i32, int32_t),
                       int32_t num1, int32_t num2, int32_t num3)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, src1);
    tcg_gen_rotri_i32(t1, t0, num1);
    tcg_gen_rotri_i32(t2, t0, num2);
    tcg_gen_xor_i32(t1, t1, t2);
    func(t2, t0, num3);
    tcg_gen_xor_i32(t1, t1, t2);
    tcg_gen_ext_i32_tl(dest, t1);

    gen_set_gpr(ctx, a->rd, dest);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    return true;
}

static bool trans_sha256sig0(DisasContext *ctx, arg_sha256sig0 *a)
{
    REQUIRE_ZKNH(ctx);
    return gen_sha256(ctx, a, EXT_NONE, tcg_gen_shri_i32, 7, 18, 3);
}

static bool trans_sha256sig1(DisasContext *ctx, arg_sha256sig1 *a)
{
    REQUIRE_ZKNH(ctx);
    return gen_sha256(ctx, a, EXT_NONE, tcg_gen_shri_i32, 17, 19, 10);
}

static bool trans_sha256sum0(DisasContext *ctx, arg_sha256sum0 *a)
{
    REQUIRE_ZKNH(ctx);
    return gen_sha256(ctx, a, EXT_NONE, tcg_gen_rotri_i32, 2, 13, 22);
}

static bool trans_sha256sum1(DisasContext *ctx, arg_sha256sum1 *a)
{
    REQUIRE_ZKNH(ctx);
    return gen_sha256(ctx, a, EXT_NONE, tcg_gen_rotri_i32, 6, 11, 25);
}

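/*
 * RV32 SHA-512 instructions: the 64-bit operand is formed by concatenating
 * rs1 (low half) and rs2 (high half); the result is the low 32 bits of
 * func1(x, num1) ^ func2(x, num2) ^ ROTR(x, num3).
 */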
static bool gen_sha512_rv32(DisasContext *ctx, arg_r *a, DisasExtend ext,
                            void (*func1)(TCGv_i64, TCGv_i64, int64_t),
                            void (*func2)(TCGv_i64, TCGv_i64, int64_t),
                            int64_t num1, int64_t num2, int64_t num3)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);
    TCGv src2 = get_gpr(ctx, a->rs2, ext);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t0, src1, src2);
    func1(t1, t0, num1);
    func2(t2, t0, num2);
    tcg_gen_xor_i64(t1, t1, t2);
    tcg_gen_rotri_i64(t2, t0, num3);
    tcg_gen_xor_i64(t1, t1, t2);
    tcg_gen_trunc_i64_tl(dest, t1);

    gen_set_gpr(ctx, a->rd, dest);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return true;
}

static bool trans_sha512sum0r(DisasContext *ctx, arg_sha512sum0r *a)
{
    REQUIRE_32BIT(ctx);
    REQUIRE_ZKNH(ctx);
    return gen_sha512_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64,
                           tcg_gen_rotli_i64, 25, 30, 28);
}

static bool trans_sha512sum1r(DisasContext *ctx, arg_sha512sum1r *a)
{
    REQUIRE_32BIT(ctx);
    REQUIRE_ZKNH(ctx);
    return gen_sha512_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64,
                           tcg_gen_rotri_i64, 23, 14, 18);
}

static bool trans_sha512sig0l(DisasContext *ctx, arg_sha512sig0l *a)
{
    REQUIRE_32BIT(ctx);
    REQUIRE_ZKNH(ctx);
    return gen_sha512_rv32(ctx, a, EXT_NONE, tcg_gen_rotri_i64,
                           tcg_gen_rotri_i64, 1, 7, 8);
}

static bool trans_sha512sig1l(DisasContext *ctx, arg_sha512sig1l *a)
{
    REQUIRE_32BIT(ctx);
    REQUIRE_ZKNH(ctx);
    return gen_sha512_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64,
                           tcg_gen_rotri_i64, 3, 6, 19);
}

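/*
 * Same pattern as gen_sha512_rv32, except that the middle term is a
 * logical right shift of the zero-extended low half of the concatenated
 * value, as the "high word" instruction forms require.
 */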
static bool gen_sha512h_rv32(DisasContext *ctx, arg_r *a, DisasExtend ext,
                             void (*func)(TCGv_i64, TCGv_i64, int64_t),
                             int64_t num1, int64_t num2, int64_t num3)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);
    TCGv src2 = get_gpr(ctx, a->rs2, ext);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t0, src1, src2);
    func(t1, t0, num1);
    tcg_gen_ext32u_i64(t2, t0);
    tcg_gen_shri_i64(t2, t2, num2);
    tcg_gen_xor_i64(t1, t1, t2);
    tcg_gen_rotri_i64(t2, t0, num3);
    tcg_gen_xor_i64(t1, t1, t2);
    tcg_gen_trunc_i64_tl(dest, t1);

    gen_set_gpr(ctx, a->rd, dest);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return true;
}

static bool trans_sha512sig0h(DisasContext *ctx, arg_sha512sig0h *a)
{
    REQUIRE_32BIT(ctx);
    REQUIRE_ZKNH(ctx);
    return gen_sha512h_rv32(ctx, a, EXT_NONE, tcg_gen_rotri_i64, 1, 7, 8);
}

static bool trans_sha512sig1h(DisasContext *ctx, arg_sha512sig1h *a)
{
    REQUIRE_32BIT(ctx);
    REQUIRE_ZKNH(ctx);
    return gen_sha512h_rv32(ctx, a, EXT_NONE, tcg_gen_rotli_i64, 3, 6, 19);
}

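/*
 * RV64 SHA-512 sigma/sum functions, the same shape as gen_sha256 but on
 * the full 64-bit register:
 * dest = ROTR(x, num1) ^ ROTR(x, num2) ^ func(x, num3),
 * with func a logical shift for sigma and a rotate for sum.
 */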
static bool gen_sha512_rv64(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
                            void (*func)(TCGv_i64, TCGv_i64, int64_t),
                            int64_t num1, int64_t num2, int64_t num3)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, ext);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t0, src1);
    tcg_gen_rotri_i64(t1, t0, num1);
    tcg_gen_rotri_i64(t2, t0, num2);
    tcg_gen_xor_i64(t1, t1, t2);
    func(t2, t0, num3);
    tcg_gen_xor_i64(t1, t1, t2);
    tcg_gen_trunc_i64_tl(dest, t1);

    gen_set_gpr(ctx, a->rd, dest);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return true;
}

static bool trans_sha512sig0(DisasContext *ctx, arg_sha512sig0 *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_ZKNH(ctx);
    return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_shri_i64, 1, 8, 7);
}

static bool trans_sha512sig1(DisasContext *ctx, arg_sha512sig1 *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_ZKNH(ctx);
    return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_shri_i64, 19, 61, 6);
}

static bool trans_sha512sum0(DisasContext *ctx, arg_sha512sum0 *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_ZKNH(ctx);
    return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_rotri_i64, 28, 34, 39);
}

static bool trans_sha512sum1(DisasContext *ctx, arg_sha512sum1 *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_ZKNH(ctx);
    return gen_sha512_rv64(ctx, a, EXT_NONE, tcg_gen_rotri_i64, 14, 18, 41);
}

/* SM3 */
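/*
 * SM3 P0/P1 permutations: P(x) = x ^ ROTL(x, b) ^ ROTL(x, c), computed on
 * the low 32 bits of rs1 and sign-extended into rd. P0 uses (9, 17) and
 * P1 uses (15, 23).
 */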
static bool gen_sm3(DisasContext *ctx, arg_r2 *a, int32_t b, int32_t c)
{
    TCGv dest = dest_gpr(ctx, a->rd);
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, src1);
    tcg_gen_rotli_i32(t1, t0, b);
    tcg_gen_xor_i32(t1, t0, t1);
    tcg_gen_rotli_i32(t0, t0, c);
    tcg_gen_xor_i32(t1, t1, t0);
    tcg_gen_ext_i32_tl(dest, t1);
    gen_set_gpr(ctx, a->rd, dest);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    return true;
}

static bool trans_sm3p0(DisasContext *ctx, arg_sm3p0 *a)
{
    REQUIRE_ZKSH(ctx);
    return gen_sm3(ctx, a, 9, 17);
}

static bool trans_sm3p1(DisasContext *ctx, arg_sm3p1 *a)
{
    REQUIRE_ZKSH(ctx);
    return gen_sm3(ctx, a, 15, 23);
}

/* SM4 */
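/*
 * The SM4 instructions share the operand format of the 32-bit AES
 * instructions, so they reuse gen_aes32_sm4 and differ only in the helper.
 */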
static bool trans_sm4ed(DisasContext *ctx, arg_sm4ed *a)
{
    REQUIRE_ZKSED(ctx);
    return gen_aes32_sm4(ctx, a, gen_helper_sm4ed);
}

static bool trans_sm4ks(DisasContext *ctx, arg_sm4ks *a)
{
    REQUIRE_ZKSED(ctx);
    return gen_aes32_sm4(ctx, a, gen_helper_sm4ks);
}