duckstation

duckstation, archived from the revision just before upstream relicensed it as proprietary software; this version is the libre one
git clone https://git.neptards.moe/u3shit/duckstation.git

assembler_vector.cpp (68416B)


#include <biscuit/assert.hpp>
#include <biscuit/assembler.hpp>

namespace biscuit {
namespace {

enum class AddressingMode : uint32_t {
    // clang-format off
    UnitStride       = 0b00,
    IndexedUnordered = 0b01,
    Strided          = 0b10,
    IndexedOrdered   = 0b11,
    // clang-format on
};

enum class UnitStrideLoadAddressingMode : uint32_t {
    // clang-format off
    Load               = 0b00000,
    MaskLoad           = 0b01011,
    LoadFaultOnlyFirst = 0b10000,
    // clang-format on
};

enum class UnitStrideStoreAddressingMode : uint32_t {
    // clang-format off
    Store     = 0b00000,
    MaskStore = 0b01011,
    // clang-format on
};

enum class WidthEncoding : uint32_t {
    // clang-format off
    E8  = 0b000,
    E16 = 0b101,
    E32 = 0b110,
    E64 = 0b111,
    // clang-format on
};

void EmitVectorLoadImpl(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
                        VecMask vm, uint32_t lumop, GPR rs, WidthEncoding width, Vec vd) noexcept {
    BISCUIT_ASSERT(nf <= 8);

    // Fit to encoding space. Allows for being more explicit about the size in calling functions
    // (e.g. using 8 for 8 elements instead of 7).
    if (nf != 0) {
        nf -= 1;
    }

    // clang-format off
    const auto value = (nf << 29) |
                       (static_cast<uint32_t>(mew) << 28) |
                       (static_cast<uint32_t>(mop) << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (lumop << 20) |
                       (rs.Index() << 15) |
                       (static_cast<uint32_t>(width) << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b111);
}
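
// Worked example (a sketch; field values taken from the enums above, and
// VecMask::No assumed to encode vm=1 as in the RVV spec): "vle32.v v4, (a0)"
// goes through this helper as nf=1 (encoded 0), mew=0, mop=UnitStride,
// lumop=Load, rs1=x10, width=E32, vd=v4, giving
//   (1 << 25) | (10 << 15) | (0b110 << 12) | (4 << 7) | 0b0000111 = 0x02056207.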

void EmitVectorLoad(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
                    VecMask vm, UnitStrideLoadAddressingMode lumop, GPR rs,
                    WidthEncoding width, Vec vd) noexcept {
    EmitVectorLoadImpl(buffer, nf, mew, mop, vm, static_cast<uint32_t>(lumop), rs, width, vd);
}

void EmitVectorLoad(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
                    VecMask vm, GPR rs2, GPR rs1, WidthEncoding width, Vec vd) noexcept {
    EmitVectorLoadImpl(buffer, nf, mew, mop, vm, rs2.Index(), rs1, width, vd);
}

void EmitVectorLoad(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
                    VecMask vm, Vec vs2, GPR rs1, WidthEncoding width, Vec vd) noexcept {
    EmitVectorLoadImpl(buffer, nf, mew, mop, vm, vs2.Index(), rs1, width, vd);
}

void EmitVectorLoadWholeReg(CodeBuffer& buffer, uint32_t nf, bool mew, GPR rs,
                            WidthEncoding width, Vec vd) noexcept {
    // RISC-V V extension spec (as of 1.0RC) only allows these nf values.
    BISCUIT_ASSERT(nf == 1 || nf == 2 || nf == 4 || nf == 8);

    EmitVectorLoadImpl(buffer, nf, mew, AddressingMode::UnitStride,
                       VecMask::No, 0b01000, rs, width, vd);
}
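
// Usage sketch: a whole-register load such as "vl2re32.v v2, (a0)" passes
// nf=2 here; the nf - 1 adjustment in EmitVectorLoadImpl encodes it as 0b001
// in bits [31:29], with lumop fixed to 0b01000 (whole-register form).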

void EmitVectorStoreImpl(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
                         VecMask vm, uint32_t sumop, GPR rs, WidthEncoding width, Vec vd) noexcept {
    BISCUIT_ASSERT(nf <= 8);

    // Fit to encoding space. Allows for being more explicit about the size in calling functions
    // (e.g. using 8 for 8 elements instead of 7).
    if (nf != 0) {
        nf -= 1;
    }

    // clang-format off
    const auto value = (nf << 29) |
                       (static_cast<uint32_t>(mew) << 28) |
                       (static_cast<uint32_t>(mop) << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (sumop << 20) |
                       (rs.Index() << 15) |
                       (static_cast<uint32_t>(width) << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b100111);
}

    114                      VecMask vm, UnitStrideStoreAddressingMode lumop, GPR rs,
    115                      WidthEncoding width, Vec vs) noexcept {
    116     EmitVectorStoreImpl(buffer, nf, mew, mop, vm, static_cast<uint32_t>(lumop), rs, width, vs);
    117 }
    118 
    119 void EmitVectorStore(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
    120                      VecMask vm, GPR rs2, GPR rs1, WidthEncoding width, Vec vs3) noexcept {
    121     EmitVectorStoreImpl(buffer, nf, mew, mop, vm, rs2.Index(), rs1, width, vs3);
    122 }
    123 
    124 void EmitVectorStore(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
    125                      VecMask vm, Vec vs2, GPR rs1, WidthEncoding width, Vec vs3) noexcept {
    126     EmitVectorStoreImpl(buffer, nf, mew, mop, vm, vs2.Index(), rs1, width, vs3);
    127 }
    128 
    129 void EmitVectorStoreWholeReg(CodeBuffer& buffer, uint32_t nf, GPR rs, Vec vs) noexcept {
    130     // RISC-V V extension spec (as of 1.0RC) only allows these nf values.
    131     BISCUIT_ASSERT(nf == 1 || nf == 2 || nf == 4 || nf == 8);
    132 
    133     EmitVectorStoreImpl(buffer, nf, false, AddressingMode::UnitStride, VecMask::No,
    134                         0b01000, rs, WidthEncoding::E8, vs);
    135 }
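
// Note: whole-register stores (vs1r.v, vs2r.v, vs4r.v, vs8r.v) have a fixed
// EEW of 8 in the spec, which is why this overload hardcodes WidthEncoding::E8
// and exposes no width parameter, unlike its load counterpart.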

void EmitVectorOPIVIImpl(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, uint32_t imm5, Vec vd) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       ((imm5 & 0b11111) << 15) |
                       (0b011U << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b1010111);
}

void EmitVectorOPIVI(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, int32_t simm5, Vec vd) noexcept {
    BISCUIT_ASSERT(simm5 >= -16 && simm5 <= 15);
    EmitVectorOPIVIImpl(buffer, funct6, vm, vs2, static_cast<uint32_t>(simm5), vd);
}
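
// Example of the two's-complement fit above: simm5 = -3 casts to 0xFFFFFFFD,
// the "& 0b11111" in EmitVectorOPIVIImpl keeps the low five bits (0b11101),
// and the hardware sign-extends that field back to -3.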

void EmitVectorOPIVUI(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, uint32_t uimm5, Vec vd) noexcept {
    BISCUIT_ASSERT(uimm5 <= 31);
    EmitVectorOPIVIImpl(buffer, funct6, vm, vs2, uimm5, vd);
}

void EmitVectorOPIVV(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, Vec vs1, Vec vd) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       (vs1.Index() << 15) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b1010111);
}

void EmitVectorOPIVX(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, GPR rs1, Vec vd) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       (rs1.Index() << 15) |
                       (0b100U << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b1010111);
}

void EmitVectorOPMVV(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, Vec vs1, Vec vd) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       (vs1.Index() << 15) |
                       (0b010U << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b1010111);
}

void EmitVectorOPMVX(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, GPR rs1, Vec vd) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       (rs1.Index() << 15) |
                       (0b110U << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b1010111);
}

void EmitVectorOPFVV(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, Vec vs1, Vec vd) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       (vs1.Index() << 15) |
                       (0b001U << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b1010111);
}

void EmitVectorOPFVF(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, FPR rs1, Vec vd) noexcept {
    // clang-format off
    const auto value = (funct6 << 26) |
                       (static_cast<uint32_t>(vm) << 25) |
                       (vs2.Index() << 20) |
                       (rs1.Index() << 15) |
                       (0b101U << 12) |
                       (vd.Index() << 7);
    // clang-format on

    buffer.Emit32(value | 0b1010111);
}
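
// The funct3 field (bits [14:12]) written by the emitters above selects the
// operand category, matching the OP-V table in the RVV spec:
//   0b000 OPIVV, 0b011 OPIVI, 0b100 OPIVX,
//   0b010 OPMVV, 0b110 OPMVX, 0b001 OPFVV, 0b101 OPFVF.
// (OPIVV is all zeroes, so EmitVectorOPIVV simply omits the OR.)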
} // Anonymous namespace

// Vector Integer Arithmetic Instructions

void Assembler::VAADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b001001, mask, vs2, vs1, vd);
}

void Assembler::VAADD(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b001001, mask, vs2, rs1, vd);
}

void Assembler::VAADDU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b001000, mask, vs2, vs1, vd);
}

void Assembler::VAADDU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b001000, mask, vs2, rs1, vd);
}

void Assembler::VADC(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010000, VecMask::Yes, vs2, vs1, vd);
}

void Assembler::VADC(Vec vd, Vec vs2, GPR rs1) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010000, VecMask::Yes, vs2, rs1, vd);
}

void Assembler::VADC(Vec vd, Vec vs2, int32_t simm) noexcept {
    EmitVectorOPIVI(m_buffer, 0b010000, VecMask::Yes, vs2, simm, vd);
}

void Assembler::VADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b000000, mask, vs2, vs1, vd);
}

void Assembler::VADD(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000000, mask, vs2, rs1, vd);
}

void Assembler::VADD(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b000000, mask, vs2, simm, vd);
}

void Assembler::VAND(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b001001, mask, vs2, vs1, vd);
}

void Assembler::VAND(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b001001, mask, vs2, rs1, vd);
}

void Assembler::VAND(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b001001, mask, vs2, simm, vd);
}

void Assembler::VASUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b001011, mask, vs2, vs1, vd);
}

void Assembler::VASUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b001011, mask, vs2, rs1, vd);
}

void Assembler::VASUBU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b001010, mask, vs2, vs1, vd);
}

void Assembler::VASUBU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b001010, mask, vs2, rs1, vd);
}

void Assembler::VCOMPRESS(Vec vd, Vec vs2, Vec vs1) noexcept {
    // Note: Destination register may not overlap any of the source registers,
    //       as per the RVV spec (as of 1.0RC; see section 16.5)
    EmitVectorOPMVV(m_buffer, 0b010111, VecMask::No, vs2, vs1, vd);
}

void Assembler::VDIV(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100001, mask, vs2, vs1, vd);
}

void Assembler::VDIV(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100001, mask, vs2, rs1, vd);
}

void Assembler::VDIVU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100000, mask, vs2, vs1, vd);
}

void Assembler::VDIVU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100000, mask, vs2, rs1, vd);
}

void Assembler::VFIRST(GPR rd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010000, mask, vs, v17, Vec{rd.Index()});
}

void Assembler::VID(Vec vd, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010100, mask, v0, v17, vd);
}

void Assembler::VIOTA(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010100, mask, vs, v16, vd);
}
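
// For these unary ops the vs1 slot is repurposed as a sub-opcode, so a Vec
// register stands in for the raw field value: VFIRST encodes vs1=0b10001 (v17)
// under funct6 VWXUNARY0, and VID/VIOTA encode 0b10001/0b10000 under VMUNARY0.
// VPOPC, VMSBF, VMSIF, and VMSOF below follow the same scheme.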

void Assembler::VMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b101101, mask, vs2, vs1, vd);
}

void Assembler::VMACC(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b101101, mask, vs2, rs1, vd);
}

void Assembler::VMADC(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010001, mask, vs2, vs1, vd);
}

void Assembler::VMADC(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010001, mask, vs2, rs1, vd);
}

void Assembler::VMADC(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b010001, mask, vs2, simm, vd);
}

void Assembler::VMADD(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b101001, mask, vs2, vs1, vd);
}

void Assembler::VMADD(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b101001, mask, vs2, rs1, vd);
}

void Assembler::VMAND(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011001, VecMask::No, vs2, vs1, vd);
}

void Assembler::VMANDNOT(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011000, VecMask::No, vs2, vs1, vd);
}

void Assembler::VMNAND(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011101, VecMask::No, vs2, vs1, vd);
}

void Assembler::VMNOR(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011110, VecMask::No, vs2, vs1, vd);
}

void Assembler::VMOR(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011010, VecMask::No, vs2, vs1, vd);
}

void Assembler::VMORNOT(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011100, VecMask::No, vs2, vs1, vd);
}

void Assembler::VMXNOR(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011111, VecMask::No, vs2, vs1, vd);
}

void Assembler::VMXOR(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPMVV(m_buffer, 0b011011, VecMask::No, vs2, vs1, vd);
}

void Assembler::VMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b000111, mask, vs2, vs1, vd);
}

void Assembler::VMAX(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000111, mask, vs2, rs1, vd);
}

void Assembler::VMAXU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b000110, mask, vs2, vs1, vd);
}

void Assembler::VMAXU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000110, mask, vs2, rs1, vd);
}

void Assembler::VMERGE(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010111, VecMask::Yes, vs2, vs1, vd);
}

void Assembler::VMERGE(Vec vd, Vec vs2, GPR rs1) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010111, VecMask::Yes, vs2, rs1, vd);
}

void Assembler::VMERGE(Vec vd, Vec vs2, int32_t simm) noexcept {
    EmitVectorOPIVI(m_buffer, 0b010111, VecMask::Yes, vs2, simm, vd);
}
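
// VMERGE (like VADC and VSBC) hardwires vm=0: the encoding always reads v0 as
// the selector mask, which is why these overloads take no VecMask parameter
// and pass VecMask::Yes unconditionally.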

void Assembler::VMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b000101, mask, vs2, vs1, vd);
}

void Assembler::VMIN(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000101, mask, vs2, rs1, vd);
}

void Assembler::VMINU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b000100, mask, vs2, vs1, vd);
}

void Assembler::VMINU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000100, mask, vs2, rs1, vd);
}

void Assembler::VMSBC(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010011, mask, vs2, vs1, vd);
}

void Assembler::VMSBC(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010011, mask, vs2, rs1, vd);
}

void Assembler::VMSBF(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010100, mask, vs, v1, vd);
}

void Assembler::VMSIF(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010100, mask, vs, v3, vd);
}

void Assembler::VMSOF(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010100, mask, vs, v2, vd);
}

void Assembler::VMSEQ(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b011000, mask, vs2, vs1, vd);
}

void Assembler::VMSEQ(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011000, mask, vs2, rs1, vd);
}

void Assembler::VMSEQ(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b011000, mask, vs2, simm, vd);
}

void Assembler::VMSGT(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011111, mask, vs2, rs1, vd);
}

void Assembler::VMSGT(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b011111, mask, vs2, simm, vd);
}

void Assembler::VMSGTU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011110, mask, vs2, rs1, vd);
}

void Assembler::VMSGTU(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b011110, mask, vs2, simm, vd);
}

void Assembler::VMSLE(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b011101, mask, vs2, vs1, vd);
}

void Assembler::VMSLE(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011101, mask, vs2, rs1, vd);
}

void Assembler::VMSLE(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b011101, mask, vs2, simm, vd);
}

void Assembler::VMSLEU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b011100, mask, vs2, vs1, vd);
}

void Assembler::VMSLEU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011100, mask, vs2, rs1, vd);
}

void Assembler::VMSLEU(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b011100, mask, vs2, simm, vd);
}

void Assembler::VMSLT(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b011011, mask, vs2, vs1, vd);
}

void Assembler::VMSLT(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011011, mask, vs2, rs1, vd);
}

void Assembler::VMSLTU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b011010, mask, vs2, vs1, vd);
}

void Assembler::VMSLTU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011010, mask, vs2, rs1, vd);
}

void Assembler::VMSNE(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b011001, mask, vs2, vs1, vd);
}

void Assembler::VMSNE(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b011001, mask, vs2, rs1, vd);
}

void Assembler::VMSNE(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b011001, mask, vs2, simm, vd);
}

void Assembler::VMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100101, mask, vs2, vs1, vd);
}

void Assembler::VMUL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100101, mask, vs2, rs1, vd);
}

void Assembler::VMULH(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100111, mask, vs2, vs1, vd);
}

void Assembler::VMULH(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100111, mask, vs2, rs1, vd);
}

void Assembler::VMULHSU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100110, mask, vs2, vs1, vd);
}

void Assembler::VMULHSU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100110, mask, vs2, rs1, vd);
}

void Assembler::VMULHU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100100, mask, vs2, vs1, vd);
}

void Assembler::VMULHU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100100, mask, vs2, rs1, vd);
}

void Assembler::VMV(Vec vd, Vec vs1) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010111, VecMask::No, v0, vs1, vd);
}

void Assembler::VMV(Vec vd, GPR rs1) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010111, VecMask::No, v0, rs1, vd);
}

void Assembler::VMV(Vec vd, int32_t simm) noexcept {
    EmitVectorOPIVI(m_buffer, 0b010111, VecMask::No, v0, simm, vd);
}

void Assembler::VMV1R(Vec vd, Vec vs) noexcept {
    EmitVectorOPIVI(m_buffer, 0b100111, VecMask::No, vs, 0b00000, vd);
}

void Assembler::VMV2R(Vec vd, Vec vs) noexcept {
    // Registers must be aligned to the register group size, per the
    // RVV spec (as of 1.0RC)
    BISCUIT_ASSERT(vd.Index() % 2 == 0);
    BISCUIT_ASSERT(vs.Index() % 2 == 0);

    EmitVectorOPIVI(m_buffer, 0b100111, VecMask::No, vs, 0b00001, vd);
}

void Assembler::VMV4R(Vec vd, Vec vs) noexcept {
    // Registers must be aligned to the register group size, per the
    // RVV spec (as of 1.0RC)
    BISCUIT_ASSERT(vd.Index() % 4 == 0);
    BISCUIT_ASSERT(vs.Index() % 4 == 0);

    EmitVectorOPIVI(m_buffer, 0b100111, VecMask::No, vs, 0b00011, vd);
}

void Assembler::VMV8R(Vec vd, Vec vs) noexcept {
    // Registers must be aligned to the register group size, per the
    // RVV spec (as of 1.0RC)
    BISCUIT_ASSERT(vd.Index() % 8 == 0);
    BISCUIT_ASSERT(vs.Index() % 8 == 0);

    EmitVectorOPIVI(m_buffer, 0b100111, VecMask::No, vs, 0b00111, vd);
}
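
// Example of the alignment rule above: VMV2R(v2, v4) assembles fine, while
// VMV2R(v3, v4) trips the assert because v3 is not a multiple of the group
// size of 2.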

void Assembler::VMV_SX(Vec vd, GPR rs) noexcept {
    EmitVectorOPMVX(m_buffer, 0b010000, VecMask::No, v0, rs, vd);
}

void Assembler::VMV_XS(GPR rd, Vec vs) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010000, VecMask::No, vs, v0, Vec{rd.Index()});
}

void Assembler::VNCLIP(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b101111, mask, vs2, vs1, vd);
}

void Assembler::VNCLIP(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b101111, mask, vs2, rs1, vd);
}

void Assembler::VNCLIP(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b101111, mask, vs2, uimm, vd);
}

void Assembler::VNCLIPU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b101110, mask, vs2, vs1, vd);
}

void Assembler::VNCLIPU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b101110, mask, vs2, rs1, vd);
}

void Assembler::VNCLIPU(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b101110, mask, vs2, uimm, vd);
}

void Assembler::VNMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b101111, mask, vs2, vs1, vd);
}

void Assembler::VNMSAC(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b101111, mask, vs2, rs1, vd);
}

void Assembler::VNMSUB(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b101011, mask, vs2, vs1, vd);
}

void Assembler::VNMSUB(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b101011, mask, vs2, rs1, vd);
}

void Assembler::VNSRA(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b101101, mask, vs2, vs1, vd);
}

void Assembler::VNSRA(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b101101, mask, vs2, rs1, vd);
}

void Assembler::VNSRA(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b101101, mask, vs2, uimm, vd);
}

void Assembler::VNSRL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b101100, mask, vs2, vs1, vd);
}

void Assembler::VNSRL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b101100, mask, vs2, rs1, vd);
}

void Assembler::VNSRL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b101100, mask, vs2, uimm, vd);
}

void Assembler::VOR(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b001010, mask, vs2, vs1, vd);
}

void Assembler::VOR(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b001010, mask, vs2, rs1, vd);
}

void Assembler::VOR(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b001010, mask, vs2, simm, vd);
}

void Assembler::VPOPC(GPR rd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010000, mask, vs, v16, Vec{rd.Index()});
}

void Assembler::VREDAND(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000001, mask, vs2, vs1, vd);
}

void Assembler::VREDMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000111, mask, vs2, vs1, vd);
}

void Assembler::VREDMAXU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000110, mask, vs2, vs1, vd);
}

void Assembler::VREDMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000101, mask, vs2, vs1, vd);
}

void Assembler::VREDMINU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000100, mask, vs2, vs1, vd);
}

void Assembler::VREDOR(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000010, mask, vs2, vs1, vd);
}

void Assembler::VREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000000, mask, vs2, vs1, vd);
}

void Assembler::VREDXOR(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b000011, mask, vs2, vs1, vd);
}

void Assembler::VREM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100011, mask, vs2, vs1, vd);
}

void Assembler::VREM(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100011, mask, vs2, rs1, vd);
}

void Assembler::VREMU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b100010, mask, vs2, vs1, vd);
}

void Assembler::VREMU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b100010, mask, vs2, rs1, vd);
}

void Assembler::VRGATHER(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b001100, mask, vs2, vs1, vd);
}

void Assembler::VRGATHER(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b001100, mask, vs2, rs1, vd);
}

void Assembler::VRGATHER(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b001100, mask, vs2, uimm, vd);
}

void Assembler::VRGATHEREI16(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b001110, mask, vs2, vs1, vd);
}

void Assembler::VRSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000011, mask, vs2, rs1, vd);
}

void Assembler::VRSUB(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b000011, mask, vs2, simm, vd);
}

void Assembler::VSADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b100001, mask, vs2, vs1, vd);
}

void Assembler::VSADD(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b100001, mask, vs2, rs1, vd);
}

void Assembler::VSADD(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b100001, mask, vs2, simm, vd);
}

void Assembler::VSADDU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b100000, mask, vs2, vs1, vd);
}

void Assembler::VSADDU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b100000, mask, vs2, rs1, vd);
}

void Assembler::VSADDU(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b100000, mask, vs2, simm, vd);
}

void Assembler::VSBC(Vec vd, Vec vs2, Vec vs1) noexcept {
    EmitVectorOPIVV(m_buffer, 0b010010, VecMask::Yes, vs2, vs1, vd);
}

void Assembler::VSBC(Vec vd, Vec vs2, GPR rs1) noexcept {
    EmitVectorOPIVX(m_buffer, 0b010010, VecMask::Yes, vs2, rs1, vd);
}

void Assembler::VSEXTVF2(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v7, vd);
}

void Assembler::VSEXTVF4(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v5, vd);
}

void Assembler::VSEXTVF8(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v3, vd);
}
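
// As with the mask unary ops, vs1 encodes the sub-operation for VXUNARY0:
// vzext.vf8=0b00010, vsext.vf8=0b00011, vzext.vf4=0b00100, vsext.vf4=0b00101,
// vzext.vf2=0b00110, vsext.vf2=0b00111; hence v7/v5/v3 here and v6/v4/v2 in
// the VZEXT variants further down.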

void Assembler::VSLIDE1DOWN(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b001111, mask, vs2, rs1, vd);
}

void Assembler::VSLIDEDOWN(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b001111, mask, vs2, rs1, vd);
}

void Assembler::VSLIDEDOWN(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b001111, mask, vs2, uimm, vd);
}

void Assembler::VSLIDE1UP(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b001110, mask, vs2, rs1, vd);
}

void Assembler::VSLIDEUP(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b001110, mask, vs2, rs1, vd);
}

void Assembler::VSLIDEUP(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b001110, mask, vs2, uimm, vd);
}

void Assembler::VSLL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b100101, mask, vs2, vs1, vd);
}

void Assembler::VSLL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b100101, mask, vs2, rs1, vd);
}

void Assembler::VSLL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b100101, mask, vs2, uimm, vd);
}

void Assembler::VSMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b100111, mask, vs2, vs1, vd);
}

void Assembler::VSMUL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b100111, mask, vs2, rs1, vd);
}

void Assembler::VSRA(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b101001, mask, vs2, vs1, vd);
}

void Assembler::VSRA(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b101001, mask, vs2, rs1, vd);
}

void Assembler::VSRA(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b101001, mask, vs2, uimm, vd);
}

void Assembler::VSRL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b101000, mask, vs2, vs1, vd);
}

void Assembler::VSRL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b101000, mask, vs2, rs1, vd);
}

void Assembler::VSRL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b101000, mask, vs2, uimm, vd);
}

void Assembler::VSSRA(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b101011, mask, vs2, vs1, vd);
}

void Assembler::VSSRA(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b101011, mask, vs2, rs1, vd);
}

void Assembler::VSSRA(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b101011, mask, vs2, uimm, vd);
}

void Assembler::VSSRL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b101010, mask, vs2, vs1, vd);
}

void Assembler::VSSRL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b101010, mask, vs2, rs1, vd);
}

void Assembler::VSSRL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
    EmitVectorOPIVUI(m_buffer, 0b101010, mask, vs2, uimm, vd);
}

void Assembler::VSSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b100011, mask, vs2, vs1, vd);
}

void Assembler::VSSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b100011, mask, vs2, rs1, vd);
}

void Assembler::VSSUBU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b100010, mask, vs2, vs1, vd);
}

void Assembler::VSSUBU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b100010, mask, vs2, rs1, vd);
}

void Assembler::VSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b000010, mask, vs2, vs1, vd);
}

void Assembler::VSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b000010, mask, vs2, rs1, vd);
}

void Assembler::VWADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b110001, mask, vs2, vs1, vd);
}

void Assembler::VWADD(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b110001, mask, vs2, rs1, vd);
}

void Assembler::VWADDW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b110101, mask, vs2, vs1, vd);
}

void Assembler::VWADDW(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b110101, mask, vs2, rs1, vd);
}

void Assembler::VWADDU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b110000, mask, vs2, vs1, vd);
}

void Assembler::VWADDU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b110000, mask, vs2, rs1, vd);
}

void Assembler::VWADDUW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b110100, mask, vs2, vs1, vd);
}

void Assembler::VWADDUW(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b110100, mask, vs2, rs1, vd);
}

void Assembler::VWMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b111101, mask, vs2, vs1, vd);
}

void Assembler::VWMACC(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b111101, mask, vs2, rs1, vd);
}

void Assembler::VWMACCSU(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b111111, mask, vs2, vs1, vd);
}

void Assembler::VWMACCSU(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b111111, mask, vs2, rs1, vd);
}

void Assembler::VWMACCU(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b111100, mask, vs2, vs1, vd);
}

void Assembler::VWMACCU(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b111100, mask, vs2, rs1, vd);
}

void Assembler::VWMACCUS(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b111110, mask, vs2, rs1, vd);
}

void Assembler::VWMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b111011, mask, vs2, vs1, vd);
}

void Assembler::VWMUL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b111011, mask, vs2, rs1, vd);
}

void Assembler::VWMULSU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b111010, mask, vs2, vs1, vd);
}

void Assembler::VWMULSU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b111010, mask, vs2, rs1, vd);
}

void Assembler::VWMULU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b111000, mask, vs2, vs1, vd);
}

void Assembler::VWMULU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b111000, mask, vs2, rs1, vd);
}

void Assembler::VWREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b110001, mask, vs2, vs1, vd);
}

void Assembler::VWREDSUMU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b110000, mask, vs2, vs1, vd);
}

void Assembler::VWSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b110011, mask, vs2, vs1, vd);
}

void Assembler::VWSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b110011, mask, vs2, rs1, vd);
}

void Assembler::VWSUBW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b110111, mask, vs2, vs1, vd);
}

void Assembler::VWSUBW(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b110111, mask, vs2, rs1, vd);
}

void Assembler::VWSUBU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b110010, mask, vs2, vs1, vd);
}

void Assembler::VWSUBU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b110010, mask, vs2, rs1, vd);
}

void Assembler::VWSUBUW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b110110, mask, vs2, vs1, vd);
}

void Assembler::VWSUBUW(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPMVX(m_buffer, 0b110110, mask, vs2, rs1, vd);
}

void Assembler::VXOR(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPIVV(m_buffer, 0b001011, mask, vs2, vs1, vd);
}

void Assembler::VXOR(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
    EmitVectorOPIVX(m_buffer, 0b001011, mask, vs2, rs1, vd);
}

void Assembler::VXOR(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
    EmitVectorOPIVI(m_buffer, 0b001011, mask, vs2, simm, vd);
}

void Assembler::VZEXTVF2(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v6, vd);
}

void Assembler::VZEXTVF4(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v4, vd);
}

void Assembler::VZEXTVF8(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v2, vd);
}

// Vector Floating-Point Instructions

void Assembler::VFADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b000000, mask, vs2, vs1, vd);
}

void Assembler::VFADD(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b000000, mask, vs2, rs1, vd);
}

void Assembler::VFCLASS(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010011, mask, vs, v16, vd);
}

void Assembler::VFCVT_F_X(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v3, vd);
}

void Assembler::VFCVT_F_XU(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v2, vd);
}

void Assembler::VFCVT_RTZ_X_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v7, vd);
}

void Assembler::VFCVT_RTZ_XU_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v6, vd);
}

void Assembler::VFCVT_X_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v1, vd);
}

void Assembler::VFCVT_XU_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v0, vd);
}
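
// The VFUNARY0 conversions (funct6 0b010010) also encode the sub-op in vs1:
// single-width uses 0-7 (xu.f=0, x.f=1, f.xu=2, f.x=3, rtz variants 6 and 7),
// the widening forms below add 8 (v8-v15), and the narrowing forms add 16
// (v16-v23).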

void Assembler::VFNCVT_F_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v20, vd);
}

void Assembler::VFNCVT_F_X(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v19, vd);
}

void Assembler::VFNCVT_F_XU(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v18, vd);
}

void Assembler::VFNCVT_ROD_F_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v21, vd);
}

void Assembler::VFNCVT_RTZ_X_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v23, vd);
}

void Assembler::VFNCVT_RTZ_XU_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v22, vd);
}

void Assembler::VFNCVT_X_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v17, vd);
}

void Assembler::VFNCVT_XU_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v16, vd);
}

void Assembler::VFWCVT_F_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v12, vd);
}

void Assembler::VFWCVT_F_X(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v11, vd);
}

void Assembler::VFWCVT_F_XU(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v10, vd);
}

void Assembler::VFWCVT_RTZ_X_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v15, vd);
}

void Assembler::VFWCVT_RTZ_XU_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v14, vd);
}

void Assembler::VFWCVT_X_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v9, vd);
}

void Assembler::VFWCVT_XU_F(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v8, vd);
}

void Assembler::VFDIV(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b100000, mask, vs2, vs1, vd);
}

void Assembler::VFDIV(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b100000, mask, vs2, rs1, vd);
}

void Assembler::VFRDIV(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b100001, mask, vs2, rs1, vd);
}

void Assembler::VFREDMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b000111, mask, vs2, vs1, vd);
}

void Assembler::VFREDMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b000101, mask, vs2, vs1, vd);
}

void Assembler::VFREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b000001, mask, vs2, vs1, vd);
}

void Assembler::VFREDOSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b000011, mask, vs2, vs1, vd);
}

void Assembler::VFMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b101100, mask, vs2, vs1, vd);
}

void Assembler::VFMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b101100, mask, vs2, rs1, vd);
}

void Assembler::VFMADD(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b101000, mask, vs2, vs1, vd);
}

void Assembler::VFMADD(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b101000, mask, vs2, rs1, vd);
}

void Assembler::VFMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b000110, mask, vs2, vs1, vd);
}

void Assembler::VFMAX(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b000110, mask, vs2, rs1, vd);
}

void Assembler::VFMERGE(Vec vd, Vec vs2, FPR rs1) noexcept {
    EmitVectorOPFVF(m_buffer, 0b010111, VecMask::Yes, vs2, rs1, vd);
}

void Assembler::VFMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b000100, mask, vs2, vs1, vd);
}

void Assembler::VFMIN(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b000100, mask, vs2, rs1, vd);
}

void Assembler::VFMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b101110, mask, vs2, vs1, vd);
}

void Assembler::VFMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b101110, mask, vs2, rs1, vd);
}

void Assembler::VFMSUB(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b101010, mask, vs2, vs1, vd);
}

void Assembler::VFMSUB(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b101010, mask, vs2, rs1, vd);
}

void Assembler::VFMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b100100, mask, vs2, vs1, vd);
}

void Assembler::VFMUL(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b100100, mask, vs2, rs1, vd);
}

void Assembler::VFMV(Vec vd, FPR rs) noexcept {
    EmitVectorOPFVF(m_buffer, 0b010111, VecMask::No, v0, rs, vd);
}

void Assembler::VFMV_FS(FPR rd, Vec vs) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010000, VecMask::No, vs, v0, Vec{rd.Index()});
}

void Assembler::VFMV_SF(Vec vd, FPR rs) noexcept {
    EmitVectorOPFVF(m_buffer, 0b010000, VecMask::No, v0, rs, vd);
}

void Assembler::VFNMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b101101, mask, vs2, vs1, vd);
}

void Assembler::VFNMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b101101, mask, vs2, rs1, vd);
}

void Assembler::VFNMADD(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b101001, mask, vs2, vs1, vd);
}

void Assembler::VFNMADD(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b101001, mask, vs2, rs1, vd);
}

void Assembler::VFNMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b101111, mask, vs2, vs1, vd);
}

void Assembler::VFNMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b101111, mask, vs2, rs1, vd);
}

void Assembler::VFNMSUB(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b101011, mask, vs2, vs1, vd);
}

void Assembler::VFNMSUB(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b101011, mask, vs2, rs1, vd);
}

void Assembler::VFREC7(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010011, mask, vs, v5, vd);
}

void Assembler::VFSGNJ(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b001000, mask, vs2, vs1, vd);
}

void Assembler::VFSGNJ(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b001000, mask, vs2, rs1, vd);
}

void Assembler::VFSGNJN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b001001, mask, vs2, vs1, vd);
}

void Assembler::VFSGNJN(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b001001, mask, vs2, rs1, vd);
}

void Assembler::VFSGNJX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b001010, mask, vs2, vs1, vd);
}

void Assembler::VFSGNJX(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b001010, mask, vs2, rs1, vd);
}

void Assembler::VFSQRT(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010011, mask, vs, v0, vd);
}

void Assembler::VFRSQRT7(Vec vd, Vec vs, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b010011, mask, vs, v4, vd);
}

void Assembler::VFSLIDE1DOWN(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b001111, mask, vs2, rs1, vd);
}

void Assembler::VFSLIDE1UP(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b001110, mask, vs2, rs1, vd);
}

void Assembler::VFSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b000010, mask, vs2, vs1, vd);
}

void Assembler::VFSUB(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b000010, mask, vs2, rs1, vd);
}

void Assembler::VFRSUB(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b100111, mask, vs2, rs1, vd);
}

void Assembler::VFWADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b110000, mask, vs2, vs1, vd);
}

void Assembler::VFWADD(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b110000, mask, vs2, rs1, vd);
}

void Assembler::VFWADDW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b110100, mask, vs2, vs1, vd);
}

void Assembler::VFWADDW(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b110100, mask, vs2, rs1, vd);
}

void Assembler::VFWMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b111100, mask, vs2, vs1, vd);
}

void Assembler::VFWMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b111100, mask, vs2, rs1, vd);
}

void Assembler::VFWMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b111000, mask, vs2, vs1, vd);
}

void Assembler::VFWMUL(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
    EmitVectorOPFVF(m_buffer, 0b111000, mask, vs2, rs1, vd);
}

void Assembler::VFWNMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
    EmitVectorOPFVV(m_buffer, 0b111101, mask, vs2, vs1, vd);
}

   1411 void Assembler::VFWNMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
   1412     EmitVectorOPFVF(m_buffer, 0b111101, mask, vs2, rs1, vd);
   1413 }
   1414 
   1415 void Assembler::VFWNMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
   1416     EmitVectorOPFVV(m_buffer, 0b111111, mask, vs2, vs1, vd);
   1417 }
   1418 
   1419 void Assembler::VFWNMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
   1420     EmitVectorOPFVF(m_buffer, 0b111111, mask, vs2, rs1, vd);
   1421 }
   1422 
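        // Widening float reductions into a double-width element 0 of vd:
        // 0b110001 is the unordered sum (spelled vfwredusum in spec v1.0) and
        // 0b110011 the ordered one.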
   1423 void Assembler::VFWREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
   1424     EmitVectorOPFVV(m_buffer, 0b110001, mask, vs2, vs1, vd);
   1425 }
   1426 
   1427 void Assembler::VFWREDOSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
   1428     EmitVectorOPFVV(m_buffer, 0b110011, mask, vs2, vs1, vd);
   1429 }
   1430 
   1431 void Assembler::VFWMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
   1432     EmitVectorOPFVV(m_buffer, 0b111110, mask, vs2, vs1, vd);
   1433 }
   1434 
   1435 void Assembler::VFWMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept {
   1436     EmitVectorOPFVF(m_buffer, 0b111110, mask, vs2, rs1, vd);
   1437 }
   1438 
   1439 void Assembler::VFWSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
   1440     EmitVectorOPFVV(m_buffer, 0b110010, mask, vs2, vs1, vd);
   1441 }
   1442 
   1443 void Assembler::VFWSUB(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
   1444     EmitVectorOPFVF(m_buffer, 0b110010, mask, vs2, rs1, vd);
   1445 }
   1446 
   1447 void Assembler::VFWSUBW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
   1448     EmitVectorOPFVV(m_buffer, 0b110110, mask, vs2, vs1, vd);
   1449 }
   1450 
   1451 void Assembler::VFWSUBW(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
   1452     EmitVectorOPFVF(m_buffer, 0b110110, mask, vs2, rs1, vd);
   1453 }
   1454 
   1455 void Assembler::VMFEQ(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
   1456     EmitVectorOPFVV(m_buffer, 0b011000, mask, vs2, vs1, vd);
   1457 }
   1458 
   1459 void Assembler::VMFEQ(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
   1460     EmitVectorOPFVF(m_buffer, 0b011000, mask, vs2, rs1, vd);
   1461 }
   1462 
   1463 void Assembler::VMFGE(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
   1464     EmitVectorOPFVF(m_buffer, 0b011111, mask, vs2, rs1, vd);
   1465 }
   1466 
   1467 void Assembler::VMFGT(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
   1468     EmitVectorOPFVF(m_buffer, 0b011101, mask, vs2, rs1, vd);
   1469 }
   1470 
   1471 void Assembler::VMFLE(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
   1472     EmitVectorOPFVV(m_buffer, 0b011001, mask, vs2, vs1, vd);
   1473 }
   1474 
   1475 void Assembler::VMFLE(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
   1476     EmitVectorOPFVF(m_buffer, 0b011001, mask, vs2, rs1, vd);
   1477 }
   1478 
   1479 void Assembler::VMFLT(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
   1480     EmitVectorOPFVV(m_buffer, 0b011011, mask, vs2, vs1, vd);
   1481 }
   1482 
   1483 void Assembler::VMFLT(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
   1484     EmitVectorOPFVF(m_buffer, 0b011011, mask, vs2, rs1, vd);
   1485 }
   1486 
   1487 void Assembler::VMFNE(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
   1488     EmitVectorOPFVV(m_buffer, 0b011100, mask, vs2, vs1, vd);
   1489 }
   1490 
   1491 void Assembler::VMFNE(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept {
   1492     EmitVectorOPFVF(m_buffer, 0b011100, mask, vs2, rs1, vd);
   1493 }
   1494 
   1495 // Vector Load/Store Instructions
   1496 
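        // The plain element loads are the one-segment case of the segmented
        // loads further down, so they simply forward with num_segments == 1.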
   1497 void Assembler::VLE8(Vec vd, GPR rs, VecMask mask) noexcept {
   1498     VLSEGE8(1, vd, rs, mask);
   1499 }
   1500 
   1501 void Assembler::VLE16(Vec vd, GPR rs, VecMask mask) noexcept {
   1502     VLSEGE16(1, vd, rs, mask);
   1503 }
   1504 
   1505 void Assembler::VLE32(Vec vd, GPR rs, VecMask mask) noexcept {
   1506     VLSEGE32(1, vd, rs, mask);
   1507 }
   1508 
   1509 void Assembler::VLE64(Vec vd, GPR rs, VecMask mask) noexcept {
   1510     VLSEGE64(1, vd, rs, mask);
   1511 }
   1512 
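        // vlm.v: unit-stride mask load. Always unmasked (vm = 1), with EEW = 8
        // and lumop 0b01011; it transfers ceil(vl / 8) bytes.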
   1513 void Assembler::VLM(Vec vd, GPR rs) noexcept {
   1514     EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, VecMask::No,
   1515                    UnitStrideLoadAddressingMode::MaskLoad, rs, WidthEncoding::E8, vd);
   1516 }
   1517 
   1518 void Assembler::VLSE8(Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1519     VLSSEGE8(1, vd, rs1, rs2, mask);
   1520 }
   1521 
   1522 void Assembler::VLSE16(Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1523     VLSSEGE16(1, vd, rs1, rs2, mask);
   1524 }
   1525 
   1526 void Assembler::VLSE32(Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1527     VLSSEGE32(1, vd, rs1, rs2, mask);
   1528 }
   1529 
   1530 void Assembler::VLSE64(Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1531     VLSSEGE64(1, vd, rs1, rs2, mask);
   1532 }
   1533 
   1534 void Assembler::VLOXEI8(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1535     VLOXSEGEI8(1, vd, rs, vs, mask);
   1536 }
   1537 
   1538 void Assembler::VLOXEI16(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1539     VLOXSEGEI16(1, vd, rs, vs, mask);
   1540 }
   1541 
   1542 void Assembler::VLOXEI32(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1543     VLOXSEGEI32(1, vd, rs, vs, mask);
   1544 }
   1545 
   1546 void Assembler::VLOXEI64(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1547     VLOXSEGEI64(1, vd, rs, vs, mask);
   1548 }
   1549 
   1550 void Assembler::VLUXEI8(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1551     VLUXSEGEI8(1, vd, rs, vs, mask);
   1552 }
   1553 
   1554 void Assembler::VLUXEI16(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1555     VLUXSEGEI16(1, vd, rs, vs, mask);
   1556 }
   1557 
   1558 void Assembler::VLUXEI32(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1559     VLUXSEGEI32(1, vd, rs, vs, mask);
   1560 }
   1561 
   1562 void Assembler::VLUXEI64(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1563     VLUXSEGEI64(1, vd, rs, vs, mask);
   1564 }
   1565 
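        // Fault-only-first loads (lumop 0b10000) only trap if element 0 faults;
        // a fault on a later element instead truncates vl to the number of
        // elements already loaded.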
   1566 void Assembler::VLE8FF(Vec vd, GPR rs, VecMask mask) noexcept {
   1567     EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, mask,
   1568                    UnitStrideLoadAddressingMode::LoadFaultOnlyFirst, rs, WidthEncoding::E8, vd);
   1569 }
   1570 
   1571 void Assembler::VLE16FF(Vec vd, GPR rs, VecMask mask) noexcept {
   1572     EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, mask,
   1573                    UnitStrideLoadAddressingMode::LoadFaultOnlyFirst, rs, WidthEncoding::E16, vd);
   1574 }
   1575 
   1576 void Assembler::VLE32FF(Vec vd, GPR rs, VecMask mask) noexcept {
   1577     EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, mask,
   1578                    UnitStrideLoadAddressingMode::LoadFaultOnlyFirst, rs, WidthEncoding::E32, vd);
   1579 }
   1580 
   1581 void Assembler::VLE64FF(Vec vd, GPR rs, VecMask mask) noexcept {
   1582     EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, mask,
   1583                    UnitStrideLoadAddressingMode::LoadFaultOnlyFirst, rs, WidthEncoding::E64, vd);
   1584 }
   1585 
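        // Segmented unit-stride loads: each of the num_segments fields of a
        // segment is de-interleaved into its own destination register group.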
   1586 void Assembler::VLSEGE8(uint32_t num_segments, Vec vd, GPR rs, VecMask mask) noexcept {
   1587     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
   1588                    UnitStrideLoadAddressingMode::Load, rs, WidthEncoding::E8, vd);
   1589 }
   1590 
   1591 void Assembler::VLSEGE16(uint32_t num_segments, Vec vd, GPR rs, VecMask mask) noexcept {
   1592     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
   1593                    UnitStrideLoadAddressingMode::Load, rs, WidthEncoding::E16, vd);
   1594 }
   1595 
   1596 void Assembler::VLSEGE32(uint32_t num_segments, Vec vd, GPR rs, VecMask mask) noexcept {
   1597     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
   1598                    UnitStrideLoadAddressingMode::Load, rs, WidthEncoding::E32, vd);
   1599 }
   1600 
   1601 void Assembler::VLSEGE64(uint32_t num_segments, Vec vd, GPR rs, VecMask mask) noexcept {
   1602     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
   1603                    UnitStrideLoadAddressingMode::Load, rs, WidthEncoding::E64, vd);
   1604 }
   1605 
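        // Strided loads: rs1 holds the base address and rs2 a signed byte
        // stride, placed in bits 24:20 of the encoding.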
   1606 void Assembler::VLSSEGE8(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1607     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::Strided, mask,
   1608                    rs2, rs1, WidthEncoding::E8, vd);
   1609 }
   1610 
   1611 void Assembler::VLSSEGE16(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1612     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::Strided, mask,
   1613                    rs2, rs1, WidthEncoding::E16, vd);
   1614 }
   1615 
   1616 void Assembler::VLSSEGE32(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1617     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::Strided, mask,
   1618                    rs2, rs1, WidthEncoding::E32, vd);
   1619 }
   1620 
   1621 void Assembler::VLSSEGE64(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1622     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::Strided, mask,
   1623                    rs2, rs1, WidthEncoding::E64, vd);
   1624 }
   1625 
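        // Indexed loads: the width field encodes the EEW of the index vector vs
        // (byte offsets); the data elements use the SEW from vtype. The ordered
        // (vlox*) forms access elements in order, while the unordered (vlux*)
        // forms below may access them in any order.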
   1626 void Assembler::VLOXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1627     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
   1628                    vs, rs, WidthEncoding::E8, vd);
   1629 }
   1630 
   1631 void Assembler::VLOXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1632     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
   1633                    vs, rs, WidthEncoding::E16, vd);
   1634 }
   1635 
   1636 void Assembler::VLOXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1637     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
   1638                    vs, rs, WidthEncoding::E32, vd);
   1639 }
   1640 
   1641 void Assembler::VLOXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1642     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
   1643                    vs, rs, WidthEncoding::E64, vd);
   1644 }
   1645 
   1646 void Assembler::VLUXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1647     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
   1648                    vs, rs, WidthEncoding::E8, vd);
   1649 }
   1650 
   1651 void Assembler::VLUXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1652     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
   1653                    vs, rs, WidthEncoding::E16, vd);
   1654 }
   1655 
   1656 void Assembler::VLUXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1657     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
   1658                    vs, rs, WidthEncoding::E32, vd);
   1659 }
   1660 
   1661 void Assembler::VLUXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1662     EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
   1663                    vs, rs, WidthEncoding::E64, vd);
   1664 }
   1665 
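        // Whole-register loads (vl<nf>re<eew>.v): num_registers must be 1, 2, 4,
        // or 8, and the destination group must be aligned to it, hence the assert.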
   1666 void Assembler::VLRE8(uint32_t num_registers, Vec vd, GPR rs) noexcept {
   1667     BISCUIT_ASSERT(vd.Index() % num_registers == 0);
   1668     EmitVectorLoadWholeReg(m_buffer, num_registers, false, rs, WidthEncoding::E8, vd);
   1669 }
   1670 
   1671 void Assembler::VL1RE8(Vec vd, GPR rs) noexcept {
   1672     VLRE8(1, vd, rs);
   1673 }
   1674 
   1675 void Assembler::VL2RE8(Vec vd, GPR rs) noexcept {
   1676     VLRE8(2, vd, rs);
   1677 }
   1678 
   1679 void Assembler::VL4RE8(Vec vd, GPR rs) noexcept {
   1680     VLRE8(4, vd, rs);
   1681 }
   1682 
   1683 void Assembler::VL8RE8(Vec vd, GPR rs) noexcept {
   1684     VLRE8(8, vd, rs);
   1685 }
   1686 
   1687 void Assembler::VLRE16(uint32_t num_registers, Vec vd, GPR rs) noexcept {
   1688     BISCUIT_ASSERT(vd.Index() % num_registers == 0);
   1689     EmitVectorLoadWholeReg(m_buffer, num_registers, false, rs, WidthEncoding::E16, vd);
   1690 }
   1691 
   1692 void Assembler::VL1RE16(Vec vd, GPR rs) noexcept {
   1693     VLRE16(1, vd, rs);
   1694 }
   1695 
   1696 void Assembler::VL2RE16(Vec vd, GPR rs) noexcept {
   1697     VLRE16(2, vd, rs);
   1698 }
   1699 
   1700 void Assembler::VL4RE16(Vec vd, GPR rs) noexcept {
   1701     VLRE16(4, vd, rs);
   1702 }
   1703 
   1704 void Assembler::VL8RE16(Vec vd, GPR rs) noexcept {
   1705     VLRE16(8, vd, rs);
   1706 }
   1707 
   1708 void Assembler::VLRE32(uint32_t num_registers, Vec vd, GPR rs) noexcept {
   1709     BISCUIT_ASSERT(vd.Index() % num_registers == 0);
   1710     EmitVectorLoadWholeReg(m_buffer, num_registers, false, rs, WidthEncoding::E32, vd);
   1711 }
   1712 
   1713 void Assembler::VL1RE32(Vec vd, GPR rs) noexcept {
   1714     VLRE32(1, vd, rs);
   1715 }
   1716 
   1717 void Assembler::VL2RE32(Vec vd, GPR rs) noexcept {
   1718     VLRE32(2, vd, rs);
   1719 }
   1720 
   1721 void Assembler::VL4RE32(Vec vd, GPR rs) noexcept {
   1722     VLRE32(4, vd, rs);
   1723 }
   1724 
   1725 void Assembler::VL8RE32(Vec vd, GPR rs) noexcept {
   1726     VLRE32(8, vd, rs);
   1727 }
   1728 
   1729 void Assembler::VLRE64(uint32_t num_registers, Vec vd, GPR rs) noexcept {
   1730     BISCUIT_ASSERT(vd.Index() % num_registers == 0);
   1731     EmitVectorLoadWholeReg(m_buffer, num_registers, false, rs, WidthEncoding::E64, vd);
   1732 }
   1733 
   1734 void Assembler::VL1RE64(Vec vd, GPR rs) noexcept {
   1735     VLRE64(1, vd, rs);
   1736 }
   1737 
   1738 void Assembler::VL2RE64(Vec vd, GPR rs) noexcept {
   1739     VLRE64(2, vd, rs);
   1740 }
   1741 
   1742 void Assembler::VL4RE64(Vec vd, GPR rs) noexcept {
   1743     VLRE64(4, vd, rs);
   1744 }
   1745 
   1746 void Assembler::VL8RE64(Vec vd, GPR rs) noexcept {
   1747     VLRE64(8, vd, rs);
   1748 }
   1749 
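        // Stores mirror the loads above: the element forms forward to the
        // one-segment case of the segmented stores.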
   1750 void Assembler::VSE8(Vec vs, GPR rs, VecMask mask) noexcept {
   1751     VSSEGE8(1, vs, rs, mask);
   1752 }
   1753 
   1754 void Assembler::VSE16(Vec vs, GPR rs, VecMask mask) noexcept {
   1755     VSSEGE16(1, vs, rs, mask);
   1756 }
   1757 
   1758 void Assembler::VSE32(Vec vs, GPR rs, VecMask mask) noexcept {
   1759     VSSEGE32(1, vs, rs, mask);
   1760 }
   1761 
   1762 void Assembler::VSE64(Vec vs, GPR rs, VecMask mask) noexcept {
   1763     VSSEGE64(1, vs, rs, mask);
   1764 }
   1765 
   1766 void Assembler::VSM(Vec vs, GPR rs) noexcept {
   1767     EmitVectorStore(m_buffer, 0b000, false, AddressingMode::UnitStride, VecMask::No,
   1768                     UnitStrideStoreAddressingMode::MaskStore, rs, WidthEncoding::E8, vs);
   1769 }
   1770 
   1771 void Assembler::VSSE8(Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1772     VSSSEGE8(1, vs, rs1, rs2, mask);
   1773 }
   1774 
   1775 void Assembler::VSSE16(Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1776     VSSSEGE16(1, vs, rs1, rs2, mask);
   1777 }
   1778 
   1779 void Assembler::VSSE32(Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1780     VSSSEGE32(1, vs, rs1, rs2, mask);
   1781 }
   1782 
   1783 void Assembler::VSSE64(Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1784     VSSSEGE64(1, vs, rs1, rs2, mask);
   1785 }
   1786 
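        // Indexed stores: vd here is the data source (vs3 in the spec encoding),
        // vs holds the byte offsets, and rs the base address.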
   1787 void Assembler::VSOXEI8(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1788     VSOXSEGEI8(1, vd, rs, vs, mask);
   1789 }
   1790 
   1791 void Assembler::VSOXEI16(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1792     VSOXSEGEI16(1, vd, rs, vs, mask);
   1793 }
   1794 
   1795 void Assembler::VSOXEI32(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1796     VSOXSEGEI32(1, vd, rs, vs, mask);
   1797 }
   1798 
   1799 void Assembler::VSOXEI64(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1800     VSOXSEGEI64(1, vd, rs, vs, mask);
   1801 }
   1802 
   1803 void Assembler::VSUXEI8(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1804     VSUXSEGEI8(1, vd, rs, vs, mask);
   1805 }
   1806 
   1807 void Assembler::VSUXEI16(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1808     VSUXSEGEI16(1, vd, rs, vs, mask);
   1809 }
   1810 
   1811 void Assembler::VSUXEI32(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1812     VSUXSEGEI32(1, vd, rs, vs, mask);
   1813 }
   1814 
   1815 void Assembler::VSUXEI64(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1816     VSUXSEGEI64(1, vd, rs, vs, mask);
   1817 }
   1818 
   1819 void Assembler::VSSEGE8(uint32_t num_segments, Vec vs, GPR rs, VecMask mask) noexcept {
   1820     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
   1821                     UnitStrideStoreAddressingMode::Store, rs, WidthEncoding::E8, vs);
   1822 }
   1823 
   1824 void Assembler::VSSEGE16(uint32_t num_segments, Vec vs, GPR rs, VecMask mask) noexcept {
   1825     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
   1826                     UnitStrideStoreAddressingMode::Store, rs, WidthEncoding::E16, vs);
   1827 }
   1828 
   1829 void Assembler::VSSEGE32(uint32_t num_segments, Vec vs, GPR rs, VecMask mask) noexcept {
   1830     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
   1831                     UnitStrideStoreAddressingMode::Store, rs, WidthEncoding::E32, vs);
   1832 }
   1833 
   1834 void Assembler::VSSEGE64(uint32_t num_segments, Vec vs, GPR rs, VecMask mask) noexcept {
   1835     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::UnitStride, mask,
   1836                     UnitStrideStoreAddressingMode::Store, rs, WidthEncoding::E64, vs);
   1837 }
   1838 
   1839 void Assembler::VSSSEGE8(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1840     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::Strided, mask,
   1841                     rs2, rs1, WidthEncoding::E8, vs);
   1842 }
   1843 
   1844 void Assembler::VSSSEGE16(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1845     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::Strided, mask,
   1846                     rs2, rs1, WidthEncoding::E16, vs);
   1847 }
   1848 
   1849 void Assembler::VSSSEGE32(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1850     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::Strided, mask,
   1851                     rs2, rs1, WidthEncoding::E32, vs);
   1852 }
   1853 
   1854 void Assembler::VSSSEGE64(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept {
   1855     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::Strided, mask,
   1856                     rs2, rs1, WidthEncoding::E64, vs);
   1857 }
   1858 
   1859 void Assembler::VSOXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1860     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
   1861                     vs, rs, WidthEncoding::E8, vd);
   1862 }
   1863 
   1864 void Assembler::VSOXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1865     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
   1866                     vs, rs, WidthEncoding::E16, vd);
   1867 }
   1868 
   1869 void Assembler::VSOXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1870     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
   1871                     vs, rs, WidthEncoding::E32, vd);
   1872 }
   1873 
   1874 void Assembler::VSOXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1875     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask,
   1876                     vs, rs, WidthEncoding::E64, vd);
   1877 }
   1878 
   1879 void Assembler::VSUXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1880     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
   1881                     vs, rs, WidthEncoding::E8, vd);
   1882 }
   1883 
   1884 void Assembler::VSUXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1885     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
   1886                     vs, rs, WidthEncoding::E16, vd);
   1887 }
   1888 
   1889 void Assembler::VSUXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1890     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
   1891                     vs, rs, WidthEncoding::E32, vd);
   1892 }
   1893 
   1894 void Assembler::VSUXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
   1895     EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
   1896                     vs, rs, WidthEncoding::E64, vd);
   1897 }
   1898 
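        // Whole-register stores (vs<nf>r.v). Unlike the loads there is a single
        // encoding with EEW = 8, so no per-width variants exist.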
   1899 void Assembler::VSR(uint32_t num_registers, Vec vs, GPR rs) noexcept {
            // Group alignment check, mirroring the VLRE* loads above.
            BISCUIT_ASSERT(vs.Index() % num_registers == 0);
   1900     EmitVectorStoreWholeReg(m_buffer, num_registers, rs, vs);
   1901 }
   1902 
   1903 void Assembler::VS1R(Vec vs, GPR rs) noexcept {
   1904     VSR(1, vs, rs);
   1905 }
   1906 
   1907 void Assembler::VS2R(Vec vs, GPR rs) noexcept {
   1908     BISCUIT_ASSERT(vs.Index() % 2 == 0);
   1909     VSR(2, vs, rs);
   1910 }
   1911 
   1912 void Assembler::VS4R(Vec vs, GPR rs) noexcept {
   1913     BISCUIT_ASSERT(vs.Index() % 4 == 0);
   1914     VSR(4, vs, rs);
   1915 }
   1916 
   1917 void Assembler::VS8R(Vec vs, GPR rs) noexcept {
   1918     BISCUIT_ASSERT(vs.Index() % 8 == 0);
   1919     VSR(8, vs, rs);
   1920 }
   1921 
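        // Both vsetivli and vsetvli pack vtype into the upper immediate:
        // vlmul in bits [2:0], vsew in [5:3], vta in bit 6, vma in bit 7.
        // For vsetivli the AVL is the 5-bit unsigned immediate in bits 19:15,
        // e.g. (enumerator spellings assumed from this library's vector types):
        //     as.VSETIVLI(a0, 8, SEW::E32, LMUL::M1, VTA::No, VMA::No);
        // requests an AVL of 8 with 32-bit elements.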
   1922 void Assembler::VSETIVLI(GPR rd, uint32_t imm, SEW sew, LMUL lmul, VTA vta, VMA vma) noexcept {
   1923     // The unsigned immediate (AVL) must fit in 5 bits.
   1924     BISCUIT_ASSERT(imm <= 31);
   1925 
   1926     // clang-format off
   1927     const auto zimm = static_cast<uint32_t>(lmul) |
   1928                       (static_cast<uint32_t>(sew) << 3) |
   1929                       (static_cast<uint32_t>(vta) << 6) |
   1930                       (static_cast<uint32_t>(vma) << 7);
   1931     // clang-format on
   1932 
   1933     m_buffer.Emit32(0xC0007057U | (zimm << 20) | (imm << 15) | (rd.Index() << 7));
   1934 }
   1935 
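        // vsetvl reads the AVL from rs1 and the new vtype from rs2, for values
        // that are not known at assembly time.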
   1936 void Assembler::VSETVL(GPR rd, GPR rs1, GPR rs2) noexcept {
   1937     m_buffer.Emit32(0x80007057U | (rs2.Index() << 20) | (rs1.Index() << 15) | (rd.Index() << 7));
   1938 }
   1939 
   1940 void Assembler::VSETVLI(GPR rd, GPR rs, SEW sew, LMUL lmul, VTA vta, VMA vma) noexcept {
   1941     // clang-format off
   1942     const auto zimm = static_cast<uint32_t>(lmul) |
   1943                       (static_cast<uint32_t>(sew) << 3) |
   1944                       (static_cast<uint32_t>(vta) << 6) |
   1945                       (static_cast<uint32_t>(vma) << 7);
   1946     // clang-format on
   1947 
   1948     m_buffer.Emit32(0x00007057U | (zimm << 20) | (rs.Index() << 15) | (rd.Index() << 7));
   1949 }
   1950 
   1951 } // namespace biscuit