duckstation

duckstation, archived at the revision just before upstream turned it into a proprietary software project; this version is the libre one
git clone https://git.neptards.moe/u3shit/duckstation.git

assembler-aarch64.cc (234192B)


      1 // Copyright 2015, VIXL authors
      2 // All rights reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions are met:
      6 //
      7 //   * Redistributions of source code must retain the above copyright notice,
      8 //     this list of conditions and the following disclaimer.
      9 //   * Redistributions in binary form must reproduce the above copyright notice,
     10 //     this list of conditions and the following disclaimer in the documentation
     11 //     and/or other materials provided with the distribution.
     12 //   * Neither the name of ARM Limited nor the names of its contributors may be
     13 //     used to endorse or promote products derived from this software without
     14 //     specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
     17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
     20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
     23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     26 
     27 
     28 #include "assembler-aarch64.h"
     29 
     30 #include <cmath>
     31 
     32 #include "macro-assembler-aarch64.h"
     33 
     34 namespace vixl {
     35 namespace aarch64 {
     36 
     37 RawLiteral::RawLiteral(size_t size,
     38                        LiteralPool* literal_pool,
     39                        DeletionPolicy deletion_policy)
     40     : size_(size),
     41       offset_(0),
     42       low64_(0),
     43       high64_(0),
     44       literal_pool_(literal_pool),
     45       deletion_policy_(deletion_policy) {
     46   VIXL_ASSERT((deletion_policy == kManuallyDeleted) || (literal_pool_ != NULL));
     47   if (deletion_policy == kDeletedOnPoolDestruction) {
     48     literal_pool_->DeleteOnDestruction(this);
     49   }
     50 }
     51 
     52 
     53 void Assembler::Reset() { GetBuffer()->Reset(); }
     54 
     55 
     56 void Assembler::bind(Label* label) {
     57   BindToOffset(label, GetBuffer()->GetCursorOffset());
     58 }
     59 
     60 
     61 void Assembler::BindToOffset(Label* label, ptrdiff_t offset) {
     62   VIXL_ASSERT((offset >= 0) && (offset <= GetBuffer()->GetCursorOffset()));
     63   VIXL_ASSERT(offset % kInstructionSize == 0);
     64 
     65   label->Bind(offset);
     66 
     67   for (Label::LabelLinksIterator it(label); !it.Done(); it.Advance()) {
     68     Instruction* link =
     69         GetBuffer()->GetOffsetAddress<Instruction*>(*it.Current());
     70     link->SetImmPCOffsetTarget(GetLabelAddress<Instruction*>(label));
     71   }
     72   label->ClearAllLinks();
     73 }
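        // Usage sketch (illustrative addition, not from the upstream sources;
        // it assumes the conventional '__' shorthand for an assembler pointer):
        //   Label done;
        //   __ cbz(x0, &done);   // Forward reference: recorded as a link.
        //   __ sub(x0, x0, 1);
        //   __ bind(&done);      // Patches every linked branch to target here.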
     74 
     75 
     76 // A common implementation for the LinkAndGet<Type>OffsetTo helpers.
     77 //
     78 // The offset is calculated by aligning the PC and label addresses down to a
     79 // multiple of 1 << element_shift, then calculating the (scaled) offset between
     80 // them. This matches the semantics of adrp, for example.
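        // For illustration only (numbers added in this archive, not in the
        // upstream comment): with element_shift == kPageSizeLog2 (12), a cursor
        // address of 0x1234 and a bound label at 0x5678 give
        //   (0x5678 >> 12) - (0x1234 >> 12) = 5 - 1 = 4,
        // i.e. an adrp page offset of +4.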
     81 template <int element_shift>
     82 ptrdiff_t Assembler::LinkAndGetOffsetTo(Label* label) {
     83   VIXL_STATIC_ASSERT(element_shift < (sizeof(ptrdiff_t) * 8));
     84 
     85   if (label->IsBound()) {
     86     uintptr_t pc_offset = GetCursorAddress<uintptr_t>() >> element_shift;
     87     uintptr_t label_offset = GetLabelAddress<uintptr_t>(label) >> element_shift;
     88     return label_offset - pc_offset;
     89   } else {
     90     label->AddLink(GetBuffer()->GetCursorOffset());
     91     return 0;
     92   }
     93 }
     94 
     95 
     96 ptrdiff_t Assembler::LinkAndGetByteOffsetTo(Label* label) {
     97   return LinkAndGetOffsetTo<0>(label);
     98 }
     99 
    100 
    101 ptrdiff_t Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
    102   return LinkAndGetOffsetTo<kInstructionSizeLog2>(label);
    103 }
    104 
    105 
    106 ptrdiff_t Assembler::LinkAndGetPageOffsetTo(Label* label) {
    107   return LinkAndGetOffsetTo<kPageSizeLog2>(label);
    108 }
    109 
    110 
    111 void Assembler::place(RawLiteral* literal) {
    112   VIXL_ASSERT(!literal->IsPlaced());
    113 
    114   // Patch instructions using this literal.
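          // Descriptive note added in this archive: until the literal is
          // placed, its uses form a chain threaded through the imm19 fields of
          // the load-literal instructions themselves. Each use holds the
          // scaled, non-positive offset back to the previous use, and an
          // offset of zero marks the oldest use, terminating the walk below.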
    115   if (literal->IsUsed()) {
    116     Instruction* target = GetCursorAddress<Instruction*>();
    117     ptrdiff_t offset = literal->GetLastUse();
    118     bool done;
    119     do {
    120       Instruction* ldr = GetBuffer()->GetOffsetAddress<Instruction*>(offset);
    121       VIXL_ASSERT(ldr->IsLoadLiteral());
    122 
    123       ptrdiff_t imm19 = ldr->GetImmLLiteral();
    124       VIXL_ASSERT(imm19 <= 0);
    125       done = (imm19 == 0);
    126       offset += imm19 * kLiteralEntrySize;
    127 
    128       ldr->SetImmLLiteral(target);
    129     } while (!done);
    130   }
    131 
    132   // "bind" the literal.
    133   literal->SetOffset(GetCursorOffset());
    134   // Copy the data into the pool.
    135   switch (literal->GetSize()) {
    136     case kSRegSizeInBytes:
    137       dc32(literal->GetRawValue32());
    138       break;
    139     case kDRegSizeInBytes:
    140       dc64(literal->GetRawValue64());
    141       break;
    142     default:
    143       VIXL_ASSERT(literal->GetSize() == kQRegSizeInBytes);
    144       dc64(literal->GetRawValue128Low64());
    145       dc64(literal->GetRawValue128High64());
    146   }
    147 
    148   literal->literal_pool_ = NULL;
    149 }
    150 
    151 
    152 ptrdiff_t Assembler::LinkAndGetWordOffsetTo(RawLiteral* literal) {
    153   VIXL_ASSERT(IsWordAligned(GetCursorOffset()));
    154 
    155   bool register_first_use =
    156       (literal->GetLiteralPool() != NULL) && !literal->IsUsed();
    157 
    158   if (literal->IsPlaced()) {
    159     // The literal is "behind"; the offset will be negative.
    160     VIXL_ASSERT((literal->GetOffset() - GetCursorOffset()) <= 0);
    161     return (literal->GetOffset() - GetCursorOffset()) >> kLiteralEntrySizeLog2;
    162   }
    163 
    164   ptrdiff_t offset = 0;
    165   // Link all uses together.
    166   if (literal->IsUsed()) {
    167     offset =
    168         (literal->GetLastUse() - GetCursorOffset()) >> kLiteralEntrySizeLog2;
    169   }
    170   literal->SetLastUse(GetCursorOffset());
    171 
    172   if (register_first_use) {
    173     literal->GetLiteralPool()->AddEntry(literal);
    174   }
    175 
    176   return offset;
    177 }
    178 
    179 
    180 // Code generation.
    181 void Assembler::br(const Register& xn) {
    182   VIXL_ASSERT(xn.Is64Bits());
    183   Emit(BR | Rn(xn));
    184 }
    185 
    186 
    187 void Assembler::blr(const Register& xn) {
    188   VIXL_ASSERT(xn.Is64Bits());
    189   Emit(BLR | Rn(xn));
    190 }
    191 
    192 
    193 void Assembler::ret(const Register& xn) {
    194   VIXL_ASSERT(xn.Is64Bits());
    195   Emit(RET | Rn(xn));
    196 }
    197 
    198 
    199 void Assembler::braaz(const Register& xn) {
    200   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
    201   VIXL_ASSERT(xn.Is64Bits());
    202   Emit(BRAAZ | Rn(xn) | Rd_mask);
    203 }
    204 
    205 void Assembler::brabz(const Register& xn) {
    206   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
    207   VIXL_ASSERT(xn.Is64Bits());
    208   Emit(BRABZ | Rn(xn) | Rd_mask);
    209 }
    210 
    211 void Assembler::blraaz(const Register& xn) {
    212   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
    213   VIXL_ASSERT(xn.Is64Bits());
    214   Emit(BLRAAZ | Rn(xn) | Rd_mask);
    215 }
    216 
    217 void Assembler::blrabz(const Register& xn) {
    218   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
    219   VIXL_ASSERT(xn.Is64Bits());
    220   Emit(BLRABZ | Rn(xn) | Rd_mask);
    221 }
    222 
    223 void Assembler::retaa() {
    224   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
    225   Emit(RETAA | Rn_mask | Rd_mask);
    226 }
    227 
    228 void Assembler::retab() {
    229   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
    230   Emit(RETAB | Rn_mask | Rd_mask);
    231 }
    232 
    233 // The Arm ARM names the register Xm but encodes it in the Xd bitfield.
    234 void Assembler::braa(const Register& xn, const Register& xm) {
    235   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
    236   VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits());
    237   Emit(BRAA | Rn(xn) | RdSP(xm));
    238 }
    239 
    240 void Assembler::brab(const Register& xn, const Register& xm) {
    241   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
    242   VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits());
    243   Emit(BRAB | Rn(xn) | RdSP(xm));
    244 }
    245 
    246 void Assembler::blraa(const Register& xn, const Register& xm) {
    247   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
    248   VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits());
    249   Emit(BLRAA | Rn(xn) | RdSP(xm));
    250 }
    251 
    252 void Assembler::blrab(const Register& xn, const Register& xm) {
    253   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
    254   VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits());
    255   Emit(BLRAB | Rn(xn) | RdSP(xm));
    256 }
    257 
    258 
    259 void Assembler::b(int64_t imm26) { Emit(B | ImmUncondBranch(imm26)); }
    260 
    261 
    262 void Assembler::b(int64_t imm19, Condition cond) {
    263   Emit(B_cond | ImmCondBranch(imm19) | cond);
    264 }
    265 
    266 
    267 void Assembler::b(Label* label) {
    268   int64_t offset = LinkAndGetInstructionOffsetTo(label);
    269   VIXL_ASSERT(Instruction::IsValidImmPCOffset(UncondBranchType, offset));
    270   b(static_cast<int>(offset));
    271 }
    272 
    273 
    274 void Assembler::b(Label* label, Condition cond) {
    275   int64_t offset = LinkAndGetInstructionOffsetTo(label);
    276   VIXL_ASSERT(Instruction::IsValidImmPCOffset(CondBranchType, offset));
    277   b(static_cast<int>(offset), cond);
    278 }
    279 
    280 
    281 void Assembler::bl(int64_t imm26) { Emit(BL | ImmUncondBranch(imm26)); }
    282 
    283 
    284 void Assembler::bl(Label* label) {
    285   int64_t offset = LinkAndGetInstructionOffsetTo(label);
    286   VIXL_ASSERT(Instruction::IsValidImmPCOffset(UncondBranchType, offset));
    287   bl(static_cast<int>(offset));
    288 }
    289 
    290 
    291 void Assembler::cbz(const Register& rt, int64_t imm19) {
    292   Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
    293 }
    294 
    295 
    296 void Assembler::cbz(const Register& rt, Label* label) {
    297   int64_t offset = LinkAndGetInstructionOffsetTo(label);
    298   VIXL_ASSERT(Instruction::IsValidImmPCOffset(CompareBranchType, offset));
    299   cbz(rt, static_cast<int>(offset));
    300 }
    301 
    302 
    303 void Assembler::cbnz(const Register& rt, int64_t imm19) {
    304   Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
    305 }
    306 
    307 
    308 void Assembler::cbnz(const Register& rt, Label* label) {
    309   int64_t offset = LinkAndGetInstructionOffsetTo(label);
    310   VIXL_ASSERT(Instruction::IsValidImmPCOffset(CompareBranchType, offset));
    311   cbnz(rt, static_cast<int>(offset));
    312 }
    313 
    314 
    315 void Assembler::NEONTable(const VRegister& vd,
    316                           const VRegister& vn,
    317                           const VRegister& vm,
    318                           NEONTableOp op) {
    319   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
    320   VIXL_ASSERT(vd.Is16B() || vd.Is8B());
    321   VIXL_ASSERT(vn.Is16B());
    322   VIXL_ASSERT(AreSameFormat(vd, vm));
    323   Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
    324 }
    325 
    326 
    327 void Assembler::tbl(const VRegister& vd,
    328                     const VRegister& vn,
    329                     const VRegister& vm) {
    330   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
    331   NEONTable(vd, vn, vm, NEON_TBL_1v);
    332 }
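        // Example (illustrative, not part of the upstream sources):
        //   __ tbl(v0.V16B(), v1.V16B(), v2.V16B());
        // looks up each byte of v2 as an index into table register v1, writing
        // zero for out-of-range indices (tbx instead leaves those bytes
        // unchanged).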
    333 
    334 
    335 void Assembler::tbl(const VRegister& vd,
    336                     const VRegister& vn,
    337                     const VRegister& vn2,
    338                     const VRegister& vm) {
    339   USE(vn2);
    340   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
    341   VIXL_ASSERT(AreSameFormat(vn, vn2));
    342   VIXL_ASSERT(AreConsecutive(vn, vn2));
    343   NEONTable(vd, vn, vm, NEON_TBL_2v);
    344 }
    345 
    346 
    347 void Assembler::tbl(const VRegister& vd,
    348                     const VRegister& vn,
    349                     const VRegister& vn2,
    350                     const VRegister& vn3,
    351                     const VRegister& vm) {
    352   USE(vn2, vn3);
    353   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
    354   VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
    355   VIXL_ASSERT(AreConsecutive(vn, vn2, vn3));
    356   NEONTable(vd, vn, vm, NEON_TBL_3v);
    357 }
    358 
    359 
    360 void Assembler::tbl(const VRegister& vd,
    361                     const VRegister& vn,
    362                     const VRegister& vn2,
    363                     const VRegister& vn3,
    364                     const VRegister& vn4,
    365                     const VRegister& vm) {
    366   USE(vn2, vn3, vn4);
    367   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
    368   VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
    369   VIXL_ASSERT(AreConsecutive(vn, vn2, vn3, vn4));
    370   NEONTable(vd, vn, vm, NEON_TBL_4v);
    371 }
    372 
    373 
    374 void Assembler::tbx(const VRegister& vd,
    375                     const VRegister& vn,
    376                     const VRegister& vm) {
    377   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
    378   NEONTable(vd, vn, vm, NEON_TBX_1v);
    379 }
    380 
    381 
    382 void Assembler::tbx(const VRegister& vd,
    383                     const VRegister& vn,
    384                     const VRegister& vn2,
    385                     const VRegister& vm) {
    386   USE(vn2);
    387   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
    388   VIXL_ASSERT(AreSameFormat(vn, vn2));
    389   VIXL_ASSERT(AreConsecutive(vn, vn2));
    390   NEONTable(vd, vn, vm, NEON_TBX_2v);
    391 }
    392 
    393 
    394 void Assembler::tbx(const VRegister& vd,
    395                     const VRegister& vn,
    396                     const VRegister& vn2,
    397                     const VRegister& vn3,
    398                     const VRegister& vm) {
    399   USE(vn2, vn3);
    400   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
    401   VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
    402   VIXL_ASSERT(AreConsecutive(vn, vn2, vn3));
    403   NEONTable(vd, vn, vm, NEON_TBX_3v);
    404 }
    405 
    406 
    407 void Assembler::tbx(const VRegister& vd,
    408                     const VRegister& vn,
    409                     const VRegister& vn2,
    410                     const VRegister& vn3,
    411                     const VRegister& vn4,
    412                     const VRegister& vm) {
    413   USE(vn2, vn3, vn4);
    414   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
    415   VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
    416   VIXL_ASSERT(AreConsecutive(vn, vn2, vn3, vn4));
    417   NEONTable(vd, vn, vm, NEON_TBX_4v);
    418 }
    419 
    420 
    421 void Assembler::tbz(const Register& rt, unsigned bit_pos, int64_t imm14) {
    422   VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
    423   Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
    424 }
    425 
    426 
    427 void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
    428   ptrdiff_t offset = LinkAndGetInstructionOffsetTo(label);
    429   VIXL_ASSERT(Instruction::IsValidImmPCOffset(TestBranchType, offset));
    430   tbz(rt, bit_pos, static_cast<int>(offset));
    431 }
    432 
    433 
    434 void Assembler::tbnz(const Register& rt, unsigned bit_pos, int64_t imm14) {
    435   VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
    436   Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
    437 }
    438 
    439 
    440 void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
    441   ptrdiff_t offset = LinkAndGetInstructionOffsetTo(label);
    442   VIXL_ASSERT(Instruction::IsValidImmPCOffset(TestBranchType, offset));
    443   tbnz(rt, bit_pos, static_cast<int>(offset));
    444 }
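        // Example (illustrative): __ tbnz(w0, 3, &target); branches to 'target'
        // if bit 3 of w0 is set.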
    445 
    446 
    447 void Assembler::adr(const Register& xd, int64_t imm21) {
    448   VIXL_ASSERT(xd.Is64Bits());
    449   Emit(ADR | ImmPCRelAddress(imm21) | Rd(xd));
    450 }
    451 
    452 
    453 void Assembler::adr(const Register& xd, Label* label) {
    454   adr(xd, static_cast<int>(LinkAndGetByteOffsetTo(label)));
    455 }
    456 
    457 
    458 void Assembler::adrp(const Register& xd, int64_t imm21) {
    459   VIXL_ASSERT(xd.Is64Bits());
    460   Emit(ADRP | ImmPCRelAddress(imm21) | Rd(xd));
    461 }
    462 
    463 
    464 void Assembler::adrp(const Register& xd, Label* label) {
    465   VIXL_ASSERT(AllowPageOffsetDependentCode());
    466   adrp(xd, static_cast<int>(LinkAndGetPageOffsetTo(label)));
    467 }
    468 
    469 
    470 void Assembler::add(const Register& rd,
    471                     const Register& rn,
    472                     const Operand& operand) {
    473   AddSub(rd, rn, operand, LeaveFlags, ADD);
    474 }
    475 
    476 
    477 void Assembler::adds(const Register& rd,
    478                      const Register& rn,
    479                      const Operand& operand) {
    480   AddSub(rd, rn, operand, SetFlags, ADD);
    481 }
    482 
    483 
    484 void Assembler::cmn(const Register& rn, const Operand& operand) {
    485   Register zr = AppropriateZeroRegFor(rn);
    486   adds(zr, rn, operand);
    487 }
    488 
    489 
    490 void Assembler::sub(const Register& rd,
    491                     const Register& rn,
    492                     const Operand& operand) {
    493   AddSub(rd, rn, operand, LeaveFlags, SUB);
    494 }
    495 
    496 
    497 void Assembler::subs(const Register& rd,
    498                      const Register& rn,
    499                      const Operand& operand) {
    500   AddSub(rd, rn, operand, SetFlags, SUB);
    501 }
    502 
    503 
    504 void Assembler::cmp(const Register& rn, const Operand& operand) {
    505   Register zr = AppropriateZeroRegFor(rn);
    506   subs(zr, rn, operand);
    507 }
    508 
    509 
    510 void Assembler::neg(const Register& rd, const Operand& operand) {
    511   Register zr = AppropriateZeroRegFor(rd);
    512   sub(rd, zr, operand);
    513 }
    514 
    515 
    516 void Assembler::negs(const Register& rd, const Operand& operand) {
    517   Register zr = AppropriateZeroRegFor(rd);
    518   subs(rd, zr, operand);
    519 }
    520 
    521 
    522 void Assembler::adc(const Register& rd,
    523                     const Register& rn,
    524                     const Operand& operand) {
    525   AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
    526 }
    527 
    528 
    529 void Assembler::adcs(const Register& rd,
    530                      const Register& rn,
    531                      const Operand& operand) {
    532   AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
    533 }
    534 
    535 
    536 void Assembler::sbc(const Register& rd,
    537                     const Register& rn,
    538                     const Operand& operand) {
    539   AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
    540 }
    541 
    542 
    543 void Assembler::sbcs(const Register& rd,
    544                      const Register& rn,
    545                      const Operand& operand) {
    546   AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
    547 }
    548 
    549 
    550 void Assembler::rmif(const Register& xn, unsigned rotation, StatusFlags flags) {
    551   VIXL_ASSERT(CPUHas(CPUFeatures::kFlagM));
    552   VIXL_ASSERT(xn.Is64Bits());
    553   Emit(RMIF | Rn(xn) | ImmRMIFRotation(rotation) | Nzcv(flags));
    554 }
    555 
    556 
    557 void Assembler::setf8(const Register& rn) {
    558   VIXL_ASSERT(CPUHas(CPUFeatures::kFlagM));
    559   Emit(SETF8 | Rn(rn));
    560 }
    561 
    562 
    563 void Assembler::setf16(const Register& rn) {
    564   VIXL_ASSERT(CPUHas(CPUFeatures::kFlagM));
    565   Emit(SETF16 | Rn(rn));
    566 }
    567 
    568 
    569 void Assembler::ngc(const Register& rd, const Operand& operand) {
    570   Register zr = AppropriateZeroRegFor(rd);
    571   sbc(rd, zr, operand);
    572 }
    573 
    574 
    575 void Assembler::ngcs(const Register& rd, const Operand& operand) {
    576   Register zr = AppropriateZeroRegFor(rd);
    577   sbcs(rd, zr, operand);
    578 }
    579 
    580 
    581 // Logical instructions.
    582 void Assembler::and_(const Register& rd,
    583                      const Register& rn,
    584                      const Operand& operand) {
    585   Logical(rd, rn, operand, AND);
    586 }
    587 
    588 
    589 void Assembler::ands(const Register& rd,
    590                      const Register& rn,
    591                      const Operand& operand) {
    592   Logical(rd, rn, operand, ANDS);
    593 }
    594 
    595 
    596 void Assembler::tst(const Register& rn, const Operand& operand) {
    597   ands(AppropriateZeroRegFor(rn), rn, operand);
    598 }
    599 
    600 
    601 void Assembler::bic(const Register& rd,
    602                     const Register& rn,
    603                     const Operand& operand) {
    604   Logical(rd, rn, operand, BIC);
    605 }
    606 
    607 
    608 void Assembler::bics(const Register& rd,
    609                      const Register& rn,
    610                      const Operand& operand) {
    611   Logical(rd, rn, operand, BICS);
    612 }
    613 
    614 
    615 void Assembler::orr(const Register& rd,
    616                     const Register& rn,
    617                     const Operand& operand) {
    618   Logical(rd, rn, operand, ORR);
    619 }
    620 
    621 
    622 void Assembler::orn(const Register& rd,
    623                     const Register& rn,
    624                     const Operand& operand) {
    625   Logical(rd, rn, operand, ORN);
    626 }
    627 
    628 
    629 void Assembler::eor(const Register& rd,
    630                     const Register& rn,
    631                     const Operand& operand) {
    632   Logical(rd, rn, operand, EOR);
    633 }
    634 
    635 
    636 void Assembler::eon(const Register& rd,
    637                     const Register& rn,
    638                     const Operand& operand) {
    639   Logical(rd, rn, operand, EON);
    640 }
    641 
    642 
    643 void Assembler::lslv(const Register& rd,
    644                      const Register& rn,
    645                      const Register& rm) {
    646   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
    647   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
    648   Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
    649 }
    650 
    651 
    652 void Assembler::lsrv(const Register& rd,
    653                      const Register& rn,
    654                      const Register& rm) {
    655   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
    656   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
    657   Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
    658 }
    659 
    660 
    661 void Assembler::asrv(const Register& rd,
    662                      const Register& rn,
    663                      const Register& rm) {
    664   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
    665   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
    666   Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
    667 }
    668 
    669 
    670 void Assembler::rorv(const Register& rd,
    671                      const Register& rn,
    672                      const Register& rm) {
    673   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
    674   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
    675   Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
    676 }
    677 
    678 
    679 // Bitfield operations.
    680 void Assembler::bfm(const Register& rd,
    681                     const Register& rn,
    682                     unsigned immr,
    683                     unsigned imms) {
    684   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
    685   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
    686   Emit(SF(rd) | BFM | N | ImmR(immr, rd.GetSizeInBits()) |
    687        ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd));
    688 }
    689 
    690 
    691 void Assembler::sbfm(const Register& rd,
    692                      const Register& rn,
    693                      unsigned immr,
    694                      unsigned imms) {
    695   VIXL_ASSERT(rd.Is64Bits() || rn.Is32Bits());
    696   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
    697   Emit(SF(rd) | SBFM | N | ImmR(immr, rd.GetSizeInBits()) |
    698        ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd));
    699 }
    700 
    701 
    702 void Assembler::ubfm(const Register& rd,
    703                      const Register& rn,
    704                      unsigned immr,
    705                      unsigned imms) {
    706   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
    707   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
    708   Emit(SF(rd) | UBFM | N | ImmR(immr, rd.GetSizeInBits()) |
    709        ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd));
    710 }
    711 
    712 
    713 void Assembler::extr(const Register& rd,
    714                      const Register& rn,
    715                      const Register& rm,
    716                      unsigned lsb) {
    717   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
    718   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
    719   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
    720   Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.GetSizeInBits()) | Rn(rn) |
    721        Rd(rd));
    722 }
    723 
    724 
    725 void Assembler::csel(const Register& rd,
    726                      const Register& rn,
    727                      const Register& rm,
    728                      Condition cond) {
    729   ConditionalSelect(rd, rn, rm, cond, CSEL);
    730 }
    731 
    732 
    733 void Assembler::csinc(const Register& rd,
    734                       const Register& rn,
    735                       const Register& rm,
    736                       Condition cond) {
    737   ConditionalSelect(rd, rn, rm, cond, CSINC);
    738 }
    739 
    740 
    741 void Assembler::csinv(const Register& rd,
    742                       const Register& rn,
    743                       const Register& rm,
    744                       Condition cond) {
    745   ConditionalSelect(rd, rn, rm, cond, CSINV);
    746 }
    747 
    748 
    749 void Assembler::csneg(const Register& rd,
    750                       const Register& rn,
    751                       const Register& rm,
    752                       Condition cond) {
    753   ConditionalSelect(rd, rn, rm, cond, CSNEG);
    754 }
    755 
    756 
    757 void Assembler::cset(const Register& rd, Condition cond) {
    758   VIXL_ASSERT((cond != al) && (cond != nv));
    759   Register zr = AppropriateZeroRegFor(rd);
    760   csinc(rd, zr, zr, InvertCondition(cond));
    761 }
    762 
    763 
    764 void Assembler::csetm(const Register& rd, Condition cond) {
    765   VIXL_ASSERT((cond != al) && (cond != nv));
    766   Register zr = AppropriateZeroRegFor(rd);
    767   csinv(rd, zr, zr, InvertCondition(cond));
    768 }
    769 
    770 
    771 void Assembler::cinc(const Register& rd, const Register& rn, Condition cond) {
    772   VIXL_ASSERT((cond != al) && (cond != nv));
    773   csinc(rd, rn, rn, InvertCondition(cond));
    774 }
    775 
    776 
    777 void Assembler::cinv(const Register& rd, const Register& rn, Condition cond) {
    778   VIXL_ASSERT((cond != al) && (cond != nv));
    779   csinv(rd, rn, rn, InvertCondition(cond));
    780 }
    781 
    782 
    783 void Assembler::cneg(const Register& rd, const Register& rn, Condition cond) {
    784   VIXL_ASSERT((cond != al) && (cond != nv));
    785   csneg(rd, rn, rn, InvertCondition(cond));
    786 }
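        // These aliases expand as described in the Arm ARM; for example
        // (illustrative):
        //   cset x0, eq      ->  csinc x0, xzr, xzr, ne
        //   cneg x0, x1, lt  ->  csneg x0, x1, x1, ge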
    787 
    788 
    789 void Assembler::ConditionalSelect(const Register& rd,
    790                                   const Register& rn,
    791                                   const Register& rm,
    792                                   Condition cond,
    793                                   ConditionalSelectOp op) {
    794   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
    795   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
    796   Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
    797 }
    798 
    799 
    800 void Assembler::ccmn(const Register& rn,
    801                      const Operand& operand,
    802                      StatusFlags nzcv,
    803                      Condition cond) {
    804   ConditionalCompare(rn, operand, nzcv, cond, CCMN);
    805 }
    806 
    807 
    808 void Assembler::ccmp(const Register& rn,
    809                      const Operand& operand,
    810                      StatusFlags nzcv,
    811                      Condition cond) {
    812   ConditionalCompare(rn, operand, nzcv, cond, CCMP);
    813 }
    814 
    815 
    816 void Assembler::DataProcessing3Source(const Register& rd,
    817                                       const Register& rn,
    818                                       const Register& rm,
    819                                       const Register& ra,
    820                                       DataProcessing3SourceOp op) {
    821   Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
    822 }
    823 
    824 
    825 void Assembler::crc32b(const Register& wd,
    826                        const Register& wn,
    827                        const Register& wm) {
    828   VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
    829   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
    830   Emit(SF(wm) | Rm(wm) | CRC32B | Rn(wn) | Rd(wd));
    831 }
    832 
    833 
    834 void Assembler::crc32h(const Register& wd,
    835                        const Register& wn,
    836                        const Register& wm) {
    837   VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
    838   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
    839   Emit(SF(wm) | Rm(wm) | CRC32H | Rn(wn) | Rd(wd));
    840 }
    841 
    842 
    843 void Assembler::crc32w(const Register& wd,
    844                        const Register& wn,
    845                        const Register& wm) {
    846   VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
    847   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
    848   Emit(SF(wm) | Rm(wm) | CRC32W | Rn(wn) | Rd(wd));
    849 }
    850 
    851 
    852 void Assembler::crc32x(const Register& wd,
    853                        const Register& wn,
    854                        const Register& xm) {
    855   VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
    856   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && xm.Is64Bits());
    857   Emit(SF(xm) | Rm(xm) | CRC32X | Rn(wn) | Rd(wd));
    858 }
    859 
    860 
    861 void Assembler::crc32cb(const Register& wd,
    862                         const Register& wn,
    863                         const Register& wm) {
    864   VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
    865   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
    866   Emit(SF(wm) | Rm(wm) | CRC32CB | Rn(wn) | Rd(wd));
    867 }
    868 
    869 
    870 void Assembler::crc32ch(const Register& wd,
    871                         const Register& wn,
    872                         const Register& wm) {
    873   VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
    874   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
    875   Emit(SF(wm) | Rm(wm) | CRC32CH | Rn(wn) | Rd(wd));
    876 }
    877 
    878 
    879 void Assembler::crc32cw(const Register& wd,
    880                         const Register& wn,
    881                         const Register& wm) {
    882   VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
    883   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits());
    884   Emit(SF(wm) | Rm(wm) | CRC32CW | Rn(wn) | Rd(wd));
    885 }
    886 
    887 
    888 void Assembler::crc32cx(const Register& wd,
    889                         const Register& wn,
    890                         const Register& xm) {
    891   VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32));
    892   VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && xm.Is64Bits());
    893   Emit(SF(xm) | Rm(xm) | CRC32CX | Rn(wn) | Rd(wd));
    894 }
    895 
    896 
    897 void Assembler::mul(const Register& rd,
    898                     const Register& rn,
    899                     const Register& rm) {
    900   VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
    901   DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD);
    902 }
    903 
    904 
    905 void Assembler::madd(const Register& rd,
    906                      const Register& rn,
    907                      const Register& rm,
    908                      const Register& ra) {
    909   DataProcessing3Source(rd, rn, rm, ra, MADD);
    910 }
    911 
    912 
    913 void Assembler::mneg(const Register& rd,
    914                      const Register& rn,
    915                      const Register& rm) {
    916   VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
    917   DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB);
    918 }
    919 
    920 
    921 void Assembler::msub(const Register& rd,
    922                      const Register& rn,
    923                      const Register& rm,
    924                      const Register& ra) {
    925   DataProcessing3Source(rd, rn, rm, ra, MSUB);
    926 }
    927 
    928 
    929 void Assembler::umaddl(const Register& xd,
    930                        const Register& wn,
    931                        const Register& wm,
    932                        const Register& xa) {
    933   VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits());
    934   VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
    935   DataProcessing3Source(xd, wn, wm, xa, UMADDL_x);
    936 }
    937 
    938 
    939 void Assembler::smaddl(const Register& xd,
    940                        const Register& wn,
    941                        const Register& wm,
    942                        const Register& xa) {
    943   VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits());
    944   VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
    945   DataProcessing3Source(xd, wn, wm, xa, SMADDL_x);
    946 }
    947 
    948 
    949 void Assembler::umsubl(const Register& xd,
    950                        const Register& wn,
    951                        const Register& wm,
    952                        const Register& xa) {
    953   VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits());
    954   VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
    955   DataProcessing3Source(xd, wn, wm, xa, UMSUBL_x);
    956 }
    957 
    958 
    959 void Assembler::smsubl(const Register& xd,
    960                        const Register& wn,
    961                        const Register& wm,
    962                        const Register& xa) {
    963   VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits());
    964   VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
    965   DataProcessing3Source(xd, wn, wm, xa, SMSUBL_x);
    966 }
    967 
    968 
    969 void Assembler::smull(const Register& xd,
    970                       const Register& wn,
    971                       const Register& wm) {
    972   VIXL_ASSERT(xd.Is64Bits());
    973   VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits());
    974   DataProcessing3Source(xd, wn, wm, xzr, SMADDL_x);
    975 }
    976 
    977 
    978 void Assembler::sdiv(const Register& rd,
    979                      const Register& rn,
    980                      const Register& rm) {
    981   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
    982   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
    983   Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
    984 }
    985 
    986 
    987 void Assembler::smulh(const Register& xd,
    988                       const Register& xn,
    989                       const Register& xm) {
    990   VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
    991   DataProcessing3Source(xd, xn, xm, xzr, SMULH_x);
    992 }
    993 
    994 
    995 void Assembler::umulh(const Register& xd,
    996                       const Register& xn,
    997                       const Register& xm) {
    998   VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
    999   DataProcessing3Source(xd, xn, xm, xzr, UMULH_x);
   1000 }
   1001 
   1002 
   1003 void Assembler::udiv(const Register& rd,
   1004                      const Register& rn,
   1005                      const Register& rm) {
   1006   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
   1007   VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits());
   1008   Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
   1009 }
   1010 
   1011 
   1012 void Assembler::rbit(const Register& rd, const Register& rn) {
   1013   DataProcessing1Source(rd, rn, RBIT);
   1014 }
   1015 
   1016 
   1017 void Assembler::rev16(const Register& rd, const Register& rn) {
   1018   DataProcessing1Source(rd, rn, REV16);
   1019 }
   1020 
   1021 
   1022 void Assembler::rev32(const Register& xd, const Register& xn) {
   1023   VIXL_ASSERT(xd.Is64Bits());
   1024   DataProcessing1Source(xd, xn, REV);
   1025 }
   1026 
   1027 
   1028 void Assembler::rev(const Register& rd, const Register& rn) {
   1029   DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
   1030 }
   1031 
   1032 
   1033 void Assembler::clz(const Register& rd, const Register& rn) {
   1034   DataProcessing1Source(rd, rn, CLZ);
   1035 }
   1036 
   1037 
   1038 void Assembler::cls(const Register& rd, const Register& rn) {
   1039   DataProcessing1Source(rd, rn, CLS);
   1040 }
   1041 
   1042 #define PAUTH_VARIATIONS(V) \
   1043   V(paci, PACI)             \
   1044   V(pacd, PACD)             \
   1045   V(auti, AUTI)             \
   1046   V(autd, AUTD)
   1047 
   1048 #define VIXL_DEFINE_ASM_FUNC(PRE, OP)                              \
   1049   void Assembler::PRE##a(const Register& xd, const Register& xn) { \
   1050     VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));                      \
   1051     VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits());                   \
   1052     Emit(SF(xd) | OP##A | Rd(xd) | RnSP(xn));                      \
   1053   }                                                                \
   1054                                                                    \
   1055   void Assembler::PRE##za(const Register& xd) {                    \
   1056     VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));                      \
   1057     VIXL_ASSERT(xd.Is64Bits());                                    \
   1058     Emit(SF(xd) | OP##ZA | Rd(xd) | Rn(xzr));                      \
   1059   }                                                                \
   1060                                                                    \
   1061   void Assembler::PRE##b(const Register& xd, const Register& xn) { \
   1062     VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));                      \
   1063     VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits());                   \
   1064     Emit(SF(xd) | OP##B | Rd(xd) | RnSP(xn));                      \
   1065   }                                                                \
   1066                                                                    \
   1067   void Assembler::PRE##zb(const Register& xd) {                    \
   1068     VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));                      \
   1069     VIXL_ASSERT(xd.Is64Bits());                                    \
   1070     Emit(SF(xd) | OP##ZB | Rd(xd) | Rn(xzr));                      \
   1071   }
   1072 
   1073 PAUTH_VARIATIONS(VIXL_DEFINE_ASM_FUNC)
   1074 #undef VIXL_DEFINE_ASM_FUNC
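        // Descriptive note added in this archive: the macro block above defines
        // the sixteen pointer-authentication emitters pacia, paciza, pacib,
        // pacizb, pacda, pacdza, pacdb, pacdzb, autia, autiza, autib, autizb,
        // autda, autdza, autdb and autdzb.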
   1075 
   1076 void Assembler::pacga(const Register& xd,
   1077                       const Register& xn,
   1078                       const Register& xm) {
   1079   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth, CPUFeatures::kPAuthGeneric));
   1080   VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
   1081   Emit(SF(xd) | PACGA | Rd(xd) | Rn(xn) | RmSP(xm));
   1082 }
   1083 
   1084 void Assembler::xpaci(const Register& xd) {
   1085   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   1086   VIXL_ASSERT(xd.Is64Bits());
   1087   Emit(SF(xd) | XPACI | Rd(xd) | Rn(xzr));
   1088 }
   1089 
   1090 void Assembler::xpacd(const Register& xd) {
   1091   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   1092   VIXL_ASSERT(xd.Is64Bits());
   1093   Emit(SF(xd) | XPACD | Rd(xd) | Rn(xzr));
   1094 }
   1095 
   1096 
   1097 void Assembler::ldp(const CPURegister& rt,
   1098                     const CPURegister& rt2,
   1099                     const MemOperand& src) {
   1100   LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
   1101 }
   1102 
   1103 
   1104 void Assembler::stp(const CPURegister& rt,
   1105                     const CPURegister& rt2,
   1106                     const MemOperand& dst) {
   1107   LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
   1108 }
   1109 
   1110 
   1111 void Assembler::ldpsw(const Register& xt,
   1112                       const Register& xt2,
   1113                       const MemOperand& src) {
   1114   VIXL_ASSERT(xt.Is64Bits() && xt2.Is64Bits());
   1115   LoadStorePair(xt, xt2, src, LDPSW_x);
   1116 }
   1117 
   1118 
   1119 void Assembler::LoadStorePair(const CPURegister& rt,
   1120                               const CPURegister& rt2,
   1121                               const MemOperand& addr,
   1122                               LoadStorePairOp op) {
   1123   VIXL_ASSERT(CPUHas(rt, rt2));
   1124 
   1125   // 'rt' and 'rt2' can only be aliased for stores.
   1126   VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
   1127   VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
   1128   VIXL_ASSERT(IsImmLSPair(addr.GetOffset(), CalcLSPairDataSize(op)));
   1129 
   1130   int offset = static_cast<int>(addr.GetOffset());
   1131   Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.GetBaseRegister()) |
   1132                 ImmLSPair(offset, CalcLSPairDataSize(op));
   1133 
   1134   Instr addrmodeop;
   1135   if (addr.IsImmediateOffset()) {
   1136     addrmodeop = LoadStorePairOffsetFixed;
   1137   } else {
   1138     if (addr.IsImmediatePreIndex()) {
   1139       addrmodeop = LoadStorePairPreIndexFixed;
   1140     } else {
   1141       VIXL_ASSERT(addr.IsImmediatePostIndex());
   1142       addrmodeop = LoadStorePairPostIndexFixed;
   1143     }
   1144   }
   1145 
   1146   Instr emitop = addrmodeop | memop;
   1147 
   1148   // Only X registers may be specified for ldpsw.
   1149   VIXL_ASSERT(((emitop & LoadStorePairMask) != LDPSW_x) || rt.IsX());
   1150 
   1151   Emit(emitop);
   1152 }
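        // Example addressing modes (illustrative, not from the upstream
        // sources):
        //   __ stp(x0, x1, MemOperand(sp, -16, PreIndex));  // push a pair
        //   __ ldp(x0, x1, MemOperand(sp, 16, PostIndex));  // pop a pair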
   1153 
   1154 
   1155 void Assembler::ldnp(const CPURegister& rt,
   1156                      const CPURegister& rt2,
   1157                      const MemOperand& src) {
   1158   LoadStorePairNonTemporal(rt, rt2, src, LoadPairNonTemporalOpFor(rt, rt2));
   1159 }
   1160 
   1161 
   1162 void Assembler::stnp(const CPURegister& rt,
   1163                      const CPURegister& rt2,
   1164                      const MemOperand& dst) {
   1165   LoadStorePairNonTemporal(rt, rt2, dst, StorePairNonTemporalOpFor(rt, rt2));
   1166 }
   1167 
   1168 
   1169 void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
   1170                                          const CPURegister& rt2,
   1171                                          const MemOperand& addr,
   1172                                          LoadStorePairNonTemporalOp op) {
   1173   VIXL_ASSERT(CPUHas(rt, rt2));
   1174 
   1175   VIXL_ASSERT(!rt.Is(rt2));
   1176   VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
   1177   VIXL_ASSERT(addr.IsImmediateOffset());
   1178 
   1179   unsigned size =
   1180       CalcLSPairDataSize(static_cast<LoadStorePairOp>(
   1181         static_cast<uint32_t>(op) & static_cast<uint32_t>(LoadStorePairMask)));
   1182   VIXL_ASSERT(IsImmLSPair(addr.GetOffset(), size));
   1183   int offset = static_cast<int>(addr.GetOffset());
   1184   Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.GetBaseRegister()) |
   1185        ImmLSPair(offset, size));
   1186 }
   1187 
   1188 
   1189 // Memory instructions.
   1190 void Assembler::ldrb(const Register& rt,
   1191                      const MemOperand& src,
   1192                      LoadStoreScalingOption option) {
   1193   VIXL_ASSERT(option != RequireUnscaledOffset);
   1194   VIXL_ASSERT(option != PreferUnscaledOffset);
   1195   LoadStore(rt, src, LDRB_w, option);
   1196 }
   1197 
   1198 
   1199 void Assembler::strb(const Register& rt,
   1200                      const MemOperand& dst,
   1201                      LoadStoreScalingOption option) {
   1202   VIXL_ASSERT(option != RequireUnscaledOffset);
   1203   VIXL_ASSERT(option != PreferUnscaledOffset);
   1204   LoadStore(rt, dst, STRB_w, option);
   1205 }
   1206 
   1207 
   1208 void Assembler::ldrsb(const Register& rt,
   1209                       const MemOperand& src,
   1210                       LoadStoreScalingOption option) {
   1211   VIXL_ASSERT(option != RequireUnscaledOffset);
   1212   VIXL_ASSERT(option != PreferUnscaledOffset);
   1213   LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
   1214 }
   1215 
   1216 
   1217 void Assembler::ldrh(const Register& rt,
   1218                      const MemOperand& src,
   1219                      LoadStoreScalingOption option) {
   1220   VIXL_ASSERT(option != RequireUnscaledOffset);
   1221   VIXL_ASSERT(option != PreferUnscaledOffset);
   1222   LoadStore(rt, src, LDRH_w, option);
   1223 }
   1224 
   1225 
   1226 void Assembler::strh(const Register& rt,
   1227                      const MemOperand& dst,
   1228                      LoadStoreScalingOption option) {
   1229   VIXL_ASSERT(option != RequireUnscaledOffset);
   1230   VIXL_ASSERT(option != PreferUnscaledOffset);
   1231   LoadStore(rt, dst, STRH_w, option);
   1232 }
   1233 
   1234 
   1235 void Assembler::ldrsh(const Register& rt,
   1236                       const MemOperand& src,
   1237                       LoadStoreScalingOption option) {
   1238   VIXL_ASSERT(option != RequireUnscaledOffset);
   1239   VIXL_ASSERT(option != PreferUnscaledOffset);
   1240   LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
   1241 }
   1242 
   1243 
   1244 void Assembler::ldr(const CPURegister& rt,
   1245                     const MemOperand& src,
   1246                     LoadStoreScalingOption option) {
   1247   VIXL_ASSERT(option != RequireUnscaledOffset);
   1248   VIXL_ASSERT(option != PreferUnscaledOffset);
   1249   LoadStore(rt, src, LoadOpFor(rt), option);
   1250 }
   1251 
   1252 
   1253 void Assembler::str(const CPURegister& rt,
   1254                     const MemOperand& dst,
   1255                     LoadStoreScalingOption option) {
   1256   VIXL_ASSERT(option != RequireUnscaledOffset);
   1257   VIXL_ASSERT(option != PreferUnscaledOffset);
   1258   LoadStore(rt, dst, StoreOpFor(rt), option);
   1259 }
   1260 
   1261 
   1262 void Assembler::ldrsw(const Register& xt,
   1263                       const MemOperand& src,
   1264                       LoadStoreScalingOption option) {
   1265   VIXL_ASSERT(xt.Is64Bits());
   1266   VIXL_ASSERT(option != RequireUnscaledOffset);
   1267   VIXL_ASSERT(option != PreferUnscaledOffset);
   1268   LoadStore(xt, src, LDRSW_x, option);
   1269 }
   1270 
   1271 
   1272 void Assembler::ldurb(const Register& rt,
   1273                       const MemOperand& src,
   1274                       LoadStoreScalingOption option) {
   1275   VIXL_ASSERT(option != RequireScaledOffset);
   1276   VIXL_ASSERT(option != PreferScaledOffset);
   1277   LoadStore(rt, src, LDRB_w, option);
   1278 }
   1279 
   1280 
   1281 void Assembler::sturb(const Register& rt,
   1282                       const MemOperand& dst,
   1283                       LoadStoreScalingOption option) {
   1284   VIXL_ASSERT(option != RequireScaledOffset);
   1285   VIXL_ASSERT(option != PreferScaledOffset);
   1286   LoadStore(rt, dst, STRB_w, option);
   1287 }
   1288 
   1289 
   1290 void Assembler::ldursb(const Register& rt,
   1291                        const MemOperand& src,
   1292                        LoadStoreScalingOption option) {
   1293   VIXL_ASSERT(option != RequireScaledOffset);
   1294   VIXL_ASSERT(option != PreferScaledOffset);
   1295   LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
   1296 }
   1297 
   1298 
   1299 void Assembler::ldurh(const Register& rt,
   1300                       const MemOperand& src,
   1301                       LoadStoreScalingOption option) {
   1302   VIXL_ASSERT(option != RequireScaledOffset);
   1303   VIXL_ASSERT(option != PreferScaledOffset);
   1304   LoadStore(rt, src, LDRH_w, option);
   1305 }
   1306 
   1307 
   1308 void Assembler::sturh(const Register& rt,
   1309                       const MemOperand& dst,
   1310                       LoadStoreScalingOption option) {
   1311   VIXL_ASSERT(option != RequireScaledOffset);
   1312   VIXL_ASSERT(option != PreferScaledOffset);
   1313   LoadStore(rt, dst, STRH_w, option);
   1314 }
   1315 
   1316 
   1317 void Assembler::ldursh(const Register& rt,
   1318                        const MemOperand& src,
   1319                        LoadStoreScalingOption option) {
   1320   VIXL_ASSERT(option != RequireScaledOffset);
   1321   VIXL_ASSERT(option != PreferScaledOffset);
   1322   LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
   1323 }
   1324 
   1325 
   1326 void Assembler::ldur(const CPURegister& rt,
   1327                      const MemOperand& src,
   1328                      LoadStoreScalingOption option) {
   1329   VIXL_ASSERT(option != RequireScaledOffset);
   1330   VIXL_ASSERT(option != PreferScaledOffset);
   1331   LoadStore(rt, src, LoadOpFor(rt), option);
   1332 }
   1333 
   1334 
   1335 void Assembler::stur(const CPURegister& rt,
   1336                      const MemOperand& dst,
   1337                      LoadStoreScalingOption option) {
   1338   VIXL_ASSERT(option != RequireScaledOffset);
   1339   VIXL_ASSERT(option != PreferScaledOffset);
   1340   LoadStore(rt, dst, StoreOpFor(rt), option);
   1341 }
   1342 
   1343 
   1344 void Assembler::ldursw(const Register& xt,
   1345                        const MemOperand& src,
   1346                        LoadStoreScalingOption option) {
   1347   VIXL_ASSERT(xt.Is64Bits());
   1348   VIXL_ASSERT(option != RequireScaledOffset);
   1349   VIXL_ASSERT(option != PreferScaledOffset);
   1350   LoadStore(xt, src, LDRSW_x, option);
   1351 }
   1352 
   1353 
   1354 void Assembler::ldraa(const Register& xt, const MemOperand& src) {
   1355   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   1356   LoadStorePAC(xt, src, LDRAA);
   1357 }
   1358 
   1359 
   1360 void Assembler::ldrab(const Register& xt, const MemOperand& src) {
   1361   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   1362   LoadStorePAC(xt, src, LDRAB);
   1363 }
   1364 
   1365 
   1366 void Assembler::ldrsw(const Register& xt, RawLiteral* literal) {
   1367   VIXL_ASSERT(xt.Is64Bits());
   1368   VIXL_ASSERT(literal->GetSize() == kWRegSizeInBytes);
   1369   ldrsw(xt, static_cast<int>(LinkAndGetWordOffsetTo(literal)));
   1370 }
   1371 
   1372 
   1373 void Assembler::ldr(const CPURegister& rt, RawLiteral* literal) {
   1374   VIXL_ASSERT(CPUHas(rt));
   1375   VIXL_ASSERT(literal->GetSize() == static_cast<size_t>(rt.GetSizeInBytes()));
   1376   ldr(rt, static_cast<int>(LinkAndGetWordOffsetTo(literal)));
   1377 }
   1378 
   1379 
   1380 void Assembler::ldrsw(const Register& rt, int64_t imm19) {
   1381   Emit(LDRSW_x_lit | ImmLLiteral(imm19) | Rt(rt));
   1382 }
   1383 
   1384 
   1385 void Assembler::ldr(const CPURegister& rt, int64_t imm19) {
   1386   VIXL_ASSERT(CPUHas(rt));
   1387   LoadLiteralOp op = LoadLiteralOpFor(rt);
   1388   Emit(op | ImmLLiteral(imm19) | Rt(rt));
   1389 }
   1390 
   1391 
   1392 void Assembler::prfm(int op, int64_t imm19) {
   1393   Emit(PRFM_lit | ImmPrefetchOperation(op) | ImmLLiteral(imm19));
   1394 }
   1395 
   1396 void Assembler::prfm(PrefetchOperation op, int64_t imm19) {
   1397   // Passing unnamed values in 'op' is undefined behaviour in C++.
   1398   VIXL_ASSERT(IsNamedPrefetchOperation(op));
   1399   prfm(static_cast<int>(op), imm19);
   1400 }
   1401 
   1402 
   1403 // Exclusive-access instructions.
   1404 void Assembler::stxrb(const Register& rs,
   1405                       const Register& rt,
   1406                       const MemOperand& dst) {
   1407   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1408   Emit(STXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
   1409 }
   1410 
   1411 
   1412 void Assembler::stxrh(const Register& rs,
   1413                       const Register& rt,
   1414                       const MemOperand& dst) {
   1415   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1416   Emit(STXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
   1417 }
   1418 
   1419 
   1420 void Assembler::stxr(const Register& rs,
   1421                      const Register& rt,
   1422                      const MemOperand& dst) {
   1423   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1424   LoadStoreExclusive op = rt.Is64Bits() ? STXR_x : STXR_w;
   1425   Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
   1426 }
   1427 
   1428 
   1429 void Assembler::ldxrb(const Register& rt, const MemOperand& src) {
   1430   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1431   Emit(LDXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
   1432 }
   1433 
   1434 
   1435 void Assembler::ldxrh(const Register& rt, const MemOperand& src) {
   1436   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1437   Emit(LDXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
   1438 }
   1439 
   1440 
   1441 void Assembler::ldxr(const Register& rt, const MemOperand& src) {
   1442   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1443   LoadStoreExclusive op = rt.Is64Bits() ? LDXR_x : LDXR_w;
   1444   Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
   1445 }
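        // A typical load/store-exclusive retry loop built from the emitters in
        // this section (illustrative sketch, assuming the '__' shorthand):
        //   Label retry;
        //   __ bind(&retry);
        //   __ ldxr(x0, MemOperand(x2));      // Load-exclusive from [x2].
        //   __ add(x0, x0, 1);
        //   __ stxr(w1, x0, MemOperand(x2));  // w1 is 0 on success.
        //   __ cbnz(w1, &retry);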
   1446 
   1447 
   1448 void Assembler::stxp(const Register& rs,
   1449                      const Register& rt,
   1450                      const Register& rt2,
   1451                      const MemOperand& dst) {
   1452   VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits());
   1453   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1454   LoadStoreExclusive op = rt.Is64Bits() ? STXP_x : STXP_w;
   1455   Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.GetBaseRegister()));
   1456 }
   1457 
   1458 
   1459 void Assembler::ldxp(const Register& rt,
   1460                      const Register& rt2,
   1461                      const MemOperand& src) {
   1462   VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits());
   1463   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1464   LoadStoreExclusive op = rt.Is64Bits() ? LDXP_x : LDXP_w;
   1465   Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.GetBaseRegister()));
   1466 }
   1467 
   1468 
   1469 void Assembler::stlxrb(const Register& rs,
   1470                        const Register& rt,
   1471                        const MemOperand& dst) {
   1472   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1473   Emit(STLXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
   1474 }
   1475 
   1476 
   1477 void Assembler::stlxrh(const Register& rs,
   1478                        const Register& rt,
   1479                        const MemOperand& dst) {
   1480   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1481   Emit(STLXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
   1482 }
   1483 
   1484 
   1485 void Assembler::stlxr(const Register& rs,
   1486                       const Register& rt,
   1487                       const MemOperand& dst) {
   1488   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1489   LoadStoreExclusive op = rt.Is64Bits() ? STLXR_x : STLXR_w;
   1490   Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
   1491 }
   1492 
   1493 
   1494 void Assembler::ldaxrb(const Register& rt, const MemOperand& src) {
   1495   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1496   Emit(LDAXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
   1497 }
   1498 
   1499 
   1500 void Assembler::ldaxrh(const Register& rt, const MemOperand& src) {
   1501   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1502   Emit(LDAXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
   1503 }
   1504 
   1505 
   1506 void Assembler::ldaxr(const Register& rt, const MemOperand& src) {
   1507   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1508   LoadStoreExclusive op = rt.Is64Bits() ? LDAXR_x : LDAXR_w;
   1509   Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
   1510 }
   1511 
   1512 
   1513 void Assembler::stlxp(const Register& rs,
   1514                       const Register& rt,
   1515                       const Register& rt2,
   1516                       const MemOperand& dst) {
   1517   VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits());
   1518   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1519   LoadStoreExclusive op = rt.Is64Bits() ? STLXP_x : STLXP_w;
   1520   Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.GetBaseRegister()));
   1521 }
   1522 
   1523 
   1524 void Assembler::ldaxp(const Register& rt,
   1525                       const Register& rt2,
   1526                       const MemOperand& src) {
   1527   VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits());
   1528   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1529   LoadStoreExclusive op = rt.Is64Bits() ? LDAXP_x : LDAXP_w;
   1530   Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.GetBaseRegister()));
   1531 }
   1532 
   1533 
   1534 void Assembler::stlrb(const Register& rt, const MemOperand& dst) {
   1535   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1536   Emit(STLRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
   1537 }
   1538 
   1539 void Assembler::stlurb(const Register& rt, const MemOperand& dst) {
   1540   VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm));
   1541   VIXL_ASSERT(dst.IsImmediateOffset() && IsImmLSUnscaled(dst.GetOffset()));
   1542 
   1543   Instr base = RnSP(dst.GetBaseRegister());
   1544   int64_t offset = dst.GetOffset();
   1545   Emit(STLURB | Rt(rt) | base | ImmLS(static_cast<int>(offset)));
   1546 }
   1547 
   1548 
   1549 void Assembler::stlrh(const Register& rt, const MemOperand& dst) {
   1550   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1551   Emit(STLRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
   1552 }
   1553 
   1554 void Assembler::stlurh(const Register& rt, const MemOperand& dst) {
   1555   VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm));
   1556   VIXL_ASSERT(dst.IsImmediateOffset() && IsImmLSUnscaled(dst.GetOffset()));
   1557 
   1558   Instr base = RnSP(dst.GetBaseRegister());
   1559   int64_t offset = dst.GetOffset();
   1560   Emit(STLURH | Rt(rt) | base | ImmLS(static_cast<int>(offset)));
   1561 }
   1562 
   1563 
   1564 void Assembler::stlr(const Register& rt, const MemOperand& dst) {
   1565   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1566   LoadStoreExclusive op = rt.Is64Bits() ? STLR_x : STLR_w;
   1567   Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
   1568 }
   1569 
   1570 void Assembler::stlur(const Register& rt, const MemOperand& dst) {
   1571   VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm));
   1572   VIXL_ASSERT(dst.IsImmediateOffset() && IsImmLSUnscaled(dst.GetOffset()));
   1573 
   1574   Instr base = RnSP(dst.GetBaseRegister());
   1575   int64_t offset = dst.GetOffset();
   1576   Instr op = rt.Is64Bits() ? STLUR_x : STLUR_w;
   1577   Emit(op | Rt(rt) | base | ImmLS(static_cast<int>(offset)));
   1578 }
   1579 
   1580 
   1581 void Assembler::ldarb(const Register& rt, const MemOperand& src) {
   1582   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1583   Emit(LDARB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
   1584 }
   1585 
   1586 
   1587 void Assembler::ldarh(const Register& rt, const MemOperand& src) {
   1588   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1589   Emit(LDARH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
   1590 }
   1591 
   1592 
   1593 void Assembler::ldar(const Register& rt, const MemOperand& src) {
   1594   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1595   LoadStoreExclusive op = rt.Is64Bits() ? LDAR_x : LDAR_w;
   1596   Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
   1597 }
   1598 
   1599 
   1600 void Assembler::stllrb(const Register& rt, const MemOperand& dst) {
   1601   VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions));
   1602   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1603   Emit(STLLRB | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
   1604 }
   1605 
   1606 
   1607 void Assembler::stllrh(const Register& rt, const MemOperand& dst) {
   1608   VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions));
   1609   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1610   Emit(STLLRH | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
   1611 }
   1612 
   1613 
   1614 void Assembler::stllr(const Register& rt, const MemOperand& dst) {
   1615   VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions));
   1616   VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0));
   1617   LoadStoreExclusive op = rt.Is64Bits() ? STLLR_x : STLLR_w;
   1618   Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister()));
   1619 }
   1620 
   1621 
   1622 void Assembler::ldlarb(const Register& rt, const MemOperand& src) {
   1623   VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions));
   1624   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1625   Emit(LDLARB | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
   1626 }
   1627 
   1628 
   1629 void Assembler::ldlarh(const Register& rt, const MemOperand& src) {
   1630   VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions));
   1631   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1632   Emit(LDLARH | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
   1633 }
   1634 
   1635 
   1636 void Assembler::ldlar(const Register& rt, const MemOperand& src) {
   1637   VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions));
   1638   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1639   LoadStoreExclusive op = rt.Is64Bits() ? LDLAR_x : LDLAR_w;
   1640   Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister()));
   1641 }
   1642 
   1643 
   1644 // clang-format off
   1645 #define COMPARE_AND_SWAP_W_X_LIST(V) \
   1646   V(cas,   CAS)                      \
   1647   V(casa,  CASA)                     \
   1648   V(casl,  CASL)                     \
   1649   V(casal, CASAL)
   1650 // clang-format on
   1651 
   1652 #define VIXL_DEFINE_ASM_FUNC(FN, OP)                                     \
   1653   void Assembler::FN(const Register& rs,                                 \
   1654                      const Register& rt,                                 \
   1655                      const MemOperand& src) {                            \
   1656     VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                          \
   1657     VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));      \
   1658     VIXL_ASSERT(AreSameFormat(rs, rt));                                  \
   1659     LoadStoreExclusive op = rt.Is64Bits() ? OP##_x : OP##_w;             \
   1660     Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); \
   1661   }
   1662 COMPARE_AND_SWAP_W_X_LIST(VIXL_DEFINE_ASM_FUNC)
   1663 #undef VIXL_DEFINE_ASM_FUNC
   1664 
   1665 // clang-format off
   1666 #define COMPARE_AND_SWAP_W_LIST(V) \
   1667   V(casb,   CASB)                  \
   1668   V(casab,  CASAB)                 \
   1669   V(caslb,  CASLB)                 \
   1670   V(casalb, CASALB)                \
   1671   V(cash,   CASH)                  \
   1672   V(casah,  CASAH)                 \
   1673   V(caslh,  CASLH)                 \
   1674   V(casalh, CASALH)
   1675 // clang-format on
   1676 
   1677 #define VIXL_DEFINE_ASM_FUNC(FN, OP)                                     \
   1678   void Assembler::FN(const Register& rs,                                 \
   1679                      const Register& rt,                                 \
   1680                      const MemOperand& src) {                            \
   1681     VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                          \
   1682     VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));      \
   1683     Emit(OP | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); \
   1684   }
   1685 COMPARE_AND_SWAP_W_LIST(VIXL_DEFINE_ASM_FUNC)
   1686 #undef VIXL_DEFINE_ASM_FUNC
   1687 
   1688 
   1689 // clang-format off
   1690 #define COMPARE_AND_SWAP_PAIR_LIST(V) \
   1691   V(casp,   CASP)                     \
   1692   V(caspa,  CASPA)                    \
   1693   V(caspl,  CASPL)                    \
   1694   V(caspal, CASPAL)
   1695 // clang-format on
   1696 
   1697 #define VIXL_DEFINE_ASM_FUNC(FN, OP)                                     \
   1698   void Assembler::FN(const Register& rs,                                 \
   1699                      const Register& rs1,                                \
   1700                      const Register& rt,                                 \
   1701                      const Register& rt1,                                \
   1702                      const MemOperand& src) {                            \
   1703     VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                          \
   1704     USE(rs1, rt1);                                                       \
   1705     VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));      \
   1706     VIXL_ASSERT(AreEven(rs, rt));                                        \
   1707     VIXL_ASSERT(AreConsecutive(rs, rs1));                                \
   1708     VIXL_ASSERT(AreConsecutive(rt, rt1));                                \
   1709     VIXL_ASSERT(AreSameFormat(rs, rs1, rt, rt1));                        \
   1710     LoadStoreExclusive op = rt.Is64Bits() ? OP##_x : OP##_w;             \
   1711     Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); \
   1712   }
   1713 COMPARE_AND_SWAP_PAIR_LIST(VIXL_DEFINE_ASM_FUNC)
   1714 #undef VIXL_DEFINE_ASM_FUNC
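
// Usage sketch, illustrative only: the generated compare-and-swap helpers
// take the comparison/result register(s) first, e.g. (with an assumed
// Assembler instance "masm")
//
//   masm.casal(x0, x1, MemOperand(x2));           // CASAL x0, x1, [x2]
//   masm.caspal(x0, x1, x2, x3, MemOperand(x4));  // CASPAL x0, x1, x2, x3, [x4]
//
// For the pair forms, <rs, rs1> and <rt, rt1> must be even/odd consecutive
// pairs of the same size (see the asserts above); only rs and rt are encoded,
// so the odd registers are implied.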
   1715 
   1716 // These macros generate all the variations of the atomic memory operations,
   1717 // e.g. ldadd, ldadda, ldaddb, staddl, etc.
   1718 // For a full list of the methods with comments, see the assembler header file.
   1719 
   1720 // clang-format off
   1721 #define ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(V, DEF) \
   1722   V(DEF, add,  LDADD)                               \
   1723   V(DEF, clr,  LDCLR)                               \
   1724   V(DEF, eor,  LDEOR)                               \
   1725   V(DEF, set,  LDSET)                               \
   1726   V(DEF, smax, LDSMAX)                              \
   1727   V(DEF, smin, LDSMIN)                              \
   1728   V(DEF, umax, LDUMAX)                              \
   1729   V(DEF, umin, LDUMIN)
   1730 
   1731 #define ATOMIC_MEMORY_STORE_MODES(V, NAME, OP) \
   1732   V(NAME,     OP##_x,   OP##_w)                \
   1733   V(NAME##l,  OP##L_x,  OP##L_w)               \
   1734   V(NAME##b,  OP##B,    OP##B)                 \
   1735   V(NAME##lb, OP##LB,   OP##LB)                \
   1736   V(NAME##h,  OP##H,    OP##H)                 \
   1737   V(NAME##lh, OP##LH,   OP##LH)
   1738 
   1739 #define ATOMIC_MEMORY_LOAD_MODES(V, NAME, OP) \
   1740   ATOMIC_MEMORY_STORE_MODES(V, NAME, OP)      \
   1741   V(NAME##a,   OP##A_x,  OP##A_w)             \
   1742   V(NAME##al,  OP##AL_x, OP##AL_w)            \
   1743   V(NAME##ab,  OP##AB,   OP##AB)              \
   1744   V(NAME##alb, OP##ALB,  OP##ALB)             \
   1745   V(NAME##ah,  OP##AH,   OP##AH)              \
   1746   V(NAME##alh, OP##ALH,  OP##ALH)
   1747 // clang-format on
   1748 
   1749 #define DEFINE_ASM_LOAD_FUNC(FN, OP_X, OP_W)                        \
   1750   void Assembler::ld##FN(const Register& rs,                        \
   1751                          const Register& rt,                        \
   1752                          const MemOperand& src) {                   \
   1753     VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                     \
   1754     VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); \
   1755     AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W;                \
   1756     Emit(op | Rs(rs) | Rt(rt) | RnSP(src.GetBaseRegister()));       \
   1757   }
   1758 #define DEFINE_ASM_STORE_FUNC(FN, OP_X, OP_W)                         \
   1759   void Assembler::st##FN(const Register& rs, const MemOperand& src) { \
   1760     VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                       \
   1761     ld##FN(rs, AppropriateZeroRegFor(rs), src);                       \
   1762   }
   1763 
   1764 ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_LOAD_MODES,
   1765                                     DEFINE_ASM_LOAD_FUNC)
   1766 ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_STORE_MODES,
   1767                                     DEFINE_ASM_STORE_FUNC)
   1768 
   1769 #define DEFINE_ASM_SWP_FUNC(FN, OP_X, OP_W)                         \
   1770   void Assembler::FN(const Register& rs,                            \
   1771                      const Register& rt,                            \
   1772                      const MemOperand& src) {                       \
   1773     VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));                     \
   1774     VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); \
   1775     AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W;                \
   1776     Emit(op | Rs(rs) | Rt(rt) | RnSP(src.GetBaseRegister()));       \
   1777   }
   1778 
   1779 ATOMIC_MEMORY_LOAD_MODES(DEFINE_ASM_SWP_FUNC, swp, SWP)
   1780 
   1781 #undef DEFINE_ASM_LOAD_FUNC
   1782 #undef DEFINE_ASM_STORE_FUNC
   1783 #undef DEFINE_ASM_SWP_FUNC
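
// For illustration only: expanding the lists above for the "add" entry,
// ATOMIC_MEMORY_LOAD_MODES with DEFINE_ASM_LOAD_FUNC produces definitions
// equivalent to
//
//   void Assembler::ldaddal(const Register& rs,
//                           const Register& rt,
//                           const MemOperand& src) {
//     VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));
//     VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
//     AtomicMemoryOp op = rt.Is64Bits() ? LDADDAL_x : LDADDAL_w;
//     Emit(op | Rs(rs) | Rt(rt) | RnSP(src.GetBaseRegister()));
//   }
//
// while ATOMIC_MEMORY_STORE_MODES with DEFINE_ASM_STORE_FUNC yields store
// aliases that discard the loaded result:
//
//   void Assembler::staddl(const Register& rs, const MemOperand& src) {
//     VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics));
//     ldaddl(rs, AppropriateZeroRegFor(rs), src);
//   }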
   1784 
   1785 
   1786 void Assembler::ldaprb(const Register& rt, const MemOperand& src) {
   1787   VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc));
   1788   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1789   AtomicMemoryOp op = LDAPRB;
   1790   Emit(op | Rs(xzr) | Rt(rt) | RnSP(src.GetBaseRegister()));
   1791 }
   1792 
   1793 void Assembler::ldapurb(const Register& rt, const MemOperand& src) {
   1794   VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm));
   1795   VIXL_ASSERT(src.IsImmediateOffset() && IsImmLSUnscaled(src.GetOffset()));
   1796 
   1797   Instr base = RnSP(src.GetBaseRegister());
   1798   int64_t offset = src.GetOffset();
   1799   Emit(LDAPURB | Rt(rt) | base | ImmLS(static_cast<int>(offset)));
   1800 }
   1801 
   1802 void Assembler::ldapursb(const Register& rt, const MemOperand& src) {
   1803   VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm));
   1804   VIXL_ASSERT(src.IsImmediateOffset() && IsImmLSUnscaled(src.GetOffset()));
   1805 
   1806   Instr base = RnSP(src.GetBaseRegister());
   1807   int64_t offset = src.GetOffset();
   1808   Instr op = rt.Is64Bits() ? LDAPURSB_x : LDAPURSB_w;
   1809   Emit(op | Rt(rt) | base | ImmLS(static_cast<int>(offset)));
   1810 }
   1811 
   1812 void Assembler::ldaprh(const Register& rt, const MemOperand& src) {
   1813   VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc));
   1814   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1815   AtomicMemoryOp op = LDAPRH;
   1816   Emit(op | Rs(xzr) | Rt(rt) | RnSP(src.GetBaseRegister()));
   1817 }
   1818 
   1819 void Assembler::ldapurh(const Register& rt, const MemOperand& src) {
   1820   VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm));
   1821   VIXL_ASSERT(src.IsImmediateOffset() && IsImmLSUnscaled(src.GetOffset()));
   1822 
   1823   Instr base = RnSP(src.GetBaseRegister());
   1824   int64_t offset = src.GetOffset();
   1825   Emit(LDAPURH | Rt(rt) | base | ImmLS(static_cast<int>(offset)));
   1826 }
   1827 
   1828 void Assembler::ldapursh(const Register& rt, const MemOperand& src) {
   1829   VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm));
   1830   VIXL_ASSERT(src.IsImmediateOffset() && IsImmLSUnscaled(src.GetOffset()));
   1831 
   1832   Instr base = RnSP(src.GetBaseRegister());
   1833   int64_t offset = src.GetOffset();
   1834   LoadStoreRCpcUnscaledOffsetOp op = rt.Is64Bits() ? LDAPURSH_x : LDAPURSH_w;
   1835   Emit(op | Rt(rt) | base | ImmLS(static_cast<int>(offset)));
   1836 }
   1837 
   1838 void Assembler::ldapr(const Register& rt, const MemOperand& src) {
   1839   VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc));
   1840   VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0));
   1841   AtomicMemoryOp op = rt.Is64Bits() ? LDAPR_x : LDAPR_w;
   1842   Emit(op | Rs(xzr) | Rt(rt) | RnSP(src.GetBaseRegister()));
   1843 }
   1844 
   1845 void Assembler::ldapur(const Register& rt, const MemOperand& src) {
   1846   VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm));
   1847   VIXL_ASSERT(src.IsImmediateOffset() && IsImmLSUnscaled(src.GetOffset()));
   1848 
   1849   Instr base = RnSP(src.GetBaseRegister());
   1850   int64_t offset = src.GetOffset();
   1851   LoadStoreRCpcUnscaledOffsetOp op = rt.Is64Bits() ? LDAPUR_x : LDAPUR_w;
   1852   Emit(op | Rt(rt) | base | ImmLS(static_cast<int>(offset)));
   1853 }
   1854 
   1855 void Assembler::ldapursw(const Register& rt, const MemOperand& src) {
   1856   VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm));
   1857   VIXL_ASSERT(rt.Is64Bits());
   1858   VIXL_ASSERT(src.IsImmediateOffset() && IsImmLSUnscaled(src.GetOffset()));
   1859 
   1860   Instr base = RnSP(src.GetBaseRegister());
   1861   int64_t offset = src.GetOffset();
   1862   Emit(LDAPURSW | Rt(rt) | base | ImmLS(static_cast<int>(offset)));
   1863 }
   1864 
   1865 void Assembler::prfm(int op,
   1866                      const MemOperand& address,
   1867                      LoadStoreScalingOption option) {
   1868   VIXL_ASSERT(option != RequireUnscaledOffset);
   1869   VIXL_ASSERT(option != PreferUnscaledOffset);
   1870   Prefetch(op, address, option);
   1871 }
   1872 
   1873 void Assembler::prfm(PrefetchOperation op,
   1874                      const MemOperand& address,
   1875                      LoadStoreScalingOption option) {
   1876   // Passing unnamed values in 'op' is undefined behaviour in C++.
   1877   VIXL_ASSERT(IsNamedPrefetchOperation(op));
   1878   prfm(static_cast<int>(op), address, option);
   1879 }
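
// Usage sketch, illustrative only: named operations go through the checked
// PrefetchOperation overload, while 5-bit prefetch-operation values that have
// no named enumerator use the int overload. "masm" is an assumed Assembler
// instance.
//
//   masm.prfm(PLDL1KEEP, MemOperand(x0), PreferScaledOffset);
//   masm.prfm(0x18, MemOperand(x0), PreferScaledOffset);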
   1880 
   1881 
   1882 void Assembler::prfum(int op,
   1883                       const MemOperand& address,
   1884                       LoadStoreScalingOption option) {
   1885   VIXL_ASSERT(option != RequireScaledOffset);
   1886   VIXL_ASSERT(option != PreferScaledOffset);
   1887   Prefetch(op, address, option);
   1888 }
   1889 
   1890 void Assembler::prfum(PrefetchOperation op,
   1891                       const MemOperand& address,
   1892                       LoadStoreScalingOption option) {
   1893   // Passing unnamed values in 'op' is undefined behaviour in C++.
   1894   VIXL_ASSERT(IsNamedPrefetchOperation(op));
   1895   prfum(static_cast<int>(op), address, option);
   1896 }
   1897 
   1898 
   1899 void Assembler::prfm(int op, RawLiteral* literal) {
   1900   prfm(op, static_cast<int>(LinkAndGetWordOffsetTo(literal)));
   1901 }
   1902 
   1903 void Assembler::prfm(PrefetchOperation op, RawLiteral* literal) {
   1904   // Passing unnamed values in 'op' is undefined behaviour in C++.
   1905   VIXL_ASSERT(IsNamedPrefetchOperation(op));
   1906   prfm(static_cast<int>(op), literal);
   1907 }
   1908 
   1909 
   1910 void Assembler::sys(int op1, int crn, int crm, int op2, const Register& xt) {
   1911   VIXL_ASSERT(xt.Is64Bits());
   1912   Emit(SYS | ImmSysOp1(op1) | CRn(crn) | CRm(crm) | ImmSysOp2(op2) | Rt(xt));
   1913 }
   1914 
   1915 
   1916 void Assembler::sys(int op, const Register& xt) {
   1917   VIXL_ASSERT(xt.Is64Bits());
   1918   Emit(SYS | SysOp(op) | Rt(xt));
   1919 }
   1920 
   1921 
   1922 void Assembler::dc(DataCacheOp op, const Register& rt) {
   1923   if (op == CVAP) VIXL_ASSERT(CPUHas(CPUFeatures::kDCPoP));
   1924   if (op == CVADP) VIXL_ASSERT(CPUHas(CPUFeatures::kDCCVADP));
   1925   sys(op, rt);
   1926 }
   1927 
   1928 
   1929 void Assembler::ic(InstructionCacheOp op, const Register& rt) {
   1930   VIXL_ASSERT(op == IVAU);
   1931   sys(op, rt);
   1932 }
   1933 
   1934 
   1935 void Assembler::hint(SystemHint code) { hint(static_cast<int>(code)); }
   1936 
   1937 
   1938 void Assembler::hint(int imm7) {
   1939   VIXL_ASSERT(IsUint7(imm7));
   1940   Emit(HINT | ImmHint(imm7) | Rt(xzr));
   1941 }
   1942 
   1943 
   1944 // MTE.
   1945 
   1946 void Assembler::addg(const Register& xd,
   1947                      const Register& xn,
   1948                      int offset,
   1949                      int tag_offset) {
   1950   VIXL_ASSERT(CPUHas(CPUFeatures::kMTE));
   1951   VIXL_ASSERT(IsMultiple(offset, kMTETagGranuleInBytes));
   1952 
   1953   Emit(0x91800000 | RdSP(xd) | RnSP(xn) |
   1954        ImmUnsignedField<21, 16>(offset / kMTETagGranuleInBytes) |
   1955        ImmUnsignedField<13, 10>(tag_offset));
   1956 }
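
// Worked example, for illustration: with kMTETagGranuleInBytes == 16,
// addg(x0, x1, 32, 3) encodes 32 / 16 = 2 in the <21:16> immediate field and
// 3 in the <13:10> tag-offset field, i.e. ADDG x0, x1, #32, #3.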
   1957 
   1958 void Assembler::gmi(const Register& xd,
   1959                     const Register& xn,
   1960                     const Register& xm) {
   1961   VIXL_ASSERT(CPUHas(CPUFeatures::kMTE));
   1962 
   1963   Emit(0x9ac01400 | Rd(xd) | RnSP(xn) | Rm(xm));
   1964 }
   1965 
   1966 void Assembler::irg(const Register& xd,
   1967                     const Register& xn,
   1968                     const Register& xm) {
   1969   VIXL_ASSERT(CPUHas(CPUFeatures::kMTE));
   1970 
   1971   Emit(0x9ac01000 | RdSP(xd) | RnSP(xn) | Rm(xm));
   1972 }
   1973 
   1974 void Assembler::ldg(const Register& xt, const MemOperand& addr) {
   1975   VIXL_ASSERT(CPUHas(CPUFeatures::kMTE));
   1976   VIXL_ASSERT(addr.IsImmediateOffset());
   1977   int offset = static_cast<int>(addr.GetOffset());
   1978   VIXL_ASSERT(IsMultiple(offset, kMTETagGranuleInBytes));
   1979 
   1980   Emit(0xd9600000 | Rt(xt) | RnSP(addr.GetBaseRegister()) |
   1981        ImmField<20, 12>(offset / static_cast<int>(kMTETagGranuleInBytes)));
   1982 }
   1983 
   1984 void Assembler::StoreTagHelper(const Register& xt,
   1985                                const MemOperand& addr,
   1986                                Instr op) {
   1987   int offset = static_cast<int>(addr.GetOffset());
   1988   VIXL_ASSERT(IsMultiple(offset, kMTETagGranuleInBytes));
   1989 
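  // Select the addressing-mode value placed in bits <11:10> by the tag-store
  // encodings: 1 = post-index, 2 = signed offset, 3 = pre-index.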
   1990   Instr addr_mode;
   1991   if (addr.IsImmediateOffset()) {
   1992     addr_mode = 2;
   1993   } else if (addr.IsImmediatePreIndex()) {
   1994     addr_mode = 3;
   1995   } else {
   1996     VIXL_ASSERT(addr.IsImmediatePostIndex());
   1997     addr_mode = 1;
   1998   }
   1999 
   2000   Emit(op | RdSP(xt) | RnSP(addr.GetBaseRegister()) | (addr_mode << 10) |
   2001        ImmField<20, 12>(offset / static_cast<int>(kMTETagGranuleInBytes)));
   2002 }
   2003 
   2004 void Assembler::st2g(const Register& xt, const MemOperand& addr) {
   2005   VIXL_ASSERT(CPUHas(CPUFeatures::kMTE));
   2006   StoreTagHelper(xt, addr, 0xd9a00000);
   2007 }
   2008 
   2009 void Assembler::stg(const Register& xt, const MemOperand& addr) {
   2010   VIXL_ASSERT(CPUHas(CPUFeatures::kMTE));
   2011   StoreTagHelper(xt, addr, 0xd9200000);
   2012 }
   2013 
   2014 void Assembler::stgp(const Register& xt1,
   2015                      const Register& xt2,
   2016                      const MemOperand& addr) {
   2017   VIXL_ASSERT(CPUHas(CPUFeatures::kMTE));
   2018   int offset = static_cast<int>(addr.GetOffset());
   2019   VIXL_ASSERT(IsMultiple(offset, kMTETagGranuleInBytes));
   2020 
   2021   Instr addr_mode;
   2022   if (addr.IsImmediateOffset()) {
   2023     addr_mode = 2;
   2024   } else if (addr.IsImmediatePreIndex()) {
   2025     addr_mode = 3;
   2026   } else {
   2027     VIXL_ASSERT(addr.IsImmediatePostIndex());
   2028     addr_mode = 1;
   2029   }
   2030 
   2031   Emit(0x68000000 | RnSP(addr.GetBaseRegister()) | (addr_mode << 23) |
   2032        ImmField<21, 15>(offset / static_cast<int>(kMTETagGranuleInBytes)) |
   2033        Rt2(xt2) | Rt(xt1));
   2034 }
   2035 
   2036 void Assembler::stz2g(const Register& xt, const MemOperand& addr) {
   2037   VIXL_ASSERT(CPUHas(CPUFeatures::kMTE));
   2038   StoreTagHelper(xt, addr, 0xd9e00000);
   2039 }
   2040 
   2041 void Assembler::stzg(const Register& xt, const MemOperand& addr) {
   2042   VIXL_ASSERT(CPUHas(CPUFeatures::kMTE));
   2043   StoreTagHelper(xt, addr, 0xd9600000);
   2044 }
   2045 
   2046 void Assembler::subg(const Register& xd,
   2047                      const Register& xn,
   2048                      int offset,
   2049                      int tag_offset) {
   2050   VIXL_ASSERT(CPUHas(CPUFeatures::kMTE));
   2051   VIXL_ASSERT(IsMultiple(offset, kMTETagGranuleInBytes));
   2052 
   2053   Emit(0xd1800000 | RdSP(xd) | RnSP(xn) |
   2054        ImmUnsignedField<21, 16>(offset / kMTETagGranuleInBytes) |
   2055        ImmUnsignedField<13, 10>(tag_offset));
   2056 }
   2057 
   2058 void Assembler::subp(const Register& xd,
   2059                      const Register& xn,
   2060                      const Register& xm) {
   2061   VIXL_ASSERT(CPUHas(CPUFeatures::kMTE));
   2062 
   2063   Emit(0x9ac00000 | Rd(xd) | RnSP(xn) | RmSP(xm));
   2064 }
   2065 
   2066 void Assembler::subps(const Register& xd,
   2067                       const Register& xn,
   2068                       const Register& xm) {
   2069   VIXL_ASSERT(CPUHas(CPUFeatures::kMTE));
   2070 
   2071   Emit(0xbac00000 | Rd(xd) | RnSP(xn) | RmSP(xm));
   2072 }
   2073 
   2074 void Assembler::cpye(const Register& rd,
   2075                      const Register& rs,
   2076                      const Register& rn) {
   2077   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2078   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2079   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2080 
   2081   Emit(0x1d800400 | Rd(rd) | Rn(rn) | Rs(rs));
   2082 }
   2083 
   2084 void Assembler::cpyen(const Register& rd,
   2085                       const Register& rs,
   2086                       const Register& rn) {
   2087   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2088   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2089   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2090 
   2091   Emit(0x1d80c400 | Rd(rd) | Rn(rn) | Rs(rs));
   2092 }
   2093 
   2094 void Assembler::cpyern(const Register& rd,
   2095                        const Register& rs,
   2096                        const Register& rn) {
   2097   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2098   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2099   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2100 
   2101   Emit(0x1d808400 | Rd(rd) | Rn(rn) | Rs(rs));
   2102 }
   2103 
   2104 void Assembler::cpyewn(const Register& rd,
   2105                        const Register& rs,
   2106                        const Register& rn) {
   2107   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2108   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2109   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2110 
   2111   Emit(0x1d804400 | Rd(rd) | Rn(rn) | Rs(rs));
   2112 }
   2113 
   2114 void Assembler::cpyfe(const Register& rd,
   2115                       const Register& rs,
   2116                       const Register& rn) {
   2117   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2118   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2119   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2120 
   2121   Emit(0x19800400 | Rd(rd) | Rn(rn) | Rs(rs));
   2122 }
   2123 
   2124 void Assembler::cpyfen(const Register& rd,
   2125                        const Register& rs,
   2126                        const Register& rn) {
   2127   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2128   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2129   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2130 
   2131   Emit(0x1980c400 | Rd(rd) | Rn(rn) | Rs(rs));
   2132 }
   2133 
   2134 void Assembler::cpyfern(const Register& rd,
   2135                         const Register& rs,
   2136                         const Register& rn) {
   2137   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2138   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2139   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2140 
   2141   Emit(0x19808400 | Rd(rd) | Rn(rn) | Rs(rs));
   2142 }
   2143 
   2144 void Assembler::cpyfewn(const Register& rd,
   2145                         const Register& rs,
   2146                         const Register& rn) {
   2147   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2148   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2149   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2150 
   2151   Emit(0x19804400 | Rd(rd) | Rn(rn) | Rs(rs));
   2152 }
   2153 
   2154 void Assembler::cpyfm(const Register& rd,
   2155                       const Register& rs,
   2156                       const Register& rn) {
   2157   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2158   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2159   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2160 
   2161   Emit(0x19400400 | Rd(rd) | Rn(rn) | Rs(rs));
   2162 }
   2163 
   2164 void Assembler::cpyfmn(const Register& rd,
   2165                        const Register& rs,
   2166                        const Register& rn) {
   2167   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2168   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2169   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2170 
   2171   Emit(0x1940c400 | Rd(rd) | Rn(rn) | Rs(rs));
   2172 }
   2173 
   2174 void Assembler::cpyfmrn(const Register& rd,
   2175                         const Register& rs,
   2176                         const Register& rn) {
   2177   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2178   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2179   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2180 
   2181   Emit(0x19408400 | Rd(rd) | Rn(rn) | Rs(rs));
   2182 }
   2183 
   2184 void Assembler::cpyfmwn(const Register& rd,
   2185                         const Register& rs,
   2186                         const Register& rn) {
   2187   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2188   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2189   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2190 
   2191   Emit(0x19404400 | Rd(rd) | Rn(rn) | Rs(rs));
   2192 }
   2193 
   2194 void Assembler::cpyfp(const Register& rd,
   2195                       const Register& rs,
   2196                       const Register& rn) {
   2197   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2198   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2199   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2200 
   2201   Emit(0x19000400 | Rd(rd) | Rn(rn) | Rs(rs));
   2202 }
   2203 
   2204 void Assembler::cpyfpn(const Register& rd,
   2205                        const Register& rs,
   2206                        const Register& rn) {
   2207   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2208   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2209   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2210 
   2211   Emit(0x1900c400 | Rd(rd) | Rn(rn) | Rs(rs));
   2212 }
   2213 
   2214 void Assembler::cpyfprn(const Register& rd,
   2215                         const Register& rs,
   2216                         const Register& rn) {
   2217   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2218   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2219   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2220 
   2221   Emit(0x19008400 | Rd(rd) | Rn(rn) | Rs(rs));
   2222 }
   2223 
   2224 void Assembler::cpyfpwn(const Register& rd,
   2225                         const Register& rs,
   2226                         const Register& rn) {
   2227   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2228   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2229   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2230 
   2231   Emit(0x19004400 | Rd(rd) | Rn(rn) | Rs(rs));
   2232 }
   2233 
   2234 void Assembler::cpym(const Register& rd,
   2235                      const Register& rs,
   2236                      const Register& rn) {
   2237   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2238   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2239   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2240 
   2241   Emit(0x1d400400 | Rd(rd) | Rn(rn) | Rs(rs));
   2242 }
   2243 
   2244 void Assembler::cpymn(const Register& rd,
   2245                       const Register& rs,
   2246                       const Register& rn) {
   2247   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2248   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2249   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2250 
   2251   Emit(0x1d40c400 | Rd(rd) | Rn(rn) | Rs(rs));
   2252 }
   2253 
   2254 void Assembler::cpymrn(const Register& rd,
   2255                        const Register& rs,
   2256                        const Register& rn) {
   2257   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2258   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2259   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2260 
   2261   Emit(0x1d408400 | Rd(rd) | Rn(rn) | Rs(rs));
   2262 }
   2263 
   2264 void Assembler::cpymwn(const Register& rd,
   2265                        const Register& rs,
   2266                        const Register& rn) {
   2267   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2268   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2269   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2270 
   2271   Emit(0x1d404400 | Rd(rd) | Rn(rn) | Rs(rs));
   2272 }
   2273 
   2274 void Assembler::cpyp(const Register& rd,
   2275                      const Register& rs,
   2276                      const Register& rn) {
   2277   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2278   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2279   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2280 
   2281   Emit(0x1d000400 | Rd(rd) | Rn(rn) | Rs(rs));
   2282 }
   2283 
   2284 void Assembler::cpypn(const Register& rd,
   2285                       const Register& rs,
   2286                       const Register& rn) {
   2287   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2288   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2289   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2290 
   2291   Emit(0x1d00c400 | Rd(rd) | Rn(rn) | Rs(rs));
   2292 }
   2293 
   2294 void Assembler::cpyprn(const Register& rd,
   2295                        const Register& rs,
   2296                        const Register& rn) {
   2297   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2298   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2299   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2300 
   2301   Emit(0x1d008400 | Rd(rd) | Rn(rn) | Rs(rs));
   2302 }
   2303 
   2304 void Assembler::cpypwn(const Register& rd,
   2305                        const Register& rs,
   2306                        const Register& rn) {
   2307   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2308   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2309   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero() && !rs.IsZero());
   2310 
   2311   Emit(0x1d004400 | Rd(rd) | Rn(rn) | Rs(rs));
   2312 }
   2313 
   2314 void Assembler::sete(const Register& rd,
   2315                      const Register& rn,
   2316                      const Register& rs) {
   2317   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2318   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2319   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero());
   2320 
   2321   Emit(0x19c08400 | Rd(rd) | Rn(rn) | Rs(rs));
   2322 }
   2323 
   2324 void Assembler::seten(const Register& rd,
   2325                       const Register& rn,
   2326                       const Register& rs) {
   2327   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2328   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2329   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero());
   2330 
   2331   Emit(0x19c0a400 | Rd(rd) | Rn(rn) | Rs(rs));
   2332 }
   2333 
   2334 void Assembler::setge(const Register& rd,
   2335                       const Register& rn,
   2336                       const Register& rs) {
   2337   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2338   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2339   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero());
   2340 
   2341   Emit(0x1dc08400 | Rd(rd) | Rn(rn) | Rs(rs));
   2342 }
   2343 
   2344 void Assembler::setgen(const Register& rd,
   2345                        const Register& rn,
   2346                        const Register& rs) {
   2347   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2348   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2349   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero());
   2350 
   2351   Emit(0x1dc0a400 | Rd(rd) | Rn(rn) | Rs(rs));
   2352 }
   2353 
   2354 void Assembler::setgm(const Register& rd,
   2355                       const Register& rn,
   2356                       const Register& rs) {
   2357   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2358   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2359   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero());
   2360 
   2361   Emit(0x1dc04400 | Rd(rd) | Rn(rn) | Rs(rs));
   2362 }
   2363 
   2364 void Assembler::setgmn(const Register& rd,
   2365                        const Register& rn,
   2366                        const Register& rs) {
   2367   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2368   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2369   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero());
   2370 
   2371   Emit(0x1dc06400 | Rd(rd) | Rn(rn) | Rs(rs));
   2372 }
   2373 
   2374 void Assembler::setgp(const Register& rd,
   2375                       const Register& rn,
   2376                       const Register& rs) {
   2377   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2378   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2379   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero());
   2380 
   2381   Emit(0x1dc00400 | Rd(rd) | Rn(rn) | Rs(rs));
   2382 }
   2383 
   2384 void Assembler::setgpn(const Register& rd,
   2385                        const Register& rn,
   2386                        const Register& rs) {
   2387   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2388   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2389   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero());
   2390 
   2391   Emit(0x1dc02400 | Rd(rd) | Rn(rn) | Rs(rs));
   2392 }
   2393 
   2394 void Assembler::setm(const Register& rd,
   2395                      const Register& rn,
   2396                      const Register& rs) {
   2397   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2398   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2399   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero());
   2400 
   2401   Emit(0x19c04400 | Rd(rd) | Rn(rn) | Rs(rs));
   2402 }
   2403 
   2404 void Assembler::setmn(const Register& rd,
   2405                       const Register& rn,
   2406                       const Register& rs) {
   2407   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2408   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2409   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero());
   2410 
   2411   Emit(0x19c06400 | Rd(rd) | Rn(rn) | Rs(rs));
   2412 }
   2413 
   2414 void Assembler::setp(const Register& rd,
   2415                      const Register& rn,
   2416                      const Register& rs) {
   2417   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2418   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2419   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero());
   2420 
   2421   Emit(0x19c00400 | Rd(rd) | Rn(rn) | Rs(rs));
   2422 }
   2423 
   2424 void Assembler::setpn(const Register& rd,
   2425                       const Register& rn,
   2426                       const Register& rs) {
   2427   VIXL_ASSERT(CPUHas(CPUFeatures::kMOPS));
   2428   VIXL_ASSERT(!AreAliased(rd, rn, rs));
   2429   VIXL_ASSERT(!rd.IsZero() && !rn.IsZero());
   2430 
   2431   Emit(0x19c02400 | Rd(rd) | Rn(rn) | Rs(rs));
   2432 }
   2433 
   2434 void Assembler::abs(const Register& rd, const Register& rn) {
   2435   VIXL_ASSERT(CPUHas(CPUFeatures::kCSSC));
   2436   VIXL_ASSERT(rd.IsSameSizeAndType(rn));
   2437 
   2438   Emit(0x5ac02000 | SF(rd) | Rd(rd) | Rn(rn));
   2439 }
   2440 
   2441 void Assembler::cnt(const Register& rd, const Register& rn) {
   2442   VIXL_ASSERT(CPUHas(CPUFeatures::kCSSC));
   2443   VIXL_ASSERT(rd.IsSameSizeAndType(rn));
   2444 
   2445   Emit(0x5ac01c00 | SF(rd) | Rd(rd) | Rn(rn));
   2446 }
   2447 
   2448 void Assembler::ctz(const Register& rd, const Register& rn) {
   2449   VIXL_ASSERT(CPUHas(CPUFeatures::kCSSC));
   2450   VIXL_ASSERT(rd.IsSameSizeAndType(rn));
   2451 
   2452   Emit(0x5ac01800 | SF(rd) | Rd(rd) | Rn(rn));
   2453 }
   2454 
   2455 #define MINMAX(V)                        \
   2456   V(smax, 0x11c00000, 0x1ac06000, true)  \
   2457   V(smin, 0x11c80000, 0x1ac06800, true)  \
   2458   V(umax, 0x11c40000, 0x1ac06400, false) \
   2459   V(umin, 0x11cc0000, 0x1ac06c00, false)
   2460 
   2461 #define VIXL_DEFINE_ASM_FUNC(FN, IMMOP, REGOP, SIGNED)                     \
   2462   void Assembler::FN(const Register& rd,                                   \
   2463                      const Register& rn,                                   \
   2464                      const Operand& op) {                                  \
   2465     VIXL_ASSERT(rd.IsSameSizeAndType(rn));                                 \
   2466     Instr i = SF(rd) | Rd(rd) | Rn(rn);                                    \
   2467     if (op.IsImmediate()) {                                                \
   2468       int64_t imm = op.GetImmediate();                                     \
   2469       i |= SIGNED ? ImmField<17, 10>(imm) : ImmUnsignedField<17, 10>(imm); \
   2470       Emit(IMMOP | i);                                                     \
   2471     } else {                                                               \
   2472       VIXL_ASSERT(op.IsPlainRegister());                                   \
   2473       VIXL_ASSERT(op.GetRegister().IsSameSizeAndType(rd));                 \
   2474       Emit(REGOP | i | Rm(op.GetRegister()));                              \
   2475     }                                                                      \
   2476   }
   2477 MINMAX(VIXL_DEFINE_ASM_FUNC)
   2478 #undef VIXL_DEFINE_ASM_FUNC
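
// Usage sketch, illustrative only: the CSSC min/max helpers above accept
// either an 8-bit immediate (signed for smax/smin, unsigned for umax/umin,
// as the ImmField/ImmUnsignedField choice implies) or a register operand.
// "masm" is an assumed Assembler instance.
//
//   masm.smax(x0, x1, Operand(-5));  // SMAX x0, x1, #-5
//   masm.umin(w0, w1, Operand(w2));  // UMIN w0, w1, w2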
   2479 
   2480 // NEON structure loads and stores.
   2481 Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
   2482   Instr addr_field = RnSP(addr.GetBaseRegister());
   2483 
   2484   if (addr.IsPostIndex()) {
   2485     VIXL_STATIC_ASSERT(NEONLoadStoreMultiStructPostIndex ==
   2486                        static_cast<NEONLoadStoreMultiStructPostIndexOp>(
   2487                            NEONLoadStoreSingleStructPostIndex));
   2488 
   2489     addr_field |= NEONLoadStoreMultiStructPostIndex;
   2490     if (addr.GetOffset() == 0) {
   2491       addr_field |= RmNot31(addr.GetRegisterOffset());
   2492     } else {
   2493       // The immediate post index addressing mode is indicated by rm = 31.
   2494       // The immediate is implied by the number of vector registers used.
   2495       addr_field |= (0x1f << Rm_offset);
   2496     }
   2497   } else {
   2498     VIXL_ASSERT(addr.IsImmediateOffset() && (addr.GetOffset() == 0));
   2499   }
   2500   return addr_field;
   2501 }
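
// For example (illustrative): "ld1 {v0.16b, v1.16b}, [x1], #32" is encoded
// with Rm = 0b11111 and no explicit immediate field, because the 32-byte
// advance is implied by the two-register list, whereas
// "ld1 {v0.16b, v1.16b}, [x1], x2" encodes x2 in Rm.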
   2502 
   2503 void Assembler::LoadStoreStructVerify(const VRegister& vt,
   2504                                       const MemOperand& addr,
   2505                                       Instr op) {
   2506 #ifdef VIXL_DEBUG
   2507   // Assert that addressing mode is either offset (with immediate 0), post
   2508   // index by immediate of the size of the register list, or post index by a
   2509   // value in a core register.
   2510   VIXL_ASSERT(vt.HasSize() && vt.HasLaneSize());
   2511   if (addr.IsImmediateOffset()) {
   2512     VIXL_ASSERT(addr.GetOffset() == 0);
   2513   } else {
   2514     int offset = vt.GetSizeInBytes();
   2515     switch (op) {
   2516       case NEON_LD1_1v:
   2517       case NEON_ST1_1v:
   2518         offset *= 1;
   2519         break;
   2520       case NEONLoadStoreSingleStructLoad1:
   2521       case NEONLoadStoreSingleStructStore1:
   2522       case NEON_LD1R:
   2523         offset = (offset / vt.GetLanes()) * 1;
   2524         break;
   2525 
   2526       case NEON_LD1_2v:
   2527       case NEON_ST1_2v:
   2528       case NEON_LD2:
   2529       case NEON_ST2:
   2530         offset *= 2;
   2531         break;
   2532       case NEONLoadStoreSingleStructLoad2:
   2533       case NEONLoadStoreSingleStructStore2:
   2534       case NEON_LD2R:
   2535         offset = (offset / vt.GetLanes()) * 2;
   2536         break;
   2537 
   2538       case NEON_LD1_3v:
   2539       case NEON_ST1_3v:
   2540       case NEON_LD3:
   2541       case NEON_ST3:
   2542         offset *= 3;
   2543         break;
   2544       case NEONLoadStoreSingleStructLoad3:
   2545       case NEONLoadStoreSingleStructStore3:
   2546       case NEON_LD3R:
   2547         offset = (offset / vt.GetLanes()) * 3;
   2548         break;
   2549 
   2550       case NEON_LD1_4v:
   2551       case NEON_ST1_4v:
   2552       case NEON_LD4:
   2553       case NEON_ST4:
   2554         offset *= 4;
   2555         break;
   2556       case NEONLoadStoreSingleStructLoad4:
   2557       case NEONLoadStoreSingleStructStore4:
   2558       case NEON_LD4R:
   2559         offset = (offset / vt.GetLanes()) * 4;
   2560         break;
   2561       default:
   2562         VIXL_UNREACHABLE();
   2563     }
   2564     VIXL_ASSERT(!addr.GetRegisterOffset().Is(NoReg) ||
   2565                 addr.GetOffset() == offset);
   2566   }
   2567 #else
   2568   USE(vt, addr, op);
   2569 #endif
   2570 }
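
// Worked example, for illustration: for ld3 of three 4S registers, the
// multi-structure form transfers 3 * 16 = 48 bytes, so an immediate
// post-index must advance by exactly 48; the single-lane ld3 of S-sized
// lanes transfers (16 / 4) * 3 = 12 bytes, so its immediate post-index must
// be 12. A register post-index (Rm != 31) may advance by any amount.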
   2571 
   2572 void Assembler::LoadStoreStruct(const VRegister& vt,
   2573                                 const MemOperand& addr,
   2574                                 NEONLoadStoreMultiStructOp op) {
   2575   LoadStoreStructVerify(vt, addr, op);
   2576   VIXL_ASSERT(vt.IsVector() || vt.Is1D());
   2577   Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
   2578 }
   2579 
   2580 
   2581 void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt,
   2582                                               const MemOperand& addr,
   2583                                               NEONLoadStoreSingleStructOp op) {
   2584   LoadStoreStructVerify(vt, addr, op);
   2585   Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
   2586 }
   2587 
   2588 
   2589 void Assembler::ld1(const VRegister& vt, const MemOperand& src) {
   2590   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2591   LoadStoreStruct(vt, src, NEON_LD1_1v);
   2592 }
   2593 
   2594 
   2595 void Assembler::ld1(const VRegister& vt,
   2596                     const VRegister& vt2,
   2597                     const MemOperand& src) {
   2598   USE(vt2);
   2599   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2600   VIXL_ASSERT(AreSameFormat(vt, vt2));
   2601   VIXL_ASSERT(AreConsecutive(vt, vt2));
   2602   LoadStoreStruct(vt, src, NEON_LD1_2v);
   2603 }
   2604 
   2605 
   2606 void Assembler::ld1(const VRegister& vt,
   2607                     const VRegister& vt2,
   2608                     const VRegister& vt3,
   2609                     const MemOperand& src) {
   2610   USE(vt2, vt3);
   2611   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2612   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   2613   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   2614   LoadStoreStruct(vt, src, NEON_LD1_3v);
   2615 }
   2616 
   2617 
   2618 void Assembler::ld1(const VRegister& vt,
   2619                     const VRegister& vt2,
   2620                     const VRegister& vt3,
   2621                     const VRegister& vt4,
   2622                     const MemOperand& src) {
   2623   USE(vt2, vt3, vt4);
   2624   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2625   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   2626   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   2627   LoadStoreStruct(vt, src, NEON_LD1_4v);
   2628 }
   2629 
   2630 
   2631 void Assembler::ld2(const VRegister& vt,
   2632                     const VRegister& vt2,
   2633                     const MemOperand& src) {
   2634   USE(vt2);
   2635   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2636   VIXL_ASSERT(AreSameFormat(vt, vt2));
   2637   VIXL_ASSERT(AreConsecutive(vt, vt2));
   2638   LoadStoreStruct(vt, src, NEON_LD2);
   2639 }
   2640 
   2641 
   2642 void Assembler::ld2(const VRegister& vt,
   2643                     const VRegister& vt2,
   2644                     int lane,
   2645                     const MemOperand& src) {
   2646   USE(vt2);
   2647   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2648   VIXL_ASSERT(AreSameFormat(vt, vt2));
   2649   VIXL_ASSERT(AreConsecutive(vt, vt2));
   2650   LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2);
   2651 }
   2652 
   2653 
   2654 void Assembler::ld2r(const VRegister& vt,
   2655                      const VRegister& vt2,
   2656                      const MemOperand& src) {
   2657   USE(vt2);
   2658   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2659   VIXL_ASSERT(AreSameFormat(vt, vt2));
   2660   VIXL_ASSERT(AreConsecutive(vt, vt2));
   2661   LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R);
   2662 }
   2663 
   2664 
   2665 void Assembler::ld3(const VRegister& vt,
   2666                     const VRegister& vt2,
   2667                     const VRegister& vt3,
   2668                     const MemOperand& src) {
   2669   USE(vt2, vt3);
   2670   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2671   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   2672   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   2673   LoadStoreStruct(vt, src, NEON_LD3);
   2674 }
   2675 
   2676 
   2677 void Assembler::ld3(const VRegister& vt,
   2678                     const VRegister& vt2,
   2679                     const VRegister& vt3,
   2680                     int lane,
   2681                     const MemOperand& src) {
   2682   USE(vt2, vt3);
   2683   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2684   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   2685   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   2686   LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3);
   2687 }
   2688 
   2689 
   2690 void Assembler::ld3r(const VRegister& vt,
   2691                      const VRegister& vt2,
   2692                      const VRegister& vt3,
   2693                      const MemOperand& src) {
   2694   USE(vt2, vt3);
   2695   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2696   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   2697   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   2698   LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R);
   2699 }
   2700 
   2701 
   2702 void Assembler::ld4(const VRegister& vt,
   2703                     const VRegister& vt2,
   2704                     const VRegister& vt3,
   2705                     const VRegister& vt4,
   2706                     const MemOperand& src) {
   2707   USE(vt2, vt3, vt4);
   2708   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2709   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   2710   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   2711   LoadStoreStruct(vt, src, NEON_LD4);
   2712 }
   2713 
   2714 
   2715 void Assembler::ld4(const VRegister& vt,
   2716                     const VRegister& vt2,
   2717                     const VRegister& vt3,
   2718                     const VRegister& vt4,
   2719                     int lane,
   2720                     const MemOperand& src) {
   2721   USE(vt2, vt3, vt4);
   2722   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2723   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   2724   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   2725   LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4);
   2726 }
   2727 
   2728 
   2729 void Assembler::ld4r(const VRegister& vt,
   2730                      const VRegister& vt2,
   2731                      const VRegister& vt3,
   2732                      const VRegister& vt4,
   2733                      const MemOperand& src) {
   2734   USE(vt2, vt3, vt4);
   2735   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2736   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   2737   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   2738   LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R);
   2739 }
   2740 
   2741 
   2742 void Assembler::st1(const VRegister& vt, const MemOperand& src) {
   2743   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2744   LoadStoreStruct(vt, src, NEON_ST1_1v);
   2745 }
   2746 
   2747 
   2748 void Assembler::st1(const VRegister& vt,
   2749                     const VRegister& vt2,
   2750                     const MemOperand& src) {
   2751   USE(vt2);
   2752   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2753   VIXL_ASSERT(AreSameFormat(vt, vt2));
   2754   VIXL_ASSERT(AreConsecutive(vt, vt2));
   2755   LoadStoreStruct(vt, src, NEON_ST1_2v);
   2756 }
   2757 
   2758 
   2759 void Assembler::st1(const VRegister& vt,
   2760                     const VRegister& vt2,
   2761                     const VRegister& vt3,
   2762                     const MemOperand& src) {
   2763   USE(vt2, vt3);
   2764   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2765   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   2766   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   2767   LoadStoreStruct(vt, src, NEON_ST1_3v);
   2768 }
   2769 
   2770 
   2771 void Assembler::st1(const VRegister& vt,
   2772                     const VRegister& vt2,
   2773                     const VRegister& vt3,
   2774                     const VRegister& vt4,
   2775                     const MemOperand& src) {
   2776   USE(vt2, vt3, vt4);
   2777   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2778   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   2779   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   2780   LoadStoreStruct(vt, src, NEON_ST1_4v);
   2781 }
   2782 
   2783 
   2784 void Assembler::st2(const VRegister& vt,
   2785                     const VRegister& vt2,
   2786                     const MemOperand& dst) {
   2787   USE(vt2);
   2788   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2789   VIXL_ASSERT(AreSameFormat(vt, vt2));
   2790   VIXL_ASSERT(AreConsecutive(vt, vt2));
   2791   LoadStoreStruct(vt, dst, NEON_ST2);
   2792 }
   2793 
   2794 
   2795 void Assembler::st2(const VRegister& vt,
   2796                     const VRegister& vt2,
   2797                     int lane,
   2798                     const MemOperand& dst) {
   2799   USE(vt2);
   2800   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2801   VIXL_ASSERT(AreSameFormat(vt, vt2));
   2802   VIXL_ASSERT(AreConsecutive(vt, vt2));
   2803   LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2);
   2804 }
   2805 
   2806 
   2807 void Assembler::st3(const VRegister& vt,
   2808                     const VRegister& vt2,
   2809                     const VRegister& vt3,
   2810                     const MemOperand& dst) {
   2811   USE(vt2, vt3);
   2812   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2813   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   2814   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   2815   LoadStoreStruct(vt, dst, NEON_ST3);
   2816 }
   2817 
   2818 
   2819 void Assembler::st3(const VRegister& vt,
   2820                     const VRegister& vt2,
   2821                     const VRegister& vt3,
   2822                     int lane,
   2823                     const MemOperand& dst) {
   2824   USE(vt2, vt3);
   2825   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2826   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
   2827   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
   2828   LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3);
   2829 }
   2830 
   2831 
   2832 void Assembler::st4(const VRegister& vt,
   2833                     const VRegister& vt2,
   2834                     const VRegister& vt3,
   2835                     const VRegister& vt4,
   2836                     const MemOperand& dst) {
   2837   USE(vt2, vt3, vt4);
   2838   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2839   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   2840   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   2841   LoadStoreStruct(vt, dst, NEON_ST4);
   2842 }
   2843 
   2844 
   2845 void Assembler::st4(const VRegister& vt,
   2846                     const VRegister& vt2,
   2847                     const VRegister& vt3,
   2848                     const VRegister& vt4,
   2849                     int lane,
   2850                     const MemOperand& dst) {
   2851   USE(vt2, vt3, vt4);
   2852   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2853   VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
   2854   VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
   2855   LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4);
   2856 }
   2857 
   2858 
   2859 void Assembler::LoadStoreStructSingle(const VRegister& vt,
   2860                                       uint32_t lane,
   2861                                       const MemOperand& addr,
   2862                                       NEONLoadStoreSingleStructOp op) {
   2863   LoadStoreStructVerify(vt, addr, op);
   2864 
   2865   // We support vt arguments of the form vt.VxT() or vt.T(), where x is the
   2866   // number of lanes, and T is b, h, s or d.
   2867   unsigned lane_size = vt.GetLaneSizeInBytes();
   2868   VIXL_ASSERT(lane_size > 0);
   2869   VIXL_ASSERT(lane < (kQRegSizeInBytes / lane_size));
   2870 
   2871   // Lane size is encoded in the opcode field. Lane index is encoded in the Q,
   2872   // S and size fields.
   2873   lane *= lane_size;
   2874   if (lane_size == 8) lane++;
   2875 
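          // Worked example (illustration of the encoding below): at this point
          // `lane` is the byte offset of the selected element within the
          // 16-byte register, plus one for D-sized lanes so that the size
          // field reads 0b01 as the encoding requires. Bits [1:0] of this
          // value feed the size field, bit 2 feeds S and bit 3 feeds Q; for
          // instance, lane 3 of a 4S vector gives 3 * 4 = 12 = 0b1100,
          // i.e. Q = 1, S = 1, size = 0b00.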
   2876   Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask;
   2877   Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask;
   2878   Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask;
   2879 
   2880   Instr instr = op;
   2881   switch (lane_size) {
   2882     case 1:
   2883       instr |= NEONLoadStoreSingle_b;
   2884       break;
   2885     case 2:
   2886       instr |= NEONLoadStoreSingle_h;
   2887       break;
   2888     case 4:
   2889       instr |= NEONLoadStoreSingle_s;
   2890       break;
   2891     default:
   2892       VIXL_ASSERT(lane_size == 8);
   2893       instr |= NEONLoadStoreSingle_d;
   2894   }
   2895 
   2896   Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt));
   2897 }
   2898 
   2899 
   2900 void Assembler::ld1(const VRegister& vt, int lane, const MemOperand& src) {
   2901   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2902   LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1);
   2903 }
   2904 
   2905 
   2906 void Assembler::ld1r(const VRegister& vt, const MemOperand& src) {
   2907   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2908   LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R);
   2909 }
   2910 
   2911 
   2912 void Assembler::st1(const VRegister& vt, int lane, const MemOperand& dst) {
   2913   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2914   LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1);
   2915 }
   2916 
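        // Note on the two pmull forms below: the byte form (vn 8B/16B, vd 8H)
        // only needs base NEON, while the 64-bit polynomial form (vn 1D/2D,
        // vd 1Q) is an optional extension and is gated on kPmull1Q, as the
        // asserts show.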
   2917 void Assembler::pmull(const VRegister& vd,
   2918                       const VRegister& vn,
   2919                       const VRegister& vm) {
   2920   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2921   VIXL_ASSERT(AreSameFormat(vn, vm));
   2922   VIXL_ASSERT((vn.Is8B() && vd.Is8H()) || (vn.Is1D() && vd.Is1Q()));
   2923   VIXL_ASSERT(CPUHas(CPUFeatures::kPmull1Q) || vd.Is8H());
   2924   Emit(VFormat(vn) | NEON_PMULL | Rm(vm) | Rn(vn) | Rd(vd));
   2925 }
   2926 
   2927 void Assembler::pmull2(const VRegister& vd,
   2928                        const VRegister& vn,
   2929                        const VRegister& vm) {
   2930   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   2931   VIXL_ASSERT(AreSameFormat(vn, vm));
   2932   VIXL_ASSERT((vn.Is16B() && vd.Is8H()) || (vn.Is2D() && vd.Is1Q()));
   2933   VIXL_ASSERT(CPUHas(CPUFeatures::kPmull1Q) || vd.Is8H());
   2934   Emit(VFormat(vn) | NEON_PMULL2 | Rm(vm) | Rn(vn) | Rd(vd));
   2935 }
   2936 
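        // The NEON3Different* helpers below cover the "three registers,
        // different lanes" encodings: the L ("long") form writes lanes twice
        // as wide as its sources, the W ("wide") form combines a wide first
        // source with a narrow second source, and the HN ("high narrow") form
        // writes lanes half as wide as its sources, as the format asserts in
        // each helper show.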
   2937 void Assembler::NEON3DifferentL(const VRegister& vd,
   2938                                 const VRegister& vn,
   2939                                 const VRegister& vm,
   2940                                 NEON3DifferentOp vop) {
   2941   VIXL_ASSERT(AreSameFormat(vn, vm));
   2942   VIXL_ASSERT((vn.Is1H() && vd.Is1S()) || (vn.Is1S() && vd.Is1D()) ||
   2943               (vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
   2944               (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
   2945               (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
   2946   Instr format, op = vop;
   2947   if (vd.IsScalar()) {
   2948     op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
   2949     format = SFormat(vn);
   2950   } else {
   2951     format = VFormat(vn);
   2952   }
   2953   Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
   2954 }
   2955 
   2956 
   2957 void Assembler::NEON3DifferentW(const VRegister& vd,
   2958                                 const VRegister& vn,
   2959                                 const VRegister& vm,
   2960                                 NEON3DifferentOp vop) {
   2961   VIXL_ASSERT(AreSameFormat(vd, vn));
   2962   VIXL_ASSERT((vm.Is8B() && vd.Is8H()) || (vm.Is4H() && vd.Is4S()) ||
   2963               (vm.Is2S() && vd.Is2D()) || (vm.Is16B() && vd.Is8H()) ||
   2964               (vm.Is8H() && vd.Is4S()) || (vm.Is4S() && vd.Is2D()));
   2965   Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd));
   2966 }
   2967 
   2968 
   2969 void Assembler::NEON3DifferentHN(const VRegister& vd,
   2970                                  const VRegister& vn,
   2971                                  const VRegister& vm,
   2972                                  NEON3DifferentOp vop) {
   2973   VIXL_ASSERT(AreSameFormat(vm, vn));
   2974   VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
   2975               (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
   2976               (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
   2977   Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd));
   2978 }
   2979 
   2980 
   2981 // clang-format off
   2982 #define NEON_3DIFF_LONG_LIST(V) \
   2983   V(saddl,  NEON_SADDL,  vn.IsVector() && vn.IsD())                            \
   2984   V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ())                            \
   2985   V(sabal,  NEON_SABAL,  vn.IsVector() && vn.IsD())                            \
   2986   V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ())                            \
   2987   V(uabal,  NEON_UABAL,  vn.IsVector() && vn.IsD())                            \
   2988   V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ())                            \
   2989   V(sabdl,  NEON_SABDL,  vn.IsVector() && vn.IsD())                            \
   2990   V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ())                            \
   2991   V(uabdl,  NEON_UABDL,  vn.IsVector() && vn.IsD())                            \
   2992   V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ())                            \
   2993   V(smlal,  NEON_SMLAL,  vn.IsVector() && vn.IsD())                            \
   2994   V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ())                            \
   2995   V(umlal,  NEON_UMLAL,  vn.IsVector() && vn.IsD())                            \
   2996   V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ())                            \
   2997   V(smlsl,  NEON_SMLSL,  vn.IsVector() && vn.IsD())                            \
   2998   V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ())                            \
   2999   V(umlsl,  NEON_UMLSL,  vn.IsVector() && vn.IsD())                            \
   3000   V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ())                            \
   3001   V(smull,  NEON_SMULL,  vn.IsVector() && vn.IsD())                            \
   3002   V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ())                            \
   3003   V(umull,  NEON_UMULL,  vn.IsVector() && vn.IsD())                            \
   3004   V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ())                            \
   3005   V(ssubl,  NEON_SSUBL,  vn.IsVector() && vn.IsD())                            \
   3006   V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ())                            \
   3007   V(uaddl,  NEON_UADDL,  vn.IsVector() && vn.IsD())                            \
   3008   V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ())                            \
   3009   V(usubl,  NEON_USUBL,  vn.IsVector() && vn.IsD())                            \
   3010   V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ())                            \
   3011   V(sqdmlal,  NEON_SQDMLAL,  vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
   3012   V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
   3013   V(sqdmlsl,  NEON_SQDMLSL,  vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
   3014   V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
   3015   V(sqdmull,  NEON_SQDMULL,  vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
   3016   V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
   3017 // clang-format on
   3018 
   3019 
   3020 #define VIXL_DEFINE_ASM_FUNC(FN, OP, AS)                   \
   3021 void Assembler::FN(const VRegister& vd,               \
   3022                    const VRegister& vn,               \
   3023                    const VRegister& vm) {             \
   3024   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));            \
   3025   VIXL_ASSERT(AS);                                    \
   3026   NEON3DifferentL(vd, vn, vm, OP);                    \
   3027 }
   3028 NEON_3DIFF_LONG_LIST(VIXL_DEFINE_ASM_FUNC)
   3029 #undef VIXL_DEFINE_ASM_FUNC
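        // For reference, the first entry of the list above expands to roughly
        // the following (reformatted):
        //
        //   void Assembler::saddl(const VRegister& vd,
        //                         const VRegister& vn,
        //                         const VRegister& vm) {
        //     VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
        //     VIXL_ASSERT(vn.IsVector() && vn.IsD());
        //     NEON3DifferentL(vd, vn, vm, NEON_SADDL);
        //   }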
   3030 
   3031 // clang-format off
   3032 #define NEON_3DIFF_HN_LIST(V)         \
   3033   V(addhn,   NEON_ADDHN,   vd.IsD())  \
   3034   V(addhn2,  NEON_ADDHN2,  vd.IsQ())  \
   3035   V(raddhn,  NEON_RADDHN,  vd.IsD())  \
   3036   V(raddhn2, NEON_RADDHN2, vd.IsQ())  \
   3037   V(subhn,   NEON_SUBHN,   vd.IsD())  \
   3038   V(subhn2,  NEON_SUBHN2,  vd.IsQ())  \
   3039   V(rsubhn,  NEON_RSUBHN,  vd.IsD())  \
   3040   V(rsubhn2, NEON_RSUBHN2, vd.IsQ())
   3041 // clang-format on
   3042 
   3043 #define VIXL_DEFINE_ASM_FUNC(FN, OP, AS)     \
   3044   void Assembler::FN(const VRegister& vd,    \
   3045                      const VRegister& vn,    \
   3046                      const VRegister& vm) {  \
   3047     VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \
   3048     VIXL_ASSERT(AS);                         \
   3049     NEON3DifferentHN(vd, vn, vm, OP);        \
   3050   }
   3051 NEON_3DIFF_HN_LIST(VIXL_DEFINE_ASM_FUNC)
   3052 #undef VIXL_DEFINE_ASM_FUNC
   3053 
   3054 void Assembler::uaddw(const VRegister& vd,
   3055                       const VRegister& vn,
   3056                       const VRegister& vm) {
   3057   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   3058   VIXL_ASSERT(vm.IsD());
   3059   NEON3DifferentW(vd, vn, vm, NEON_UADDW);
   3060 }
   3061 
   3062 
   3063 void Assembler::uaddw2(const VRegister& vd,
   3064                        const VRegister& vn,
   3065                        const VRegister& vm) {
   3066   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   3067   VIXL_ASSERT(vm.IsQ());
   3068   NEON3DifferentW(vd, vn, vm, NEON_UADDW2);
   3069 }
   3070 
   3071 
   3072 void Assembler::saddw(const VRegister& vd,
   3073                       const VRegister& vn,
   3074                       const VRegister& vm) {
   3075   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   3076   VIXL_ASSERT(vm.IsD());
   3077   NEON3DifferentW(vd, vn, vm, NEON_SADDW);
   3078 }
   3079 
   3080 
   3081 void Assembler::saddw2(const VRegister& vd,
   3082                        const VRegister& vn,
   3083                        const VRegister& vm) {
   3084   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   3085   VIXL_ASSERT(vm.IsQ());
   3086   NEON3DifferentW(vd, vn, vm, NEON_SADDW2);
   3087 }
   3088 
   3089 
   3090 void Assembler::usubw(const VRegister& vd,
   3091                       const VRegister& vn,
   3092                       const VRegister& vm) {
   3093   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   3094   VIXL_ASSERT(vm.IsD());
   3095   NEON3DifferentW(vd, vn, vm, NEON_USUBW);
   3096 }
   3097 
   3098 
   3099 void Assembler::usubw2(const VRegister& vd,
   3100                        const VRegister& vn,
   3101                        const VRegister& vm) {
   3102   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   3103   VIXL_ASSERT(vm.IsQ());
   3104   NEON3DifferentW(vd, vn, vm, NEON_USUBW2);
   3105 }
   3106 
   3107 
   3108 void Assembler::ssubw(const VRegister& vd,
   3109                       const VRegister& vn,
   3110                       const VRegister& vm) {
   3111   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   3112   VIXL_ASSERT(vm.IsD());
   3113   NEON3DifferentW(vd, vn, vm, NEON_SSUBW);
   3114 }
   3115 
   3116 
   3117 void Assembler::ssubw2(const VRegister& vd,
   3118                        const VRegister& vn,
   3119                        const VRegister& vm) {
   3120   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   3121   VIXL_ASSERT(vm.IsQ());
   3122   NEON3DifferentW(vd, vn, vm, NEON_SSUBW2);
   3123 }
   3124 
   3125 
   3126 void Assembler::mov(const Register& rd, const Register& rm) {
   3127   // Moves involving the stack pointer are encoded as add immediate with
   3128   // second operand of zero. Otherwise, orr with first operand zr is
   3129   // used.
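          // For example, mov(x0, sp) assembles as "add x0, sp, #0", while
          // mov(x1, x2) assembles as "orr x1, xzr, x2".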
   3130   if (rd.IsSP() || rm.IsSP()) {
   3131     add(rd, rm, 0);
   3132   } else {
   3133     orr(rd, AppropriateZeroRegFor(rd), rm);
   3134   }
   3135 }
   3136 
   3137 void Assembler::xpaclri() {
   3138   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   3139   Emit(XPACLRI);
   3140 }
   3141 
   3142 void Assembler::pacia1716() {
   3143   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   3144   Emit(PACIA1716);
   3145 }
   3146 
   3147 void Assembler::pacib1716() {
   3148   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   3149   Emit(PACIB1716);
   3150 }
   3151 
   3152 void Assembler::autia1716() {
   3153   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   3154   Emit(AUTIA1716);
   3155 }
   3156 
   3157 void Assembler::autib1716() {
   3158   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   3159   Emit(AUTIB1716);
   3160 }
   3161 
   3162 void Assembler::paciaz() {
   3163   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   3164   Emit(PACIAZ);
   3165 }
   3166 
   3167 void Assembler::pacibz() {
   3168   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   3169   Emit(PACIBZ);
   3170 }
   3171 
   3172 void Assembler::autiaz() {
   3173   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   3174   Emit(AUTIAZ);
   3175 }
   3176 
   3177 void Assembler::autibz() {
   3178   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   3179   Emit(AUTIBZ);
   3180 }
   3181 
   3182 void Assembler::paciasp() {
   3183   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   3184   Emit(PACIASP);
   3185 }
   3186 
   3187 void Assembler::pacibsp() {
   3188   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   3189   Emit(PACIBSP);
   3190 }
   3191 
   3192 void Assembler::autiasp() {
   3193   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   3194   Emit(AUTIASP);
   3195 }
   3196 
   3197 void Assembler::autibsp() {
   3198   VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth));
   3199   Emit(AUTIBSP);
   3200 }
   3201 
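        // bti() is encoded in the hint space (via hint() below), so on cores
        // without the BTI extension it executes as a NOP.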
   3202 void Assembler::bti(BranchTargetIdentifier id) {
   3203   VIXL_ASSERT((id != EmitPACIASP) && (id != EmitPACIBSP));  // Not modes of BTI.
   3204   VIXL_ASSERT(id != EmitBTI_none);  // Always generate an instruction.
   3205   VIXL_ASSERT(CPUHas(CPUFeatures::kBTI));
   3206   hint(static_cast<SystemHint>(id));
   3207 }
   3208 
   3209 void Assembler::mvn(const Register& rd, const Operand& operand) {
   3210   orn(rd, AppropriateZeroRegFor(rd), operand);
   3211 }
   3212 
   3213 
   3214 void Assembler::mrs(const Register& xt, SystemRegister sysreg) {
   3215   VIXL_ASSERT(xt.Is64Bits());
   3216   VIXL_ASSERT(CPUHas(sysreg));
   3217   Emit(MRS | ImmSystemRegister(sysreg) | Rt(xt));
   3218 }
   3219 
   3220 
   3221 void Assembler::msr(SystemRegister sysreg, const Register& xt) {
   3222   VIXL_ASSERT(xt.Is64Bits());
   3223   VIXL_ASSERT(CPUHas(sysreg));
   3224   Emit(MSR | Rt(xt) | ImmSystemRegister(sysreg));
   3225 }
   3226 
   3227 
   3228 void Assembler::cfinv() {
   3229   VIXL_ASSERT(CPUHas(CPUFeatures::kFlagM));
   3230   Emit(CFINV);
   3231 }
   3232 
   3233 
   3234 void Assembler::axflag() {
   3235   VIXL_ASSERT(CPUHas(CPUFeatures::kAXFlag));
   3236   Emit(AXFLAG);
   3237 }
   3238 
   3239 
   3240 void Assembler::xaflag() {
   3241   VIXL_ASSERT(CPUHas(CPUFeatures::kAXFlag));
   3242   Emit(XAFLAG);
   3243 }
   3244 
   3245 
   3246 void Assembler::clrex(int imm4) { Emit(CLREX | CRm(imm4)); }
   3247 
   3248 
   3249 void Assembler::dmb(BarrierDomain domain, BarrierType type) {
   3250   Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
   3251 }
   3252 
   3253 
   3254 void Assembler::dsb(BarrierDomain domain, BarrierType type) {
   3255   Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
   3256 }
   3257 
   3258 
   3259 void Assembler::isb() {
   3260   Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
   3261 }
   3262 
   3263 void Assembler::esb() {
   3264   VIXL_ASSERT(CPUHas(CPUFeatures::kRAS));
   3265   hint(ESB);
   3266 }
   3267 
   3268 void Assembler::csdb() { hint(CSDB); }
   3269 
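        // The fmov immediate overloads below can only encode values
        // representable in the AArch64 8-bit floating-point immediate format:
        // +/-(1 + m/16) * 2^e with integer m in [0, 15] and e in [-3, 4]
        // (so, for example, 0.5, 1.0 and -3.875 are encodable, but 0.1 is
        // not).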
   3270 void Assembler::fmov(const VRegister& vd, double imm) {
   3271   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3272   if (vd.IsScalar()) {
   3273     VIXL_ASSERT(vd.Is1D());
   3274     Emit(FMOV_d_imm | Rd(vd) | ImmFP64(imm));
   3275   } else {
   3276     VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   3277     VIXL_ASSERT(vd.Is2D());
   3278     Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
   3279     Instr q = NEON_Q;
   3280     uint32_t encoded_imm = FP64ToImm8(imm);
   3281     Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
   3282   }
   3283 }
   3284 
   3285 
   3286 void Assembler::fmov(const VRegister& vd, float imm) {
   3287   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3288   if (vd.IsScalar()) {
   3289     VIXL_ASSERT(vd.Is1S());
   3290     Emit(FMOV_s_imm | Rd(vd) | ImmFP32(imm));
   3291   } else {
   3292     VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   3293     VIXL_ASSERT(vd.Is2S() || vd.Is4S());
   3294     Instr op = NEONModifiedImmediate_MOVI;
   3295     Instr q = vd.Is4S() ? NEON_Q : 0;
   3296     uint32_t encoded_imm = FP32ToImm8(imm);
   3297     Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
   3298   }
   3299 }
   3300 
   3301 
   3302 void Assembler::fmov(const VRegister& vd, Float16 imm) {
   3303   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3304   if (vd.IsScalar()) {
   3305     VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3306     VIXL_ASSERT(vd.Is1H());
   3307     Emit(FMOV_h_imm | Rd(vd) | ImmFP16(imm));
   3308   } else {
   3309     VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf));
   3310     VIXL_ASSERT(vd.Is4H() || vd.Is8H());
   3311     Instr q = vd.Is8H() ? NEON_Q : 0;
   3312     uint32_t encoded_imm = FP16ToImm8(imm);
   3313     Emit(q | NEONModifiedImmediate_FMOV | ImmNEONabcdefgh(encoded_imm) |
   3314          NEONCmode(0xf) | Rd(vd));
   3315   }
   3316 }
   3317 
   3318 
   3319 void Assembler::fmov(const Register& rd, const VRegister& vn) {
   3320   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3321   VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D());
   3322   VIXL_ASSERT((rd.GetSizeInBits() == vn.GetSizeInBits()) || vn.Is1H());
   3323   FPIntegerConvertOp op;
   3324   switch (vn.GetSizeInBits()) {
   3325     case 16:
   3326       VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3327       op = rd.Is64Bits() ? FMOV_xh : FMOV_wh;
   3328       break;
   3329     case 32:
   3330       op = FMOV_ws;
   3331       break;
   3332     default:
   3333       op = FMOV_xd;
   3334   }
   3335   Emit(op | Rd(rd) | Rn(vn));
   3336 }
   3337 
   3338 
   3339 void Assembler::fmov(const VRegister& vd, const Register& rn) {
   3340   VIXL_ASSERT(CPUHas(CPUFeatures::kFP) ||
   3341               (vd.Is1D() && CPUHas(CPUFeatures::kNEON)));
   3342   VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
   3343   VIXL_ASSERT((vd.GetSizeInBits() == rn.GetSizeInBits()) || vd.Is1H());
   3344   FPIntegerConvertOp op;
   3345   switch (vd.GetSizeInBits()) {
   3346     case 16:
   3347       VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3348       op = rn.Is64Bits() ? FMOV_hx : FMOV_hw;
   3349       break;
   3350     case 32:
   3351       op = FMOV_sw;
   3352       break;
   3353     default:
   3354       op = FMOV_dx;
   3355   }
   3356   Emit(op | Rd(vd) | Rn(rn));
   3357 }
   3358 
   3359 
   3360 void Assembler::fmov(const VRegister& vd, const VRegister& vn) {
   3361   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3362   if (vd.Is1H()) {
   3363     VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3364   }
   3365   VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
   3366   VIXL_ASSERT(vd.IsSameFormat(vn));
   3367   Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn));
   3368 }
   3369 
   3370 
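        // The two indexed fmov overloads below access doubleword element 1,
        // i.e. the upper 64 bits of a 128-bit vector register (hence the
        // index == 1 asserts); the non-indexed forms above cover the low
        // half.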
   3371 void Assembler::fmov(const VRegister& vd, int index, const Register& rn) {
   3372   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kFP));
   3373   VIXL_ASSERT((index == 1) && vd.Is1D() && rn.IsX());
   3374   USE(index);
   3375   Emit(FMOV_d1_x | Rd(vd) | Rn(rn));
   3376 }
   3377 
   3378 
   3379 void Assembler::fmov(const Register& rd, const VRegister& vn, int index) {
   3380   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kFP));
   3381   VIXL_ASSERT((index == 1) && vn.Is1D() && rd.IsX());
   3382   USE(index);
   3383   Emit(FMOV_x_d1 | Rd(rd) | Rn(vn));
   3384 }
   3385 
   3386 
   3387 void Assembler::fmadd(const VRegister& vd,
   3388                       const VRegister& vn,
   3389                       const VRegister& vm,
   3390                       const VRegister& va) {
   3391   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3392   FPDataProcessing3SourceOp op;
   3393   if (vd.Is1H()) {
   3394     VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3395     op = FMADD_h;
   3396   } else if (vd.Is1S()) {
   3397     op = FMADD_s;
   3398   } else {
   3399     VIXL_ASSERT(vd.Is1D());
   3400     op = FMADD_d;
   3401   }
   3402   FPDataProcessing3Source(vd, vn, vm, va, op);
   3403 }
   3404 
   3405 
   3406 void Assembler::fmsub(const VRegister& vd,
   3407                       const VRegister& vn,
   3408                       const VRegister& vm,
   3409                       const VRegister& va) {
   3410   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3411   FPDataProcessing3SourceOp op;
   3412   if (vd.Is1H()) {
   3413     VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3414     op = FMSUB_h;
   3415   } else if (vd.Is1S()) {
   3416     op = FMSUB_s;
   3417   } else {
   3418     VIXL_ASSERT(vd.Is1D());
   3419     op = FMSUB_d;
   3420   }
   3421   FPDataProcessing3Source(vd, vn, vm, va, op);
   3422 }
   3423 
   3424 
   3425 void Assembler::fnmadd(const VRegister& vd,
   3426                        const VRegister& vn,
   3427                        const VRegister& vm,
   3428                        const VRegister& va) {
   3429   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3430   FPDataProcessing3SourceOp op;
   3431   if (vd.Is1H()) {
   3432     VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3433     op = FNMADD_h;
   3434   } else if (vd.Is1S()) {
   3435     op = FNMADD_s;
   3436   } else {
   3437     VIXL_ASSERT(vd.Is1D());
   3438     op = FNMADD_d;
   3439   }
   3440   FPDataProcessing3Source(vd, vn, vm, va, op);
   3441 }
   3442 
   3443 
   3444 void Assembler::fnmsub(const VRegister& vd,
   3445                        const VRegister& vn,
   3446                        const VRegister& vm,
   3447                        const VRegister& va) {
   3448   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3449   FPDataProcessing3SourceOp op;
   3450   if (vd.Is1H()) {
   3451     VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3452     op = FNMSUB_h;
   3453   } else if (vd.Is1S()) {
   3454     op = FNMSUB_s;
   3455   } else {
   3456     VIXL_ASSERT(vd.Is1D());
   3457     op = FNMSUB_d;
   3458   }
   3459   FPDataProcessing3Source(vd, vn, vm, va, op);
   3460 }
   3461 
   3462 
   3463 void Assembler::fnmul(const VRegister& vd,
   3464                       const VRegister& vn,
   3465                       const VRegister& vm) {
   3466   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3467   VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm));
   3468   Instr op;
   3469   if (vd.Is1H()) {
   3470     VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3471     op = FNMUL_h;
   3472   } else if (vd.Is1S()) {
   3473     op = FNMUL_s;
   3474   } else {
   3475     VIXL_ASSERT(vd.Is1D());
   3476     op = FNMUL_d;
   3477   }
   3478   Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
   3479 }
   3480 
   3481 
   3482 void Assembler::FPCompareMacro(const VRegister& vn,
   3483                                double value,
   3484                                FPTrapFlags trap) {
   3485   USE(value);
   3486   // Although the fcmp{e} instructions can strictly only take an immediate
   3487   // value of +0.0, we don't need to check for -0.0 because the sign of 0.0
   3488   // doesn't affect the result of the comparison.
   3489   VIXL_ASSERT(value == 0.0);
   3490   VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D());
   3491   Instr op = (trap == EnableTrap) ? FCMPE_zero : FCMP_zero;
   3492   Emit(FPType(vn) | op | Rn(vn));
   3493 }
   3494 
   3495 
   3496 void Assembler::FPCompareMacro(const VRegister& vn,
   3497                                const VRegister& vm,
   3498                                FPTrapFlags trap) {
   3499   VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D());
   3500   VIXL_ASSERT(vn.IsSameSizeAndType(vm));
   3501   Instr op = (trap == EnableTrap) ? FCMPE : FCMP;
   3502   Emit(FPType(vn) | op | Rm(vm) | Rn(vn));
   3503 }
   3504 
   3505 
   3506 void Assembler::fcmp(const VRegister& vn, const VRegister& vm) {
   3507   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3508   if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3509   FPCompareMacro(vn, vm, DisableTrap);
   3510 }
   3511 
   3512 
   3513 void Assembler::fcmpe(const VRegister& vn, const VRegister& vm) {
   3514   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3515   if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3516   FPCompareMacro(vn, vm, EnableTrap);
   3517 }
   3518 
   3519 
   3520 void Assembler::fcmp(const VRegister& vn, double value) {
   3521   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3522   if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3523   FPCompareMacro(vn, value, DisableTrap);
   3524 }
   3525 
   3526 
   3527 void Assembler::fcmpe(const VRegister& vn, double value) {
   3528   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3529   if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3530   FPCompareMacro(vn, value, EnableTrap);
   3531 }
   3532 
   3533 
   3534 void Assembler::FPCCompareMacro(const VRegister& vn,
   3535                                 const VRegister& vm,
   3536                                 StatusFlags nzcv,
   3537                                 Condition cond,
   3538                                 FPTrapFlags trap) {
   3539   VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D());
   3540   VIXL_ASSERT(vn.IsSameSizeAndType(vm));
   3541   Instr op = (trap == EnableTrap) ? FCCMPE : FCCMP;
   3542   Emit(FPType(vn) | op | Rm(vm) | Cond(cond) | Rn(vn) | Nzcv(nzcv));
   3543 }
   3544 
   3545 void Assembler::fccmp(const VRegister& vn,
   3546                       const VRegister& vm,
   3547                       StatusFlags nzcv,
   3548                       Condition cond) {
   3549   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3550   if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3551   FPCCompareMacro(vn, vm, nzcv, cond, DisableTrap);
   3552 }
   3553 
   3554 
   3555 void Assembler::fccmpe(const VRegister& vn,
   3556                        const VRegister& vm,
   3557                        StatusFlags nzcv,
   3558                        Condition cond) {
   3559   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3560   if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3561   FPCCompareMacro(vn, vm, nzcv, cond, EnableTrap);
   3562 }
   3563 
   3564 
   3565 void Assembler::fcsel(const VRegister& vd,
   3566                       const VRegister& vn,
   3567                       const VRegister& vm,
   3568                       Condition cond) {
   3569   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3570   if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3571   VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
   3572   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   3573   Emit(FPType(vd) | FCSEL | Rm(vm) | Cond(cond) | Rn(vn) | Rd(vd));
   3574 }
   3575 
   3576 
   3577 void Assembler::fcvt(const VRegister& vd, const VRegister& vn) {
   3578   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3579   FPDataProcessing1SourceOp op;
   3580   // The half-precision variants belong to base FP, and do not require kFPHalf.
   3581   if (vd.Is1D()) {
   3582     VIXL_ASSERT(vn.Is1S() || vn.Is1H());
   3583     op = vn.Is1S() ? FCVT_ds : FCVT_dh;
   3584   } else if (vd.Is1S()) {
   3585     VIXL_ASSERT(vn.Is1D() || vn.Is1H());
   3586     op = vn.Is1D() ? FCVT_sd : FCVT_sh;
   3587   } else {
   3588     VIXL_ASSERT(vd.Is1H());
   3589     VIXL_ASSERT(vn.Is1D() || vn.Is1S());
   3590     op = vn.Is1D() ? FCVT_hd : FCVT_hs;
   3591   }
   3592   FPDataProcessing1Source(vd, vn, op);
   3593 }
   3594 
   3595 
   3596 void Assembler::fcvtl(const VRegister& vd, const VRegister& vn) {
   3597   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   3598   VIXL_ASSERT((vd.Is4S() && vn.Is4H()) || (vd.Is2D() && vn.Is2S()));
   3599   // The half-precision variants belong to base FP, and do not require kFPHalf.
   3600   Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
   3601   Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd));
   3602 }
   3603 
   3604 
   3605 void Assembler::fcvtl2(const VRegister& vd, const VRegister& vn) {
   3606   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   3607   VIXL_ASSERT((vd.Is4S() && vn.Is8H()) || (vd.Is2D() && vn.Is4S()));
   3608   // The half-precision variants belong to base FP, and do not require kFPHalf.
   3609   Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
   3610   Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd));
   3611 }
   3612 
   3613 
   3614 void Assembler::fcvtn(const VRegister& vd, const VRegister& vn) {
   3615   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   3616   VIXL_ASSERT((vn.Is4S() && vd.Is4H()) || (vn.Is2D() && vd.Is2S()));
   3617   // The half-precision variants belong to base FP, and do not require kFPHalf.
   3618   Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
   3619   Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd));
   3620 }
   3621 
   3622 
   3623 void Assembler::fcvtn2(const VRegister& vd, const VRegister& vn) {
   3624   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   3625   VIXL_ASSERT((vn.Is4S() && vd.Is8H()) || (vn.Is2D() && vd.Is4S()));
   3626   // The half-precision variants belong to base FP, and do not require kFPHalf.
   3627   Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
   3628   Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd));
   3629 }
   3630 
   3631 
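        // Unlike fcvtn above, fcvtxn narrows double to single precision using
        // round-to-odd, which avoids double rounding if the result is
        // narrowed again later.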
   3632 void Assembler::fcvtxn(const VRegister& vd, const VRegister& vn) {
   3633   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   3634   Instr format = 1 << NEONSize_offset;
   3635   if (vd.IsScalar()) {
   3636     VIXL_ASSERT(vd.Is1S() && vn.Is1D());
   3637     Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd));
   3638   } else {
   3639     VIXL_ASSERT(vd.Is2S() && vn.Is2D());
   3640     Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd));
   3641   }
   3642 }
   3643 
   3644 
   3645 void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) {
   3646   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   3647   VIXL_ASSERT(vd.Is4S() && vn.Is2D());
   3648   Instr format = 1 << NEONSize_offset;
   3649   Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
   3650 }
   3651 
   3652 void Assembler::fjcvtzs(const Register& rd, const VRegister& vn) {
   3653   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kJSCVT));
   3654   VIXL_ASSERT(rd.IsW() && vn.Is1D());
   3655   Emit(FJCVTZS | Rn(vn) | Rd(rd));
   3656 }
   3657 
   3658 
   3659 void Assembler::NEONFPConvertToInt(const Register& rd,
   3660                                    const VRegister& vn,
   3661                                    Instr op) {
   3662   Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd));
   3663 }
   3664 
   3665 
   3666 void Assembler::NEONFPConvertToInt(const VRegister& vd,
   3667                                    const VRegister& vn,
   3668                                    Instr op) {
   3669   if (vn.IsScalar()) {
   3670     VIXL_ASSERT((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
   3671     op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
   3672   }
   3673   Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
   3674 }
   3675 
   3676 
   3677 void Assembler::NEONFP16ConvertToInt(const VRegister& vd,
   3678                                      const VRegister& vn,
   3679                                      Instr op) {
   3680   VIXL_ASSERT(AreSameFormat(vd, vn));
   3681   VIXL_ASSERT(vn.IsLaneSizeH());
   3682   if (vn.IsScalar()) {
   3683     op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
   3684   } else if (vn.Is8H()) {
   3685     op |= static_cast<Instr>(NEON_Q);
   3686   }
   3687   Emit(op | Rn(vn) | Rd(vd));
   3688 }
   3689 
   3690 
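        // The conversions defined by the list below differ only in rounding
        // mode and signedness: fcvtn* rounds to nearest (ties to even),
        // fcvtp* toward plus infinity, fcvtm* toward minus infinity and
        // fcvta* to nearest (ties away from zero); the trailing s/u selects
        // signed or unsigned results.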
   3691 #define NEON_FP2REGMISC_FCVT_LIST(V) \
   3692   V(fcvtnu, NEON_FCVTNU, FCVTNU)     \
   3693   V(fcvtns, NEON_FCVTNS, FCVTNS)     \
   3694   V(fcvtpu, NEON_FCVTPU, FCVTPU)     \
   3695   V(fcvtps, NEON_FCVTPS, FCVTPS)     \
   3696   V(fcvtmu, NEON_FCVTMU, FCVTMU)     \
   3697   V(fcvtms, NEON_FCVTMS, FCVTMS)     \
   3698   V(fcvtau, NEON_FCVTAU, FCVTAU)     \
   3699   V(fcvtas, NEON_FCVTAS, FCVTAS)
   3700 
   3701 #define VIXL_DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP)                 \
   3702   void Assembler::FN(const Register& rd, const VRegister& vn) {  \
   3703     VIXL_ASSERT(CPUHas(CPUFeatures::kFP));                       \
   3704     if (vn.IsH()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));     \
   3705     NEONFPConvertToInt(rd, vn, SCA_OP);                          \
   3706   }                                                              \
   3707   void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
   3708     VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));   \
   3709     if (vd.IsLaneSizeH()) {                                      \
   3710       VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));               \
   3711       NEONFP16ConvertToInt(vd, vn, VEC_OP##_H);                  \
   3712     } else {                                                     \
   3713       NEONFPConvertToInt(vd, vn, VEC_OP);                        \
   3714     }                                                            \
   3715   }
   3716 NEON_FP2REGMISC_FCVT_LIST(VIXL_DEFINE_ASM_FUNC)
   3717 #undef VIXL_DEFINE_ASM_FUNC
   3718 
   3719 
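        // In the fcvtzs/fcvtzu forms below, a non-zero fbits selects the
        // fixed-point variant: the source is effectively multiplied by
        // 2^fbits before rounding toward zero, so e.g. fcvtzs(w0, s1, 16)
        // leaves a signed fixed-point value with 16 fractional bits in w0
        // (the scale field stores 64 - fbits).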
   3720 void Assembler::fcvtzs(const Register& rd, const VRegister& vn, int fbits) {
   3721   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3722   if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3723   VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D());
   3724   VIXL_ASSERT((fbits >= 0) && (fbits <= rd.GetSizeInBits()));
   3725   if (fbits == 0) {
   3726     Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd));
   3727   } else {
   3728     Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) |
   3729          Rd(rd));
   3730   }
   3731 }
   3732 
   3733 
   3734 void Assembler::fcvtzs(const VRegister& vd, const VRegister& vn, int fbits) {
   3735   // This form is a NEON scalar FP instruction.
   3736   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   3737   if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   3738   VIXL_ASSERT(fbits >= 0);
   3739   if (fbits == 0) {
   3740     if (vd.IsLaneSizeH()) {
   3741       NEONFP2RegMiscFP16(vd, vn, NEON_FCVTZS_H);
   3742     } else {
   3743       NEONFP2RegMisc(vd, vn, NEON_FCVTZS);
   3744     }
   3745   } else {
   3746     VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S() ||
   3747                 vd.Is1H() || vd.Is4H() || vd.Is8H());
   3748     NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm);
   3749   }
   3750 }
   3751 
   3752 
   3753 void Assembler::fcvtzu(const Register& rd, const VRegister& vn, int fbits) {
   3754   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3755   if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3756   VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D());
   3757   VIXL_ASSERT((fbits >= 0) && (fbits <= rd.GetSizeInBits()));
   3758   if (fbits == 0) {
   3759     Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd));
   3760   } else {
   3761     Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) |
   3762          Rd(rd));
   3763   }
   3764 }
   3765 
   3766 
   3767 void Assembler::fcvtzu(const VRegister& vd, const VRegister& vn, int fbits) {
   3768   // This form is a NEON scalar FP instruction.
   3769   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   3770   if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   3771   VIXL_ASSERT(fbits >= 0);
   3772   if (fbits == 0) {
   3773     if (vd.IsLaneSizeH()) {
   3774       NEONFP2RegMiscFP16(vd, vn, NEON_FCVTZU_H);
   3775     } else {
   3776       NEONFP2RegMisc(vd, vn, NEON_FCVTZU);
   3777     }
   3778   } else {
   3779     VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S() ||
   3780                 vd.Is1H() || vd.Is4H() || vd.Is8H());
   3781     NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm);
   3782   }
   3783 }
   3784 
   3785 void Assembler::ucvtf(const VRegister& vd, const VRegister& vn, int fbits) {
   3786   // This form is a NEON scalar FP instruction.
   3787   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   3788   if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   3789   VIXL_ASSERT(fbits >= 0);
   3790   if (fbits == 0) {
   3791     if (vd.IsLaneSizeH()) {
   3792       NEONFP2RegMiscFP16(vd, vn, NEON_UCVTF_H);
   3793     } else {
   3794       NEONFP2RegMisc(vd, vn, NEON_UCVTF);
   3795     }
   3796   } else {
   3797     VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S() ||
   3798                 vd.Is1H() || vd.Is4H() || vd.Is8H());
   3799     NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm);
   3800   }
   3801 }
   3802 
   3803 void Assembler::scvtf(const VRegister& vd, const VRegister& vn, int fbits) {
   3804   // This form is a NEON scalar FP instruction.
   3805   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   3806   if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   3807   VIXL_ASSERT(fbits >= 0);
   3808   if (fbits == 0) {
   3809     if (vd.IsLaneSizeH()) {
   3810       NEONFP2RegMiscFP16(vd, vn, NEON_SCVTF_H);
   3811     } else {
   3812       NEONFP2RegMisc(vd, vn, NEON_SCVTF);
   3813     }
   3814   } else {
   3815     VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S() ||
   3816                 vd.Is1H() || vd.Is4H() || vd.Is8H());
   3817     NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm);
   3818   }
   3819 }
   3820 
   3821 
   3822 void Assembler::scvtf(const VRegister& vd, const Register& rn, int fbits) {
   3823   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3824   if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3825   VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
   3826   VIXL_ASSERT(fbits >= 0);
   3827   if (fbits == 0) {
   3828     Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd));
   3829   } else {
   3830     Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
   3831          Rd(vd));
   3832   }
   3833 }
   3834 
   3835 
   3836 void Assembler::ucvtf(const VRegister& vd, const Register& rn, int fbits) {
   3837   VIXL_ASSERT(CPUHas(CPUFeatures::kFP));
   3838   if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));
   3839   VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
   3840   VIXL_ASSERT(fbits >= 0);
   3841   if (fbits == 0) {
   3842     Emit(SF(rn) | FPType(vd) | UCVTF | Rn(rn) | Rd(vd));
   3843   } else {
   3844     Emit(SF(rn) | FPType(vd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
   3845          Rd(vd));
   3846   }
   3847 }
   3848 
   3849 
   3850 void Assembler::NEON3Same(const VRegister& vd,
   3851                           const VRegister& vn,
   3852                           const VRegister& vm,
   3853                           NEON3SameOp vop) {
   3854   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   3855   VIXL_ASSERT(vd.IsVector() || !vd.IsQ());
   3856 
   3857   Instr format, op = vop;
   3858   if (vd.IsScalar()) {
   3859     op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
   3860     format = SFormat(vd);
   3861   } else {
   3862     format = VFormat(vd);
   3863   }
   3864 
   3865   Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
   3866 }
   3867 
   3868 
   3869 void Assembler::NEONFP3Same(const VRegister& vd,
   3870                             const VRegister& vn,
   3871                             const VRegister& vm,
   3872                             Instr op) {
   3873   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   3874   Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
   3875 }
   3876 
   3877 
   3878 void Assembler::NEON3SameFP16(const VRegister& vd,
   3879                               const VRegister& vn,
   3880                               const VRegister& vm,
   3881                               Instr op) {
   3882   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   3883   VIXL_ASSERT(vd.GetLaneSizeInBytes() == kHRegSizeInBytes);
   3884   if (vd.Is8H()) op |= NEON_Q;
   3885   Emit(op | Rm(vm) | Rn(vn) | Rd(vd));
   3886 }
   3887 
   3888 
   3889 // clang-format off
   3890 #define NEON_FP2REGMISC_LIST(V)                                        \
   3891   V(fabs,    NEON_FABS,    FABS,                FABS_h)                \
   3892   V(fneg,    NEON_FNEG,    FNEG,                FNEG_h)                \
   3893   V(fsqrt,   NEON_FSQRT,   FSQRT,               FSQRT_h)               \
   3894   V(frintn,  NEON_FRINTN,  FRINTN,              FRINTN_h)              \
   3895   V(frinta,  NEON_FRINTA,  FRINTA,              FRINTA_h)              \
   3896   V(frintp,  NEON_FRINTP,  FRINTP,              FRINTP_h)              \
   3897   V(frintm,  NEON_FRINTM,  FRINTM,              FRINTM_h)              \
   3898   V(frintx,  NEON_FRINTX,  FRINTX,              FRINTX_h)              \
   3899   V(frintz,  NEON_FRINTZ,  FRINTZ,              FRINTZ_h)              \
   3900   V(frinti,  NEON_FRINTI,  FRINTI,              FRINTI_h)              \
   3901   V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar, NEON_FRSQRTE_H_scalar) \
   3902   V(frecpe,  NEON_FRECPE,  NEON_FRECPE_scalar,  NEON_FRECPE_H_scalar)
   3903 // clang-format on
   3904 
   3905 #define VIXL_DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP, SCA_OP_H)                   \
   3906   void Assembler::FN(const VRegister& vd, const VRegister& vn) {             \
   3907     VIXL_ASSERT(CPUHas(CPUFeatures::kFP));                                   \
   3908     Instr op;                                                                \
   3909     if (vd.IsScalar()) {                                                     \
   3910       if (vd.Is1H()) {                                                       \
   3911         if ((static_cast<uint32_t>(SCA_OP_H) &                               \
   3912             static_cast<uint32_t>(NEONScalar2RegMiscFP16FMask)) ==           \
   3913             static_cast<uint32_t>(NEONScalar2RegMiscFP16Fixed)) {            \
   3914           VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf));   \
   3915         } else {                                                             \
   3916           VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));                         \
   3917         }                                                                    \
   3918         op = SCA_OP_H;                                                       \
   3919       } else {                                                               \
   3920         if ((static_cast<uint32_t>(SCA_OP) &                                 \
   3921              static_cast<uint32_t>(NEONScalar2RegMiscFMask)) ==              \
   3922              static_cast<uint32_t>(NEONScalar2RegMiscFixed)) {               \
   3923           VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));                           \
   3924         }                                                                    \
   3925         VIXL_ASSERT(vd.Is1S() || vd.Is1D());                                 \
   3926         op = SCA_OP;                                                         \
   3927       }                                                                      \
   3928     } else {                                                                 \
   3929       VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));                               \
   3930       VIXL_ASSERT(vd.Is4H() || vd.Is8H() || vd.Is2S() || vd.Is2D() ||        \
   3931                   vd.Is4S());                                                \
   3932       if (vd.IsLaneSizeH()) {                                                \
   3933         VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));                         \
   3934         op = VEC_OP##_H;                                                     \
   3935         if (vd.Is8H()) {                                                     \
   3936           op |= static_cast<Instr>(NEON_Q);                                  \
   3937         }                                                                    \
   3938       } else {                                                               \
   3939         op = VEC_OP;                                                         \
   3940       }                                                                      \
   3941     }                                                                        \
   3942     if (vd.IsLaneSizeH()) {                                                  \
   3943       NEONFP2RegMiscFP16(vd, vn, op);                                        \
   3944     } else {                                                                 \
   3945       NEONFP2RegMisc(vd, vn, op);                                            \
   3946     }                                                                        \
   3947   }
   3948 NEON_FP2REGMISC_LIST(VIXL_DEFINE_ASM_FUNC)
   3949 #undef VIXL_DEFINE_ASM_FUNC
   3950 
   3951 // clang-format off
   3952 #define NEON_FP2REGMISC_V85_LIST(V)       \
   3953   V(frint32x,  NEON_FRINT32X,  FRINT32X)  \
   3954   V(frint32z,  NEON_FRINT32Z,  FRINT32Z)  \
   3955   V(frint64x,  NEON_FRINT64X,  FRINT64X)  \
   3956   V(frint64z,  NEON_FRINT64Z,  FRINT64Z)
   3957 // clang-format on
   3958 
   3959 #define VIXL_DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP)                               \
   3960   void Assembler::FN(const VRegister& vd, const VRegister& vn) {               \
   3961     VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kFrintToFixedSizedInt)); \
   3962     Instr op;                                                                  \
   3963     if (vd.IsScalar()) {                                                       \
   3964       VIXL_ASSERT(vd.Is1S() || vd.Is1D());                                     \
   3965       op = SCA_OP;                                                             \
   3966     } else {                                                                   \
   3967       VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));                                 \
   3968       VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());                        \
   3969       op = VEC_OP;                                                             \
   3970     }                                                                          \
   3971     NEONFP2RegMisc(vd, vn, op);                                                \
   3972   }
   3973 NEON_FP2REGMISC_V85_LIST(VIXL_DEFINE_ASM_FUNC)
   3974 #undef VIXL_DEFINE_ASM_FUNC
   3975 
   3976 void Assembler::NEONFP2RegMiscFP16(const VRegister& vd,
   3977                                    const VRegister& vn,
   3978                                    Instr op) {
   3979   VIXL_ASSERT(AreSameFormat(vd, vn));
   3980   Emit(op | Rn(vn) | Rd(vd));
   3981 }
   3982 
   3983 
   3984 void Assembler::NEONFP2RegMisc(const VRegister& vd,
   3985                                const VRegister& vn,
   3986                                Instr op) {
   3987   VIXL_ASSERT(AreSameFormat(vd, vn));
   3988   Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
   3989 }
   3990 
   3991 
   3992 void Assembler::NEON2RegMisc(const VRegister& vd,
   3993                              const VRegister& vn,
   3994                              NEON2RegMiscOp vop,
   3995                              int value) {
   3996   VIXL_ASSERT(AreSameFormat(vd, vn));
   3997   VIXL_ASSERT(value == 0);
   3998   USE(value);
   3999 
   4000   Instr format, op = vop;
   4001   if (vd.IsScalar()) {
   4002     op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
   4003     format = SFormat(vd);
   4004   } else {
   4005     format = VFormat(vd);
   4006   }
   4007 
   4008   Emit(format | op | Rn(vn) | Rd(vd));
   4009 }
   4010 
   4011 
   4012 void Assembler::cmeq(const VRegister& vd, const VRegister& vn, int value) {
   4013   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4014   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   4015   NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value);
   4016 }
   4017 
   4018 
   4019 void Assembler::cmge(const VRegister& vd, const VRegister& vn, int value) {
   4020   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4021   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   4022   NEON2RegMisc(vd, vn, NEON_CMGE_zero, value);
   4023 }
   4024 
   4025 
   4026 void Assembler::cmgt(const VRegister& vd, const VRegister& vn, int value) {
   4027   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4028   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   4029   NEON2RegMisc(vd, vn, NEON_CMGT_zero, value);
   4030 }
   4031 
   4032 
   4033 void Assembler::cmle(const VRegister& vd, const VRegister& vn, int value) {
   4034   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   4035   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4036   NEON2RegMisc(vd, vn, NEON_CMLE_zero, value);
   4037 }
   4038 
   4039 
   4040 void Assembler::cmlt(const VRegister& vd, const VRegister& vn, int value) {
   4041   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4042   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   4043   NEON2RegMisc(vd, vn, NEON_CMLT_zero, value);
   4044 }
   4045 
   4046 
   4047 void Assembler::shll(const VRegister& vd, const VRegister& vn, int shift) {
   4048   USE(shift);
   4049   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4050   VIXL_ASSERT((vd.Is8H() && vn.Is8B() && shift == 8) ||
   4051               (vd.Is4S() && vn.Is4H() && shift == 16) ||
   4052               (vd.Is2D() && vn.Is2S() && shift == 32));
   4053   Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
   4054 }
   4055 
   4056 
   4057 void Assembler::shll2(const VRegister& vd, const VRegister& vn, int shift) {
   4058   USE(shift);
   4059   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4060   VIXL_ASSERT((vd.Is8H() && vn.Is16B() && shift == 8) ||
   4061               (vd.Is4S() && vn.Is8H() && shift == 16) ||
   4062               (vd.Is2D() && vn.Is4S() && shift == 32));
   4063   Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
   4064 }
   4065 
   4066 
   4067 void Assembler::NEONFP2RegMisc(const VRegister& vd,
   4068                                const VRegister& vn,
   4069                                NEON2RegMiscOp vop,
   4070                                double value) {
   4071   VIXL_ASSERT(AreSameFormat(vd, vn));
   4072   VIXL_ASSERT(value == 0.0);
   4073   USE(value);
   4074 
   4075   Instr op = vop;
   4076   if (vd.IsScalar()) {
   4077     VIXL_ASSERT(vd.Is1S() || vd.Is1D());
   4078     op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
   4079   } else {
   4080     VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());
   4081   }
   4082 
   4083   Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
   4084 }
   4085 
   4086 
   4087 void Assembler::NEONFP2RegMiscFP16(const VRegister& vd,
   4088                                    const VRegister& vn,
   4089                                    NEON2RegMiscFP16Op vop,
   4090                                    double value) {
   4091   VIXL_ASSERT(AreSameFormat(vd, vn));
   4092   VIXL_ASSERT(value == 0.0);
   4093   USE(value);
   4094 
   4095   Instr op = vop;
   4096   if (vd.IsScalar()) {
   4097     VIXL_ASSERT(vd.Is1H());
   4098     op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
   4099   } else {
   4100     VIXL_ASSERT(vd.Is4H() || vd.Is8H());
   4101     if (vd.Is8H()) {
   4102       op |= static_cast<Instr>(NEON_Q);
   4103     }
   4104   }
   4105 
   4106   Emit(op | Rn(vn) | Rd(vd));
   4107 }
   4108 
   4109 
   4110 void Assembler::fcmeq(const VRegister& vd, const VRegister& vn, double value) {
   4111   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   4112   if (vd.IsLaneSizeH()) {
   4113     VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4114     NEONFP2RegMiscFP16(vd, vn, NEON_FCMEQ_H_zero, value);
   4115   } else {
   4116     NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value);
   4117   }
   4118 }
   4119 
   4120 
   4121 void Assembler::fcmge(const VRegister& vd, const VRegister& vn, double value) {
   4122   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   4123   if (vd.IsLaneSizeH()) {
   4124     VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4125     NEONFP2RegMiscFP16(vd, vn, NEON_FCMGE_H_zero, value);
   4126   } else {
   4127     NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value);
   4128   }
   4129 }
   4130 
   4131 
   4132 void Assembler::fcmgt(const VRegister& vd, const VRegister& vn, double value) {
   4133   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   4134   if (vd.IsLaneSizeH()) {
   4135     VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4136     NEONFP2RegMiscFP16(vd, vn, NEON_FCMGT_H_zero, value);
   4137   } else {
   4138     NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value);
   4139   }
   4140 }
   4141 
   4142 
   4143 void Assembler::fcmle(const VRegister& vd, const VRegister& vn, double value) {
   4144   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   4145   if (vd.IsLaneSizeH()) {
   4146     VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4147     NEONFP2RegMiscFP16(vd, vn, NEON_FCMLE_H_zero, value);
   4148   } else {
   4149     NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value);
   4150   }
   4151 }
   4152 
   4153 
   4154 void Assembler::fcmlt(const VRegister& vd, const VRegister& vn, double value) {
   4155   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   4156   if (vd.IsLaneSizeH()) {
   4157     VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4158     NEONFP2RegMiscFP16(vd, vn, NEON_FCMLT_H_zero, value);
   4159   } else {
   4160     NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value);
   4161   }
   4162 }
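         // Note: these zero-comparison forms only accept an immediate of +0.0, as enforced by
         // the value == 0.0 assertion in NEONFP2RegMisc above. For example,
         // fcmgt(v0.V4S(), v1.V4S(), 0.0) emits "fcmgt v0.4s, v1.4s, #0.0".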
   4163 
   4164 
   4165 void Assembler::frecpx(const VRegister& vd, const VRegister& vn) {
   4166   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   4167   VIXL_ASSERT(vd.IsScalar());
   4168   VIXL_ASSERT(AreSameFormat(vd, vn));
   4169   Instr op;
   4170   if (vd.Is1H()) {
   4171     VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4172     op = NEON_FRECPX_H_scalar;
   4173   } else {
   4174     VIXL_ASSERT(vd.Is1S() || vd.Is1D());
   4175     op = NEON_FRECPX_scalar;
   4176   }
   4177   Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
   4178 }
   4179 
   4180 
   4181 // clang-format off
   4182 #define NEON_3SAME_LIST(V) \
   4183   V(add,      NEON_ADD,      vd.IsVector() || vd.Is1D())            \
   4184   V(addp,     NEON_ADDP,     vd.IsVector() || vd.Is1D())            \
   4185   V(sub,      NEON_SUB,      vd.IsVector() || vd.Is1D())            \
   4186   V(cmeq,     NEON_CMEQ,     vd.IsVector() || vd.Is1D())            \
   4187   V(cmge,     NEON_CMGE,     vd.IsVector() || vd.Is1D())            \
   4188   V(cmgt,     NEON_CMGT,     vd.IsVector() || vd.Is1D())            \
   4189   V(cmhi,     NEON_CMHI,     vd.IsVector() || vd.Is1D())            \
   4190   V(cmhs,     NEON_CMHS,     vd.IsVector() || vd.Is1D())            \
   4191   V(cmtst,    NEON_CMTST,    vd.IsVector() || vd.Is1D())            \
   4192   V(sshl,     NEON_SSHL,     vd.IsVector() || vd.Is1D())            \
   4193   V(ushl,     NEON_USHL,     vd.IsVector() || vd.Is1D())            \
   4194   V(srshl,    NEON_SRSHL,    vd.IsVector() || vd.Is1D())            \
   4195   V(urshl,    NEON_URSHL,    vd.IsVector() || vd.Is1D())            \
   4196   V(sqdmulh,  NEON_SQDMULH,  vd.IsLaneSizeH() || vd.IsLaneSizeS())  \
   4197   V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS())  \
   4198   V(shadd,    NEON_SHADD,    vd.IsVector() && !vd.IsLaneSizeD())    \
   4199   V(uhadd,    NEON_UHADD,    vd.IsVector() && !vd.IsLaneSizeD())    \
   4200   V(srhadd,   NEON_SRHADD,   vd.IsVector() && !vd.IsLaneSizeD())    \
   4201   V(urhadd,   NEON_URHADD,   vd.IsVector() && !vd.IsLaneSizeD())    \
   4202   V(shsub,    NEON_SHSUB,    vd.IsVector() && !vd.IsLaneSizeD())    \
   4203   V(uhsub,    NEON_UHSUB,    vd.IsVector() && !vd.IsLaneSizeD())    \
   4204   V(smax,     NEON_SMAX,     vd.IsVector() && !vd.IsLaneSizeD())    \
   4205   V(smaxp,    NEON_SMAXP,    vd.IsVector() && !vd.IsLaneSizeD())    \
   4206   V(smin,     NEON_SMIN,     vd.IsVector() && !vd.IsLaneSizeD())    \
   4207   V(sminp,    NEON_SMINP,    vd.IsVector() && !vd.IsLaneSizeD())    \
   4208   V(umax,     NEON_UMAX,     vd.IsVector() && !vd.IsLaneSizeD())    \
   4209   V(umaxp,    NEON_UMAXP,    vd.IsVector() && !vd.IsLaneSizeD())    \
   4210   V(umin,     NEON_UMIN,     vd.IsVector() && !vd.IsLaneSizeD())    \
   4211   V(uminp,    NEON_UMINP,    vd.IsVector() && !vd.IsLaneSizeD())    \
   4212   V(saba,     NEON_SABA,     vd.IsVector() && !vd.IsLaneSizeD())    \
   4213   V(sabd,     NEON_SABD,     vd.IsVector() && !vd.IsLaneSizeD())    \
   4214   V(uaba,     NEON_UABA,     vd.IsVector() && !vd.IsLaneSizeD())    \
   4215   V(uabd,     NEON_UABD,     vd.IsVector() && !vd.IsLaneSizeD())    \
   4216   V(mla,      NEON_MLA,      vd.IsVector() && !vd.IsLaneSizeD())    \
   4217   V(mls,      NEON_MLS,      vd.IsVector() && !vd.IsLaneSizeD())    \
   4218   V(mul,      NEON_MUL,      vd.IsVector() && !vd.IsLaneSizeD())    \
   4219   V(and_,     NEON_AND,      vd.Is8B() || vd.Is16B())               \
   4220   V(orr,      NEON_ORR,      vd.Is8B() || vd.Is16B())               \
   4221   V(orn,      NEON_ORN,      vd.Is8B() || vd.Is16B())               \
   4222   V(eor,      NEON_EOR,      vd.Is8B() || vd.Is16B())               \
   4223   V(bic,      NEON_BIC,      vd.Is8B() || vd.Is16B())               \
   4224   V(bit,      NEON_BIT,      vd.Is8B() || vd.Is16B())               \
   4225   V(bif,      NEON_BIF,      vd.Is8B() || vd.Is16B())               \
   4226   V(bsl,      NEON_BSL,      vd.Is8B() || vd.Is16B())               \
   4227   V(pmul,     NEON_PMUL,     vd.Is8B() || vd.Is16B())               \
   4228   V(uqadd,    NEON_UQADD,    true)                                  \
   4229   V(sqadd,    NEON_SQADD,    true)                                  \
   4230   V(uqsub,    NEON_UQSUB,    true)                                  \
   4231   V(sqsub,    NEON_SQSUB,    true)                                  \
   4232   V(sqshl,    NEON_SQSHL,    true)                                  \
   4233   V(uqshl,    NEON_UQSHL,    true)                                  \
   4234   V(sqrshl,   NEON_SQRSHL,   true)                                  \
   4235   V(uqrshl,   NEON_UQRSHL,   true)
   4236 // clang-format on
   4237 
   4238 #define VIXL_DEFINE_ASM_FUNC(FN, OP, AS)     \
   4239   void Assembler::FN(const VRegister& vd,    \
   4240                      const VRegister& vn,    \
   4241                      const VRegister& vm) {  \
   4242     VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \
   4243     VIXL_ASSERT(AS);                         \
   4244     NEON3Same(vd, vn, vm, OP);               \
   4245   }
   4246 NEON_3SAME_LIST(VIXL_DEFINE_ASM_FUNC)
   4247 #undef VIXL_DEFINE_ASM_FUNC
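         // Each NEON_3SAME_LIST entry expands to a three-operand assembler function; the third
         // column is that instruction's format assertion. For example, the generated
         // cmeq(v0.V4S(), v1.V4S(), v2.V4S()) emits "cmeq v0.4s, v1.4s, v2.4s".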
   4248 
   4249 // clang-format off
   4250 #define NEON_FP3SAME_OP_LIST(V)                                        \
   4251   V(fmulx,   NEON_FMULX,   NEON_FMULX_scalar,   NEON_FMULX_H_scalar)   \
   4252   V(frecps,  NEON_FRECPS,  NEON_FRECPS_scalar,  NEON_FRECPS_H_scalar)  \
   4253   V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar, NEON_FRSQRTS_H_scalar) \
   4254   V(fabd,    NEON_FABD,    NEON_FABD_scalar,    NEON_FABD_H_scalar)    \
   4255   V(fmla,    NEON_FMLA,    0,                   0)                     \
   4256   V(fmls,    NEON_FMLS,    0,                   0)                     \
   4257   V(facge,   NEON_FACGE,   NEON_FACGE_scalar,   NEON_FACGE_H_scalar)   \
   4258   V(facgt,   NEON_FACGT,   NEON_FACGT_scalar,   NEON_FACGT_H_scalar)   \
   4259   V(fcmeq,   NEON_FCMEQ,   NEON_FCMEQ_scalar,   NEON_FCMEQ_H_scalar)   \
   4260   V(fcmge,   NEON_FCMGE,   NEON_FCMGE_scalar,   NEON_FCMGE_H_scalar)   \
   4261   V(fcmgt,   NEON_FCMGT,   NEON_FCMGT_scalar,   NEON_FCMGT_H_scalar)   \
   4262   V(faddp,   NEON_FADDP,   0,                   0)                     \
   4263   V(fmaxp,   NEON_FMAXP,   0,                   0)                     \
   4264   V(fminp,   NEON_FMINP,   0,                   0)                     \
   4265   V(fmaxnmp, NEON_FMAXNMP, 0,                   0)                     \
   4266   V(fadd,    NEON_FADD,    FADD,                0)                     \
   4267   V(fsub,    NEON_FSUB,    FSUB,                0)                     \
   4268   V(fmul,    NEON_FMUL,    FMUL,                0)                     \
   4269   V(fdiv,    NEON_FDIV,    FDIV,                0)                     \
   4270   V(fmax,    NEON_FMAX,    FMAX,                0)                     \
   4271   V(fmin,    NEON_FMIN,    FMIN,                0)                     \
   4272   V(fmaxnm,  NEON_FMAXNM,  FMAXNM,              0)                     \
   4273   V(fminnm,  NEON_FMINNM,  FMINNM,              0)                     \
   4274   V(fminnmp, NEON_FMINNMP, 0,                   0)
   4275 // clang-format on
   4276 
   4277 // TODO: This macro is complicated because it classifies the instructions in the
   4278 // macro list above, and treats each case differently. It could be somewhat
   4279 // simpler if we were to split the macro, at the cost of some duplication.
   4280 #define VIXL_DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP, SCA_OP_H)               \
   4281   void Assembler::FN(const VRegister& vd,                                \
   4282                      const VRegister& vn,                                \
   4283                      const VRegister& vm) {                              \
   4284     VIXL_ASSERT(CPUHas(CPUFeatures::kFP));                               \
   4285     Instr op;                                                            \
   4286     bool is_fp16 = false;                                                \
   4287     if ((SCA_OP != 0) && vd.IsScalar()) {                                \
   4288       if ((SCA_OP_H != 0) && vd.Is1H()) {                                \
   4289         VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf)); \
   4290         is_fp16 = true;                                                  \
   4291         op = SCA_OP_H;                                                   \
   4292       } else {                                                           \
   4293         VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());                \
   4294         if ((static_cast<uint32_t>(SCA_OP) &                             \
   4295              static_cast<uint32_t>(NEONScalar3SameFMask)) ==             \
   4296              static_cast<uint32_t>(NEONScalar3SameFixed)) {              \
   4297           VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));                       \
   4298           if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));    \
   4299         } else if (vd.Is1H()) {                                          \
   4300           VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf));                     \
   4301         }                                                                \
   4302         op = SCA_OP;                                                     \
   4303       }                                                                  \
   4304     } else {                                                             \
   4305       VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));                           \
   4306       VIXL_ASSERT(vd.IsVector());                                        \
   4307       if (vd.Is4H() || vd.Is8H()) {                                      \
   4308         VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));                     \
   4309         is_fp16 = true;                                                  \
   4310         op = VEC_OP##_H;                                                 \
   4311       } else {                                                           \
   4312         VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());                \
   4313         op = VEC_OP;                                                     \
   4314       }                                                                  \
   4315     }                                                                    \
   4316     if (is_fp16) {                                                       \
   4317       NEON3SameFP16(vd, vn, vm, op);                                     \
   4318     } else {                                                             \
   4319       NEONFP3Same(vd, vn, vm, op);                                       \
   4320     }                                                                    \
   4321   }
   4322 NEON_FP3SAME_OP_LIST(VIXL_DEFINE_ASM_FUNC)
   4323 #undef VIXL_DEFINE_ASM_FUNC
   4324 
   4325 
   4326 // clang-format off
   4327 #define NEON_FHM_LIST(V) \
   4328   V(fmlal,   NEON_FMLAL)   \
   4329   V(fmlal2,  NEON_FMLAL2)  \
   4330   V(fmlsl,   NEON_FMLSL)   \
   4331   V(fmlsl2,  NEON_FMLSL2)
   4332 // clang-format on
   4333 
   4334 #define VIXL_DEFINE_ASM_FUNC(FN, VEC_OP)                    \
   4335   void Assembler::FN(const VRegister& vd,                   \
   4336                      const VRegister& vn,                   \
   4337                      const VRegister& vm) {                 \
   4338     VIXL_ASSERT(CPUHas(CPUFeatures::kNEON,                  \
   4339                        CPUFeatures::kFP,                    \
   4340                        CPUFeatures::kNEONHalf,              \
   4341                        CPUFeatures::kFHM));                 \
   4342     VIXL_ASSERT((vd.Is2S() && vn.Is2H() && vm.Is2H()) ||    \
   4343                 (vd.Is4S() && vn.Is4H() && vm.Is4H()));     \
   4344     Emit(FPFormat(vd) | VEC_OP | Rm(vm) | Rn(vn) | Rd(vd)); \
   4345   }
   4346 NEON_FHM_LIST(VIXL_DEFINE_ASM_FUNC)
   4347 #undef VIXL_DEFINE_ASM_FUNC
   4348 
   4349 
   4350 void Assembler::addp(const VRegister& vd, const VRegister& vn) {
   4351   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4352   VIXL_ASSERT((vd.Is1D() && vn.Is2D()));
   4353   Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd));
   4354 }
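         // Scalar pairwise add: e.g. addp(d0, v1.V2D()) emits "addp d0, v1.2d", summing the two
         // doubleword lanes of v1 into d0.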
   4355 
   4356 
   4357 void Assembler::sqrdmlah(const VRegister& vd,
   4358                          const VRegister& vn,
   4359                          const VRegister& vm) {
   4360   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM));
   4361   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   4362   VIXL_ASSERT(vd.IsVector() || !vd.IsQ());
   4363 
   4364   Instr format, op = NEON_SQRDMLAH;
   4365   if (vd.IsScalar()) {
   4366     op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
   4367     format = SFormat(vd);
   4368   } else {
   4369     format = VFormat(vd);
   4370   }
   4371 
   4372   Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
   4373 }
   4374 
   4375 
   4376 void Assembler::sqrdmlsh(const VRegister& vd,
   4377                          const VRegister& vn,
   4378                          const VRegister& vm) {
   4379   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM));
   4380   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   4381   VIXL_ASSERT(vd.IsVector() || !vd.IsQ());
   4382 
   4383   Instr format, op = NEON_SQRDMLSH;
   4384   if (vd.IsScalar()) {
   4385     op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
   4386     format = SFormat(vd);
   4387   } else {
   4388     format = VFormat(vd);
   4389   }
   4390 
   4391   Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
   4392 }
   4393 
   4394 
   4395 void Assembler::sdot(const VRegister& vd,
   4396                      const VRegister& vn,
   4397                      const VRegister& vm) {
   4398   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct));
   4399   VIXL_ASSERT(AreSameFormat(vn, vm));
   4400   VIXL_ASSERT((vd.Is2S() && vn.Is8B()) || (vd.Is4S() && vn.Is16B()));
   4401 
   4402   Emit(VFormat(vd) | NEON_SDOT | Rm(vm) | Rn(vn) | Rd(vd));
   4403 }
   4404 
   4405 
   4406 void Assembler::udot(const VRegister& vd,
   4407                      const VRegister& vn,
   4408                      const VRegister& vm) {
   4409   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct));
   4410   VIXL_ASSERT(AreSameFormat(vn, vm));
   4411   VIXL_ASSERT((vd.Is2S() && vn.Is8B()) || (vd.Is4S() && vn.Is16B()));
   4412 
   4413   Emit(VFormat(vd) | NEON_UDOT | Rm(vm) | Rn(vn) | Rd(vd));
   4414 }
   4415 
   4416 void Assembler::usdot(const VRegister& vd,
   4417                       const VRegister& vn,
   4418                       const VRegister& vm) {
   4419   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kI8MM));
   4420   VIXL_ASSERT(AreSameFormat(vn, vm));
   4421   VIXL_ASSERT((vd.Is2S() && vn.Is8B()) || (vd.Is4S() && vn.Is16B()));
   4422 
   4423   Emit(VFormat(vd) | 0x0e809c00 | Rm(vm) | Rn(vn) | Rd(vd));
   4424 }
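         // Dot products accumulate each group of four byte lanes into one S lane: e.g.
         // sdot(v0.V2S(), v1.V8B(), v2.V8B()) emits "sdot v0.2s, v1.8b, v2.8b".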
   4425 
   4426 void Assembler::faddp(const VRegister& vd, const VRegister& vn) {
   4427   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   4428   VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) ||
   4429               (vd.Is1H() && vn.Is2H()));
   4430   if (vd.Is1H()) {
   4431     VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4432     Emit(NEON_FADDP_h_scalar | Rn(vn) | Rd(vd));
   4433   } else {
   4434     Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd));
   4435   }
   4436 }
   4437 
   4438 
   4439 void Assembler::fmaxp(const VRegister& vd, const VRegister& vn) {
   4440   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   4441   VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) ||
   4442               (vd.Is1H() && vn.Is2H()));
   4443   if (vd.Is1H()) {
   4444     VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4445     Emit(NEON_FMAXP_h_scalar | Rn(vn) | Rd(vd));
   4446   } else {
   4447     Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd));
   4448   }
   4449 }
   4450 
   4451 
   4452 void Assembler::fminp(const VRegister& vd, const VRegister& vn) {
   4453   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   4454   VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) ||
   4455               (vd.Is1H() && vn.Is2H()));
   4456   if (vd.Is1H()) {
   4457     VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4458     Emit(NEON_FMINP_h_scalar | Rn(vn) | Rd(vd));
   4459   } else {
   4460     Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd));
   4461   }
   4462 }
   4463 
   4464 
   4465 void Assembler::fmaxnmp(const VRegister& vd, const VRegister& vn) {
   4466   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   4467   VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) ||
   4468               (vd.Is1H() && vn.Is2H()));
   4469   if (vd.Is1H()) {
   4470     VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4471     Emit(NEON_FMAXNMP_h_scalar | Rn(vn) | Rd(vd));
   4472   } else {
   4473     Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd));
   4474   }
   4475 }
   4476 
   4477 
   4478 void Assembler::fminnmp(const VRegister& vd, const VRegister& vn) {
   4479   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));
   4480   VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) ||
   4481               (vd.Is1H() && vn.Is2H()));
   4482   if (vd.Is1H()) {
   4483     VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4484     Emit(NEON_FMINNMP_h_scalar | Rn(vn) | Rd(vd));
   4485   } else {
   4486     Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd));
   4487   }
   4488 }
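         // Scalar pairwise forms reduce the two lanes of vn: e.g. faddp(s0, v1.V2S()) emits
         // "faddp s0, v1.2s".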
   4489 
   4490 
   4491 // v8.3 complex numbers - floating-point complex multiply accumulate.
   4492 void Assembler::fcmla(const VRegister& vd,
   4493                       const VRegister& vn,
   4494                       const VRegister& vm,
   4495                       int vm_index,
   4496                       int rot) {
   4497   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kFcma));
   4498   VIXL_ASSERT(vd.IsVector() && AreSameFormat(vd, vn));
   4499   VIXL_ASSERT((vm.IsH() && (vd.Is8H() || vd.Is4H())) ||
   4500               (vm.IsS() && vd.Is4S()));
   4501   if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4502   int index_num_bits = vd.Is4S() ? 1 : 2;
   4503   Emit(VFormat(vd) | Rm(vm) | NEON_FCMLA_byelement |
   4504        ImmNEONHLM(vm_index, index_num_bits) | ImmRotFcmlaSca(rot) | Rn(vn) |
   4505        Rd(vd));
   4506 }
   4507 
   4508 
   4509 void Assembler::fcmla(const VRegister& vd,
   4510                       const VRegister& vn,
   4511                       const VRegister& vm,
   4512                       int rot) {
   4513   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kFcma));
   4514   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   4515   VIXL_ASSERT(vd.IsVector() && !vd.IsLaneSizeB());
   4516   if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4517   Emit(VFormat(vd) | Rm(vm) | NEON_FCMLA | ImmRotFcmlaVec(rot) | Rn(vn) |
   4518        Rd(vd));
   4519 }
   4520 
   4521 
   4522 // v8.3 complex numbers - floating-point complex add.
   4523 void Assembler::fcadd(const VRegister& vd,
   4524                       const VRegister& vn,
   4525                       const VRegister& vm,
   4526                       int rot) {
   4527   VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kFcma));
   4528   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   4529   VIXL_ASSERT(vd.IsVector() && !vd.IsLaneSizeB());
   4530   if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));
   4531   Emit(VFormat(vd) | Rm(vm) | NEON_FCADD | ImmRotFcadd(rot) | Rn(vn) | Rd(vd));
   4532 }
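         // The rot argument is given in degrees: fcmla accepts 0, 90, 180 or 270, while fcadd
         // accepts only 90 or 270; the ImmRotFcmla*/ImmRotFcadd helpers encode it.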
   4533 
   4534 
   4535 void Assembler::orr(const VRegister& vd, const int imm8, const int left_shift) {
   4536   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4537   NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_ORR);
   4538 }
   4539 
   4540 
   4541 void Assembler::mov(const VRegister& vd, const VRegister& vn) {
   4542   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4543   VIXL_ASSERT(AreSameFormat(vd, vn));
   4544   if (vd.IsD()) {
   4545     orr(vd.V8B(), vn.V8B(), vn.V8B());
   4546   } else {
   4547     VIXL_ASSERT(vd.IsQ());
   4548     orr(vd.V16B(), vn.V16B(), vn.V16B());
   4549   }
   4550 }
   4551 
   4552 
   4553 void Assembler::bic(const VRegister& vd, const int imm8, const int left_shift) {
   4554   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4555   NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_BIC);
   4556 }
   4557 
   4558 
   4559 void Assembler::movi(const VRegister& vd,
   4560                      const uint64_t imm,
   4561                      Shift shift,
   4562                      const int shift_amount) {
   4563   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4564   VIXL_ASSERT((shift == LSL) || (shift == MSL));
   4565   if (vd.Is2D() || vd.Is1D()) {
   4566     VIXL_ASSERT(shift_amount == 0);
   4567     int imm8 = 0;
   4568     for (int i = 0; i < 8; ++i) {
   4569       int byte = (imm >> (i * 8)) & 0xff;
   4570       VIXL_ASSERT((byte == 0) || (byte == 0xff));
   4571       if (byte == 0xff) {
   4572         imm8 |= (1 << i);
   4573       }
   4574     }
   4575     int q = vd.Is2D() ? NEON_Q : 0;
   4576     Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
   4577          ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
   4578   } else if (shift == LSL) {
   4579     VIXL_ASSERT(IsUint8(imm));
   4580     NEONModifiedImmShiftLsl(vd,
   4581                             static_cast<int>(imm),
   4582                             shift_amount,
   4583                             NEONModifiedImmediate_MOVI);
   4584   } else {
   4585     VIXL_ASSERT(IsUint8(imm));
   4586     NEONModifiedImmShiftMsl(vd,
   4587                             static_cast<int>(imm),
   4588                             shift_amount,
   4589                             NEONModifiedImmediate_MOVI);
   4590   }
   4591 }
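         // For the 1D/2D forms the 64-bit immediate must be built from 0x00 and 0xff bytes
         // only; each 0xff byte sets one bit of imm8. For example (illustrative),
         // movi(v0.V2D(), 0xff00ff00ff00ff00) encodes imm8 = 0b10101010.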
   4592 
   4593 
   4594 void Assembler::mvn(const VRegister& vd, const VRegister& vn) {
   4595   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4596   VIXL_ASSERT(AreSameFormat(vd, vn));
   4597   if (vd.IsD()) {
   4598     not_(vd.V8B(), vn.V8B());
   4599   } else {
   4600     VIXL_ASSERT(vd.IsQ());
   4601     not_(vd.V16B(), vn.V16B());
   4602   }
   4603 }
   4604 
   4605 
   4606 void Assembler::mvni(const VRegister& vd,
   4607                      const int imm8,
   4608                      Shift shift,
   4609                      const int shift_amount) {
   4610   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4611   VIXL_ASSERT((shift == LSL) || (shift == MSL));
   4612   if (shift == LSL) {
   4613     NEONModifiedImmShiftLsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
   4614   } else {
   4615     NEONModifiedImmShiftMsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
   4616   }
   4617 }
   4618 
   4619 
   4620 void Assembler::NEONFPByElement(const VRegister& vd,
   4621                                 const VRegister& vn,
   4622                                 const VRegister& vm,
   4623                                 int vm_index,
   4624                                 NEONByIndexedElementOp vop,
   4625                                 NEONByIndexedElementOp vop_half) {
   4626   VIXL_ASSERT(AreSameFormat(vd, vn));
   4627   VIXL_ASSERT((vd.Is2S() && vm.Is1S()) || (vd.Is4S() && vm.Is1S()) ||
   4628               (vd.Is1S() && vm.Is1S()) || (vd.Is2D() && vm.Is1D()) ||
   4629               (vd.Is1D() && vm.Is1D()) || (vd.Is4H() && vm.Is1H()) ||
   4630               (vd.Is8H() && vm.Is1H()) || (vd.Is1H() && vm.Is1H()));
   4631   VIXL_ASSERT((vm.Is1S() && (vm_index < 4)) || (vm.Is1D() && (vm_index < 2)) ||
   4632               (vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8)));
   4633 
   4634   Instr op = vop;
   4635   int index_num_bits;
   4636   if (vm.Is1D()) {
   4637     index_num_bits = 1;
   4638   } else if (vm.Is1S()) {
   4639     index_num_bits = 2;
   4640   } else {
   4641     index_num_bits = 3;
   4642     op = vop_half;
   4643   }
   4644 
   4645   if (vd.IsScalar()) {
   4646     op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
   4647   }
   4648 
   4649   if (!vm.Is1H()) {
   4650     op |= FPFormat(vd);
   4651   } else if (vd.Is8H()) {
   4652     op |= static_cast<Instr>(NEON_Q);
   4653   }
   4654 
   4655   Emit(op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd));
   4656 }
   4657 
   4658 
   4659 void Assembler::NEONByElement(const VRegister& vd,
   4660                               const VRegister& vn,
   4661                               const VRegister& vm,
   4662                               int vm_index,
   4663                               NEONByIndexedElementOp vop) {
   4664   VIXL_ASSERT(AreSameFormat(vd, vn));
   4665   VIXL_ASSERT((vd.Is4H() && vm.Is1H()) || (vd.Is8H() && vm.Is1H()) ||
   4666               (vd.Is1H() && vm.Is1H()) || (vd.Is2S() && vm.Is1S()) ||
   4667               (vd.Is4S() && vm.Is1S()) || (vd.Is1S() && vm.Is1S()));
   4668   VIXL_ASSERT((vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8)) ||
   4669               (vm.Is1S() && (vm_index < 4)));
   4670 
   4671   Instr format, op = vop;
   4672   int index_num_bits = vm.Is1H() ? 3 : 2;
   4673   if (vd.IsScalar()) {
   4674     op |= static_cast<Instr>(NEONScalar) | static_cast<Instr>(NEON_Q);
   4675     format = SFormat(vn);
   4676   } else {
   4677     format = VFormat(vn);
   4678   }
   4679   Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
   4680        Rd(vd));
   4681 }
   4682 
   4683 
   4684 void Assembler::NEONByElementL(const VRegister& vd,
   4685                                const VRegister& vn,
   4686                                const VRegister& vm,
   4687                                int vm_index,
   4688                                NEONByIndexedElementOp vop) {
   4689   VIXL_ASSERT((vd.Is4S() && vn.Is4H() && vm.Is1H()) ||
   4690               (vd.Is4S() && vn.Is8H() && vm.Is1H()) ||
   4691               (vd.Is1S() && vn.Is1H() && vm.Is1H()) ||
   4692               (vd.Is2D() && vn.Is2S() && vm.Is1S()) ||
   4693               (vd.Is2D() && vn.Is4S() && vm.Is1S()) ||
   4694               (vd.Is1D() && vn.Is1S() && vm.Is1S()));
   4695 
   4696   VIXL_ASSERT((vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8)) ||
   4697               (vm.Is1S() && (vm_index < 4)));
   4698 
   4699   Instr format, op = vop;
   4700   int index_num_bits = vm.Is1H() ? 3 : 2;
   4701   if (vd.IsScalar()) {
   4702     op |= static_cast<Instr>(NEONScalar) | static_cast<Instr>(NEON_Q);
   4703     format = SFormat(vn);
   4704   } else {
   4705     format = VFormat(vn);
   4706   }
   4707   Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
   4708        Rd(vd));
   4709 }
   4710 
   4711 
   4712 void Assembler::sdot(const VRegister& vd,
   4713                      const VRegister& vn,
   4714                      const VRegister& vm,
   4715                      int vm_index) {
   4716   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct));
   4717   VIXL_ASSERT((vd.Is2S() && vn.Is8B() && vm.Is1S4B()) ||
   4718               (vd.Is4S() && vn.Is16B() && vm.Is1S4B()));
   4719 
   4720   int index_num_bits = 2;
   4721   Emit(VFormat(vd) | NEON_SDOT_byelement |
   4722        ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd));
   4723 }
   4724 
   4725 
   4726 void Assembler::udot(const VRegister& vd,
   4727                      const VRegister& vn,
   4728                      const VRegister& vm,
   4729                      int vm_index) {
   4730   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct));
   4731   VIXL_ASSERT((vd.Is2S() && vn.Is8B() && vm.Is1S4B()) ||
   4732               (vd.Is4S() && vn.Is16B() && vm.Is1S4B()));
   4733 
   4734   int index_num_bits = 2;
   4735   Emit(VFormat(vd) | NEON_UDOT_byelement |
   4736        ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd));
   4737 }
   4738 
   4739 void Assembler::sudot(const VRegister& vd,
   4740                       const VRegister& vn,
   4741                       const VRegister& vm,
   4742                       int vm_index) {
   4743   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kI8MM));
   4744   VIXL_ASSERT((vd.Is2S() && vn.Is8B() && vm.Is1S4B()) ||
   4745               (vd.Is4S() && vn.Is16B() && vm.Is1S4B()));
   4746   int q = vd.Is4S() ? (1U << NEONQ_offset) : 0;
   4747   int index_num_bits = 2;
   4748   Emit(q | 0x0f00f000 | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
   4749        Rd(vd));
   4750 }
   4751 
   4752 
   4753 void Assembler::usdot(const VRegister& vd,
   4754                       const VRegister& vn,
   4755                       const VRegister& vm,
   4756                       int vm_index) {
   4757   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kI8MM));
   4758   VIXL_ASSERT((vd.Is2S() && vn.Is8B() && vm.Is1S4B()) ||
   4759               (vd.Is4S() && vn.Is16B() && vm.Is1S4B()));
   4760   int q = vd.Is4S() ? (1U << NEONQ_offset) : 0;
   4761   int index_num_bits = 2;
   4762   Emit(q | 0x0f80f000 | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
   4763        Rd(vd));
   4764 }
   4765 
   4766 // clang-format off
   4767 #define NEON_BYELEMENT_LIST(V)                        \
   4768   V(mul,      NEON_MUL_byelement,      vn.IsVector()) \
   4769   V(mla,      NEON_MLA_byelement,      vn.IsVector()) \
   4770   V(mls,      NEON_MLS_byelement,      vn.IsVector()) \
   4771   V(sqdmulh,  NEON_SQDMULH_byelement,  true)          \
    4772   V(sqrdmulh, NEON_SQRDMULH_byelement, true)
   4773 // clang-format on
   4774 
    4775 #define VIXL_DEFINE_ASM_FUNC(FN, OP, AS)     \
    4776   void Assembler::FN(const VRegister& vd,    \
    4777                      const VRegister& vn,    \
    4778                      const VRegister& vm,    \
    4779                      int vm_index) {         \
    4780     VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \
    4781     VIXL_ASSERT(AS);                         \
    4782     NEONByElement(vd, vn, vm, vm_index, OP); \
    4783   }
   4784 NEON_BYELEMENT_LIST(VIXL_DEFINE_ASM_FUNC)
   4785 #undef VIXL_DEFINE_ASM_FUNC
   4786 
   4787 
   4788 // clang-format off
   4789 #define NEON_BYELEMENT_RDM_LIST(V)     \
   4790   V(sqrdmlah, NEON_SQRDMLAH_byelement) \
   4791   V(sqrdmlsh, NEON_SQRDMLSH_byelement)
   4792 // clang-format on
   4793 
   4794 #define VIXL_DEFINE_ASM_FUNC(FN, OP)                            \
   4795   void Assembler::FN(const VRegister& vd,                       \
   4796                      const VRegister& vn,                       \
   4797                      const VRegister& vm,                       \
   4798                      int vm_index) {                            \
   4799     VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM)); \
   4800     NEONByElement(vd, vn, vm, vm_index, OP);                    \
   4801   }
   4802 NEON_BYELEMENT_RDM_LIST(VIXL_DEFINE_ASM_FUNC)
   4803 #undef VIXL_DEFINE_ASM_FUNC
   4804 
   4805 
   4806 // clang-format off
   4807 #define NEON_FPBYELEMENT_LIST(V) \
   4808   V(fmul,  NEON_FMUL_byelement,  NEON_FMUL_H_byelement)  \
   4809   V(fmla,  NEON_FMLA_byelement,  NEON_FMLA_H_byelement)  \
   4810   V(fmls,  NEON_FMLS_byelement,  NEON_FMLS_H_byelement)  \
   4811   V(fmulx, NEON_FMULX_byelement, NEON_FMULX_H_byelement)
   4812 // clang-format on
   4813 
   4814 #define VIXL_DEFINE_ASM_FUNC(FN, OP, OP_H)                             \
   4815   void Assembler::FN(const VRegister& vd,                              \
   4816                      const VRegister& vn,                              \
   4817                      const VRegister& vm,                              \
   4818                      int vm_index) {                                   \
   4819     VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));         \
   4820     if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \
   4821     NEONFPByElement(vd, vn, vm, vm_index, OP, OP_H);                   \
   4822   }
   4823 NEON_FPBYELEMENT_LIST(VIXL_DEFINE_ASM_FUNC)
   4824 #undef VIXL_DEFINE_ASM_FUNC
   4825 
   4826 
   4827 // clang-format off
   4828 #define NEON_BYELEMENT_LONG_LIST(V)                               \
   4829   V(sqdmull,  NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD())  \
   4830   V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ())  \
   4831   V(sqdmlal,  NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD())  \
   4832   V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ())  \
   4833   V(sqdmlsl,  NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD())  \
   4834   V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ())  \
   4835   V(smull,    NEON_SMULL_byelement,   vn.IsVector() && vn.IsD())  \
   4836   V(smull2,   NEON_SMULL_byelement,   vn.IsVector() && vn.IsQ())  \
   4837   V(umull,    NEON_UMULL_byelement,   vn.IsVector() && vn.IsD())  \
   4838   V(umull2,   NEON_UMULL_byelement,   vn.IsVector() && vn.IsQ())  \
   4839   V(smlal,    NEON_SMLAL_byelement,   vn.IsVector() && vn.IsD())  \
   4840   V(smlal2,   NEON_SMLAL_byelement,   vn.IsVector() && vn.IsQ())  \
   4841   V(umlal,    NEON_UMLAL_byelement,   vn.IsVector() && vn.IsD())  \
   4842   V(umlal2,   NEON_UMLAL_byelement,   vn.IsVector() && vn.IsQ())  \
   4843   V(smlsl,    NEON_SMLSL_byelement,   vn.IsVector() && vn.IsD())  \
   4844   V(smlsl2,   NEON_SMLSL_byelement,   vn.IsVector() && vn.IsQ())  \
   4845   V(umlsl,    NEON_UMLSL_byelement,   vn.IsVector() && vn.IsD())  \
   4846   V(umlsl2,   NEON_UMLSL_byelement,   vn.IsVector() && vn.IsQ())
   4847 // clang-format on
   4848 
   4849 
   4850 #define VIXL_DEFINE_ASM_FUNC(FN, OP, AS)      \
   4851   void Assembler::FN(const VRegister& vd,     \
   4852                      const VRegister& vn,     \
   4853                      const VRegister& vm,     \
   4854                      int vm_index) {          \
   4855     VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));  \
   4856     VIXL_ASSERT(AS);                          \
   4857     NEONByElementL(vd, vn, vm, vm_index, OP); \
   4858   }
   4859 NEON_BYELEMENT_LONG_LIST(VIXL_DEFINE_ASM_FUNC)
   4860 #undef VIXL_DEFINE_ASM_FUNC
   4861 
   4862 
   4863 // clang-format off
   4864 #define NEON_BYELEMENT_FHM_LIST(V)    \
   4865   V(fmlal, NEON_FMLAL_H_byelement)    \
   4866   V(fmlal2, NEON_FMLAL2_H_byelement)  \
   4867   V(fmlsl, NEON_FMLSL_H_byelement)    \
   4868   V(fmlsl2, NEON_FMLSL2_H_byelement)
   4869 // clang-format on
   4870 
   4871 
   4872 #define VIXL_DEFINE_ASM_FUNC(FN, OP)                                   \
   4873   void Assembler::FN(const VRegister& vd,                              \
   4874                      const VRegister& vn,                              \
   4875                      const VRegister& vm,                              \
   4876                      int vm_index) {                                   \
   4877     VIXL_ASSERT(CPUHas(CPUFeatures::kNEON,                             \
   4878                        CPUFeatures::kFP,                               \
   4879                        CPUFeatures::kNEONHalf,                         \
   4880                        CPUFeatures::kFHM));                            \
   4881     VIXL_ASSERT((vd.Is2S() && vn.Is2H()) || (vd.Is4S() && vn.Is4H())); \
   4882     VIXL_ASSERT(vm.IsH());                                             \
   4883     VIXL_ASSERT((vm_index >= 0) && (vm_index < 8));                    \
   4884     /* Vm itself can only be in the bottom 16 registers. */            \
   4885     VIXL_ASSERT(vm.GetCode() < 16);                                    \
   4886     Emit(FPFormat(vd) | OP | Rd(vd) | Rn(vn) | Rm(vm) |                \
   4887          ImmNEONHLM(vm_index, 3));                                     \
   4888   }
   4889 NEON_BYELEMENT_FHM_LIST(VIXL_DEFINE_ASM_FUNC)
   4890 #undef VIXL_DEFINE_ASM_FUNC
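         // Example (illustrative): fmlal(v0.V4S(), v1.V4H(), h2, 3) emits
         // "fmlal v0.4s, v1.4h, v2.h[3]"; vm must be one of h0-h15.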
   4891 
   4892 void Assembler::suqadd(const VRegister& vd, const VRegister& vn) {
   4893   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4894   NEON2RegMisc(vd, vn, NEON_SUQADD);
   4895 }
   4896 
   4897 
   4898 void Assembler::usqadd(const VRegister& vd, const VRegister& vn) {
   4899   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4900   NEON2RegMisc(vd, vn, NEON_USQADD);
   4901 }
   4902 
   4903 
   4904 void Assembler::abs(const VRegister& vd, const VRegister& vn) {
   4905   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4906   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   4907   NEON2RegMisc(vd, vn, NEON_ABS);
   4908 }
   4909 
   4910 
   4911 void Assembler::sqabs(const VRegister& vd, const VRegister& vn) {
   4912   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4913   NEON2RegMisc(vd, vn, NEON_SQABS);
   4914 }
   4915 
   4916 
   4917 void Assembler::neg(const VRegister& vd, const VRegister& vn) {
   4918   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4919   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   4920   NEON2RegMisc(vd, vn, NEON_NEG);
   4921 }
   4922 
   4923 
   4924 void Assembler::sqneg(const VRegister& vd, const VRegister& vn) {
   4925   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4926   NEON2RegMisc(vd, vn, NEON_SQNEG);
   4927 }
   4928 
   4929 
   4930 void Assembler::NEONXtn(const VRegister& vd,
   4931                         const VRegister& vn,
   4932                         NEON2RegMiscOp vop) {
   4933   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4934   Instr format, op = vop;
   4935   if (vd.IsScalar()) {
   4936     VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
   4937                 (vd.Is1S() && vn.Is1D()));
   4938     op |= static_cast<Instr>(NEON_Q) | static_cast<Instr>(NEONScalar);
   4939     format = SFormat(vd);
   4940   } else {
   4941     VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
   4942                 (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
   4943                 (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
   4944     format = VFormat(vd);
   4945   }
   4946   Emit(format | op | Rn(vn) | Rd(vd));
   4947 }
   4948 
   4949 
   4950 void Assembler::xtn(const VRegister& vd, const VRegister& vn) {
   4951   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4952   VIXL_ASSERT(vd.IsVector() && vd.IsD());
   4953   NEONXtn(vd, vn, NEON_XTN);
   4954 }
   4955 
   4956 
   4957 void Assembler::xtn2(const VRegister& vd, const VRegister& vn) {
   4958   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4959   VIXL_ASSERT(vd.IsVector() && vd.IsQ());
   4960   NEONXtn(vd, vn, NEON_XTN);
   4961 }
   4962 
   4963 
   4964 void Assembler::sqxtn(const VRegister& vd, const VRegister& vn) {
   4965   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4966   VIXL_ASSERT(vd.IsScalar() || vd.IsD());
   4967   NEONXtn(vd, vn, NEON_SQXTN);
   4968 }
   4969 
   4970 
   4971 void Assembler::sqxtn2(const VRegister& vd, const VRegister& vn) {
   4972   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4973   VIXL_ASSERT(vd.IsVector() && vd.IsQ());
   4974   NEONXtn(vd, vn, NEON_SQXTN);
   4975 }
   4976 
   4977 
   4978 void Assembler::sqxtun(const VRegister& vd, const VRegister& vn) {
   4979   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4980   VIXL_ASSERT(vd.IsScalar() || vd.IsD());
   4981   NEONXtn(vd, vn, NEON_SQXTUN);
   4982 }
   4983 
   4984 
   4985 void Assembler::sqxtun2(const VRegister& vd, const VRegister& vn) {
   4986   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4987   VIXL_ASSERT(vd.IsVector() && vd.IsQ());
   4988   NEONXtn(vd, vn, NEON_SQXTUN);
   4989 }
   4990 
   4991 
   4992 void Assembler::uqxtn(const VRegister& vd, const VRegister& vn) {
   4993   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   4994   VIXL_ASSERT(vd.IsScalar() || vd.IsD());
   4995   NEONXtn(vd, vn, NEON_UQXTN);
   4996 }
   4997 
   4998 
   4999 void Assembler::uqxtn2(const VRegister& vd, const VRegister& vn) {
   5000   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5001   VIXL_ASSERT(vd.IsVector() && vd.IsQ());
   5002   NEONXtn(vd, vn, NEON_UQXTN);
   5003 }
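         // Narrowing moves write a half-width result: e.g. xtn(v0.V2S(), v1.V2D()) emits
         // "xtn v0.2s, v1.2d", while the *2 variants write the upper half of a Q register.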
   5004 
   5005 
    5006 // NEON NOT and RBIT are distinguished by bit 22, the bottom bit of "size".
   5007 void Assembler::not_(const VRegister& vd, const VRegister& vn) {
   5008   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5009   VIXL_ASSERT(AreSameFormat(vd, vn));
   5010   VIXL_ASSERT(vd.Is8B() || vd.Is16B());
   5011   Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
   5012 }
   5013 
   5014 
   5015 void Assembler::rbit(const VRegister& vd, const VRegister& vn) {
   5016   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5017   VIXL_ASSERT(AreSameFormat(vd, vn));
   5018   VIXL_ASSERT(vd.Is8B() || vd.Is16B());
   5019   Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
   5020 }
   5021 
   5022 
   5023 void Assembler::ext(const VRegister& vd,
   5024                     const VRegister& vn,
   5025                     const VRegister& vm,
   5026                     int index) {
   5027   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5028   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   5029   VIXL_ASSERT(vd.Is8B() || vd.Is16B());
   5030   VIXL_ASSERT((0 <= index) && (index < vd.GetLanes()));
   5031   Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
   5032 }
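         // ext concatenates vn:vm and extracts bytes starting at the given index: e.g.
         // ext(v0.V16B(), v1.V16B(), v2.V16B(), 3) emits "ext v0.16b, v1.16b, v2.16b, #3".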
   5033 
   5034 
   5035 void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) {
   5036   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5037   Instr q, scalar;
   5038 
   5039   // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
   5040   // number of lanes, and T is b, h, s or d.
   5041   int lane_size = vn.GetLaneSizeInBytes();
   5042   NEONFormatField format;
   5043   switch (lane_size) {
   5044     case 1:
   5045       format = NEON_16B;
   5046       break;
   5047     case 2:
   5048       format = NEON_8H;
   5049       break;
   5050     case 4:
   5051       format = NEON_4S;
   5052       break;
   5053     default:
   5054       VIXL_ASSERT(lane_size == 8);
   5055       format = NEON_2D;
   5056       break;
   5057   }
   5058 
   5059   if (vd.IsScalar()) {
   5060     q = NEON_Q;
   5061     scalar = NEONScalar;
   5062   } else {
   5063     VIXL_ASSERT(!vd.Is1D());
   5064     q = vd.IsD() ? 0 : NEON_Q;
   5065     scalar = 0;
   5066   }
   5067   Emit(q | scalar | NEON_DUP_ELEMENT | ImmNEON5(format, vn_index) | Rn(vn) |
   5068        Rd(vd));
   5069 }
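         // Example: dup(v0.V4S(), v1.V4S(), 2) emits "dup v0.4s, v1.s[2]", replicating lane 2
         // of v1 across every lane of v0; with a scalar destination it is the mov alias below.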
   5070 
   5071 
   5072 void Assembler::mov(const VRegister& vd, const VRegister& vn, int vn_index) {
   5073   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5074   VIXL_ASSERT(vd.IsScalar());
   5075   dup(vd, vn, vn_index);
   5076 }
   5077 
   5078 
   5079 void Assembler::dup(const VRegister& vd, const Register& rn) {
   5080   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5081   VIXL_ASSERT(!vd.Is1D());
   5082   VIXL_ASSERT(vd.Is2D() == rn.IsX());
   5083   int q = vd.IsD() ? 0 : NEON_Q;
   5084   Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
   5085 }
   5086 
   5087 
   5088 void Assembler::ins(const VRegister& vd,
   5089                     int vd_index,
   5090                     const VRegister& vn,
   5091                     int vn_index) {
   5092   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5093   VIXL_ASSERT(AreSameFormat(vd, vn));
   5094   // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
   5095   // number of lanes, and T is b, h, s or d.
   5096   int lane_size = vd.GetLaneSizeInBytes();
   5097   NEONFormatField format;
   5098   switch (lane_size) {
   5099     case 1:
   5100       format = NEON_16B;
   5101       break;
   5102     case 2:
   5103       format = NEON_8H;
   5104       break;
   5105     case 4:
   5106       format = NEON_4S;
   5107       break;
   5108     default:
   5109       VIXL_ASSERT(lane_size == 8);
   5110       format = NEON_2D;
   5111       break;
   5112   }
   5113 
   5114   VIXL_ASSERT(
   5115       (0 <= vd_index) &&
   5116       (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
   5117   VIXL_ASSERT(
   5118       (0 <= vn_index) &&
   5119       (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
   5120   Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) |
   5121        ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
   5122 }
   5123 
   5124 
   5125 void Assembler::mov(const VRegister& vd,
   5126                     int vd_index,
   5127                     const VRegister& vn,
   5128                     int vn_index) {
   5129   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5130   ins(vd, vd_index, vn, vn_index);
   5131 }
   5132 
   5133 
   5134 void Assembler::ins(const VRegister& vd, int vd_index, const Register& rn) {
   5135   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5136   // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
   5137   // number of lanes, and T is b, h, s or d.
   5138   int lane_size = vd.GetLaneSizeInBytes();
   5139   NEONFormatField format;
   5140   switch (lane_size) {
   5141     case 1:
   5142       format = NEON_16B;
   5143       VIXL_ASSERT(rn.IsW());
   5144       break;
   5145     case 2:
   5146       format = NEON_8H;
   5147       VIXL_ASSERT(rn.IsW());
   5148       break;
   5149     case 4:
   5150       format = NEON_4S;
   5151       VIXL_ASSERT(rn.IsW());
   5152       break;
   5153     default:
   5154       VIXL_ASSERT(lane_size == 8);
   5155       VIXL_ASSERT(rn.IsX());
   5156       format = NEON_2D;
   5157       break;
   5158   }
   5159 
   5160   VIXL_ASSERT(
   5161       (0 <= vd_index) &&
   5162       (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
   5163   Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
   5164 }
   5165 
   5166 
   5167 void Assembler::mov(const VRegister& vd, int vd_index, const Register& rn) {
   5168   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5169   ins(vd, vd_index, rn);
   5170 }
   5171 
   5172 
   5173 void Assembler::umov(const Register& rd, const VRegister& vn, int vn_index) {
   5174   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5175   // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
   5176   // number of lanes, and T is b, h, s or d.
   5177   int lane_size = vn.GetLaneSizeInBytes();
   5178   NEONFormatField format;
   5179   Instr q = 0;
   5180   switch (lane_size) {
   5181     case 1:
   5182       format = NEON_16B;
   5183       VIXL_ASSERT(rd.IsW());
   5184       break;
   5185     case 2:
   5186       format = NEON_8H;
   5187       VIXL_ASSERT(rd.IsW());
   5188       break;
   5189     case 4:
   5190       format = NEON_4S;
   5191       VIXL_ASSERT(rd.IsW());
   5192       break;
   5193     default:
   5194       VIXL_ASSERT(lane_size == 8);
   5195       VIXL_ASSERT(rd.IsX());
   5196       format = NEON_2D;
   5197       q = NEON_Q;
   5198       break;
   5199   }
   5200 
   5201   VIXL_ASSERT(
   5202       (0 <= vn_index) &&
   5203       (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
   5204   Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
   5205 }
   5206 
   5207 
   5208 void Assembler::mov(const Register& rd, const VRegister& vn, int vn_index) {
   5209   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5210   VIXL_ASSERT(vn.GetSizeInBytes() >= 4);
   5211   umov(rd, vn, vn_index);
   5212 }
   5213 
   5214 
   5215 void Assembler::smov(const Register& rd, const VRegister& vn, int vn_index) {
   5216   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5217   // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
   5218   // number of lanes, and T is b, h, s.
   5219   int lane_size = vn.GetLaneSizeInBytes();
   5220   NEONFormatField format;
   5221   Instr q = 0;
   5222   VIXL_ASSERT(lane_size != 8);
   5223   switch (lane_size) {
   5224     case 1:
   5225       format = NEON_16B;
   5226       break;
   5227     case 2:
   5228       format = NEON_8H;
   5229       break;
   5230     default:
   5231       VIXL_ASSERT(lane_size == 4);
   5232       VIXL_ASSERT(rd.IsX());
   5233       format = NEON_4S;
   5234       break;
   5235   }
   5236   q = rd.IsW() ? 0 : NEON_Q;
   5237   VIXL_ASSERT(
   5238       (0 <= vn_index) &&
   5239       (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
   5240   Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
   5241 }
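         // umov zero-extends the selected lane, smov sign-extends it: e.g.
         // umov(w0, v1.V4S(), 2) reads lane 2 of v1 into w0 with zero-extension, while
         // smov(x0, v1.V8H(), 3) sign-extends lane 3 into x0.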
   5242 
   5243 
   5244 void Assembler::cls(const VRegister& vd, const VRegister& vn) {
   5245   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5246   VIXL_ASSERT(AreSameFormat(vd, vn));
   5247   VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
   5248   Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
   5249 }
   5250 
   5251 
   5252 void Assembler::clz(const VRegister& vd, const VRegister& vn) {
   5253   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5254   VIXL_ASSERT(AreSameFormat(vd, vn));
   5255   VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
   5256   Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
   5257 }
   5258 
   5259 
   5260 void Assembler::cnt(const VRegister& vd, const VRegister& vn) {
   5261   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5262   VIXL_ASSERT(AreSameFormat(vd, vn));
   5263   VIXL_ASSERT(vd.Is8B() || vd.Is16B());
   5264   Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
   5265 }
   5266 
   5267 
   5268 void Assembler::rev16(const VRegister& vd, const VRegister& vn) {
   5269   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5270   VIXL_ASSERT(AreSameFormat(vd, vn));
   5271   VIXL_ASSERT(vd.Is8B() || vd.Is16B());
   5272   Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
   5273 }
   5274 
   5275 
   5276 void Assembler::rev32(const VRegister& vd, const VRegister& vn) {
   5277   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5278   VIXL_ASSERT(AreSameFormat(vd, vn));
   5279   VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
   5280   Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
   5281 }
   5282 
   5283 
   5284 void Assembler::rev64(const VRegister& vd, const VRegister& vn) {
   5285   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5286   VIXL_ASSERT(AreSameFormat(vd, vn));
   5287   VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
   5288   Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
   5289 }
   5290 
   5291 
   5292 void Assembler::ursqrte(const VRegister& vd, const VRegister& vn) {
   5293   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5294   VIXL_ASSERT(AreSameFormat(vd, vn));
   5295   VIXL_ASSERT(vd.Is2S() || vd.Is4S());
   5296   Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
   5297 }
   5298 
   5299 
   5300 void Assembler::urecpe(const VRegister& vd, const VRegister& vn) {
   5301   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5302   VIXL_ASSERT(AreSameFormat(vd, vn));
   5303   VIXL_ASSERT(vd.Is2S() || vd.Is4S());
   5304   Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
   5305 }
   5306 
   5307 
   5308 void Assembler::NEONAddlp(const VRegister& vd,
   5309                           const VRegister& vn,
   5310                           NEON2RegMiscOp op) {
   5311   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5312   VIXL_ASSERT((op == NEON_SADDLP) || (op == NEON_UADDLP) ||
   5313               (op == NEON_SADALP) || (op == NEON_UADALP));
   5314 
   5315   VIXL_ASSERT((vn.Is8B() && vd.Is4H()) || (vn.Is4H() && vd.Is2S()) ||
   5316               (vn.Is2S() && vd.Is1D()) || (vn.Is16B() && vd.Is8H()) ||
   5317               (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
   5318   Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
   5319 }
   5320 
   5321 
   5322 void Assembler::saddlp(const VRegister& vd, const VRegister& vn) {
   5323   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5324   NEONAddlp(vd, vn, NEON_SADDLP);
   5325 }
   5326 
   5327 
   5328 void Assembler::uaddlp(const VRegister& vd, const VRegister& vn) {
   5329   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5330   NEONAddlp(vd, vn, NEON_UADDLP);
   5331 }
   5332 
   5333 
   5334 void Assembler::sadalp(const VRegister& vd, const VRegister& vn) {
   5335   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5336   NEONAddlp(vd, vn, NEON_SADALP);
   5337 }
   5338 
   5339 
   5340 void Assembler::uadalp(const VRegister& vd, const VRegister& vn) {
   5341   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5342   NEONAddlp(vd, vn, NEON_UADALP);
   5343 }
   5344 
   5345 
   5346 void Assembler::NEONAcrossLanesL(const VRegister& vd,
   5347                                  const VRegister& vn,
   5348                                  NEONAcrossLanesOp op) {
   5349   VIXL_ASSERT((vn.Is8B() && vd.Is1H()) || (vn.Is16B() && vd.Is1H()) ||
   5350               (vn.Is4H() && vd.Is1S()) || (vn.Is8H() && vd.Is1S()) ||
   5351               (vn.Is4S() && vd.Is1D()));
   5352   Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
   5353 }
   5354 
   5355 
   5356 void Assembler::saddlv(const VRegister& vd, const VRegister& vn) {
   5357   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5358   NEONAcrossLanesL(vd, vn, NEON_SADDLV);
   5359 }
   5360 
   5361 
   5362 void Assembler::uaddlv(const VRegister& vd, const VRegister& vn) {
   5363   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5364   NEONAcrossLanesL(vd, vn, NEON_UADDLV);
   5365 }
   5366 
   5367 
   5368 void Assembler::NEONAcrossLanes(const VRegister& vd,
   5369                                 const VRegister& vn,
   5370                                 NEONAcrossLanesOp op,
   5371                                 Instr op_half) {
   5372   VIXL_ASSERT((vn.Is8B() && vd.Is1B()) || (vn.Is16B() && vd.Is1B()) ||
   5373               (vn.Is4H() && vd.Is1H()) || (vn.Is8H() && vd.Is1H()) ||
   5374               (vn.Is4S() && vd.Is1S()));
   5375   if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
   5376     if (vd.Is1H()) {
   5377       VIXL_ASSERT(op_half != 0);
   5378       Instr vop = op_half;
   5379       if (vn.Is8H()) {
   5380         vop |= NEON_Q;
   5381       }
   5382       Emit(vop | Rn(vn) | Rd(vd));
   5383     } else {
   5384       Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
   5385     }
   5386   } else {
   5387     Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
   5388   }
   5389 }
   5390 
   5391 // clang-format off
   5392 #define NEON_ACROSSLANES_LIST(V)           \
   5393   V(addv,    NEON_ADDV)                    \
   5394   V(smaxv,   NEON_SMAXV)                   \
   5395   V(sminv,   NEON_SMINV)                   \
   5396   V(umaxv,   NEON_UMAXV)                   \
   5397   V(uminv,   NEON_UMINV)
   5398 // clang-format on
   5399 
   5400 #define VIXL_DEFINE_ASM_FUNC(FN, OP)                             \
   5401   void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
   5402     VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));                     \
   5403     NEONAcrossLanes(vd, vn, OP, 0);                              \
   5404   }
   5405 NEON_ACROSSLANES_LIST(VIXL_DEFINE_ASM_FUNC)
   5406 #undef VIXL_DEFINE_ASM_FUNC
   5407 
   5408 
   5409 // clang-format off
   5410 #define NEON_ACROSSLANES_FP_LIST(V)   \
   5411   V(fmaxv,   NEON_FMAXV,   NEON_FMAXV_H) \
   5412   V(fminv,   NEON_FMINV,   NEON_FMINV_H) \
   5413   V(fmaxnmv, NEON_FMAXNMV, NEON_FMAXNMV_H) \
   5414   V(fminnmv, NEON_FMINNMV, NEON_FMINNMV_H)
   5415 // clang-format on
   5416 
   5417 #define VIXL_DEFINE_ASM_FUNC(FN, OP, OP_H)                            \
   5418   void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
   5419     VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON));   \
   5420     if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf));  \
   5421     VIXL_ASSERT(vd.Is1S() || vd.Is1H());                         \
   5422     NEONAcrossLanes(vd, vn, OP, OP_H);                           \
   5423   }
   5424 NEON_ACROSSLANES_FP_LIST(VIXL_DEFINE_ASM_FUNC)
   5425 #undef VIXL_DEFINE_ASM_FUNC
   5426 
   5427 
   5428 void Assembler::NEONPerm(const VRegister& vd,
   5429                          const VRegister& vn,
   5430                          const VRegister& vm,
   5431                          NEONPermOp op) {
   5432   VIXL_ASSERT(AreSameFormat(vd, vn, vm));
   5433   VIXL_ASSERT(!vd.Is1D());
   5434   Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
   5435 }
   5436 
   5437 
   5438 void Assembler::trn1(const VRegister& vd,
   5439                      const VRegister& vn,
   5440                      const VRegister& vm) {
   5441   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5442   NEONPerm(vd, vn, vm, NEON_TRN1);
   5443 }
   5444 
   5445 
   5446 void Assembler::trn2(const VRegister& vd,
   5447                      const VRegister& vn,
   5448                      const VRegister& vm) {
   5449   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5450   NEONPerm(vd, vn, vm, NEON_TRN2);
   5451 }
   5452 
   5453 
   5454 void Assembler::uzp1(const VRegister& vd,
   5455                      const VRegister& vn,
   5456                      const VRegister& vm) {
   5457   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5458   NEONPerm(vd, vn, vm, NEON_UZP1);
   5459 }
   5460 
   5461 
   5462 void Assembler::uzp2(const VRegister& vd,
   5463                      const VRegister& vn,
   5464                      const VRegister& vm) {
   5465   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5466   NEONPerm(vd, vn, vm, NEON_UZP2);
   5467 }
   5468 
   5469 
   5470 void Assembler::zip1(const VRegister& vd,
   5471                      const VRegister& vn,
   5472                      const VRegister& vm) {
   5473   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5474   NEONPerm(vd, vn, vm, NEON_ZIP1);
   5475 }
   5476 
   5477 
   5478 void Assembler::zip2(const VRegister& vd,
   5479                      const VRegister& vn,
   5480                      const VRegister& vm) {
   5481   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5482   NEONPerm(vd, vn, vm, NEON_ZIP2);
   5483 }
   5484 
   5485 
   5486 void Assembler::NEONShiftImmediate(const VRegister& vd,
   5487                                    const VRegister& vn,
   5488                                    NEONShiftImmediateOp op,
   5489                                    int immh_immb) {
   5490   VIXL_ASSERT(AreSameFormat(vd, vn));
   5491   Instr q, scalar;
   5492   if (vn.IsScalar()) {
   5493     q = NEON_Q;
   5494     scalar = NEONScalar;
   5495   } else {
   5496     q = vd.IsD() ? 0 : NEON_Q;
   5497     scalar = 0;
   5498   }
   5499   Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
   5500 }
   5501 
   5502 
   5503 void Assembler::NEONShiftLeftImmediate(const VRegister& vd,
   5504                                        const VRegister& vn,
   5505                                        int shift,
   5506                                        NEONShiftImmediateOp op) {
   5507   int lane_size_in_bits = vn.GetLaneSizeInBits();
   5508   VIXL_ASSERT((shift >= 0) && (shift < lane_size_in_bits));
   5509   NEONShiftImmediate(vd, vn, op, (lane_size_in_bits + shift) << 16);
   5510 }
   5511 
   5512 
   5513 void Assembler::NEONShiftRightImmediate(const VRegister& vd,
   5514                                         const VRegister& vn,
   5515                                         int shift,
   5516                                         NEONShiftImmediateOp op) {
   5517   int lane_size_in_bits = vn.GetLaneSizeInBits();
   5518   VIXL_ASSERT((shift >= 1) && (shift <= lane_size_in_bits));
   5519   NEONShiftImmediate(vd, vn, op, ((2 * lane_size_in_bits) - shift) << 16);
   5520 }
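
        // Informal note on the encoding used by the two helpers above: the
        // immh:immb field carries both the lane size and the shift amount. A
        // left shift stores lane_size + shift, a right shift stores
        // 2 * lane_size - shift. For example, "shl v0.4h, v1.4h, #3" encodes
        // immh:immb == 16 + 3 == 0b0010011, while "ushr v0.4h, v1.4h, #3"
        // encodes 32 - 3 == 0b0011101.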
   5521 
   5522 
   5523 void Assembler::NEONShiftImmediateL(const VRegister& vd,
   5524                                     const VRegister& vn,
   5525                                     int shift,
   5526                                     NEONShiftImmediateOp op) {
   5527   int lane_size_in_bits = vn.GetLaneSizeInBits();
   5528   VIXL_ASSERT((shift >= 0) && (shift < lane_size_in_bits));
   5529   int immh_immb = (lane_size_in_bits + shift) << 16;
   5530 
   5531   VIXL_ASSERT((vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
   5532               (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
   5533               (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
   5534   Instr q;
   5535   q = vn.IsD() ? 0 : NEON_Q;
   5536   Emit(q | op | immh_immb | Rn(vn) | Rd(vd));
   5537 }
   5538 
   5539 
   5540 void Assembler::NEONShiftImmediateN(const VRegister& vd,
   5541                                     const VRegister& vn,
   5542                                     int shift,
   5543                                     NEONShiftImmediateOp op) {
   5544   Instr q, scalar;
   5545   int lane_size_in_bits = vd.GetLaneSizeInBits();
   5546   VIXL_ASSERT((shift >= 1) && (shift <= lane_size_in_bits));
   5547   int immh_immb = (2 * lane_size_in_bits - shift) << 16;
   5548 
   5549   if (vn.IsScalar()) {
   5550     VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
   5551                 (vd.Is1S() && vn.Is1D()));
   5552     q = NEON_Q;
   5553     scalar = NEONScalar;
   5554   } else {
   5555     VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
   5556                 (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
   5557                 (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
   5558     scalar = 0;
   5559     q = vd.IsD() ? 0 : NEON_Q;
   5560   }
   5561   Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
   5562 }
   5563 
   5564 
   5565 void Assembler::shl(const VRegister& vd, const VRegister& vn, int shift) {
   5566   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5567   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   5568   NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL);
   5569 }
   5570 
   5571 
   5572 void Assembler::sli(const VRegister& vd, const VRegister& vn, int shift) {
   5573   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5574   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   5575   NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI);
   5576 }
   5577 
   5578 
   5579 void Assembler::sqshl(const VRegister& vd, const VRegister& vn, int shift) {
   5580   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5581   NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm);
   5582 }
   5583 
   5584 
   5585 void Assembler::sqshlu(const VRegister& vd, const VRegister& vn, int shift) {
   5586   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5587   NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU);
   5588 }
   5589 
   5590 
   5591 void Assembler::uqshl(const VRegister& vd, const VRegister& vn, int shift) {
   5592   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5593   NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm);
   5594 }
   5595 
   5596 
   5597 void Assembler::sshll(const VRegister& vd, const VRegister& vn, int shift) {
   5598   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5599   VIXL_ASSERT(vn.IsD());
   5600   NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
   5601 }
   5602 
   5603 
   5604 void Assembler::sshll2(const VRegister& vd, const VRegister& vn, int shift) {
   5605   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5606   VIXL_ASSERT(vn.IsQ());
   5607   NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
   5608 }
   5609 
   5610 
   5611 void Assembler::sxtl(const VRegister& vd, const VRegister& vn) {
   5612   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5613   sshll(vd, vn, 0);
   5614 }
   5615 
   5616 
   5617 void Assembler::sxtl2(const VRegister& vd, const VRegister& vn) {
   5618   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5619   sshll2(vd, vn, 0);
   5620 }
   5621 
   5622 
   5623 void Assembler::ushll(const VRegister& vd, const VRegister& vn, int shift) {
   5624   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5625   VIXL_ASSERT(vn.IsD());
   5626   NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
   5627 }
   5628 
   5629 
   5630 void Assembler::ushll2(const VRegister& vd, const VRegister& vn, int shift) {
   5631   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5632   VIXL_ASSERT(vn.IsQ());
   5633   NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
   5634 }
   5635 
   5636 
   5637 void Assembler::uxtl(const VRegister& vd, const VRegister& vn) {
   5638   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5639   ushll(vd, vn, 0);
   5640 }
   5641 
   5642 
   5643 void Assembler::uxtl2(const VRegister& vd, const VRegister& vn) {
   5644   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5645   ushll2(vd, vn, 0);
   5646 }
   5647 
   5648 
   5649 void Assembler::sri(const VRegister& vd, const VRegister& vn, int shift) {
   5650   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5651   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   5652   NEONShiftRightImmediate(vd, vn, shift, NEON_SRI);
   5653 }
   5654 
   5655 
   5656 void Assembler::sshr(const VRegister& vd, const VRegister& vn, int shift) {
   5657   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5658   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   5659   NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR);
   5660 }
   5661 
   5662 
   5663 void Assembler::ushr(const VRegister& vd, const VRegister& vn, int shift) {
   5664   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5665   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   5666   NEONShiftRightImmediate(vd, vn, shift, NEON_USHR);
   5667 }
   5668 
   5669 
   5670 void Assembler::srshr(const VRegister& vd, const VRegister& vn, int shift) {
   5671   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5672   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   5673   NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR);
   5674 }
   5675 
   5676 
   5677 void Assembler::urshr(const VRegister& vd, const VRegister& vn, int shift) {
   5678   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5679   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   5680   NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR);
   5681 }
   5682 
   5683 
   5684 void Assembler::ssra(const VRegister& vd, const VRegister& vn, int shift) {
   5685   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5686   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   5687   NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA);
   5688 }
   5689 
   5690 
   5691 void Assembler::usra(const VRegister& vd, const VRegister& vn, int shift) {
   5692   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5693   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   5694   NEONShiftRightImmediate(vd, vn, shift, NEON_USRA);
   5695 }
   5696 
   5697 
   5698 void Assembler::srsra(const VRegister& vd, const VRegister& vn, int shift) {
   5699   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5700   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   5701   NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA);
   5702 }
   5703 
   5704 
   5705 void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift) {
   5706   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5707   VIXL_ASSERT(vd.IsVector() || vd.Is1D());
   5708   NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA);
   5709 }
   5710 
   5711 
   5712 void Assembler::shrn(const VRegister& vd, const VRegister& vn, int shift) {
   5713   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5714   VIXL_ASSERT(vn.IsVector() && vd.IsD());
   5715   NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
   5716 }
   5717 
   5718 
   5719 void Assembler::shrn2(const VRegister& vd, const VRegister& vn, int shift) {
   5720   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5721   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   5722   NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
   5723 }
   5724 
   5725 
   5726 void Assembler::rshrn(const VRegister& vd, const VRegister& vn, int shift) {
   5727   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5728   VIXL_ASSERT(vn.IsVector() && vd.IsD());
   5729   NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
   5730 }
   5731 
   5732 
   5733 void Assembler::rshrn2(const VRegister& vd, const VRegister& vn, int shift) {
   5734   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5735   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   5736   NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
   5737 }
   5738 
   5739 
   5740 void Assembler::sqshrn(const VRegister& vd, const VRegister& vn, int shift) {
   5741   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5742   VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
   5743   NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
   5744 }
   5745 
   5746 
   5747 void Assembler::sqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
   5748   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5749   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   5750   NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
   5751 }
   5752 
   5753 
   5754 void Assembler::sqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
   5755   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5756   VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
   5757   NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
   5758 }
   5759 
   5760 
   5761 void Assembler::sqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
   5762   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5763   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   5764   NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
   5765 }
   5766 
   5767 
   5768 void Assembler::sqshrun(const VRegister& vd, const VRegister& vn, int shift) {
   5769   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5770   VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
   5771   NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
   5772 }
   5773 
   5774 
   5775 void Assembler::sqshrun2(const VRegister& vd, const VRegister& vn, int shift) {
   5776   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5777   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   5778   NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
   5779 }
   5780 
   5781 
   5782 void Assembler::sqrshrun(const VRegister& vd, const VRegister& vn, int shift) {
   5783   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5784   VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
   5785   NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
   5786 }
   5787 
   5788 
   5789 void Assembler::sqrshrun2(const VRegister& vd, const VRegister& vn, int shift) {
   5790   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5791   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   5792   NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
   5793 }
   5794 
   5795 
   5796 void Assembler::uqshrn(const VRegister& vd, const VRegister& vn, int shift) {
   5797   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5798   VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
   5799   NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
   5800 }
   5801 
   5802 
   5803 void Assembler::uqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
   5804   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5805   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   5806   NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
   5807 }
   5808 
   5809 
   5810 void Assembler::uqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
   5811   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5812   VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
   5813   NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
   5814 }
   5815 
   5816 
   5817 void Assembler::uqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
   5818   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5819   VIXL_ASSERT(vn.IsVector() && vd.IsQ());
   5820   NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
   5821 }
   5822 
   5823 void Assembler::smmla(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
   5824   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5825   VIXL_ASSERT(CPUHas(CPUFeatures::kI8MM));
   5826   VIXL_ASSERT(vd.IsLaneSizeS());
   5827   VIXL_ASSERT(vn.IsLaneSizeB() && vm.IsLaneSizeB());
   5828 
   5829   Emit(0x4e80a400 | Rd(vd) | Rn(vn) | Rm(vm));
   5830 }
   5831 
   5832 void Assembler::usmmla(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
   5833   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5834   VIXL_ASSERT(CPUHas(CPUFeatures::kI8MM));
   5835   VIXL_ASSERT(vd.IsLaneSizeS());
   5836   VIXL_ASSERT(vn.IsLaneSizeB() && vm.IsLaneSizeB());
   5837 
   5838   Emit(0x4e80ac00 | Rd(vd) | Rn(vn) | Rm(vm));
   5839 }
   5840 
   5841 void Assembler::ummla(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
   5842   VIXL_ASSERT(CPUHas(CPUFeatures::kNEON));
   5843   VIXL_ASSERT(CPUHas(CPUFeatures::kI8MM));
   5844   VIXL_ASSERT(vd.IsLaneSizeS());
   5845   VIXL_ASSERT(vn.IsLaneSizeB() && vm.IsLaneSizeB());
   5846 
   5847   Emit(0x6e80a400 | Rd(vd) | Rn(vn) | Rm(vm));
   5848 }
   5849 
   5850 // Note:
   5851 // For all the FP*ToImm8 helpers below, a difference in case
   5852 // for the same letter indicates a negated bit.
   5853 // If b is 1, then B is 0.
   5854 uint32_t Assembler::FP16ToImm8(Float16 imm) {
   5855   VIXL_ASSERT(IsImmFP16(imm));
   5856   // Half: aBbb.cdef.gh00.0000 (16 bits)
   5857   uint16_t bits = Float16ToRawbits(imm);
   5858   // bit7: a000.0000
   5859   uint16_t bit7 = ((bits >> 15) & 0x1) << 7;
   5860   // bit6: 0b00.0000
   5861   uint16_t bit6 = ((bits >> 13) & 0x1) << 6;
   5862   // bit5_to_0: 00cd.efgh
   5863   uint16_t bit5_to_0 = (bits >> 6) & 0x3f;
   5864   uint32_t result = static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
   5865   return result;
   5866 }
   5867 
   5868 
   5869 Instr Assembler::ImmFP16(Float16 imm) {
   5870   return FP16ToImm8(imm) << ImmFP_offset;
   5871 }
   5872 
   5873 
   5874 uint32_t Assembler::FP32ToImm8(float imm) {
   5875   // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
   5876   uint32_t bits = FloatToRawbits(imm);
   5877   VIXL_ASSERT(IsImmFP32(bits));
   5878   // bit7: a000.0000
   5879   uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
   5880   // bit6: 0b00.0000
   5881   uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
   5882   // bit5_to_0: 00cd.efgh
   5883   uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
   5884 
   5885   return bit7 | bit6 | bit5_to_0;
   5886 }
   5887 
   5888 
   5889 Instr Assembler::ImmFP32(float imm) { return FP32ToImm8(imm) << ImmFP_offset; }
   5890 
   5891 
   5892 uint32_t Assembler::FP64ToImm8(double imm) {
   5893   // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
   5894   //       0000.0000.0000.0000.0000.0000.0000.0000
   5895   uint64_t bits = DoubleToRawbits(imm);
   5896   VIXL_ASSERT(IsImmFP64(bits));
   5897   // bit7: a000.0000
   5898   uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
   5899   // bit6: 0b00.0000
   5900   uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
   5901   // bit5_to_0: 00cd.efgh
   5902   uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
   5903 
   5904   return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
   5905 }
   5906 
   5907 
   5908 Instr Assembler::ImmFP64(double imm) { return FP64ToImm8(imm) << ImmFP_offset; }
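
        // Informal worked example of the imm8 extraction above: for 1.0f the
        // raw bits are 0x3f800000, so a == 0, b == 1 (bit 29) and
        // cdefgh == 0b110000, giving imm8 == 0x70; the double 1.0
        // (0x3ff0000000000000) likewise maps to 0x70 via FP64ToImm8. A value
        // such as 0.1f has non-zero low fraction bits, fails IsImmFP32, and so
        // cannot use the FMOV (immediate) form at all.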
   5909 
   5910 
   5911 // Code generation helpers.
   5912 bool Assembler::OneInstrMoveImmediateHelper(Assembler* assm,
   5913                                             const Register& dst,
   5914                                             uint64_t imm) {
   5915   bool emit_code = assm != NULL;
   5916   unsigned n, imm_s, imm_r;
   5917   int reg_size = dst.GetSizeInBits();
   5918 
   5919   if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
   5920     // Immediate can be represented in a move zero instruction. Movz can't write
   5921     // to the stack pointer.
   5922     if (emit_code) {
   5923       assm->movz(dst, imm);
   5924     }
   5925     return true;
   5926   } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
   5927     // Immediate can be represented in a move negative instruction. Movn can't
   5928     // write to the stack pointer.
   5929     if (emit_code) {
   5930       assm->movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
   5931     }
   5932     return true;
   5933   } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
   5934     // Immediate can be represented in a logical orr instruction.
   5935     VIXL_ASSERT(!dst.IsZero());
   5936     if (emit_code) {
   5937       assm->LogicalImmediate(dst,
   5938                              AppropriateZeroRegFor(dst),
   5939                              n,
   5940                              imm_s,
   5941                              imm_r,
   5942                              ORR);
   5943     }
   5944     return true;
   5945   }
   5946   return false;
   5947 }
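
        // Illustrative cases for the selection above (informal, assuming a
        // 64-bit destination): 0x0000000012340000 has three clear halfwords,
        // so it is emitted as a single MOVZ (value 0x1234, shift 16);
        // 0xffffffffffff1234 fails the MOVZ test but its inverse passes, so it
        // becomes MOVN with 0xedcb; 0x5555555555555555 is neither, but is a
        // valid logical immediate and is emitted as ORR with the zero
        // register. Anything else (e.g. 0x0000123400005678) returns false and
        // is left for the macro assembler to synthesise in several steps.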
   5948 
   5949 
   5950 void Assembler::MoveWide(const Register& rd,
   5951                          uint64_t imm,
   5952                          int shift,
   5953                          MoveWideImmediateOp mov_op) {
   5954   // Ignore the top 32 bits of an immediate if we're moving to a W register.
   5955   if (rd.Is32Bits()) {
   5956     // Check that the top 32 bits are zero (a positive 32-bit number) or top
   5957     // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
   5958     VIXL_ASSERT(((imm >> kWRegSize) == 0) ||
   5959                 ((imm >> (kWRegSize - 1)) == 0x1ffffffff));
   5960     imm &= kWRegMask;
   5961   }
   5962 
   5963   if (shift >= 0) {
   5964     // Explicit shift specified.
   5965     VIXL_ASSERT((shift == 0) || (shift == 16) || (shift == 32) ||
   5966                 (shift == 48));
   5967     VIXL_ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
   5968     shift /= 16;
   5969   } else {
   5970     // Calculate a new immediate and shift combination to encode the immediate
   5971     // argument.
   5972     VIXL_ASSERT(shift == -1);
   5973     shift = 0;
   5974     if ((imm & 0xffffffffffff0000) == 0) {
   5975       // Nothing to do.
   5976     } else if ((imm & 0xffffffff0000ffff) == 0) {
   5977       imm >>= 16;
   5978       shift = 1;
   5979     } else if ((imm & 0xffff0000ffffffff) == 0) {
   5980       VIXL_ASSERT(rd.Is64Bits());
   5981       imm >>= 32;
   5982       shift = 2;
   5983     } else if ((imm & 0x0000ffffffffffff) == 0) {
   5984       VIXL_ASSERT(rd.Is64Bits());
   5985       imm >>= 48;
   5986       shift = 3;
   5987     }
   5988   }
   5989 
   5990   VIXL_ASSERT(IsUint16(imm));
   5991 
   5992   Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) | ImmMoveWide(imm) |
   5993        ShiftMoveWide(shift));
   5994 }
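
        // For example, with shift == -1 an immediate of 0x0000abcd00000000 is
        // reduced to imm == 0xabcd with shift == 2, so the emitted
        // MOVZ/MOVN/MOVK carries hw == 2 (i.e. "lsl #32"). Callers that pass
        // an explicit shift of 0, 16, 32 or 48 bypass this inference entirely.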
   5995 
   5996 
   5997 void Assembler::AddSub(const Register& rd,
   5998                        const Register& rn,
   5999                        const Operand& operand,
   6000                        FlagsUpdate S,
   6001                        AddSubOp op) {
   6002   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
   6003   if (operand.IsImmediate()) {
   6004     int64_t immediate = operand.GetImmediate();
   6005     VIXL_ASSERT(IsImmAddSub(immediate));
   6006     Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
   6007     Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
   6008          ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
   6009   } else if (operand.IsShiftedRegister()) {
   6010     VIXL_ASSERT(operand.GetRegister().GetSizeInBits() == rd.GetSizeInBits());
   6011     VIXL_ASSERT(operand.GetShift() != ROR);
   6012 
   6013     // For instructions of the form:
   6014     //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
   6015     //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
   6016     //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
   6017     //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
   6018     // or their 64-bit register equivalents, convert the operand from shifted to
   6019     // extended register mode, and emit an add/sub extended instruction.
   6020     if (rn.IsSP() || rd.IsSP()) {
   6021       VIXL_ASSERT(!(rd.IsSP() && (S == SetFlags)));
   6022       DataProcExtendedRegister(rd,
   6023                                rn,
   6024                                operand.ToExtendedRegister(),
   6025                                S,
   6026                                static_cast<Instr>(AddSubExtendedFixed) | static_cast<Instr>(op));
   6027     } else {
   6028       DataProcShiftedRegister(rd, rn, operand, S,
   6029         static_cast<Instr>(AddSubShiftedFixed) | static_cast<Instr>(op));
   6030     }
   6031   } else {
   6032     VIXL_ASSERT(operand.IsExtendedRegister());
   6033     DataProcExtendedRegister(rd, rn, operand, S,
   6034       static_cast<Instr>(AddSubExtendedFixed) | static_cast<Instr>(op));
   6035   }
   6036 }
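
        // As a concrete case of the SP handling above, add(wsp, w1, Operand(w2))
        // cannot use the shifted-register form (it has no SP encoding), so the
        // operand is converted to an extended register and the instruction is
        // emitted as "add wsp, w1, w2, uxtw #0" instead.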
   6037 
   6038 
   6039 void Assembler::AddSubWithCarry(const Register& rd,
   6040                                 const Register& rn,
   6041                                 const Operand& operand,
   6042                                 FlagsUpdate S,
   6043                                 AddSubWithCarryOp op) {
   6044   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
   6045   VIXL_ASSERT(rd.GetSizeInBits() == operand.GetRegister().GetSizeInBits());
   6046   VIXL_ASSERT(operand.IsShiftedRegister() && (operand.GetShiftAmount() == 0));
   6047   Emit(SF(rd) | op | Flags(S) | Rm(operand.GetRegister()) | Rn(rn) | Rd(rd));
   6048 }
   6049 
   6050 
   6051 void Assembler::hlt(int code) {
   6052   VIXL_ASSERT(IsUint16(code));
   6053   Emit(HLT | ImmException(code));
   6054 }
   6055 
   6056 
   6057 void Assembler::brk(int code) {
   6058   VIXL_ASSERT(IsUint16(code));
   6059   Emit(BRK | ImmException(code));
   6060 }
   6061 
   6062 
   6063 void Assembler::svc(int code) { Emit(SVC | ImmException(code)); }
   6064 
   6065 void Assembler::udf(int code) { Emit(UDF | ImmUdf(code)); }
   6066 
   6067 
   6068 // TODO(all): The third parameter should be passed by reference, but gcc 4.8.2
   6069 // reports a bogus uninitialised warning if it is.
   6070 void Assembler::Logical(const Register& rd,
   6071                         const Register& rn,
   6072                         const Operand operand,
   6073                         LogicalOp op) {
   6074   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
   6075   if (operand.IsImmediate()) {
   6076     int64_t immediate = operand.GetImmediate();
   6077     unsigned reg_size = rd.GetSizeInBits();
   6078 
   6079     VIXL_ASSERT(immediate != 0);
   6080     VIXL_ASSERT(immediate != -1);
   6081     VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate));
   6082 
   6083     // If the operation is NOT, invert the operation and immediate.
   6084     if ((op & NOT) == NOT) {
   6085       op = static_cast<LogicalOp>(op & ~NOT);
   6086       immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
   6087     }
   6088 
   6089     unsigned n, imm_s, imm_r;
   6090     if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
   6091       // Immediate can be encoded in the instruction.
   6092       LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
   6093     } else {
   6094       // This case is handled in the macro assembler.
   6095       VIXL_UNREACHABLE();
   6096     }
   6097   } else {
   6098     VIXL_ASSERT(operand.IsShiftedRegister());
   6099     VIXL_ASSERT(operand.GetRegister().GetSizeInBits() == rd.GetSizeInBits());
   6100     Instr dp_op = static_cast<Instr>(op) | static_cast<Instr>(LogicalShiftedFixed);
   6101     DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
   6102   }
   6103 }
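
        // The NOT folding above means, for instance, that
        // orn(x0, x1, 0xfffffffffffffff0) is emitted as ORR with the inverted
        // immediate 0xf, and bic(w0, w1, 0xfffffff0) becomes AND with 0xf;
        // only the inverted value has to be encodable as a logical immediate.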
   6104 
   6105 
   6106 void Assembler::LogicalImmediate(const Register& rd,
   6107                                  const Register& rn,
   6108                                  unsigned n,
   6109                                  unsigned imm_s,
   6110                                  unsigned imm_r,
   6111                                  LogicalOp op) {
   6112   unsigned reg_size = rd.GetSizeInBits();
   6113   Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
   6114   Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
   6115        ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
   6116        Rn(rn));
   6117 }
   6118 
   6119 
   6120 void Assembler::ConditionalCompare(const Register& rn,
   6121                                    const Operand& operand,
   6122                                    StatusFlags nzcv,
   6123                                    Condition cond,
   6124                                    ConditionalCompareOp op) {
   6125   Instr ccmpop;
   6126   if (operand.IsImmediate()) {
   6127     int64_t immediate = operand.GetImmediate();
   6128     VIXL_ASSERT(IsImmConditionalCompare(immediate));
   6129     ccmpop = static_cast<Instr>(ConditionalCompareImmediateFixed) |
   6130              static_cast<Instr>(op) |
   6131              ImmCondCmp(static_cast<unsigned>(immediate));
   6132   } else {
   6133     VIXL_ASSERT(operand.IsShiftedRegister() && (operand.GetShiftAmount() == 0));
   6134     ccmpop = static_cast<Instr>(ConditionalCompareRegisterFixed) |
   6135              static_cast<Instr>(op) |
   6136              Rm(operand.GetRegister());
   6137   }
   6138   Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
   6139 }
   6140 
   6141 
   6142 void Assembler::DataProcessing1Source(const Register& rd,
   6143                                       const Register& rn,
   6144                                       DataProcessing1SourceOp op) {
   6145   VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits());
   6146   Emit(SF(rn) | op | Rn(rn) | Rd(rd));
   6147 }
   6148 
   6149 
   6150 void Assembler::FPDataProcessing1Source(const VRegister& vd,
   6151                                         const VRegister& vn,
   6152                                         FPDataProcessing1SourceOp op) {
   6153   VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
   6154   Emit(FPType(vn) | op | Rn(vn) | Rd(vd));
   6155 }
   6156 
   6157 
   6158 void Assembler::FPDataProcessing3Source(const VRegister& vd,
   6159                                         const VRegister& vn,
   6160                                         const VRegister& vm,
   6161                                         const VRegister& va,
   6162                                         FPDataProcessing3SourceOp op) {
   6163   VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
   6164   VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm, va));
   6165   Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd) | Ra(va));
   6166 }
   6167 
   6168 
   6169 void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd,
   6170                                         const int imm8,
   6171                                         const int left_shift,
   6172                                         NEONModifiedImmediateOp op) {
   6173   VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() || vd.Is2S() ||
   6174               vd.Is4S());
   6175   VIXL_ASSERT((left_shift == 0) || (left_shift == 8) || (left_shift == 16) ||
   6176               (left_shift == 24));
   6177   VIXL_ASSERT(IsUint8(imm8));
   6178 
   6179   int cmode_1, cmode_2, cmode_3;
   6180   if (vd.Is8B() || vd.Is16B()) {
   6181     VIXL_ASSERT(op == NEONModifiedImmediate_MOVI);
   6182     cmode_1 = 1;
   6183     cmode_2 = 1;
   6184     cmode_3 = 1;
   6185   } else {
   6186     cmode_1 = (left_shift >> 3) & 1;
   6187     cmode_2 = left_shift >> 4;
   6188     cmode_3 = 0;
   6189     if (vd.Is4H() || vd.Is8H()) {
   6190       VIXL_ASSERT((left_shift == 0) || (left_shift == 8));
   6191       cmode_3 = 1;
   6192     }
   6193   }
   6194   int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1);
   6195 
   6196   int q = vd.IsQ() ? NEON_Q : 0;
   6197 
   6198   Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
   6199 }
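
        // Resulting cmode values, for reference: 8B/16B MOVI always uses
        // cmode == 0b1110; a 4H/8H immediate with LSL #8 yields
        // cmode == 0b1010; a 2S/4S immediate with LSL #16 yields
        // cmode == 0b0100. Bit 0 of cmode is always clear here; the MSL form
        // below uses cmode == 0b110x instead.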
   6200 
   6201 
   6202 void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd,
   6203                                         const int imm8,
   6204                                         const int shift_amount,
   6205                                         NEONModifiedImmediateOp op) {
   6206   VIXL_ASSERT(vd.Is2S() || vd.Is4S());
   6207   VIXL_ASSERT((shift_amount == 8) || (shift_amount == 16));
   6208   VIXL_ASSERT(IsUint8(imm8));
   6209 
   6210   int cmode_0 = (shift_amount >> 4) & 1;
   6211   int cmode = 0xc | cmode_0;
   6212 
   6213   int q = vd.IsQ() ? NEON_Q : 0;
   6214 
   6215   Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
   6216 }
   6217 
   6218 
   6219 void Assembler::EmitShift(const Register& rd,
   6220                           const Register& rn,
   6221                           Shift shift,
   6222                           unsigned shift_amount) {
   6223   switch (shift) {
   6224     case LSL:
   6225       lsl(rd, rn, shift_amount);
   6226       break;
   6227     case LSR:
   6228       lsr(rd, rn, shift_amount);
   6229       break;
   6230     case ASR:
   6231       asr(rd, rn, shift_amount);
   6232       break;
   6233     case ROR:
   6234       ror(rd, rn, shift_amount);
   6235       break;
   6236     default:
   6237       VIXL_UNREACHABLE();
   6238   }
   6239 }
   6240 
   6241 
   6242 void Assembler::EmitExtendShift(const Register& rd,
   6243                                 const Register& rn,
   6244                                 Extend extend,
   6245                                 unsigned left_shift) {
   6246   VIXL_ASSERT(rd.GetSizeInBits() >= rn.GetSizeInBits());
   6247   unsigned reg_size = rd.GetSizeInBits();
   6248   // Use the correct size of register.
   6249   Register rn_ = Register(rn.GetCode(), rd.GetSizeInBits());
   6250   // Bits extracted are high_bit:0.
   6251   unsigned high_bit = (8 << (extend & 0x3)) - 1;
   6252   // Number of bits left in the result that are not introduced by the shift.
   6253   unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
   6254 
   6255   if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
   6256     switch (extend) {
   6257       case UXTB:
   6258       case UXTH:
   6259       case UXTW:
   6260         ubfm(rd, rn_, non_shift_bits, high_bit);
   6261         break;
   6262       case SXTB:
   6263       case SXTH:
   6264       case SXTW:
   6265         sbfm(rd, rn_, non_shift_bits, high_bit);
   6266         break;
   6267       case UXTX:
   6268       case SXTX: {
   6269         VIXL_ASSERT(rn.GetSizeInBits() == kXRegSize);
   6270         // Nothing to extend. Just shift.
   6271         lsl(rd, rn_, left_shift);
   6272         break;
   6273       }
   6274       default:
   6275         VIXL_UNREACHABLE();
   6276     }
   6277   } else {
   6278     // No need to extend as the extended bits would be shifted away.
   6279     lsl(rd, rn_, left_shift);
   6280   }
   6281 }
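
        // Example of the synthesis above (informal): for a 64-bit destination
        // with extend == SXTB and left_shift == 2, high_bit is 7 and
        // non_shift_bits is 62, so this emits sbfm(rd, rn, 62, 7), i.e.
        // "sbfiz rd, rn, #2, #8". With left_shift == 60 the extended bits
        // would all be shifted out, so only "lsl rd, rn, #60" is emitted.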
   6282 
   6283 
   6284 void Assembler::DataProcShiftedRegister(const Register& rd,
   6285                                         const Register& rn,
   6286                                         const Operand& operand,
   6287                                         FlagsUpdate S,
   6288                                         Instr op) {
   6289   VIXL_ASSERT(operand.IsShiftedRegister());
   6290   VIXL_ASSERT(rn.Is64Bits() ||
   6291               (rn.Is32Bits() && IsUint5(operand.GetShiftAmount())));
   6292   Emit(SF(rd) | op | Flags(S) | ShiftDP(operand.GetShift()) |
   6293        ImmDPShift(operand.GetShiftAmount()) | Rm(operand.GetRegister()) |
   6294        Rn(rn) | Rd(rd));
   6295 }
   6296 
   6297 
   6298 void Assembler::DataProcExtendedRegister(const Register& rd,
   6299                                          const Register& rn,
   6300                                          const Operand& operand,
   6301                                          FlagsUpdate S,
   6302                                          Instr op) {
   6303   Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
   6304   Emit(SF(rd) | op | Flags(S) | Rm(operand.GetRegister()) |
   6305        ExtendMode(operand.GetExtend()) |
   6306        ImmExtendShift(operand.GetShiftAmount()) | dest_reg | RnSP(rn));
   6307 }
   6308 
   6309 
   6310 Instr Assembler::LoadStoreMemOperand(const MemOperand& addr,
   6311                                      unsigned access_size_in_bytes_log2,
   6312                                      LoadStoreScalingOption option) {
   6313   Instr base = RnSP(addr.GetBaseRegister());
   6314   int64_t offset = addr.GetOffset();
   6315 
   6316   if (addr.IsImmediateOffset()) {
   6317     bool prefer_unscaled =
   6318         (option == PreferUnscaledOffset) || (option == RequireUnscaledOffset);
   6319     if (prefer_unscaled && IsImmLSUnscaled(offset)) {
   6320       // Use the unscaled addressing mode.
   6321       return base | LoadStoreUnscaledOffsetFixed | ImmLS(offset);
   6322     }
   6323 
   6324     if ((option != RequireUnscaledOffset) &&
   6325         IsImmLSScaled(offset, access_size_in_bytes_log2)) {
   6326       // We need `offset` to be positive for the shift to be well-defined.
   6327       // IsImmLSScaled should check this.
   6328       VIXL_ASSERT(offset >= 0);
   6329       // Use the scaled addressing mode.
   6330       return base | LoadStoreUnsignedOffsetFixed |
   6331              ImmLSUnsigned(offset >> access_size_in_bytes_log2);
   6332     }
   6333 
   6334     if ((option != RequireScaledOffset) && IsImmLSUnscaled(offset)) {
   6335       // Use the unscaled addressing mode.
   6336       return base | LoadStoreUnscaledOffsetFixed | ImmLS(offset);
   6337     }
   6338   }
   6339 
   6340   // All remaining addressing modes are register-offset, pre-indexed or
   6341   // post-indexed modes.
   6342   VIXL_ASSERT((option != RequireUnscaledOffset) &&
   6343               (option != RequireScaledOffset));
   6344 
   6345   if (addr.IsRegisterOffset()) {
   6346     Extend ext = addr.GetExtend();
   6347     Shift shift = addr.GetShift();
   6348     unsigned shift_amount = addr.GetShiftAmount();
   6349 
   6350     // LSL is encoded in the option field as UXTX.
   6351     if (shift == LSL) {
   6352       ext = UXTX;
   6353     }
   6354 
   6355     // Shifts are encoded in one bit, indicating a left shift by the memory
   6356     // access size.
   6357     VIXL_ASSERT((shift_amount == 0) || (shift_amount == access_size_in_bytes_log2));
   6358     return base | LoadStoreRegisterOffsetFixed | Rm(addr.GetRegisterOffset()) |
   6359            ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0);
   6360   }
   6361 
   6362   if (addr.IsImmediatePreIndex() && IsImmLSUnscaled(offset)) {
   6363     return base | LoadStorePreIndexFixed | ImmLS(offset);
   6364   }
   6365 
   6366   if (addr.IsImmediatePostIndex() && IsImmLSUnscaled(offset)) {
   6367     return base | LoadStorePostIndexFixed | ImmLS(offset);
   6368   }
   6369 
   6370   // If this point is reached, the MemOperand (addr) cannot be encoded.
   6371   VIXL_UNREACHABLE();
   6372   return 0;
   6373 }
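
        // Addressing-mode selection example (informal): for an 8-byte access
        // (access_size_in_bytes_log2 == 3) with the default scaling option, an
        // immediate offset of 8 is encodable as a scaled unsigned offset
        // (imm12 == 1), an offset of -8 falls back to the unscaled form
        // (imm9 == -8, an LDUR/STUR encoding), and an offset of 4 also uses
        // the unscaled form because it is not a multiple of the access size.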
   6374 
   6375 
   6376 void Assembler::LoadStore(const CPURegister& rt,
   6377                           const MemOperand& addr,
   6378                           LoadStoreOp op,
   6379                           LoadStoreScalingOption option) {
   6380   VIXL_ASSERT(CPUHas(rt));
   6381   Emit(op | Rt(rt) | LoadStoreMemOperand(addr, CalcLSDataSize(op), option));
   6382 }
   6383 
   6384 void Assembler::LoadStorePAC(const Register& xt,
   6385                              const MemOperand& addr,
   6386                              LoadStorePACOp op) {
   6387   VIXL_ASSERT(xt.Is64Bits());
   6388   VIXL_ASSERT(addr.IsImmediateOffset() || addr.IsImmediatePreIndex());
   6389 
   6390   Instr pac_op = op;
   6391   if (addr.IsImmediatePreIndex()) {
   6392     pac_op |= LoadStorePACPreBit;
   6393   }
   6394 
   6395   Instr base = RnSP(addr.GetBaseRegister());
   6396   int64_t offset = addr.GetOffset();
   6397 
   6398   Emit(pac_op | Rt(xt) | base | ImmLSPAC(static_cast<int>(offset)));
   6399 }
   6400 
   6401 
   6402 void Assembler::Prefetch(int op,
   6403                          const MemOperand& addr,
   6404                          LoadStoreScalingOption option) {
   6405   VIXL_ASSERT(addr.IsRegisterOffset() || addr.IsImmediateOffset());
   6406 
   6407   Instr prfop = ImmPrefetchOperation(op);
   6408   Emit(PRFM | prfop | LoadStoreMemOperand(addr, kXRegSizeInBytesLog2, option));
   6409 }
   6410 
   6411 void Assembler::Prefetch(PrefetchOperation op,
   6412                          const MemOperand& addr,
   6413                          LoadStoreScalingOption option) {
   6414   // Passing unnamed values in 'op' is undefined behaviour in C++.
   6415   VIXL_ASSERT(IsNamedPrefetchOperation(op));
   6416   Prefetch(static_cast<int>(op), addr, option);
   6417 }
   6418 
   6419 
   6420 bool Assembler::IsImmAddSub(int64_t immediate) {
   6421   return IsUint12(immediate) ||
   6422          (IsUint12(immediate >> 12) && ((immediate & 0xfff) == 0));
   6423 }
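
        // That is, immediates such as 0xfff (a plain 12-bit value) or 0xabc000
        // (a 12-bit value shifted left by 12) are accepted, while 0x1001 is
        // rejected: it is neither a plain 12-bit value nor a 12-bit value with
        // the low 12 bits clear.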
   6424 
   6425 
   6426 bool Assembler::IsImmConditionalCompare(int64_t immediate) {
   6427   return IsUint5(immediate);
   6428 }
   6429 
   6430 
   6431 bool Assembler::IsImmFP16(Float16 imm) {
   6432   // Valid values will have the form:
   6433   // aBbb.cdef.gh00.0000
   6434   uint16_t bits = Float16ToRawbits(imm);
   6435   // bits[5..0] are cleared.
   6436   if ((bits & 0x3f) != 0) {
   6437     return false;
   6438   }
   6439 
   6440   // bits[13..12] are all set or all cleared.
   6441   uint16_t b_pattern = (bits >> 12) & 0x03;
   6442   if (b_pattern != 0 && b_pattern != 0x03) {
   6443     return false;
   6444   }
   6445 
   6446   // bit[14] and bit[13] are opposite.
   6447   if (((bits ^ (bits << 1)) & 0x4000) == 0) {
   6448     return false;
   6449   }
   6450 
   6451   return true;
   6452 }
   6453 
   6454 
   6455 bool Assembler::IsImmFP32(uint32_t bits) {
   6456   // Valid values will have the form:
   6457   // aBbb.bbbc.defg.h000.0000.0000.0000.0000
   6458   // bits[18..0] are cleared.
   6459   if ((bits & 0x7ffff) != 0) {
   6460     return false;
   6461   }
   6462 
   6463   // bits[29..25] are all set or all cleared.
   6464   uint32_t b_pattern = (bits >> 16) & 0x3e00;
   6465   if (b_pattern != 0 && b_pattern != 0x3e00) {
   6466     return false;
   6467   }
   6468 
   6469   // bit[30] and bit[29] are opposite.
   6470   if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
   6471     return false;
   6472   }
   6473 
   6474   return true;
   6475 }
   6476 
   6477 
   6478 bool Assembler::IsImmFP64(uint64_t bits) {
   6479   // Valid values will have the form:
   6480   // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
   6481   // 0000.0000.0000.0000.0000.0000.0000.0000
   6482   // bits[47..0] are cleared.
   6483   if ((bits & 0x0000ffffffffffff) != 0) {
   6484     return false;
   6485   }
   6486 
   6487   // bits[61..54] are all set or all cleared.
   6488   uint32_t b_pattern = (bits >> 48) & 0x3fc0;
   6489   if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
   6490     return false;
   6491   }
   6492 
   6493   // bit[62] and bit[61] are opposite.
   6494   if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
   6495     return false;
   6496   }
   6497 
   6498   return true;
   6499 }
   6500 
   6501 
   6502 bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size_in_bytes_log2) {
   6503   const auto access_size_in_bytes = 1U << access_size_in_bytes_log2;
   6504   VIXL_ASSERT(access_size_in_bytes_log2 <= kQRegSizeInBytesLog2);
   6505   return IsMultiple(offset, access_size_in_bytes) &&
   6506          IsInt7(offset / access_size_in_bytes);
   6507 }
   6508 
   6509 
   6510 bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size_in_bytes_log2) {
   6511   const auto access_size_in_bytes = 1U << access_size_in_bytes_log2;
   6512   VIXL_ASSERT(access_size_in_bytes_log2 <= kQRegSizeInBytesLog2);
   6513   return IsMultiple(offset, access_size_in_bytes) &&
   6514          IsUint12(offset / access_size_in_bytes);
   6515 }
   6516 
   6517 
   6518 bool Assembler::IsImmLSUnscaled(int64_t offset) { return IsInt9(offset); }
   6519 
   6520 
   6521 // The movn instruction can generate immediates containing an arbitrary 16-bit
   6522 // value, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
   6523 bool Assembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
   6524   return IsImmMovz(~imm, reg_size);
   6525 }
   6526 
   6527 
   6528 // The movz instruction can generate immediates containing an arbitrary 16-bit
   6529 // value, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
   6530 bool Assembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
   6531   VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
   6532   return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
   6533 }
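
        // For example, CountClearHalfWords(0x0000123400000000, 64) is three,
        // so that value is MOVZ-encodable (movz xd, #0x1234, lsl #32), whereas
        // 0x0000123400005678 has only two clear halfwords and is not.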
   6534 
   6535 
   6536 // Test if a given value can be encoded in the immediate field of a logical
   6537 // instruction.
   6538 // If it can be encoded, the function returns true, and values pointed to by n,
   6539 // imm_s and imm_r are updated with immediates encoded in the format required
   6540 // by the corresponding fields in the logical instruction.
   6541 // If it can not be encoded, the function returns false, and the values pointed
   6542 // to by n, imm_s and imm_r are undefined.
   6543 bool Assembler::IsImmLogical(uint64_t value,
   6544                              unsigned width,
   6545                              unsigned* n,
   6546                              unsigned* imm_s,
   6547                              unsigned* imm_r) {
   6548   VIXL_ASSERT((width == kBRegSize) || (width == kHRegSize) ||
   6549               (width == kSRegSize) || (width == kDRegSize));
   6550 
   6551   bool negate = false;
   6552 
   6553   // Logical immediates are encoded using parameters n, imm_s and imm_r using
   6554   // the following table:
   6555   //
   6556   //    N   imms    immr    size        S             R
   6557   //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
   6558   //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
   6559   //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
   6560   //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
   6561   //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
   6562   //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
   6563   // (s bits must not be all set)
   6564   //
   6565   // A pattern is constructed of size bits, where the least significant S+1 bits
   6566   // are set. The pattern is rotated right by R, and repeated across a 32 or
   6567   // 64-bit value, depending on destination register width.
   6568   //
   6569   // Put another way: the basic format of a logical immediate is a single
   6570   // contiguous stretch of 1 bits, repeated across the whole word at intervals
   6571   // given by a power of 2. To identify them quickly, we first locate the
   6572   // lowest stretch of 1 bits, then the next 1 bit above that; that combination
   6573   // is different for every logical immediate, so it gives us all the
   6574   // information we need to identify the only logical immediate that our input
   6575   // could be, and then we simply check if that's the value we actually have.
   6576   //
   6577   // (The rotation parameter does give the possibility of the stretch of 1 bits
   6578   // going 'round the end' of the word. To deal with that, we observe that in
   6579   // any situation where that happens the bitwise NOT of the value is also a
   6580   // valid logical immediate. So we simply invert the input whenever its low bit
   6581   // is set, and then we know that the rotated case can't arise.)
   6582 
   6583   if (value & 1) {
   6584     // If the low bit is 1, negate the value, and set a flag to remember that we
   6585     // did (so that we can adjust the return values appropriately).
   6586     negate = true;
   6587     value = ~value;
   6588   }
   6589 
   6590   if (width <= kWRegSize) {
   6591     // To handle 8/16/32-bit logical immediates, the easiest approach is to
   6592     // repeat the input value to fill a 64-bit word. The correct encoding of
   6593     // that as a logical immediate is also the correct encoding of the value.
   6594 
   6595     // Avoid assuming that the most-significant 56/48/32 bits are zero by
   6596     // shifting the value left and duplicating it.
   6597     for (unsigned bits = width; bits <= kWRegSize; bits *= 2) {
   6598       value <<= bits;
   6599       uint64_t mask = (UINT64_C(1) << bits) - 1;
   6600       value |= ((value >> bits) & mask);
   6601     }
   6602   }
   6603 
   6604   // The basic analysis idea: imagine our input word looks like this.
   6605   //
   6606   //    0011111000111110001111100011111000111110001111100011111000111110
   6607   //                                                          c  b    a
   6608   //                                                          |<--d-->|
   6609   //
   6610   // We find the lowest set bit (as an actual power-of-2 value, not its index)
   6611   // and call it a. Then we add a to our original number, which wipes out the
   6612   // bottommost stretch of set bits and replaces it with a 1 carried into the
   6613   // next zero bit. Then we look for the new lowest set bit, which is in
   6614   // position b, and subtract it, so now our number is just like the original
   6615   // but with the lowest stretch of set bits completely gone. Now we find the
   6616   // lowest set bit again, which is position c in the diagram above. Then we'll
   6617   // measure the distance d between bit positions a and c (using CLZ), and that
   6618   // tells us that the only valid logical immediate that could possibly be equal
   6619   // to this number is the one in which a stretch of bits running from a to just
   6620   // below b is replicated every d bits.
   6621   uint64_t a = LowestSetBit(value);
   6622   uint64_t value_plus_a = value + a;
   6623   uint64_t b = LowestSetBit(value_plus_a);
   6624   uint64_t value_plus_a_minus_b = value_plus_a - b;
   6625   uint64_t c = LowestSetBit(value_plus_a_minus_b);
   6626 
   6627   int d, clz_a, out_n;
   6628   uint64_t mask;
   6629 
   6630   if (c != 0) {
   6631     // The general case, in which there is more than one stretch of set bits.
   6632     // Compute the repeat distance d, and set up a bitmask covering the basic
   6633     // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
   6634     // of these cases the N bit of the output will be zero.
   6635     clz_a = CountLeadingZeros(a, kXRegSize);
   6636     int clz_c = CountLeadingZeros(c, kXRegSize);
   6637     d = clz_a - clz_c;
   6638     mask = ((UINT64_C(1) << d) - 1);
   6639     out_n = 0;
   6640   } else {
   6641     // Handle degenerate cases.
   6642     //
   6643     // If any of those 'find lowest set bit' operations didn't find a set bit at
   6644     // all, then the word will have been zero thereafter, so in particular the
   6645     // last lowest_set_bit operation will have returned zero. So we can test for
   6646     // all the special case conditions in one go by seeing if c is zero.
   6647     if (a == 0) {
   6648       // The input was zero (or all 1 bits, which will come to here too after we
   6649       // inverted it at the start of the function), for which we just return
   6650       // false.
   6651       return false;
   6652     } else {
   6653       // Otherwise, if c was zero but a was not, then there's just one stretch
   6654       // of set bits in our word, meaning that we have the trivial case of
   6655       // d == 64 and only one 'repetition'. Set up all the same variables as in
   6656       // the general case above, and set the N bit in the output.
   6657       clz_a = CountLeadingZeros(a, kXRegSize);
   6658       d = 64;
   6659       mask = ~UINT64_C(0);
   6660       out_n = 1;
   6661     }
   6662   }
   6663 
   6664   // If the repeat period d is not a power of two, it can't be encoded.
   6665   if (!IsPowerOf2(d)) {
   6666     return false;
   6667   }
   6668 
   6669   if (((b - a) & ~mask) != 0) {
   6670     // If the bit stretch (b - a) does not fit within the mask derived from the
   6671     // repeat period, then fail.
   6672     return false;
   6673   }
   6674 
   6675   // The only possible option is b - a repeated every d bits. Now we're going to
   6676   // actually construct the valid logical immediate derived from that
   6677   // specification, and see if it equals our original input.
   6678   //
   6679   // To repeat a value every d bits, we multiply it by a number of the form
   6680   // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
   6681   // be derived using a table lookup on CLZ(d).
   6682   static const uint64_t multipliers[] = {
   6683       0x0000000000000001UL,
   6684       0x0000000100000001UL,
   6685       0x0001000100010001UL,
   6686       0x0101010101010101UL,
   6687       0x1111111111111111UL,
   6688       0x5555555555555555UL,
   6689   };
   6690   uint64_t multiplier = multipliers[CountLeadingZeros(d, kXRegSize) - 57];
   6691   uint64_t candidate = (b - a) * multiplier;
   6692 
   6693   if (value != candidate) {
   6694     // The candidate pattern doesn't match our input value, so fail.
   6695     return false;
   6696   }
   6697 
   6698   // We have a match! This is a valid logical immediate, so now we have to
   6699   // construct the bits and pieces of the instruction encoding that generates
   6700   // it.
   6701 
   6702   // Count the set bits in our basic stretch. The special case of clz(0) == -1
   6703   // makes the answer come out right for stretches that reach the very top of
   6704   // the word (e.g. numbers like 0xffffc00000000000).
   6705   int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSize);
   6706   int s = clz_a - clz_b;
   6707 
   6708   // Decide how many bits to rotate right by, to put the low bit of that basic
   6709   // stretch in position a.
   6710   int r;
   6711   if (negate) {
   6712     // If we inverted the input right at the start of this function, here's
   6713     // where we compensate: the number of set bits becomes the number of clear
   6714     // bits, and the rotation count is based on position b rather than position
   6715     // a (since b is the location of the 'lowest' 1 bit after inversion).
   6716     s = d - s;
   6717     r = (clz_b + 1) & (d - 1);
   6718   } else {
   6719     r = (clz_a + 1) & (d - 1);
   6720   }
   6721 
   6722   // Now we're done, except for having to encode the S output in such a way that
   6723   // it gives both the number of set bits and the length of the repeated
   6724   // segment. The s field is encoded like this:
   6725   //
   6726   //     imms    size        S
   6727   //    ssssss    64    UInt(ssssss)
   6728   //    0sssss    32    UInt(sssss)
   6729   //    10ssss    16    UInt(ssss)
   6730   //    110sss     8    UInt(sss)
   6731   //    1110ss     4    UInt(ss)
   6732   //    11110s     2    UInt(s)
   6733   //
   6734   // So we 'or' (2 * -d) with our computed (s - 1) to form imms.
   6735   if ((n != NULL) || (imm_s != NULL) || (imm_r != NULL)) {
   6736     *n = out_n;
   6737     *imm_s = ((2 * -d) | (s - 1)) & 0x3f;
   6738     *imm_r = r;
   6739   }
   6740 
   6741   return true;
   6742 }
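
        // Worked example (informal): value == 0x0f0f0f0f0f0f0f0f with width 64.
        // The low bit is set, so the value is inverted to 0xf0f0f0f0f0f0f0f0 and
        // negate is remembered. Then a == 1 << 4, b == 1 << 8 and c == 1 << 12,
        // giving d == 8 (an 8-bit repeating unit), and the candidate
        // 0xf0 * 0x0101010101010101 matches the inverted value. Here s == 4 both
        // before and after the negate adjustment, and r == (clz_b + 1) & 7 == 0,
        // so the outputs are n == 0, imm_s == 0b110011 and imm_r == 0: four set
        // bits repeated in every byte of the original value.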
   6743 
   6744 
   6745 LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
   6746   VIXL_ASSERT(rt.IsValid());
   6747   if (rt.IsRegister()) {
   6748     return rt.Is64Bits() ? LDR_x : LDR_w;
   6749   } else {
   6750     VIXL_ASSERT(rt.IsVRegister());
   6751     switch (rt.GetSizeInBits()) {
   6752       case kBRegSize:
   6753         return LDR_b;
   6754       case kHRegSize:
   6755         return LDR_h;
   6756       case kSRegSize:
   6757         return LDR_s;
   6758       case kDRegSize:
   6759         return LDR_d;
   6760       default:
   6761         VIXL_ASSERT(rt.IsQ());
   6762         return LDR_q;
   6763     }
   6764   }
   6765 }
   6766 
   6767 
   6768 LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
   6769   VIXL_ASSERT(rt.IsValid());
   6770   if (rt.IsRegister()) {
   6771     return rt.Is64Bits() ? STR_x : STR_w;
   6772   } else {
   6773     VIXL_ASSERT(rt.IsVRegister());
   6774     switch (rt.GetSizeInBits()) {
   6775       case kBRegSize:
   6776         return STR_b;
   6777       case kHRegSize:
   6778         return STR_h;
   6779       case kSRegSize:
   6780         return STR_s;
   6781       case kDRegSize:
   6782         return STR_d;
   6783       default:
   6784         VIXL_ASSERT(rt.IsQ());
   6785         return STR_q;
   6786     }
   6787   }
   6788 }
   6789 
   6790 
   6791 LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
   6792                                           const CPURegister& rt2) {
   6793   VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
   6794   USE(rt2);
   6795   if (rt.IsRegister()) {
   6796     return rt.Is64Bits() ? STP_x : STP_w;
   6797   } else {
   6798     VIXL_ASSERT(rt.IsVRegister());
   6799     switch (rt.GetSizeInBytes()) {
   6800       case kSRegSizeInBytes:
   6801         return STP_s;
   6802       case kDRegSizeInBytes:
   6803         return STP_d;
   6804       default:
   6805         VIXL_ASSERT(rt.IsQ());
   6806         return STP_q;
   6807     }
   6808   }
   6809 }
   6810 
   6811 
   6812 LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
   6813                                          const CPURegister& rt2) {
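          // A load-pair opcode differs from the corresponding store-pair
          // opcode only in the L bit, so it can be derived by OR-ing that bit
          // into the store encoding; the assertion below spot-checks this for
          // the W variant.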
   6814   VIXL_ASSERT((STP_w | LoadStorePairLBit) == LDP_w);
   6815   return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
   6816                                       LoadStorePairLBit);
   6817 }
   6818 
   6819 
   6820 LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
   6821     const CPURegister& rt, const CPURegister& rt2) {
   6822   VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
   6823   USE(rt2);
   6824   if (rt.IsRegister()) {
   6825     return rt.Is64Bits() ? STNP_x : STNP_w;
   6826   } else {
   6827     VIXL_ASSERT(rt.IsVRegister());
   6828     switch (rt.GetSizeInBytes()) {
   6829       case kSRegSizeInBytes:
   6830         return STNP_s;
   6831       case kDRegSizeInBytes:
   6832         return STNP_d;
   6833       default:
   6834         VIXL_ASSERT(rt.IsQ());
   6835         return STNP_q;
   6836     }
   6837   }
   6838 }
   6839 
   6840 
   6841 LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
   6842     const CPURegister& rt, const CPURegister& rt2) {
   6843   VIXL_ASSERT((STNP_w | LoadStorePairNonTemporalLBit) == LDNP_w);
   6844   return static_cast<LoadStorePairNonTemporalOp>(
   6845       StorePairNonTemporalOpFor(rt, rt2) | LoadStorePairNonTemporalLBit);
   6846 }
   6847 
   6848 
   6849 LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
   6850   if (rt.IsRegister()) {
   6851     return rt.IsX() ? LDR_x_lit : LDR_w_lit;
   6852   } else {
   6853     VIXL_ASSERT(rt.IsVRegister());
   6854     switch (rt.GetSizeInBytes()) {
   6855       case kSRegSizeInBytes:
   6856         return LDR_s_lit;
   6857       case kDRegSizeInBytes:
   6858         return LDR_d_lit;
   6859       default:
   6860         VIXL_ASSERT(rt.IsQ());
   6861         return LDR_q_lit;
   6862     }
   6863   }
   6864 }
   6865 
   6866 
   6867 bool Assembler::CPUHas(const CPURegister& rt) const {
   6868   // Core registers are available without any particular CPU features.
   6869   if (rt.IsRegister()) return true;
   6870   VIXL_ASSERT(rt.IsVRegister());
   6871   // The architecture does not allow FP and NEON to be implemented separately,
   6872   // but we can crudely categorise them based on register size, since FP only
   6873   // uses D, S and (occasionally) H registers.
   6874   if (rt.IsH() || rt.IsS() || rt.IsD()) {
   6875     return CPUHas(CPUFeatures::kFP) || CPUHas(CPUFeatures::kNEON);
   6876   }
   6877   VIXL_ASSERT(rt.IsB() || rt.IsQ());
   6878   return CPUHas(CPUFeatures::kNEON);
   6879 }
   6880 
   6881 
   6882 bool Assembler::CPUHas(const CPURegister& rt, const CPURegister& rt2) const {
   6883   // This is currently only used for loads and stores, where rt and rt2 must
   6884   // have the same size and type. We could extend this to cover other cases if
   6885   // necessary, but for now we can avoid checking both registers.
   6886   VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
   6887   USE(rt2);
   6888   return CPUHas(rt);
   6889 }
   6890 
   6891 
   6892 bool Assembler::CPUHas(SystemRegister sysreg) const {
   6893   switch (sysreg) {
   6894     case RNDR:
   6895     case RNDRRS:
   6896       return CPUHas(CPUFeatures::kRNG);
   6897     case FPCR:
   6898     case NZCV:
   6899       break;
   6900   }
   6901   return true;
   6902 }
   6903 
   6904 
   6905 }  // namespace aarch64
   6906 }  // namespace vixl