duckstation

duckstation, archived from the revision just before upstream changed it to a proprietary software project; this version is the libre one
git clone https://git.neptards.moe/u3shit/duckstation.git

operands-aarch64.cc (14444B)


// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "operands-aarch64.h"

namespace vixl {
namespace aarch64 {

// CPURegList utilities.
CPURegister CPURegList::PopLowestIndex(RegList mask) {
  RegList list = list_ & mask;
  if (list == 0) return NoCPUReg;
  int index = CountTrailingZeros(list);
  VIXL_ASSERT(((static_cast<RegList>(1) << index) & list) != 0);
  Remove(index);
  return CPURegister(index, size_, type_);
}


CPURegister CPURegList::PopHighestIndex(RegList mask) {
  RegList list = list_ & mask;
  if (list == 0) return NoCPUReg;
  int index = CountLeadingZeros(list);
  index = kRegListSizeInBits - 1 - index;
  VIXL_ASSERT(((static_cast<RegList>(1) << index) & list) != 0);
  Remove(index);
  return CPURegister(index, size_, type_);
}
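
// For example, assuming the usual register aliases (x0, x2, lr) and the
// all-ones default value of `mask` declared in the header:
//   CPURegList list(x0, x2, lr);
//   list.PopLowestIndex();   // Returns x0; the list becomes {x2, lr}.
//   list.PopHighestIndex();  // Returns lr; the list becomes {x2}.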


bool CPURegList::IsValid() const {
  if (type_ == CPURegister::kNoRegister) {
    // We can't use IsEmpty here because that asserts IsValid().
    return list_ == 0;
  } else {
    bool is_valid = true;
    // Try to create a CPURegister for each element in the list.
    for (int i = 0; i < kRegListSizeInBits; i++) {
      if (((list_ >> i) & 1) != 0) {
        is_valid &= CPURegister(i, size_, type_).IsValid();
      }
    }
    return is_valid;
  }
}


void CPURegList::RemoveCalleeSaved() {
  if (GetType() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(GetRegisterSizeInBits()));
  } else if (GetType() == CPURegister::kVRegister) {
    Remove(GetCalleeSavedV(GetRegisterSizeInBits()));
  } else {
    VIXL_ASSERT(GetType() == CPURegister::kNoRegister);
    VIXL_ASSERT(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}


CPURegList CPURegList::Union(const CPURegList& list_1,
                             const CPURegList& list_2,
                             const CPURegList& list_3) {
  return Union(list_1, Union(list_2, list_3));
}


CPURegList CPURegList::Union(const CPURegList& list_1,
                             const CPURegList& list_2,
                             const CPURegList& list_3,
                             const CPURegList& list_4) {
  return Union(Union(list_1, list_2), Union(list_3, list_4));
}


CPURegList CPURegList::Intersection(const CPURegList& list_1,
                                    const CPURegList& list_2,
                                    const CPURegList& list_3) {
  return Intersection(list_1, Intersection(list_2, list_3));
}


CPURegList CPURegList::Intersection(const CPURegList& list_1,
                                    const CPURegList& list_2,
                                    const CPURegList& list_3,
                                    const CPURegList& list_4) {
  return Intersection(Intersection(list_1, list_2),
                      Intersection(list_3, list_4));
}


CPURegList CPURegList::GetCalleeSaved(unsigned size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}


CPURegList CPURegList::GetCalleeSavedV(unsigned size) {
  return CPURegList(CPURegister::kVRegister, size, 8, 15);
}


CPURegList CPURegList::GetCallerSaved(unsigned size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  // Do not use lr directly to avoid initialisation order fiasco bugs for users.
  list.Combine(Register(30, kXRegSize));
  return list;
}


CPURegList CPURegList::GetCallerSavedV(unsigned size) {
  // Registers d0-d7 and d16-d31 are caller-saved.
  CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
  return list;
}


const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV();
const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV();
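
// A minimal sketch of composing these lists, assuming the Remove(CPURegister)
// overload and the x0/x1 aliases declared in the header:
//   CPURegList scratch = CPURegList::GetCallerSaved();
//   scratch.Remove(x0);  // Keep x0 out of the scratch pool.
//   scratch.Remove(x1);
//   CPURegList either = CPURegList::Union(kCalleeSaved, kCallerSaved);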

// Operand.
Operand::Operand(int64_t immediate)
    : immediate_(immediate),
      reg_(NoReg),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}

Operand::Operand(IntegerOperand immediate)
    : immediate_(immediate.AsIntN(64)),
      reg_(NoReg),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}

Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(shift != MSL);
  VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
  VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
  VIXL_ASSERT(!reg.IsSP());
}


Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(reg.IsValid());
  VIXL_ASSERT(shift_amount <= 4);
  VIXL_ASSERT(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}


bool Operand::IsImmediate() const { return reg_.Is(NoReg); }


bool Operand::IsPlainRegister() const {
  return reg_.IsValid() &&
         (((shift_ == NO_SHIFT) && (extend_ == NO_EXTEND)) ||
          // No-op shifts.
          ((shift_ != NO_SHIFT) && (shift_amount_ == 0)) ||
          // No-op extend operations.
          // We can't include [US]XTW here without knowing more about the
          // context; they are only no-ops for 32-bit operations.
          //
          // For example, this operand could be replaced with w1:
          //   __ Add(w0, w0, Operand(w1, UXTW));
          // However, no plain register can replace it in this context:
          //   __ Add(x0, x0, Operand(w1, UXTW));
          (((extend_ == UXTX) || (extend_ == SXTX)) && (shift_amount_ == 0)));
}
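
// To illustrate the cases above, assuming the defaulted Shift arguments from
// the header:
//   Operand(x1).IsPlainRegister();          // true
//   Operand(x1, LSL, 0).IsPlainRegister();  // true: LSL #0 is a no-op.
//   Operand(x1, LSL, 4).IsPlainRegister();  // false
//   Operand(w1, UXTW).IsPlainRegister();    // false: only a 32-bit no-op.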


bool Operand::IsShiftedRegister() const {
  return reg_.IsValid() && (shift_ != NO_SHIFT);
}


bool Operand::IsExtendedRegister() const {
  return reg_.IsValid() && (extend_ != NO_EXTEND);
}


bool Operand::IsZero() const {
  if (IsImmediate()) {
    return GetImmediate() == 0;
  } else {
    return GetRegister().IsZero();
  }
}


Operand Operand::ToExtendedRegister() const {
  VIXL_ASSERT(IsShiftedRegister());
  VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
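
// For example:
//   Operand(x1, LSL, 2).ToExtendedRegister();  // Equivalent to (x1, UXTX, 2).
//   Operand(w1, LSL, 2).ToExtendedRegister();  // Equivalent to (w1, UXTW, 2).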


// MemOperand
MemOperand::MemOperand()
    : base_(NoReg),
      regoffset_(NoReg),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND) {}


MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      offset_(offset),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  VIXL_ASSERT(!regoffset.IsSP());
  VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Shift shift,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
  VIXL_ASSERT(shift == LSL);
}


MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.GetImmediate();
  } else if (offset.IsShiftedRegister()) {
    VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));

    regoffset_ = offset.GetRegister();
    shift_ = offset.GetShift();
    shift_amount_ = offset.GetShiftAmount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
    VIXL_ASSERT(shift_ == LSL);
  } else {
    VIXL_ASSERT(offset.IsExtendedRegister());
    VIXL_ASSERT(addrmode == Offset);

    regoffset_ = offset.GetRegister();
    extend_ = offset.GetExtend();
    shift_amount_ = offset.GetShiftAmount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    VIXL_ASSERT(!regoffset_.IsSP());
    VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
  }
}
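
// For example, assuming the default Offset addressing mode from the header,
// each line below constructs the same operand as the call in its comment:
//   MemOperand(x0, Operand(42));          // MemOperand(x0, 42)
//   MemOperand(x0, Operand(x1, LSL, 3));  // MemOperand(x0, x1, LSL, 3)
//   MemOperand(x0, Operand(w1, SXTW));    // MemOperand(x0, w1, SXTW)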


bool MemOperand::IsPlainRegister() const {
  return IsImmediateOffset() && (GetOffset() == 0);
}


bool MemOperand::IsEquivalentToPlainRegister() const {
  if (regoffset_.Is(NoReg)) {
    // Immediate offset, pre-index or post-index.
    return GetOffset() == 0;
  } else if (GetRegisterOffset().IsZero()) {
    // Zero register offset, pre-index or post-index.
    // We can ignore shift and extend options because they all result in zero.
    return true;
  }
  return false;
}
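
// For example, assuming the defaulted constructor arguments from the header:
//   MemOperand(x0).IsEquivalentToPlainRegister();               // true
//   MemOperand(x0, xzr).IsEquivalentToPlainRegister();          // true
//   MemOperand(x0, xzr, LSL, 3).IsEquivalentToPlainRegister();  // true
//   MemOperand(x0, 8).IsEquivalentToPlainRegister();            // false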


bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}


bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}

bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }
bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }

bool MemOperand::IsImmediatePreIndex() const {
  return IsPreIndex() && regoffset_.Is(NoReg);
}

bool MemOperand::IsImmediatePostIndex() const {
  return IsPostIndex() && regoffset_.Is(NoReg);
}

void MemOperand::AddOffset(int64_t offset) {
  VIXL_ASSERT(IsImmediateOffset());
  offset_ += offset;
}


bool SVEMemOperand::IsValid() const {
#ifdef VIXL_DEBUG
  {
    // It should not be possible for an SVEMemOperand to match multiple types.
    int count = 0;
    if (IsScalarPlusImmediate()) count++;
    if (IsScalarPlusScalar()) count++;
    if (IsScalarPlusVector()) count++;
    if (IsVectorPlusImmediate()) count++;
    if (IsVectorPlusScalar()) count++;
    if (IsVectorPlusVector()) count++;
    VIXL_ASSERT(count <= 1);
  }
#endif

  // We can't have a register _and_ an immediate offset.
  if ((offset_ != 0) && (!regoffset_.IsNone())) return false;

  if (shift_amount_ != 0) {
    // Only shift and extend modifiers can take a shift amount.
    switch (mod_) {
      case NO_SVE_OFFSET_MODIFIER:
      case SVE_MUL_VL:
        return false;
      case SVE_LSL:
      case SVE_UXTW:
      case SVE_SXTW:
        // Fall through.
        break;
    }
  }

  return IsScalarPlusImmediate() || IsScalarPlusScalar() ||
         IsScalarPlusVector() || IsVectorPlusImmediate() ||
         IsVectorPlusScalar() || IsVectorPlusVector();
}
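
// In assembly terms, a non-zero shift amount only appears with the LSL, UXTW
// and SXTW modifiers. For example:
//   [x0, x1, LSL #2]     // Valid: scalar-plus-scalar with a shift.
//   [x0, z1.s, SXTW #2]  // Valid: scalar-plus-vector with a shift.
// A MUL VL offset such as [x0, #4, MUL VL] never takes a shift amount.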


bool SVEMemOperand::IsEquivalentToScalar() const {
  if (IsScalarPlusImmediate()) {
    return GetImmediateOffset() == 0;
  }
  if (IsScalarPlusScalar()) {
    // We can ignore the shift because it will still result in zero.
    return GetScalarOffset().IsZero();
  }
  // Forms involving vectors are never equivalent to a single scalar.
  return false;
}

bool SVEMemOperand::IsPlainRegister() const {
  if (IsScalarPlusImmediate()) {
    return GetImmediateOffset() == 0;
  }
  return false;
}

GenericOperand::GenericOperand(const CPURegister& reg)
    : cpu_register_(reg), mem_op_size_(0) {
  if (reg.IsQ()) {
    VIXL_ASSERT(reg.GetSizeInBits() > static_cast<int>(kXRegSize));
    // Support for Q registers is not implemented yet.
    VIXL_UNIMPLEMENTED();
  }
}


GenericOperand::GenericOperand(const MemOperand& mem_op, size_t mem_op_size)
    : cpu_register_(NoReg), mem_op_(mem_op), mem_op_size_(mem_op_size) {
  if (mem_op_size_ > kXRegSizeInBytes) {
    // We only support generic operands up to the size of X registers.
    VIXL_UNIMPLEMENTED();
  }
}

bool GenericOperand::Equals(const GenericOperand& other) const {
  if (!IsValid() || !other.IsValid()) {
    // Two invalid generic operands are considered equal.
    return !IsValid() && !other.IsValid();
  }
  if (IsCPURegister() && other.IsCPURegister()) {
    return GetCPURegister().Is(other.GetCPURegister());
  } else if (IsMemOperand() && other.IsMemOperand()) {
    return GetMemOperand().Equals(other.GetMemOperand()) &&
           (GetMemOperandSizeInBytes() == other.GetMemOperandSizeInBytes());
  }
  return false;
}
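
// For example:
//   GenericOperand(x0).Equals(GenericOperand(x0));  // true
//   GenericOperand(x0).Equals(GenericOperand(w0));  // false: sizes differ.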
}  // namespace aarch64
}  // namespace vixl