qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

tlb_helper.c (9350B)


/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"


/* Return true if the translation regime is using LPAE format page tables */
bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/*
 * Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);
    return regime_using_lpae_format(env, mmu_idx);
}

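/*
 * Merge the syndrome template recorded at translation time with the
 * runtime fault details (target EL, external abort, stage-2 walk,
 * access direction and fault status code) to form the final data
 * abort syndrome.
 */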
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome.  */
        syn |= template_syn;
    }
    return syn;
}

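/*
 * Return the FSR value in the format used by the regime the fault is
 * taken to (long-descriptor if the target EL is 2 or is AArch64, or
 * if stage 1 uses LPAE; short-descriptor otherwise), and store the
 * fault status code for syndrome construction in *ret_fsc.
 */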
static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
                                int target_el, int mmu_idx, uint32_t *ret_fsc)
{
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
    uint32_t fsr, fsc;

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    *ret_fsc = fsc;
    return fsr;
}

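/*
 * Deliver a translation fault to the guest: choose the target EL
 * (stage 2 faults are always taken to EL2, recording the faulting
 * IPA in HPFAR_EL2), build the matching FSR and syndrome, and raise
 * a prefetch or data abort. Does not return.
 */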
static G_NORETURN
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                       MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }
    same_el = (arm_current_el(env) == target_el);

    fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

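/* Raise a prefetch abort with a pcalignment syndrome for a misaligned PC */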
void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
    int target_el = exception_target_el(env);
    int mmu_idx = cpu_mmu_index(env, true);
    uint32_t fsc;

    env->exception.vaddress = pc;

    /*
     * Note that the fsc is not applicable to this exception,
     * since the syndrome is pcalignment, not insn_abort.
     */
    env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc);
    raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

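/*
 * TLB fill hook: translate the address with get_phys_addr() and
 * install the mapping in the TLB on success; on failure, either
 * report the miss to the caller (when probing) or deliver the
 * fault to the guest.
 */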
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo local_fi, *fi;
    int ret;

    /*
     * Allow S1_ptw_translate to see any fault generated here.
     * Since this may recurse, read and clear.
     */
    fi = cpu->env.tlb_fi;
    if (fi) {
        cpu->env.tlb_fi = NULL;
    } else {
        fi = memset(&local_fi, 0, sizeof(local_fi));
    }

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB.  On success, return true.  Otherwise, if probing,
     * return false.  Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &res, fi);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
            res.f.phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }

        res.f.pte_attrs = res.cacheattrs.attrs;
        res.f.shareability = res.cacheattrs.shareability;

        tlb_set_page_full(cs, mmu_idx, address, &res.f);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
    }
}
#else
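/*
 * User-only: a host SIGSEGV for a guest access is delivered as a
 * translation fault (no mapping) or a permission fault, at level 3.
 */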
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}

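/*
 * User-only: a host SIGBUS for a guest access is treated as a
 * misaligned access and reported as an alignment fault.
 */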
void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
#endif /* !defined(CONFIG_USER_ONLY) */