qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

mmu-hash64.c (35686B)


/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"
#include "hw/hw.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "helper_regs.h"

#ifdef CONFIG_TCG
#include "exec/helper-proto.h"
#endif

/* #define DEBUG_SLB */

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

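    /*
     * Build both candidate ESID tags up front; whether the 256M or the
     * 1T form applies depends on the B field of an entry's VSID, which
     * is only known once each slot is examined.
     */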
    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                    PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /*
         * We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code.
         */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

void dump_slb(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    qemu_printf("SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

#ifdef CONFIG_TCG
void helper_SLBIA(CPUPPCState *env, uint32_t ih)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int starting_entry;
    int n;

    /*
     * slbia must always flush the entire TLB (which is the equivalent
     * of the ERAT in the ppc architecture). Matching on SLB_ESID_V is
     * not good enough, because slbmte can overwrite a valid SLB entry
     * without flushing its lookaside information.
     *
     * It would be possible to keep the TLB in sync with the SLB by
     * flushing when a valid entry is overwritten by slbmte, so that
     * slbia would only have to flush when it evicts a valid SLB entry.
     * However, slbmte is expected to be more common than slbia, and
     * slbia usually does evict valid SLB entries, so that tradeoff is
     * unlikely to be a good one.
     *
     * ISA v2.05 introduced the IH field with values 0,1,2,6. These all
     * invalidate the same SLB entries (everything but entry 0), but
     * differ in what "lookaside information" is invalidated. TCG can
     * ignore this and flush everything.
     *
     * ISA v3.0 introduced the additional values 3,4,7, which change
     * what SLBs are invalidated.
     */

    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;

    starting_entry = 1; /* default for IH=0,1,2,6 */

    if (env->mmu_model == POWERPC_MMU_3_00) {
        switch (ih) {
        case 0x7:
            /* invalidate no SLBs, but all lookaside information */
            return;

        case 0x3:
        case 0x4:
            /* also considers SLB entry 0 */
            starting_entry = 0;
            break;

        case 0x5:
            /* treat undefined values as ih==0, and warn */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "slbia undefined IH field %u.\n", ih);
            break;

        default:
            /* 0,1,2,6 */
            break;
        }
    }

    for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (!(slb->esid & SLB_ESID_V)) {
            continue;
        }
        if (env->mmu_model == POWERPC_MMU_3_00) {
            if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
                /* preserves entries with a class value of 0 */
                continue;
            }
        }

        slb->esid &= ~SLB_ESID_V;
    }
}

#if defined(TARGET_PPC64)
void helper_SLBIAG(CPUPPCState *env, target_ulong rs, uint32_t l)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int n;

    /*
     * slbiag must always flush the entire TLB (which is the equivalent
     * of the ERAT in the ppc architecture). Matching on SLB_ESID_V is
     * not good enough, because slbmte can overwrite a valid SLB entry
     * without flushing its lookaside information.
     *
     * It would be possible to keep the TLB in sync with the SLB by
     * flushing when a valid entry is overwritten by slbmte, so that
     * slbiag would only have to flush when it evicts a valid SLB
     * entry. However, slbmte is expected to be more common than
     * slbiag, and slbiag usually does evict valid SLB entries, so
     * that tradeoff is unlikely to be a good one.
     */
    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];
        slb->esid &= ~SLB_ESID_V;
    }
}
#endif

static void __helper_slbie(CPUPPCState *env, target_ulong addr,
                           target_ulong global)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /*
         * XXX: given the fact that segment size is 256 MB or 1 TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush |=
            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
    }
}

void helper_SLBIE(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, false);
}

void helper_SLBIEG(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, true);
}
#endif

int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const PPCHash64SegmentPageSizes *sps = NULL;
    int i;

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

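    /*
     * Match the L||LP bits of the new VSID against the supported
     * segment page sizes to find the base page size for this segment.
     */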
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
            " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

#ifdef CONFIG_TCG
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;

    if (!msr_is_64bit(env, env->msr)) {
        rb &= 0xffffffff;
    }
    slb = slb_lookup(cpu, rb);
    if (slb == NULL) {
        *rt = (target_ulong)-1ul;
    } else {
        *rt = slb->vsid;
    }
    return 0;
}

void helper_SLBMTE(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = env_archcpu(env);

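    /*
     * RB holds the ESID (and valid bit) in its upper bits and the SLB
     * slot index in its low 12 bits.
     */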
    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
}

target_ulong helper_SLBMFEE(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_SLBFEE(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_SLBMFEV(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
#endif

/* Check No-Execute or Guarded Storage */
static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
                                              ppc_hash_pte64_t pte)
{
    /* Exec permissions CANNOT take away read or write permissions */
    return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
            PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

/* Check Basic Storage Protection */
static int ppc_hash64_pte_prot(int mmu_idx,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    unsigned pp, key;
    /*
     * Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases
     */
    int prot = 0;

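    /* Select the Kp key bit from the SLBE in problem state, Ks otherwise */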
    key = !!(mmuidx_pr(mmu_idx) ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ | PAGE_EXEC;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ | PAGE_EXEC;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        }
    }

    return prot;
}

/* Check the instruction access permissions specified in the IAMR */
static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
{
    CPUPPCState *env = &cpu->env;
    int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;

    /*
     * An instruction fetch is permitted if the IAMR bit is 0.
     * If the bit is set, return PAGE_READ | PAGE_WRITE: the IAMR can
     * only take away EXEC permission, never READ or WRITE permissions.
     * If the bit is clear, return PAGE_READ | PAGE_WRITE | PAGE_EXEC,
     * since EXEC is allowed.
     */
    return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
                               PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    switch (env->mmu_model) {
    /*
     * MMU version 2.07 and later support the IAMR.
     * Mask prot with the IAMR instruction permissions: the mask
     * returned lacks PAGE_EXEC if the IAMR forbids instruction
     * access (thus clearing that bit), and includes PAGE_EXEC if
     * fetches are allowed (leaving prot unchanged).
     */
    case POWERPC_MMU_2_07:
    case POWERPC_MMU_3_00:
        prot &= ppc_hash64_iamr_prot(cpu, key);
        break;
    default:
        break;
    }

    return prot;
}

const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
                                             hwaddr ptex, int n)
{
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
    hwaddr base;
    hwaddr plen = n * HASH_PTE_SIZE_64;
    const ppc_hash_pte64_t *hptes;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        return vhc->map_hptes(cpu->vhyp, ptex, n);
    }
    base = ppc_hash64_hpt_base(cpu);

    if (!base) {
        return NULL;
    }

    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
                              MEMTXATTRS_UNSPECIFIED);
    if (plen < (n * HASH_PTE_SIZE_64)) {
        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    }
    return hptes;
}

void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
                            hwaddr ptex, int n)
{
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
        return;
    }

    address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
                        false, n * HASH_PTE_SIZE_64);
}

static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

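    /*
     * HPTE64_V_LARGE is set: the actual page size is encoded in the
     * low-order bits of the RPN field, so try each of the segment's
     * supported large page encodings in turn.
     */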
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64PageSize *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}

static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
{
    /* Insert B into pte0 */
    *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
            ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
             (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));

    /* Remove B from pte1 */
    *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
}

static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     const PPCHash64SegmentPageSizes *sps,
                                     target_ulong ptem,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    int i;
    const ppc_hash_pte64_t *pteg;
    target_ulong pte0, pte1;
    target_ulong ptex;

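    /*
     * A PTE group holds HPTES_PER_GROUP (8) HPTEs; turn the hash into
     * the index of the group's first PTE.
     */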
    ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
    pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
    if (!pteg) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_hpte0(cpu, pteg, i);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash64_hpte1(cpu, pteg, i);

        /* Convert format if necessary */
        if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
            ppc64_v3_new_to_old_hpte(&pte0, &pte1);
        }

        /* This compares V, B, H (secondary) and the AVPN */
        if (HPTE64_V_COMPARE(pte0, ptem)) {
            *pshift = hpte_page_shift(sps, pte0, pte1);
            /*
             * If there is no match, ignore the PTE, it could simply
             * be for a different segment size encoding and the
             * architecture specifies we should not match. Linux will
             * potentially leave behind PTEs for the wrong base page
             * size when demoting segments.
             */
            if (*pshift == 0) {
                continue;
            }
            /*
             * We don't do anything with pshift yet as qemu TLB only
             * deals with 4K pages anyway
             */
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
            return ptex + i;
        }
    }
    ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    /* We didn't find a valid entry. */
    return -1;
}

static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    hwaddr hash, ptex;
    uint64_t vsid, epnmask, epn, ptem;
    const PPCHash64SegmentPageSizes *sps = slb->sps;

    /*
     * The SLB store path should prevent any bad page size encodings
     * getting in there, so:
     */
    assert(sps);

    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    if (env->spr[SPR_LPCR] & LPCR_ISL) {
        /* We assume that when using TCG, 4K is the first SPS entry */
        sps = &cpu->hash64_opts->sps[0];
        assert(sps->page_shift == 12);
    }

    epnmask = ~((1ULL << sps->page_shift) - 1);

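    /*
     * Compute the hash as architected: for 256M segments,
     * hash = VSID ^ (EPN >> page_shift); for 1T segments,
     * hash = VSID ^ (VSID << 25) ^ (EPN >> page_shift). The secondary
     * PTEG is located at the ones-complement of the hash.
     */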
    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> sps->page_shift);
    }
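    /*
     * Assemble the match tag: the VSID bits from the SLBE combined
     * with the abbreviated virtual page number (AVPN) from the EPN.
     */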
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    ptem |= HPTE64_V_VALID;

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
            "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
            "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
            vsid, ptem, hash);
    ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

    if (ptex == -1) {
        /* Secondary PTEG lookup */
        ptem |= HPTE64_V_SECONDARY;
        qemu_log_mask(CPU_LOG_MMU,
                "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
                ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);

        ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    }

    return ptex;
}

unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            return shift;
        }
    }

    return 0;
}

static bool ppc_hash64_use_vrma(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_3_00:
        /*
         * ISAv3.0 (POWER9) always uses VRMA; the VPM0 field and RMOR
         * register no longer exist
         */
        return true;

    default:
        return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
    }
}

static void ppc_hash64_set_isi(CPUState *cs, int mmu_idx, uint64_t error_code)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (!mmuidx_real(mmu_idx)) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !mmuidx_hv(mmu_idx)) {
        cs->exception_index = POWERPC_EXCP_HISI;
    } else {
        cs->exception_index = POWERPC_EXCP_ISI;
    }
    env->error_code = error_code;
}

static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t dar,
                               uint64_t dsisr)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (!mmuidx_real(mmu_idx)) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !mmuidx_hv(mmu_idx)) {
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDAR] = dar;
        env->spr[SPR_HDSISR] = dsisr;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DAR] = dar;
        env->spr[SPR_DSISR] = dsisr;
    }
    env->error_code = 0;
}

static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    /* The HW performs a non-atomic byte update */
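    /* (R is bit 8 of pte1, so set bit 0 of the byte holding bits 15:8) */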
    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}

static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    /* The HW performs a non-atomic byte update */
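    /* (C is bit 7 of pte1, so set bit 7 of its low byte) */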
    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}

static target_ulong rmls_limit(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    /*
     * In theory the meanings of RMLS values are implementation
     * dependent.  In practice, this seems to have been the set from
     * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
     *
     * Unsupported values mean the OS has shot itself in the
     * foot. Return a 0-sized RMA in this case, which we expect
     * to trigger an immediate DSI or ISI
     */
    static const target_ulong rma_sizes[16] = {
        [0] = 256 * GiB,
        [1] = 16 * GiB,
        [2] = 1 * GiB,
        [3] = 64 * MiB,
        [4] = 256 * MiB,
        [7] = 128 * MiB,
        [8] = 32 * MiB,
    };
    target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;

    return rma_sizes[rmls];
}

static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
{
    CPUPPCState *env = &cpu->env;
    target_ulong lpcr = env->spr[SPR_LPCR];
    uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
    target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
    int i;

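    /*
     * LPCR[VRMASD] supplies the L||LP page size encoding for the
     * virtual real mode area; look for a matching supported segment
     * page size, as ppc_store_slb does for ordinary SLB entries.
     */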
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];

        if (!sps->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
            slb->esid = SLB_ESID_V;
            slb->vsid = vsid;
            slb->sps = sps;
            return 0;
        }
    }

    error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
                 TARGET_FMT_lx, lpcr);

    return -1;
}

bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                      hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
                      bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t vrma_slbe;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr ptex;
    ppc_hash_pte64_t pte;
    int exec_prot, pp_prot, amr_prot, prot;
    int need_prot;
    hwaddr raddr;

    /*
     * Note on LPCR usage: 970 uses HID4, but our special variant of
     * store_spr copies relevant fields into env->spr[SPR_LPCR].
     * Similarly we filter unimplemented bits when storing into LPCR
     * depending on the MMU version. This code can thus just use the
     * LPCR "as-is".
     */

    /* 1. Handle real mode accesses */
    if (mmuidx_real(mmu_idx)) {
        /*
         * Translation is supposedly "off", but in real mode the top 4
         * effective address bits are (mostly) ignored
         */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        if (cpu->vhyp) {
            /*
             * In virtual hypervisor mode, there's nothing to do:
             *   EA == GPA == qemu guest address
             */
        } else if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            /* In HV mode, add HRMOR if top EA bit is clear */
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        } else if (ppc_hash64_use_vrma(env)) {
            /* Emulated VRMA mode */
            slb = &vrma_slbe;
            if (build_vrma_slbe(cpu, slb) != 0) {
                /* Invalid VRMA setup, machine check */
                if (guest_visible) {
                    cs->exception_index = POWERPC_EXCP_MCHECK;
                    env->error_code = 0;
                }
                return false;
            }

            goto skip_slb_search;
        } else {
            target_ulong limit = rmls_limit(cpu);

            /* Emulated old-style RMO mode, bounds check against RMLS */
            if (raddr >= limit) {
                if (!guest_visible) {
                    return false;
                }
                switch (access_type) {
                case MMU_INST_FETCH:
                    ppc_hash64_set_isi(cs, mmu_idx, SRR1_PROTFAULT);
                    break;
                case MMU_DATA_LOAD:
                    ppc_hash64_set_dsi(cs, mmu_idx, eaddr, DSISR_PROTFAULT);
                    break;
                case MMU_DATA_STORE:
                    ppc_hash64_set_dsi(cs, mmu_idx, eaddr,
                                       DSISR_PROTFAULT | DSISR_ISSTORE);
                    break;
                default:
                    g_assert_not_reached();
                }
                return false;
            }

            raddr |= env->spr[SPR_RMOR];
        }

        *raddrp = raddr;
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);
    if (!slb) {
        /* No entry found, check if in-memory segment tables are in use */
        if (ppc64_use_proc_tbl(cpu)) {
            /* TODO - Unsupported */
            error_report("Segment Table Support Unimplemented");
            exit(1);
        }
        /* Segment still not found, generate the appropriate interrupt */
        if (!guest_visible) {
            return false;
        }
        switch (access_type) {
        case MMU_INST_FETCH:
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
            break;
        case MMU_DATA_LOAD:
        case MMU_DATA_STORE:
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            break;
        default:
            g_assert_not_reached();
        }
        return false;
    }

 skip_slb_search:

    /* 3. Check for segment level no-execute violation */
    if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) {
        if (guest_visible) {
            ppc_hash64_set_isi(cs, mmu_idx, SRR1_NOEXEC_GUARD);
        }
        return false;
    }

    /* 4. Locate the PTE in the hash table */
    ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
    if (ptex == -1) {
        if (!guest_visible) {
            return false;
        }
        switch (access_type) {
        case MMU_INST_FETCH:
            ppc_hash64_set_isi(cs, mmu_idx, SRR1_NOPTE);
            break;
        case MMU_DATA_LOAD:
            ppc_hash64_set_dsi(cs, mmu_idx, eaddr, DSISR_NOPTE);
            break;
        case MMU_DATA_STORE:
            ppc_hash64_set_dsi(cs, mmu_idx, eaddr, DSISR_NOPTE | DSISR_ISSTORE);
            break;
        default:
            g_assert_not_reached();
        }
        return false;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at index %08" HWADDR_PRIx "\n", ptex);

    /* 5. Check access permissions */

    exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
    pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = exec_prot & pp_prot & amr_prot;

    need_prot = prot_for_access_type(access_type);
    if (need_prot & ~prot) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (!guest_visible) {
            return false;
        }
        if (access_type == MMU_INST_FETCH) {
            int srr1 = 0;
            if (PAGE_EXEC & ~exec_prot) {
                srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
            } else if (PAGE_EXEC & ~pp_prot) {
                srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
            }
            if (PAGE_EXEC & ~amr_prot) {
                srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
            }
            ppc_hash64_set_isi(cs, mmu_idx, srr1);
        } else {
            int dsisr = 0;
            if (need_prot & ~pp_prot) {
                dsisr |= DSISR_PROTFAULT;
            }
            if (access_type == MMU_DATA_STORE) {
                dsisr |= DSISR_ISSTORE;
            }
            if (need_prot & ~amr_prot) {
                dsisr |= DSISR_AMR;
            }
            ppc_hash64_set_dsi(cs, mmu_idx, eaddr, dsisr);
        }
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    if (!(pte.pte1 & HPTE64_R_R)) {
        ppc_hash64_set_r(cpu, ptex, pte.pte1);
    }
    if (!(pte.pte1 & HPTE64_R_C)) {
        if (access_type == MMU_DATA_STORE) {
            ppc_hash64_set_c(cpu, ptex, pte.pte1);
        } else {
            /*
             * Treat the page as read-only for now, so that a later write
             * will pass through this function again to set the C bit
             */
            prot &= ~PAGE_WRITE;
        }
    }

    /* 7. Determine the real address from the PTE */

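    /*
     * The RPN from the PTE gives the page frame; the low apshift bits
     * of the effective address give the offset within the (possibly
     * large) page.
     */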
    *raddrp = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
    *protp = prot;
    *psizep = apshift;
    return true;
}

void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}

#ifdef CONFIG_TCG
void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);

    ppc_store_lpcr(cpu, val);
}
#endif

void ppc_hash64_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!pcc->hash64_opts) {
        assert(!mmu_is_64bit(env->mmu_model));
        return;
    }

    cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
}

void ppc_hash64_finalize(PowerPCCPU *cpu)
{
    g_free(cpu->hash64_opts);
}

const PPCHash64Options ppc_hash64_opts_basic = {
    .flags = 0,
    .slb_size = 64,
    .sps = {
        { .page_shift = 12, /* 4K */
          .slb_enc = 0,
          .enc = { { .page_shift = 12, .pte_enc = 0 } }
        },
        { .page_shift = 24, /* 16M */
          .slb_enc = 0x100,
          .enc = { { .page_shift = 24, .pte_enc = 0 } }
        },
    },
};

const PPCHash64Options ppc_hash64_opts_POWER7 = {
    .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
    .slb_size = 32,
    .sps = {
        {
            .page_shift = 12, /* 4K */
            .slb_enc = 0,
            .enc = { { .page_shift = 12, .pte_enc = 0 },
                     { .page_shift = 16, .pte_enc = 0x7 },
                     { .page_shift = 24, .pte_enc = 0x38 }, },
        },
        {
            .page_shift = 16, /* 64K */
            .slb_enc = SLB_VSID_64K,
            .enc = { { .page_shift = 16, .pte_enc = 0x1 },
                     { .page_shift = 24, .pte_enc = 0x8 }, },
        },
        {
            .page_shift = 24, /* 16M */
            .slb_enc = SLB_VSID_16M,
            .enc = { { .page_shift = 24, .pte_enc = 0 }, },
        },
        {
            .page_shift = 34, /* 16G */
            .slb_enc = SLB_VSID_16G,
            .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
        },
    }
};