qemu

FORK: QEMU emulator
git clone https://git.neptards.moe/neptards/qemu.git

cpu.h (25606B)


      1 #ifndef SPARC_CPU_H
      2 #define SPARC_CPU_H
      3 
      4 #include "qemu/bswap.h"
      5 #include "cpu-qom.h"
      6 #include "exec/cpu-defs.h"
      7 #include "qemu/cpu-float.h"
      8 
      9 #if !defined(TARGET_SPARC64)
     10 #define TARGET_DPREGS 16
     11 #else
     12 #define TARGET_DPREGS 32
     13 #endif
     14 
     15 /*#define EXCP_INTERRUPT 0x100*/
     16 
     17 /* Windowed register indexes.  */
     18 enum {
     19     WREG_O0,
     20     WREG_O1,
     21     WREG_O2,
     22     WREG_O3,
     23     WREG_O4,
     24     WREG_O5,
     25     WREG_O6,
     26     WREG_O7,
     27 
     28     WREG_L0,
     29     WREG_L1,
     30     WREG_L2,
     31     WREG_L3,
     32     WREG_L4,
     33     WREG_L5,
     34     WREG_L6,
     35     WREG_L7,
     36 
     37     WREG_I0,
     38     WREG_I1,
     39     WREG_I2,
     40     WREG_I3,
     41     WREG_I4,
     42     WREG_I5,
     43     WREG_I6,
     44     WREG_I7,
     45 
     46     WREG_SP = WREG_O6,
     47     WREG_FP = WREG_I6,
     48 };
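
        /*
         * Illustrative sketch, not upstream code: the WREG_* values index the
         * 24 windowed registers of the current window through env->regwptr
         * (declared in CPUArchState below), once cpu_set_cwp() has pointed it
         * at the active window, e.g.:
         *
         *     target_ulong sp = env->regwptr[WREG_SP];   current window's %o6/%sp
         *     target_ulong fp = env->regwptr[WREG_FP];   current window's %i6/%fp
         */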
     49 
     50 /* trap definitions */
     51 #ifndef TARGET_SPARC64
     52 #define TT_TFAULT   0x01
     53 #define TT_ILL_INSN 0x02
     54 #define TT_PRIV_INSN 0x03
     55 #define TT_NFPU_INSN 0x04
     56 #define TT_WIN_OVF  0x05
     57 #define TT_WIN_UNF  0x06
     58 #define TT_UNALIGNED 0x07
     59 #define TT_FP_EXCP  0x08
     60 #define TT_DFAULT   0x09
     61 #define TT_TOVF     0x0a
     62 #define TT_EXTINT   0x10
     63 #define TT_CODE_ACCESS 0x21
     64 #define TT_UNIMP_FLUSH 0x25
     65 #define TT_DATA_ACCESS 0x29
     66 #define TT_DIV_ZERO 0x2a
     67 #define TT_NCP_INSN 0x24
     68 #define TT_TRAP     0x80
     69 #else
     70 #define TT_POWER_ON_RESET 0x01
     71 #define TT_TFAULT   0x08
     72 #define TT_CODE_ACCESS 0x0a
     73 #define TT_ILL_INSN 0x10
     74 #define TT_UNIMP_FLUSH TT_ILL_INSN
     75 #define TT_PRIV_INSN 0x11
     76 #define TT_NFPU_INSN 0x20
     77 #define TT_FP_EXCP  0x21
     78 #define TT_TOVF     0x23
     79 #define TT_CLRWIN   0x24
     80 #define TT_DIV_ZERO 0x28
     81 #define TT_DFAULT   0x30
     82 #define TT_DATA_ACCESS 0x32
     83 #define TT_UNALIGNED 0x34
     84 #define TT_PRIV_ACT 0x37
     85 #define TT_INSN_REAL_TRANSLATION_MISS 0x3e
     86 #define TT_DATA_REAL_TRANSLATION_MISS 0x3f
     87 #define TT_EXTINT   0x40
     88 #define TT_IVEC     0x60
     89 #define TT_TMISS    0x64
     90 #define TT_DMISS    0x68
     91 #define TT_DPROT    0x6c
     92 #define TT_SPILL    0x80
     93 #define TT_FILL     0xc0
     94 #define TT_WOTHER   (1 << 5)
     95 #define TT_TRAP     0x100
     96 #define TT_HTRAP    0x180
     97 #endif
     98 
     99 #define PSR_NEG_SHIFT 23
    100 #define PSR_NEG   (1 << PSR_NEG_SHIFT)
    101 #define PSR_ZERO_SHIFT 22
    102 #define PSR_ZERO  (1 << PSR_ZERO_SHIFT)
    103 #define PSR_OVF_SHIFT 21
    104 #define PSR_OVF   (1 << PSR_OVF_SHIFT)
    105 #define PSR_CARRY_SHIFT 20
    106 #define PSR_CARRY (1 << PSR_CARRY_SHIFT)
    107 #define PSR_ICC   (PSR_NEG|PSR_ZERO|PSR_OVF|PSR_CARRY)
    108 #if !defined(TARGET_SPARC64)
    109 #define PSR_EF    (1<<12)
    110 #define PSR_PIL   0xf00
    111 #define PSR_S     (1<<7)
    112 #define PSR_PS    (1<<6)
    113 #define PSR_ET    (1<<5)
    114 #define PSR_CWP   0x1f
    115 #endif
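
        /*
         * Illustrative sketch, not upstream code: on sparc32 the PSR is kept
         * split across CPUArchState fields (psr holds only the icc bits; cwp,
         * psrs, psrps, psret, psref and psrpil live in separate fields).  An
         * architectural PSR value is reassembled from them roughly like this
         * (see cpu_get_psr(), declared further down, for the real helper):
         *
         *     psr = (env->psr & PSR_ICC)           condition codes
         *         | (env->psref  ? PSR_EF : 0)     FPU enabled
         *         | (env->psrpil << 8)             interrupt level (PSR_PIL)
         *         | (env->psrs   ? PSR_S  : 0)     supervisor
         *         | (env->psrps  ? PSR_PS : 0)     previous supervisor
         *         | (env->psret  ? PSR_ET : 0)     traps enabled
         *         | env->cwp;                      current window (PSR_CWP)
         */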
    116 
    117 #define CC_SRC (env->cc_src)
    118 #define CC_SRC2 (env->cc_src2)
    119 #define CC_DST (env->cc_dst)
    120 #define CC_OP  (env->cc_op)
    121 
    122 /* Even though lazy evaluation of CPU condition codes tends to be less
    123  * important on RISC systems, where condition codes are only updated
    124  * when explicitly requested, the SPARC target still uses it because both
    125  * the 32-bit (icc) and 64-bit (xcc) condition codes have to be maintained.
    126  */
    127 enum {
    128     CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    129     CC_OP_FLAGS,   /* all cc are back in status register */
    130     CC_OP_DIV,     /* modify N, Z and V, C = 0 */
    131     CC_OP_ADD,     /* modify all flags, CC_DST = res, CC_SRC = src1 */
    132     CC_OP_ADDX,    /* modify all flags, CC_DST = res, CC_SRC = src1 */
    133     CC_OP_TADD,    /* modify all flags, CC_DST = res, CC_SRC = src1 */
    134     CC_OP_TADDTV,  /* modify all flags except V, CC_DST = res, CC_SRC = src1 */
    135     CC_OP_SUB,     /* modify all flags, CC_DST = res, CC_SRC = src1 */
    136     CC_OP_SUBX,    /* modify all flags, CC_DST = res, CC_SRC = src1 */
    137     CC_OP_TSUB,    /* modify all flags, CC_DST = res, CC_SRC = src1 */
    138     CC_OP_TSUBTV,  /* modify all flags except V, CC_DST = res, CC_SRC = src1 */
    139     CC_OP_LOGIC,   /* modify N and Z, C = V = 0, CC_DST = res */
    140     CC_OP_NB,
    141 };
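
        /*
         * Illustrative sketch, not upstream code: "lazy" here means the
         * translator only records the operands and result of the last
         * flag-setting instruction (CC_SRC = src1, CC_SRC2 = src2,
         * CC_DST = result) together with cc_op; the icc bits are only
         * reconstructed when something actually reads them.  For CC_OP_ADD
         * that reconstruction is roughly:
         *
         *     N = (int32_t)CC_DST < 0;
         *     Z = (uint32_t)CC_DST == 0;
         *     C = (uint32_t)CC_DST < (uint32_t)CC_SRC;
         *     V = ((CC_SRC ^ CC_SRC2 ^ -1) & (CC_SRC ^ CC_DST)) & (1U << 31);
         *
         * (the real helpers live in target/sparc/cc_helper.c and also compute
         * the 64-bit xcc variants on sparc64).
         */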
    142 
    143 /* Trap base register */
    144 #define TBR_BASE_MASK 0xfffff000
    145 
    146 #if defined(TARGET_SPARC64)
    147 #define PS_TCT   (1<<12) /* UA2007, impl.dep. trap on control transfer */
    148 #define PS_IG    (1<<11) /* v9, zero on UA2007 */
    149 #define PS_MG    (1<<10) /* v9, zero on UA2007 */
    150 #define PS_CLE   (1<<9) /* UA2007 */
    151 #define PS_TLE   (1<<8) /* UA2007 */
    152 #define PS_RMO   (1<<7)
    153 #define PS_RED   (1<<5) /* v9, zero on UA2007 */
    154 #define PS_PEF   (1<<4) /* enable fpu */
    155 #define PS_AM    (1<<3) /* address mask */
    156 #define PS_PRIV  (1<<2)
    157 #define PS_IE    (1<<1)
    158 #define PS_AG    (1<<0) /* v9, zero on UA2007 */
    159 
    160 #define FPRS_DL (1 << 0)
    161 #define FPRS_DU (1 << 1)
    162 #define FPRS_FEF (1 << 2)
    163 
    164 #define HS_PRIV  (1<<2)
    165 #endif
    166 
    167 /* Fcc */
    168 #define FSR_RD1        (1ULL << 31)
    169 #define FSR_RD0        (1ULL << 30)
    170 #define FSR_RD_MASK    (FSR_RD1 | FSR_RD0)
    171 #define FSR_RD_NEAREST 0
    172 #define FSR_RD_ZERO    FSR_RD0
    173 #define FSR_RD_POS     FSR_RD1
    174 #define FSR_RD_NEG     (FSR_RD1 | FSR_RD0)
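
        /*
         * Illustrative sketch, not upstream code: the FPU helpers translate
         * the FSR.RD field into a softfloat rounding mode for env->fp_status,
         * roughly:
         *
         *     switch (fsr & FSR_RD_MASK) {
         *     case FSR_RD_NEAREST: rnd = float_round_nearest_even; break;
         *     case FSR_RD_ZERO:    rnd = float_round_to_zero;      break;
         *     case FSR_RD_POS:     rnd = float_round_up;           break;
         *     case FSR_RD_NEG:     rnd = float_round_down;         break;
         *     }
         *     set_float_rounding_mode(rnd, &env->fp_status);
         */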
    175 
    176 #define FSR_NVM   (1ULL << 27)
    177 #define FSR_OFM   (1ULL << 26)
    178 #define FSR_UFM   (1ULL << 25)
    179 #define FSR_DZM   (1ULL << 24)
    180 #define FSR_NXM   (1ULL << 23)
    181 #define FSR_TEM_MASK (FSR_NVM | FSR_OFM | FSR_UFM | FSR_DZM | FSR_NXM)
    182 
    183 #define FSR_NVA   (1ULL << 9)
    184 #define FSR_OFA   (1ULL << 8)
    185 #define FSR_UFA   (1ULL << 7)
    186 #define FSR_DZA   (1ULL << 6)
    187 #define FSR_NXA   (1ULL << 5)
    188 #define FSR_AEXC_MASK (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
    189 
    190 #define FSR_NVC   (1ULL << 4)
    191 #define FSR_OFC   (1ULL << 3)
    192 #define FSR_UFC   (1ULL << 2)
    193 #define FSR_DZC   (1ULL << 1)
    194 #define FSR_NXC   (1ULL << 0)
    195 #define FSR_CEXC_MASK (FSR_NVC | FSR_OFC | FSR_UFC | FSR_DZC | FSR_NXC)
    196 
    197 #define FSR_FTT2   (1ULL << 16)
    198 #define FSR_FTT1   (1ULL << 15)
    199 #define FSR_FTT0   (1ULL << 14)
    200 //gcc warns about constant overflow for ~FSR_FTT_MASK
    201 //#define FSR_FTT_MASK (FSR_FTT2 | FSR_FTT1 | FSR_FTT0)
    202 #ifdef TARGET_SPARC64
    203 #define FSR_FTT_NMASK      0xfffffffffffe3fffULL
    204 #define FSR_FTT_CEXC_NMASK 0xfffffffffffe3fe0ULL
    205 #define FSR_LDFSR_OLDMASK  0x0000003f000fc000ULL
    206 #define FSR_LDXFSR_MASK    0x0000003fcfc00fffULL
    207 #define FSR_LDXFSR_OLDMASK 0x00000000000fc000ULL
    208 #else
    209 #define FSR_FTT_NMASK      0xfffe3fffULL
    210 #define FSR_FTT_CEXC_NMASK 0xfffe3fe0ULL
    211 #define FSR_LDFSR_OLDMASK  0x000fc000ULL
    212 #endif
    213 #define FSR_LDFSR_MASK     0xcfc00fffULL
    214 #define FSR_FTT_IEEE_EXCP (1ULL << 14)
    215 #define FSR_FTT_UNIMPFPOP (3ULL << 14)
    216 #define FSR_FTT_SEQ_ERROR (4ULL << 14)
    217 #define FSR_FTT_INVAL_FPR (6ULL << 14)
    218 
    219 #define FSR_FCC1_SHIFT 11
    220 #define FSR_FCC1  (1ULL << FSR_FCC1_SHIFT)
    221 #define FSR_FCC0_SHIFT 10
    222 #define FSR_FCC0  (1ULL << FSR_FCC0_SHIFT)
    223 
    224 /* MMU */
    225 #define MMU_E     (1<<0)
    226 #define MMU_NF    (1<<1)
    227 
    228 #define PTE_ENTRYTYPE_MASK 3
    229 #define PTE_ACCESS_MASK    0x1c
    230 #define PTE_ACCESS_SHIFT   2
    231 #define PTE_PPN_SHIFT      7
    232 #define PTE_ADDR_MASK      0xffffff00
    233 
    234 #define PG_ACCESSED_BIT 5
    235 #define PG_MODIFIED_BIT 6
    236 #define PG_CACHE_BIT    7
    237 
    238 #define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
    239 #define PG_MODIFIED_MASK (1 << PG_MODIFIED_BIT)
    240 #define PG_CACHE_MASK    (1 << PG_CACHE_BIT)
    241 
    242 /* 3 <= NWINDOWS <= 32. */
    243 #define MIN_NWINDOWS 3
    244 #define MAX_NWINDOWS 32
    245 
    246 #ifdef TARGET_SPARC64
    247 typedef struct trap_state {
    248     uint64_t tpc;
    249     uint64_t tnpc;
    250     uint64_t tstate;
    251     uint32_t tt;
    252 } trap_state;
    253 #endif
    254 #define TARGET_INSN_START_EXTRA_WORDS 1
    255 
    256 struct sparc_def_t {
    257     const char *name;
    258     target_ulong iu_version;
    259     uint32_t fpu_version;
    260     uint32_t mmu_version;
    261     uint32_t mmu_bm;
    262     uint32_t mmu_ctpr_mask;
    263     uint32_t mmu_cxr_mask;
    264     uint32_t mmu_sfsr_mask;
    265     uint32_t mmu_trcr_mask;
    266     uint32_t mxcc_version;
    267     uint32_t features;
    268     uint32_t nwindows;
    269     uint32_t maxtl;
    270 };
    271 
    272 #define CPU_FEATURE_FLOAT        (1 << 0)
    273 #define CPU_FEATURE_FLOAT128     (1 << 1)
    274 #define CPU_FEATURE_SWAP         (1 << 2)
    275 #define CPU_FEATURE_MUL          (1 << 3)
    276 #define CPU_FEATURE_DIV          (1 << 4)
    277 #define CPU_FEATURE_FLUSH        (1 << 5)
    278 #define CPU_FEATURE_FSQRT        (1 << 6)
    279 #define CPU_FEATURE_FMUL         (1 << 7)
    280 #define CPU_FEATURE_VIS1         (1 << 8)
    281 #define CPU_FEATURE_VIS2         (1 << 9)
    282 #define CPU_FEATURE_FSMULD       (1 << 10)
    283 #define CPU_FEATURE_HYPV         (1 << 11)
    284 #define CPU_FEATURE_CMT          (1 << 12)
    285 #define CPU_FEATURE_GL           (1 << 13)
    286 #define CPU_FEATURE_TA0_SHUTDOWN (1 << 14) /* Shutdown on "ta 0x0" */
    287 #define CPU_FEATURE_ASR17        (1 << 15)
    288 #define CPU_FEATURE_CACHE_CTRL   (1 << 16)
    289 #define CPU_FEATURE_POWERDOWN    (1 << 17)
    290 #define CPU_FEATURE_CASA         (1 << 18)
    291 
    292 #ifndef TARGET_SPARC64
    293 #define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP |  \
    294                               CPU_FEATURE_MUL | CPU_FEATURE_DIV |     \
    295                               CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \
    296                               CPU_FEATURE_FMUL | CPU_FEATURE_FSMULD)
    297 #else
    298 #define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP |  \
    299                               CPU_FEATURE_MUL | CPU_FEATURE_DIV |     \
    300                               CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \
    301                               CPU_FEATURE_FMUL | CPU_FEATURE_VIS1 |   \
    302                               CPU_FEATURE_VIS2 | CPU_FEATURE_FSMULD | \
    303                               CPU_FEATURE_CASA)
    304 enum {
    305     mmu_us_12, // Ultrasparc < III (64 entry TLB)
    306     mmu_us_3,  // Ultrasparc III (512 entry TLB)
    307     mmu_us_4,  // Ultrasparc IV (several TLBs, 32 and 256MB pages)
    308     mmu_sun4v, // T1, T2
    309 };
    310 #endif
    311 
    312 #define TTE_VALID_BIT       (1ULL << 63)
    313 #define TTE_NFO_BIT         (1ULL << 60)
    314 #define TTE_IE_BIT          (1ULL << 59)
    315 #define TTE_USED_BIT        (1ULL << 41)
    316 #define TTE_LOCKED_BIT      (1ULL <<  6)
    317 #define TTE_SIDEEFFECT_BIT  (1ULL <<  3)
    318 #define TTE_PRIV_BIT        (1ULL <<  2)
    319 #define TTE_W_OK_BIT        (1ULL <<  1)
    320 #define TTE_GLOBAL_BIT      (1ULL <<  0)
    321 
    322 #define TTE_NFO_BIT_UA2005  (1ULL << 62)
    323 #define TTE_USED_BIT_UA2005 (1ULL << 47)
    324 #define TTE_LOCKED_BIT_UA2005 (1ULL <<  61)
    325 #define TTE_SIDEEFFECT_BIT_UA2005 (1ULL <<  11)
    326 #define TTE_PRIV_BIT_UA2005 (1ULL <<  8)
    327 #define TTE_W_OK_BIT_UA2005 (1ULL <<  6)
    328 
    329 #define TTE_IS_VALID(tte)   ((tte) & TTE_VALID_BIT)
    330 #define TTE_IS_NFO(tte)     ((tte) & TTE_NFO_BIT)
    331 #define TTE_IS_IE(tte)      ((tte) & TTE_IE_BIT)
    332 #define TTE_IS_USED(tte)    ((tte) & TTE_USED_BIT)
    333 #define TTE_IS_LOCKED(tte)  ((tte) & TTE_LOCKED_BIT)
    334 #define TTE_IS_SIDEEFFECT(tte) ((tte) & TTE_SIDEEFFECT_BIT)
    335 #define TTE_IS_SIDEEFFECT_UA2005(tte) ((tte) & TTE_SIDEEFFECT_BIT_UA2005)
    336 #define TTE_IS_PRIV(tte)    ((tte) & TTE_PRIV_BIT)
    337 #define TTE_IS_W_OK(tte)    ((tte) & TTE_W_OK_BIT)
    338 
    339 #define TTE_IS_NFO_UA2005(tte)     ((tte) & TTE_NFO_BIT_UA2005)
    340 #define TTE_IS_USED_UA2005(tte)    ((tte) & TTE_USED_BIT_UA2005)
    341 #define TTE_IS_LOCKED_UA2005(tte)  ((tte) & TTE_LOCKED_BIT_UA2005)
    342 #define TTE_IS_SIDEEFFECT_UA2005(tte) ((tte) & TTE_SIDEEFFECT_BIT_UA2005)
    343 #define TTE_IS_PRIV_UA2005(tte)    ((tte) & TTE_PRIV_BIT_UA2005)
    344 #define TTE_IS_W_OK_UA2005(tte)    ((tte) & TTE_W_OK_BIT_UA2005)
    345 
    346 #define TTE_IS_GLOBAL(tte)  ((tte) & TTE_GLOBAL_BIT)
    347 
    348 #define TTE_SET_USED(tte)   ((tte) |= TTE_USED_BIT)
    349 #define TTE_SET_UNUSED(tte) ((tte) &= ~TTE_USED_BIT)
    350 
    351 #define TTE_PGSIZE(tte)     (((tte) >> 61) & 3ULL)
    352 #define TTE_PGSIZE_UA2005(tte)     ((tte) & 7ULL)
    353 #define TTE_PA(tte)         ((tte) & 0x1ffffffe000ULL)
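
        /*
         * Illustrative sketch, not upstream code: decoding a sun4u-format TTE
         * with the accessors above, ignoring page-size handling and the
         * UA2005 bit layout:
         *
         *     if (TTE_IS_VALID(tte)) {
         *         hwaddr page_pa = TTE_PA(tte);       physical page base
         *         bool writable  = TTE_IS_W_OK(tte);
         *         bool priv_only = TTE_IS_PRIV(tte);
         *         TTE_SET_USED(tte);                  mark entry as referenced
         *     }
         */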
    354 
    355 /* UltraSPARC T1 specific */
    356 #define TLB_UST1_IS_REAL_BIT   (1ULL << 9)  /* Real translation entry */
    357 #define TLB_UST1_IS_SUN4V_BIT  (1ULL << 10) /* sun4u/sun4v TTE format switch */
    358 
    359 #define SFSR_NF_BIT         (1ULL << 24)   /* JPS1 NoFault */
    360 #define SFSR_TM_BIT         (1ULL << 15)   /* JPS1 TLB Miss */
    361 #define SFSR_FT_VA_IMMU_BIT (1ULL << 13)   /* USIIi VA out of range (IMMU) */
    362 #define SFSR_FT_VA_DMMU_BIT (1ULL << 12)   /* USIIi VA out of range (DMMU) */
    363 #define SFSR_FT_NFO_BIT     (1ULL << 11)   /* NFO page access */
    364 #define SFSR_FT_ILL_BIT     (1ULL << 10)   /* illegal LDA/STA ASI */
    365 #define SFSR_FT_ATOMIC_BIT  (1ULL <<  9)   /* atomic op on noncacheable area */
    366 #define SFSR_FT_NF_E_BIT    (1ULL <<  8)   /* NF access on side effect area */
    367 #define SFSR_FT_PRIV_BIT    (1ULL <<  7)   /* privilege violation */
    368 #define SFSR_PR_BIT         (1ULL <<  3)   /* privilege mode */
    369 #define SFSR_WRITE_BIT      (1ULL <<  2)   /* write access mode */
    370 #define SFSR_OW_BIT         (1ULL <<  1)   /* status overwritten */
    371 #define SFSR_VALID_BIT      (1ULL <<  0)   /* status valid */
    372 
    373 #define SFSR_ASI_SHIFT      16             /* 23:16 ASI value */
    374 #define SFSR_ASI_MASK       (0xffULL << SFSR_ASI_SHIFT)
    375 #define SFSR_CT_PRIMARY     (0ULL <<  4)   /* 5:4 context type */
    376 #define SFSR_CT_SECONDARY   (1ULL <<  4)
    377 #define SFSR_CT_NUCLEUS     (2ULL <<  4)
    378 #define SFSR_CT_NOTRANS     (3ULL <<  4)
    379 #define SFSR_CT_MASK        (3ULL <<  4)
    380 
    381 /* Leon3 cache control */
    382 
    383 /* Cache control: emulate the behavior of the cache control registers, but
    384    without any actual effect on the emulated CPU. */
    385 
    386 #define CACHE_STATE_MASK 0x3
    387 #define CACHE_DISABLED   0x0
    388 #define CACHE_FROZEN     0x1
    389 #define CACHE_ENABLED    0x3
    390 
    391 /* Cache Control register fields */
    392 
    393 #define CACHE_CTRL_IF (1 <<  4)  /* Instruction Cache Freeze on Interrupt */
    394 #define CACHE_CTRL_DF (1 <<  5)  /* Data Cache Freeze on Interrupt */
    395 #define CACHE_CTRL_DP (1 << 14)  /* Data cache flush pending */
    396 #define CACHE_CTRL_IP (1 << 15)  /* Instruction cache flush pending */
    397 #define CACHE_CTRL_IB (1 << 16)  /* Instruction burst fetch */
    398 #define CACHE_CTRL_FI (1 << 21)  /* Flush Instruction cache (Write only) */
    399 #define CACHE_CTRL_FD (1 << 22)  /* Flush Data cache (Write only) */
    400 #define CACHE_CTRL_DS (1 << 23)  /* Data cache snoop enable */
    401 
    402 #define CONVERT_BIT(X, SRC, DST) \
    403          (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
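
        /*
         * Illustrative note, not upstream code: CONVERT_BIT moves a single bit
         * from mask position SRC to mask position DST, so SRC and DST must
         * each be one-bit masks.  For example, CONVERT_BIT(x, 1 << 4, 1 << 0)
         * evaluates to (x / 16) & 1 (bit 4 of x returned at bit 0), while
         * CONVERT_BIT(x, 1 << 0, 1 << 4) evaluates to (x & 1) * 16 (bit 0 of x
         * returned at bit 4).
         */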
    404 
    405 typedef struct SparcTLBEntry {
    406     uint64_t tag;
    407     uint64_t tte;
    408 } SparcTLBEntry;
    409 
    410 struct CPUTimer
    411 {
    412     const char *name;
    413     uint32_t    frequency;
    414     uint32_t    disabled;
    415     uint64_t    disabled_mask;
    416     uint32_t    npt;
    417     uint64_t    npt_mask;
    418     int64_t     clock_offset;
    419     QEMUTimer  *qtimer;
    420 };
    421 
    422 typedef struct CPUTimer CPUTimer;
    423 
    424 typedef struct CPUArchState CPUSPARCState;
    425 #if defined(TARGET_SPARC64)
    426 typedef union {
    427    uint64_t mmuregs[16];
    428    struct {
    429     uint64_t tsb_tag_target;
    430     uint64_t mmu_primary_context;
    431     uint64_t mmu_secondary_context;
    432     uint64_t sfsr;
    433     uint64_t sfar;
    434     uint64_t tsb;
    435     uint64_t tag_access;
    436     uint64_t virtual_watchpoint;
    437     uint64_t physical_watchpoint;
    438     uint64_t sun4v_ctx_config[2];
    439     uint64_t sun4v_tsb_pointers[4];
    440    };
    441 } SparcV9MMU;
    442 #endif
    443 struct CPUArchState {
    444     target_ulong gregs[8]; /* general registers */
    445     target_ulong *regwptr; /* pointer to current register window */
    446     target_ulong pc;       /* program counter */
    447     target_ulong npc;      /* next program counter */
    448     target_ulong y;        /* multiply/divide register */
    449 
    450     /* emulator internal flags handling */
    451     target_ulong cc_src, cc_src2;
    452     target_ulong cc_dst;
    453     uint32_t cc_op;
    454 
    455     target_ulong cond; /* conditional branch result (XXX: save it in a
    456                           temporary register when possible) */
    457 
    458     uint32_t psr;      /* processor state register */
    459     target_ulong fsr;      /* FPU state register */
    460     CPU_DoubleU fpr[TARGET_DPREGS];  /* floating point registers */
    461     uint32_t cwp;      /* index of current register window (extracted
    462                           from PSR) */
    463 #if !defined(TARGET_SPARC64) || defined(TARGET_ABI32)
    464     uint32_t wim;      /* window invalid mask */
    465 #endif
    466     target_ulong tbr;  /* trap base register */
    467 #if !defined(TARGET_SPARC64)
    468     int      psrs;     /* supervisor mode (extracted from PSR) */
    469     int      psrps;    /* previous supervisor mode */
    470     int      psret;    /* enable traps */
    471 #endif
    472     uint32_t psrpil;   /* interrupt blocking level */
    473     uint32_t pil_in;   /* incoming interrupt level bitmap */
    474 #if !defined(TARGET_SPARC64)
    475     int      psref;    /* enable fpu */
    476 #endif
    477     int interrupt_index;
    478     /* NOTE: we allow 8 more registers to handle wrapping */
    479     target_ulong regbase[MAX_NWINDOWS * 16 + 8];
    480 
    481     /* Fields up to this point are cleared by a CPU reset */
    482     struct {} end_reset_fields;
    483 
    484     /* Fields from here on are preserved across CPU reset. */
    485     target_ulong version;
    486     uint32_t nwindows;
    487 
    488     /* MMU regs */
    489 #if defined(TARGET_SPARC64)
    490     uint64_t lsu;
    491 #define DMMU_E 0x8
    492 #define IMMU_E 0x4
    493     SparcV9MMU immu;
    494     SparcV9MMU dmmu;
    495     SparcTLBEntry itlb[64];
    496     SparcTLBEntry dtlb[64];
    497     uint32_t mmu_version;
    498 #else
    499     uint32_t mmuregs[32];
    500     uint64_t mxccdata[4];
    501     uint64_t mxccregs[8];
    502     uint32_t mmubpctrv, mmubpctrc, mmubpctrs;
    503     uint64_t mmubpaction;
    504     uint64_t mmubpregs[4];
    505     uint64_t prom_addr;
    506 #endif
    507     /* temporary float registers */
    508     float128 qt0, qt1;
    509     float_status fp_status;
    510 #if defined(TARGET_SPARC64)
    511 #define MAXTL_MAX 8
    512 #define MAXTL_MASK (MAXTL_MAX - 1)
    513     trap_state ts[MAXTL_MAX];
    514     uint32_t xcc;               /* Extended integer condition codes */
    515     uint32_t asi;
    516     uint32_t pstate;
    517     uint32_t tl;
    518     uint32_t maxtl;
    519     uint32_t cansave, canrestore, otherwin, wstate, cleanwin;
    520     uint64_t agregs[8]; /* alternate general registers */
    521     uint64_t bgregs[8]; /* backup for normal global registers */
    522     uint64_t igregs[8]; /* interrupt general registers */
    523     uint64_t mgregs[8]; /* mmu general registers */
    524     uint64_t glregs[8 * MAXTL_MAX];
    525     uint64_t fprs;
    526     uint64_t tick_cmpr, stick_cmpr;
    527     CPUTimer *tick, *stick;
    528 #define TICK_NPT_MASK        0x8000000000000000ULL
    529 #define TICK_INT_DIS         0x8000000000000000ULL
    530     uint64_t gsr;
    531     uint32_t gl; // UA2005
    532     /* UA 2005 hyperprivileged registers */
    533     uint64_t hpstate, htstate[MAXTL_MAX], hintp, htba, hver, hstick_cmpr, ssr;
    534     uint64_t scratch[8];
    535     CPUTimer *hstick; // UA 2005
    536     /* Interrupt vector registers */
    537     uint64_t ivec_status;
    538     uint64_t ivec_data[3];
    539     uint32_t softint;
    540 #define SOFTINT_TIMER   1
    541 #define SOFTINT_STIMER  (1 << 16)
    542 #define SOFTINT_INTRMASK (0xFFFE)
    543 #define SOFTINT_REG_MASK (SOFTINT_STIMER|SOFTINT_INTRMASK|SOFTINT_TIMER)
    544 #endif
    545     sparc_def_t def;
    546 
    547     void *irq_manager;
    548     void (*qemu_irq_ack)(CPUSPARCState *env, void *irq_manager, int intno);
    549 
    550     /* Leon3 cache control */
    551     uint32_t cache_control;
    552 };
    553 
    554 /**
    555  * SPARCCPU:
    556  * @env: #CPUSPARCState
    557  *
    558  * A SPARC CPU.
    559  */
    560 struct ArchCPU {
    561     /*< private >*/
    562     CPUState parent_obj;
    563     /*< public >*/
    564 
    565     CPUNegativeOffsetState neg;
    566     CPUSPARCState env;
    567 };
    568 
    569 
    570 #ifndef CONFIG_USER_ONLY
    571 extern const VMStateDescription vmstate_sparc_cpu;
    572 #endif
    573 
    574 void sparc_cpu_do_interrupt(CPUState *cpu);
    575 hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
    576 int sparc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
    577 int sparc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
    578 G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
    579                                               MMUAccessType access_type,
    580                                               int mmu_idx,
    581                                               uintptr_t retaddr);
    582 G_NORETURN void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t);
    583 
    584 #ifndef NO_CPU_IO_DEFS
    585 /* cpu_init.c */
    586 void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
    587 void sparc_cpu_list(void);
    588 /* mmu_helper.c */
    589 bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
    590                         MMUAccessType access_type, int mmu_idx,
    591                         bool probe, uintptr_t retaddr);
    592 target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev);
    593 void dump_mmu(CPUSPARCState *env);
    594 
    595 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    596 int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
    597                               uint8_t *buf, int len, bool is_write);
    598 #endif
    599 
    600 
    601 /* translate.c */
    602 void sparc_tcg_init(void);
    603 void sparc_restore_state_to_opc(CPUState *cs,
    604                                 const TranslationBlock *tb,
    605                                 const uint64_t *data);
    606 
    607 /* cpu-exec.c */
    608 
    609 /* win_helper.c */
    610 target_ulong cpu_get_psr(CPUSPARCState *env1);
    611 void cpu_put_psr(CPUSPARCState *env1, target_ulong val);
    612 void cpu_put_psr_raw(CPUSPARCState *env1, target_ulong val);
    613 #ifdef TARGET_SPARC64
    614 void cpu_change_pstate(CPUSPARCState *env1, uint32_t new_pstate);
    615 void cpu_gl_switch_gregs(CPUSPARCState *env, uint32_t new_gl);
    616 #endif
    617 int cpu_cwp_inc(CPUSPARCState *env1, int cwp);
    618 int cpu_cwp_dec(CPUSPARCState *env1, int cwp);
    619 void cpu_set_cwp(CPUSPARCState *env1, int new_cwp);
    620 
    621 /* sun4m.c, sun4u.c */
    622 void cpu_check_irqs(CPUSPARCState *env);
    623 
    624 #if defined (TARGET_SPARC64)
    625 
    626 static inline int compare_masked(uint64_t x, uint64_t y, uint64_t mask)
    627 {
    628     return (x & mask) == (y & mask);
    629 }
    630 
    631 #define MMU_CONTEXT_BITS 13
    632 #define MMU_CONTEXT_MASK ((1 << MMU_CONTEXT_BITS) - 1)
    633 
    634 static inline int tlb_compare_context(const SparcTLBEntry *tlb,
    635                                       uint64_t context)
    636 {
    637     return compare_masked(context, tlb->tag, MMU_CONTEXT_MASK);
    638 }
    639 
    640 #endif
    641 #endif
    642 
    643 /* cpu-exec.c */
    644 #if !defined(CONFIG_USER_ONLY)
    645 void sparc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
    646                                      vaddr addr, unsigned size,
    647                                      MMUAccessType access_type,
    648                                      int mmu_idx, MemTxAttrs attrs,
    649                                      MemTxResult response, uintptr_t retaddr);
    650 #if defined(TARGET_SPARC64)
    651 hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
    652                                            int mmu_idx);
    653 #endif
    654 #endif
    655 
    656 #define SPARC_CPU_TYPE_SUFFIX "-" TYPE_SPARC_CPU
    657 #define SPARC_CPU_TYPE_NAME(model) model SPARC_CPU_TYPE_SUFFIX
    658 #define CPU_RESOLVING_TYPE TYPE_SPARC_CPU
    659 
    660 #define cpu_list sparc_cpu_list
    661 
    662 /* MMU modes definitions */
    663 #if defined (TARGET_SPARC64)
    664 #define MMU_USER_IDX   0
    665 #define MMU_USER_SECONDARY_IDX   1
    666 #define MMU_KERNEL_IDX 2
    667 #define MMU_KERNEL_SECONDARY_IDX 3
    668 #define MMU_NUCLEUS_IDX 4
    669 #define MMU_PHYS_IDX   5
    670 #else
    671 #define MMU_USER_IDX   0
    672 #define MMU_KERNEL_IDX 1
    673 #define MMU_PHYS_IDX   2
    674 #endif
    675 
    676 #if defined (TARGET_SPARC64)
    677 static inline int cpu_has_hypervisor(CPUSPARCState *env1)
    678 {
    679     return env1->def.features & CPU_FEATURE_HYPV;
    680 }
    681 
    682 static inline int cpu_hypervisor_mode(CPUSPARCState *env1)
    683 {
    684     return cpu_has_hypervisor(env1) && (env1->hpstate & HS_PRIV);
    685 }
    686 
    687 static inline int cpu_supervisor_mode(CPUSPARCState *env1)
    688 {
    689     return env1->pstate & PS_PRIV;
    690 }
    691 #else
    692 static inline int cpu_supervisor_mode(CPUSPARCState *env1)
    693 {
    694     return env1->psrs;
    695 }
    696 #endif
    697 
    698 static inline int cpu_mmu_index(CPUSPARCState *env, bool ifetch)
    699 {
    700 #if defined(CONFIG_USER_ONLY)
    701     return MMU_USER_IDX;
    702 #elif !defined(TARGET_SPARC64)
    703     if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
    704         return MMU_PHYS_IDX;
    705     } else {
    706         return env->psrs;
    707     }
    708 #else
    709     /* IMMU or DMMU disabled.  */
    710     if (ifetch
    711         ? (env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0
    712         : (env->lsu & DMMU_E) == 0) {
    713         return MMU_PHYS_IDX;
    714     } else if (cpu_hypervisor_mode(env)) {
    715         return MMU_PHYS_IDX;
    716     } else if (env->tl > 0) {
    717         return MMU_NUCLEUS_IDX;
    718     } else if (cpu_supervisor_mode(env)) {
    719         return MMU_KERNEL_IDX;
    720     } else {
    721         return MMU_USER_IDX;
    722     }
    723 #endif
    724 }
    725 
    726 static inline int cpu_interrupts_enabled(CPUSPARCState *env1)
    727 {
    728 #if !defined (TARGET_SPARC64)
    729     if (env1->psret != 0)
    730         return 1;
    731 #else
    732     if ((env1->pstate & PS_IE) && !cpu_hypervisor_mode(env1)) {
    733         return 1;
    734     }
    735 #endif
    736 
    737     return 0;
    738 }
    739 
    740 static inline int cpu_pil_allowed(CPUSPARCState *env1, int pil)
    741 {
    742 #if !defined(TARGET_SPARC64)
    743     /* level 15 is non-maskable on sparc v8 */
    744     return pil == 15 || pil > env1->psrpil;
    745 #else
    746     return pil > env1->psrpil;
    747 #endif
    748 }
    749 
    750 #include "exec/cpu-all.h"
    751 
    752 #ifdef TARGET_SPARC64
    753 /* sun4u.c */
    754 void cpu_tick_set_count(CPUTimer *timer, uint64_t count);
    755 uint64_t cpu_tick_get_count(CPUTimer *timer);
    756 void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit);
    757 trap_state* cpu_tsptr(CPUSPARCState* env);
    758 #endif
    759 
    760 #define TB_FLAG_MMU_MASK     7
    761 #define TB_FLAG_FPU_ENABLED  (1 << 4)
    762 #define TB_FLAG_AM_ENABLED   (1 << 5)
    763 #define TB_FLAG_SUPER        (1 << 6)
    764 #define TB_FLAG_HYPER        (1 << 7)
    765 #define TB_FLAG_ASI_SHIFT    24
    766 
    767 static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc,
    768                                         target_ulong *cs_base, uint32_t *pflags)
    769 {
    770     uint32_t flags;
    771     *pc = env->pc;
    772     *cs_base = env->npc;
    773     flags = cpu_mmu_index(env, false);
    774 #ifndef CONFIG_USER_ONLY
    775     if (cpu_supervisor_mode(env)) {
    776         flags |= TB_FLAG_SUPER;
    777     }
    778 #endif
    779 #ifdef TARGET_SPARC64
    780 #ifndef CONFIG_USER_ONLY
    781     if (cpu_hypervisor_mode(env)) {
    782         flags |= TB_FLAG_HYPER;
    783     }
    784 #endif
    785     if (env->pstate & PS_AM) {
    786         flags |= TB_FLAG_AM_ENABLED;
    787     }
    788     if ((env->def.features & CPU_FEATURE_FLOAT)
    789         && (env->pstate & PS_PEF)
    790         && (env->fprs & FPRS_FEF)) {
    791         flags |= TB_FLAG_FPU_ENABLED;
    792     }
    793     flags |= env->asi << TB_FLAG_ASI_SHIFT;
    794 #else
    795     if ((env->def.features & CPU_FEATURE_FLOAT) && env->psref) {
    796         flags |= TB_FLAG_FPU_ENABLED;
    797     }
    798 #endif
    799     *pflags = flags;
    800 }
    801 
    802 static inline bool tb_fpu_enabled(int tb_flags)
    803 {
    804 #if defined(CONFIG_USER_ONLY)
    805     return true;
    806 #else
    807     return tb_flags & TB_FLAG_FPU_ENABLED;
    808 #endif
    809 }
    810 
    811 static inline bool tb_am_enabled(int tb_flags)
    812 {
    813 #ifndef TARGET_SPARC64
    814     return false;
    815 #else
    816     return tb_flags & TB_FLAG_AM_ENABLED;
    817 #endif
    818 }
    819 
    820 #ifdef TARGET_SPARC64
    821 /* win_helper.c */
    822 target_ulong cpu_get_ccr(CPUSPARCState *env1);
    823 void cpu_put_ccr(CPUSPARCState *env1, target_ulong val);
    824 target_ulong cpu_get_cwp64(CPUSPARCState *env1);
    825 void cpu_put_cwp64(CPUSPARCState *env1, int cwp);
    826 
    827 static inline uint64_t sparc64_tstate(CPUSPARCState *env)
    828 {
    829     uint64_t tstate = (cpu_get_ccr(env) << 32) |
    830         ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
    831         cpu_get_cwp64(env);
    832 
    833     if (env->def.features & CPU_FEATURE_GL) {
    834         tstate |= (env->gl & 7ULL) << 40;
    835     }
    836     return tstate;
    837 }
    838 #endif
    839 
    840 #endif