author     Bo Hu <bohu@google.com>                                    2015-06-02 22:55:31 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>  2015-06-02 22:55:31 +0000
commit     5f25a532cb29ca9090343b1fda8815f46be9b213 (patch)
tree       041ff37fc4f2cdb17ea23353892548ba97e56db2
parent     c1b2e02c8b566deaf0a591a48ff7fad66250b356 (diff)
parent     b509f134713abba397ef0ef509875001aed33667 (diff)
Merge "target-mips: Add TLB Refill exception interpreter" into studio-master-dev
-rw-r--r--  target-mips/cpu.h        24
-rw-r--r--  target-mips/helper.c    647
-rw-r--r--  target-mips/translate.c 732
3 files changed, 916 insertions(+), 487 deletions(-)
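Overview sketch (illustrative only, not code from this patch): instead of walking the guest page tables in C, the refill path now captures the guest kernel's TLB Refill handler from EBase the first time it is needed and then runs that captured code through a small interpreter on every refill. A compressed, self-contained rendering of that flow, with the QEMU types and helpers (ldl_phys, tlb_exception_interpreter) replaced by stand-ins:

#include <stdint.h>
#include <stdio.h>

#define TLB_HANDLER_SIZE 0x40                 /* words copied from EBase */

typedef enum { PROBE = 0, USEFASTTLB, USESLOWTLB } interpreter_state;

static interpreter_state state = PROBE;
static uint32_t handler[TLB_HANDLER_SIZE];

/* stand-in for ldl_phys(): read one 32-bit word of guest physical memory */
static uint32_t read_guest_word(uint32_t paddr) { (void)paddr; return 0; }

/* stand-in for tlb_exception_interpreter() in translate.c */
static int run_interpreter(const uint32_t *code, uint32_t size)
{
    (void)code; (void)size;
    return 0;                                 /* 0: handler ran to eret */
}

static int do_refill(uint32_t ebase)
{
    int i;

    if (state == PROBE) {
        /* capture the guest kernel's Refill handler once
         * (tlb_refill_exception_prepare) */
        for (i = 0; i < TLB_HANDLER_SIZE; i++)
            handler[i] = read_guest_word(ebase - 0x80000000u + ((uint32_t)i << 2));
        state = USEFASTTLB;
    }
    /* interpret the captured handler; non-zero means fall back to the
     * software (raise_mmu_exception) path */
    return run_interpreter(handler, TLB_HANDLER_SIZE);
}

int main(void)
{
    printf("refill result: %d\n", do_refill(0x80000000u));
    return 0;
}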
diff --git a/target-mips/cpu.h b/target-mips/cpu.h
index b6494518f4..21c8cfc50e 100644
--- a/target-mips/cpu.h
+++ b/target-mips/cpu.h
@@ -598,6 +598,29 @@ struct CPUMIPSState {
#include "cpu-qom.h"
#if !defined(CONFIG_USER_ONLY)
+
+typedef struct {
+ target_long gpr_reg[32];
+ uint32_t pc;
+ uint32_t branch_addr;
+ target_ulong CP0_EntryLo0;
+ target_ulong CP0_EntryLo1;
+ target_ulong CP0_BadVAddr;
+ target_ulong CP0_EntryHi;
+ target_ulong CP0_Context;
+ target_ulong CP0_XContext;
+ target_ulong CP0_PageMask;
+ target_ulong CP0_PageGrain;
+ target_ulong CP0_PageGrain_rw_bitmask;
+ target_ulong CP0_Cause;
+ int32_t CP0_Index;
+ target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM];
+ int do_tlbwr;
+} CPUInterpreterContext;
+
+extern CPUInterpreterContext CPU;
+
+int tlb_exception_interpreter(CPUMIPSState *env, uint32_t *handler, uint32_t size);
int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
target_ulong address, int rw, int access_type);
int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
@@ -921,5 +944,4 @@ static inline void compute_hflags(CPUMIPSState *env)
}
}
}
-
#endif /* !defined (__MIPS_CPU_H__) */
diff --git a/target-mips/helper.c b/target-mips/helper.c
index e3634d5347..48181f2296 100644
--- a/target-mips/helper.c
+++ b/target-mips/helper.c
@@ -313,447 +313,70 @@ static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
#if !defined(CONFIG_USER_ONLY)
-#define MUST_HAVE_FASTTLB 1
+#define TLB_HANDLER_SIZE (0x40)
-#define KERNEL_64_BIT (1 << 0)
-#define KERNEL_PGD_C0_CONTEXT (1 << 1)
-#define KERNEL_HUGE_TLB (1 << 2)
-#define KERNEL_RIXI (1 << 3)
-#define KERNEL_HIGHMEM (1 << 4)
+typedef enum { PROBE = 0, USEFASTTLB, USESLOWTLB } interpreter_state;
-/* TLB maintenance PTE software flags.
- *
- * Low bits are: CCC D V G RI XI [S H] A W R M(=F) P
- * TLB refill will do a ROTR 7/9 (in case of cpu_has_rixi),
- * or SRL/DSRL 7/9 to strip low bits.
- * PFN size in high bits is 49 or 51 bit --> 512TB or 4*512TB for 4KB pages
- *
- * take a look at <KERNEL>/arch/mips/include/asm/pgtable-bits.h
- */
-#define PTE_PAGE_PRESENT_SHIFT (0)
-#define PTE_PAGE_PRESENT (1 << PTE_PAGE_PRESENT_SHIFT)
-
-#define PTE_PAGE_MODIFIED_SHIFT (PTE_PAGE_PRESENT_SHIFT + 1)
-#define PTE_PAGE_MODIFIED (1 << PTE_PAGE_MODIFIED_SHIFT)
-
-#define PTE_PAGE_FILE (PTE_PAGE_MODIFIED)
-
-#define PTE_PAGE_READ_SHIFT (PTE_PAGE_MODIFIED_SHIFT + 1)
-#define PTE_PAGE_READ (1 << PTE_PAGE_READ_SHIFT)
+typedef struct {
+ interpreter_state state;
+ char name[32];
+ int32_t ebase;
+ uint32_t handler[TLB_HANDLER_SIZE];
+} TLBHandlerInfo;
-#define PTE_PAGE_WRITE_SHIFT (PTE_PAGE_READ_SHIFT + 1)
-#define PTE_PAGE_WRITE (1 << PTE_PAGE_WRITE_SHIFT)
-
-#define PTE_PAGE_ACCESSED_SHIFT (PTE_PAGE_WRITE_SHIFT + 1)
-#define PTE_PAGE_ACCESSED (1 << PTE_PAGE_ACCESSED_SHIFT)
-
-/* Huge TLB support maintenance bits */
-#define PTE_PAGE_HUGE_SHIFT (PTE_PAGE_ACCESSED_SHIFT + 1)
-#define PTE_PAGE_HUGE (1 << PTE_PAGE_HUGE_SHIFT)
-
-#define PTE_PAGE_SPLITTING_SHIFT (PTE_PAGE_HUGE_SHIFT + 1)
-#define PTE_PAGE_SPLITTING (1 << PTE_PAGE_SPLITTING_SHIFT)
+// #define DUMP_HANDLER 1
+#if defined(DUMP_HANDLER)
/*
- * Get the pgd_current from TLB refill handler
- * The kernel refill handler is generated by
- * function build_r4000_tlb_refill_handler.
+ * Print the contents of the TLBHandlerInfo buffer
+ * holding the TLB exception handler
*/
-typedef void (*pagetable_walk_t)(CPUState *cs,
- target_ulong pgd_addr, target_ulong vaddr,
- target_ulong *entrylo0, target_ulong *entrylo1,
- target_ulong *sw_pte_lo0, target_ulong *sw_pte_lo1);
-static struct {
- enum {PROBE, USEFASTTLB, USESLOWTLB} state;
- uint32_t config;
- pagetable_walk_t pagetable_walk;
- target_ulong pgd_current_p;
- target_ulong swapper_pg_dir;
- int softshift;
-} linux_pte_info = {0};
-
-static inline target_ulong cpu_debug_translate_address(CPUMIPSState *env, target_ulong address) {
-
-#if defined(TARGET_MIPS64)
- if (!(linux_pte_info.config & KERNEL_64_BIT))
- address = (int32_t)address;
-#endif
-
- if (address <= USEG_LIMIT) {
- /* useg */
- if (env->CP0_Status & (1 << CP0St_ERL)) {
- return address & 0xFFFFFFFF;
- } else {
- return address;
- }
-#if defined(TARGET_MIPS64)
- } else if (address < 0x4000000000000000ULL) {
- return address;
- } else if (address < 0x8000000000000000ULL) {
- return address;
- } else if (address < 0xC000000000000000ULL) {
- /* xkphys */
- if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
- return address & env->PAMask;
- } else {
- return address;
- }
- } else if (address < 0xFFFFFFFF80000000ULL) {
- /* xkseg */
- return address;
-#endif
- } else if (address < (int32_t)KSEG1_BASE) {
- /* kseg0 */
- return address - (int32_t)KSEG0_BASE;
- } else if (address < (int32_t)KSEG2_BASE) {
- /* kseg1 */
- return address - (int32_t)KSEG1_BASE;
- } else
- return address;
-}
-
-static inline target_ulong get_mtc0_entrylo_mask(const CPUMIPSState *env)
-{
-#if defined(TARGET_MIPS64)
- return env->PAMask >> 6;
-#else
- return (env->PAMask >> 6) & 0x3FFFFFFF;
-#endif
-}
-
-static inline void pagetable_walk32(CPUState *cs,
- target_ulong pgd_addr, target_ulong vaddr,
- target_ulong *entrylo0, target_ulong *entrylo1,
- target_ulong *sw_pte_lo0, target_ulong *sw_pte_lo1)
+static inline void dump_handler(TLBHandlerInfo *handler_info)
{
- target_ulong ptw_phys, pt_addr, index;
- MIPSCPU *cpu = MIPS_CPU(cs);
- CPUMIPSState *env = &cpu->env;
-
-#if defined(TARGET_MIPS64)
- /* workaround when running a 32bit
- * emulation with the 64bit target emulator
- */
- vaddr = (uint32_t)vaddr;
-#endif
-
- /* 32bit PTE lookup */
- ptw_phys = cpu_debug_translate_address(env, pgd_addr);
- index = (vaddr >> 22) << 2; /* Use bits 31..22 to index pgd */
- ptw_phys += index;
-
- pt_addr = ldl_phys(cs->as, ptw_phys);
-
- ptw_phys = cpu_debug_translate_address(env, pt_addr);
- index = ((vaddr >> 13) & 0x1ff) << 3; /* Use bits 21..13 to index pgt */
- ptw_phys += index;
-
- /* Get the entrylo values from pgt */
- if (linux_pte_info.config & KERNEL_RIXI) {
- target_ulong mask = ~(-1 << linux_pte_info.softshift);
-
- *entrylo0 = ldl_phys(cs->as, ptw_phys);
- if (sw_pte_lo0) {
- if (linux_pte_info.config & KERNEL_HIGHMEM)
- *sw_pte_lo0 = *entrylo0 & ~(-1 << (linux_pte_info.softshift + 4));
- else
- *sw_pte_lo0 = *entrylo0 & ~(-1 << (linux_pte_info.softshift));
- }
- if (linux_pte_info.config & KERNEL_HIGHMEM)
- *entrylo0 = (*entrylo0) >> 4;
- *entrylo0 = ((*entrylo0 & mask) << (32 - linux_pte_info.softshift)) |
- ((uint32_t)*entrylo0 >> linux_pte_info.softshift);
- *entrylo0 = (*entrylo0 & get_mtc0_entrylo_mask(env)) |
- ((*entrylo0 & (env->CP0_PageGrain & (3u << CP0PG_XIE))) <<
- (CP0EnLo_XI - 30));
-
- *entrylo1 = ldl_phys(cs->as, ptw_phys + 4);
- if (sw_pte_lo1) {
- if (linux_pte_info.config & KERNEL_HIGHMEM)
- *sw_pte_lo1 = *entrylo1 & ~(-1 << (linux_pte_info.softshift + 4));
- else
- *sw_pte_lo1 = *entrylo1 & ~(-1 << (linux_pte_info.softshift));
- }
- if (linux_pte_info.config & KERNEL_HIGHMEM)
- *entrylo1 = (*entrylo1) >> 4;
- *entrylo1 = ((*entrylo1 & mask) << (32 - linux_pte_info.softshift)) |
- ((uint32_t)*entrylo1 >> linux_pte_info.softshift);
- *entrylo1 = (*entrylo1 & get_mtc0_entrylo_mask(env)) |
- ((*entrylo1 & (env->CP0_PageGrain & (3u << CP0PG_XIE))) <<
- (CP0EnLo_XI - 30));
- } else {
- *entrylo0 = ldl_phys(cs->as, ptw_phys);
- if (sw_pte_lo0) {
- *sw_pte_lo0 = *entrylo0 & ((target_ulong)(1 << linux_pte_info.softshift) - 1);
- }
- *entrylo0 >>= linux_pte_info.softshift;
-
- *entrylo1 = ldl_phys(cs->as, ptw_phys + 4);
- if (sw_pte_lo1) {
- *sw_pte_lo1 = *entrylo1 & ((target_ulong)(1 << linux_pte_info.softshift) - 1);
- }
- *entrylo1 >>= linux_pte_info.softshift;
+ int i;
+ fprintf(stderr, "\n===========================================\n");
+ fprintf(stderr, "%s exception handler\n", handler_info->name);
+ fprintf(stderr, "===========================================\n");
+ for (i = 0; i < TLB_HANDLER_SIZE; i++) {
+ fprintf(stderr, "0x" TARGET_FMT_lx " %08x\n",
+ (target_ulong)(handler_info->ebase + (i << 2)),
+ handler_info->handler[i]);
}
+ fprintf(stderr, "===========================================\n\n");
}
-
-static inline void pagetable_walk64(CPUState *cs,
- target_ulong pgd_addr, target_ulong vaddr,
- target_ulong *entrylo0, target_ulong *entrylo1,
- target_ulong *sw_pte_lo0, target_ulong *sw_pte_lo1)
+#else
+static inline void dump_handler(TLBHandlerInfo *handler_info)
{
- MIPSCPU *cpu = MIPS_CPU(cs);
- CPUMIPSState *env = &cpu->env;
- target_ulong ptw_phys, pt_addr, index;
-
- pgd_addr = cpu_debug_translate_address(env, pgd_addr);
- index = ((uint64_t)vaddr >> 0x1b) & 0x1ff8;
- pgd_addr += index;
-
- pgd_addr = ldq_phys(cs->as, pgd_addr);
-
- ptw_phys = cpu_debug_translate_address(env, pgd_addr);
- index = ((uint64_t)vaddr >> 0x12) & 0xff8;
- ptw_phys += index;
-
- pt_addr = ldq_phys(cs->as, ptw_phys);
-
- ptw_phys = cpu_debug_translate_address(env, pt_addr);
- index = (((vaddr & 0xC00000000000ULL) >> (55 - env->SEGBITS)) |
- ((vaddr & ((1ULL << env->SEGBITS) - 1) & 0xFFFFFFFFFFFFE000ULL) >> 9)) & 0xff0;
- ptw_phys += index;
-
- if (linux_pte_info.config & KERNEL_RIXI) {
- target_ulong mask = ~(-1 << linux_pte_info.softshift);
-
- *entrylo0 = ldq_phys(cs->as, ptw_phys);
- if (sw_pte_lo0) {
- *sw_pte_lo0 = *entrylo0 & ((target_ulong)(1 << linux_pte_info.softshift) - 1);
- }
- *entrylo0 = ((*entrylo0 & mask) << (64 - linux_pte_info.softshift)) |
- ((uint64_t)*entrylo0 >> linux_pte_info.softshift);
- *entrylo0 = (*entrylo0 & get_mtc0_entrylo_mask(env)) |
- (*entrylo0 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32));
-
- *entrylo1 = ldq_phys(cs->as, ptw_phys + 8);
- if (sw_pte_lo1) {
- *sw_pte_lo1 = *entrylo1 & ((target_ulong)(1 << linux_pte_info.softshift) - 1);
- }
- *entrylo1 = ((*entrylo1 & mask) << (64 - linux_pte_info.softshift)) |
- ((uint64_t)*entrylo1 >> linux_pte_info.softshift);
- *entrylo1 = (*entrylo1 & get_mtc0_entrylo_mask(env)) |
- (*entrylo1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32));
- } else {
- /* Get the entrylo values from pgt */
- *entrylo0 = ldq_phys(cs->as, ptw_phys);
- if (sw_pte_lo0) {
- *sw_pte_lo0 = *entrylo0 & ((target_ulong)(1 << linux_pte_info.softshift) - 1);
- }
- *entrylo0 >>= linux_pte_info.softshift;
-
- *entrylo1 = ldq_phys(cs->as, ptw_phys + 8);
- if (sw_pte_lo1) {
- *sw_pte_lo1 = *entrylo1 & ((target_ulong)(1 << linux_pte_info.softshift) - 1);
- }
- *entrylo1 >>= linux_pte_info.softshift;
- }
+ ;
}
-
-static inline target_ulong cpu_mips_get_pgd(CPUState *cs, target_long bad_vaddr)
-{
- MIPSCPU *cpu = MIPS_CPU(cs);
- CPUMIPSState *env = &cpu->env;
-
- if (unlikely(linux_pte_info.state == PROBE)) {
- int i;
- uint32_t lui_ins, lw_ins, srl_ins, config;
- target_ulong address;
- uint32_t ebase;
-
- /*
- * The exact TLB refill code varies depending on the kernel version
- * and configuration. Examine the TLB handler to extract
- * pgd_current_p and the shift required to convert an in-memory PTE
- * to TLB format.
- */
- static struct {
- uint32_t config;
- struct {
- uint32_t off;
- uint32_t op;
- uint32_t mask;
- } lui, lw, srl;
- } handlers[] = {
- /* 3.10+ 32-bit R6 Kernel */
- {
- KERNEL_RIXI,
- {0x00, 0x3c1b0000, 0xffff0000}, /* 0x3c1b803f : lui k1,%hi(pgd_current_p) */
- {0x08, 0x8f7b0000, 0xffff0000}, /* 0x8f7b3000 : lw k1,%lo(k1) */
- {0x2c, 0x003ad002, 0xfffff83f} /* 0x003ad082 : ror k0,k0,#shift */
- },
- /* 3.10+ 32-bit R6 Kernel */
- {
- KERNEL_RIXI | KERNEL_HIGHMEM,
- {0x00, 0x3c1b0000, 0xffff0000}, /* 0x3c1b803f : lui k1,%hi(pgd_current_p) */
- {0x08, 0x8f7b0000, 0xffff0000}, /* 0x8f7b3000 : lw k1,%lo(k1) */
- {0x34, 0x003ad002, 0xfffff83f} /* 0x003ad082 : ror k0,k0,#shift */
- },
- /* 3.10+ 32-bit R6 Kernel */
- {
- KERNEL_RIXI | KERNEL_HIGHMEM,
- {0x00, 0x3c1b0000, 0xffff0000}, /* 0x3c1b803f : lui k1,%hi(pgd_current_p) */
- {0x08, 0x8f7b0000, 0xffff0000}, /* 0x8f7b3000 : lw k1,%lo(k1) */
- {0x38, 0x003ad002, 0xfffff83f} /* 0x003ad082 : ror k0,k0,#shift */
- },
- /* 3.10+ 32-bit R2 Kernel */
- {
- 0,
- {0x00, 0x3c1b0000, 0xffff0000}, /* 0x3c1b803f : lui k1,%hi(pgd_current_p) */
- {0x08, 0x8f7b0000, 0xffff0000}, /* 0x8f7b3000 : lw k1,%lo(k1) */
- {0x2c, 0x001ad002, 0xfffff83f} /* 0x001ad002 : srl k0,k0,#shift */
- },
- /* 3.10+ 64-bit R2 Kernel */
- {
- KERNEL_64_BIT | KERNEL_PGD_C0_CONTEXT,
- {0x04, 0x3c1b0000, 0xffff0000}, /* 0x3c1b0000 : lui k1,%hi(swapper_pg_dir) */
- {0xac, 0xdf7b0000, 0xffff0000}, /* 0xdf7b0000 : ld k1,0(k1) */
- {0xd4, 0x001ad03a, 0xfffff83f} /* 0x001ad03a : dsrl k0,k0,#shift */
- },
- /* 3.10+ 64-bit R6 Kernel */
- {
- KERNEL_64_BIT | KERNEL_RIXI,
- {0x8c, 0x3c1b0000, 0xffff0000}, /* 0x3c1b0000 : lui k1,%hi(pgd_current_p) */
- {0x90, 0xdf7b0000, 0xffff0000}, /* 0xdf7b0000 : ld k1,%lo(k1) */
- {0xcc, 0x003ad03a, 0xfffff83f} /* 0x003ad03a : dror k0,k0,#shift */
- }
- };
-
- ebase = env->CP0_EBase - 0x80000000;
- linux_pte_info.config = 0;
-
- /* Match the kernel TLB refill exception handler against known code */
- for (i = 0; i < sizeof(handlers)/sizeof(handlers[0]); i++) {
- config = handlers[i].config;
- lui_ins = ldl_phys(cs->as, ebase + handlers[i].lui.off);
- lw_ins = ldl_phys(cs->as, ebase + handlers[i].lw.off);
- srl_ins = ldl_phys(cs->as, ebase + handlers[i].srl.off);
- if (((lui_ins & handlers[i].lui.mask) == handlers[i].lui.op) &&
- ((lw_ins & handlers[i].lw.mask) == handlers[i].lw.op) &&
- ((srl_ins & handlers[i].srl.mask) == handlers[i].srl.op))
- break;
- }
- if (i >= sizeof(handlers)/sizeof(handlers[0])) {
-#if defined(MUST_HAVE_FASTTLB)
- printf("TLBMiss handler dump:\n");
- for (i = 0; i < 0x100; i+= 4)
- printf("0x%08x: 0x%08x\n", ebase + i, ldl_phys(cs->as, ebase + i));
- fprintf(stderr, "TLBMiss handler signature not recognised\n");
- exit(1);
#endif
- fprintf(stderr, "TLBMiss handler signature not recognised, using slowpath\n");
- linux_pte_info.state = USESLOWTLB;
- linux_pte_info.pagetable_walk = NULL;
- goto done;
- }
- linux_pte_info.config = config;
+static TLBHandlerInfo tlb_refill_info = { PROBE, "Refill" };
- if (config & KERNEL_64_BIT) {
- linux_pte_info.pagetable_walk = &pagetable_walk64;
- } else {
- linux_pte_info.pagetable_walk = &pagetable_walk32;
- }
-
- /* swapper_pg_dir address */
- address = (int32_t)((lui_ins & 0xffff) << 16);
- linux_pte_info.swapper_pg_dir = cpu_debug_translate_address(env, address);
-
- if (!(config & KERNEL_PGD_C0_CONTEXT)) {
- address += (((int32_t)(lw_ins & 0xffff)) << 16) >> 16;
- address = cpu_debug_translate_address(env, address);
- }
-
- linux_pte_info.state = USEFASTTLB;
- linux_pte_info.pgd_current_p = address;
- linux_pte_info.softshift = (srl_ins >> 6) & 0x1f;
- }
+/*
+ * Save the guest kernel's TLB Refill handler into the
+ * tlb_refill_info.handler buffer, which the interpreter
+ * executes later.
+ */
+static inline void tlb_refill_exception_prepare(CPUMIPSState *env)
+{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ int i;
-done:
- /* Get pgd_current */
- if (linux_pte_info.state == USEFASTTLB) {
- if (linux_pte_info.config & KERNEL_64_BIT) {
- target_ulong address = 0;
- /*
- * The kernel currently implicitly assumes that the
- * MIPS SEGBITS parameter for the processor is
- * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
- * allocate virtual addresses outside the maximum
- * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
- * that doesn't prevent user code from accessing the
- * higher xuseg addresses. Here, we make sure that
- * everything but the lower xuseg addresses goes down
- * the module_alloc/vmalloc path.
- */
- address = ((uint64_t)bad_vaddr) >> 40;
- if (likely(!address)) {
- if (linux_pte_info.config & KERNEL_PGD_C0_CONTEXT) {
- /*
- * &pgd << 11 stored in CONTEXT [23..63].
- */
- address = env->CP0_Context;
- address = ((uint64_t)address >> 23) << 23;
- /* 1 0 1 0 1 << 6 xkphys cached */
- address |= 0x540;
- /* dror k1,k1,0xb */
- address = ((uint64_t)address >> 11) |
- (((uint64_t)address & 0x7ff) << 53);
- return address;
- } else {
- return ldl_phys(cs->as, linux_pte_info.pgd_current_p);
- }
- } else if (bad_vaddr < 0) {
- /* swapper_pg_dir address */
- return linux_pte_info.swapper_pg_dir;
- } else {
- /*
- * We get here if we are an xsseg address, or if we are
- * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
- *
- * Ignoring xsseg (assume disabled so would generate
- * (address errors?), the only remaining possibility
- * is the upper xuseg addresses. On processors with
- * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
- * addresses would have taken an address error. We try
- * to mimic that here by taking a load/istream page
- * fault.
- */
- return 0; /* fallback to software handler and do page fault */
- }
- } else {
- return ldl_phys(cs->as, linux_pte_info.pgd_current_p);
+ if (unlikely(tlb_refill_info.state == PROBE)) {
+ tlb_refill_info.ebase = env->CP0_EBase;
+ for (i = 0; i < TLB_HANDLER_SIZE; i++) {
+ tlb_refill_info.handler[i] = ldl_phys(cs->as, tlb_refill_info.ebase - 0x80000000 + (i << 2));
+ CPU.do_tlbwr = 1;
}
+ tlb_refill_info.state = USEFASTTLB;
+ dump_handler(&tlb_refill_info);
}
- return 0;
}
-static inline int cpu_mips_tlb_refill(CPUState *cs, target_ulong address, int rw,
- int mmu_idx, int is_softmmu)
+static inline void tlb_exception_interpreter_prepare(CPUMIPSState *env, target_ulong address, int exception)
{
- MIPSCPU *cpu = MIPS_CPU(cs);
- CPUMIPSState *env = &cpu->env;
-
- int32_t saved_hflags;
- target_ulong saved_badvaddr,saved_entryhi,saved_context,saved_xcontext;
- target_ulong pgd_addr;
- target_ulong fault_addr;
- target_ulong entrylo0, entrylo1;
- int ret;
-
- saved_badvaddr = env->CP0_BadVAddr;
- saved_context = env->CP0_Context;
- saved_xcontext = env->CP0_XContext;
- saved_entryhi = env->CP0_EntryHi;
- saved_hflags = env->hflags;
-
env->CP0_BadVAddr = address;
env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
((address >> 9) & 0x007ffff0);
@@ -766,50 +389,58 @@ static inline int cpu_mips_tlb_refill(CPUState *cs, target_ulong address, int rw
((address & ((1ULL << env->SEGBITS) - 1) & 0xFFFFFFFFFFFFE000ULL) >> 9);
#endif
- pgd_addr = 0;
- pgd_addr = cpu_mips_get_pgd(cs, address);
-
- /* if pgd_addr is unknown return TLBRET_NOMATCH
- * to allow software handler to run
- */
- if (unlikely(pgd_addr == 0)) {
- ret = TLBRET_NOMATCH;
- goto out;
- }
+ CPU.pc = 0;
+#if defined(TARGET_MIPS64)
+ int R = env->CP0_BadVAddr >> 62;
+ int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
+ int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
+ int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
- env->hflags = MIPS_HFLAG_KM;
- fault_addr = env->CP0_BadVAddr;
+ if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) &&
+ (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F))))
+ CPU.pc = 0x080 / 4;
+#endif
- linux_pte_info.pagetable_walk(cs, pgd_addr, fault_addr, &entrylo0, &entrylo1, NULL, NULL);
+ if (likely(tlb_refill_info.state != PROBE))
+ return;
- /* Refill the TLB */
- env->CP0_EntryLo0 = entrylo0;
- env->CP0_EntryLo1 = entrylo1;
- r4k_helper_tlbwr(env);
+ switch (exception) {
+ case EXCP_TLBF:
+ tlb_refill_exception_prepare(env);
+ break;
+ default:
+ fprintf(stderr, "%s : Unexpected TLB exception %d\n", __func__, exception);
+ exit(1);
+ }
+}
- /* Since we know the TLB contents, we can
- * return the TLB lookup value here.
- */
+/*
+ * Once the interpreter has finished the page walk,
+ * this function verifies the resulting TLB entry.
+ * Most of the performance gain comes from here: when
+ * all the checks pass it returns TLBRET_MATCH, so no
+ * new exception has to be raised.
+ */
+static inline int tlb_exception_return(CPUState *cs, target_ulong address, int rw,
+ int mmu_idx)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
target_ulong mask = env->CP0_PageMask | ~(TARGET_PAGE_MASK << 1);
- target_ulong lo = (address & mask & ~(mask >> 1)) ? entrylo1 : entrylo0;
+ target_ulong lo = (address & mask & ~(mask >> 1)) ? env->CP0_EntryLo1 : env->CP0_EntryLo0;
uint16_t RI = (lo >> CP0EnLo_RI) & 1;
uint16_t XI = (lo >> CP0EnLo_XI) & 1;
- if (rw == MMU_INST_FETCH && (XI)) {
- ret = TLBRET_XI;
- goto out;
- }
- if (rw == MMU_DATA_LOAD && (RI)) {
- ret = TLBRET_RI;
- goto out;
- }
+ if (rw == MMU_INST_FETCH && (XI))
+ return TLBRET_XI;
+
+ if (rw == MMU_DATA_LOAD && (RI))
+ return TLBRET_RI;
/* Is the TLB entry valid? */
- if ((lo & (1 << CP0EnLo_V)) == 0) {
- ret = TLBRET_INVALID;
- goto out;
- }
+ if ((lo & (1 << CP0EnLo_V)) == 0)
+ return TLBRET_INVALID;
/* Is this a read access or a write to a modifiable page? */
if (rw != MMU_DATA_STORE || (lo & (1 << CP0EnLo_D))) {
@@ -822,18 +453,54 @@ static inline int cpu_mips_tlb_refill(CPUState *cs, target_ulong address, int rw
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
- ret = TLBRET_MATCH;
- goto out;
+ return TLBRET_MATCH;
}
- ret = TLBRET_DIRTY;
+ return TLBRET_DIRTY;
+}
+
+/*
+ * This function routes Refill exceptions through the TLB exception
+ * interpreter; all other TLB exceptions are still handled by the guest kernel.
+ */
+static inline int do_tlb_refill(CPUState *cs, vaddr address, int rw, int mmu_idx)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int tlbret = TLBRET_NOMATCH;
+
+ target_ulong saved_badvaddr;
+ target_ulong saved_entryhi;
+ target_ulong saved_context;
+ target_ulong saved_xcontext;
+ target_ulong saved_pagemask;
+ target_ulong saved_pagegrain;
+
+ saved_badvaddr = env->CP0_BadVAddr;
+ saved_context = env->CP0_Context;
+ saved_xcontext = env->CP0_XContext;
+ saved_entryhi = env->CP0_EntryHi;
+ saved_pagemask = env->CP0_PageMask;
+ saved_pagegrain = env->CP0_PageGrain;
+
+ if (unlikely(tlb_refill_info.state == USESLOWTLB))
+ goto out;
+
+ tlb_exception_interpreter_prepare(env, address, EXCP_TLBF);
+
+ if (tlb_exception_interpreter(env, tlb_refill_info.handler, TLB_HANDLER_SIZE))
+ goto out;
+
+ tlbret = tlb_exception_return(cs, address, rw, mmu_idx);
out:
env->CP0_BadVAddr = saved_badvaddr;
env->CP0_Context = saved_context;
env->CP0_XContext = saved_xcontext;
env->CP0_EntryHi = saved_entryhi;
- env->hflags = saved_hflags;
- return ret;
+ env->CP0_PageGrain = saved_pagegrain;
+ env->CP0_PageMask = saved_pagemask;
+
+ return tlbret;
}
hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
@@ -843,37 +510,45 @@ hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
hwaddr phys_addr;
int prot, ret;
-#if defined(TARGET_MIPS64)
- if (!(linux_pte_info.config & KERNEL_64_BIT))
- addr = (int32_t)addr;
-#endif
-
ret = get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT);
if (ret != TLBRET_MATCH) {
- target_ulong pgd_addr = cpu_mips_get_pgd(cs, addr);
-
- phys_addr = -1;
- if (likely(pgd_addr)) {
- target_ulong entrylo0, entrylo1;
- target_ulong sw_pte_lo0, sw_pte_lo1;
-
- linux_pte_info.pagetable_walk(cs, pgd_addr, addr,
- &entrylo0, &entrylo1,
- &sw_pte_lo0, &sw_pte_lo1);
-
- target_ulong mask = env->CP0_PageMask | ~(TARGET_PAGE_MASK << 1);
- target_ulong lo = (addr & mask & ~(mask >> 1)) ? entrylo1 : entrylo0;
- target_ulong sw_pte = (addr & mask & ~(mask >> 1)) ? sw_pte_lo1 : sw_pte_lo0;
-
- if (sw_pte & PTE_PAGE_PRESENT) {
- phys_addr = ((lo >> CP0EnLo_PFN) << 12) | (addr & (mask >> 1));
+ target_ulong saved_badvaddr;
+ target_ulong saved_entryhi;
+ target_ulong saved_context;
+ target_ulong saved_xcontext;
+ target_ulong saved_pagemask;
+ target_ulong saved_pagegrain;
+
+ saved_pagemask = env->CP0_PageMask;
+ saved_pagegrain = env->CP0_PageGrain;
+ saved_badvaddr = env->CP0_BadVAddr;
+ saved_context = env->CP0_Context;
+ saved_xcontext = env->CP0_XContext;
+ saved_entryhi = env->CP0_EntryHi;
+
+ tlb_exception_interpreter_prepare(env, addr, EXCP_TLBF);
+
+ if (likely(tlb_refill_info.state == USEFASTTLB)) {
+ CPU.do_tlbwr = 0;
+ if (tlb_exception_interpreter(env, tlb_refill_info.handler, TLB_HANDLER_SIZE) != 0) {
+ phys_addr = -1;
+ fprintf(stderr, "cpu_get_phys_page_debug() fails for vaddr: 0x" TARGET_FMT_plx "\n", addr);
} else {
- qemu_log("cpu_get_phys_page_debug() invalid mapping for vaddr: 0x" TARGET_FMT_plx "\n", addr);
+ target_ulong mask = env->CP0_PageMask | ~(TARGET_PAGE_MASK << 1);
+ target_ulong lo = (addr & mask & ~(mask >> 1)) ? CPU.CP0_EntryLo1 : CPU.CP0_EntryLo0;
+ phys_addr = ((lo >> CP0EnLo_PFN) << 12) | (addr & (mask >> 1));
}
- } else {
- qemu_log("cpu_get_phys_page_debug() fails for vaddr: 0x" TARGET_FMT_plx "\n", addr);
+ CPU.do_tlbwr = 1;
}
+
+ env->CP0_BadVAddr = saved_badvaddr;
+ env->CP0_Context = saved_context;
+ env->CP0_XContext = saved_xcontext;
+ env->CP0_EntryHi = saved_entryhi;
+ env->CP0_PageGrain = saved_pagegrain;
+ env->CP0_PageMask = saved_pagemask;
}
+
return phys_addr;
}
#endif
@@ -912,15 +587,15 @@ int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
mmu_idx, TARGET_PAGE_SIZE);
ret = 0;
} else if (ret == TLBRET_NOMATCH)
- ret = cpu_mips_tlb_refill(cs, address, rw, mmu_idx, 1);
+ ret = do_tlb_refill(cs, address, rw, mmu_idx);
if (ret < 0)
#endif
+
{
raise_mmu_exception(env, address, rw, ret);
ret = 1;
}
-
return ret;
}
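The even/odd EntryLo selection and the physical-address reconstruction used by tlb_exception_return() and mips_cpu_get_phys_page_debug() above can be exercised in isolation. A standalone sketch, assuming 4KB pages (TARGET_PAGE_BITS = 12) and the usual CP0EnLo_PFN = 6 field position; the numeric values in main() are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK (~((1u << TARGET_PAGE_BITS) - 1))
#define CP0EnLo_PFN      6

static uint32_t phys_from_pair(uint32_t vaddr, uint32_t pagemask,
                               uint32_t entrylo0, uint32_t entrylo1)
{
    /* mask covers the even+odd page pair mapped by one TLB entry */
    uint32_t mask = pagemask | ~(TARGET_PAGE_MASK << 1);
    /* the bit just above the per-page offset picks EntryLo0 (even) or
     * EntryLo1 (odd) */
    uint32_t lo = (vaddr & mask & ~(mask >> 1)) ? entrylo1 : entrylo0;
    /* PFN sits above the C/D/V/G bits; the page offset comes from vaddr */
    return ((lo >> CP0EnLo_PFN) << 12) | (vaddr & (mask >> 1));
}

int main(void)
{
    uint32_t lo0 = 1u << CP0EnLo_PFN;   /* even page maps PFN 1 */
    uint32_t lo1 = 2u << CP0EnLo_PFN;   /* odd page maps PFN 2  */

    /* 0x00401234 has bit 12 set, so it is the odd page: expect 0x00002234 */
    printf("phys = 0x%08x\n",
           (unsigned)phys_from_pair(0x00401234u, 0, lo0, lo1));
    return 0;
}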
diff --git a/target-mips/translate.c b/target-mips/translate.c
index 84bdaba1fb..b4e49de2db 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -1348,6 +1348,738 @@ enum {
OPC_BINSRI_df = (0x7 << 23) | OPC_MSA_BIT_09,
};
+#if !defined(CONFIG_USER_ONLY)
+
+// #define DEBUG_INTERPRETER 1
+
+#if defined(DEBUG_INTERPRETER)
+#define DEBUG_DISSAS(opc) do { \
+ int i; \
+ fprintf(stderr, "%s:%d\t%s pc %u op 0x%x rs %d rt %d rd %d sa %d imm 0x%x\n", \
+ __func__, __LINE__, opc, CPU.pc, op, rs, rt, rd, sa, (uint32_t)imm);\
+ for (i = 0; i < 32; i++) { \
+ fprintf(stderr, "$%d = 0x" TARGET_FMT_lx " ", i, CPU.gpr_reg[i]); \
+ if ((i % 4) == 3) fprintf(stderr, "\n"); \
+ } \
+ fprintf(stderr, "\n"); \
+} while (0)
+#else
+#define DEBUG_DISSAS(opc)
+#endif
+
+#define DEBUG_ERROR(msg) do { \
+ fprintf(stderr, "%s:%d\t%s pc %u op 0x%x rs %d rt %d rd %d sa %d imm 0x%x\n", \
+ __func__, __LINE__, msg, CPU.pc, op, rs, rt, rd, sa, (uint32_t)imm);\
+ exit(1); \
+} while(0)
+
+CPUInterpreterContext CPU;
+
+static inline hwaddr translate_address(CPUMIPSState *env, target_ulong address) {
+
+#define USEG_LIMIT 0x7FFFFFFFUL
+#define KSEG0_BASE 0x80000000UL
+#define KSEG1_BASE 0xA0000000UL
+#define KSEG2_BASE 0xC0000000UL
+#define KSEG3_BASE 0xE0000000UL
+
+#define KVM_KSEG0_BASE 0x40000000UL
+#define KVM_KSEG2_BASE 0x60000000UL
+
+ if (address <= USEG_LIMIT) {
+ /* useg */
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ return address & 0xFFFFFFFF;
+ } else {
+ return address;
+ }
+#if defined(TARGET_MIPS64)
+ } else if (address < 0x4000000000000000ULL) {
+ return address;
+ } else if (address < 0x8000000000000000ULL) {
+ return address;
+ } else if (address < 0xC000000000000000ULL) {
+ /* xkphys */
+ if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
+ return address & env->PAMask;
+ } else {
+ return address;
+ }
+ } else if (address < 0xFFFFFFFF80000000ULL) {
+ /* xkseg */
+ return address;
+#endif
+ } else if (address < (int32_t)KSEG1_BASE) {
+ /* kseg0 */
+ return address - (int32_t)KSEG0_BASE;
+ } else if (address < (int32_t)KSEG2_BASE) {
+ /* kseg1 */
+ return address - (int32_t)KSEG1_BASE;
+ } else
+ return address;
+}
+
+int tlb_exception_interpreter(CPUMIPSState *env, uint32_t *code, uint32_t size)
+{
+ uint32_t opcode;
+ int rs, rt, rd, sa;
+ uint32_t op, op1;
+ int16_t imm;
+
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ CPU.branch_addr = 0;
+
+ CPU.CP0_EntryLo0 = env->CP0_EntryLo0;
+ CPU.CP0_EntryLo1 = env->CP0_EntryLo1;
+ CPU.CP0_BadVAddr = env->CP0_BadVAddr;
+ CPU.CP0_EntryHi = env->CP0_EntryHi;
+ CPU.CP0_Context = env->CP0_Context;
+ CPU.CP0_XContext = env->CP0_XContext;
+ CPU.CP0_PageMask = env->CP0_PageMask;
+ CPU.CP0_PageGrain = env->CP0_PageGrain;
+ CPU.CP0_PageGrain_rw_bitmask = env->CP0_PageGrain_rw_bitmask;
+ CPU.CP0_Index = env->CP0_Index;
+
+ while (1) {
+
+ op = MASK_OP_MAJOR(code[CPU.pc]);
+ rs = (code[CPU.pc] >> 21) & 0x1f;
+ rt = (code[CPU.pc] >> 16) & 0x1f;
+ rd = (code[CPU.pc] >> 11) & 0x1f;
+ sa = (code[CPU.pc] >> 6) & 0x1f;
+ imm = (int16_t)code[CPU.pc];
+ opcode = code[CPU.pc];
+
+ CPU.pc++;
+
+ switch (op) {
+ case OPC_SPECIAL:
+ op1 = MASK_SPECIAL(opcode);
+ switch (op1) {
+ case OPC_JR:
+ case OPC_JALR:
+ DEBUG_DISSAS("Jump");
+ return -2;
+ case OPC_SLL:
+ CPU.gpr_reg[rt] = (int32_t)(CPU.gpr_reg[rt] << sa);
+ DEBUG_DISSAS("sll");
+ break;
+ case OPC_SRA:
+ CPU.gpr_reg[rt] = (int32_t)((int32_t)CPU.gpr_reg[rt] >> sa);
+ DEBUG_DISSAS("sra");
+ break;
+ case OPC_SRL:
+ switch ((opcode >> 21) & 0x1f) {
+ case 1:
+ {
+ uint32_t mask = ~(-1 << sa);
+ CPU.gpr_reg[rd] = (int32_t)((((uint32_t)CPU.gpr_reg[rt] & mask) << (32 - sa)) |
+ (((uint32_t)CPU.gpr_reg[rt]) >> sa));
+ DEBUG_DISSAS("ror");
+ break;
+ }
+ case 0:
+ CPU.gpr_reg[rt] = (int32_t)((uint32_t)CPU.gpr_reg[rt] >> sa);
+ DEBUG_DISSAS("srl");
+ break;
+ default:
+ DEBUG_ERROR("srl Unknown");
+ break;
+ }
+ break;
+ case OPC_ROTR:
+ {
+ target_ulong mask = ~(-1 << sa);
+ CPU.gpr_reg[rd] = (int32_t)(((CPU.gpr_reg[rt] & mask) << (32 - sa)) |
+ (((target_ulong)CPU.gpr_reg[rt]) >> sa));
+ DEBUG_DISSAS("rotr");
+ break;
+ }
+ case OPC_ADDU:
+ CPU.gpr_reg[rd] = (int32_t)(CPU.gpr_reg[rs] + CPU.gpr_reg[rt]);
+ DEBUG_DISSAS("addu");
+ break;
+ case OPC_SUBU:
+ CPU.gpr_reg[rd] = (int32_t)(CPU.gpr_reg[rs] - CPU.gpr_reg[rt]);
+ DEBUG_DISSAS("subu");
+ break;
+ case OPC_AND: /* Logic*/
+ CPU.gpr_reg[rd] = CPU.gpr_reg[rs] & CPU.gpr_reg[rt];
+ DEBUG_DISSAS("and");
+ break;
+ case OPC_OR:
+ CPU.gpr_reg[rd] = CPU.gpr_reg[rs] | CPU.gpr_reg[rt];
+ DEBUG_DISSAS("or");
+ break;
+ case OPC_XOR:
+ CPU.gpr_reg[rd] = CPU.gpr_reg[rs] ^ CPU.gpr_reg[rt];
+ DEBUG_DISSAS("xor");
+ break;
+#if defined(TARGET_MIPS64)
+ /* MIPS64 specific opcodes */
+ case OPC_DSLL:
+ CPU.gpr_reg[rd] = CPU.gpr_reg[rt] << sa;
+ DEBUG_DISSAS("dsll");
+ break;
+ case OPC_DSRA:
+ CPU.gpr_reg[rd] = CPU.gpr_reg[rt] >> sa;
+ DEBUG_DISSAS("dsra");
+ break;
+ case OPC_DSLL32:
+ CPU.gpr_reg[rd] = CPU.gpr_reg[rt] << (sa + 32);
+ DEBUG_DISSAS("dsll32");
+ break;
+ case OPC_DSRA32:
+ CPU.gpr_reg[rd] = CPU.gpr_reg[rt] >> (sa + 32);
+ DEBUG_DISSAS("dsra32");
+ break;
+ case OPC_DSRL:
+ switch ((opcode >> 21) & 0x1f) {
+ case 1:
+ {
+ target_ulong mask = ~(-1 << sa);
+ CPU.gpr_reg[rd] = (((target_ulong)CPU.gpr_reg[rt] & mask) << (64 - sa)) |
+ (((target_ulong)CPU.gpr_reg[rt]) >> sa);
+ DEBUG_DISSAS("dror");
+ break;
+ }
+ case 0:
+ CPU.gpr_reg[rt] = ((target_ulong)CPU.gpr_reg[rt] >> sa);
+ DEBUG_DISSAS("dsrl");
+ break;
+ default:
+ DEBUG_DISSAS("dsrl Unknown");
+ break;
+ }
+ break;
+ case OPC_DROTR:
+ {
+ target_ulong mask = ~(-1 << sa);
+ CPU.gpr_reg[rd] = (((target_ulong)CPU.gpr_reg[rt] & mask) << (64 - sa)) |
+ (((target_ulong)CPU.gpr_reg[rt]) >> sa);
+ DEBUG_DISSAS("drotr");
+ break;
+ }
+ case OPC_DROTR32:
+ {
+ target_ulong mask = ~(-1 << (sa + 32));
+ CPU.gpr_reg[rd] = (((target_ulong)CPU.gpr_reg[rt] & mask) << (32 - sa)) |
+ (((target_ulong)CPU.gpr_reg[rt]) >> (sa + 32));
+ DEBUG_DISSAS("drotr32");
+ break;
+ }
+ case OPC_DSRL32:
+ CPU.gpr_reg[rd] = ((target_ulong)CPU.gpr_reg[rt]) >> (sa + 32);
+ DEBUG_DISSAS("dsrl32");
+ break;
+ case OPC_DADDU:
+ CPU.gpr_reg[rd] = CPU.gpr_reg[rs] + CPU.gpr_reg[rt];
+ DEBUG_DISSAS("daddu");
+ break;
+ case OPC_DSUBU:
+ CPU.gpr_reg[rd] = CPU.gpr_reg[rs] - CPU.gpr_reg[rt];
+ DEBUG_DISSAS("dsubu");
+ break;
+#endif
+ default:
+ DEBUG_ERROR("Unknown");
+ break;
+ }
+ break;
+ case OPC_SPECIAL3:
+ {
+ target_ulong mask;
+
+ op1 = MASK_SPECIAL3(opcode);
+ switch (op1) {
+ case OPC_EXT:
+ mask = (~(-1 << (rd + 1))) << sa;
+ CPU.gpr_reg[rt] = (int32_t)(((target_ulong)(CPU.gpr_reg[rs] & mask)) >> sa);
+ DEBUG_DISSAS("ext");
+ break;
+ case OPC_INS:
+ mask = ~(-1 << (rd - sa + 1));
+ CPU.gpr_reg[rt] = (int32_t)((CPU.gpr_reg[rt] & ~(mask << sa)) |
+ ((CPU.gpr_reg[rs] & mask) << sa));
+ DEBUG_DISSAS("ins");
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DEXT:
+ mask = (~(-1 << (rd + 1))) << sa;
+ CPU.gpr_reg[rt] = ((target_ulong)(CPU.gpr_reg[rs] & mask)) >> sa |
+ (CPU.gpr_reg[rt] & ~((target_ulong)mask >> sa));
+ DEBUG_DISSAS("dext");
+ break;
+ case OPC_DINSM:
+ mask = ~(-1 << ((rd + 32) - sa + 1));
+ CPU.gpr_reg[rt] = (CPU.gpr_reg[rt] & ~(mask << sa)) |
+ ((CPU.gpr_reg[rs] & mask) << sa);
+ DEBUG_DISSAS("dinsm");
+ break;
+ case OPC_DINS:
+ mask = ~(-1 << (rd - sa + 1));
+ CPU.gpr_reg[rt] = (CPU.gpr_reg[rt] & ~(mask << sa)) |
+ ((CPU.gpr_reg[rs] & mask) << sa);
+ DEBUG_DISSAS("dins");
+ break;
+#endif
+ default:
+ DEBUG_ERROR("Unknown");
+ break;
+ }
+ break;
+ }
+ case OPC_REGIMM:
+ op1 = MASK_REGIMM(opcode);
+ switch (op1) {
+ case OPC_BLTZL: /* REGIMM branches */
+ if (CPU.gpr_reg[rs] < 0) {
+ CPU.branch_addr = CPU.pc + (int32_t)imm;
+ DEBUG_DISSAS("bltzl");
+ continue;
+ } else {
+ CPU.pc++;
+ }
+ DEBUG_DISSAS("bltzl");
+ break;
+ case OPC_BGEZL:
+ if (CPU.gpr_reg[rs] >= 0) {
+ CPU.branch_addr = CPU.pc + (int32_t)imm;
+ DEBUG_DISSAS("bgezl");
+ continue;
+ } else {
+ CPU.pc++;
+ }
+ DEBUG_DISSAS("bgezl");
+ break;
+ case OPC_BLTZ:
+ if (CPU.gpr_reg[rs] < 0) {
+ CPU.branch_addr = CPU.pc + (int32_t)imm;
+ DEBUG_DISSAS("bltz");
+ continue;
+ }
+ DEBUG_DISSAS("bltz");
+ break;
+ case OPC_BGEZ:
+ if (CPU.gpr_reg[rs] >= 0) {
+ CPU.branch_addr = CPU.pc + (int32_t)imm;
+ DEBUG_DISSAS("bgez");
+ continue;
+ }
+ DEBUG_DISSAS("bgez");
+ break;
+ default: /* Invalid */
+ DEBUG_ERROR("Unknown");
+ break;
+ }
+ break;
+ case OPC_CP0:
+ op1 = MASK_CP0(opcode);
+ switch (op1) {
+ case OPC_MFC0:
+ switch (rd) {
+ case 4:
+ switch (opcode & 0x7) {
+ case 0:
+ // Context
+ CPU.gpr_reg[rt] = (int32_t)CPU.CP0_Context;
+ DEBUG_DISSAS("mfc0 Context");
+ break;
+ default:
+ DEBUG_DISSAS("mfc0 reg 4: Unknown select");
+ break;
+ }
+ break;
+ case 5:
+ switch (opcode & 0x7) {
+ case 0:
+ CPU.gpr_reg[rt] = CPU.CP0_PageMask;
+ DEBUG_DISSAS("mfc0 PageMask");
+ break;
+ case 1:
+ CPU.gpr_reg[rt] = CPU.CP0_PageGrain;
+ DEBUG_DISSAS("mfc0 PageGrain");
+ break;
+ default:
+ DEBUG_ERROR("mfc0 reg 5 Unknown select");
+ }
+ break;
+ case 8:
+ switch (opcode & 0x7) {
+ case 0:
+ // BadVAddr
+ CPU.gpr_reg[rt] = CPU.CP0_BadVAddr;
+ DEBUG_DISSAS("mfc0 BadVAddr");
+ break;
+ default:
+ DEBUG_ERROR("mfc0 reg 8 Unknown select");
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case 20:
+ switch (opcode & 0x7) {
+ case 0:
+ // XContext
+ CPU.gpr_reg[rt] = CPU.CP0_XContext;
+ DEBUG_DISSAS("mfc0 XContext");
+ break;
+ default:
+ DEBUG_DISSAS("mfc0 reg 20: Unknown select");
+ break;
+ }
+ break;
+#endif
+ case 13:
+ switch (opcode & 0x7) {
+ case 0:
+ CPU.gpr_reg[rt] = CPU.CP0_Cause;
+ DEBUG_DISSAS("mfc0 CP0_Cause");
+ break;
+ default:
+ DEBUG_ERROR("mfc0 Unknown select");
+ break;
+ }
+ break;
+ default:
+ DEBUG_ERROR("mfc0 Unknown");
+ break;
+ }
+ break;
+ case OPC_MTC0:
+ switch (rd) {
+ case 2:
+ switch (opcode & 0x7) {
+ case 0:
+ {
+ // EntryLo0
+ target_ulong rxi = CPU.gpr_reg[rt] & (CPU.CP0_PageGrain & (3u << CP0PG_XIE));
+ CPU.CP0_EntryLo0 = (CPU.gpr_reg[rt] & 0x3FFFFFFF) | (rxi << (CP0EnLo_XI));
+ DEBUG_DISSAS("mtc0 EntryLo0");
+ break;
+ }
+ default:
+ DEBUG_ERROR("mtc0 reg 2 Unknown select");
+ break;
+ }
+ break;
+ case 3:
+ switch (opcode & 0x7) {
+ case 0:
+ {
+ // EntryLo1
+ target_ulong rxi = CPU.gpr_reg[rt] & (CPU.CP0_PageGrain & (3u << CP0PG_XIE));
+ CPU.CP0_EntryLo1 = (CPU.gpr_reg[rt] & 0x3FFFFFFF) | (rxi << (CP0EnLo_XI));
+ DEBUG_DISSAS("mtc0 EntryLo1");
+ break;
+ }
+ default:
+ DEBUG_ERROR("mtc0 reg 3 Unknown select");
+ break;
+ }
+ break;
+ case 5:
+ switch (opcode & 0x7) {
+ case 0:
+ {
+ uint64_t mask = CPU.gpr_reg[rt] >> (TARGET_PAGE_BITS + 1);
+ if (!(env->insn_flags & ISA_MIPS32R6) || (CPU.gpr_reg[rt] == ~0) ||
+ (mask == 0x0000 || mask == 0x0003 || mask == 0x000F ||
+ mask == 0x003F || mask == 0x00FF || mask == 0x03FF ||
+ mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) {
+ CPU.CP0_PageMask = CPU.gpr_reg[rt] & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
+ }
+ DEBUG_DISSAS("mtc0 PageMask");
+ break;
+ }
+ case 1:
+ CPU.CP0_PageGrain = (CPU.gpr_reg[rt] & CPU.CP0_PageGrain_rw_bitmask) |
+ (CPU.CP0_PageGrain & ~CPU.CP0_PageGrain_rw_bitmask);
+ DEBUG_DISSAS("mtc0 PageGrain");
+ break;
+ default:
+ DEBUG_ERROR("mtc0 reg 5 Unknown select");
+ }
+ break;
+ default:
+ DEBUG_ERROR("mtc0 Unknown");
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DMFC0:
+ switch (rd) {
+ case 4:
+ switch (opcode & 0x7) {
+ case 0:
+ // Context
+ CPU.gpr_reg[rt] = CPU.CP0_Context;
+ DEBUG_DISSAS("dmfc0 Context");
+ break;
+ default:
+ DEBUG_DISSAS("dmfc0 reg 4: Unknown select");
+ break;
+ }
+ break;
+ case 5:
+ switch (opcode & 0x7) {
+ case 0:
+ CPU.gpr_reg[rt] = CPU.CP0_PageMask;
+ DEBUG_DISSAS("mfc0 PageMask");
+ break;
+ case 1:
+ CPU.gpr_reg[rt] = CPU.CP0_PageGrain;
+ DEBUG_DISSAS("mfc0 PageGrain");
+ break;
+ default:
+ DEBUG_ERROR("mfc0 reg 5 Unknown select");
+ }
+ break;
+ case 8:
+ switch (opcode & 0x7) {
+ case 0:
+ // BadVAddr
+ CPU.gpr_reg[rt] = CPU.CP0_BadVAddr;
+ DEBUG_DISSAS("dmfc0 BadVAddr");
+ break;
+ default:
+ DEBUG_DISSAS("dmfc0 reg 8: Unknown select");
+ break;
+ }
+ break;
+ case 20:
+ switch (opcode & 0x7) {
+ case 0:
+ // XContext
+ CPU.gpr_reg[rt] = CPU.CP0_XContext;
+ DEBUG_DISSAS("dmfc0 XContext");
+ break;
+ default:
+ DEBUG_DISSAS("dmfc0 reg 20: Unknown select");
+ break;
+ }
+ break;
+ case 31:
+ switch(opcode & 0x7) {
+ case 2 ... 7:
+ CPU.gpr_reg[rt] = CPU.CP0_KScratch[(opcode & 0x7) - 2];
+ DEBUG_DISSAS("dmfc0 KScratch");
+ break;
+ default:
+ DEBUG_ERROR("dmfc0 Unknown sel");
+ }
+ break;
+ default:
+ DEBUG_ERROR("dmfc0 Unknown");
+ break;
+ }
+ break;
+ case OPC_DMTC0:
+ switch (rd) {
+ case 2:
+ switch (opcode & 0x7) {
+ case 0:
+ {
+ // EntryLo0
+ target_ulong rxi = CPU.gpr_reg[rt] & ((CPU.CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
+ CPU.CP0_EntryLo0 = (CPU.gpr_reg[rt] & 0x3FFFFFFF) | rxi;
+ DEBUG_DISSAS("dmtc0 EntryLo0");
+ break;
+
+ }
+ default:
+ DEBUG_ERROR("dmtc0 Unknown sel");
+ break;
+ }
+ break;
+ case 3:
+ switch (opcode & 0x7) {
+ case 0:
+ {
+ // EntryLo1
+ target_ulong rxi = CPU.gpr_reg[rt] & ((CPU.CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
+ CPU.CP0_EntryLo1 = (CPU.gpr_reg[rt] & 0x3FFFFFFF) | rxi;
+ DEBUG_DISSAS("dmtc0 EntryLo1");
+ break;
+ }
+ default:
+ DEBUG_ERROR("dmtc0 Unknown select");
+ break;
+ }
+ break;
+ case 5:
+ switch (opcode & 0x7) {
+ case 0:
+ {
+ uint64_t mask = CPU.gpr_reg[rt] >> (TARGET_PAGE_BITS + 1);
+ if (!(env->insn_flags & ISA_MIPS32R6) || (CPU.gpr_reg[rt] == ~0) ||
+ (mask == 0x0000 || mask == 0x0003 || mask == 0x000F ||
+ mask == 0x003F || mask == 0x00FF || mask == 0x03FF ||
+ mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) {
+ CPU.CP0_PageMask = CPU.gpr_reg[rt] & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
+ }
+ DEBUG_DISSAS("mtc0 PageMask");
+ break;
+ }
+ case 1:
+ CPU.CP0_PageGrain = (CPU.gpr_reg[rt] & CPU.CP0_PageGrain_rw_bitmask) |
+ (CPU.CP0_PageGrain & ~CPU.CP0_PageGrain_rw_bitmask);
+ DEBUG_DISSAS("mtc0 PageGrain");
+ break;
+ default:
+ DEBUG_ERROR("mtc0 reg 5 Unknown select");
+ }
+ break;
+ case 31:
+ switch(opcode & 0x7) {
+ case 2 ... 7:
+ CPU.CP0_KScratch[(opcode & 0x7) - 2] = CPU.gpr_reg[rt];
+ DEBUG_DISSAS("dmtc0 KScratch");
+ break;
+ default:
+ DEBUG_ERROR("dmtc0 Unknown sel");
+ }
+ break;
+ default:
+ DEBUG_ERROR("dmtc0 Unknown");
+ break;
+ }
+ break;
+#endif
+
+ case OPC_C0_FIRST ... OPC_C0_LAST:
+ switch (MASK_C0(opcode)) {
+ case OPC_ERET:
+ // Exception return
+ DEBUG_DISSAS("eret");
+ return 0;
+ case OPC_TLBWR:
+ if (likely(CPU.do_tlbwr)) {
+ env->CP0_EntryLo0 = CPU.CP0_EntryLo0;
+ env->CP0_EntryLo1 = CPU.CP0_EntryLo1;
+ r4k_helper_tlbwr(env);
+ }
+ DEBUG_DISSAS("tlbwr");
+ break;
+ case OPC_TLBWI:
+ env->CP0_EntryLo0 = CPU.CP0_EntryLo0;
+ env->CP0_EntryLo1 = CPU.CP0_EntryLo1;
+ r4k_helper_tlbwi(env);
+ DEBUG_DISSAS("tlbwi");
+ break;
+ case OPC_TLBP:
+ r4k_helper_tlbp(env);
+ CPU.CP0_Index = env->CP0_Index;
+ DEBUG_DISSAS("tlbp");
+ break;
+ default:
+ DEBUG_ERROR("opc_c0 Unknown");
+ break;
+ }
+ break;
+ default:
+ DEBUG_ERROR("Unknown");
+ break;
+ }
+ break;
+ case OPC_ADDIU:
+ CPU.gpr_reg[rt] = CPU.gpr_reg[rs] + imm;
+ DEBUG_DISSAS("addiu");
+ break;
+ case OPC_ANDI: /* Arithmetic with immediate opcode */
+ CPU.gpr_reg[rt] = CPU.gpr_reg[rs] & (uint32_t)imm;
+ DEBUG_DISSAS("andi");
+ break;
+ case OPC_LUI: /* OPC_AUI */
+ CPU.gpr_reg[rt] = ((int32_t)imm) << 16;
+ DEBUG_DISSAS("lui");
+ break;
+ case OPC_ORI:
+ CPU.gpr_reg[rt] = CPU.gpr_reg[rs] | ((uint32_t)imm);
+ DEBUG_DISSAS("ori");
+ break;
+ case OPC_XORI:
+ CPU.gpr_reg[rt] = CPU.gpr_reg[rs] ^ ((uint32_t)imm);
+ DEBUG_DISSAS("xori");
+ break;
+ case OPC_J:
+ case OPC_JAL: /* Jump */
+ case OPC_JALX:
+ DEBUG_DISSAS("Jump");
+ return -1;
+ /* Branch */
+ case OPC_BEQ:
+ if (CPU.gpr_reg[rs] == CPU.gpr_reg[rt]) {
+ CPU.branch_addr = CPU.pc + (int32_t)imm;
+ DEBUG_DISSAS("beq");
+ continue;
+ }
+ DEBUG_DISSAS("beq");
+ break;
+ case OPC_BEQL:
+ if (CPU.gpr_reg[rs] == CPU.gpr_reg[rt]) {
+ CPU.branch_addr = CPU.pc + (int32_t)imm;
+ DEBUG_DISSAS("beql");
+ continue;
+ } else {
+ CPU.pc++;
+ }
+ DEBUG_DISSAS("beql");
+ break;
+ case OPC_BNE:
+ if (CPU.gpr_reg[rs] != CPU.gpr_reg[rt]) {
+ CPU.branch_addr = CPU.pc + (int32_t)imm;
+ DEBUG_DISSAS("bne");
+ continue;
+ }
+ DEBUG_DISSAS("bne");
+ break;
+ case OPC_LW:
+ {
+ CPU.gpr_reg[rt] = (int32_t)ldl_phys(cs->as, translate_address(env, CPU.gpr_reg[rs] + imm));
+ DEBUG_DISSAS("lw");
+ break;
+ }
+ case OPC_SW:
+ stl_phys(cs->as, translate_address(env, CPU.gpr_reg[rs] + imm), CPU.gpr_reg[rt]);
+ DEBUG_DISSAS("sw");
+ break;
+ #if defined(TARGET_MIPS64)
+ /* MIPS64 opcodes */
+ case OPC_LD:
+ CPU.gpr_reg[rt] = ldq_phys(cs->as, translate_address(env, CPU.gpr_reg[rs] + imm));
+ DEBUG_DISSAS("ld");
+ break;
+ case OPC_SD:
+ stq_phys(cs->as, translate_address(env, CPU.gpr_reg[rs] + imm), CPU.gpr_reg[rt]);
+ DEBUG_DISSAS("sd");
+ break;
+ case OPC_DADDIU:
+ CPU.gpr_reg[rt] = CPU.gpr_reg[rs] + imm;
+ DEBUG_DISSAS("daddiu");
+ break;
+ #endif
+ default: /* Invalid */
+ DEBUG_ERROR("Unknown");
+ break;
+ }
+
+ if (CPU.branch_addr) {
+ CPU.pc = CPU.branch_addr;
+ CPU.branch_addr = 0;
+ }
+
+ if (CPU.pc >= size) {
+ DEBUG_ERROR("Code buffer Index out of bounds");
+ return -3;
+ }
+
+ } //end of while
+}
+#endif
+
/* global register indices */
static TCGv_ptr cpu_env;
static TCGv cpu_gpr[32], cpu_PC;
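For reference, the operand decode at the top of tlb_exception_interpreter() (the in-place major-opcode mask plus the rs/rt/rd/sa/imm fields) can be tried on its own. A minimal standalone sketch, fed with the lui instruction word quoted in the old handler-signature table:

#include <stdint.h>
#include <stdio.h>

static void decode_fields(uint32_t insn)
{
    uint32_t op = insn & (0x3fu << 26);  /* major opcode kept in place, as MASK_OP_MAJOR does */
    int rs = (insn >> 21) & 0x1f;        /* source register */
    int rt = (insn >> 16) & 0x1f;        /* target register */
    int rd = (insn >> 11) & 0x1f;        /* destination register */
    int sa = (insn >> 6) & 0x1f;         /* shift amount */
    int16_t imm = (int16_t)insn;         /* sign-extended 16-bit immediate */

    printf("op=0x%08x rs=%d rt=%d rd=%d sa=%d imm=%d\n",
           (unsigned)op, rs, rt, rd, sa, imm);
}

int main(void)
{
    decode_fields(0x3c1b803f);           /* lui k1,0x803f */
    return 0;
}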