#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#define DATA_STYPE int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#define DATA_STYPE int8_t
#else
#error unsupported data size
#endif

#if ACCESS_TYPE < (NB_MMU_MODES)

#define CPU_MMU_INDEX ACCESS_TYPE
#define MMUSUFFIX _mmu

#elif ACCESS_TYPE == (NB_MMU_MODES)

#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MMUSUFFIX _mmu

#elif ACCESS_TYPE == (NB_MMU_MODES + 1)

#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MMUSUFFIX _cmmu

#else
#error invalid ACCESS_TYPE
#endif

#if DATA_SIZE == 8
#define RES_TYPE uint64_t
#else
#define RES_TYPE int
#endif

#if ACCESS_TYPE == (NB_MMU_MODES + 1)
#define ADDR_READ addr_code
#else
#define ADDR_READ addr_read
#endif
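
/*
 * Name expansion, shown for one illustrative instantiation (DATA_SIZE == 4
 * and MEMSUFFIX == _kernel are assumptions here): the glue() macros below
 * emit accessors such as
 *
 *   int  ldl_kernel(target_ulong ptr);         // glue(glue(ld, USUFFIX), MEMSUFFIX)
 *   void stl_kernel(target_ulong ptr, int v);  // glue(glue(st, SUFFIX), MEMSUFFIX)
 *
 * which fall back to __ldl_mmu()/__stl_mmu() (or the _cmmu load variant for
 * the code-fetch instantiation) on a TLB miss.
 */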

/* ASM_SOFTMMU is explicitly undefined here, so the i386 inline-asm fast
   path below is compiled out and the generic C accessors are always used. */
#undef ASM_SOFTMMU

#if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \
    (ACCESS_TYPE < NB_MMU_MODES) && defined(ASM_SOFTMMU)
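
/*
 * i386 fast path: the TLB probe is done entirely in inline assembly.  The
 * CPU state pointer is assumed to live in %ebp (the reserved AREG0 of the
 * dyngen-era register convention), %edx is used to form the address of the
 * matching tlb_table entry, and on a miss the slow-path __ld/__st MMU
 * helper is called with its arguments in %eax/%edx/%ecx.
 */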

static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
{
    int res;

    asm volatile ("movl %1, %%edx\n"
                  "movl %1, %%eax\n"
                  "shrl %3, %%edx\n"
                  "andl %4, %%eax\n"
                  "andl %2, %%edx\n"
                  "leal %5(%%edx, %%ebp), %%edx\n"
                  "cmpl (%%edx), %%eax\n"
                  "movl %1, %%eax\n"
                  "je 1f\n"
                  "movl %6, %%edx\n"
                  "call %7\n"
                  "movl %%eax, %0\n"
                  "jmp 2f\n"
                  "1:\n"
                  "addl 12(%%edx), %%eax\n"
#if DATA_SIZE == 1
                  "movzbl (%%eax), %0\n"
#elif DATA_SIZE == 2
                  "movzwl (%%eax), %0\n"
#elif DATA_SIZE == 4
                  "movl (%%eax), %0\n"
#else
#error unsupported size
#endif
                  "2:\n"
                  : "=r" (res)
                  : "r" (ptr),
                    "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
                    "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
                    "i" (CPU_MMU_INDEX),
                    "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
                  : "%eax", "%ecx", "%edx", "memory", "cc");
    return res;
}

#if DATA_SIZE <= 2
static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
{
    int res;

    asm volatile ("movl %1, %%edx\n"
                  "movl %1, %%eax\n"
                  "shrl %3, %%edx\n"
                  "andl %4, %%eax\n"
                  "andl %2, %%edx\n"
                  "leal %5(%%edx, %%ebp), %%edx\n"
                  "cmpl (%%edx), %%eax\n"
                  "movl %1, %%eax\n"
                  "je 1f\n"
                  "movl %6, %%edx\n"
                  "call %7\n"
#if DATA_SIZE == 1
                  "movsbl %%al, %0\n"
#elif DATA_SIZE == 2
                  "movswl %%ax, %0\n"
#else
#error unsupported size
#endif
                  "jmp 2f\n"
                  "1:\n"
                  "addl 12(%%edx), %%eax\n"
#if DATA_SIZE == 1
                  "movsbl (%%eax), %0\n"
#elif DATA_SIZE == 2
                  "movswl (%%eax), %0\n"
#else
#error unsupported size
#endif
                  "2:\n"
                  : "=r" (res)
                  : "r" (ptr),
                    "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
                    "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
                    "i" (CPU_MMU_INDEX),
                    "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
                  : "%eax", "%ecx", "%edx", "memory", "cc");
    return res;
}
#endif

static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
{
    asm volatile ("movl %0, %%edx\n"
                  "movl %0, %%eax\n"
                  "shrl %3, %%edx\n"
                  "andl %4, %%eax\n"
                  "andl %2, %%edx\n"
                  "leal %5(%%edx, %%ebp), %%edx\n"
                  "cmpl (%%edx), %%eax\n"
                  "movl %0, %%eax\n"
                  "je 1f\n"
#if DATA_SIZE == 1
                  "movzbl %b1, %%edx\n"
#elif DATA_SIZE == 2
                  "movzwl %w1, %%edx\n"
#elif DATA_SIZE == 4
                  "movl %1, %%edx\n"
#else
#error unsupported size
#endif
                  "movl %6, %%ecx\n"
                  "call %7\n"
                  "jmp 2f\n"
                  "1:\n"
                  "addl 8(%%edx), %%eax\n"
#if DATA_SIZE == 1
                  "movb %b1, (%%eax)\n"
#elif DATA_SIZE == 2
                  "movw %w1, (%%eax)\n"
#elif DATA_SIZE == 4
                  "movl %1, (%%eax)\n"
#else
#error unsupported size
#endif
                  "2:\n"
                  :
                  : "r" (ptr),
#if DATA_SIZE == 1
                    "q" (v),
#else
                    "r" (v),
#endif
                    "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
                    "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_write)),
                    "i" (CPU_MMU_INDEX),
                    "m" (*(uint8_t *)&glue(glue(__st, SUFFIX), MMUSUFFIX))
                  : "%eax", "%ecx", "%edx", "memory", "cc");
}

#else

/* Generic C accessors: inline TLB lookup with a call to the out-of-line
   MMU helper on a miss. */

#ifndef __PPA_DEFD
/* Host address of the most recent TLB-hit load made through these helpers. */
extern uint64_t prev_phys_addr;
#define __PPA_DEFD
#endif
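
/*
 * Fast path: index the per-MMU-mode TLB with the virtual page number and
 * compare the stored tag against the access address (page bits plus the
 * low alignment bits).  On a hit the host address is addr + addend and a
 * plain *_raw access is performed; otherwise the __ld/__st MMU helper
 * walks the guest page tables and refills the TLB.
 */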

static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
{
    int page_index;
    RES_TYPE res;
    target_ulong addr;
    unsigned long physaddr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
        /* TLB miss (or unaligned access): take the slow path. */
        res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
    } else {
        /* TLB hit: access host memory directly. */
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
        prev_phys_addr = physaddr;
    }
    return res;
}

#ifndef _LDB_RA
#define _LDB_RA
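/*
 * Fixed-size load/store helpers that additionally carry a caller-supplied
 * return address (ra), presumably so the slow-path MMU helpers
 * (__ldb_mmu_ra(), slow_stb_mmu(), ...) can attribute a fault to the guest
 * instruction that performed the access.  They are guarded by _LDB_RA and
 * therefore emitted only once, however many times this header is included.
 */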
static inline uint8_t ldub_data_ra(target_ulong ptr, void *ra)
{
    int page_index;
    uint8_t res;
    target_ulong addr;
    unsigned long physaddr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                 (addr & TARGET_PAGE_MASK))) {
        res = __ldb_mmu_ra(addr, mmu_idx, ra);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        res = ldub_raw((uint8_t *)physaddr);
        prev_phys_addr = physaddr;
    }
    return res;
}

static inline uint32_t ldul_data_ra(target_ulong ptr, void *ra)
{
    int page_index;
    uint32_t res;
    target_ulong addr;
    unsigned long physaddr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                 (addr & (TARGET_PAGE_MASK | (4 - 1))))) {
        res = __ldl_mmu_ra(addr, mmu_idx, ra);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        res = ldl_raw((uint32_t *)physaddr);
        prev_phys_addr = physaddr;
    }
    return res;
}

static inline uint64_t lduq_data_ra(target_ulong ptr, void *ra)
{
    int page_index;
    uint64_t res;
    target_ulong addr;
    unsigned long physaddr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                 (addr & (TARGET_PAGE_MASK | (8 - 1))))) {
        res = __ldq_mmu_ra(addr, mmu_idx, ra);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        res = ldq_raw((uint64_t *)physaddr);
        prev_phys_addr = physaddr;
    }
    return res;
}

static inline void stb_data_ra(target_ulong ptr, uint8_t v, void *ra)
{
    int page_index;
    target_ulong addr;
    unsigned long physaddr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    /* Byte access: no alignment bits in the tag compare.  This helper is
       emitted only once, so it must not depend on the current DATA_SIZE
       (matching ldub_data_ra() above). */
    if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                 (addr & TARGET_PAGE_MASK))) {
        slow_stb_mmu(addr, v, mmu_idx, ra);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        stb_raw((uint8_t *)physaddr, v);
    }
}

static inline void stl_data_ra(target_ulong ptr, uint32_t v, void *ra)
{
    int page_index;
    target_ulong addr;
    unsigned long physaddr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                 (addr & (TARGET_PAGE_MASK | (4 - 1))))) {
        slow_stl_mmu(addr, v, mmu_idx, ra);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        stl_raw((uint32_t *)physaddr, v);
    }
}

static inline void stq_data_ra(target_ulong ptr, uint64_t v, void *ra)
{
    int page_index;
    target_ulong addr;
    unsigned long physaddr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                 (addr & (TARGET_PAGE_MASK | (8 - 1))))) {
        slow_stq_mmu(addr, v, mmu_idx, ra);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        stq_raw((uint64_t *)physaddr, v);
    }
}

#endif

#if DATA_SIZE <= 2
static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
{
    int res, page_index;
    target_ulong addr;
    unsigned long physaddr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
        res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr);
    }
    return res;
}
#endif

#if ACCESS_TYPE != (NB_MMU_MODES + 1)

/* Generic store; not emitted for the code-fetch (_cmmu) instantiation. */
static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
{
    int page_index;
    target_ulong addr;
    unsigned long physaddr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
        glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, mmu_idx);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v);
    }
}

#endif

#endif

#if ACCESS_TYPE != (NB_MMU_MODES + 1)

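/*
 * Float accessors: reinterpret the raw integer bit pattern as a float type
 * through a union; no numeric conversion is performed.
 */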
#if DATA_SIZE == 8
static inline float64 glue(ldfq, MEMSUFFIX)(target_ulong ptr)
{
    union {
        float64 d;
        uint64_t i;
    } u;
    u.i = glue(ldq, MEMSUFFIX)(ptr);
    return u.d;
}

static inline void glue(stfq, MEMSUFFIX)(target_ulong ptr, float64 v)
{
    union {
        float64 d;
        uint64_t i;
    } u;
    u.d = v;
    glue(stq, MEMSUFFIX)(ptr, u.i);
}
#endif

#if DATA_SIZE == 4
static inline float32 glue(ldfl, MEMSUFFIX)(target_ulong ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = glue(ldl, MEMSUFFIX)(ptr);
    return u.f;
}

static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    glue(stl, MEMSUFFIX)(ptr, u.i);
}
#endif

#endif
00470
00471 #undef RES_TYPE
00472 #undef DATA_TYPE
00473 #undef DATA_STYPE
00474 #undef SUFFIX
00475 #undef USUFFIX
00476 #undef DATA_SIZE
00477 #undef CPU_MMU_INDEX
00478 #undef MMUSUFFIX
00479 #undef ADDR_READ
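
/*
 * Typical instantiation, shown as an illustrative sketch only (the concrete
 * ACCESS_TYPE/MEMSUFFIX values and the header file name depend on the
 * including file; the pattern below follows upstream QEMU's cpu-all.h):
 *
 *   #define ACCESS_TYPE 0
 *   #define MEMSUFFIX MMU_MODE0_SUFFIX
 *   #define DATA_SIZE 1
 *   #include "softmmu_header.h"   // i.e. this header; name assumed
 *
 *   #define DATA_SIZE 2
 *   #include "softmmu_header.h"
 *   ...
 *   #undef ACCESS_TYPE
 *   #undef MEMSUFFIX
 */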