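/*
 *  Software MMU support: load/store helper template.
 *
 *  Included once per access size (SHIFT = 0..3 selects byte/word/long/quad)
 *  and once more with SOFTMMU_CODE_ACCESS defined for instruction fetches;
 *  MMUSUFFIX distinguishes the instantiations.
 */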
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);

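/* Load one DATA_SIZE-sized item from an I/O region. 64-bit accesses are
   split into two 32-bit reads, since I/O callbacks handle at most 32 bits. */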
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
    return res;
}

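/* Handle all load cases except an unaligned access spanning two pages
   (those are punted to the slow path below). */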
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;
    void *retaddr;

    /* test if there is a match for unaligned or IO access */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access: the item spans two pages */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access within the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

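/* Slow-path load: handles all unaligned cases; an access that spans two
   pages is performed as two aligned loads that are recombined. */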
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* the access spans two pages: load the two aligned halves
               and shift them together */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access within the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

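/* Store one DATA_SIZE-sized item to an I/O region. 64-bit accesses are
   split into two 32-bit writes, since I/O callbacks handle at most 32 bits. */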
static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
}

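/* Handle all store cases except an unaligned access spanning two pages
   (those are punted to the slow path below). */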
void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access within the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

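/* Slow-path store: handles all unaligned cases; a store that spans two
   pages is split into DATA_SIZE byte stores. */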
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple: store the value one byte
               at a time; byte stores can never themselves be unaligned */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned access within the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

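/* Load helpers that take the return address as an explicit argument
   instead of deriving it with GETPC(), so they can be called with a
   caller-supplied return address. Each is emitted only once, by the
   matching DATA_SIZE instantiation of this template. */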
#ifndef _LDB_MMU_RA_
#if DATA_SIZE == 1
#define _LDB_MMU_RA_
uint8_t REGPARM __ldb_mmu_ra(target_ulong addr, int mmu_idx, void *ra)
{
    uint8_t res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, ra);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access: the item spans two pages */
        do_unaligned_access:
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, ra);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, ra);
        } else {
            /* unaligned/aligned access within the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, ra);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = ldub_raw((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, ra);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, ra);
        goto redo;
    }
    return res;
}
#endif
#endif

#ifndef _LDL_MMU_RA_
#if DATA_SIZE == 4
#define _LDL_MMU_RA_
uint32_t REGPARM __ldl_mmu_ra(target_ulong addr, int mmu_idx, void *ra)
{
    uint32_t res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, ra);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access: the item spans two pages */
        do_unaligned_access:
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, ra);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, ra);
        } else {
            /* unaligned/aligned access within the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, ra);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = ldl_raw((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, ra);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, ra);
        goto redo;
    }
    return res;
}
#endif
#endif

#ifndef _LDQ_MMU_RA_
#if DATA_SIZE == 8
#define _LDQ_MMU_RA_
uint64_t REGPARM __ldq_mmu_ra(target_ulong addr, int mmu_idx, void *ra)
{
    uint64_t res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, ra);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access: the item spans two pages */
        do_unaligned_access:
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, ra);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, ra);
        } else {
            /* unaligned/aligned access within the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, ra);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = ldq_raw((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, ra);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, ra);
        goto redo;
    }
    return res;
}
#endif
#endif

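/* Clean up the per-size macros so this template can be included again
   with a different SHIFT. */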
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ