00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
#ifndef CPU_I386_H
#define CPU_I386_H

#include "config.h"

/* target word size: 64 bit for x86_64, 32 bit for i386 */
#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif

/* target supports implicit self modifying code */
#define TARGET_HAS_SMC

/* support for self modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#define TARGET_HAS_ICE 1

#ifdef TARGET_X86_64
#define ELF_MACHINE EM_X86_64
#else
#define ELF_MACHINE EM_386
#endif

#include "cpu-defs.h"

#include "softfloat.h"
00048
/* general purpose registers, in instruction-encoding order */
#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

/* 8-bit registers: low-byte forms first, then the high-byte forms */
#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

/* segment registers, in encoding order */
#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5
00073
00074
/* segment descriptor flag bits (the high 32-bit word of a descriptor,
   as cached in SegmentCache.flags) */
#define DESC_G_MASK (1 << 23)           /* granularity: limit in 4K units */
#define DESC_B_SHIFT 22
#define DESC_B_MASK (1 << DESC_B_SHIFT) /* default operand size (16/32 bit) */
#define DESC_L_SHIFT 21
#define DESC_L_MASK (1 << DESC_L_SHIFT) /* 64-bit code segment (x86_64 only) */
#define DESC_AVL_MASK (1 << 20)         /* available for software use */
#define DESC_P_MASK (1 << 15)           /* segment present */
#define DESC_DPL_SHIFT 13
/* DPL is a two-bit field (descriptor bits 13-14), so the mask must cover
   both bits; the previous (1 << DESC_DPL_SHIFT) only selected the low bit
   and would mis-decode DPL values 2 and 3. */
#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT)
#define DESC_S_MASK (1 << 12)           /* 0 = system, 1 = code/data segment */
#define DESC_TYPE_SHIFT 8
#define DESC_A_MASK (1 << 8)            /* accessed */

#define DESC_CS_MASK (1 << 11)          /* 1 = code segment */
#define DESC_C_MASK (1 << 10)           /* code: conforming */
#define DESC_R_MASK (1 << 9)            /* code: readable */

#define DESC_E_MASK (1 << 10)           /* data: expand down */
#define DESC_W_MASK (1 << 9)            /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)     /* TSS descriptor busy bit */
00096
00097
/* eflags masks */
#define CC_C 0x0001 /* carry */
#define CC_P 0x0004 /* parity */
#define CC_A 0x0010 /* auxiliary carry */
#define CC_Z 0x0040 /* zero */
#define CC_S 0x0080 /* sign */
#define CC_O 0x0800 /* overflow */

#define TF_SHIFT 8
#define IOPL_SHIFT 12
#define VM_SHIFT 17

#define TF_MASK 0x00000100   /* trap flag */
#define IF_MASK 0x00000200   /* interrupt enable */
#define DF_MASK 0x00000400   /* direction */
#define IOPL_MASK 0x00003000 /* I/O privilege level (2 bits) */
#define NT_MASK 0x00004000   /* nested task */
#define RF_MASK 0x00010000   /* resume */
#define VM_MASK 0x00020000   /* virtual-8086 mode */
#define AC_MASK 0x00040000   /* alignment check */
#define VIF_MASK 0x00080000  /* virtual interrupt flag */
#define VIP_MASK 0x00100000  /* virtual interrupt pending */
#define ID_MASK 0x00200000   /* CPUID-available flag */
00120
00121
00122
00123
00124
00125
/* hidden flags - used internally by qemu to represent additional cpu
   states that are not directly stored in eflags. Note that the TF, IOPL
   and VM shifts equal the corresponding eflags bit positions above
   (TF_SHIFT/IOPL_SHIFT/VM_SHIFT), which makes or'ing with eflags cheap. */
#define HF_CPL_SHIFT 0      /* current privilege level (2 bits) */
/* set when the soft mmu is in use */
#define HF_SOFTMMU_SHIFT 2
/* set if hardware interrupts must be disabled for the next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 bit code / stack segments */
#define HF_CS32_SHIFT 4
#define HF_SS32_SHIFT 5
/* nonzero segment bases, so address computation must add the base */
#define HF_ADDSEG_SHIFT 6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT 7
#define HF_TF_SHIFT 8       /* same bit position as eflags TF */
#define HF_MP_SHIFT 9       /* copies of CR0.MP/EM/TS */
#define HF_EM_SHIFT 10
#define HF_TS_SHIFT 11
#define HF_IOPL_SHIFT 12    /* same bit position as eflags IOPL */
#define HF_LMA_SHIFT 14     /* x86_64: long mode active */
#define HF_CS64_SHIFT 15    /* x86_64: 64-bit code segment */
#define HF_OSFXSR_SHIFT 16  /* copy of CR4.OSFXSR */
#define HF_VM_SHIFT 17      /* same bit position as eflags VM */
#define HF_SMM_SHIFT 19     /* CPU in system management mode */
#define HF_SVME_SHIFT 20    /* SVME enabled (copy of EFER.SVME) */
#define HF_SVMI_SHIFT 21    /* SVM intercepts are active */

#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK (1 << HF_PE_SHIFT)
#define HF_TF_MASK (1 << HF_TF_SHIFT)
#define HF_MP_MASK (1 << HF_MP_SHIFT)
#define HF_EM_MASK (1 << HF_EM_SHIFT)
#define HF_TS_MASK (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
#define HF_VM_MASK (1 << HF_VM_SHIFT)
#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK (1 << HF_SVME_SHIFT)
#define HF_SVMI_MASK (1 << HF_SVMI_SHIFT)

/* hflags2 - secondary hidden flags, mostly SVM related */
#define HF2_GIF_SHIFT 0     /* SVM global interrupt flag */
#define HF2_HIF_SHIFT 1     /* host IF_MASK value while in the guest */
#define HF2_NMI_SHIFT 2     /* CPU is serving an NMI */
#define HF2_VINTR_SHIFT 3   /* virtual interrupt masking (V_INTR_MASKING) */

#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
00182
#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

/* CR0 control bits */
#define CR0_PE_MASK (1 << 0)  /* protected mode enable */
#define CR0_MP_MASK (1 << 1)  /* monitor coprocessor */
#define CR0_EM_MASK (1 << 2)  /* FPU emulation */
#define CR0_TS_MASK (1 << 3)  /* task switched */
#define CR0_ET_MASK (1 << 4)  /* extension type */
#define CR0_NE_MASK (1 << 5)  /* numeric error reporting */
#define CR0_WP_MASK (1 << 16) /* write protect in supervisor mode */
#define CR0_AM_MASK (1 << 18) /* alignment mask */
#define CR0_PG_MASK (1 << 31) /* paging enable */

/* CR4 control bits */
#define CR4_VME_MASK (1 << 0) /* virtual-8086 mode extensions */
#define CR4_PVI_MASK (1 << 1) /* protected-mode virtual interrupts */
#define CR4_TSD_MASK (1 << 2) /* RDTSC restricted to CPL 0 */
#define CR4_DE_MASK (1 << 3)  /* debugging extensions */
#define CR4_PSE_MASK (1 << 4) /* 4MB page support */
#define CR4_PAE_MASK (1 << 5) /* physical address extension */
#define CR4_PGE_MASK (1 << 7) /* global pages */
#define CR4_PCE_MASK (1 << 8) /* RDPMC at any privilege level */
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT) /* FXSAVE/FXRSTOR, SSE */
#define CR4_OSXMMEXCPT_MASK (1 << 10) /* unmasked SIMD FP exceptions */

/* debug status register DR6 */
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
#define DR6_BT (1 << 15)
#define DR6_FIXED_1 0xffff0ff0 /* bits that always read as 1 */

/* debug control register DR7 */
#define DR7_GD (1 << 13)
#define DR7_TYPE_SHIFT 16 /* start of the per-breakpoint R/W type fields */
#define DR7_LEN_SHIFT 18  /* start of the per-breakpoint LEN fields */
#define DR7_FIXED_1 0x00000400 /* bit that always reads as 1 */
00217
/* page table entry bits */
#define PG_PRESENT_BIT 0
#define PG_RW_BIT 1
#define PG_USER_BIT 2
#define PG_PWT_BIT 3
#define PG_PCD_BIT 4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT 6
#define PG_PSE_BIT 7
#define PG_GLOBAL_BIT 8
#define PG_NX_BIT 63 /* no-execute: top bit of a 64-bit (PAE/long mode) PTE */

#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
#define PG_RW_MASK (1 << PG_RW_BIT)
#define PG_USER_MASK (1 << PG_USER_BIT)
#define PG_PWT_MASK (1 << PG_PWT_BIT)
#define PG_PCD_MASK (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
#define PG_NX_MASK (1LL << PG_NX_BIT) /* needs a 64-bit constant */

/* page fault error code bits */
#define PG_ERROR_W_BIT 1

#define PG_ERROR_P_MASK 0x01    /* page-level protection violation */
#define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT) /* fault on a write */
#define PG_ERROR_U_MASK 0x04    /* fault in user mode */
#define PG_ERROR_RSVD_MASK 0x08 /* reserved bit set in a PTE */
#define PG_ERROR_I_D_MASK 0x10  /* fault on an instruction fetch */
00247
/* model specific registers */
#define MSR_IA32_TSC 0x10
#define MSR_IA32_APICBASE 0x1b
#define MSR_IA32_APICBASE_BSP (1<<8)     /* processor is the BSP */
#define MSR_IA32_APICBASE_ENABLE (1<<11) /* APIC global enable */
#define MSR_IA32_APICBASE_BASE (0xfffff<<12) /* APIC base address field */

/* MTRR capability MSR and its fields */
#define MSR_MTRRcap 0xfe
#define MSR_MTRRcap_VCNT 8 /* number of variable-range MTRRs advertised */
#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED (1 << 10)

/* sysenter entry point registers */
#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176

/* machine check */
#define MSR_MCG_CAP 0x179
#define MSR_MCG_STATUS 0x17a
#define MSR_MCG_CTL 0x17b

#define MSR_IA32_PERF_STATUS 0x198

/* variable-range MTRR base/mask pairs start at 0x200 */
#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)

/* fixed-range MTRRs */
#define MSR_MTRRfix64K_00000 0x250
#define MSR_MTRRfix16K_80000 0x258
#define MSR_MTRRfix16K_A0000 0x259
#define MSR_MTRRfix4K_C0000 0x268
#define MSR_MTRRfix4K_C8000 0x269
#define MSR_MTRRfix4K_D0000 0x26a
#define MSR_MTRRfix4K_D8000 0x26b
#define MSR_MTRRfix4K_E0000 0x26c
#define MSR_MTRRfix4K_E8000 0x26d
#define MSR_MTRRfix4K_F0000 0x26e
#define MSR_MTRRfix4K_F8000 0x26f

#define MSR_PAT 0x277

#define MSR_MTRRdefType 0x2ff

/* extended feature enable register and its bits */
#define MSR_EFER 0xc0000080

#define MSR_EFER_SCE (1 << 0)   /* syscall enable */
#define MSR_EFER_LME (1 << 8)   /* long mode enable */
#define MSR_EFER_LMA (1 << 10)  /* long mode active */
#define MSR_EFER_NXE (1 << 11)  /* no-execute enable */
#define MSR_EFER_SVME (1 << 12) /* SVM enable */
#define MSR_EFER_FFXSR (1 << 14)

/* x86_64 syscall and segment base MSRs */
#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
#define MSR_CSTAR 0xc0000083
#define MSR_FMASK 0xc0000084
#define MSR_FSBASE 0xc0000100
#define MSR_GSBASE 0xc0000101
#define MSR_KERNELGSBASE 0xc0000102

#define MSR_VM_HSAVE_PA 0xc0010117 /* SVM host state-save area address */
00306
00307
/* cpuid_features bits (CPUID leaf 1, EDX) */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
#define CPUID_PAT  (1 << 16)
#define CPUID_PSE36 (1 << 17)
#define CPUID_PN   (1 << 18)
#define CPUID_CLFLUSH (1 << 19)
#define CPUID_DTS (1 << 21)
#define CPUID_ACPI (1 << 22)
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
#define CPUID_SS (1 << 27)
#define CPUID_HT (1 << 28)
#define CPUID_TM (1 << 29)
#define CPUID_IA64 (1 << 30)
#define CPUID_PBE (1 << 31)

/* cpuid_ext_features bits (CPUID leaf 1, ECX) */
#define CPUID_EXT_SSE3     (1 << 0)
#define CPUID_EXT_DTES64   (1 << 2)
#define CPUID_EXT_MONITOR  (1 << 3)
#define CPUID_EXT_DSCPL    (1 << 4)
#define CPUID_EXT_VMX      (1 << 5)
#define CPUID_EXT_SMX      (1 << 6)
#define CPUID_EXT_EST      (1 << 7)
#define CPUID_EXT_TM2      (1 << 8)
#define CPUID_EXT_SSSE3    (1 << 9)
#define CPUID_EXT_CID      (1 << 10)
#define CPUID_EXT_CX16     (1 << 13)
#define CPUID_EXT_XTPR     (1 << 14)
#define CPUID_EXT_PDCM     (1 << 15)
#define CPUID_EXT_DCA      (1 << 18)
#define CPUID_EXT_SSE41    (1 << 19)
#define CPUID_EXT_SSE42    (1 << 20)
#define CPUID_EXT_X2APIC   (1 << 21)
#define CPUID_EXT_MOVBE    (1 << 22)
#define CPUID_EXT_POPCNT   (1 << 23)
#define CPUID_EXT_XSAVE    (1 << 26)
#define CPUID_EXT_OSXSAVE  (1 << 27)

/* cpuid_ext2_features bits (CPUID leaf 0x80000001, EDX) */
#define CPUID_EXT2_SYSCALL (1 << 11)
#define CPUID_EXT2_MP      (1 << 19)
#define CPUID_EXT2_NX      (1 << 20)
#define CPUID_EXT2_MMXEXT  (1 << 22)
#define CPUID_EXT2_FFXSR   (1 << 25)
#define CPUID_EXT2_PDPE1GB (1 << 26)
#define CPUID_EXT2_RDTSCP  (1 << 27)
#define CPUID_EXT2_LM      (1 << 29) /* long mode */
#define CPUID_EXT2_3DNOWEXT (1 << 30)
#define CPUID_EXT2_3DNOW   (1 << 31)

/* cpuid_ext3_features bits (CPUID leaf 0x80000001, ECX) */
#define CPUID_EXT3_LAHF_LM (1 << 0)
#define CPUID_EXT3_CMP_LEG (1 << 1)
#define CPUID_EXT3_SVM     (1 << 2)
#define CPUID_EXT3_EXTAPIC (1 << 3)
#define CPUID_EXT3_CR8LEG  (1 << 4)
#define CPUID_EXT3_ABM     (1 << 5)
#define CPUID_EXT3_SSE4A   (1 << 6)
#define CPUID_EXT3_MISALIGNSSE (1 << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
#define CPUID_EXT3_OSVW    (1 << 9)
#define CPUID_EXT3_IBS     (1 << 10)
#define CPUID_EXT3_SKINIT  (1 << 12)

/* CPUID leaf 0 vendor strings: "GenuineIntel" in EBX/EDX/ECX order */
#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */

/* "AuthenticAMD" */
#define CPUID_VENDOR_AMD_1   0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2   0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3   0x444d4163 /* "cAMD" */

/* CPUID leaf 5 (monitor/mwait) ECX bits */
#define CPUID_MWAIT_IBE     (1 << 1) /* interrupts can break an mwait */
#define CPUID_MWAIT_EMX     (1 << 0) /* enumeration supported */
00395
/* x86 exception vector numbers */
#define EXCP00_DIVZ	0   /* divide error */
#define EXCP01_DB	1   /* debug */
#define EXCP02_NMI	2   /* non-maskable interrupt */
#define EXCP03_INT3	3   /* breakpoint */
#define EXCP04_INTO	4   /* overflow */
#define EXCP05_BOUND	5   /* BOUND range exceeded */
#define EXCP06_ILLOP	6   /* invalid opcode */
#define EXCP07_PREX	7   /* device not available */
#define EXCP08_DBLE	8   /* double fault */
#define EXCP09_XERR	9   /* coprocessor segment overrun (legacy) */
#define EXCP0A_TSS	10  /* invalid TSS */
#define EXCP0B_NOSEG	11  /* segment not present */
#define EXCP0C_STACK	12  /* stack-segment fault */
#define EXCP0D_GPF	13  /* general protection fault */
#define EXCP0E_PAGE	14  /* page fault */
#define EXCP10_COPR	16  /* x87 floating-point error */
#define EXCP11_ALGN	17  /* alignment check */
#define EXCP12_MCHK	18  /* machine check */

/* pseudo exception, not an architectural vector (> 0xff): used by user
   emulation to report a 'syscall' instruction */
#define EXCP_SYSCALL    0x100
00416
00417
/* Lazy condition-code evaluation: cc_op records which operation last set
   the flags so they can be recomputed on demand from cc_src/cc_dst.
   Each group has B/W/L/Q variants for the four operand sizes; the order
   within a group follows operand size. NOTE(review): the translator most
   likely computes an entry as base + size, so do not reorder. */
enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all flags explicitly computed, held in CC_SRC */

    CC_OP_MULB,
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB,
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB,
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB,
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB,
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB,
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB,
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB,
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB,
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB,
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_NB, /* number of CC_OP values, not a real operation */
};
00474
/* Use the 80-bit x87 extended format when softfloat provides floatx80;
   otherwise fall back to 64-bit doubles for the FPU registers. */
#ifdef FLOATX80
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef floatx80 CPU86_LDouble;
#else
typedef float64 CPU86_LDouble;
#endif
00484
/* Cached (hidden) part of a segment register, as loaded from a
   descriptor: selector plus the decoded base/limit/flags. */
typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;   /* DESC_* bits, high word of the descriptor */
} SegmentCache;
00491
/* 128-bit SSE register, viewable as packed bytes, words, dwords, qwords,
   or single/double precision floats */
typedef union {
    uint8_t _b[16];
    uint16_t _w[8];
    uint32_t _l[4];
    uint64_t _q[2];
    float32 _s[4];
    float64 _d[2];
} XMMReg;
00500
/* 64-bit MMX register, viewable as packed bytes, words, dwords, floats,
   or one qword */
typedef union {
    uint8_t _b[8];
    uint16_t _w[4];
    uint32_t _l[2];
    float32 _s[2];
    uint64_t q;
} MMXReg;
00508
/* Element accessors for XMMReg/MMXReg. On big-endian hosts the arrays are
   stored reversed in memory, so index from the top to keep element N
   meaning the same architectural element on every host. */
#ifdef WORDS_BIGENDIAN
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
#define XMM_S(n) _s[3 - (n)]
#define XMM_Q(n) _q[1 - (n)]
#define XMM_D(n) _d[1 - (n)]

#define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)]
#define MMX_L(n) _l[1 - (n)]
#define MMX_S(n) _s[1 - (n)]
#else
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
#define XMM_S(n) _s[n]
#define XMM_Q(n) _q[n]
#define XMM_D(n) _d[n]

#define MMX_B(n) _b[n]
#define MMX_W(n) _w[n]
#define MMX_L(n) _l[n]
#define MMX_S(n) _s[n]
#endif
#define MMX_Q(n) q /* single qword, endian-independent */

/* x86_64 has 16 general purpose / XMM registers, i386 has 8 */
#ifdef TARGET_X86_64
#define CPU_NB_REGS 16
#else
#define CPU_NB_REGS 8
#endif

#define NB_MMU_MODES 2 /* kernel and user, see cpu_mmu_index() below */
00543
/* Full architectural + emulator-internal state of one x86 CPU.
   NOTE(review): field order and types affect the savevm format
   (CPU_SAVE_VERSION) and translator offsets — do not reorder. */
typedef struct CPUX86State {
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* NOTE: arithmetic flags are tracked lazily
                            via the cc_* fields below */

    /* emulator internal eflags handling */
    target_ulong cc_src;
    target_ulong cc_dst;
    uint32_t cc_op;      /* CC_OP_* value selecting how to recompute flags */
    int32_t df;          /* direction flag as an increment (presumably
                            1 = forward, -1 = backward — verify in translator) */
    uint32_t hflags;     /* hidden flags, see HF_xxx constants */
    uint32_t hflags2;    /* additional hidden flags, see HF2_xxx constants */

    /* segments */
    SegmentCache segs[6]; /* indexed by R_ES..R_GS */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt;     /* only base and limit are meaningful */
    SegmentCache idt;     /* only base and limit are meaningful */

    target_ulong cr[5];   /* control registers; NOTE: cr1 is unused */
    uint64_t a20_mask;    /* address mask implementing the A20 gate */

    /* FPU state */
    unsigned int fpstt;   /* top of stack index */
    unsigned int fpus;    /* status word */
    unsigned int fpuc;    /* control word */
    uint8_t fptags[8];    /* per-register tag bits */
    union {
#ifdef USE_X86LDOUBLE
        CPU86_LDouble d __attribute__((aligned(16)));
#else
        CPU86_LDouble d;
#endif
        MMXReg mmx;       /* MMX registers alias the FPU stack */
    } fpregs[8];

    /* emulator internal variables */
    float_status fp_status;
    CPU86_LDouble ft0;    /* FP scratch temporary */

    float_status mmx_status; /* for 3DNow! float ops */
    float_status sse_status;
    uint32_t mxcsr;
    XMMReg xmm_regs[CPU_NB_REGS];
    XMMReg xmm_t0;        /* SSE scratch temporary */
    MMXReg mmx_t0;        /* MMX scratch temporary */
    target_ulong cc_tmp;  /* flag-computation temporary */

    /* sysenter registers */
    uint32_t sysenter_cs;
    target_ulong sysenter_esp;
    target_ulong sysenter_eip;
    uint64_t efer;
    uint64_t star;

    /* SVM (AMD virtualization) state */
    uint64_t vm_hsave;
    uint64_t vm_vmcb;
    uint64_t tsc_offset;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint8_t v_tpr;

#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif

    uint64_t tsc;

    uint64_t pat;

    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers; dr[4] and dr[5] are unused */
    union {
        CPUBreakpoint *cpu_breakpoint[4];
        CPUWatchpoint *cpu_watchpoint[4];
    }; /* one slot per hardware debug register 0-3 */
    uint32_t smbase;
    int old_exception;  /* exception in flight, used for double fault logic */

    CPU_COMMON

    /* processor features (e.g. for CPUID insn) */
    uint32_t cpuid_level;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    uint32_t cpuid_features;      /* CPUID_* bits */
    uint32_t cpuid_ext_features;  /* CPUID_EXT_* bits */
    uint32_t cpuid_xlevel;
    uint32_t cpuid_model[12];
    uint32_t cpuid_ext2_features; /* CPUID_EXT2_* bits */
    uint32_t cpuid_ext3_features; /* CPUID_EXT3_* bits */
    uint32_t cpuid_apic_id;

    /* MTRRs */
    uint64_t mtrr_fixed[11];
    uint64_t mtrr_deftype;
    struct {
        uint64_t base;
        uint64_t mask;
    } mtrr_var[8];

#ifdef USE_KQEMU
    int kqemu_enabled;
    int last_io_time;
#endif

    /* bitmap of pending interrupt vectors (256 bits) */
    uint64_t interrupt_bitmap[256 / 64];

    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct APICState *apic_state;
} CPUX86State;
00674
/* CPU creation / execution entry points (implemented elsewhere) */
CPUX86State *cpu_x86_init(const char *cpu_model);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);
/* print the list of supported CPU models to 'f' */
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt,
                                                 ...));
int cpu_get_pic_interrupt(CPUX86State *s);
/* raise an FPU error condition (implementation provided by board code) */
void cpu_set_ferr(CPUX86State *s);
00684
00685
/* Load a segment descriptor into the hidden segment cache and update the
   hflags bits that depend on segment state (CS32/SS32/CS64/ADDSEG).
   No permission checks are performed here — callers must have validated
   the descriptor already. */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode: 64-bit CS implies 32-bit operand defaults
                   and no segment-base additions */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case: CS32 follows the
                   descriptor's B bit */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        /* SS32 likewise follows SS's B bit */
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* real mode, vm86 mode or 16-bit protected mode: always add
               the segment base (bases may be nonzero there) */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            /* 32-bit protected mode: only add bases if one of DS/ES/SS
               actually has a nonzero base */
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}
00742
00743
/* Set the current privilege level, stored in the low two bits of hflags.
   The preprocessor guard breaks the build if HF_CPL_MASK ever stops
   being the value this or-without-shift trick assumes. */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_MASK == 3
    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}
00752
00753
00754
/* split / rebuild an 80-bit x87 value as mantissa + sign/exponent word */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f);
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper);

/* segment load and FPU save/restore helpers, used by the emulation core */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);

/* host signal handler for translated-code faults; returns nonzero if the
   signal was handled (NOTE(review): semantics inferred from the common
   qemu interface — confirm in the implementation) */
int cpu_x86_signal_handler(int host_signum, void *pinfo,
                           void *puc);

/* MMU fault handling and CPUID emulation */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu);
void cpu_x86_set_a20(CPUX86State *env, int a20_state);
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx);
00778
/* Return the two enable bits (local = bit 0, global = bit 1) for
   hardware breakpoint 'index' from a DR7 value; 0 means disabled. */
static inline int hw_breakpoint_enabled(unsigned long dr7, int index)
{
    int shift = index * 2; /* L0/G0 at bits 0-1, L1/G1 at 2-3, ... */
    return (int)((dr7 >> shift) & 3UL);
}
00783
00784 static inline int hw_breakpoint_type(unsigned long dr7, int index)
00785 {
00786 return (dr7 >> (DR7_TYPE_SHIFT + (index * 2))) & 3;
00787 }
00788
00789 static inline int hw_breakpoint_len(unsigned long dr7, int index)
00790 {
00791 int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 2))) & 3);
00792 return (len == 2) ? 8 : len + 1;
00793 }
00794
/* hardware breakpoint / watchpoint management (debug registers) */
void hw_breakpoint_insert(CPUX86State *env, int index);
void hw_breakpoint_remove(CPUX86State *env, int index);
int check_hw_breakpoints(CPUX86State *env, int force_dr6_update);

/* control register writes; these may change translation state, so they
   must go through these helpers rather than writing env->cr[] directly */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);

/* APIC access, implemented by the APIC device model */
void cpu_set_apic_base(CPUX86State *env, uint64_t val);
uint64_t cpu_get_apic_base(CPUX86State *env);
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val);
#ifndef NO_CPU_IO_DEFS
uint8_t cpu_get_apic_tpr(CPUX86State *env);
#endif

void cpu_smm_update(CPUX86State *env);
uint64_t cpu_get_tsc(CPUX86State *env);

/* state dump option flags */
#define X86_DUMP_FPU  0x0001 /* also dump the FPU state */
#define X86_DUMP_CCOP 0x0002 /* also dump the lazy flag cache (cc_op etc.) */
00819
#ifdef USE_KQEMU
/* Cheap timestamp for kqemu: the low 32 bits of the host TSC.
   Only valid on an x86 host (uses the rdtsc instruction directly). */
static inline int cpu_get_time_fast(void)
{
    int low, high;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    return low;
}
#endif
00828
#define TARGET_PAGE_BITS 12 /* 4 KiB pages */

/* map the generic CPU interface names used by target-independent qemu
   code onto the x86 implementations */
#define CPUState CPUX86State
#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list

#define CPU_SAVE_VERSION 8 /* bump whenever the savevm layout changes */

/* MMU modes definitions: mode 0 = kernel (CPL < 3), mode 1 = user */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
00844 static inline int cpu_mmu_index (CPUState *env)
00845 {
00846 return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
00847 }
00848
00849
void optimize_flags_init(void);

/* Pair of flag-computation callbacks for lazy condition codes: one
   computes all eflags, the other just the carry. NOTE(review):
   presumably indexed by CC_OP_* — confirm against the translator. */
typedef struct CCTable {
    int (*compute_all)(void); /* return all the eflags */
    int (*compute_c)(void);   /* return the C flag value */
} CCTable;
00856
#if defined(CONFIG_USER_ONLY)
/* Set up the child's registers after clone()/fork(): install the new
   stack pointer when one was supplied, and make the syscall return 0
   in the child (EAX). */
static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
{
    if (newsp) {
        env->regs[R_ESP] = newsp;
    }
    env->regs[R_EAX] = 0;
}
#endif
00865
00866 #include "cpu-all.h"
00867 #include "exec-all.h"
00868
00869 #include "svm.h"
00870
/* Restore EIP from a translation block: tb->pc holds the linear address,
   so subtract the cached CS base to get the architectural EIP. */
static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
{
    env->eip = tb->pc - tb->cs_base;
}
00875
00876 static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
00877 target_ulong *cs_base, int *flags)
00878 {
00879 *cs_base = env->segs[R_CS].base;
00880 *pc = *cs_base + env->eip;
00881 *flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
00882 }
00883
00884 #endif