File: [Qemu by Fabrice Bellard] / qemu / target-i386 / helper.c
Revision 1.1.1.9 (vendor branch)
Tue Apr 24 17:19:24 2018 UTC by root
Branches: qemu, MAIN
CVS tags: qemu0105, HEAD
qemu 0.10.5

    1: /*
    2:  *  i386 helpers (without register variable usage)
    3:  *
    4:  *  Copyright (c) 2003 Fabrice Bellard
    5:  *
    6:  * This library is free software; you can redistribute it and/or
    7:  * modify it under the terms of the GNU Lesser General Public
    8:  * License as published by the Free Software Foundation; either
    9:  * version 2 of the License, or (at your option) any later version.
   10:  *
   11:  * This library is distributed in the hope that it will be useful,
   12:  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   13:  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   14:  * Lesser General Public License for more details.
   15:  *
   16:  * You should have received a copy of the GNU Lesser General Public
   17:  * License along with this library; if not, write to the Free Software
   18:  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
   19:  */
   20: #include <stdarg.h>
   21: #include <stdlib.h>
   22: #include <stdio.h>
   23: #include <string.h>
   24: #include <inttypes.h>
   25: #include <signal.h>
   26: #include <assert.h>
   27: 
   28: #include "cpu.h"
   29: #include "exec-all.h"
   30: #include "qemu-common.h"
   31: #include "kvm.h"
   32: 
   33: //#define DEBUG_MMU
   34: 
   35: /* feature flags taken from "Intel Processor Identification and the CPUID
   36:  * Instruction" and AMD's "CPUID Specification". In cases of disagreement
   37:  * about feature names, the Linux name is used. */
   38: static const char *feature_name[] = {
   39:     "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
   40:     "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
   41:     "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
   42:     "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
   43: };
   44: static const char *ext_feature_name[] = {
   45:     "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
   46:     "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
   47:     NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
   48:     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
   49: };
   50: static const char *ext2_feature_name[] = {
   51:     "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
   52:     "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
   53:     "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
   54:     "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
   55: };
   56: static const char *ext3_feature_name[] = {
   57:     "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
   58:     "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
   59:     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
   60:     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
   61: };
   62: 
   63: static void add_flagname_to_bitmaps(char *flagname, uint32_t *features, 
   64:                                     uint32_t *ext_features, 
   65:                                     uint32_t *ext2_features, 
   66:                                     uint32_t *ext3_features)
   67: {
   68:     int i;
   69:     int found = 0;
   70: 
   71:     for ( i = 0 ; i < 32 ; i++ ) 
   72:         if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
   73:             *features |= 1 << i;
   74:             found = 1;
   75:         }
   76:     for ( i = 0 ; i < 32 ; i++ ) 
   77:         if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
   78:             *ext_features |= 1 << i;
   79:             found = 1;
   80:         }
   81:     for ( i = 0 ; i < 32 ; i++ ) 
   82:         if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
   83:             *ext2_features |= 1 << i;
   84:             found = 1;
   85:         }
   86:     for ( i = 0 ; i < 32 ; i++ ) 
   87:         if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
   88:             *ext3_features |= 1 << i;
   89:             found = 1;
   90:         }
   91:     if (!found) {
   92:         fprintf(stderr, "CPU feature %s not found\n", flagname);
   93:     }
   94: }
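/* Illustrative sketch (editor's addition, hypothetical, not in the original
 * file): how a single "+flag" token from a "-cpu model,+flag" option lands
 * in one of the four bitmaps above.  "nx" sits at bit 20 of
 * ext2_feature_name[], the same bit as CPUID_EXT2_NX. */
#if 0
static void example_add_nx_flag(void)
{
    uint32_t features = 0, ext = 0, ext2 = 0, ext3 = 0;

    add_flagname_to_bitmaps("nx", &features, &ext, &ext2, &ext3);
    assert(ext2 == (1u << 20));                 /* same bit as CPUID_EXT2_NX */
    assert(features == 0 && ext == 0 && ext3 == 0);
}
#endif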
   95: 
   96: static void kvm_trim_features(uint32_t *features, uint32_t supported,
   97:                               const char *names[])
   98: {
   99:     int i;
  100:     uint32_t mask;
  101: 
  102:     for (i = 0; i < 32; ++i) {
  103:         mask = 1U << i;
  104:         if ((*features & mask) && !(supported & mask)) {
  105:             *features &= ~mask;
  106:         }
  107:     }
  108: }
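/* Editor's note: the 'names' argument is not used by kvm_trim_features() in
 * this revision.  A hypothetical sketch of the trimming behaviour (not part
 * of the original file): any requested bit the host/KVM does not report is
 * silently cleared. */
#if 0
static void example_trim(void)
{
    uint32_t wanted = (1u << 20) | (1u << 29);   /* NX + LM requested */
    uint32_t host_has = 1u << 29;                /* host reports only LM */

    kvm_trim_features(&wanted, host_has, ext2_feature_name);
    assert(wanted == (1u << 29));                /* NX was trimmed away */
}
#endif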
  109: 
  110: typedef struct x86_def_t {
  111:     const char *name;
  112:     uint32_t level;
  113:     uint32_t vendor1, vendor2, vendor3;
  114:     int family;
  115:     int model;
  116:     int stepping;
  117:     uint32_t features, ext_features, ext2_features, ext3_features;
  118:     uint32_t xlevel;
  119:     char model_id[48];
  120: } x86_def_t;
  121: 
  122: #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
  123: #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
  124:           CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
  125: #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
  126:           CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
  127:           CPUID_PSE36 | CPUID_FXSR)
  128: #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
  129: #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
  130:           CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
  131:           CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
  132:           CPUID_PAE | CPUID_SEP | CPUID_APIC)
  133: static x86_def_t x86_defs[] = {
  134: #ifdef TARGET_X86_64
  135:     {
  136:         .name = "qemu64",
  137:         .level = 2,
  138:         .vendor1 = CPUID_VENDOR_AMD_1,
  139:         .vendor2 = CPUID_VENDOR_AMD_2,
  140:         .vendor3 = CPUID_VENDOR_AMD_3,
  141:         .family = 6,
  142:         .model = 2,
  143:         .stepping = 3,
  144:         .features = PPRO_FEATURES | 
  145:         /* these features are needed for Win64 and aren't fully implemented */
  146:             CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
  147:         /* this feature is needed for Solaris and isn't fully implemented */
  148:             CPUID_PSE36,
  149:         .ext_features = CPUID_EXT_SSE3,
  150:         .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | 
  151:             CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
  152:             CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
  153:         .ext3_features = CPUID_EXT3_SVM,
  154:         .xlevel = 0x8000000A,
  155:         .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
  156:     },
  157:     {
  158:         .name = "phenom",
  159:         .level = 5,
  160:         .vendor1 = CPUID_VENDOR_AMD_1,
  161:         .vendor2 = CPUID_VENDOR_AMD_2,
  162:         .vendor3 = CPUID_VENDOR_AMD_3,
  163:         .family = 16,
  164:         .model = 2,
  165:         .stepping = 3,
  166:         /* Missing: CPUID_VME, CPUID_HT */
  167:         .features = PPRO_FEATURES | 
  168:             CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
  169:             CPUID_PSE36,
  170:         /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
  171:         .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
  172:         /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
  173:         .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | 
  174:             CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
  175:             CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
  176:             CPUID_EXT2_FFXSR,
  177:         /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
  178:                     CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
  179:                     CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
  180:                     CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
  181:         .ext3_features = CPUID_EXT3_SVM,
  182:         .xlevel = 0x8000001A,
  183:         .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
  184:     },
  185:     {
  186:         .name = "core2duo",
  187:         .level = 10,
  188:         .family = 6,
  189:         .model = 15,
  190:         .stepping = 11,
   191:         /* The original CPU also implements these features:
  192:                CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
  193:                CPUID_TM, CPUID_PBE */
  194:         .features = PPRO_FEATURES |
  195:             CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
  196:             CPUID_PSE36,
   197:         /* The original CPU also implements these ext features:
  198:                CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
  199:                CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
  200:         .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
  201:         .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
  202:         /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
  203:         .xlevel = 0x80000008,
  204:         .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
  205:     },
  206: #endif
  207:     {
  208:         .name = "qemu32",
  209:         .level = 2,
  210:         .family = 6,
  211:         .model = 3,
  212:         .stepping = 3,
  213:         .features = PPRO_FEATURES,
  214:         .ext_features = CPUID_EXT_SSE3,
  215:         .xlevel = 0,
  216:         .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
  217:     },
  218:     {
  219:         .name = "coreduo",
  220:         .level = 10,
  221:         .family = 6,
  222:         .model = 14,
  223:         .stepping = 8,
  224:         /* The original CPU also implements these features:
  225:                CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
  226:                CPUID_TM, CPUID_PBE */
  227:         .features = PPRO_FEATURES | CPUID_VME |
  228:             CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
  229:         /* The original CPU also implements these ext features:
  230:                CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
  231:                CPUID_EXT_PDCM */
  232:         .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
  233:         .ext2_features = CPUID_EXT2_NX,
  234:         .xlevel = 0x80000008,
  235:         .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
  236:     },
  237:     {
  238:         .name = "486",
  239:         .level = 0,
  240:         .family = 4,
  241:         .model = 0,
  242:         .stepping = 0,
  243:         .features = I486_FEATURES,
  244:         .xlevel = 0,
  245:     },
  246:     {
  247:         .name = "pentium",
  248:         .level = 1,
  249:         .family = 5,
  250:         .model = 4,
  251:         .stepping = 3,
  252:         .features = PENTIUM_FEATURES,
  253:         .xlevel = 0,
  254:     },
  255:     {
  256:         .name = "pentium2",
  257:         .level = 2,
  258:         .family = 6,
  259:         .model = 5,
  260:         .stepping = 2,
  261:         .features = PENTIUM2_FEATURES,
  262:         .xlevel = 0,
  263:     },
  264:     {
  265:         .name = "pentium3",
  266:         .level = 2,
  267:         .family = 6,
  268:         .model = 7,
  269:         .stepping = 3,
  270:         .features = PENTIUM3_FEATURES,
  271:         .xlevel = 0,
  272:     },
  273:     {
  274:         .name = "athlon",
  275:         .level = 2,
  276:         .vendor1 = 0x68747541, /* "Auth" */
  277:         .vendor2 = 0x69746e65, /* "enti" */
  278:         .vendor3 = 0x444d4163, /* "cAMD" */
  279:         .family = 6,
  280:         .model = 2,
  281:         .stepping = 3,
  282:         .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
  283:         .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
  284:         .xlevel = 0x80000008,
  285:         /* XXX: put another string ? */
  286:         .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
  287:     },
  288:     {
  289:         .name = "n270",
   290:         /* the original CPU is at level 10 */
  291:         .level = 5,
  292:         .family = 6,
  293:         .model = 28,
  294:         .stepping = 2,
  295:         .features = PPRO_FEATURES |
  296:             CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
  297:             /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
  298:              * CPUID_HT | CPUID_TM | CPUID_PBE */
   299:             /* Some CPUs lack CPUID_SEP */
  300:         .ext_features = CPUID_EXT_MONITOR |
  301:             CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
  302:             /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
  303:              * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
  304:         .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
  305:         /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
  306:         .xlevel = 0x8000000A,
  307:         .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
  308:     },
  309: };
  310: 
  311: static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
  312: {
  313:     unsigned int i;
  314:     x86_def_t *def;
  315: 
  316:     char *s = strdup(cpu_model);
  317:     char *featurestr, *name = strtok(s, ",");
  318:     uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
  319:     uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
  320:     int family = -1, model = -1, stepping = -1;
  321: 
  322:     def = NULL;
  323:     for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
  324:         if (strcmp(name, x86_defs[i].name) == 0) {
  325:             def = &x86_defs[i];
  326:             break;
  327:         }
  328:     }
  329:     if (!def)
  330:         goto error;
  331:     memcpy(x86_cpu_def, def, sizeof(*def));
  332: 
  333:     featurestr = strtok(NULL, ",");
  334: 
  335:     while (featurestr) {
  336:         char *val;
  337:         if (featurestr[0] == '+') {
  338:             add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
  339:         } else if (featurestr[0] == '-') {
  340:             add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
  341:         } else if ((val = strchr(featurestr, '='))) {
  342:             *val = 0; val++;
  343:             if (!strcmp(featurestr, "family")) {
  344:                 char *err;
  345:                 family = strtol(val, &err, 10);
  346:                 if (!*val || *err || family < 0) {
  347:                     fprintf(stderr, "bad numerical value %s\n", val);
  348:                     goto error;
  349:                 }
  350:                 x86_cpu_def->family = family;
  351:             } else if (!strcmp(featurestr, "model")) {
  352:                 char *err;
  353:                 model = strtol(val, &err, 10);
  354:                 if (!*val || *err || model < 0 || model > 0xff) {
  355:                     fprintf(stderr, "bad numerical value %s\n", val);
  356:                     goto error;
  357:                 }
  358:                 x86_cpu_def->model = model;
  359:             } else if (!strcmp(featurestr, "stepping")) {
  360:                 char *err;
  361:                 stepping = strtol(val, &err, 10);
  362:                 if (!*val || *err || stepping < 0 || stepping > 0xf) {
  363:                     fprintf(stderr, "bad numerical value %s\n", val);
  364:                     goto error;
  365:                 }
  366:                 x86_cpu_def->stepping = stepping;
  367:             } else if (!strcmp(featurestr, "vendor")) {
  368:                 if (strlen(val) != 12) {
  369:                     fprintf(stderr, "vendor string must be 12 chars long\n");
  370:                     goto error;
  371:                 }
  372:                 x86_cpu_def->vendor1 = 0;
  373:                 x86_cpu_def->vendor2 = 0;
  374:                 x86_cpu_def->vendor3 = 0;
  375:                 for(i = 0; i < 4; i++) {
  376:                     x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
  377:                     x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
  378:                     x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
  379:                 }
  380:             } else if (!strcmp(featurestr, "model_id")) {
  381:                 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
  382:                         val);
  383:             } else {
  384:                 fprintf(stderr, "unrecognized feature %s\n", featurestr);
  385:                 goto error;
  386:             }
  387:         } else {
  388:             fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
  389:             goto error;
  390:         }
  391:         featurestr = strtok(NULL, ",");
  392:     }
  393:     x86_cpu_def->features |= plus_features;
  394:     x86_cpu_def->ext_features |= plus_ext_features;
  395:     x86_cpu_def->ext2_features |= plus_ext2_features;
  396:     x86_cpu_def->ext3_features |= plus_ext3_features;
  397:     x86_cpu_def->features &= ~minus_features;
  398:     x86_cpu_def->ext_features &= ~minus_ext_features;
  399:     x86_cpu_def->ext2_features &= ~minus_ext2_features;
  400:     x86_cpu_def->ext3_features &= ~minus_ext3_features;
  401:     free(s);
  402:     return 0;
  403: 
  404: error:
  405:     free(s);
  406:     return -1;
  407: }
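/* Illustrative sketch (editor's addition, hypothetical, not in the original
 * file): a typical "-cpu" string handled by cpu_x86_find_by_name().  The
 * first token picks an entry of x86_defs[], "+"/"-" tokens are turned into
 * plus/minus bitmaps via add_flagname_to_bitmaps(), and "key=value" tokens
 * patch individual fields. */
#if 0
static void example_find_by_name(void)
{
    x86_def_t def;

    if (cpu_x86_find_by_name(&def, "qemu64,-nx,family=15,model_id=My CPU") == 0) {
        /* def starts from the "qemu64" template, then CPUID_EXT2_NX is
           masked out of ext2_features, family is overridden to 15 and
           model_id is replaced. */
    }
}
#endif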
  408: 
  409: void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
  410: {
  411:     unsigned int i;
  412: 
  413:     for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
  414:         (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
  415: }
  416: 
  417: static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
  418: {
  419:     x86_def_t def1, *def = &def1;
  420: 
  421:     if (cpu_x86_find_by_name(def, cpu_model) < 0)
  422:         return -1;
  423:     if (def->vendor1) {
  424:         env->cpuid_vendor1 = def->vendor1;
  425:         env->cpuid_vendor2 = def->vendor2;
  426:         env->cpuid_vendor3 = def->vendor3;
  427:     } else {
  428:         env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
  429:         env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
  430:         env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
  431:     }
  432:     env->cpuid_level = def->level;
  433:     if (def->family > 0x0f)
  434:         env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
  435:     else
  436:         env->cpuid_version = def->family << 8;
  437:     env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
  438:     env->cpuid_version |= def->stepping;
  439:     env->cpuid_features = def->features;
  440:     env->pat = 0x0007040600070406ULL;
  441:     env->cpuid_ext_features = def->ext_features;
  442:     env->cpuid_ext2_features = def->ext2_features;
  443:     env->cpuid_xlevel = def->xlevel;
  444:     env->cpuid_ext3_features = def->ext3_features;
  445:     {
  446:         const char *model_id = def->model_id;
  447:         int c, len, i;
  448:         if (!model_id)
  449:             model_id = "";
  450:         len = strlen(model_id);
  451:         for(i = 0; i < 48; i++) {
  452:             if (i >= len)
  453:                 c = '\0';
  454:             else
  455:                 c = (uint8_t)model_id[i];
  456:             env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
  457:         }
  458:     }
  459:     return 0;
  460: }
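/* Editor's worked example (not in the original source): for the "phenom"
 * definition above (family 16, model 2, stepping 3), family > 0x0f takes the
 * extended-family branch, so cpuid_version becomes
 *
 *     0xf00 | ((16 - 0x0f) << 20)   = 0x00100f00
 *   | ((2 & 0xf) << 4)              | 0x00000020
 *   | ((2 >> 4) << 16)              | 0x00000000
 *   | 3                             | 0x00000003
 *                                   = 0x00100f23
 *
 * i.e. base family 0xf, extended family 1, model 2, stepping 3 in the
 * standard CPUID leaf 1 EAX layout. */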
  461: 
  462: /* NOTE: must be called outside the CPU execute loop */
  463: void cpu_reset(CPUX86State *env)
  464: {
  465:     int i;
  466: 
  467:     if (qemu_loglevel_mask(CPU_LOG_RESET)) {
  468:         qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
  469:         log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
  470:     }
  471: 
  472:     memset(env, 0, offsetof(CPUX86State, breakpoints));
  473: 
  474:     tlb_flush(env, 1);
  475: 
  476:     env->old_exception = -1;
  477: 
  478:     /* init to reset state */
  479: 
  480: #ifdef CONFIG_SOFTMMU
  481:     env->hflags |= HF_SOFTMMU_MASK;
  482: #endif
  483:     env->hflags2 |= HF2_GIF_MASK;
  484: 
  485:     cpu_x86_update_cr0(env, 0x60000010);
  486:     env->a20_mask = ~0x0;
  487:     env->smbase = 0x30000;
  488: 
  489:     env->idt.limit = 0xffff;
  490:     env->gdt.limit = 0xffff;
  491:     env->ldt.limit = 0xffff;
  492:     env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
  493:     env->tr.limit = 0xffff;
  494:     env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
  495: 
  496:     cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
  497:                            DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
  498:     cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
  499:                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
  500:     cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
  501:                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
  502:     cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
  503:                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
  504:     cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
  505:                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
  506:     cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
  507:                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
  508: 
  509:     env->eip = 0xfff0;
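    /* (editor's note) together with the CS base of 0xffff0000 loaded above,
       EIP = 0xfff0 makes the first instruction fetch hit physical address
       0xfffffff0, the standard x86 reset vector. */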
  510:     env->regs[R_EDX] = env->cpuid_version;
  511: 
  512:     env->eflags = 0x2;
  513: 
  514:     /* FPU init */
  515:     for(i = 0;i < 8; i++)
  516:         env->fptags[i] = 1;
  517:     env->fpuc = 0x37f;
  518: 
  519:     env->mxcsr = 0x1f80;
  520: 
  521:     memset(env->dr, 0, sizeof(env->dr));
  522:     env->dr[6] = DR6_FIXED_1;
  523:     env->dr[7] = DR7_FIXED_1;
  524:     cpu_breakpoint_remove_all(env, BP_CPU);
  525:     cpu_watchpoint_remove_all(env, BP_CPU);
  526: }
  527: 
  528: void cpu_x86_close(CPUX86State *env)
  529: {
  530:     qemu_free(env);
  531: }
  532: 
  533: /***********************************************************/
  534: /* x86 debug */
  535: 
  536: static const char *cc_op_str[] = {
  537:     "DYNAMIC",
  538:     "EFLAGS",
  539: 
  540:     "MULB",
  541:     "MULW",
  542:     "MULL",
  543:     "MULQ",
  544: 
  545:     "ADDB",
  546:     "ADDW",
  547:     "ADDL",
  548:     "ADDQ",
  549: 
  550:     "ADCB",
  551:     "ADCW",
  552:     "ADCL",
  553:     "ADCQ",
  554: 
  555:     "SUBB",
  556:     "SUBW",
  557:     "SUBL",
  558:     "SUBQ",
  559: 
  560:     "SBBB",
  561:     "SBBW",
  562:     "SBBL",
  563:     "SBBQ",
  564: 
  565:     "LOGICB",
  566:     "LOGICW",
  567:     "LOGICL",
  568:     "LOGICQ",
  569: 
  570:     "INCB",
  571:     "INCW",
  572:     "INCL",
  573:     "INCQ",
  574: 
  575:     "DECB",
  576:     "DECW",
  577:     "DECL",
  578:     "DECQ",
  579: 
  580:     "SHLB",
  581:     "SHLW",
  582:     "SHLL",
  583:     "SHLQ",
  584: 
  585:     "SARB",
  586:     "SARW",
  587:     "SARL",
  588:     "SARQ",
  589: };
  590: 
  591: void cpu_dump_state(CPUState *env, FILE *f,
  592:                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
  593:                     int flags)
  594: {
  595:     int eflags, i, nb;
  596:     char cc_op_name[32];
  597:     static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
  598: 
  599:     if (kvm_enabled())
  600:         kvm_arch_get_registers(env);
  601: 
  602:     eflags = env->eflags;
  603: #ifdef TARGET_X86_64
  604:     if (env->hflags & HF_CS64_MASK) {
  605:         cpu_fprintf(f,
  606:                     "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
  607:                     "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
  608:                     "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
  609:                     "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
  610:                     "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
  611:                     env->regs[R_EAX],
  612:                     env->regs[R_EBX],
  613:                     env->regs[R_ECX],
  614:                     env->regs[R_EDX],
  615:                     env->regs[R_ESI],
  616:                     env->regs[R_EDI],
  617:                     env->regs[R_EBP],
  618:                     env->regs[R_ESP],
  619:                     env->regs[8],
  620:                     env->regs[9],
  621:                     env->regs[10],
  622:                     env->regs[11],
  623:                     env->regs[12],
  624:                     env->regs[13],
  625:                     env->regs[14],
  626:                     env->regs[15],
  627:                     env->eip, eflags,
  628:                     eflags & DF_MASK ? 'D' : '-',
  629:                     eflags & CC_O ? 'O' : '-',
  630:                     eflags & CC_S ? 'S' : '-',
  631:                     eflags & CC_Z ? 'Z' : '-',
  632:                     eflags & CC_A ? 'A' : '-',
  633:                     eflags & CC_P ? 'P' : '-',
  634:                     eflags & CC_C ? 'C' : '-',
  635:                     env->hflags & HF_CPL_MASK,
  636:                     (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
  637:                     (int)(env->a20_mask >> 20) & 1,
  638:                     (env->hflags >> HF_SMM_SHIFT) & 1,
  639:                     env->halted);
  640:     } else
  641: #endif
  642:     {
  643:         cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
  644:                     "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
  645:                     "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
  646:                     (uint32_t)env->regs[R_EAX],
  647:                     (uint32_t)env->regs[R_EBX],
  648:                     (uint32_t)env->regs[R_ECX],
  649:                     (uint32_t)env->regs[R_EDX],
  650:                     (uint32_t)env->regs[R_ESI],
  651:                     (uint32_t)env->regs[R_EDI],
  652:                     (uint32_t)env->regs[R_EBP],
  653:                     (uint32_t)env->regs[R_ESP],
  654:                     (uint32_t)env->eip, eflags,
  655:                     eflags & DF_MASK ? 'D' : '-',
  656:                     eflags & CC_O ? 'O' : '-',
  657:                     eflags & CC_S ? 'S' : '-',
  658:                     eflags & CC_Z ? 'Z' : '-',
  659:                     eflags & CC_A ? 'A' : '-',
  660:                     eflags & CC_P ? 'P' : '-',
  661:                     eflags & CC_C ? 'C' : '-',
  662:                     env->hflags & HF_CPL_MASK,
  663:                     (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
  664:                     (int)(env->a20_mask >> 20) & 1,
  665:                     (env->hflags >> HF_SMM_SHIFT) & 1,
  666:                     env->halted);
  667:     }
  668: 
  669: #ifdef TARGET_X86_64
  670:     if (env->hflags & HF_LMA_MASK) {
  671:         for(i = 0; i < 6; i++) {
  672:             SegmentCache *sc = &env->segs[i];
  673:             cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
  674:                         seg_name[i],
  675:                         sc->selector,
  676:                         sc->base,
  677:                         sc->limit,
  678:                         sc->flags);
  679:         }
  680:         cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
  681:                     env->ldt.selector,
  682:                     env->ldt.base,
  683:                     env->ldt.limit,
  684:                     env->ldt.flags);
  685:         cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
  686:                     env->tr.selector,
  687:                     env->tr.base,
  688:                     env->tr.limit,
  689:                     env->tr.flags);
  690:         cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
  691:                     env->gdt.base, env->gdt.limit);
  692:         cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
  693:                     env->idt.base, env->idt.limit);
  694:         cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
  695:                     (uint32_t)env->cr[0],
  696:                     env->cr[2],
  697:                     env->cr[3],
  698:                     (uint32_t)env->cr[4]);
  699:         for(i = 0; i < 4; i++)
  700:             cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
  701:         cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
  702:                     env->dr[6], env->dr[7]);
  703:     } else
  704: #endif
  705:     {
  706:         for(i = 0; i < 6; i++) {
  707:             SegmentCache *sc = &env->segs[i];
  708:             cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
  709:                         seg_name[i],
  710:                         sc->selector,
  711:                         (uint32_t)sc->base,
  712:                         sc->limit,
  713:                         sc->flags);
  714:         }
  715:         cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
  716:                     env->ldt.selector,
  717:                     (uint32_t)env->ldt.base,
  718:                     env->ldt.limit,
  719:                     env->ldt.flags);
  720:         cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
  721:                     env->tr.selector,
  722:                     (uint32_t)env->tr.base,
  723:                     env->tr.limit,
  724:                     env->tr.flags);
  725:         cpu_fprintf(f, "GDT=     %08x %08x\n",
  726:                     (uint32_t)env->gdt.base, env->gdt.limit);
  727:         cpu_fprintf(f, "IDT=     %08x %08x\n",
  728:                     (uint32_t)env->idt.base, env->idt.limit);
  729:         cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
  730:                     (uint32_t)env->cr[0],
  731:                     (uint32_t)env->cr[2],
  732:                     (uint32_t)env->cr[3],
  733:                     (uint32_t)env->cr[4]);
  734:         for(i = 0; i < 4; i++)
  735:             cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
  736:         cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
  737:     }
  738:     if (flags & X86_DUMP_CCOP) {
  739:         if ((unsigned)env->cc_op < CC_OP_NB)
  740:             snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
  741:         else
  742:             snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
  743: #ifdef TARGET_X86_64
  744:         if (env->hflags & HF_CS64_MASK) {
  745:             cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
  746:                         env->cc_src, env->cc_dst,
  747:                         cc_op_name);
  748:         } else
  749: #endif
  750:         {
  751:             cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
  752:                         (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
  753:                         cc_op_name);
  754:         }
  755:     }
  756:     if (flags & X86_DUMP_FPU) {
  757:         int fptag;
  758:         fptag = 0;
  759:         for(i = 0; i < 8; i++) {
  760:             fptag |= ((!env->fptags[i]) << i);
  761:         }
  762:         cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
  763:                     env->fpuc,
  764:                     (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
  765:                     env->fpstt,
  766:                     fptag,
  767:                     env->mxcsr);
  768:         for(i=0;i<8;i++) {
  769: #if defined(USE_X86LDOUBLE)
  770:             union {
  771:                 long double d;
  772:                 struct {
  773:                     uint64_t lower;
  774:                     uint16_t upper;
  775:                 } l;
  776:             } tmp;
  777:             tmp.d = env->fpregs[i].d;
  778:             cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
  779:                         i, tmp.l.lower, tmp.l.upper);
  780: #else
  781:             cpu_fprintf(f, "FPR%d=%016" PRIx64,
  782:                         i, env->fpregs[i].mmx.q);
  783: #endif
  784:             if ((i & 1) == 1)
  785:                 cpu_fprintf(f, "\n");
  786:             else
  787:                 cpu_fprintf(f, " ");
  788:         }
  789:         if (env->hflags & HF_CS64_MASK)
  790:             nb = 16;
  791:         else
  792:             nb = 8;
  793:         for(i=0;i<nb;i++) {
  794:             cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
  795:                         i,
  796:                         env->xmm_regs[i].XMM_L(3),
  797:                         env->xmm_regs[i].XMM_L(2),
  798:                         env->xmm_regs[i].XMM_L(1),
  799:                         env->xmm_regs[i].XMM_L(0));
  800:             if ((i & 1) == 1)
  801:                 cpu_fprintf(f, "\n");
  802:             else
  803:                 cpu_fprintf(f, " ");
  804:         }
  805:     }
  806: }
  807: 
  808: /***********************************************************/
  809: /* x86 mmu */
  810: /* XXX: add PGE support */
  811: 
  812: void cpu_x86_set_a20(CPUX86State *env, int a20_state)
  813: {
  814:     a20_state = (a20_state != 0);
  815:     if (a20_state != ((env->a20_mask >> 20) & 1)) {
  816: #if defined(DEBUG_MMU)
  817:         printf("A20 update: a20=%d\n", a20_state);
  818: #endif
  819:         /* if the cpu is currently executing code, we must unlink it and
   820:            all potentially executing TBs */
  821:         cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
  822: 
  823:         /* when a20 is changed, all the MMU mappings are invalid, so
  824:            we must flush everything */
  825:         tlb_flush(env, 1);
  826:         env->a20_mask = (~0x100000) | (a20_state << 20);
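        /* (editor's note) with a20_state == 1 this evaluates to ~0, i.e. no
           masking at all; with a20_state == 0 bit 20 of every physical
           address is forced to zero, reproducing the legacy 1 MB wrap. */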
  827:     }
  828: }
  829: 
  830: void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
  831: {
  832:     int pe_state;
  833: 
  834: #if defined(DEBUG_MMU)
  835:     printf("CR0 update: CR0=0x%08x\n", new_cr0);
  836: #endif
  837:     if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
  838:         (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
  839:         tlb_flush(env, 1);
  840:     }
  841: 
  842: #ifdef TARGET_X86_64
  843:     if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
  844:         (env->efer & MSR_EFER_LME)) {
   845:         /* enter long mode */
  846:         /* XXX: generate an exception */
  847:         if (!(env->cr[4] & CR4_PAE_MASK))
  848:             return;
  849:         env->efer |= MSR_EFER_LMA;
  850:         env->hflags |= HF_LMA_MASK;
  851:     } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
  852:                (env->efer & MSR_EFER_LMA)) {
  853:         /* exit long mode */
  854:         env->efer &= ~MSR_EFER_LMA;
  855:         env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
  856:         env->eip &= 0xffffffff;
  857:     }
  858: #endif
  859:     env->cr[0] = new_cr0 | CR0_ET_MASK;
  860: 
  861:     /* update PE flag in hidden flags */
  862:     pe_state = (env->cr[0] & CR0_PE_MASK);
  863:     env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
  864:     /* ensure that ADDSEG is always set in real mode */
  865:     env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
  866:     /* update FPU flags */
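    /* (editor's note) CR0.MP, CR0.EM and CR0.TS sit in consecutive bits 1..3
       and HF_MP_MASK, HF_EM_MASK, HF_TS_MASK are consecutive as well, so a
       single shift by (HF_MP_SHIFT - 1) plus the combined mask copies all
       three flags into hflags at once. */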
  867:     env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
  868:         ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
  869: }
  870: 
  871: /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
  872:    the PDPT */
  873: void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
  874: {
  875:     env->cr[3] = new_cr3;
  876:     if (env->cr[0] & CR0_PG_MASK) {
  877: #if defined(DEBUG_MMU)
  878:         printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
  879: #endif
  880:         tlb_flush(env, 0);
  881:     }
  882: }
  883: 
  884: void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
  885: {
  886: #if defined(DEBUG_MMU)
  887:     printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
  888: #endif
  889:     if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
  890:         (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
  891:         tlb_flush(env, 1);
  892:     }
  893:     /* SSE handling */
  894:     if (!(env->cpuid_features & CPUID_SSE))
  895:         new_cr4 &= ~CR4_OSFXSR_MASK;
  896:     if (new_cr4 & CR4_OSFXSR_MASK)
  897:         env->hflags |= HF_OSFXSR_MASK;
  898:     else
  899:         env->hflags &= ~HF_OSFXSR_MASK;
  900: 
  901:     env->cr[4] = new_cr4;
  902: }
  903: 
  904: #if defined(CONFIG_USER_ONLY)
  905: 
  906: int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
  907:                              int is_write, int mmu_idx, int is_softmmu)
  908: {
  909:     /* user mode only emulation */
  910:     is_write &= 1;
  911:     env->cr[2] = addr;
  912:     env->error_code = (is_write << PG_ERROR_W_BIT);
  913:     env->error_code |= PG_ERROR_U_MASK;
  914:     env->exception_index = EXCP0E_PAGE;
  915:     return 1;
  916: }
  917: 
  918: target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
  919: {
  920:     return addr;
  921: }
  922: 
  923: #else
  924: 
  925: /* XXX: This value should match the one returned by CPUID
  926:  * and in exec.c */
  927: #if defined(USE_KQEMU)
  928: #define PHYS_ADDR_MASK 0xfffff000LL
  929: #else
  930: # if defined(TARGET_X86_64)
  931: # define PHYS_ADDR_MASK 0xfffffff000LL
  932: # else
  933: # define PHYS_ADDR_MASK 0xffffff000LL
  934: # endif
  935: #endif
  936: 
  937: /* return value:
  938:    -1 = cannot handle fault
  939:    0  = nothing more to do
  940:    1  = generate PF fault
  941:    2  = soft MMU activation required for this block
  942: */
  943: int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
  944:                              int is_write1, int mmu_idx, int is_softmmu)
  945: {
  946:     uint64_t ptep, pte;
  947:     target_ulong pde_addr, pte_addr;
  948:     int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
  949:     target_phys_addr_t paddr;
  950:     uint32_t page_offset;
  951:     target_ulong vaddr, virt_addr;
  952: 
  953:     is_user = mmu_idx == MMU_USER_IDX;
  954: #if defined(DEBUG_MMU)
  955:     printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
  956:            addr, is_write1, is_user, env->eip);
  957: #endif
  958:     is_write = is_write1 & 1;
  959: 
  960:     if (!(env->cr[0] & CR0_PG_MASK)) {
  961:         pte = addr;
  962:         virt_addr = addr & TARGET_PAGE_MASK;
  963:         prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
  964:         page_size = 4096;
  965:         goto do_mapping;
  966:     }
  967: 
  968:     if (env->cr[4] & CR4_PAE_MASK) {
  969:         uint64_t pde, pdpe;
  970:         target_ulong pdpe_addr;
  971: 
  972: #ifdef TARGET_X86_64
  973:         if (env->hflags & HF_LMA_MASK) {
  974:             uint64_t pml4e_addr, pml4e;
  975:             int32_t sext;
  976: 
  977:             /* test virtual address sign extension */
  978:             sext = (int64_t)addr >> 47;
  979:             if (sext != 0 && sext != -1) {
  980:                 env->error_code = 0;
  981:                 env->exception_index = EXCP0D_GPF;
  982:                 return 1;
  983:             }
  984: 
  985:             pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
  986:                 env->a20_mask;
  987:             pml4e = ldq_phys(pml4e_addr);
  988:             if (!(pml4e & PG_PRESENT_MASK)) {
  989:                 error_code = 0;
  990:                 goto do_fault;
  991:             }
  992:             if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
  993:                 error_code = PG_ERROR_RSVD_MASK;
  994:                 goto do_fault;
  995:             }
  996:             if (!(pml4e & PG_ACCESSED_MASK)) {
  997:                 pml4e |= PG_ACCESSED_MASK;
  998:                 stl_phys_notdirty(pml4e_addr, pml4e);
  999:             }
 1000:             ptep = pml4e ^ PG_NX_MASK;
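            /* (editor's note) NX is inverted here so that "execute allowed"
               can be accumulated with AND across the page-table walk, just
               like the USER and RW permission bits; the later XOR with
               PG_NX_MASK restores the real NX sense before the checks. */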
 1001:             pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
 1002:                 env->a20_mask;
 1003:             pdpe = ldq_phys(pdpe_addr);
 1004:             if (!(pdpe & PG_PRESENT_MASK)) {
 1005:                 error_code = 0;
 1006:                 goto do_fault;
 1007:             }
 1008:             if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
 1009:                 error_code = PG_ERROR_RSVD_MASK;
 1010:                 goto do_fault;
 1011:             }
 1012:             ptep &= pdpe ^ PG_NX_MASK;
 1013:             if (!(pdpe & PG_ACCESSED_MASK)) {
 1014:                 pdpe |= PG_ACCESSED_MASK;
 1015:                 stl_phys_notdirty(pdpe_addr, pdpe);
 1016:             }
 1017:         } else
 1018: #endif
 1019:         {
 1020:             /* XXX: load them when cr3 is loaded ? */
 1021:             pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
 1022:                 env->a20_mask;
 1023:             pdpe = ldq_phys(pdpe_addr);
 1024:             if (!(pdpe & PG_PRESENT_MASK)) {
 1025:                 error_code = 0;
 1026:                 goto do_fault;
 1027:             }
 1028:             ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
 1029:         }
 1030: 
 1031:         pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
 1032:             env->a20_mask;
 1033:         pde = ldq_phys(pde_addr);
 1034:         if (!(pde & PG_PRESENT_MASK)) {
 1035:             error_code = 0;
 1036:             goto do_fault;
 1037:         }
 1038:         if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
 1039:             error_code = PG_ERROR_RSVD_MASK;
 1040:             goto do_fault;
 1041:         }
 1042:         ptep &= pde ^ PG_NX_MASK;
 1043:         if (pde & PG_PSE_MASK) {
 1044:             /* 2 MB page */
 1045:             page_size = 2048 * 1024;
 1046:             ptep ^= PG_NX_MASK;
 1047:             if ((ptep & PG_NX_MASK) && is_write1 == 2)
 1048:                 goto do_fault_protect;
 1049:             if (is_user) {
 1050:                 if (!(ptep & PG_USER_MASK))
 1051:                     goto do_fault_protect;
 1052:                 if (is_write && !(ptep & PG_RW_MASK))
 1053:                     goto do_fault_protect;
 1054:             } else {
 1055:                 if ((env->cr[0] & CR0_WP_MASK) &&
 1056:                     is_write && !(ptep & PG_RW_MASK))
 1057:                     goto do_fault_protect;
 1058:             }
 1059:             is_dirty = is_write && !(pde & PG_DIRTY_MASK);
 1060:             if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
 1061:                 pde |= PG_ACCESSED_MASK;
 1062:                 if (is_dirty)
 1063:                     pde |= PG_DIRTY_MASK;
 1064:                 stl_phys_notdirty(pde_addr, pde);
 1065:             }
 1066:             /* align to page_size */
 1067:             pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
 1068:             virt_addr = addr & ~(page_size - 1);
 1069:         } else {
 1070:             /* 4 KB page */
 1071:             if (!(pde & PG_ACCESSED_MASK)) {
 1072:                 pde |= PG_ACCESSED_MASK;
 1073:                 stl_phys_notdirty(pde_addr, pde);
 1074:             }
 1075:             pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
 1076:                 env->a20_mask;
 1077:             pte = ldq_phys(pte_addr);
 1078:             if (!(pte & PG_PRESENT_MASK)) {
 1079:                 error_code = 0;
 1080:                 goto do_fault;
 1081:             }
 1082:             if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
 1083:                 error_code = PG_ERROR_RSVD_MASK;
 1084:                 goto do_fault;
 1085:             }
 1086:             /* combine pde and pte nx, user and rw protections */
 1087:             ptep &= pte ^ PG_NX_MASK;
 1088:             ptep ^= PG_NX_MASK;
 1089:             if ((ptep & PG_NX_MASK) && is_write1 == 2)
 1090:                 goto do_fault_protect;
 1091:             if (is_user) {
 1092:                 if (!(ptep & PG_USER_MASK))
 1093:                     goto do_fault_protect;
 1094:                 if (is_write && !(ptep & PG_RW_MASK))
 1095:                     goto do_fault_protect;
 1096:             } else {
 1097:                 if ((env->cr[0] & CR0_WP_MASK) &&
 1098:                     is_write && !(ptep & PG_RW_MASK))
 1099:                     goto do_fault_protect;
 1100:             }
 1101:             is_dirty = is_write && !(pte & PG_DIRTY_MASK);
 1102:             if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
 1103:                 pte |= PG_ACCESSED_MASK;
 1104:                 if (is_dirty)
 1105:                     pte |= PG_DIRTY_MASK;
 1106:                 stl_phys_notdirty(pte_addr, pte);
 1107:             }
 1108:             page_size = 4096;
 1109:             virt_addr = addr & ~0xfff;
 1110:             pte = pte & (PHYS_ADDR_MASK | 0xfff);
 1111:         }
 1112:     } else {
 1113:         uint32_t pde;
 1114: 
 1115:         /* page directory entry */
 1116:         pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
 1117:             env->a20_mask;
 1118:         pde = ldl_phys(pde_addr);
 1119:         if (!(pde & PG_PRESENT_MASK)) {
 1120:             error_code = 0;
 1121:             goto do_fault;
 1122:         }
 1123:         /* if PSE bit is set, then we use a 4MB page */
 1124:         if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
 1125:             page_size = 4096 * 1024;
 1126:             if (is_user) {
 1127:                 if (!(pde & PG_USER_MASK))
 1128:                     goto do_fault_protect;
 1129:                 if (is_write && !(pde & PG_RW_MASK))
 1130:                     goto do_fault_protect;
 1131:             } else {
 1132:                 if ((env->cr[0] & CR0_WP_MASK) &&
 1133:                     is_write && !(pde & PG_RW_MASK))
 1134:                     goto do_fault_protect;
 1135:             }
 1136:             is_dirty = is_write && !(pde & PG_DIRTY_MASK);
 1137:             if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
 1138:                 pde |= PG_ACCESSED_MASK;
 1139:                 if (is_dirty)
 1140:                     pde |= PG_DIRTY_MASK;
 1141:                 stl_phys_notdirty(pde_addr, pde);
 1142:             }
 1143: 
 1144:             pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
 1145:             ptep = pte;
 1146:             virt_addr = addr & ~(page_size - 1);
 1147:         } else {
 1148:             if (!(pde & PG_ACCESSED_MASK)) {
 1149:                 pde |= PG_ACCESSED_MASK;
 1150:                 stl_phys_notdirty(pde_addr, pde);
 1151:             }
 1152: 
  1153:             /* page table entry */
 1154:             pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
 1155:                 env->a20_mask;
 1156:             pte = ldl_phys(pte_addr);
 1157:             if (!(pte & PG_PRESENT_MASK)) {
 1158:                 error_code = 0;
 1159:                 goto do_fault;
 1160:             }
 1161:             /* combine pde and pte user and rw protections */
 1162:             ptep = pte & pde;
 1163:             if (is_user) {
 1164:                 if (!(ptep & PG_USER_MASK))
 1165:                     goto do_fault_protect;
 1166:                 if (is_write && !(ptep & PG_RW_MASK))
 1167:                     goto do_fault_protect;
 1168:             } else {
 1169:                 if ((env->cr[0] & CR0_WP_MASK) &&
 1170:                     is_write && !(ptep & PG_RW_MASK))
 1171:                     goto do_fault_protect;
 1172:             }
 1173:             is_dirty = is_write && !(pte & PG_DIRTY_MASK);
 1174:             if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
 1175:                 pte |= PG_ACCESSED_MASK;
 1176:                 if (is_dirty)
 1177:                     pte |= PG_DIRTY_MASK;
 1178:                 stl_phys_notdirty(pte_addr, pte);
 1179:             }
 1180:             page_size = 4096;
 1181:             virt_addr = addr & ~0xfff;
 1182:         }
 1183:     }
 1184:     /* the page can be put in the TLB */
 1185:     prot = PAGE_READ;
 1186:     if (!(ptep & PG_NX_MASK))
 1187:         prot |= PAGE_EXEC;
 1188:     if (pte & PG_DIRTY_MASK) {
 1189:         /* only set write access if already dirty... otherwise wait
 1190:            for dirty access */
 1191:         if (is_user) {
 1192:             if (ptep & PG_RW_MASK)
 1193:                 prot |= PAGE_WRITE;
 1194:         } else {
 1195:             if (!(env->cr[0] & CR0_WP_MASK) ||
 1196:                 (ptep & PG_RW_MASK))
 1197:                 prot |= PAGE_WRITE;
 1198:         }
 1199:     }
 1200:  do_mapping:
 1201:     pte = pte & env->a20_mask;
 1202: 
  1203:     /* Even with 4MB pages, we map only one 4KB page in the cache to
 1204:        avoid filling it too fast */
 1205:     page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
 1206:     paddr = (pte & TARGET_PAGE_MASK) + page_offset;
 1207:     vaddr = virt_addr + page_offset;
 1208: 
 1209:     ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
 1210:     return ret;
 1211:  do_fault_protect:
 1212:     error_code = PG_ERROR_P_MASK;
 1213:  do_fault:
 1214:     error_code |= (is_write << PG_ERROR_W_BIT);
 1215:     if (is_user)
 1216:         error_code |= PG_ERROR_U_MASK;
 1217:     if (is_write1 == 2 &&
 1218:         (env->efer & MSR_EFER_NXE) &&
 1219:         (env->cr[4] & CR4_PAE_MASK))
 1220:         error_code |= PG_ERROR_I_D_MASK;
 1221:     if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
 1222:         /* cr2 is not modified in case of exceptions */
 1223:         stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
 1224:                  addr);
 1225:     } else {
 1226:         env->cr[2] = addr;
 1227:     }
 1228:     env->error_code = error_code;
 1229:     env->exception_index = EXCP0E_PAGE;
 1230:     return 1;
 1231: }
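/* Editor's sketch (hypothetical, not part of the original file): roughly how
 * a softmmu caller such as tlb_fill() consumes the return value documented
 * above; the real caller lives elsewhere in the target-i386 code and may
 * differ in detail. */
#if 0
ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
if (ret == 1) {
    /* env->error_code and env->exception_index were filled in above;
       deliver the #PF (or #GP for a non-canonical 64-bit address). */
    raise_exception_err(env->exception_index, env->error_code);
}
/* ret == 0: the mapping was entered into the TLB and the access is retried. */
#endif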
 1232: 
 1233: target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
 1234: {
 1235:     target_ulong pde_addr, pte_addr;
 1236:     uint64_t pte;
 1237:     target_phys_addr_t paddr;
 1238:     uint32_t page_offset;
 1239:     int page_size;
 1240: 
 1241:     if (env->cr[4] & CR4_PAE_MASK) {
 1242:         target_ulong pdpe_addr;
 1243:         uint64_t pde, pdpe;
 1244: 
 1245: #ifdef TARGET_X86_64
 1246:         if (env->hflags & HF_LMA_MASK) {
 1247:             uint64_t pml4e_addr, pml4e;
 1248:             int32_t sext;
 1249: 
 1250:             /* test virtual address sign extension */
 1251:             sext = (int64_t)addr >> 47;
 1252:             if (sext != 0 && sext != -1)
 1253:                 return -1;
 1254: 
 1255:             pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
 1256:                 env->a20_mask;
 1257:             pml4e = ldq_phys(pml4e_addr);
 1258:             if (!(pml4e & PG_PRESENT_MASK))
 1259:                 return -1;
 1260: 
 1261:             pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
 1262:                 env->a20_mask;
 1263:             pdpe = ldq_phys(pdpe_addr);
 1264:             if (!(pdpe & PG_PRESENT_MASK))
 1265:                 return -1;
 1266:         } else
 1267: #endif
 1268:         {
 1269:             pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
 1270:                 env->a20_mask;
 1271:             pdpe = ldq_phys(pdpe_addr);
 1272:             if (!(pdpe & PG_PRESENT_MASK))
 1273:                 return -1;
 1274:         }
 1275: 
 1276:         pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
 1277:             env->a20_mask;
 1278:         pde = ldq_phys(pde_addr);
 1279:         if (!(pde & PG_PRESENT_MASK)) {
 1280:             return -1;
 1281:         }
 1282:         if (pde & PG_PSE_MASK) {
 1283:             /* 2 MB page */
 1284:             page_size = 2048 * 1024;
 1285:             pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
 1286:         } else {
 1287:             /* 4 KB page */
 1288:             pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
 1289:                 env->a20_mask;
 1290:             page_size = 4096;
 1291:             pte = ldq_phys(pte_addr);
 1292:         }
 1293:         if (!(pte & PG_PRESENT_MASK))
 1294:             return -1;
 1295:     } else {
 1296:         uint32_t pde;
 1297: 
 1298:         if (!(env->cr[0] & CR0_PG_MASK)) {
 1299:             pte = addr;
 1300:             page_size = 4096;
 1301:         } else {
 1302:             /* page directory entry */
 1303:             pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
 1304:             pde = ldl_phys(pde_addr);
 1305:             if (!(pde & PG_PRESENT_MASK))
 1306:                 return -1;
 1307:             if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
 1308:                 pte = pde & ~0x003ff000; /* align to 4MB */
 1309:                 page_size = 4096 * 1024;
 1310:             } else {
  1311:                 /* page table entry */
 1312:                 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
 1313:                 pte = ldl_phys(pte_addr);
 1314:                 if (!(pte & PG_PRESENT_MASK))
 1315:                     return -1;
 1316:                 page_size = 4096;
 1317:             }
 1318:         }
 1319:         pte = pte & env->a20_mask;
 1320:     }
 1321: 
 1322:     page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
 1323:     paddr = (pte & TARGET_PAGE_MASK) + page_offset;
 1324:     return paddr;
 1325: }
 1326: 
 1327: void hw_breakpoint_insert(CPUState *env, int index)
 1328: {
 1329:     int type, err = 0;
 1330: 
 1331:     switch (hw_breakpoint_type(env->dr[7], index)) {
 1332:     case 0:
 1333:         if (hw_breakpoint_enabled(env->dr[7], index))
 1334:             err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
 1335:                                         &env->cpu_breakpoint[index]);
 1336:         break;
 1337:     case 1:
 1338:         type = BP_CPU | BP_MEM_WRITE;
 1339:         goto insert_wp;
 1340:     case 2:
 1341:          /* No support for I/O watchpoints yet */
 1342:         break;
 1343:     case 3:
 1344:         type = BP_CPU | BP_MEM_ACCESS;
 1345:     insert_wp:
 1346:         err = cpu_watchpoint_insert(env, env->dr[index],
 1347:                                     hw_breakpoint_len(env->dr[7], index),
 1348:                                     type, &env->cpu_watchpoint[index]);
 1349:         break;
 1350:     }
 1351:     if (err)
 1352:         env->cpu_breakpoint[index] = NULL;
 1353: }
 1354: 
 1355: void hw_breakpoint_remove(CPUState *env, int index)
 1356: {
 1357:     if (!env->cpu_breakpoint[index])
 1358:         return;
 1359:     switch (hw_breakpoint_type(env->dr[7], index)) {
 1360:     case 0:
 1361:         if (hw_breakpoint_enabled(env->dr[7], index))
 1362:             cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
 1363:         break;
 1364:     case 1:
 1365:     case 3:
 1366:         cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
 1367:         break;
 1368:     case 2:
 1369:         /* No support for I/O watchpoints yet */
 1370:         break;
 1371:     }
 1372: }
 1373: 
 1374: int check_hw_breakpoints(CPUState *env, int force_dr6_update)
 1375: {
 1376:     target_ulong dr6;
 1377:     int reg, type;
 1378:     int hit_enabled = 0;
 1379: 
 1380:     dr6 = env->dr[6] & ~0xf;
 1381:     for (reg = 0; reg < 4; reg++) {
 1382:         type = hw_breakpoint_type(env->dr[7], reg);
 1383:         if ((type == 0 && env->dr[reg] == env->eip) ||
 1384:             ((type & 1) && env->cpu_watchpoint[reg] &&
 1385:              (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
 1386:             dr6 |= 1 << reg;
 1387:             if (hw_breakpoint_enabled(env->dr[7], reg))
 1388:                 hit_enabled = 1;
 1389:         }
 1390:     }
 1391:     if (hit_enabled || force_dr6_update)
 1392:         env->dr[6] = dr6;
 1393:     return hit_enabled;
 1394: }
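/* Bits 0-3 of the DR6 value computed above record which debug register
 * matched (a match is noted even if that breakpoint is not enabled); the
 * result is only written back to env->dr[6] when an enabled breakpoint
 * actually hit or when the caller forces the update. */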
 1395: 
 1396: static CPUDebugExcpHandler *prev_debug_excp_handler;
 1397: 
 1398: void raise_exception(int exception_index);
 1399: 
 1400: static void breakpoint_handler(CPUState *env)
 1401: {
 1402:     CPUBreakpoint *bp;
 1403: 
 1404:     if (env->watchpoint_hit) {
 1405:         if (env->watchpoint_hit->flags & BP_CPU) {
 1406:             env->watchpoint_hit = NULL;
 1407:             if (check_hw_breakpoints(env, 0))
 1408:                 raise_exception(EXCP01_DB);
 1409:             else
 1410:                 cpu_resume_from_signal(env, NULL);
 1411:         }
 1412:     } else {
 1413:         TAILQ_FOREACH(bp, &env->breakpoints, entry)
 1414:             if (bp->pc == env->eip) {
 1415:                 if (bp->flags & BP_CPU) {
 1416:                     check_hw_breakpoints(env, 1);
 1417:                     raise_exception(EXCP01_DB);
 1418:                 }
 1419:                 break;
 1420:             }
 1421:     }
 1422:     if (prev_debug_excp_handler)
 1423:         prev_debug_excp_handler(env);
 1424: }
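/* breakpoint_handler() is registered from cpu_x86_init() below via
 * cpu_set_debug_excp_handler(); any previously installed handler is saved
 * in prev_debug_excp_handler and chained to at the end of this function. */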
 1425: #endif /* !CONFIG_USER_ONLY */
 1426: 
 1427: static void host_cpuid(uint32_t function, uint32_t count,
 1428:                        uint32_t *eax, uint32_t *ebx,
 1429:                        uint32_t *ecx, uint32_t *edx)
 1430: {
 1431: #if defined(CONFIG_KVM)
 1432:     uint32_t vec[4];
 1433: 
 1434: #ifdef __x86_64__
 1435:     asm volatile("cpuid"
 1436:                  : "=a"(vec[0]), "=b"(vec[1]),
 1437:                    "=c"(vec[2]), "=d"(vec[3])
 1438:                  : "0"(function), "c"(count) : "cc");
 1439: #else
 1440:     asm volatile("pusha \n\t"
 1441:                  "cpuid \n\t"
 1442:                  "mov %%eax, 0(%2) \n\t"
 1443:                  "mov %%ebx, 4(%2) \n\t"
 1444:                  "mov %%ecx, 8(%2) \n\t"
 1445:                  "mov %%edx, 12(%2) \n\t"
 1446:                  "popa"
 1447:                  : : "a"(function), "c"(count), "S"(vec)
 1448:                  : "memory", "cc");
 1449: #endif
 1450: 
 1451:     if (eax)
 1452:         *eax = vec[0];
 1453:     if (ebx)
 1454:         *ebx = vec[1];
 1455:     if (ecx)
 1456:         *ecx = vec[2];
 1457:     if (edx)
 1458:         *edx = vec[3];
 1459: #endif
 1460: }
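/* The 32-bit asm variant above saves all general registers with pusha/popa
 * and stores the CPUID results through %esi rather than using register
 * outputs, presumably so that %ebx (reserved for the GOT pointer in PIC
 * builds) never has to be named as an output or clobber. */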
 1461: 
 1462: void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
 1463:                    uint32_t *eax, uint32_t *ebx,
 1464:                    uint32_t *ecx, uint32_t *edx)
 1465: {
 1466:     /* if the requested leaf is out of range, fall back to the highest basic leaf */
 1467:     if (index & 0x80000000) {
 1468:         if (index > env->cpuid_xlevel)
 1469:             index = env->cpuid_level;
 1470:     } else {
 1471:         if (index > env->cpuid_level)
 1472:             index = env->cpuid_level;
 1473:     }
 1474: 
 1475:     switch(index) {
 1476:     case 0:
 1477:         *eax = env->cpuid_level;
 1478:         *ebx = env->cpuid_vendor1;
 1479:         *edx = env->cpuid_vendor2;
 1480:         *ecx = env->cpuid_vendor3;
 1481: 
 1482:         /* SYSENTER isn't supported in compatibility mode on AMD, and SYSCALL
 1483:          * isn't supported in compatibility mode on Intel, so advertise the
 1484:          * actual host CPU vendor and say goodbye to migration between
 1485:          * different vendors if you use compatibility mode. */
 1486:         if (kvm_enabled())
 1487:             host_cpuid(0, 0, NULL, ebx, ecx, edx);
 1488:         break;
 1489:     case 1:
 1490:         *eax = env->cpuid_version;
 1491:         *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH line size in quadwords (8 * 8 = 64 bytes); Linux reads this. */
 1492:         *ecx = env->cpuid_ext_features;
 1493:         *edx = env->cpuid_features;
 1494: 
 1495:         /* "Hypervisor present" bit required for Microsoft SVVP */
 1496:         if (kvm_enabled())
 1497:             *ecx |= (1 << 31);
 1498:         break;
 1499:     case 2:
 1500:         /* cache info: needed for Pentium Pro compatibility */
 1501:         *eax = 1;
 1502:         *ebx = 0;
 1503:         *ecx = 0;
 1504:         *edx = 0x2c307d;
 1505:         break;
 1506:     case 4:
 1507:         /* cache info: needed for Core compatibility */
 1508:         switch (count) {
 1509:             case 0: /* L1 dcache info */
 1510:                 *eax = 0x0000121;
 1511:                 *ebx = 0x1c0003f;
 1512:                 *ecx = 0x000003f;
 1513:                 *edx = 0x0000001;
 1514:                 break;
 1515:             case 1: /* L1 icache info */
 1516:                 *eax = 0x0000122;
 1517:                 *ebx = 0x1c0003f;
 1518:                 *ecx = 0x000003f;
 1519:                 *edx = 0x0000001;
 1520:                 break;
 1521:             case 2: /* L2 cache info */
 1522:                 *eax = 0x0000143;
 1523:                 *ebx = 0x3c0003f;
 1524:                 *ecx = 0x0000fff;
 1525:                 *edx = 0x0000001;
 1526:                 break;
 1527:             default: /* end of info */
 1528:                 *eax = 0;
 1529:                 *ebx = 0;
 1530:                 *ecx = 0;
 1531:                 *edx = 0;
 1532:                 break;
 1533:         }
 1534:         break;
 1535:     case 5:
 1536:         /* mwait info: needed for Core compatibility */
 1537:         *eax = 0; /* Smallest monitor-line size in bytes */
 1538:         *ebx = 0; /* Largest monitor-line size in bytes */
 1539:         *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
 1540:         *edx = 0;
 1541:         break;
 1542:     case 6:
 1543:         /* Thermal and Power Leaf */
 1544:         *eax = 0;
 1545:         *ebx = 0;
 1546:         *ecx = 0;
 1547:         *edx = 0;
 1548:         break;
 1549:     case 9:
 1550:         /* Direct Cache Access Information Leaf */
 1551:         *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
 1552:         *ebx = 0;
 1553:         *ecx = 0;
 1554:         *edx = 0;
 1555:         break;
 1556:     case 0xA:
 1557:         /* Architectural Performance Monitoring Leaf */
 1558:         *eax = 0;
 1559:         *ebx = 0;
 1560:         *ecx = 0;
 1561:         *edx = 0;
 1562:         break;
 1563:     case 0x80000000:
 1564:         *eax = env->cpuid_xlevel;
 1565:         *ebx = env->cpuid_vendor1;
 1566:         *edx = env->cpuid_vendor2;
 1567:         *ecx = env->cpuid_vendor3;
 1568:         break;
 1569:     case 0x80000001:
 1570:         *eax = env->cpuid_features;
 1571:         *ebx = 0;
 1572:         *ecx = env->cpuid_ext3_features;
 1573:         *edx = env->cpuid_ext2_features;
 1574: 
 1575:         if (kvm_enabled()) {
 1576:             uint32_t h_eax, h_edx;
 1577: 
 1578:             host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);
 1579: 
 1580:             /* disable CPU features that the host does not support */
 1581: 
 1582:             /* long mode */
 1583:             if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
 1584:                 *edx &= ~0x20000000;
 1585:             /* syscall */
 1586:             if ((h_edx & 0x00000800) == 0)
 1587:                 *edx &= ~0x00000800;
 1588:             /* nx */
 1589:             if ((h_edx & 0x00100000) == 0)
 1590:                 *edx &= ~0x00100000;
 1591: 
 1592:             /* disable CPU features that KVM cannot support */
 1593: 
 1594:             /* svm */
 1595:             *ecx &= ~4UL;
 1596:             /* 3dnow */
 1597:             *edx &= ~0xc0000000;
 1598:         }
 1599:         break;
 1600:     case 0x80000002:
 1601:     case 0x80000003:
 1602:     case 0x80000004:
 1603:         *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
 1604:         *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
 1605:         *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
 1606:         *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
 1607:         break;
 1608:     case 0x80000005:
 1609:         /* cache info (L1 cache) */
 1610:         *eax = 0x01ff01ff;
 1611:         *ebx = 0x01ff01ff;
 1612:         *ecx = 0x40020140;
 1613:         *edx = 0x40020140;
 1614:         break;
 1615:     case 0x80000006:
 1616:         /* cache info (L2 cache) */
 1617:         *eax = 0;
 1618:         *ebx = 0x42004200;
 1619:         *ecx = 0x02008140;
 1620:         *edx = 0;
 1621:         break;
 1622:     case 0x80000008:
 1623:         /* physical address bits in EAX[7:0], linear address bits in EAX[15:8] */
 1624:         /* XXX: This value must match the one used in the MMU code. */
 1625:         if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
 1626:             /* 64 bit processor */
 1627: #if defined(USE_KQEMU)
 1628:             *eax = 0x00003020;	/* 48 bits virtual, 32 bits physical */
 1629: #else
 1630:             /* XXX: The physical address space is limited to 42 bits in exec.c. */
 1631:             *eax = 0x00003028;	/* 48 bits virtual, 40 bits physical */
 1632: #endif
 1633:         } else {
 1634: #if defined(USE_KQEMU)
 1635:             *eax = 0x00000020;	/* 32 bits physical */
 1636: #else
 1637:             if (env->cpuid_features & CPUID_PSE36)
 1638:                 *eax = 0x00000024; /* 36 bits physical */
 1639:             else
 1640:                 *eax = 0x00000020; /* 32 bits physical */
 1641: #endif
 1642:         }
 1643:         *ebx = 0;
 1644:         *ecx = 0;
 1645:         *edx = 0;
 1646:         break;
 1647:     case 0x8000000A:
 1648:         *eax = 0x00000001; /* SVM Revision */
 1649:         *ebx = 0x00000010; /* nr of ASIDs */
 1650:         *ecx = 0;
 1651:         *edx = 0; /* optional features */
 1652:         break;
 1653:     default:
 1654:         /* reserved values: zero */
 1655:         *eax = 0;
 1656:         *ebx = 0;
 1657:         *ecx = 0;
 1658:         *edx = 0;
 1659:         break;
 1660:     }
 1661: }
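/* Illustrative sketch (disabled): one way a caller could reassemble the
 * 12-character vendor string returned by leaf 0 above.  The bytes are laid
 * out in EBX, EDX, ECX order, matching the cpuid_vendor1/2/3 assignments in
 * case 0; the function name is just an example. */
#if 0
static void print_vendor_string(CPUX86State *env)
{
    uint32_t eax, ebx, ecx, edx;
    char vendor[13];

    cpu_x86_cpuid(env, 0, 0, &eax, &ebx, &ecx, &edx);
    memcpy(vendor + 0, &ebx, 4);    /* e.g. "Genu" */
    memcpy(vendor + 4, &edx, 4);    /* e.g. "ineI" */
    memcpy(vendor + 8, &ecx, 4);    /* e.g. "ntel" */
    vendor[12] = '\0';
    printf("vendor: %s, max basic leaf: %u\n", vendor, eax);
}
#endif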
 1662: 
 1663: CPUX86State *cpu_x86_init(const char *cpu_model)
 1664: {
 1665:     CPUX86State *env;
 1666:     static int inited;
 1667: 
 1668:     env = qemu_mallocz(sizeof(CPUX86State));
 1669:     cpu_exec_init(env);
 1670:     env->cpu_model_str = cpu_model;
 1671: 
 1672:     /* init various static tables */
 1673:     if (!inited) {
 1674:         inited = 1;
 1675:         optimize_flags_init();
 1676: #ifndef CONFIG_USER_ONLY
 1677:         prev_debug_excp_handler =
 1678:             cpu_set_debug_excp_handler(breakpoint_handler);
 1679: #endif
 1680:     }
 1681:     if (cpu_x86_register(env, cpu_model) < 0) {
 1682:         cpu_x86_close(env);
 1683:         return NULL;
 1684:     }
 1685:     cpu_reset(env);
 1686: #ifdef USE_KQEMU
 1687:     kqemu_init(env);
 1688: #endif
 1689:     if (kvm_enabled())
 1690:         kvm_init_vcpu(env);
 1691:     if (kvm_enabled()) {
 1692:         kvm_trim_features(&env->cpuid_features,
 1693:                           kvm_arch_get_supported_cpuid(env, 1, R_EDX),
 1694:                           feature_name);
 1695:         kvm_trim_features(&env->cpuid_ext_features,
 1696:                           kvm_arch_get_supported_cpuid(env, 1, R_ECX),
 1697:                           ext_feature_name);
 1698:         kvm_trim_features(&env->cpuid_ext2_features,
 1699:                           kvm_arch_get_supported_cpuid(env, 0x80000001, R_EDX),
 1700:                           ext2_feature_name);
 1701:         kvm_trim_features(&env->cpuid_ext3_features,
 1702:                           kvm_arch_get_supported_cpuid(env, 0x80000001, R_ECX),
 1703:                           ext3_feature_name);
 1704:     }
 1705: 
 1706:     return env;
 1707: }
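/* Illustrative sketch (disabled): typical use of cpu_x86_init() by machine
 * setup code.  The model name "qemu64" is only an example; the accepted
 * names are whatever cpu_x86_register() finds in its CPU definition table. */
#if 0
static CPUX86State *create_default_cpu(void)
{
    CPUX86State *env = cpu_x86_init("qemu64");

    if (!env) {
        fprintf(stderr, "could not initialize x86 CPU\n");
        return NULL;
    }
    return env;
}
#endif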
