This patch contains the userspace part necessary to make use of the KVM 1GB page support. Signed-off-by: Joerg Roedel <joerg.roedel@xxxxxxx> --- libkvm/libkvm.c | 9 +++++++++ libkvm/libkvm.h | 1 + qemu/qemu-kvm.c | 6 ++++++ qemu/qemu-kvm.h | 3 +++ qemu/qemu-options.hx | 2 ++ qemu/target-i386/helper.c | 4 ++++ qemu/vl.c | 4 ++++ 7 files changed, 29 insertions(+), 0 deletions(-) diff --git a/libkvm/libkvm.c b/libkvm/libkvm.c index 0610e3f..a699fe9 100644 --- a/libkvm/libkvm.c +++ b/libkvm/libkvm.c @@ -1065,6 +1065,15 @@ int kvm_has_sync_mmu(kvm_context_t kvm) return r; } +int kvm_has_gbpages(kvm_context_t kvm) +{ + int r = 0; +#ifdef KVM_CAP_1GB_PAGES + r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_1GB_PAGES); +#endif + return r; +} + int kvm_inject_nmi(kvm_context_t kvm, int vcpu) { #ifdef KVM_CAP_USER_NMI diff --git a/libkvm/libkvm.h b/libkvm/libkvm.h index d3e431a..efbc23e 100644 --- a/libkvm/libkvm.h +++ b/libkvm/libkvm.h @@ -558,6 +558,7 @@ int kvm_dirty_pages_log_reset(kvm_context_t kvm); int kvm_irqchip_in_kernel(kvm_context_t kvm); int kvm_has_sync_mmu(kvm_context_t kvm); +int kvm_has_gbpages(kvm_context_t kvm); #ifdef KVM_CAP_IRQCHIP /*! 
diff --git a/qemu/qemu-kvm.c b/qemu/qemu-kvm.c index ed76367..bae0639 100644 --- a/qemu/qemu-kvm.c +++ b/qemu/qemu-kvm.c @@ -33,6 +33,7 @@ int kvm_irqchip = 1; int kvm_pit = 1; int kvm_pit_reinject = 1; int kvm_nested = 0; +int kvm_gb_pages = 0; kvm_context_t kvm_context; pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER; @@ -1401,6 +1402,11 @@ int qemu_kvm_has_sync_mmu(void) return kvm_has_sync_mmu(kvm_context); } +int qemu_kvm_has_gbpages(void) +{ + return kvm_has_gbpages(kvm_context); +} + void qemu_kvm_cpu_stop(CPUState *env) { if (kvm_enabled()) diff --git a/qemu/qemu-kvm.h b/qemu/qemu-kvm.h index ca59af8..6f2d99f 100644 --- a/qemu/qemu-kvm.h +++ b/qemu/qemu-kvm.h @@ -151,6 +151,7 @@ extern int kvm_irqchip; extern int kvm_pit; extern int kvm_pit_reinject; extern int kvm_nested; +extern int kvm_gb_pages; extern kvm_context_t kvm_context; struct ioperm_data { @@ -161,6 +162,7 @@ struct ioperm_data { }; int qemu_kvm_has_sync_mmu(void); +int qemu_kvm_has_gbpages(void); void qemu_kvm_cpu_stop(CPUState *env); #define kvm_enabled() (kvm_allowed) @@ -172,6 +174,7 @@ void kvm_load_tsc(CPUState *env); #else #define kvm_enabled() (0) #define kvm_nested 0 +#define kvm_gb_pages 0 #define qemu_kvm_irqchip_in_kernel() (0) #define qemu_kvm_pit_in_kernel() (0) #define kvm_has_sync_mmu() (0) diff --git a/qemu/qemu-options.hx b/qemu/qemu-options.hx index ba7f291..ccc1769 100644 --- a/qemu/qemu-options.hx +++ b/qemu/qemu-options.hx @@ -1490,6 +1490,8 @@ DEF("pcidevice", HAS_ARG, QEMU_OPTION_pcidevice, #endif DEF("enable-nesting", 0, QEMU_OPTION_enable_nesting, "-enable-nesting enable support for running a VM inside the VM (AMD only)\n") +DEF("enable-gb-pages", 0, QEMU_OPTION_enable_gb_pages, + "-enable-gb-pages enable GB pages in the guest (if host supports it)\n") DEF("cpu-vendor", HAS_ARG, QEMU_OPTION_cpu_vendor, "-cpu-vendor STRING override the cpuid vendor string\n") DEF("nvram", HAS_ARG, QEMU_OPTION_nvram, diff --git a/qemu/target-i386/helper.c b/qemu/target-i386/helper.c 
index be7b021..78a5db5 100644 --- a/qemu/target-i386/helper.c +++ b/qemu/target-i386/helper.c @@ -1585,6 +1585,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *ecx &= ~4UL; /* 3dnow */ *edx &= ~0xc0000000; +#ifdef USE_KVM + if (qemu_kvm_has_gbpages() && kvm_gb_pages) + *edx |= CPUID_EXT2_PDPE1GB; +#endif } break; case 0x80000002: diff --git a/qemu/vl.c b/qemu/vl.c index 8aa9ebc..ac28fc4 100644 --- a/qemu/vl.c +++ b/qemu/vl.c @@ -5024,6 +5024,10 @@ int main(int argc, char **argv, char **envp) kvm_nested = 1; break; } + case QEMU_OPTION_enable_gb_pages: { + kvm_gb_pages = 1; + break; + } #if defined(TARGET_I386) || defined(TARGET_X86_64) || defined(TARGET_IA64) || defined(__linux__) case QEMU_OPTION_pcidevice: if (assigned_devices_index >= MAX_DEV_ASSIGN_CMDLINE) { -- 1.6.2.3 -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html