KMSAN: uninit-value in em_ret_far

Hello, I found a bug in the latest upstream 6.7-rc7, titled "KMSAN:
uninit-value in em_ret_far", which appears to be related to KVM.

If you fix this issue, please add the following tag to the commit:
Reported-by: xingwei lee <xrivendell7@xxxxxxxxx>

kernel: mainline 861deac3b092f37b2c5e6871732f3e11486f7082
kernel config: https://syzkaller.appspot.com/text?tag=KernelConfig&x=4a65fa9f077ead01
with KMSAN enabled
compiler: Debian clang version 15.0.6, GNU ld (GNU Binutils for Debian) 2.40
build repro.c (gcc, linked with -pthread), run it for about 3 minutes, and it crashes!

TITLE: KMSAN: uninit-value in em_ret_far
=====================================================
BUG: KMSAN: uninit-value in emulator_recalc_and_set_mode
arch/x86/kvm/emulate.c:797 [inline]
BUG: KMSAN: uninit-value in assign_eip_far arch/x86/kvm/emulate.c:833 [inline]
BUG: KMSAN: uninit-value in em_ret_far+0x348/0x350 arch/x86/kvm/emulate.c:2258
emulator_recalc_and_set_mode arch/x86/kvm/emulate.c:797 [inline]
assign_eip_far arch/x86/kvm/emulate.c:833 [inline]
em_ret_far+0x348/0x350 arch/x86/kvm/emulate.c:2258
em_ret_far_imm arch/x86/kvm/emulate.c:2273 [inline]
em_ret_far_imm+0x37/0x510 arch/x86/kvm/emulate.c:2266
string_registers_quirk arch/x86/kvm/emulate.c:2647 [inline]
x86_emulate_insn+0x1d59/0x5790 arch/x86/kvm/emulate.c:5229
kvm_vcpu_check_hw_bp arch/x86/kvm/x86.c:8813 [inline]
kvm_vcpu_check_code_breakpoint arch/x86/kvm/x86.c:8919 [inline]
x86_emulate_instruction+0x182b/0x3070 arch/x86/kvm/x86.c:9017
complete_emulated_mmio+0x6ed/0x890 arch/x86/kvm/x86.c:11199
kvm_queue_exception_vmexit arch/x86/kvm/x86.c:634 [inline]
kvm_arch_vcpu_ioctl_run+0x190c/0xc5f0 arch/x86/kvm/x86.c:11293
copy_from_user include/linux/uaccess.h:183 [inline]
kvm_vcpu_ioctl+0xc6c/0x1800 arch/x86/kvm/../../../virt/kvm/kvm_main.c:4274
__se_sys_ioctl+0x211/0x400
__do_sys_ioctl fs/ioctl.c:862 [inline]
__se_sys_ioctl fs/ioctl.c:857 [inline]
__x64_sys_ioctl+0x97/0xe0 fs/ioctl.c:857
do_syscall_x64 arch/x86/entry/common.c:52 [inline]
do_syscall_64+0x44/0x110 arch/x86/entry/common.c:83
entry_SYSCALL_64_after_hwframe+0x63/0x6b
Local variable eip created at:
em_ret_far+0x3a/0x350 arch/x86/kvm/emulate.c:2241
em_ret_far_imm arch/x86/kvm/emulate.c:2273 [inline]
em_ret_far_imm+0x37/0x510 arch/x86/kvm/emulate.c:2266
CPU: 3 PID: 15635 Comm: be9 Not tainted 6.7.0-rc7-dirty #9
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
1.16.2-1.fc38 04/01/2014
=====================================================
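
For orientation, here is a heavily trimmed paraphrase (not a verbatim copy)
of the path the trace points at in arch/x86/kvm/emulate.c: em_ret_far()
pops the return eip/cs off the emulated stack and passes eip to
assign_eip_far(), which calls emulator_recalc_and_set_mode(); KMSAN reports
that the local eip is still uninitialized at that point.

/* paraphrased sketch of arch/x86/kvm/emulate.c (6.7-rc7), not verbatim */
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
  unsigned long eip, cs;  /* "Local variable eip created at" emulate.c:2241 */
  int rc;

  rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);  /* pop return eip */
  if (rc != X86EMUL_CONTINUE)
    return rc;
  rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);   /* pop return cs */
  if (rc != X86EMUL_CONTINUE)
    return rc;
  /* ... the new CS descriptor is loaded here ... */
  rc = assign_eip_far(ctxt, eip);  /* -> emulator_recalc_and_set_mode(), emulate.c:833/797 */
  return rc;
}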


=* repro.c =*
#define _GNU_SOURCE

#include <dirent.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/futex.h>
#include <linux/kvm.h>
#include <pthread.h>
#include <setjmp.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#ifndef __NR_memfd_create
#define __NR_memfd_create 319
#endif

static unsigned long long procid;

static __thread int clone_ongoing;
static __thread int skip_segv;
static __thread jmp_buf segv_env;

static void segv_handler(int sig, siginfo_t* info, void* ctx) {
 if (__atomic_load_n(&clone_ongoing, __ATOMIC_RELAXED) != 0) {
   exit(sig);
 }
 uintptr_t addr = (uintptr_t)info->si_addr;
 const uintptr_t prog_start = 1 << 20;
 const uintptr_t prog_end = 100 << 20;
 int skip = __atomic_load_n(&skip_segv, __ATOMIC_RELAXED) != 0;
 int valid = addr < prog_start || addr > prog_end;
 if (skip && valid) {
   _longjmp(segv_env, 1);
 }
 exit(sig);
}

static void install_segv_handler(void) {
 struct sigaction sa;
 memset(&sa, 0, sizeof(sa));
 sa.sa_handler = SIG_IGN;
 syscall(SYS_rt_sigaction, 0x20, &sa, NULL, 8);
 syscall(SYS_rt_sigaction, 0x21, &sa, NULL, 8);
 memset(&sa, 0, sizeof(sa));
 sa.sa_sigaction = segv_handler;
 sa.sa_flags = SA_NODEFER | SA_SIGINFO;
 sigaction(SIGSEGV, &sa, NULL);
 sigaction(SIGBUS, &sa, NULL);
}

#define NONFAILING(...)                                  \
 ({                                                     \
   int ok = 1;                                          \
   __atomic_fetch_add(&skip_segv, 1, __ATOMIC_SEQ_CST); \
   if (_setjmp(segv_env) == 0) {                        \
     __VA_ARGS__;                                       \
   } else                                               \
     ok = 0;                                            \
   __atomic_fetch_sub(&skip_segv, 1, __ATOMIC_SEQ_CST); \
   ok;                                                  \
 })

static void sleep_ms(uint64_t ms) { usleep(ms * 1000); }

static uint64_t current_time_ms(void) {
 struct timespec ts;
 if (clock_gettime(CLOCK_MONOTONIC, &ts)) exit(1);
 return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

static void thread_start(void* (*fn)(void*), void* arg) {
 pthread_t th;
 pthread_attr_t attr;
 pthread_attr_init(&attr);
 pthread_attr_setstacksize(&attr, 128 << 10);
 int i = 0;
 for (; i < 100; i++) {
   if (pthread_create(&th, &attr, fn, arg) == 0) {
     pthread_attr_destroy(&attr);
     return;
   }
   if (errno == EAGAIN) {
     usleep(50);
     continue;
   }
   break;
 }
 exit(1);
}

typedef struct {
 int state;
} event_t;

static void event_init(event_t* ev) { ev->state = 0; }

static void event_reset(event_t* ev) { ev->state = 0; }

static void event_set(event_t* ev) {
 if (ev->state) exit(1);
 __atomic_store_n(&ev->state, 1, __ATOMIC_RELEASE);
 syscall(SYS_futex, &ev->state, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1000000);
}

static void event_wait(event_t* ev) {
 while (!__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
   syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, 0);
}

static int event_isset(event_t* ev) {
 return __atomic_load_n(&ev->state, __ATOMIC_ACQUIRE);
}

static int event_timedwait(event_t* ev, uint64_t timeout) {
 uint64_t start = current_time_ms();
 uint64_t now = start;
 for (;;) {
   uint64_t remain = timeout - (now - start);
   struct timespec ts;
   ts.tv_sec = remain / 1000;
   ts.tv_nsec = (remain % 1000) * 1000 * 1000;
   syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, &ts);
   if (__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE)) return 1;
   now = current_time_ms();
   if (now - start > timeout) return 0;
 }
}

static bool write_file(const char* file, const char* what, ...) {
 char buf[1024];
 va_list args;
 va_start(args, what);
 vsnprintf(buf, sizeof(buf), what, args);
 va_end(args);
 buf[sizeof(buf) - 1] = 0;
 int len = strlen(buf);
 int fd = open(file, O_WRONLY | O_CLOEXEC);
 if (fd == -1) return false;
 if (write(fd, buf, len) != len) {
   int err = errno;
   close(fd);
   errno = err;
   return false;
 }
 close(fd);
 return true;
}

#define ADDR_TEXT 0x0000
#define ADDR_GDT 0x1000
#define ADDR_LDT 0x1800
#define ADDR_PML4 0x2000
#define ADDR_PDP 0x3000
#define ADDR_PD 0x4000
#define ADDR_STACK0 0x0f80
#define ADDR_VAR_HLT 0x2800
#define ADDR_VAR_SYSRET 0x2808
#define ADDR_VAR_SYSEXIT 0x2810
#define ADDR_VAR_IDT 0x3800
#define ADDR_VAR_TSS64 0x3a00
#define ADDR_VAR_TSS64_CPL3 0x3c00
#define ADDR_VAR_TSS16 0x3d00
#define ADDR_VAR_TSS16_2 0x3e00
#define ADDR_VAR_TSS16_CPL3 0x3f00
#define ADDR_VAR_TSS32 0x4800
#define ADDR_VAR_TSS32_2 0x4a00
#define ADDR_VAR_TSS32_CPL3 0x4c00
#define ADDR_VAR_TSS32_VM86 0x4e00
#define ADDR_VAR_VMXON_PTR 0x5f00
#define ADDR_VAR_VMCS_PTR 0x5f08
#define ADDR_VAR_VMEXIT_PTR 0x5f10
#define ADDR_VAR_VMWRITE_FLD 0x5f18
#define ADDR_VAR_VMWRITE_VAL 0x5f20
#define ADDR_VAR_VMXON 0x6000
#define ADDR_VAR_VMCS 0x7000
#define ADDR_VAR_VMEXIT_CODE 0x9000
#define ADDR_VAR_USER_CODE 0x9100
#define ADDR_VAR_USER_CODE2 0x9120

#define SEL_LDT (1 << 3)
#define SEL_CS16 (2 << 3)
#define SEL_DS16 (3 << 3)
#define SEL_CS16_CPL3 ((4 << 3) + 3)
#define SEL_DS16_CPL3 ((5 << 3) + 3)
#define SEL_CS32 (6 << 3)
#define SEL_DS32 (7 << 3)
#define SEL_CS32_CPL3 ((8 << 3) + 3)
#define SEL_DS32_CPL3 ((9 << 3) + 3)
#define SEL_CS64 (10 << 3)
#define SEL_DS64 (11 << 3)
#define SEL_CS64_CPL3 ((12 << 3) + 3)
#define SEL_DS64_CPL3 ((13 << 3) + 3)
#define SEL_CGATE16 (14 << 3)
#define SEL_TGATE16 (15 << 3)
#define SEL_CGATE32 (16 << 3)
#define SEL_TGATE32 (17 << 3)
#define SEL_CGATE64 (18 << 3)
#define SEL_CGATE64_HI (19 << 3)
#define SEL_TSS16 (20 << 3)
#define SEL_TSS16_2 (21 << 3)
#define SEL_TSS16_CPL3 ((22 << 3) + 3)
#define SEL_TSS32 (23 << 3)
#define SEL_TSS32_2 (24 << 3)
#define SEL_TSS32_CPL3 ((25 << 3) + 3)
#define SEL_TSS32_VM86 (26 << 3)
#define SEL_TSS64 (27 << 3)
#define SEL_TSS64_HI (28 << 3)
#define SEL_TSS64_CPL3 ((29 << 3) + 3)
#define SEL_TSS64_CPL3_HI (30 << 3)

#define MSR_IA32_FEATURE_CONTROL 0x3a
#define MSR_IA32_VMX_BASIC 0x480
#define MSR_IA32_SMBASE 0x9e
#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176
#define MSR_IA32_STAR 0xC0000081
#define MSR_IA32_LSTAR 0xC0000082
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x48B

#define NEXT_INSN $0xbadc0de
#define PREFIX_SIZE 0xba1d
const char kvm_asm16_cpl3[] =
   "\x0f\x20\xc0\x66\x83\xc8\x01\x0f\x22\xc0\xb8\xa0\x00\x0f\x00\xd8\xb8\x2b"
   "\x00\x8e\xd8\x8e\xc0\x8e\xe0\x8e\xe8\xbc\x00\x01\xc7\x06\x00\x01\x1d\xba"
   "\xc7\x06\x02\x01\x23\x00\xc7\x06\x04\x01\x00\x01\xc7\x06\x06\x01\x2b\x00"
   "\xcb";
const char kvm_asm32_paged[] = "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0";
const char kvm_asm32_vm86[] =
   "\x66\xb8\xb8\x00\x0f\x00\xd8\xea\x00\x00\x00\x00\xd0\x00";
const char kvm_asm32_paged_vm86[] =
   "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0\x66\xb8\xb8\x00\x0f\x00\xd8"
   "\xea\x00\x00\x00\x00\xd0\x00";
const char kvm_asm64_enable_long[] =
   "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0\xea\xde\xc0\xad\x0b\x50\x00"
   "\x48\xc7\xc0\xd8\x00\x00\x00\x0f\x00\xd8";
const char kvm_asm64_init_vm[] =
   "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0\xea\xde\xc0\xad\x0b\x50\x00"
   "\x48\xc7\xc0\xd8\x00\x00\x00\x0f\x00\xd8\x48\xc7\xc1\x3a\x00\x00\x00\x0f"
   "\x32\x48\x83\xc8\x05\x0f\x30\x0f\x20\xe0\x48\x0d\x00\x20\x00\x00\x0f\x22"
   "\xe0\x48\xc7\xc1\x80\x04\x00\x00\x0f\x32\x48\xc7\xc2\x00\x60\x00\x00\x89"
   "\x02\x48\xc7\xc2\x00\x70\x00\x00\x89\x02\x48\xc7\xc0\x00\x5f\x00\x00\xf3"
   "\x0f\xc7\x30\x48\xc7\xc0\x08\x5f\x00\x00\x66\x0f\xc7\x30\x0f\xc7\x30\x48"
   "\xc7\xc1\x81\x04\x00\x00\x0f\x32\x48\x83\xc8\x00\x48\x21\xd0\x48\xc7\xc2"
   "\x00\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc1\x82\x04\x00\x00\x0f\x32\x48\x83"
   "\xc8\x00\x48\x21\xd0\x48\xc7\xc2\x02\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2"
   "\x1e\x40\x00\x00\x48\xc7\xc0\x81\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc1\x83"
   "\x04\x00\x00\x0f\x32\x48\x0d\xff\x6f\x03\x00\x48\x21\xd0\x48\xc7\xc2\x0c"
   "\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc1\x84\x04\x00\x00\x0f\x32\x48\x0d\xff"
   "\x17\x00\x00\x48\x21\xd0\x48\xc7\xc2\x12\x40\x00\x00\x0f\x79\xd0\x48\xc7"
   "\xc2\x04\x2c\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2"
   "\x00\x28\x00\x00\x48\xc7\xc0\xff\xff\xff\xff\x0f\x79\xd0\x48\xc7\xc2\x02"
   "\x0c\x00\x00\x48\xc7\xc0\x50\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc0\x58\x00"
   "\x00\x00\x48\xc7\xc2\x00\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x04\x0c\x00"
   "\x00\x0f\x79\xd0\x48\xc7\xc2\x06\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x08"
   "\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0a\x0c\x00\x00\x0f\x79\xd0\x48\xc7"
   "\xc0\xd8\x00\x00\x00\x48\xc7\xc2\x0c\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc2"
   "\x02\x2c\x00\x00\x48\xc7\xc0\x00\x05\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00"
   "\x4c\x00\x00\x48\xc7\xc0\x50\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x10\x6c"
   "\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x12\x6c\x00"
   "\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x0f\x20\xc0\x48\xc7\xc2\x00"
   "\x6c\x00\x00\x48\x89\xc0\x0f\x79\xd0\x0f\x20\xd8\x48\xc7\xc2\x02\x6c\x00"
   "\x00\x48\x89\xc0\x0f\x79\xd0\x0f\x20\xe0\x48\xc7\xc2\x04\x6c\x00\x00\x48"
   "\x89\xc0\x0f\x79\xd0\x48\xc7\xc2\x06\x6c\x00\x00\x48\xc7\xc0\x00\x00\x00"
   "\x00\x0f\x79\xd0\x48\xc7\xc2\x08\x6c\x00\x00\x48\xc7\xc0\x00\x00\x00\x00"
   "\x0f\x79\xd0\x48\xc7\xc2\x0a\x6c\x00\x00\x48\xc7\xc0\x00\x3a\x00\x00\x0f"
   "\x79\xd0\x48\xc7\xc2\x0c\x6c\x00\x00\x48\xc7\xc0\x00\x10\x00\x00\x0f\x79"
   "\xd0\x48\xc7\xc2\x0e\x6c\x00\x00\x48\xc7\xc0\x00\x38\x00\x00\x0f\x79\xd0"
   "\x48\xc7\xc2\x14\x6c\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48"
   "\xc7\xc2\x16\x6c\x00\x00\x48\x8b\x04\x25\x10\x5f\x00\x00\x0f\x79\xd0\x48"
   "\xc7\xc2\x00\x00\x00\x00\x48\xc7\xc0\x01\x00\x00\x00\x0f\x79\xd0\x48\xc7"
   "\xc2\x02\x00\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2"
   "\x00\x20\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x02"
   "\x20\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x04\x20"
   "\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x06\x20\x00"
   "\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc1\x77\x02\x00\x00"
   "\x0f\x32\x48\xc1\xe2\x20\x48\x09\xd0\x48\xc7\xc2\x00\x2c\x00\x00\x48\x89"
   "\xc0\x0f\x79\xd0\x48\xc7\xc2\x04\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00"
   "\x0f\x79\xd0\x48\xc7\xc2\x0a\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f"
   "\x79\xd0\x48\xc7\xc2\x0e\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79"
   "\xd0\x48\xc7\xc2\x10\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0"
   "\x48\xc7\xc2\x16\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48"
   "\xc7\xc2\x14\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7"
   "\xc2\x00\x60\x00\x00\x48\xc7\xc0\xff\xff\xff\xff\x0f\x79\xd0\x48\xc7\xc2"
   "\x02\x60\x00\x00\x48\xc7\xc0\xff\xff\xff\xff\x0f\x79\xd0\x48\xc7\xc2\x1c"
   "\x20\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1e\x20"
   "\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x20\x20\x00"
   "\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x22\x20\x00\x00"
   "\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x08\x00\x00\x48"
   "\xc7\xc0\x58\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x02\x08\x00\x00\x48\xc7"
   "\xc0\x50\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x04\x08\x00\x00\x48\xc7\xc0"
   "\x58\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x06\x08\x00\x00\x48\xc7\xc0\x58"
   "\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x08\x08\x00\x00\x48\xc7\xc0\x58\x00"
   "\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0a\x08\x00\x00\x48\xc7\xc0\x58\x00\x00"
   "\x00\x0f\x79\xd0\x48\xc7\xc2\x0c\x08\x00\x00\x48\xc7\xc0\x00\x00\x00\x00"
   "\x0f\x79\xd0\x48\xc7\xc2\x0e\x08\x00\x00\x48\xc7\xc0\xd8\x00\x00\x00\x0f"
   "\x79\xd0\x48\xc7\xc2\x12\x68\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79"
   "\xd0\x48\xc7\xc2\x14\x68\x00\x00\x48\xc7\xc0\x00\x3a\x00\x00\x0f\x79\xd0"
   "\x48\xc7\xc2\x16\x68\x00\x00\x48\xc7\xc0\x00\x10\x00\x00\x0f\x79\xd0\x48"
   "\xc7\xc2\x18\x68\x00\x00\x48\xc7\xc0\x00\x38\x00\x00\x0f\x79\xd0\x48\xc7"
   "\xc2\x00\x48\x00\x00\x48\xc7\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2"
   "\x02\x48\x00\x00\x48\xc7\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x04"
   "\x48\x00\x00\x48\xc7\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x06\x48"
   "\x00\x00\x48\xc7\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x08\x48\x00"
   "\x00\x48\xc7\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x0a\x48\x00\x00"
   "\x48\xc7\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x0c\x48\x00\x00\x48"
   "\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0e\x48\x00\x00\x48\xc7"
   "\xc0\xff\x1f\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x10\x48\x00\x00\x48\xc7\xc0"
   "\xff\x1f\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x12\x48\x00\x00\x48\xc7\xc0\xff"
   "\x1f\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x14\x48\x00\x00\x48\xc7\xc0\x93\x40"
   "\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x16\x48\x00\x00\x48\xc7\xc0\x9b\x20\x00"
   "\x00\x0f\x79\xd0\x48\xc7\xc2\x18\x48\x00\x00\x48\xc7\xc0\x93\x40\x00\x00"
   "\x0f\x79\xd0\x48\xc7\xc2\x1a\x48\x00\x00\x48\xc7\xc0\x93\x40\x00\x00\x0f"
   "\x79\xd0\x48\xc7\xc2\x1c\x48\x00\x00\x48\xc7\xc0\x93\x40\x00\x00\x0f\x79"
   "\xd0\x48\xc7\xc2\x1e\x48\x00\x00\x48\xc7\xc0\x93\x40\x00\x00\x0f\x79\xd0"
   "\x48\xc7\xc2\x20\x48\x00\x00\x48\xc7\xc0\x82\x00\x00\x00\x0f\x79\xd0\x48"
   "\xc7\xc2\x22\x48\x00\x00\x48\xc7\xc0\x8b\x00\x00\x00\x0f\x79\xd0\x48\xc7"
   "\xc2\x1c\x68\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2"
   "\x1e\x68\x00\x00\x48\xc7\xc0\x00\x91\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x20"
   "\x68\x00\x00\x48\xc7\xc0\x02\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x06\x28"
   "\x00\x00\x48\xc7\xc0\x00\x05\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0a\x28\x00"
   "\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0c\x28\x00\x00"
   "\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0e\x28\x00\x00\x48"
   "\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x10\x28\x00\x00\x48\xc7"
   "\xc0\x00\x00\x00\x00\x0f\x79\xd0\x0f\x20\xc0\x48\xc7\xc2\x00\x68\x00\x00"
   "\x48\x89\xc0\x0f\x79\xd0\x0f\x20\xd8\x48\xc7\xc2\x02\x68\x00\x00\x48\x89"
   "\xc0\x0f\x79\xd0\x0f\x20\xe0\x48\xc7\xc2\x04\x68\x00\x00\x48\x89\xc0\x0f"
   "\x79\xd0\x48\xc7\xc0\x18\x5f\x00\x00\x48\x8b\x10\x48\xc7\xc0\x20\x5f\x00"
   "\x00\x48\x8b\x08\x48\x31\xc0\x0f\x78\xd0\x48\x31\xc8\x0f\x79\xd0\x0f\x01"
   "\xc2\x48\xc7\xc2\x00\x44\x00\x00\x0f\x78\xd0\xf4";
const char kvm_asm64_vm_exit[] =
   "\x48\xc7\xc3\x00\x44\x00\x00\x0f\x78\xda\x48\xc7\xc3\x02\x44\x00\x00\x0f"
   "\x78\xd9\x48\xc7\xc0\x00\x64\x00\x00\x0f\x78\xc0\x48\xc7\xc3\x1e\x68\x00"
   "\x00\x0f\x78\xdb\xf4";
const char kvm_asm64_cpl3[] =
   "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0\xea\xde\xc0\xad\x0b\x50\x00"
   "\x48\xc7\xc0\xd8\x00\x00\x00\x0f\x00\xd8\x48\xc7\xc0\x6b\x00\x00\x00\x8e"
   "\xd8\x8e\xc0\x8e\xe0\x8e\xe8\x48\xc7\xc4\x80\x0f\x00\x00\x48\xc7\x04\x24"
   "\x1d\xba\x00\x00\x48\xc7\x44\x24\x04\x63\x00\x00\x00\x48\xc7\x44\x24\x08"
   "\x80\x0f\x00\x00\x48\xc7\x44\x24\x0c\x6b\x00\x00\x00\xcb";

#define KVM_SMI _IO(KVMIO, 0xb7)

#define CR0_PE 1
#define CR0_MP (1 << 1)
#define CR0_EM (1 << 2)
#define CR0_TS (1 << 3)
#define CR0_ET (1 << 4)
#define CR0_NE (1 << 5)
#define CR0_WP (1 << 16)
#define CR0_AM (1 << 18)
#define CR0_NW (1 << 29)
#define CR0_CD (1 << 30)
#define CR0_PG (1 << 31)

#define CR4_VME 1
#define CR4_PVI (1 << 1)
#define CR4_TSD (1 << 2)
#define CR4_DE (1 << 3)
#define CR4_PSE (1 << 4)
#define CR4_PAE (1 << 5)
#define CR4_MCE (1 << 6)
#define CR4_PGE (1 << 7)
#define CR4_PCE (1 << 8)
#define CR4_OSFXSR (1 << 8)
#define CR4_OSXMMEXCPT (1 << 10)
#define CR4_UMIP (1 << 11)
#define CR4_VMXE (1 << 13)
#define CR4_SMXE (1 << 14)
#define CR4_FSGSBASE (1 << 16)
#define CR4_PCIDE (1 << 17)
#define CR4_OSXSAVE (1 << 18)
#define CR4_SMEP (1 << 20)
#define CR4_SMAP (1 << 21)
#define CR4_PKE (1 << 22)

#define EFER_SCE 1
#define EFER_LME (1 << 8)
#define EFER_LMA (1 << 10)
#define EFER_NXE (1 << 11)
#define EFER_SVME (1 << 12)
#define EFER_LMSLE (1 << 13)
#define EFER_FFXSR (1 << 14)
#define EFER_TCE (1 << 15)
#define PDE32_PRESENT 1
#define PDE32_RW (1 << 1)
#define PDE32_USER (1 << 2)
#define PDE32_PS (1 << 7)
#define PDE64_PRESENT 1
#define PDE64_RW (1 << 1)
#define PDE64_USER (1 << 2)
#define PDE64_ACCESSED (1 << 5)
#define PDE64_DIRTY (1 << 6)
#define PDE64_PS (1 << 7)
#define PDE64_G (1 << 8)

struct tss16 {
 uint16_t prev;
 uint16_t sp0;
 uint16_t ss0;
 uint16_t sp1;
 uint16_t ss1;
 uint16_t sp2;
 uint16_t ss2;
 uint16_t ip;
 uint16_t flags;
 uint16_t ax;
 uint16_t cx;
 uint16_t dx;
 uint16_t bx;
 uint16_t sp;
 uint16_t bp;
 uint16_t si;
 uint16_t di;
 uint16_t es;
 uint16_t cs;
 uint16_t ss;
 uint16_t ds;
 uint16_t ldt;
} __attribute__((packed));

struct tss32 {
 uint16_t prev, prevh;
 uint32_t sp0;
 uint16_t ss0, ss0h;
 uint32_t sp1;
 uint16_t ss1, ss1h;
 uint32_t sp2;
 uint16_t ss2, ss2h;
 uint32_t cr3;
 uint32_t ip;
 uint32_t flags;
 uint32_t ax;
 uint32_t cx;
 uint32_t dx;
 uint32_t bx;
 uint32_t sp;
 uint32_t bp;
 uint32_t si;
 uint32_t di;
 uint16_t es, esh;
 uint16_t cs, csh;
 uint16_t ss, ssh;
 uint16_t ds, dsh;
 uint16_t fs, fsh;
 uint16_t gs, gsh;
 uint16_t ldt, ldth;
 uint16_t trace;
 uint16_t io_bitmap;
} __attribute__((packed));

struct tss64 {
 uint32_t reserved0;
 uint64_t rsp[3];
 uint64_t reserved1;
 uint64_t ist[7];
 uint64_t reserved2;
 uint32_t reserved3;
 uint32_t io_bitmap;
} __attribute__((packed));

static void fill_segment_descriptor(uint64_t* dt, uint64_t* lt,
                                   struct kvm_segment* seg) {
 uint16_t index = seg->selector >> 3;
 uint64_t limit = seg->g ? seg->limit >> 12 : seg->limit;
 uint64_t sd = (limit & 0xffff) | (seg->base & 0xffffff) << 16 |
               (uint64_t)seg->type << 40 | (uint64_t)seg->s << 44 |
               (uint64_t)seg->dpl << 45 | (uint64_t)seg->present << 47 |
               (limit & 0xf0000ULL) << 48 | (uint64_t)seg->avl << 52 |
               (uint64_t)seg->l << 53 | (uint64_t)seg->db << 54 |
               (uint64_t)seg->g << 55 | (seg->base & 0xff000000ULL) << 56;
 dt[index] = sd;
 lt[index] = sd;
}

static void fill_segment_descriptor_dword(uint64_t* dt, uint64_t* lt,
                                         struct kvm_segment* seg) {
 fill_segment_descriptor(dt, lt, seg);
 uint16_t index = seg->selector >> 3;
 dt[index + 1] = 0;
 lt[index + 1] = 0;
}

static void setup_syscall_msrs(int cpufd, uint16_t sel_cs,
                              uint16_t sel_cs_cpl3) {
 char buf[sizeof(struct kvm_msrs) + 5 * sizeof(struct kvm_msr_entry)];
 memset(buf, 0, sizeof(buf));
 struct kvm_msrs* msrs = (struct kvm_msrs*)buf;
 struct kvm_msr_entry* entries = msrs->entries;
 msrs->nmsrs = 5;
 entries[0].index = MSR_IA32_SYSENTER_CS;
 entries[0].data = sel_cs;
 entries[1].index = MSR_IA32_SYSENTER_ESP;
 entries[1].data = ADDR_STACK0;
 entries[2].index = MSR_IA32_SYSENTER_EIP;
 entries[2].data = ADDR_VAR_SYSEXIT;
 entries[3].index = MSR_IA32_STAR;
 entries[3].data = ((uint64_t)sel_cs << 32) | ((uint64_t)sel_cs_cpl3 << 48);
 entries[4].index = MSR_IA32_LSTAR;
 entries[4].data = ADDR_VAR_SYSRET;
 ioctl(cpufd, KVM_SET_MSRS, msrs);
}

static void setup_32bit_idt(struct kvm_sregs* sregs, char* host_mem,
                           uintptr_t guest_mem) {
 sregs->idt.base = guest_mem + ADDR_VAR_IDT;
 sregs->idt.limit = 0x1ff;
 uint64_t* idt = (uint64_t*)(host_mem + sregs->idt.base);
 for (int i = 0; i < 32; i++) {
   struct kvm_segment gate;
   gate.selector = i << 3;
   switch (i % 6) {
     case 0:
       gate.type = 6;
       gate.base = SEL_CS16;
       break;
     case 1:
       gate.type = 7;
       gate.base = SEL_CS16;
       break;
     case 2:
       gate.type = 3;
       gate.base = SEL_TGATE16;
       break;
     case 3:
       gate.type = 14;
       gate.base = SEL_CS32;
       break;
     case 4:
       gate.type = 15;
       gate.base = SEL_CS32;
       break;
     case 5:
       gate.type = 11;
       gate.base = SEL_TGATE32;
       break;
   }
   gate.limit = guest_mem + ADDR_VAR_USER_CODE2;
   gate.present = 1;
   gate.dpl = 0;
   gate.s = 0;
   gate.g = 0;
   gate.db = 0;
   gate.l = 0;
   gate.avl = 0;
   fill_segment_descriptor(idt, idt, &gate);
 }
}

static void setup_64bit_idt(struct kvm_sregs* sregs, char* host_mem,
                           uintptr_t guest_mem) {
 sregs->idt.base = guest_mem + ADDR_VAR_IDT;
 sregs->idt.limit = 0x1ff;
 uint64_t* idt = (uint64_t*)(host_mem + sregs->idt.base);
 for (int i = 0; i < 32; i++) {
   struct kvm_segment gate;
   gate.selector = (i * 2) << 3;
   gate.type = (i & 1) ? 14 : 15;
   gate.base = SEL_CS64;
   gate.limit = guest_mem + ADDR_VAR_USER_CODE2;
   gate.present = 1;
   gate.dpl = 0;
   gate.s = 0;
   gate.g = 0;
   gate.db = 0;
   gate.l = 0;
   gate.avl = 0;
   fill_segment_descriptor_dword(idt, idt, &gate);
 }
}

struct kvm_text {
 uintptr_t typ;
 const void* text;
 uintptr_t size;
};

struct kvm_opt {
 uint64_t typ;
 uint64_t val;
};

#define KVM_SETUP_PAGING (1 << 0)
#define KVM_SETUP_PAE (1 << 1)
#define KVM_SETUP_PROTECTED (1 << 2)
#define KVM_SETUP_CPL3 (1 << 3)
#define KVM_SETUP_VIRT86 (1 << 4)
#define KVM_SETUP_SMM (1 << 5)
#define KVM_SETUP_VM (1 << 6)
static volatile long syz_kvm_setup_cpu(volatile long a0, volatile long a1,
                                      volatile long a2, volatile long a3,
                                      volatile long a4, volatile long a5,
                                      volatile long a6, volatile long a7) {
 const int vmfd = a0;
 const int cpufd = a1;
 char* const host_mem = (char*)a2;
 const struct kvm_text* const text_array_ptr = (struct kvm_text*)a3;
 const uintptr_t text_count = a4;
 const uintptr_t flags = a5;
 const struct kvm_opt* const opt_array_ptr = (struct kvm_opt*)a6;
 uintptr_t opt_count = a7;
 const uintptr_t page_size = 4 << 10;
 const uintptr_t ioapic_page = 10;
 const uintptr_t guest_mem_size = 24 * page_size;
 const uintptr_t guest_mem = 0;
 (void)text_count;
 int text_type = text_array_ptr[0].typ;
 const void* text = text_array_ptr[0].text;
 uintptr_t text_size = text_array_ptr[0].size;
 for (uintptr_t i = 0; i < guest_mem_size / page_size; i++) {
   struct kvm_userspace_memory_region memreg;
   memreg.slot = i;
   memreg.flags = 0;
   memreg.guest_phys_addr = guest_mem + i * page_size;
   if (i == ioapic_page) memreg.guest_phys_addr = 0xfec00000;
   memreg.memory_size = page_size;
   memreg.userspace_addr = (uintptr_t)host_mem + i * page_size;
   ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &memreg);
 }
 struct kvm_userspace_memory_region memreg;
 memreg.slot = 1 + (1 << 16);
 memreg.flags = 0;
 memreg.guest_phys_addr = 0x30000;
 memreg.memory_size = 64 << 10;
 memreg.userspace_addr = (uintptr_t)host_mem;
 ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &memreg);
 struct kvm_sregs sregs;
 if (ioctl(cpufd, KVM_GET_SREGS, &sregs)) return -1;
 struct kvm_regs regs;
 memset(&regs, 0, sizeof(regs));
 regs.rip = guest_mem + ADDR_TEXT;
 regs.rsp = ADDR_STACK0;
 sregs.gdt.base = guest_mem + ADDR_GDT;
 sregs.gdt.limit = 256 * sizeof(uint64_t) - 1;
 uint64_t* gdt = (uint64_t*)(host_mem + sregs.gdt.base);
 struct kvm_segment seg_ldt;
 seg_ldt.selector = SEL_LDT;
 seg_ldt.type = 2;
 seg_ldt.base = guest_mem + ADDR_LDT;
 seg_ldt.limit = 256 * sizeof(uint64_t) - 1;
 seg_ldt.present = 1;
 seg_ldt.dpl = 0;
 seg_ldt.s = 0;
 seg_ldt.g = 0;
 seg_ldt.db = 1;
 seg_ldt.l = 0;
 sregs.ldt = seg_ldt;
 uint64_t* ldt = (uint64_t*)(host_mem + sregs.ldt.base);
 struct kvm_segment seg_cs16;
 seg_cs16.selector = SEL_CS16;
 seg_cs16.type = 11;
 seg_cs16.base = 0;
 seg_cs16.limit = 0xfffff;
 seg_cs16.present = 1;
 seg_cs16.dpl = 0;
 seg_cs16.s = 1;
 seg_cs16.g = 0;
 seg_cs16.db = 0;
 seg_cs16.l = 0;
 struct kvm_segment seg_ds16 = seg_cs16;
 seg_ds16.selector = SEL_DS16;
 seg_ds16.type = 3;
 struct kvm_segment seg_cs16_cpl3 = seg_cs16;
 seg_cs16_cpl3.selector = SEL_CS16_CPL3;
 seg_cs16_cpl3.dpl = 3;
 struct kvm_segment seg_ds16_cpl3 = seg_ds16;
 seg_ds16_cpl3.selector = SEL_DS16_CPL3;
 seg_ds16_cpl3.dpl = 3;
 struct kvm_segment seg_cs32 = seg_cs16;
 seg_cs32.selector = SEL_CS32;
 seg_cs32.db = 1;
 struct kvm_segment seg_ds32 = seg_ds16;
 seg_ds32.selector = SEL_DS32;
 seg_ds32.db = 1;
 struct kvm_segment seg_cs32_cpl3 = seg_cs32;
 seg_cs32_cpl3.selector = SEL_CS32_CPL3;
 seg_cs32_cpl3.dpl = 3;
 struct kvm_segment seg_ds32_cpl3 = seg_ds32;
 seg_ds32_cpl3.selector = SEL_DS32_CPL3;
 seg_ds32_cpl3.dpl = 3;
 struct kvm_segment seg_cs64 = seg_cs16;
 seg_cs64.selector = SEL_CS64;
 seg_cs64.l = 1;
 struct kvm_segment seg_ds64 = seg_ds32;
 seg_ds64.selector = SEL_DS64;
 struct kvm_segment seg_cs64_cpl3 = seg_cs64;
 seg_cs64_cpl3.selector = SEL_CS64_CPL3;
 seg_cs64_cpl3.dpl = 3;
 struct kvm_segment seg_ds64_cpl3 = seg_ds64;
 seg_ds64_cpl3.selector = SEL_DS64_CPL3;
 seg_ds64_cpl3.dpl = 3;
 struct kvm_segment seg_tss32;
 seg_tss32.selector = SEL_TSS32;
 seg_tss32.type = 9;
 seg_tss32.base = ADDR_VAR_TSS32;
 seg_tss32.limit = 0x1ff;
 seg_tss32.present = 1;
 seg_tss32.dpl = 0;
 seg_tss32.s = 0;
 seg_tss32.g = 0;
 seg_tss32.db = 0;
 seg_tss32.l = 0;
 struct kvm_segment seg_tss32_2 = seg_tss32;
 seg_tss32_2.selector = SEL_TSS32_2;
 seg_tss32_2.base = ADDR_VAR_TSS32_2;
 struct kvm_segment seg_tss32_cpl3 = seg_tss32;
 seg_tss32_cpl3.selector = SEL_TSS32_CPL3;
 seg_tss32_cpl3.base = ADDR_VAR_TSS32_CPL3;
 struct kvm_segment seg_tss32_vm86 = seg_tss32;
 seg_tss32_vm86.selector = SEL_TSS32_VM86;
 seg_tss32_vm86.base = ADDR_VAR_TSS32_VM86;
 struct kvm_segment seg_tss16 = seg_tss32;
 seg_tss16.selector = SEL_TSS16;
 seg_tss16.base = ADDR_VAR_TSS16;
 seg_tss16.limit = 0xff;
 seg_tss16.type = 1;
 struct kvm_segment seg_tss16_2 = seg_tss16;
 seg_tss16_2.selector = SEL_TSS16_2;
 seg_tss16_2.base = ADDR_VAR_TSS16_2;
 seg_tss16_2.dpl = 0;
 struct kvm_segment seg_tss16_cpl3 = seg_tss16;
 seg_tss16_cpl3.selector = SEL_TSS16_CPL3;
 seg_tss16_cpl3.base = ADDR_VAR_TSS16_CPL3;
 seg_tss16_cpl3.dpl = 3;
 struct kvm_segment seg_tss64 = seg_tss32;
 seg_tss64.selector = SEL_TSS64;
 seg_tss64.base = ADDR_VAR_TSS64;
 seg_tss64.limit = 0x1ff;
 struct kvm_segment seg_tss64_cpl3 = seg_tss64;
 seg_tss64_cpl3.selector = SEL_TSS64_CPL3;
 seg_tss64_cpl3.base = ADDR_VAR_TSS64_CPL3;
 seg_tss64_cpl3.dpl = 3;
 struct kvm_segment seg_cgate16;
 seg_cgate16.selector = SEL_CGATE16;
 seg_cgate16.type = 4;
 seg_cgate16.base = SEL_CS16 | (2 << 16);
 seg_cgate16.limit = ADDR_VAR_USER_CODE2;
 seg_cgate16.present = 1;
 seg_cgate16.dpl = 0;
 seg_cgate16.s = 0;
 seg_cgate16.g = 0;
 seg_cgate16.db = 0;
 seg_cgate16.l = 0;
 seg_cgate16.avl = 0;
 struct kvm_segment seg_tgate16 = seg_cgate16;
 seg_tgate16.selector = SEL_TGATE16;
 seg_tgate16.type = 3;
 seg_cgate16.base = SEL_TSS16_2;
 seg_tgate16.limit = 0;
 struct kvm_segment seg_cgate32 = seg_cgate16;
 seg_cgate32.selector = SEL_CGATE32;
 seg_cgate32.type = 12;
 seg_cgate32.base = SEL_CS32 | (2 << 16);
 struct kvm_segment seg_tgate32 = seg_cgate32;
 seg_tgate32.selector = SEL_TGATE32;
 seg_tgate32.type = 11;
 seg_tgate32.base = SEL_TSS32_2;
 seg_tgate32.limit = 0;
 struct kvm_segment seg_cgate64 = seg_cgate16;
 seg_cgate64.selector = SEL_CGATE64;
 seg_cgate64.type = 12;
 seg_cgate64.base = SEL_CS64;
 int kvmfd = open("/dev/kvm", O_RDWR);
 char buf[sizeof(struct kvm_cpuid2) + 128 * sizeof(struct kvm_cpuid_entry2)];
 memset(buf, 0, sizeof(buf));
 struct kvm_cpuid2* cpuid = (struct kvm_cpuid2*)buf;
 cpuid->nent = 128;
 ioctl(kvmfd, KVM_GET_SUPPORTED_CPUID, cpuid);
 ioctl(cpufd, KVM_SET_CPUID2, cpuid);
 close(kvmfd);
 const char* text_prefix = 0;
 int text_prefix_size = 0;
 char* host_text = host_mem + ADDR_TEXT;
 if (text_type == 8) {
   if (flags & KVM_SETUP_SMM) {
     if (flags & KVM_SETUP_PROTECTED) {
       sregs.cs = seg_cs16;
       sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds16;
       sregs.cr0 |= CR0_PE;
     } else {
       sregs.cs.selector = 0;
       sregs.cs.base = 0;
     }
     *(host_mem + ADDR_TEXT) = 0xf4;
     host_text = host_mem + 0x8000;
     ioctl(cpufd, KVM_SMI, 0);
   } else if (flags & KVM_SETUP_VIRT86) {
     sregs.cs = seg_cs32;
     sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
     sregs.cr0 |= CR0_PE;
     sregs.efer |= EFER_SCE;
     setup_syscall_msrs(cpufd, SEL_CS32, SEL_CS32_CPL3);
     setup_32bit_idt(&sregs, host_mem, guest_mem);
     if (flags & KVM_SETUP_PAGING) {
       uint64_t pd_addr = guest_mem + ADDR_PD;
       uint64_t* pd = (uint64_t*)(host_mem + ADDR_PD);
       pd[0] = PDE32_PRESENT | PDE32_RW | PDE32_USER | PDE32_PS;
       sregs.cr3 = pd_addr;
       sregs.cr4 |= CR4_PSE;
       text_prefix = kvm_asm32_paged_vm86;
       text_prefix_size = sizeof(kvm_asm32_paged_vm86) - 1;
     } else {
       text_prefix = kvm_asm32_vm86;
       text_prefix_size = sizeof(kvm_asm32_vm86) - 1;
     }
   } else {
     sregs.cs.selector = 0;
     sregs.cs.base = 0;
   }
 } else if (text_type == 16) {
   if (flags & KVM_SETUP_CPL3) {
     sregs.cs = seg_cs16;
     sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds16;
     text_prefix = kvm_asm16_cpl3;
     text_prefix_size = sizeof(kvm_asm16_cpl3) - 1;
   } else {
     sregs.cr0 |= CR0_PE;
     sregs.cs = seg_cs16;
     sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds16;
   }
 } else if (text_type == 32) {
   sregs.cr0 |= CR0_PE;
   sregs.efer |= EFER_SCE;
   setup_syscall_msrs(cpufd, SEL_CS32, SEL_CS32_CPL3);
   setup_32bit_idt(&sregs, host_mem, guest_mem);
   if (flags & KVM_SETUP_SMM) {
     sregs.cs = seg_cs32;
     sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
     *(host_mem + ADDR_TEXT) = 0xf4;
     host_text = host_mem + 0x8000;
     ioctl(cpufd, KVM_SMI, 0);
   } else if (flags & KVM_SETUP_PAGING) {
     sregs.cs = seg_cs32;
     sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
     uint64_t pd_addr = guest_mem + ADDR_PD;
     uint64_t* pd = (uint64_t*)(host_mem + ADDR_PD);
     pd[0] = PDE32_PRESENT | PDE32_RW | PDE32_USER | PDE32_PS;
     sregs.cr3 = pd_addr;
     sregs.cr4 |= CR4_PSE;
     text_prefix = kvm_asm32_paged;
     text_prefix_size = sizeof(kvm_asm32_paged) - 1;
   } else if (flags & KVM_SETUP_CPL3) {
     sregs.cs = seg_cs32_cpl3;
     sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32_cpl3;
   } else {
     sregs.cs = seg_cs32;
     sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
   }
 } else {
   sregs.efer |= EFER_LME | EFER_SCE;
   sregs.cr0 |= CR0_PE;
   setup_syscall_msrs(cpufd, SEL_CS64, SEL_CS64_CPL3);
   setup_64bit_idt(&sregs, host_mem, guest_mem);
   sregs.cs = seg_cs32;
   sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
   uint64_t pml4_addr = guest_mem + ADDR_PML4;
   uint64_t* pml4 = (uint64_t*)(host_mem + ADDR_PML4);
   uint64_t pdpt_addr = guest_mem + ADDR_PDP;
   uint64_t* pdpt = (uint64_t*)(host_mem + ADDR_PDP);
   uint64_t pd_addr = guest_mem + ADDR_PD;
   uint64_t* pd = (uint64_t*)(host_mem + ADDR_PD);
   pml4[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pdpt_addr;
   pdpt[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pd_addr;
   pd[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | PDE64_PS;
   sregs.cr3 = pml4_addr;
   sregs.cr4 |= CR4_PAE;
   if (flags & KVM_SETUP_VM) {
     sregs.cr0 |= CR0_NE;
     *((uint64_t*)(host_mem + ADDR_VAR_VMXON_PTR)) = ADDR_VAR_VMXON;
     *((uint64_t*)(host_mem + ADDR_VAR_VMCS_PTR)) = ADDR_VAR_VMCS;
     memcpy(host_mem + ADDR_VAR_VMEXIT_CODE, kvm_asm64_vm_exit,
            sizeof(kvm_asm64_vm_exit) - 1);
     *((uint64_t*)(host_mem + ADDR_VAR_VMEXIT_PTR)) = ADDR_VAR_VMEXIT_CODE;
     text_prefix = kvm_asm64_init_vm;
     text_prefix_size = sizeof(kvm_asm64_init_vm) - 1;
   } else if (flags & KVM_SETUP_CPL3) {
     text_prefix = kvm_asm64_cpl3;
     text_prefix_size = sizeof(kvm_asm64_cpl3) - 1;
   } else {
     text_prefix = kvm_asm64_enable_long;
     text_prefix_size = sizeof(kvm_asm64_enable_long) - 1;
   }
 }
 struct tss16 tss16;
 memset(&tss16, 0, sizeof(tss16));
 tss16.ss0 = tss16.ss1 = tss16.ss2 = SEL_DS16;
 tss16.sp0 = tss16.sp1 = tss16.sp2 = ADDR_STACK0;
 tss16.ip = ADDR_VAR_USER_CODE2;
 tss16.flags = (1 << 1);
 tss16.cs = SEL_CS16;
 tss16.es = tss16.ds = tss16.ss = SEL_DS16;
 tss16.ldt = SEL_LDT;
 struct tss16* tss16_addr = (struct tss16*)(host_mem + seg_tss16_2.base);
 memcpy(tss16_addr, &tss16, sizeof(tss16));
 memset(&tss16, 0, sizeof(tss16));
 tss16.ss0 = tss16.ss1 = tss16.ss2 = SEL_DS16;
 tss16.sp0 = tss16.sp1 = tss16.sp2 = ADDR_STACK0;
 tss16.ip = ADDR_VAR_USER_CODE2;
 tss16.flags = (1 << 1);
 tss16.cs = SEL_CS16_CPL3;
 tss16.es = tss16.ds = tss16.ss = SEL_DS16_CPL3;
 tss16.ldt = SEL_LDT;
 struct tss16* tss16_cpl3_addr =
     (struct tss16*)(host_mem + seg_tss16_cpl3.base);
 memcpy(tss16_cpl3_addr, &tss16, sizeof(tss16));
 struct tss32 tss32;
 memset(&tss32, 0, sizeof(tss32));
 tss32.ss0 = tss32.ss1 = tss32.ss2 = SEL_DS32;
 tss32.sp0 = tss32.sp1 = tss32.sp2 = ADDR_STACK0;
 tss32.ip = ADDR_VAR_USER_CODE;
 tss32.flags = (1 << 1) | (1 << 17);
 tss32.ldt = SEL_LDT;
 tss32.cr3 = sregs.cr3;
 tss32.io_bitmap = offsetof(struct tss32, io_bitmap);
 struct tss32* tss32_addr = (struct tss32*)(host_mem + seg_tss32_vm86.base);
 memcpy(tss32_addr, &tss32, sizeof(tss32));
 memset(&tss32, 0, sizeof(tss32));
 tss32.ss0 = tss32.ss1 = tss32.ss2 = SEL_DS32;
 tss32.sp0 = tss32.sp1 = tss32.sp2 = ADDR_STACK0;
 tss32.ip = ADDR_VAR_USER_CODE;
 tss32.flags = (1 << 1);
 tss32.cr3 = sregs.cr3;
 tss32.es = tss32.ds = tss32.ss = tss32.gs = tss32.fs = SEL_DS32;
 tss32.cs = SEL_CS32;
 tss32.ldt = SEL_LDT;
 tss32.cr3 = sregs.cr3;
 tss32.io_bitmap = offsetof(struct tss32, io_bitmap);
 struct tss32* tss32_cpl3_addr = (struct tss32*)(host_mem + seg_tss32_2.base);
 memcpy(tss32_cpl3_addr, &tss32, sizeof(tss32));
 struct tss64 tss64;
 memset(&tss64, 0, sizeof(tss64));
 tss64.rsp[0] = ADDR_STACK0;
 tss64.rsp[1] = ADDR_STACK0;
 tss64.rsp[2] = ADDR_STACK0;
 tss64.io_bitmap = offsetof(struct tss64, io_bitmap);
 struct tss64* tss64_addr = (struct tss64*)(host_mem + seg_tss64.base);
 memcpy(tss64_addr, &tss64, sizeof(tss64));
 memset(&tss64, 0, sizeof(tss64));
 tss64.rsp[0] = ADDR_STACK0;
 tss64.rsp[1] = ADDR_STACK0;
 tss64.rsp[2] = ADDR_STACK0;
 tss64.io_bitmap = offsetof(struct tss64, io_bitmap);
 struct tss64* tss64_cpl3_addr =
     (struct tss64*)(host_mem + seg_tss64_cpl3.base);
 memcpy(tss64_cpl3_addr, &tss64, sizeof(tss64));
 if (text_size > 1000) text_size = 1000;
 if (text_prefix) {
   memcpy(host_text, text_prefix, text_prefix_size);
   void* patch = memmem(host_text, text_prefix_size, "\xde\xc0\xad\x0b", 4);
   if (patch)
     *((uint32_t*)patch) =
         guest_mem + ADDR_TEXT + ((char*)patch - host_text) + 6;
   uint16_t magic = PREFIX_SIZE;
   patch = memmem(host_text, text_prefix_size, &magic, sizeof(magic));
   if (patch) *((uint16_t*)patch) = guest_mem + ADDR_TEXT + text_prefix_size;
 }
 memcpy((void*)(host_text + text_prefix_size), text, text_size);
 *(host_text + text_prefix_size + text_size) = 0xf4;
 memcpy(host_mem + ADDR_VAR_USER_CODE, text, text_size);
 *(host_mem + ADDR_VAR_USER_CODE + text_size) = 0xf4;
 *(host_mem + ADDR_VAR_HLT) = 0xf4;
 memcpy(host_mem + ADDR_VAR_SYSRET, "\x0f\x07\xf4", 3);
 memcpy(host_mem + ADDR_VAR_SYSEXIT, "\x0f\x35\xf4", 3);
 *(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_FLD) = 0;
 *(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_VAL) = 0;
 if (opt_count > 2) opt_count = 2;
 for (uintptr_t i = 0; i < opt_count; i++) {
   uint64_t typ = opt_array_ptr[i].typ;
   uint64_t val = opt_array_ptr[i].val;
   switch (typ % 9) {
     case 0:
       sregs.cr0 ^= val & (CR0_MP | CR0_EM | CR0_ET | CR0_NE | CR0_WP |
                           CR0_AM | CR0_NW | CR0_CD);
       break;
     case 1:
       sregs.cr4 ^=
           val & (CR4_VME | CR4_PVI | CR4_TSD | CR4_DE | CR4_MCE | CR4_PGE |
                  CR4_PCE | CR4_OSFXSR | CR4_OSXMMEXCPT | CR4_UMIP | CR4_VMXE |
                  CR4_SMXE | CR4_FSGSBASE | CR4_PCIDE | CR4_OSXSAVE |
                  CR4_SMEP | CR4_SMAP | CR4_PKE);
       break;
     case 2:
       sregs.efer ^= val & (EFER_SCE | EFER_NXE | EFER_SVME | EFER_LMSLE |
                            EFER_FFXSR | EFER_TCE);
       break;
     case 3:
       val &= ((1 << 8) | (1 << 9) | (1 << 10) | (1 << 12) | (1 << 13) |
               (1 << 14) | (1 << 15) | (1 << 18) | (1 << 19) | (1 << 20) |
               (1 << 21));
       regs.rflags ^= val;
       tss16_addr->flags ^= val;
       tss16_cpl3_addr->flags ^= val;
       tss32_addr->flags ^= val;
       tss32_cpl3_addr->flags ^= val;
       break;
     case 4:
       seg_cs16.type = val & 0xf;
       seg_cs32.type = val & 0xf;
       seg_cs64.type = val & 0xf;
       break;
     case 5:
       seg_cs16_cpl3.type = val & 0xf;
       seg_cs32_cpl3.type = val & 0xf;
       seg_cs64_cpl3.type = val & 0xf;
       break;
     case 6:
       seg_ds16.type = val & 0xf;
       seg_ds32.type = val & 0xf;
       seg_ds64.type = val & 0xf;
       break;
     case 7:
       seg_ds16_cpl3.type = val & 0xf;
       seg_ds32_cpl3.type = val & 0xf;
       seg_ds64_cpl3.type = val & 0xf;
       break;
     case 8:
       *(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_FLD) = (val & 0xffff);
       *(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_VAL) = (val >> 16);
       break;
     default:
       exit(1);
   }
 }
 regs.rflags |= 2;
 fill_segment_descriptor(gdt, ldt, &seg_ldt);
 fill_segment_descriptor(gdt, ldt, &seg_cs16);
 fill_segment_descriptor(gdt, ldt, &seg_ds16);
 fill_segment_descriptor(gdt, ldt, &seg_cs16_cpl3);
 fill_segment_descriptor(gdt, ldt, &seg_ds16_cpl3);
 fill_segment_descriptor(gdt, ldt, &seg_cs32);
 fill_segment_descriptor(gdt, ldt, &seg_ds32);
 fill_segment_descriptor(gdt, ldt, &seg_cs32_cpl3);
 fill_segment_descriptor(gdt, ldt, &seg_ds32_cpl3);
 fill_segment_descriptor(gdt, ldt, &seg_cs64);
 fill_segment_descriptor(gdt, ldt, &seg_ds64);
 fill_segment_descriptor(gdt, ldt, &seg_cs64_cpl3);
 fill_segment_descriptor(gdt, ldt, &seg_ds64_cpl3);
 fill_segment_descriptor(gdt, ldt, &seg_tss32);
 fill_segment_descriptor(gdt, ldt, &seg_tss32_2);
 fill_segment_descriptor(gdt, ldt, &seg_tss32_cpl3);
 fill_segment_descriptor(gdt, ldt, &seg_tss32_vm86);
 fill_segment_descriptor(gdt, ldt, &seg_tss16);
 fill_segment_descriptor(gdt, ldt, &seg_tss16_2);
 fill_segment_descriptor(gdt, ldt, &seg_tss16_cpl3);
 fill_segment_descriptor_dword(gdt, ldt, &seg_tss64);
 fill_segment_descriptor_dword(gdt, ldt, &seg_tss64_cpl3);
 fill_segment_descriptor(gdt, ldt, &seg_cgate16);
 fill_segment_descriptor(gdt, ldt, &seg_tgate16);
 fill_segment_descriptor(gdt, ldt, &seg_cgate32);
 fill_segment_descriptor(gdt, ldt, &seg_tgate32);
 fill_segment_descriptor_dword(gdt, ldt, &seg_cgate64);
 if (ioctl(cpufd, KVM_SET_SREGS, &sregs)) return -1;
 if (ioctl(cpufd, KVM_SET_REGS, &regs)) return -1;
 return 0;
}

static void kill_and_wait(int pid, int* status) {
 kill(-pid, SIGKILL);
 kill(pid, SIGKILL);
 for (int i = 0; i < 100; i++) {
   if (waitpid(-1, status, WNOHANG | __WALL) == pid) return;
   usleep(1000);
 }
 DIR* dir = opendir("/sys/fs/fuse/connections");
 if (dir) {
   for (;;) {
     struct dirent* ent = readdir(dir);
     if (!ent) break;
     if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
       continue;
     char abort[300];
     snprintf(abort, sizeof(abort), "/sys/fs/fuse/connections/%s/abort",
              ent->d_name);
     int fd = open(abort, O_WRONLY);
     if (fd == -1) {
       continue;
     }
     if (write(fd, abort, 1) < 0) {
     }
     close(fd);
   }
   closedir(dir);
 } else {
 }
 while (waitpid(-1, status, __WALL) != pid) {
 }
}

static void setup_test() {
 prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
 setpgrp();
 write_file("/proc/self/oom_score_adj", "1000");
}

struct thread_t {
 int created, call;
 event_t ready, done;
};

static struct thread_t threads[16];
static void execute_call(int call);
static int running;

static void* thr(void* arg) {
 struct thread_t* th = (struct thread_t*)arg;
 for (;;) {
   event_wait(&th->ready);
   event_reset(&th->ready);
   execute_call(th->call);
   __atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
   event_set(&th->done);
 }
 return 0;
}

static void execute_one(void) {
 int i, call, thread;
 for (call = 0; call < 10; call++) {
   for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0]));
        thread++) {
     struct thread_t* th = &threads[thread];
     if (!th->created) {
       th->created = 1;
       event_init(&th->ready);
       event_init(&th->done);
       event_set(&th->done);
       thread_start(thr, th);
     }
     if (!event_isset(&th->done)) continue;
     event_reset(&th->done);
     th->call = call;
     __atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
     event_set(&th->ready);
     event_timedwait(&th->done, 50);
     break;
   }
 }
 for (i = 0; i < 100 && __atomic_load_n(&running, __ATOMIC_RELAXED); i++)
   sleep_ms(1);
}

static void execute_one(void);

#define WAIT_FLAGS __WALL

static void loop(void) {
 int iter = 0;
 for (;; iter++) {
   int pid = fork();
   if (pid < 0) exit(1);
   if (pid == 0) {
     setup_test();
     execute_one();
     exit(0);
   }
   int status = 0;
   uint64_t start = current_time_ms();
   for (;;) {
     if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid) break;
     sleep_ms(1);
     if (current_time_ms() - start < 5000) continue;
     kill_and_wait(pid, &status);
     break;
   }
 }
}

uint64_t r[4] = {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff,
                0xffffffffffffffff};

void execute_call(int call) {
 intptr_t res = 0;
 switch (call) {
   case 0:
     NONFAILING(memcpy((void*)0x20000280, "/dev/kvm\000", 9));
     res = syscall(__NR_openat, /*fd=*/0xffffffffffffff9cul,
                   /*file=*/0x20000280ul, /*flags=*/0ul, /*mode=*/0ul);
     if (res != -1) r[0] = res;
     break;
   case 1:
     res = syscall(__NR_ioctl, /*fd=*/r[0], /*cmd=*/0xae01, /*type=*/0ul);
     if (res != -1) r[1] = res;
     break;
   case 2:
     NONFAILING(memcpy(
         (void*)0x20000180,
         "-B\325NI\305j\232ppp\360\b\204\242m\000\v\030\0004\246Ey\333\321\247"
         "\261S\361:)\000\312\327Uw\000\274\3722\263\273\215\254\254va}knh#"
         "\317)\017\310\300:\234c\020d\356\251\213\0066\270G\321c\341$"
         "\377\227k\336\305\3516\335U)\3118M\315\373\314\202n=\177="
         "\315Jx\252\217~\2710a\251\262\004K\230\223="
         "\253Q\367\005\035\241\316\213\031\352\357\343",
         123));
     syscall(__NR_memfd_create, /*name=*/0x20000180ul, /*flags=*/0ul);
     break;
   case 3:
     NONFAILING(syz_kvm_setup_cpu(
         /*fd=*/-1, /*cpufd=*/-1, /*usermem=*/0x2000e000, /*text=*/0,
         /*ntext=*/0, /*flags=*/0x70, /*opts=*/0, /*nopt=*/0));
     break;
   case 4:
     res = syscall(__NR_dup, /*oldfd=*/r[1]);
     if (res != -1) r[2] = res;
     break;
   case 5:
     res = syscall(__NR_ioctl, /*fd=*/r[2], /*cmd=*/0xae41, /*id=*/0ul);
     if (res != -1) r[3] = res;
     break;
   case 6:
     NONFAILING(*(uint32_t*)0x20000180 = 0);
     NONFAILING(*(uint32_t*)0x20000184 = 0);
     NONFAILING(*(uint64_t*)0x20000188 = 0);
     NONFAILING(*(uint64_t*)0x20000190 = 0x2000);
     NONFAILING(*(uint64_t*)0x20000198 = 0x20000000);
     syscall(__NR_ioctl, /*fd=*/r[1], /*cmd=*/0x4020ae46,
             /*arg=*/0x20000180ul);
     break;
   case 7:
     NONFAILING(*(uint64_t*)0x20000100 = 8);
     NONFAILING(*(uint64_t*)0x20000108 = 0);
     NONFAILING(*(uint64_t*)0x20000110 = 0);
     NONFAILING(syz_kvm_setup_cpu(/*fd=*/-1, /*cpufd=*/r[3],
                                  /*usermem=*/0x20000000, /*text=*/0x20000100,
                                  /*ntext=*/1, /*flags=*/0, /*opts=*/0,
                                  /*nopt=*/0));
     {
       int i;
       for (i = 0; i < 32; i++) {
         NONFAILING(syz_kvm_setup_cpu(/*fd=*/-1, /*cpufd=*/r[3],
                                      /*usermem=*/0x20000000,
                                      /*text=*/0x20000100, /*ntext=*/1,
                                      /*flags=*/0, /*opts=*/0, /*nopt=*/0));
       }
     }
     break;
   case 8:
     syscall(__NR_ioctl, /*fd=*/-1, /*cmd=*/0x4010ae67, /*arg=*/0ul);
     break;
   case 9:
     syscall(__NR_ioctl, /*fd=*/r[3], /*cmd=*/0xae80, /*arg=*/0ul);
     {
       int i;
       for (i = 0; i < 32; i++) {
         syscall(__NR_ioctl, /*fd=*/r[3], /*cmd=*/0xae80, /*arg=*/0ul);
       }
     }
     break;
 }
}
int main(void) {
 syscall(__NR_mmap, /*addr=*/0x1ffff000ul, /*len=*/0x1000ul, /*prot=*/0ul,
         /*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
 syscall(__NR_mmap, /*addr=*/0x20000000ul, /*len=*/0x1000000ul, /*prot=*/7ul,
         /*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
 syscall(__NR_mmap, /*addr=*/0x21000000ul, /*len=*/0x1000ul, /*prot=*/0ul,
         /*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
 install_segv_handler();
 for (procid = 0; procid < 4; procid++) {
   if (fork() == 0) {
     loop();
   }
 }
 sleep(1000000);
 return 0;
}


=* repro.txt =*
r0 = openat$kvm(0xffffffffffffff9c, &(0x7f0000000280), 0x0, 0x0)
r1 = ioctl$KVM_CREATE_VM(r0, 0xae01, 0x0)
memfd_create(&(0x7f0000000180)='-B\xd5NI\xc5j\x9appp\xf0\b\x84\xa2m\x00\v\x18\x004\xa6Ey\xdb\xd1\xa7\xb1S\xf1:)\x00\xca\xd7Uw\x00\xbc\xfa2\xb3\xbb\x8d\xac\xacva}knh#\xcf)\x0f\xc8\xc0:\x9cc\x10d\xee\xa9\x8b\x066\xb8G\xd1c\xe1$\xff\x97k\xde\xc5\xe96\xddU)\xc98M\xcd\xfb\xcc\x82n=\x7f=\xcdJx\xaa\x8f~\xb90a\xa9\xb2\x04K\x98\x93=\xabQ\xf7\x05\x1d\xa1\xce\x8b\x19\xea\xef\xe3',
0x0)
syz_kvm_setup_cpu$x86(0xffffffffffffffff, 0xffffffffffffffff,
&(0x7f000000e000/0x18000)=nil, 0x0, 0x0, 0x70, 0x0, 0x0)
r2 = dup(r1)
r3 = ioctl$KVM_CREATE_VCPU(r2, 0xae41, 0x0)
ioctl$KVM_SET_USER_MEMORY_REGION(r1, 0x4020ae46,
&(0x7f0000000180)={0x0, 0x0, 0x0, 0x2000,
&(0x7f0000000000/0x2000)=nil})
syz_kvm_setup_cpu$x86(0xffffffffffffffff, r3,
&(0x7f0000000000/0x18000)=nil, &(0x7f0000000100)=[@textreal={0x8,
0x0}], 0x1, 0x0, 0x0, 0x0) (rerun: 32)
ioctl$KVM_REGISTER_COALESCED_MMIO(0xffffffffffffffff, 0x4010ae67, 0x0)
ioctl$KVM_RUN(r3, 0xae80, 0x0) (rerun: 32)
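
In plain KVM API terms, the reproducer boils down to roughly the following
ioctl sequence (a minimal sketch, not the full repro; repro.c additionally
programs the vCPU registers, GDT and TSSes through syz_kvm_setup_cpu() and
re-runs KVM_RUN in a loop):

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
  int kvm = open("/dev/kvm", O_RDWR);
  int vm = ioctl(kvm, KVM_CREATE_VM, 0);          /* 0xae01 in repro.c */
  int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);       /* 0xae41 */
  /* back 0x2000 bytes of guest physical memory with anonymous host memory */
  void* mem = mmap(NULL, 0x2000, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  struct kvm_userspace_memory_region region = {
      .slot = 0,
      .guest_phys_addr = 0,
      .memory_size = 0x2000,
      .userspace_addr = (uintptr_t)mem,
  };
  ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region); /* 0x4020ae46 */
  /* repro.c sets up guest code and the vCPU state here before running;
     the emulator path in the report is reached during KVM_RUN */
  ioctl(vcpu, KVM_RUN, 0);                        /* 0xae80 */
  close(vcpu);
  close(vm);
  close(kvm);
  return 0;
}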


See also https://gist.github.com/xrivendell7/8d293339a1c88fda7301c43aa07dd3b9
I hope it helps.
Merry Christmas!
xingwei Lee




