// BUG: unable to handle kernel paging request in vmx_vcpu_run
// https://syzkaller.appspot.com/bug?id=b67fcc95c0d84ea5424813a0d8703fc5c06de7ee
// status:open
// autogenerated by syzkaller (http://github.com/google/syzkaller)
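//
// In outline: each worker process repeatedly opens /dev/kvm, creates a VM and
// a VCPU, loads 64-bit VMX guest code through the syz_kvm_setup_cpu() helper
// below, and issues KVM_RUN while other threads race unrelated syscalls
// against it, which appears to be what triggers the crash in vmx_vcpu_run.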
#define _GNU_SOURCE
#include <dirent.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/futex.h>
#include <linux/kvm.h>
#include <linux/net.h>
#include <netinet/in.h>
#include <pthread.h>
#include <signal.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
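// Terminate all threads of the process immediately via exit_group(); the
// trailing infinite loop is never reached and only satisfies the noreturn
// attribute.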
__attribute__((noreturn)) static void doexit(int status)
{
volatile unsigned i;
syscall(__NR_exit_group, status);
for (i = 0;; i++) {
}
}
#include <setjmp.h>
#include <stdint.h>
#include <string.h>
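// Exit codes understood by the syzkaller harness: kFailStatus marks a hard
// failure of the reproducer itself, kRetryStatus asks for the run to be
// retried (used for transient conditions such as ENOMEM/EAGAIN).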
const int kFailStatus = 67;
const int kRetryStatus = 69;
static void fail(const char* msg, ...)
{
int e = errno;
va_list args;
va_start(args, msg);
vfprintf(stderr, msg, args);
va_end(args);
fprintf(stderr, " (errno %d)\n", e);
doexit((e == ENOMEM || e == EAGAIN) ? kRetryStatus : kFailStatus);
}
static void exitf(const char* msg, ...)
{
int e = errno;
va_list args;
va_start(args, msg);
vfprintf(stderr, msg, args);
va_end(args);
fprintf(stderr, " (errno %d)\n", e);
doexit(kRetryStatus);
}
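// SIGSEGV/SIGBUS recovery used by the NONFAILING() macro below: while
// skip_segv is non-zero, faults at addresses outside the 1MB-100MB range
// (where the binary and heap are expected to live) are swallowed by
// longjmp-ing back into the macro; faults inside that range still kill the
// process. The two raw rt_sigaction calls ignore signals 32 and 33, which
// glibc reserves for internal use.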
static __thread int skip_segv;
static __thread jmp_buf segv_env;
static void segv_handler(int sig, siginfo_t* info, void* uctx)
{
uintptr_t addr = (uintptr_t)info->si_addr;
const uintptr_t prog_start = 1 << 20;
const uintptr_t prog_end = 100 << 20;
if (__atomic_load_n(&skip_segv, __ATOMIC_RELAXED) &&
(addr < prog_start || addr > prog_end)) {
_longjmp(segv_env, 1);
}
doexit(sig);
}
static void install_segv_handler()
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_IGN;
syscall(SYS_rt_sigaction, 0x20, &sa, NULL, 8);
syscall(SYS_rt_sigaction, 0x21, &sa, NULL, 8);
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = segv_handler;
sa.sa_flags = SA_NODEFER | SA_SIGINFO;
sigaction(SIGSEGV, &sa, NULL);
sigaction(SIGBUS, &sa, NULL);
}
#define NONFAILING(...) \
{ \
__atomic_fetch_add(&skip_segv, 1, __ATOMIC_SEQ_CST); \
if (_setjmp(segv_env) == 0) { \
__VA_ARGS__; \
} \
__atomic_fetch_sub(&skip_segv, 1, __ATOMIC_SEQ_CST); \
}
static uint64_t current_time_ms()
{
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts))
fail("clock_gettime failed");
return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}
static void use_temporary_dir()
{
char tmpdir_template[] = "./syzkaller.XXXXXX";
char* tmpdir = mkdtemp(tmpdir_template);
if (!tmpdir)
fail("failed to mkdtemp");
if (chmod(tmpdir, 0777))
fail("failed to chmod");
if (chdir(tmpdir))
fail("failed to chdir");
}
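// Raw x86 machine-code fragments executed inside the guest, selected as a
// prefix for the fuzzer-supplied code depending on the requested CPU mode.
// For example, kvm_asm64_enable_long turns on paging and far-jumps into a
// 64-bit code segment; kvm_asm64_init_vm additionally executes VMXON and
// VMPTRLD, programs the VMCS through a long run of VMWRITE (0f 79)
// instructions and finally VMLAUNCHes; kvm_asm64_vm_exit is the VM-exit
// handler, which VMREADs a few fields and halts.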
const char kvm_asm16_cpl3[] = "\x0f\x20\xc0\x66\x83\xc8\x01\x0f\x22\xc0\xb8\xa0"
"\x00\x0f\x00\xd8\xb8\x2b\x00\x8e\xd8\x8e\xc0\x8e"
"\xe0\x8e\xe8\xbc\x00\x01\xc7\x06\x00\x01\x1d\xba"
"\xc7\x06\x02\x01\x23\x00\xc7\x06\x04\x01\x00\x01"
"\xc7\x06\x06\x01\x2b\x00\xcb";
const char kvm_asm32_paged[] = "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0";
const char kvm_asm32_vm86[] =
"\x66\xb8\xb8\x00\x0f\x00\xd8\xea\x00\x00\x00\x00\xd0\x00";
const char kvm_asm32_paged_vm86[] = "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22"
"\xc0\x66\xb8\xb8\x00\x0f\x00\xd8\xea\x00"
"\x00\x00\x00\xd0\x00";
const char kvm_asm64_vm86[] = "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0\x66"
"\xb8\xb8\x00\x0f\x00\xd8\xea\x00\x00\x00\x00\xd0"
"\x00";
const char kvm_asm64_enable_long[] = "\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22"
"\xc0\xea\xde\xc0\xad\x0b\x50\x00\x48\xc7"
"\xc0\xd8\x00\x00\x00\x0f\x00\xd8";
const char kvm_asm64_init_vm[] =
"\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0\xea\xde\xc0\xad\x0b\x50\x00"
"\x48\xc7\xc0\xd8\x00\x00\x00\x0f\x00\xd8\x48\xc7\xc1\x3a\x00\x00\x00\x0f"
"\x32\x48\x83\xc8\x05\x0f\x30\x0f\x20\xe0\x48\x0d\x00\x20\x00\x00\x0f\x22"
"\xe0\x48\xc7\xc1\x80\x04\x00\x00\x0f\x32\x48\xc7\xc2\x00\x60\x00\x00\x89"
"\x02\x48\xc7\xc2\x00\x70\x00\x00\x89\x02\x48\xc7\xc0\x00\x5f\x00\x00\xf3"
"\x0f\xc7\x30\x48\xc7\xc0\x08\x5f\x00\x00\x66\x0f\xc7\x30\x0f\xc7\x30\x48"
"\xc7\xc1\x81\x04\x00\x00\x0f\x32\x48\x83\xc8\x3f\x48\x21\xd0\x48\xc7\xc2"
"\x00\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x02\x40\x00\x00\x48\xb8\x84\x9e"
"\x99\xf3\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1e\x40\x00\x00\x48\xc7"
"\xc0\x81\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc1\x83\x04\x00\x00\x0f\x32\x48"
"\x0d\xff\x6f\x03\x00\x48\x21\xd0\x48\xc7\xc2\x0c\x40\x00\x00\x0f\x79\xd0"
"\x48\xc7\xc1\x84\x04\x00\x00\x0f\x32\x48\x0d\xff\x17\x00\x00\x48\x21\xd0"
"\x48\xc7\xc2\x12\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x04\x2c\x00\x00\x48"
"\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x28\x00\x00\x48\xc7"
"\xc0\xff\xff\xff\xff\x0f\x79\xd0\x48\xc7\xc2\x02\x0c\x00\x00\x48\xc7\xc0"
"\x50\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc0\x58\x00\x00\x00\x48\xc7\xc2\x00"
"\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x04\x0c\x00\x00\x0f\x79\xd0\x48\xc7"
"\xc2\x06\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x08\x0c\x00\x00\x0f\x79\xd0"
"\x48\xc7\xc2\x0a\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc0\xd8\x00\x00\x00\x48"
"\xc7\xc2\x0c\x0c\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x02\x2c\x00\x00\x48\xc7"
"\xc0\x00\x05\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x4c\x00\x00\x48\xc7\xc0"
"\x50\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x10\x6c\x00\x00\x48\xc7\xc0\x00"
"\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x12\x6c\x00\x00\x48\xc7\xc0\x00\x00"
"\x00\x00\x0f\x79\xd0\x0f\x20\xc0\x48\xc7\xc2\x00\x6c\x00\x00\x48\x89\xc0"
"\x0f\x79\xd0\x0f\x20\xd8\x48\xc7\xc2\x02\x6c\x00\x00\x48\x89\xc0\x0f\x79"
"\xd0\x0f\x20\xe0\x48\xc7\xc2\x04\x6c\x00\x00\x48\x89\xc0\x0f\x79\xd0\x48"
"\xc7\xc2\x06\x6c\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7"
"\xc2\x08\x6c\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2"
"\x0a\x6c\x00\x00\x48\xc7\xc0\x00\x3a\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0c"
"\x6c\x00\x00\x48\xc7\xc0\x00\x10\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0e\x6c"
"\x00\x00\x48\xc7\xc0\x00\x38\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x14\x6c\x00"
"\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x16\x6c\x00\x00"
"\x48\x8b\x04\x25\x10\x5f\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x00\x00\x00"
"\x48\xc7\xc0\x01\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x02\x00\x00\x00\x48"
"\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x20\x00\x00\x48\xc7"
"\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x02\x20\x00\x00\x48\xc7\xc0"
"\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x04\x20\x00\x00\x48\xc7\xc0\x00"
"\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x06\x20\x00\x00\x48\xc7\xc0\x00\x00"
"\x00\x00\x0f\x79\xd0\x48\xc7\xc1\x77\x02\x00\x00\x0f\x32\x48\xc1\xe2\x20"
"\x48\x09\xd0\x48\xc7\xc2\x00\x2c\x00\x00\x48\x89\xc0\x0f\x79\xd0\x48\xc7"
"\xc2\x04\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2"
"\x0a\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0e"
"\x40\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x10\x40"
"\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x16\x40\x00"
"\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x14\x40\x00\x00"
"\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x60\x00\x00\x48"
"\xc7\xc0\xff\xff\xff\xff\x0f\x79\xd0\x48\xc7\xc2\x02\x60\x00\x00\x48\xc7"
"\xc0\xff\xff\xff\xff\x0f\x79\xd0\x48\xc7\xc2\x1c\x20\x00\x00\x48\xc7\xc0"
"\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1e\x20\x00\x00\x48\xc7\xc0\x00"
"\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x20\x20\x00\x00\x48\xc7\xc0\x00\x00"
"\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x22\x20\x00\x00\x48\xc7\xc0\x00\x00\x00"
"\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x08\x00\x00\x48\xc7\xc0\x58\x00\x00\x00"
"\x0f\x79\xd0\x48\xc7\xc2\x02\x08\x00\x00\x48\xc7\xc0\x50\x00\x00\x00\x0f"
"\x79\xd0\x48\xc7\xc2\x04\x08\x00\x00\x48\xc7\xc0\x58\x00\x00\x00\x0f\x79"
"\xd0\x48\xc7\xc2\x06\x08\x00\x00\x48\xc7\xc0\x58\x00\x00\x00\x0f\x79\xd0"
"\x48\xc7\xc2\x08\x08\x00\x00\x48\xc7\xc0\x58\x00\x00\x00\x0f\x79\xd0\x48"
"\xc7\xc2\x0a\x08\x00\x00\x48\xc7\xc0\x58\x00\x00\x00\x0f\x79\xd0\x48\xc7"
"\xc2\x0c\x08\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2"
"\x0e\x08\x00\x00\x48\xc7\xc0\xd8\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x12"
"\x68\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x14\x68"
"\x00\x00\x48\xc7\xc0\x00\x3a\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x16\x68\x00"
"\x00\x48\xc7\xc0\x00\x10\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x18\x68\x00\x00"
"\x48\xc7\xc0\x00\x38\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x00\x48\x00\x00\x48"
"\xc7\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x02\x48\x00\x00\x48\xc7"
"\xc0\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x04\x48\x00\x00\x48\xc7\xc0"
"\xff\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x06\x48\x00\x00\x48\xc7\xc0\xff"
"\xff\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x08\x48\x00\x00\x48\xc7\xc0\xff\xff"
"\x0f\x00\x0f\x79\xd0\x48\xc7\xc2\x0a\x48\x00\x00\x48\xc7\xc0\xff\xff\x0f"
"\x00\x0f\x79\xd0\x48\xc7\xc2\x0c\x48\x00\x00\x48\xc7\xc0\x00\x00\x00\x00"
"\x0f\x79\xd0\x48\xc7\xc2\x0e\x48\x00\x00\x48\xc7\xc0\xff\x1f\x00\x00\x0f"
"\x79\xd0\x48\xc7\xc2\x10\x48\x00\x00\x48\xc7\xc0\xff\x1f\x00\x00\x0f\x79"
"\xd0\x48\xc7\xc2\x12\x48\x00\x00\x48\xc7\xc0\xff\x1f\x00\x00\x0f\x79\xd0"
"\x48\xc7\xc2\x14\x48\x00\x00\x48\xc7\xc0\x93\x40\x00\x00\x0f\x79\xd0\x48"
"\xc7\xc2\x16\x48\x00\x00\x48\xc7\xc0\x9b\x20\x00\x00\x0f\x79\xd0\x48\xc7"
"\xc2\x18\x48\x00\x00\x48\xc7\xc0\x93\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2"
"\x1a\x48\x00\x00\x48\xc7\xc0\x93\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1c"
"\x48\x00\x00\x48\xc7\xc0\x93\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1e\x48"
"\x00\x00\x48\xc7\xc0\x93\x40\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x20\x48\x00"
"\x00\x48\xc7\xc0\x82\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x22\x48\x00\x00"
"\x48\xc7\xc0\x8b\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1c\x68\x00\x00\x48"
"\xc7\xc0\x00\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x1e\x68\x00\x00\x48\xc7"
"\xc0\x00\x91\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x20\x68\x00\x00\x48\xc7\xc0"
"\x02\x00\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x06\x28\x00\x00\x48\xc7\xc0\x00"
"\x05\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0a\x28\x00\x00\x48\xc7\xc0\x00\x00"
"\x00\x00\x0f\x79\xd0\x48\xc7\xc2\x0c\x28\x00\x00\x48\xc7\xc0\x00\x00\x00"
"\x00\x0f\x79\xd0\x48\xc7\xc2\x0e\x28\x00\x00\x48\xc7\xc0\x00\x00\x00\x00"
"\x0f\x79\xd0\x48\xc7\xc2\x10\x28\x00\x00\x48\xc7\xc0\x00\x00\x00\x00\x0f"
"\x79\xd0\x0f\x20\xc0\x48\xc7\xc2\x00\x68\x00\x00\x48\x89\xc0\x0f\x79\xd0"
"\x0f\x20\xd8\x48\xc7\xc2\x02\x68\x00\x00\x48\x89\xc0\x0f\x79\xd0\x0f\x20"
"\xe0\x48\xc7\xc2\x04\x68\x00\x00\x48\x89\xc0\x0f\x79\xd0\x48\xc7\xc0\x18"
"\x5f\x00\x00\x48\x8b\x10\x48\xc7\xc0\x20\x5f\x00\x00\x48\x8b\x08\x48\x31"
"\xc0\x0f\x78\xd0\x48\x31\xc8\x0f\x79\xd0\x0f\x01\xc2\x48\xc7\xc2\x00\x44"
"\x00\x00\x0f\x78\xd0\xf4";
const char kvm_asm64_vm_exit[] = "\x48\xc7\xc3\x00\x44\x00\x00\x0f\x78\xda\x48"
"\xc7\xc3\x02\x44\x00\x00\x0f\x78\xd9\x48\xc7"
"\xc0\x00\x64\x00\x00\x0f\x78\xc0\x48\xc7\xc3"
"\x1e\x68\x00\x00\x0f\x78\xdb\xf4";
const char kvm_asm64_cpl3[] =
"\x0f\x20\xc0\x0d\x00\x00\x00\x80\x0f\x22\xc0\xea\xde\xc0\xad\x0b\x50\x00"
"\x48\xc7\xc0\xd8\x00\x00\x00\x0f\x00\xd8\x48\xc7\xc0\x6b\x00\x00\x00\x8e"
"\xd8\x8e\xc0\x8e\xe0\x8e\xe8\x48\xc7\xc4\x80\x0f\x00\x00\x48\xc7\x04\x24"
"\x1d\xba\x00\x00\x48\xc7\x44\x24\x04\x63\x00\x00\x00\x48\xc7\x44\x24\x08"
"\x80\x0f\x00\x00\x48\xc7\x44\x24\x0c\x6b\x00\x00\x00\xcb";
#define ADDR_TEXT 0x0000
#define ADDR_GDT 0x1000
#define ADDR_LDT 0x1800
#define ADDR_PML4 0x2000
#define ADDR_PDP 0x3000
#define ADDR_PD 0x4000
#define ADDR_STACK0 0x0f80
#define ADDR_VAR_HLT 0x2800
#define ADDR_VAR_SYSRET 0x2808
#define ADDR_VAR_SYSEXIT 0x2810
#define ADDR_VAR_IDT 0x3800
#define ADDR_VAR_TSS64 0x3a00
#define ADDR_VAR_TSS64_CPL3 0x3c00
#define ADDR_VAR_TSS16 0x3d00
#define ADDR_VAR_TSS16_2 0x3e00
#define ADDR_VAR_TSS16_CPL3 0x3f00
#define ADDR_VAR_TSS32 0x4800
#define ADDR_VAR_TSS32_2 0x4a00
#define ADDR_VAR_TSS32_CPL3 0x4c00
#define ADDR_VAR_TSS32_VM86 0x4e00
#define ADDR_VAR_VMXON_PTR 0x5f00
#define ADDR_VAR_VMCS_PTR 0x5f08
#define ADDR_VAR_VMEXIT_PTR 0x5f10
#define ADDR_VAR_VMWRITE_FLD 0x5f18
#define ADDR_VAR_VMWRITE_VAL 0x5f20
#define ADDR_VAR_VMXON 0x6000
#define ADDR_VAR_VMCS 0x7000
#define ADDR_VAR_VMEXIT_CODE 0x9000
#define ADDR_VAR_USER_CODE 0x9100
#define ADDR_VAR_USER_CODE2 0x9120
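// GDT selectors: descriptor index << 3, with the low two RPL bits set to 3
// for the CPL3 variants. 64-bit TSS and call-gate descriptors occupy two
// consecutive slots (the *_HI entries).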
#define SEL_LDT (1 << 3)
#define SEL_CS16 (2 << 3)
#define SEL_DS16 (3 << 3)
#define SEL_CS16_CPL3 ((4 << 3) + 3)
#define SEL_DS16_CPL3 ((5 << 3) + 3)
#define SEL_CS32 (6 << 3)
#define SEL_DS32 (7 << 3)
#define SEL_CS32_CPL3 ((8 << 3) + 3)
#define SEL_DS32_CPL3 ((9 << 3) + 3)
#define SEL_CS64 (10 << 3)
#define SEL_DS64 (11 << 3)
#define SEL_CS64_CPL3 ((12 << 3) + 3)
#define SEL_DS64_CPL3 ((13 << 3) + 3)
#define SEL_CGATE16 (14 << 3)
#define SEL_TGATE16 (15 << 3)
#define SEL_CGATE32 (16 << 3)
#define SEL_TGATE32 (17 << 3)
#define SEL_CGATE64 (18 << 3)
#define SEL_CGATE64_HI (19 << 3)
#define SEL_TSS16 (20 << 3)
#define SEL_TSS16_2 (21 << 3)
#define SEL_TSS16_CPL3 ((22 << 3) + 3)
#define SEL_TSS32 (23 << 3)
#define SEL_TSS32_2 (24 << 3)
#define SEL_TSS32_CPL3 ((25 << 3) + 3)
#define SEL_TSS32_VM86 (26 << 3)
#define SEL_TSS64 (27 << 3)
#define SEL_TSS64_HI (28 << 3)
#define SEL_TSS64_CPL3 ((29 << 3) + 3)
#define SEL_TSS64_CPL3_HI (30 << 3)
#define MSR_IA32_FEATURE_CONTROL 0x3a
#define MSR_IA32_VMX_BASIC 0x480
#define MSR_IA32_SMBASE 0x9e
#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176
#define MSR_IA32_STAR 0xC0000081
#define MSR_IA32_LSTAR 0xC0000082
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x48B
#define NEXT_INSN $0xbadc0de
#define PREFIX_SIZE 0xba1d
#define KVM_SMI _IO(KVMIO, 0xb7)
#define CR0_PE 1
#define CR0_MP (1 << 1)
#define CR0_EM (1 << 2)
#define CR0_TS (1 << 3)
#define CR0_ET (1 << 4)
#define CR0_NE (1 << 5)
#define CR0_WP (1 << 16)
#define CR0_AM (1 << 18)
#define CR0_NW (1 << 29)
#define CR0_CD (1 << 30)
#define CR0_PG (1 << 31)
#define CR4_VME 1
#define CR4_PVI (1 << 1)
#define CR4_TSD (1 << 2)
#define CR4_DE (1 << 3)
#define CR4_PSE (1 << 4)
#define CR4_PAE (1 << 5)
#define CR4_MCE (1 << 6)
#define CR4_PGE (1 << 7)
#define CR4_PCE (1 << 8)
#define CR4_OSFXSR (1 << 9)
#define CR4_OSXMMEXCPT (1 << 10)
#define CR4_UMIP (1 << 11)
#define CR4_VMXE (1 << 13)
#define CR4_SMXE (1 << 14)
#define CR4_FSGSBASE (1 << 16)
#define CR4_PCIDE (1 << 17)
#define CR4_OSXSAVE (1 << 18)
#define CR4_SMEP (1 << 20)
#define CR4_SMAP (1 << 21)
#define CR4_PKE (1 << 22)
#define EFER_SCE 1
#define EFER_LME (1 << 8)
#define EFER_LMA (1 << 10)
#define EFER_NXE (1 << 11)
#define EFER_SVME (1 << 12)
#define EFER_LMSLE (1 << 13)
#define EFER_FFXSR (1 << 14)
#define EFER_TCE (1 << 15)
#define PDE32_PRESENT 1
#define PDE32_RW (1 << 1)
#define PDE32_USER (1 << 2)
#define PDE32_PS (1 << 7)
#define PDE64_PRESENT 1
#define PDE64_RW (1 << 1)
#define PDE64_USER (1 << 2)
#define PDE64_ACCESSED (1 << 5)
#define PDE64_DIRTY (1 << 6)
#define PDE64_PS (1 << 7)
#define PDE64_G (1 << 8)
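// Hardware task-state segment layouts (16/32/64-bit) that are copied into
// guest memory at the ADDR_VAR_TSS* offsets above.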
struct tss16 {
uint16_t prev;
uint16_t sp0;
uint16_t ss0;
uint16_t sp1;
uint16_t ss1;
uint16_t sp2;
uint16_t ss2;
uint16_t ip;
uint16_t flags;
uint16_t ax;
uint16_t cx;
uint16_t dx;
uint16_t bx;
uint16_t sp;
uint16_t bp;
uint16_t si;
uint16_t di;
uint16_t es;
uint16_t cs;
uint16_t ss;
uint16_t ds;
uint16_t ldt;
} __attribute__((packed));
struct tss32 {
uint16_t prev, prevh;
uint32_t sp0;
uint16_t ss0, ss0h;
uint32_t sp1;
uint16_t ss1, ss1h;
uint32_t sp2;
uint16_t ss2, ss2h;
uint32_t cr3;
uint32_t ip;
uint32_t flags;
uint32_t ax;
uint32_t cx;
uint32_t dx;
uint32_t bx;
uint32_t sp;
uint32_t bp;
uint32_t si;
uint32_t di;
uint16_t es, esh;
uint16_t cs, csh;
uint16_t ss, ssh;
uint16_t ds, dsh;
uint16_t fs, fsh;
uint16_t gs, gsh;
uint16_t ldt, ldth;
uint16_t trace;
uint16_t io_bitmap;
} __attribute__((packed));
struct tss64 {
uint32_t reserved0;
uint64_t rsp[3];
uint64_t reserved1;
uint64_t ist[7];
uint64_t reserved2;
uint32_t reserved3;
uint32_t io_bitmap;
} __attribute__((packed));
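// Packs a kvm_segment into an 8-byte GDT/LDT descriptor and writes it into
// both tables at the slot given by the selector. The *_dword variant also
// clears the following slot, for 16-byte (64-bit TSS and gate) descriptors.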
static void fill_segment_descriptor(uint64_t* dt, uint64_t* lt,
struct kvm_segment* seg)
{
uint16_t index = seg->selector >> 3;
uint64_t limit = seg->g ? seg->limit >> 12 : seg->limit;
uint64_t sd = (limit & 0xffff) | (seg->base & 0xffffff) << 16 |
(uint64_t)seg->type << 40 | (uint64_t)seg->s << 44 |
(uint64_t)seg->dpl << 45 | (uint64_t)seg->present << 47 |
(limit & 0xf0000ULL) << 48 | (uint64_t)seg->avl << 52 |
(uint64_t)seg->l << 53 | (uint64_t)seg->db << 54 |
(uint64_t)seg->g << 55 | (seg->base & 0xff000000ULL) << 56;
NONFAILING(dt[index] = sd);
NONFAILING(lt[index] = sd);
}
static void fill_segment_descriptor_dword(uint64_t* dt, uint64_t* lt,
struct kvm_segment* seg)
{
fill_segment_descriptor(dt, lt, seg);
uint16_t index = seg->selector >> 3;
NONFAILING(dt[index + 1] = 0);
NONFAILING(lt[index + 1] = 0);
}
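// Points the SYSENTER/SYSCALL MSRs at small guest stubs so that fast system
// calls from the guest land on known code (ADDR_VAR_SYSEXIT / ADDR_VAR_SYSRET).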
static void setup_syscall_msrs(int cpufd, uint16_t sel_cs, uint16_t sel_cs_cpl3)
{
char buf[sizeof(struct kvm_msrs) + 5 * sizeof(struct kvm_msr_entry)];
memset(buf, 0, sizeof(buf));
struct kvm_msrs* msrs = (struct kvm_msrs*)buf;
struct kvm_msr_entry* entries = msrs->entries;
msrs->nmsrs = 5;
entries[0].index = MSR_IA32_SYSENTER_CS;
entries[0].data = sel_cs;
entries[1].index = MSR_IA32_SYSENTER_ESP;
entries[1].data = ADDR_STACK0;
entries[2].index = MSR_IA32_SYSENTER_EIP;
entries[2].data = ADDR_VAR_SYSEXIT;
entries[3].index = MSR_IA32_STAR;
entries[3].data = ((uint64_t)sel_cs << 32) | ((uint64_t)sel_cs_cpl3 << 48);
entries[4].index = MSR_IA32_LSTAR;
entries[4].data = ADDR_VAR_SYSRET;
ioctl(cpufd, KVM_SET_MSRS, msrs);
}
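// Fills the first 32 IDT vectors with a rotating mix of 16/32-bit interrupt,
// trap and task gates; the 64-bit variant below uses two-slot 64-bit gates.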
static void setup_32bit_idt(struct kvm_sregs* sregs, char* host_mem,
uintptr_t guest_mem)
{
sregs->idt.base = guest_mem + ADDR_VAR_IDT;
sregs->idt.limit = 0x1ff;
uint64_t* idt = (uint64_t*)(host_mem + sregs->idt.base);
int i;
for (i = 0; i < 32; i++) {
struct kvm_segment gate;
gate.selector = i << 3;
switch (i % 6) {
case 0:
gate.type = 6;
gate.base = SEL_CS16;
break;
case 1:
gate.type = 7;
gate.base = SEL_CS16;
break;
case 2:
gate.type = 3;
gate.base = SEL_TGATE16;
break;
case 3:
gate.type = 14;
gate.base = SEL_CS32;
break;
case 4:
gate.type = 15;
gate.base = SEL_CS32;
break;
case 5:
gate.type = 11;
gate.base = SEL_TGATE32;
break;
}
gate.limit = guest_mem + ADDR_VAR_USER_CODE2;
gate.present = 1;
gate.dpl = 0;
gate.s = 0;
gate.g = 0;
gate.db = 0;
gate.l = 0;
gate.avl = 0;
fill_segment_descriptor(idt, idt, &gate);
}
}
static void setup_64bit_idt(struct kvm_sregs* sregs, char* host_mem,
uintptr_t guest_mem)
{
sregs->idt.base = guest_mem + ADDR_VAR_IDT;
sregs->idt.limit = 0x1ff;
uint64_t* idt = (uint64_t*)(host_mem + sregs->idt.base);
int i;
for (i = 0; i < 32; i++) {
struct kvm_segment gate;
gate.selector = (i * 2) << 3;
gate.type = (i & 1) ? 14 : 15;
gate.base = SEL_CS64;
gate.limit = guest_mem + ADDR_VAR_USER_CODE2;
gate.present = 1;
gate.dpl = 0;
gate.s = 0;
gate.g = 0;
gate.db = 0;
gate.l = 0;
gate.avl = 0;
fill_segment_descriptor_dword(idt, idt, &gate);
}
}
struct kvm_text {
uintptr_t typ;
const void* text;
uintptr_t size;
};
struct kvm_opt {
uint64_t typ;
uint64_t val;
};
#define KVM_SETUP_PAGING (1 << 0)
#define KVM_SETUP_PAE (1 << 1)
#define KVM_SETUP_PROTECTED (1 << 2)
#define KVM_SETUP_CPL3 (1 << 3)
#define KVM_SETUP_VIRT86 (1 << 4)
#define KVM_SETUP_SMM (1 << 5)
#define KVM_SETUP_VM (1 << 6)
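// syz_kvm_setup_cpu(): the syzkaller pseudo-syscall that prepares a VCPU for
// running fuzzer-chosen guest code. It maps per-page memory slots into the
// VM, builds the GDT/LDT/IDT/TSSs and page tables described above, picks a
// mode-specific code prefix (real mode, vm86, protected, long mode, CPL3 or
// nested VMX), copies the supplied text after it, applies the kvm_opt tweaks
// to control registers and segment types, and finally sets the VCPU's
// sregs/regs.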
static uintptr_t syz_kvm_setup_cpu(uintptr_t a0, uintptr_t a1, uintptr_t a2,
uintptr_t a3, uintptr_t a4, uintptr_t a5,
uintptr_t a6, uintptr_t a7)
{
const int vmfd = a0;
const int cpufd = a1;
char* const host_mem = (char*)a2;
const struct kvm_text* const text_array_ptr = (struct kvm_text*)a3;
const uintptr_t text_count = a4;
const uintptr_t flags = a5;
const struct kvm_opt* const opt_array_ptr = (struct kvm_opt*)a6;
uintptr_t opt_count = a7;
const uintptr_t page_size = 4 << 10;
const uintptr_t ioapic_page = 10;
const uintptr_t guest_mem_size = 24 * page_size;
const uintptr_t guest_mem = 0;
(void)text_count;
int text_type = 0;
const void* text = 0;
uintptr_t text_size = 0;
NONFAILING(text_type = text_array_ptr[0].typ);
NONFAILING(text = text_array_ptr[0].text);
NONFAILING(text_size = text_array_ptr[0].size);
uintptr_t i;
for (i = 0; i < guest_mem_size / page_size; i++) {
struct kvm_userspace_memory_region memreg;
memreg.slot = i;
memreg.flags = 0;
memreg.guest_phys_addr = guest_mem + i * page_size;
if (i == ioapic_page)
memreg.guest_phys_addr = 0xfec00000;
memreg.memory_size = page_size;
memreg.userspace_addr = (uintptr_t)host_mem + i * page_size;
ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &memreg);
}
struct kvm_userspace_memory_region memreg;
memreg.slot = 1 + (1 << 16);
memreg.flags = 0;
memreg.guest_phys_addr = 0x30000;
memreg.memory_size = 64 << 10;
memreg.userspace_addr = (uintptr_t)host_mem;
ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &memreg);
struct kvm_sregs sregs;
if (ioctl(cpufd, KVM_GET_SREGS, &sregs))
return -1;
struct kvm_regs regs;
memset(&regs, 0, sizeof(regs));
regs.rip = guest_mem + ADDR_TEXT;
regs.rsp = ADDR_STACK0;
sregs.gdt.base = guest_mem + ADDR_GDT;
sregs.gdt.limit = 256 * sizeof(uint64_t) - 1;
uint64_t* gdt = (uint64_t*)(host_mem + sregs.gdt.base);
struct kvm_segment seg_ldt;
seg_ldt.selector = SEL_LDT;
seg_ldt.type = 2;
seg_ldt.base = guest_mem + ADDR_LDT;
seg_ldt.limit = 256 * sizeof(uint64_t) - 1;
seg_ldt.present = 1;
seg_ldt.dpl = 0;
seg_ldt.s = 0;
seg_ldt.g = 0;
seg_ldt.db = 1;
seg_ldt.l = 0;
sregs.ldt = seg_ldt;
uint64_t* ldt = (uint64_t*)(host_mem + sregs.ldt.base);
struct kvm_segment seg_cs16;
seg_cs16.selector = SEL_CS16;
seg_cs16.type = 11;
seg_cs16.base = 0;
seg_cs16.limit = 0xfffff;
seg_cs16.present = 1;
seg_cs16.dpl = 0;
seg_cs16.s = 1;
seg_cs16.g = 0;
seg_cs16.db = 0;
seg_cs16.l = 0;
struct kvm_segment seg_ds16 = seg_cs16;
seg_ds16.selector = SEL_DS16;
seg_ds16.type = 3;
struct kvm_segment seg_cs16_cpl3 = seg_cs16;
seg_cs16_cpl3.selector = SEL_CS16_CPL3;
seg_cs16_cpl3.dpl = 3;
struct kvm_segment seg_ds16_cpl3 = seg_ds16;
seg_ds16_cpl3.selector = SEL_DS16_CPL3;
seg_ds16_cpl3.dpl = 3;
struct kvm_segment seg_cs32 = seg_cs16;
seg_cs32.selector = SEL_CS32;
seg_cs32.db = 1;
struct kvm_segment seg_ds32 = seg_ds16;
seg_ds32.selector = SEL_DS32;
seg_ds32.db = 1;
struct kvm_segment seg_cs32_cpl3 = seg_cs32;
seg_cs32_cpl3.selector = SEL_CS32_CPL3;
seg_cs32_cpl3.dpl = 3;
struct kvm_segment seg_ds32_cpl3 = seg_ds32;
seg_ds32_cpl3.selector = SEL_DS32_CPL3;
seg_ds32_cpl3.dpl = 3;
struct kvm_segment seg_cs64 = seg_cs16;
seg_cs64.selector = SEL_CS64;
seg_cs64.l = 1;
struct kvm_segment seg_ds64 = seg_ds32;
seg_ds64.selector = SEL_DS64;
struct kvm_segment seg_cs64_cpl3 = seg_cs64;
seg_cs64_cpl3.selector = SEL_CS64_CPL3;
seg_cs64_cpl3.dpl = 3;
struct kvm_segment seg_ds64_cpl3 = seg_ds64;
seg_ds64_cpl3.selector = SEL_DS64_CPL3;
seg_ds64_cpl3.dpl = 3;
struct kvm_segment seg_tss32;
seg_tss32.selector = SEL_TSS32;
seg_tss32.type = 9;
seg_tss32.base = ADDR_VAR_TSS32;
seg_tss32.limit = 0x1ff;
seg_tss32.present = 1;
seg_tss32.dpl = 0;
seg_tss32.s = 0;
seg_tss32.g = 0;
seg_tss32.db = 0;
seg_tss32.l = 0;
struct kvm_segment seg_tss32_2 = seg_tss32;
seg_tss32_2.selector = SEL_TSS32_2;
seg_tss32_2.base = ADDR_VAR_TSS32_2;
struct kvm_segment seg_tss32_cpl3 = seg_tss32;
seg_tss32_cpl3.selector = SEL_TSS32_CPL3;
seg_tss32_cpl3.base = ADDR_VAR_TSS32_CPL3;
struct kvm_segment seg_tss32_vm86 = seg_tss32;
seg_tss32_vm86.selector = SEL_TSS32_VM86;
seg_tss32_vm86.base = ADDR_VAR_TSS32_VM86;
struct kvm_segment seg_tss16 = seg_tss32;
seg_tss16.selector = SEL_TSS16;
seg_tss16.base = ADDR_VAR_TSS16;
seg_tss16.limit = 0xff;
seg_tss16.type = 1;
struct kvm_segment seg_tss16_2 = seg_tss16;
seg_tss16_2.selector = SEL_TSS16_2;
seg_tss16_2.base = ADDR_VAR_TSS16_2;
seg_tss16_2.dpl = 0;
struct kvm_segment seg_tss16_cpl3 = seg_tss16;
seg_tss16_cpl3.selector = SEL_TSS16_CPL3;
seg_tss16_cpl3.base = ADDR_VAR_TSS16_CPL3;
seg_tss16_cpl3.dpl = 3;
struct kvm_segment seg_tss64 = seg_tss32;
seg_tss64.selector = SEL_TSS64;
seg_tss64.base = ADDR_VAR_TSS64;
seg_tss64.limit = 0x1ff;
struct kvm_segment seg_tss64_cpl3 = seg_tss64;
seg_tss64_cpl3.selector = SEL_TSS64_CPL3;
seg_tss64_cpl3.base = ADDR_VAR_TSS64_CPL3;
seg_tss64_cpl3.dpl = 3;
struct kvm_segment seg_cgate16;
seg_cgate16.selector = SEL_CGATE16;
seg_cgate16.type = 4;
seg_cgate16.base = SEL_CS16 | (2 << 16);
seg_cgate16.limit = ADDR_VAR_USER_CODE2;
seg_cgate16.present = 1;
seg_cgate16.dpl = 0;
seg_cgate16.s = 0;
seg_cgate16.g = 0;
seg_cgate16.db = 0;
seg_cgate16.l = 0;
seg_cgate16.avl = 0;
struct kvm_segment seg_tgate16 = seg_cgate16;
seg_tgate16.selector = SEL_TGATE16;
seg_tgate16.type = 3;
seg_cgate16.base = SEL_TSS16_2;
seg_tgate16.limit = 0;
struct kvm_segment seg_cgate32 = seg_cgate16;
seg_cgate32.selector = SEL_CGATE32;
seg_cgate32.type = 12;
seg_cgate32.base = SEL_CS32 | (2 << 16);
struct kvm_segment seg_tgate32 = seg_cgate32;
seg_tgate32.selector = SEL_TGATE32;
seg_tgate32.type = 11;
seg_tgate32.base = SEL_TSS32_2;
seg_tgate32.limit = 0;
struct kvm_segment seg_cgate64 = seg_cgate16;
seg_cgate64.selector = SEL_CGATE64;
seg_cgate64.type = 12;
seg_cgate64.base = SEL_CS64;
int kvmfd = open("/dev/kvm", O_RDWR);
char buf[sizeof(struct kvm_cpuid2) + 128 * sizeof(struct kvm_cpuid_entry2)];
memset(buf, 0, sizeof(buf));
struct kvm_cpuid2* cpuid = (struct kvm_cpuid2*)buf;
cpuid->nent = 128;
ioctl(kvmfd, KVM_GET_SUPPORTED_CPUID, cpuid);
ioctl(cpufd, KVM_SET_CPUID2, cpuid);
close(kvmfd);
const char* text_prefix = 0;
int text_prefix_size = 0;
char* host_text = host_mem + ADDR_TEXT;
if (text_type == 8) {
if (flags & KVM_SETUP_SMM) {
if (flags & KVM_SETUP_PROTECTED) {
sregs.cs = seg_cs16;
sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds16;
sregs.cr0 |= CR0_PE;
} else {
sregs.cs.selector = 0;
sregs.cs.base = 0;
}
NONFAILING(*(host_mem + ADDR_TEXT) = 0xf4);
host_text = host_mem + 0x8000;
ioctl(cpufd, KVM_SMI, 0);
} else if (flags & KVM_SETUP_VIRT86) {
sregs.cs = seg_cs32;
sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
sregs.cr0 |= CR0_PE;
sregs.efer |= EFER_SCE;
setup_syscall_msrs(cpufd, SEL_CS32, SEL_CS32_CPL3);
setup_32bit_idt(&sregs, host_mem, guest_mem);
if (flags & KVM_SETUP_PAGING) {
uint64_t pd_addr = guest_mem + ADDR_PD;
uint64_t* pd = (uint64_t*)(host_mem + ADDR_PD);
NONFAILING(pd[0] = PDE32_PRESENT | PDE32_RW | PDE32_USER | PDE32_PS);
sregs.cr3 = pd_addr;
sregs.cr4 |= CR4_PSE;
text_prefix = kvm_asm32_paged_vm86;
text_prefix_size = sizeof(kvm_asm32_paged_vm86) - 1;
} else {
text_prefix = kvm_asm32_vm86;
text_prefix_size = sizeof(kvm_asm32_vm86) - 1;
}
} else {
sregs.cs.selector = 0;
sregs.cs.base = 0;
}
} else if (text_type == 16) {
if (flags & KVM_SETUP_CPL3) {
sregs.cs = seg_cs16;
sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds16;
text_prefix = kvm_asm16_cpl3;
text_prefix_size = sizeof(kvm_asm16_cpl3) - 1;
} else {
sregs.cr0 |= CR0_PE;
sregs.cs = seg_cs16;
sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds16;
}
} else if (text_type == 32) {
sregs.cr0 |= CR0_PE;
sregs.efer |= EFER_SCE;
setup_syscall_msrs(cpufd, SEL_CS32, SEL_CS32_CPL3);
setup_32bit_idt(&sregs, host_mem, guest_mem);
if (flags & KVM_SETUP_SMM) {
sregs.cs = seg_cs32;
sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
NONFAILING(*(host_mem + ADDR_TEXT) = 0xf4);
host_text = host_mem + 0x8000;
ioctl(cpufd, KVM_SMI, 0);
} else if (flags & KVM_SETUP_PAGING) {
sregs.cs = seg_cs32;
sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
uint64_t pd_addr = guest_mem + ADDR_PD;
uint64_t* pd = (uint64_t*)(host_mem + ADDR_PD);
NONFAILING(pd[0] = PDE32_PRESENT | PDE32_RW | PDE32_USER | PDE32_PS);
sregs.cr3 = pd_addr;
sregs.cr4 |= CR4_PSE;
text_prefix = kvm_asm32_paged;
text_prefix_size = sizeof(kvm_asm32_paged) - 1;
} else if (flags & KVM_SETUP_CPL3) {
sregs.cs = seg_cs32_cpl3;
sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32_cpl3;
} else {
sregs.cs = seg_cs32;
sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
}
} else {
sregs.efer |= EFER_LME | EFER_SCE;
sregs.cr0 |= CR0_PE;
setup_syscall_msrs(cpufd, SEL_CS64, SEL_CS64_CPL3);
setup_64bit_idt(&sregs, host_mem, guest_mem);
sregs.cs = seg_cs32;
sregs.ds = sregs.es = sregs.fs = sregs.gs = sregs.ss = seg_ds32;
uint64_t pml4_addr = guest_mem + ADDR_PML4;
uint64_t* pml4 = (uint64_t*)(host_mem + ADDR_PML4);
uint64_t pdpt_addr = guest_mem + ADDR_PDP;
uint64_t* pdpt = (uint64_t*)(host_mem + ADDR_PDP);
uint64_t pd_addr = guest_mem + ADDR_PD;
uint64_t* pd = (uint64_t*)(host_mem + ADDR_PD);
NONFAILING(pml4[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pdpt_addr);
NONFAILING(pdpt[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | pd_addr);
NONFAILING(pd[0] = PDE64_PRESENT | PDE64_RW | PDE64_USER | PDE64_PS);
sregs.cr3 = pml4_addr;
sregs.cr4 |= CR4_PAE;
if (flags & KVM_SETUP_VM) {
sregs.cr0 |= CR0_NE;
NONFAILING(*((uint64_t*)(host_mem + ADDR_VAR_VMXON_PTR)) =
ADDR_VAR_VMXON);
NONFAILING(*((uint64_t*)(host_mem + ADDR_VAR_VMCS_PTR)) = ADDR_VAR_VMCS);
NONFAILING(memcpy(host_mem + ADDR_VAR_VMEXIT_CODE, kvm_asm64_vm_exit,
sizeof(kvm_asm64_vm_exit) - 1));
NONFAILING(*((uint64_t*)(host_mem + ADDR_VAR_VMEXIT_PTR)) =
ADDR_VAR_VMEXIT_CODE);
text_prefix = kvm_asm64_init_vm;
text_prefix_size = sizeof(kvm_asm64_init_vm) - 1;
} else if (flags & KVM_SETUP_CPL3) {
text_prefix = kvm_asm64_cpl3;
text_prefix_size = sizeof(kvm_asm64_cpl3) - 1;
} else {
text_prefix = kvm_asm64_enable_long;
text_prefix_size = sizeof(kvm_asm64_enable_long) - 1;
}
}
struct tss16 tss16;
memset(&tss16, 0, sizeof(tss16));
tss16.ss0 = tss16.ss1 = tss16.ss2 = SEL_DS16;
tss16.sp0 = tss16.sp1 = tss16.sp2 = ADDR_STACK0;
tss16.ip = ADDR_VAR_USER_CODE2;
tss16.flags = (1 << 1);
tss16.cs = SEL_CS16;
tss16.es = tss16.ds = tss16.ss = SEL_DS16;
tss16.ldt = SEL_LDT;
struct tss16* tss16_addr = (struct tss16*)(host_mem + seg_tss16_2.base);
NONFAILING(memcpy(tss16_addr, &tss16, sizeof(tss16)));
memset(&tss16, 0, sizeof(tss16));
tss16.ss0 = tss16.ss1 = tss16.ss2 = SEL_DS16;
tss16.sp0 = tss16.sp1 = tss16.sp2 = ADDR_STACK0;
tss16.ip = ADDR_VAR_USER_CODE2;
tss16.flags = (1 << 1);
tss16.cs = SEL_CS16_CPL3;
tss16.es = tss16.ds = tss16.ss = SEL_DS16_CPL3;
tss16.ldt = SEL_LDT;
struct tss16* tss16_cpl3_addr =
(struct tss16*)(host_mem + seg_tss16_cpl3.base);
NONFAILING(memcpy(tss16_cpl3_addr, &tss16, sizeof(tss16)));
struct tss32 tss32;
memset(&tss32, 0, sizeof(tss32));
tss32.ss0 = tss32.ss1 = tss32.ss2 = SEL_DS32;
tss32.sp0 = tss32.sp1 = tss32.sp2 = ADDR_STACK0;
tss32.ip = ADDR_VAR_USER_CODE;
tss32.flags = (1 << 1) | (1 << 17);
tss32.ldt = SEL_LDT;
tss32.cr3 = sregs.cr3;
tss32.io_bitmap = offsetof(struct tss32, io_bitmap);
struct tss32* tss32_addr = (struct tss32*)(host_mem + seg_tss32_vm86.base);
NONFAILING(memcpy(tss32_addr, &tss32, sizeof(tss32)));
memset(&tss32, 0, sizeof(tss32));
tss32.ss0 = tss32.ss1 = tss32.ss2 = SEL_DS32;
tss32.sp0 = tss32.sp1 = tss32.sp2 = ADDR_STACK0;
tss32.ip = ADDR_VAR_USER_CODE;
tss32.flags = (1 << 1);
tss32.cr3 = sregs.cr3;
tss32.es = tss32.ds = tss32.ss = tss32.gs = tss32.fs = SEL_DS32;
tss32.cs = SEL_CS32;
tss32.ldt = SEL_LDT;
tss32.cr3 = sregs.cr3;
tss32.io_bitmap = offsetof(struct tss32, io_bitmap);
struct tss32* tss32_cpl3_addr = (struct tss32*)(host_mem + seg_tss32_2.base);
NONFAILING(memcpy(tss32_cpl3_addr, &tss32, sizeof(tss32)));
struct tss64 tss64;
memset(&tss64, 0, sizeof(tss64));
tss64.rsp[0] = ADDR_STACK0;
tss64.rsp[1] = ADDR_STACK0;
tss64.rsp[2] = ADDR_STACK0;
tss64.io_bitmap = offsetof(struct tss64, io_bitmap);
struct tss64* tss64_addr = (struct tss64*)(host_mem + seg_tss64.base);
NONFAILING(memcpy(tss64_addr, &tss64, sizeof(tss64)));
memset(&tss64, 0, sizeof(tss64));
tss64.rsp[0] = ADDR_STACK0;
tss64.rsp[1] = ADDR_STACK0;
tss64.rsp[2] = ADDR_STACK0;
tss64.io_bitmap = offsetof(struct tss64, io_bitmap);
struct tss64* tss64_cpl3_addr =
(struct tss64*)(host_mem + seg_tss64_cpl3.base);
NONFAILING(memcpy(tss64_cpl3_addr, &tss64, sizeof(tss64)));
if (text_size > 1000)
text_size = 1000;
if (text_prefix) {
NONFAILING(memcpy(host_text, text_prefix, text_prefix_size));
void* patch = 0;
NONFAILING(patch =
memmem(host_text, text_prefix_size, "\xde\xc0\xad\x0b", 4));
if (patch)
NONFAILING(*((uint32_t*)patch) =
guest_mem + ADDR_TEXT + ((char*)patch - host_text) + 6);
uint16_t magic = PREFIX_SIZE;
patch = 0;
NONFAILING(patch =
memmem(host_text, text_prefix_size, &magic, sizeof(magic)));
if (patch)
NONFAILING(*((uint16_t*)patch) =
guest_mem + ADDR_TEXT + text_prefix_size);
}
NONFAILING(memcpy((void*)(host_text + text_prefix_size), text, text_size));
NONFAILING(*(host_text + text_prefix_size + text_size) = 0xf4);
NONFAILING(memcpy(host_mem + ADDR_VAR_USER_CODE, text, text_size));
NONFAILING(*(host_mem + ADDR_VAR_USER_CODE + text_size) = 0xf4);
NONFAILING(*(host_mem + ADDR_VAR_HLT) = 0xf4);
NONFAILING(memcpy(host_mem + ADDR_VAR_SYSRET, "\x0f\x07\xf4", 3));
NONFAILING(memcpy(host_mem + ADDR_VAR_SYSEXIT, "\x0f\x35\xf4", 3));
NONFAILING(*(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_FLD) = 0);
NONFAILING(*(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_VAL) = 0);
if (opt_count > 2)
opt_count = 2;
for (i = 0; i < opt_count; i++) {
uint64_t typ = 0;
uint64_t val = 0;
NONFAILING(typ = opt_array_ptr[i].typ);
NONFAILING(val = opt_array_ptr[i].val);
switch (typ % 9) {
case 0:
sregs.cr0 ^= val & (CR0_MP | CR0_EM | CR0_ET | CR0_NE | CR0_WP | CR0_AM |
CR0_NW | CR0_CD);
break;
case 1:
sregs.cr4 ^=
val & (CR4_VME | CR4_PVI | CR4_TSD | CR4_DE | CR4_MCE | CR4_PGE |
CR4_PCE | CR4_OSFXSR | CR4_OSXMMEXCPT | CR4_UMIP | CR4_VMXE |
CR4_SMXE | CR4_FSGSBASE | CR4_PCIDE | CR4_OSXSAVE | CR4_SMEP |
CR4_SMAP | CR4_PKE);
break;
case 2:
sregs.efer ^= val & (EFER_SCE | EFER_NXE | EFER_SVME | EFER_LMSLE |
EFER_FFXSR | EFER_TCE);
break;
case 3:
val &=
((1 << 8) | (1 << 9) | (1 << 10) | (1 << 12) | (1 << 13) | (1 << 14) |
(1 << 15) | (1 << 18) | (1 << 19) | (1 << 20) | (1 << 21));
regs.rflags ^= val;
NONFAILING(tss16_addr->flags ^= val);
NONFAILING(tss16_cpl3_addr->flags ^= val);
NONFAILING(tss32_addr->flags ^= val);
NONFAILING(tss32_cpl3_addr->flags ^= val);
break;
case 4:
seg_cs16.type = val & 0xf;
seg_cs32.type = val & 0xf;
seg_cs64.type = val & 0xf;
break;
case 5:
seg_cs16_cpl3.type = val & 0xf;
seg_cs32_cpl3.type = val & 0xf;
seg_cs64_cpl3.type = val & 0xf;
break;
case 6:
seg_ds16.type = val & 0xf;
seg_ds32.type = val & 0xf;
seg_ds64.type = val & 0xf;
break;
case 7:
seg_ds16_cpl3.type = val & 0xf;
seg_ds32_cpl3.type = val & 0xf;
seg_ds64_cpl3.type = val & 0xf;
break;
case 8:
NONFAILING(*(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_FLD) =
(val & 0xffff));
NONFAILING(*(uint64_t*)(host_mem + ADDR_VAR_VMWRITE_VAL) = (val >> 16));
break;
default:
fail("bad kvm setup opt");
}
}
regs.rflags |= 2;
fill_segment_descriptor(gdt, ldt, &seg_ldt);
fill_segment_descriptor(gdt, ldt, &seg_cs16);
fill_segment_descriptor(gdt, ldt, &seg_ds16);
fill_segment_descriptor(gdt, ldt, &seg_cs16_cpl3);
fill_segment_descriptor(gdt, ldt, &seg_ds16_cpl3);
fill_segment_descriptor(gdt, ldt, &seg_cs32);
fill_segment_descriptor(gdt, ldt, &seg_ds32);
fill_segment_descriptor(gdt, ldt, &seg_cs32_cpl3);
fill_segment_descriptor(gdt, ldt, &seg_ds32_cpl3);
fill_segment_descriptor(gdt, ldt, &seg_cs64);
fill_segment_descriptor(gdt, ldt, &seg_ds64);
fill_segment_descriptor(gdt, ldt, &seg_cs64_cpl3);
fill_segment_descriptor(gdt, ldt, &seg_ds64_cpl3);
fill_segment_descriptor(gdt, ldt, &seg_tss32);
fill_segment_descriptor(gdt, ldt, &seg_tss32_2);
fill_segment_descriptor(gdt, ldt, &seg_tss32_cpl3);
fill_segment_descriptor(gdt, ldt, &seg_tss32_vm86);
fill_segment_descriptor(gdt, ldt, &seg_tss16);
fill_segment_descriptor(gdt, ldt, &seg_tss16_2);
fill_segment_descriptor(gdt, ldt, &seg_tss16_cpl3);
fill_segment_descriptor_dword(gdt, ldt, &seg_tss64);
fill_segment_descriptor_dword(gdt, ldt, &seg_tss64_cpl3);
fill_segment_descriptor(gdt, ldt, &seg_cgate16);
fill_segment_descriptor(gdt, ldt, &seg_tgate16);
fill_segment_descriptor(gdt, ldt, &seg_cgate32);
fill_segment_descriptor(gdt, ldt, &seg_tgate32);
fill_segment_descriptor_dword(gdt, ldt, &seg_cgate64);
if (ioctl(cpufd, KVM_SET_SREGS, &sregs))
return -1;
if (ioctl(cpufd, KVM_SET_REGS, &regs))
return -1;
return 0;
}
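// The remainder of the file is generic syzkaller harness code. The functions
// below snapshot the iptables/ip6tables/arptables/ebtables rule tables once
// at startup and restore them between iterations, so that every run starts
// from an unmodified netfilter state.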
#define XT_TABLE_SIZE 1536
#define XT_MAX_ENTRIES 10
struct xt_counters {
uint64_t pcnt, bcnt;
};
struct ipt_getinfo {
char name[32];
unsigned int valid_hooks;
unsigned int hook_entry[5];
unsigned int underflow[5];
unsigned int num_entries;
unsigned int size;
};
struct ipt_get_entries {
char name[32];
unsigned int size;
void* entrytable[XT_TABLE_SIZE / sizeof(void*)];
};
struct ipt_replace {
char name[32];
unsigned int valid_hooks;
unsigned int num_entries;
unsigned int size;
unsigned int hook_entry[5];
unsigned int underflow[5];
unsigned int num_counters;
struct xt_counters* counters;
char entrytable[XT_TABLE_SIZE];
};
struct ipt_table_desc {
const char* name;
struct ipt_getinfo info;
struct ipt_replace replace;
};
static struct ipt_table_desc ipv4_tables[] = {
{.name = "filter"}, {.name = "nat"}, {.name = "mangle"},
{.name = "raw"}, {.name = "security"},
};
static struct ipt_table_desc ipv6_tables[] = {
{.name = "filter"}, {.name = "nat"}, {.name = "mangle"},
{.name = "raw"}, {.name = "security"},
};
#define IPT_BASE_CTL 64
#define IPT_SO_SET_REPLACE (IPT_BASE_CTL)
#define IPT_SO_GET_INFO (IPT_BASE_CTL)
#define IPT_SO_GET_ENTRIES (IPT_BASE_CTL + 1)
struct arpt_getinfo {
char name[32];
unsigned int valid_hooks;
unsigned int hook_entry[3];
unsigned int underflow[3];
unsigned int num_entries;
unsigned int size;
};
struct arpt_get_entries {
char name[32];
unsigned int size;
void* entrytable[XT_TABLE_SIZE / sizeof(void*)];
};
struct arpt_replace {
char name[32];
unsigned int valid_hooks;
unsigned int num_entries;
unsigned int size;
unsigned int hook_entry[3];
unsigned int underflow[3];
unsigned int num_counters;
struct xt_counters* counters;
char entrytable[XT_TABLE_SIZE];
};
struct arpt_table_desc {
const char* name;
struct arpt_getinfo info;
struct arpt_replace replace;
};
static struct arpt_table_desc arpt_tables[] = {
{.name = "filter"},
};
#define ARPT_BASE_CTL 96
#define ARPT_SO_SET_REPLACE (ARPT_BASE_CTL)
#define ARPT_SO_GET_INFO (ARPT_BASE_CTL)
#define ARPT_SO_GET_ENTRIES (ARPT_BASE_CTL + 1)
static void checkpoint_iptables(struct ipt_table_desc* tables, int num_tables,
int family, int level)
{
struct ipt_get_entries entries;
socklen_t optlen;
int fd, i;
fd = socket(family, SOCK_STREAM, IPPROTO_TCP);
if (fd == -1)
fail("socket(%d, SOCK_STREAM, IPPROTO_TCP)", family);
for (i = 0; i < num_tables; i++) {
struct ipt_table_desc* table = &tables[i];
strcpy(table->info.name, table->name);
strcpy(table->replace.name, table->name);
optlen = sizeof(table->info);
if (getsockopt(fd, level, IPT_SO_GET_INFO, &table->info, &optlen)) {
switch (errno) {
case EPERM:
case ENOENT:
case ENOPROTOOPT:
continue;
}
fail("getsockopt(IPT_SO_GET_INFO)");
}
if (table->info.size > sizeof(table->replace.entrytable))
fail("table size is too large: %u", table->info.size);
if (table->info.num_entries > XT_MAX_ENTRIES)
fail("too many counters: %u", table->info.num_entries);
memset(&entries, 0, sizeof(entries));
strcpy(entries.name, table->name);
entries.size = table->info.size;
optlen = sizeof(entries) - sizeof(entries.entrytable) + table->info.size;
if (getsockopt(fd, level, IPT_SO_GET_ENTRIES, &entries, &optlen))
fail("getsockopt(IPT_SO_GET_ENTRIES)");
table->replace.valid_hooks = table->info.valid_hooks;
table->replace.num_entries = table->info.num_entries;
table->replace.size = table->info.size;
memcpy(table->replace.hook_entry, table->info.hook_entry,
sizeof(table->replace.hook_entry));
memcpy(table->replace.underflow, table->info.underflow,
sizeof(table->replace.underflow));
memcpy(table->replace.entrytable, entries.entrytable, table->info.size);
}
close(fd);
}
static void reset_iptables(struct ipt_table_desc* tables, int num_tables,
int family, int level)
{
struct xt_counters counters[XT_MAX_ENTRIES];
struct ipt_get_entries entries;
struct ipt_getinfo info;
socklen_t optlen;
int fd, i;
fd = socket(family, SOCK_STREAM, IPPROTO_TCP);
if (fd == -1)
fail("socket(%d, SOCK_STREAM, IPPROTO_TCP)", family);
for (i = 0; i < num_tables; i++) {
struct ipt_table_desc* table = &tables[i];
if (table->info.valid_hooks == 0)
continue;
memset(&info, 0, sizeof(info));
strcpy(info.name, table->name);
optlen = sizeof(info);
if (getsockopt(fd, level, IPT_SO_GET_INFO, &info, &optlen))
fail("getsockopt(IPT_SO_GET_INFO)");
if (memcmp(&table->info, &info, sizeof(table->info)) == 0) {
memset(&entries, 0, sizeof(entries));
strcpy(entries.name, table->name);
entries.size = table->info.size;
optlen = sizeof(entries) - sizeof(entries.entrytable) + entries.size;
if (getsockopt(fd, level, IPT_SO_GET_ENTRIES, &entries, &optlen))
fail("getsockopt(IPT_SO_GET_ENTRIES)");
if (memcmp(table->replace.entrytable, entries.entrytable,
table->info.size) == 0)
continue;
}
table->replace.num_counters = info.num_entries;
table->replace.counters = counters;
optlen = sizeof(table->replace) - sizeof(table->replace.entrytable) +
table->replace.size;
if (setsockopt(fd, level, IPT_SO_SET_REPLACE, &table->replace, optlen))
fail("setsockopt(IPT_SO_SET_REPLACE)");
}
close(fd);
}
static void checkpoint_arptables(void)
{
struct arpt_get_entries entries;
socklen_t optlen;
unsigned i;
int fd;
fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (fd == -1)
fail("socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)");
for (i = 0; i < sizeof(arpt_tables) / sizeof(arpt_tables[0]); i++) {
struct arpt_table_desc* table = &arpt_tables[i];
strcpy(table->info.name, table->name);
strcpy(table->replace.name, table->name);
optlen = sizeof(table->info);
if (getsockopt(fd, SOL_IP, ARPT_SO_GET_INFO, &table->info, &optlen)) {
switch (errno) {
case EPERM:
case ENOENT:
case ENOPROTOOPT:
continue;
}
fail("getsockopt(ARPT_SO_GET_INFO)");
}
if (table->info.size > sizeof(table->replace.entrytable))
fail("table size is too large: %u", table->info.size);
if (table->info.num_entries > XT_MAX_ENTRIES)
fail("too many counters: %u", table->info.num_entries);
memset(&entries, 0, sizeof(entries));
strcpy(entries.name, table->name);
entries.size = table->info.size;
optlen = sizeof(entries) - sizeof(entries.entrytable) + table->info.size;
if (getsockopt(fd, SOL_IP, ARPT_SO_GET_ENTRIES, &entries, &optlen))
fail("getsockopt(ARPT_SO_GET_ENTRIES)");
table->replace.valid_hooks = table->info.valid_hooks;
table->replace.num_entries = table->info.num_entries;
table->replace.size = table->info.size;
memcpy(table->replace.hook_entry, table->info.hook_entry,
sizeof(table->replace.hook_entry));
memcpy(table->replace.underflow, table->info.underflow,
sizeof(table->replace.underflow));
memcpy(table->replace.entrytable, entries.entrytable, table->info.size);
}
close(fd);
}
static void reset_arptables()
{
struct xt_counters counters[XT_MAX_ENTRIES];
struct arpt_get_entries entries;
struct arpt_getinfo info;
socklen_t optlen;
unsigned i;
int fd;
fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (fd == -1)
fail("socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)");
for (i = 0; i < sizeof(arpt_tables) / sizeof(arpt_tables[0]); i++) {
struct arpt_table_desc* table = &arpt_tables[i];
if (table->info.valid_hooks == 0)
continue;
memset(&info, 0, sizeof(info));
strcpy(info.name, table->name);
optlen = sizeof(info);
if (getsockopt(fd, SOL_IP, ARPT_SO_GET_INFO, &info, &optlen))
fail("getsockopt(ARPT_SO_GET_INFO)");
if (memcmp(&table->info, &info, sizeof(table->info)) == 0) {
memset(&entries, 0, sizeof(entries));
strcpy(entries.name, table->name);
entries.size = table->info.size;
optlen = sizeof(entries) - sizeof(entries.entrytable) + entries.size;
if (getsockopt(fd, SOL_IP, ARPT_SO_GET_ENTRIES, &entries, &optlen))
fail("getsockopt(ARPT_SO_GET_ENTRIES)");
if (memcmp(table->replace.entrytable, entries.entrytable,
table->info.size) == 0)
continue;
}
table->replace.num_counters = info.num_entries;
table->replace.counters = counters;
optlen = sizeof(table->replace) - sizeof(table->replace.entrytable) +
table->replace.size;
if (setsockopt(fd, SOL_IP, ARPT_SO_SET_REPLACE, &table->replace, optlen))
fail("setsockopt(ARPT_SO_SET_REPLACE)");
}
close(fd);
}
#include <linux/if.h>
#include <linux/netfilter_bridge/ebtables.h>
struct ebt_table_desc {
const char* name;
struct ebt_replace replace;
char entrytable[XT_TABLE_SIZE];
};
static struct ebt_table_desc ebt_tables[] = {
{.name = "filter"}, {.name = "nat"}, {.name = "broute"},
};
static void checkpoint_ebtables(void)
{
socklen_t optlen;
unsigned i;
int fd;
fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (fd == -1)
fail("socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)");
for (i = 0; i < sizeof(ebt_tables) / sizeof(ebt_tables[0]); i++) {
struct ebt_table_desc* table = &ebt_tables[i];
strcpy(table->replace.name, table->name);
optlen = sizeof(table->replace);
if (getsockopt(fd, SOL_IP, EBT_SO_GET_INIT_INFO, &table->replace,
&optlen)) {
switch (errno) {
case EPERM:
case ENOENT:
case ENOPROTOOPT:
continue;
}
fail("getsockopt(EBT_SO_GET_INIT_INFO)");
}
if (table->replace.entries_size > sizeof(table->entrytable))
fail("table size is too large: %u", table->replace.entries_size);
table->replace.num_counters = 0;
table->replace.entries = table->entrytable;
optlen = sizeof(table->replace) + table->replace.entries_size;
if (getsockopt(fd, SOL_IP, EBT_SO_GET_INIT_ENTRIES, &table->replace,
&optlen))
fail("getsockopt(EBT_SO_GET_INIT_ENTRIES)");
}
close(fd);
}
static void reset_ebtables()
{
struct ebt_replace replace;
char entrytable[XT_TABLE_SIZE];
socklen_t optlen;
unsigned i, j, h;
int fd;
fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (fd == -1)
fail("socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)");
for (i = 0; i < sizeof(ebt_tables) / sizeof(ebt_tables[0]); i++) {
struct ebt_table_desc* table = &ebt_tables[i];
if (table->replace.valid_hooks == 0)
continue;
memset(&replace, 0, sizeof(replace));
strcpy(replace.name, table->name);
optlen = sizeof(replace);
if (getsockopt(fd, SOL_IP, EBT_SO_GET_INFO, &replace, &optlen))
fail("getsockopt(EBT_SO_GET_INFO)");
replace.num_counters = 0;
table->replace.entries = 0;
for (h = 0; h < NF_BR_NUMHOOKS; h++)
table->replace.hook_entry[h] = 0;
if (memcmp(&table->replace, &replace, sizeof(table->replace)) == 0) {
memset(&entrytable, 0, sizeof(entrytable));
replace.entries = entrytable;
optlen = sizeof(replace) + replace.entries_size;
if (getsockopt(fd, SOL_IP, EBT_SO_GET_ENTRIES, &replace, &optlen))
fail("getsockopt(EBT_SO_GET_ENTRIES)");
if (memcmp(table->entrytable, entrytable, replace.entries_size) == 0)
continue;
}
for (j = 0, h = 0; h < NF_BR_NUMHOOKS; h++) {
if (table->replace.valid_hooks & (1 << h)) {
table->replace.hook_entry[h] =
(struct ebt_entries*)table->entrytable + j;
j++;
}
}
table->replace.entries = table->entrytable;
optlen = sizeof(table->replace) + table->replace.entries_size;
if (setsockopt(fd, SOL_IP, EBT_SO_SET_ENTRIES, &table->replace, optlen))
fail("setsockopt(EBT_SO_SET_ENTRIES)");
}
close(fd);
}
static void checkpoint_net_namespace(void)
{
checkpoint_ebtables();
checkpoint_arptables();
checkpoint_iptables(ipv4_tables, sizeof(ipv4_tables) / sizeof(ipv4_tables[0]),
AF_INET, SOL_IP);
checkpoint_iptables(ipv6_tables, sizeof(ipv6_tables) / sizeof(ipv6_tables[0]),
AF_INET6, SOL_IPV6);
}
static void reset_net_namespace(void)
{
reset_ebtables();
reset_arptables();
reset_iptables(ipv4_tables, sizeof(ipv4_tables) / sizeof(ipv4_tables[0]),
AF_INET, SOL_IP);
reset_iptables(ipv6_tables, sizeof(ipv6_tables) / sizeof(ipv6_tables[0]),
AF_INET6, SOL_IPV6);
}
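// Recursively unmounts and deletes the per-iteration working directory,
// retrying around EBUSY/ENOTEMPTY caused by lingering mounts.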
static void remove_dir(const char* dir)
{
DIR* dp;
struct dirent* ep;
int iter = 0;
retry:
while (umount2(dir, MNT_DETACH) == 0) {
}
dp = opendir(dir);
if (dp == NULL) {
if (errno == EMFILE) {
exitf("opendir(%s) failed due to NOFILE, exiting", dir);
}
exitf("opendir(%s) failed", dir);
}
while ((ep = readdir(dp))) {
if (strcmp(ep->d_name, ".") == 0 || strcmp(ep->d_name, "..") == 0)
continue;
char filename[FILENAME_MAX];
snprintf(filename, sizeof(filename), "%s/%s", dir, ep->d_name);
struct stat st;
if (lstat(filename, &st))
exitf("lstat(%s) failed", filename);
if (S_ISDIR(st.st_mode)) {
remove_dir(filename);
continue;
}
int i;
for (i = 0;; i++) {
if (unlink(filename) == 0)
break;
if (errno == EROFS) {
break;
}
if (errno != EBUSY || i > 100)
exitf("unlink(%s) failed", filename);
if (umount2(filename, MNT_DETACH))
exitf("umount(%s) failed", filename);
}
}
closedir(dp);
int i;
for (i = 0;; i++) {
if (rmdir(dir) == 0)
break;
if (i < 100) {
if (errno == EROFS) {
break;
}
if (errno == EBUSY) {
if (umount2(dir, MNT_DETACH))
exitf("umount(%s) failed", dir);
continue;
}
if (errno == ENOTEMPTY) {
if (iter < 100) {
iter++;
goto retry;
}
}
}
exitf("rmdir(%s) failed", dir);
}
}
static void execute_one();
extern unsigned long long procid;
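// Per-process main loop: each iteration runs the test program in a forked
// child inside a fresh directory, waits up to about 3 seconds, then kills the
// child, removes the directory and resets the netfilter state.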
static void loop()
{
checkpoint_net_namespace();
int iter;
for (iter = 0;; iter++) {
char cwdbuf[32];
sprintf(cwdbuf, "./%d", iter);
if (mkdir(cwdbuf, 0777))
fail("failed to mkdir");
int pid = fork();
if (pid < 0)
fail("clone failed");
if (pid == 0) {
prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
setpgrp();
if (chdir(cwdbuf))
fail("failed to chdir");
execute_one();
doexit(0);
}
int status = 0;
uint64_t start = current_time_ms();
for (;;) {
int res = waitpid(-1, &status, __WALL | WNOHANG);
if (res == pid) {
break;
}
usleep(1000);
if (current_time_ms() - start < 3 * 1000)
continue;
kill(-pid, SIGKILL);
kill(pid, SIGKILL);
while (waitpid(-1, &status, __WALL) != pid) {
}
break;
}
remove_dir(cwdbuf);
reset_net_namespace();
}
}
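// Each syscall of the program is handed to a dedicated worker thread via a
// futex, so that later calls can overlap (and, in "collide" mode, race) with
// earlier calls that block in the kernel.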
struct thread_t {
int created, running, call;
pthread_t th;
};
static struct thread_t threads[16];
static void execute_call(int call);
static int running;
static int collide;
static void* thr(void* arg)
{
struct thread_t* th = (struct thread_t*)arg;
for (;;) {
while (!__atomic_load_n(&th->running, __ATOMIC_ACQUIRE))
syscall(SYS_futex, &th->running, FUTEX_WAIT, 0, 0);
execute_call(th->call);
__atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
__atomic_store_n(&th->running, 0, __ATOMIC_RELEASE);
syscall(SYS_futex, &th->running, FUTEX_WAKE);
}
return 0;
}
static void execute(int num_calls)
{
int call, thread;
running = 0;
for (call = 0; call < num_calls; call++) {
for (thread = 0; thread < sizeof(threads) / sizeof(threads[0]); thread++) {
struct thread_t* th = &threads[thread];
if (!th->created) {
th->created = 1;
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setstacksize(&attr, 128 << 10);
pthread_create(&th->th, &attr, thr, th);
}
if (!__atomic_load_n(&th->running, __ATOMIC_ACQUIRE)) {
th->call = call;
__atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
__atomic_store_n(&th->running, 1, __ATOMIC_RELEASE);
syscall(SYS_futex, &th->running, FUTEX_WAKE);
if (collide && call % 2)
break;
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = 20 * 1000 * 1000;
syscall(SYS_futex, &th->running, FUTEX_WAIT, 1, &ts);
if (running)
usleep((call == num_calls - 1) ? 10000 : 1000);
break;
}
}
}
}
uint64_t r[6] = {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff,
0xffffffffffffffff, 0xffffffffffffffff, 0x0};
unsigned long long procid;
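// The test program itself. Results of earlier calls are kept in r[]:
// r[0] = /dev/kvm fd, r[2] = VM fd (ioctl 0xae01, KVM_CREATE_VM),
// r[3] = VCPU fd (ioctl 0xae41, KVM_CREATE_VCPU). Call 4 loads 64-bit VMX
// guest code through syz_kvm_setup_cpu(), call 8 adds a memory slot
// (KVM_SET_USER_MEMORY_REGION) and call 12 issues KVM_RUN (0xae80); the
// remaining file/socket calls appear to matter only for timing.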
static void execute_call(int call)
{
long res;
switch (call) {
case 0:
NONFAILING(memcpy((void*)0x20000180, "/dev/kvm", 9));
res = syscall(__NR_openat, 0xffffffffffffff9c, 0x20000180, 0, 0);
if (res != -1)
r[0] = res;
break;
case 1:
res = syscall(__NR_dup, r[0]);
if (res != -1)
r[1] = res;
break;
case 2:
res = syscall(__NR_ioctl, r[0], 0xae01, 0);
if (res != -1)
r[2] = res;
break;
case 3:
res = syscall(__NR_ioctl, r[2], 0xae41, 0);
if (res != -1)
r[3] = res;
break;
case 4:
NONFAILING(*(uint64_t*)0x20000040 = 0x20);
NONFAILING(*(uint64_t*)0x20000048 = 0x200000c0);
NONFAILING(memcpy(
(void*)0x200000c0,
"\xea\x00\x28\x00\x00\xb3\x00\x82\xd6\x54\x0f\x30\xea\x03\x00\x00\x00"
"\x2c\x00\x66\xb8\x50\x00\x0f\x00\xd0\x66\xba\x42\x00\xb8\x02\x00\x00"
"\x00\xef\xc7\x44\x24\x00\x0a\x00\x00\x00\xc7\x44\x24\x02\xac\xc7\x00"
"\x00\xc7\x44\x24\x06\x00\x00\x00\x00\x0f\x01\x14\x24\x0f\x20\xe0\x35"
"\x10\x00\x00\x00\x0f\x22\xe0\x3e\x64\x36\x0f\x01\xcf\x0f\x22\xc7",
84));
NONFAILING(*(uint64_t*)0x20000050 = 0x54);
NONFAILING(*(uint64_t*)0x20000380 = 7);
NONFAILING(*(uint64_t*)0x20000388 = 5);
syz_kvm_setup_cpu(r[3], r[3], 0x20016000, 0x20000040, 1, 0, 0x20000380, 1);
break;
case 5:
NONFAILING(memcpy((void*)0x20000000, "\x0f\x22", 2));
syscall(__NR_ioctl, -1, 0x4004ae8b, 0x20000000);
break;
case 6:
NONFAILING(memcpy((void*)0x20000080, "./file0", 8));
res = syscall(__NR_openat, r[3], 0x20000080, 0x40000, 0x20);
if (res != -1)
r[4] = res;
break;
case 7:
syscall(__NR_ioctl, r[4], 0x541b, 0x20000140);
break;
case 8:
NONFAILING(*(uint32_t*)0x20bf7000 = 0);
NONFAILING(*(uint32_t*)0x20bf7004 = 0);
NONFAILING(*(uint64_t*)0x20bf7008 = 0);
NONFAILING(*(uint64_t*)0x20bf7010 = 0x2000);
NONFAILING(*(uint64_t*)0x20bf7018 = 0x20000000);
syscall(__NR_ioctl, r[2], 0x4020ae46, 0x20bf7000);
break;
case 9:
NONFAILING(*(uint32_t*)0x200001c0 = 0);
NONFAILING(*(uint16_t*)0x200001c8 = 2);
NONFAILING(*(uint16_t*)0x200001ca = htobe16(0x4e22));
NONFAILING(*(uint32_t*)0x200001cc = htobe32(0));
NONFAILING(*(uint8_t*)0x200001d0 = 0);
NONFAILING(*(uint8_t*)0x200001d1 = 0);
NONFAILING(*(uint8_t*)0x200001d2 = 0);
NONFAILING(*(uint8_t*)0x200001d3 = 0);
NONFAILING(*(uint8_t*)0x200001d4 = 0);
NONFAILING(*(uint8_t*)0x200001d5 = 0);
NONFAILING(*(uint8_t*)0x200001d6 = 0);
NONFAILING(*(uint8_t*)0x200001d7 = 0);
NONFAILING(*(uint64_t*)0x20000248 = 1);
NONFAILING(*(uint64_t*)0x20000250 = 6);
NONFAILING(*(uint64_t*)0x20000258 = 5);
NONFAILING(*(uint64_t*)0x20000260 = 1);
NONFAILING(*(uint64_t*)0x20000268 = 4);
NONFAILING(*(uint64_t*)0x20000270 = 3);
NONFAILING(*(uint64_t*)0x20000278 = 0x987);
NONFAILING(*(uint64_t*)0x20000280 = 0x3ff);
NONFAILING(*(uint64_t*)0x20000288 = 0x3f);
NONFAILING(*(uint64_t*)0x20000290 = 0x101);
NONFAILING(*(uint64_t*)0x20000298 = 1);
NONFAILING(*(uint64_t*)0x200002a0 = 6);
NONFAILING(*(uint64_t*)0x200002a8 = 0x10000);
NONFAILING(*(uint64_t*)0x200002b0 = 9);
NONFAILING(*(uint64_t*)0x200002b8 = 7);
NONFAILING(*(uint32_t*)0x200002c0 = 0x100);
res = syscall(__NR_getsockopt, r[4], 0x84, 0x70, 0x200001c0, 0x200002c0);
if (res != -1)
NONFAILING(r[5] = *(uint32_t*)0x200001c0);
break;
case 10:
NONFAILING(*(uint32_t*)0x20000300 = r[5]);
NONFAILING(*(uint32_t*)0x20000304 = 1);
syscall(__NR_setsockopt, r[4], 0x84, 0x75, 0x20000300, 8);
break;
case 11:
NONFAILING(memcpy(
(void*)0x200003c0,
"\x65\xa4\x2d\x88\x05\x69\xbd\x05\x5f\x56\xa4\xec\xea\x7c\x51\x21\x12"
"\x8b\x1a\xd8\x4e\xda\xb5\x67\x66\xce\x33\x04\xeb\x69\x42\x63\xc6\x06"
"\x10\xad\xf9\xa8\x62\xcc\x66\x0f\x7b\xf6\xc6\xc9\xa1\xb4\x6f\xa5\xc5"
"\x71\x64\x2d\xfe\x9f\x45\xa5\x19\xea\xee\x67\x8a\xb1\x72\x87\x18\xe7"
"\x5a\xd9\xbe\x2b\x5f\x78\x5e\xd8\x87\x7b\xdf\x7d\xca\x30\x6e\x2b\x8f"
"\x23\x1a\xa8\x3a\xea\x66\xcf\x39\xa0\x18\x58\xf4\xe7\x4b\x99\x24\x30"
"\xb2\xff\xcb\x73\x4d\xeb\x23\xd4\x66\x7b\x67\x66\xc4\xa0\x4f\x2e\x87"
"\xea\x81\xe2\xc4\x79\x34\x75\xd1\xed\xd4\xfc\x78\x18\xa5\xa4\xf1\x8b"
"\x7f\xa6\x8e\x11\x9a\x52\x21\x1f\xb4\x17\x57\xc9\x0f\x05\x17\x06\x67"
"\xc9\xa9\x72\x59\x33\x41\xa0\xa1\x62\xc0\x12\x0e\x5a\x93\x2d\x47\x2f"
"\x7e\x99\x9b\xfc\x5b\x58\xdd\xa1\xfc\x35\x80\x62\x36\x0d\xfa\x0c\x0f"
"\x8f\x2c\x97\xa9\xd0\x33\x16\x52\x9e\xbc\x83\x00\x16\x00\xf2\x44\x1a"
"\xb6\x20\x11\x79\x6a\x5e\x9a\xf4\xe0\x95\xaa\xef\xdc\xf5\x65\xbc\x17"
"\xa9\x15\x16\xc5\xd1\xb2\x64\x53\xfb\xa7\x74\x14\x25\x99\x48\xa4\x3c"
"\xe4\x22\xf5\x4f\x65\x92\xf4\xd8\xfb\x39\xc4\x98",
250));
syscall(__NR_setsockopt, r[1], 6, 0x1c, 0x200003c0, 0xfa);
break;
case 12:
syscall(__NR_ioctl, r[3], 0xae80, 0);
break;
case 13:
NONFAILING(*(uint32_t*)0x200004c0 = 0);
NONFAILING(*(uint32_t*)0x200004c4 = 0x80000000);
NONFAILING(*(uint32_t*)0x200004c8 = 3);
NONFAILING(*(uint32_t*)0x200004cc = 6);
syscall(__NR_setsockopt, r[4], 0x107, 0xd, 0x200004c0, 0x10);
break;
}
}
static void execute_one()
{
execute(14);
collide = 1;
execute(14);
}
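// Maps the fixed 16MB region at 0x20000000 that all calls above use for
// their arguments, then forks 8 worker processes that loop forever.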
int main()
{
syscall(__NR_mmap, 0x20000000, 0x1000000, 3, 0x32, -1, 0);
char* cwd = get_current_dir_name();
for (procid = 0; procid < 8; procid++) {
if (fork() == 0) {
install_segv_handler();
for (;;) {
if (chdir(cwd))
fail("failed to chdir");
use_temporary_dir();
loop();
}
}
}
sleep(1000000);
return 0;
}