// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021, Google LLC.
*
* Tests for adjusting the system counter from userspace
*/
#include <asm/kvm_para.h>
#include <stdint.h>
#include <string.h>
#include <sys/stat.h>
#include <time.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#ifdef __x86_64__
struct test_case {
uint64_t tsc_offset;
};
static struct test_case test_cases[] = {
{ 0 },
{ 180 * NSEC_PER_SEC },
{ -180 * NSEC_PER_SEC },
};
static void check_preconditions(struct kvm_vcpu *vcpu)
{
__TEST_REQUIRE(!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL,
KVM_VCPU_TSC_OFFSET),
"KVM_VCPU_TSC_OFFSET not supported; skipping test");
}
static void setup_system_counter(struct kvm_vcpu *vcpu, struct test_case *test)
{
vcpu_device_attr_set(vcpu, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET,
&test->tsc_offset);
}
static uint64_t guest_read_system_counter(struct test_case *test)
{
return rdtsc();
}
static uint64_t host_read_guest_system_counter(struct test_case *test)
{
return rdtsc() + test->tsc_offset;
}
#else /* __x86_64__ */
#error test not implemented for this architecture!
#endif
#define GUEST_SYNC_CLOCK(__stage, __val) \
GUEST_SYNC_ARGS(__stage, __val, 0, 0, 0)
static void guest_main(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
struct test_case *test = &test_cases[i];
GUEST_SYNC_CLOCK(i, guest_read_system_counter(test));
}
}
static void handle_sync(struct ucall *uc, uint64_t start, uint64_t end)
{
uint64_t obs = uc->args[2];
TEST_ASSERT(start <= obs && obs <= end,
"unexpected system counter value: %"PRIu64" expected range: [%"PRIu64", %"PRIu64"]",
obs, start, end);
pr_info("system counter value: %"PRIu64" expected range [%"PRIu64", %"PRIu64"]\n",
obs, start, end);
}
static void handle_abort(struct ucall *uc)
{
REPORT_GUEST_ASSERT(*uc);
}
static void enter_guest(struct kvm_vcpu *vcpu)
{
uint64_t start, end;
struct ucall uc;
int i;
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
struct test_case *test = &test_cases[i];
setup_system_counter(vcpu, test);
start = host_read_guest_system_counter(test);
vcpu_run(vcpu);
end = host_read_guest_system_counter(test);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
handle_sync(&uc, start, end);
break;
case UCALL_ABORT:
handle_abort(&uc);
return;
default:
TEST_ASSERT(0, "unhandled ucall %ld\n",
get_ucall(vcpu, &uc));
}
}
}
int main(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
check_preconditions(vcpu);
enter_guest(vcpu);
kvm_vm_free(vm);
}
/* End of tools/testing/selftests/kvm/system_counter_offset_test.c (linux-master) */
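/*
 * Illustrative sketch (not part of the selftest above): the bounding argument
 * the test relies on. If the guest counter is defined as host_tsc + offset,
 * then any value the guest samples between two host samples of the same
 * expression must land in [start, end]. The offset below is a made-up
 * example value; rdtsc is x86-64 only.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t host_rdtsc(void)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	int64_t offset = -180LL * 1000000000;	/* hypothetical TSC offset */
	uint64_t start, guest, end;

	start = host_rdtsc() + offset;	/* host view of guest clock, before */
	guest = host_rdtsc() + offset;	/* stands in for the guest's rdtsc() */
	end   = host_rdtsc() + offset;	/* host view of guest clock, after */

	printf("observed value in range: %d\n", start <= guest && guest <= end);
	return 0;
}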
// SPDX-License-Identifier: GPL-2.0-only
/*
* kvm_binary_stats_test
*
* Copyright (C) 2021, Google LLC.
*
* Test the fd-based interface for KVM statistics.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "test_util.h"
#include "kvm_util.h"
#include "asm/kvm.h"
#include "linux/kvm.h"
#include "kselftest.h"
static void stats_test(int stats_fd)
{
ssize_t ret;
int i;
size_t size_desc;
size_t size_data = 0;
struct kvm_stats_header header;
char *id;
struct kvm_stats_desc *stats_desc;
u64 *stats_data;
struct kvm_stats_desc *pdesc;
u32 type, unit, base;
/* Read kvm stats header */
read_stats_header(stats_fd, &header);
size_desc = get_stats_descriptor_size(&header);
/* Read kvm stats id string */
id = malloc(header.name_size);
TEST_ASSERT(id, "Allocate memory for id string");
ret = pread(stats_fd, id, header.name_size, sizeof(header));
TEST_ASSERT(ret == header.name_size,
"Expected header size '%u', read '%lu' bytes",
header.name_size, ret);
/* Check id string, that should start with "kvm" */
TEST_ASSERT(!strncmp(id, "kvm", 3) && strlen(id) < header.name_size,
"Invalid KVM stats type, id: %s", id);
/* Sanity check for other fields in header */
if (header.num_desc == 0) {
ksft_print_msg("No KVM stats defined!\n");
return;
}
/*
* The descriptor and data offsets must be valid, they must not overlap
* the header, and the descriptor and data blocks must not overlap each
* other. Note, the data block is rechecked after its size is known.
*/
TEST_ASSERT(header.desc_offset && header.desc_offset >= sizeof(header) &&
header.data_offset && header.data_offset >= sizeof(header),
"Invalid offset fields in header");
TEST_ASSERT(header.desc_offset > header.data_offset ||
(header.desc_offset + size_desc * header.num_desc <= header.data_offset),
"Descriptor block is overlapped with data block");
/* Read kvm stats descriptors */
stats_desc = read_stats_descriptors(stats_fd, &header);
/* Sanity check for fields in descriptors */
for (i = 0; i < header.num_desc; ++i) {
pdesc = get_stats_descriptor(stats_desc, i, &header);
type = pdesc->flags & KVM_STATS_TYPE_MASK;
unit = pdesc->flags & KVM_STATS_UNIT_MASK;
base = pdesc->flags & KVM_STATS_BASE_MASK;
/* Check name string */
TEST_ASSERT(strlen(pdesc->name) < header.name_size,
"KVM stats name (index: %d) too long", i);
/* Check type, unit, and base boundaries */
TEST_ASSERT(type <= KVM_STATS_TYPE_MAX,
"Unknown KVM stats (%s) type: %u", pdesc->name, type);
TEST_ASSERT(unit <= KVM_STATS_UNIT_MAX,
"Unknown KVM stats (%s) unit: %u", pdesc->name, unit);
TEST_ASSERT(base <= KVM_STATS_BASE_MAX,
"Unknown KVM stats (%s) base: %u", pdesc->name, base);
/*
* Check exponent for stats unit
* Exponent for counter should be greater than or equal to 0
* Exponent for unit bytes should be greater than or equal to 0
* Exponent for unit seconds should be less than or equal to 0
* Exponent for unit clock cycles should be greater than or
* equal to 0
* Exponent for unit boolean should be 0
*/
switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
case KVM_STATS_UNIT_NONE:
case KVM_STATS_UNIT_BYTES:
case KVM_STATS_UNIT_CYCLES:
TEST_ASSERT(pdesc->exponent >= 0,
"Unsupported KVM stats (%s) exponent: %i",
pdesc->name, pdesc->exponent);
break;
case KVM_STATS_UNIT_SECONDS:
TEST_ASSERT(pdesc->exponent <= 0,
"Unsupported KVM stats (%s) exponent: %i",
pdesc->name, pdesc->exponent);
break;
case KVM_STATS_UNIT_BOOLEAN:
TEST_ASSERT(pdesc->exponent == 0,
"Unsupported KVM stats (%s) exponent: %d",
pdesc->name, pdesc->exponent);
break;
}
/* Check size field, which should not be zero */
TEST_ASSERT(pdesc->size,
"KVM descriptor(%s) with size of 0", pdesc->name);
/* Check bucket_size field */
switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
case KVM_STATS_TYPE_LINEAR_HIST:
TEST_ASSERT(pdesc->bucket_size,
"Bucket size of Linear Histogram stats (%s) is zero",
pdesc->name);
break;
default:
TEST_ASSERT(!pdesc->bucket_size,
"Bucket size of stats (%s) is not zero",
pdesc->name);
}
size_data = max(size_data, pdesc->offset + pdesc->size * sizeof(*stats_data));
}
/*
* Now that the size of the data block is known, verify the data block
* doesn't overlap the descriptor block.
*/
TEST_ASSERT(header.data_offset >= header.desc_offset ||
header.data_offset + size_data <= header.desc_offset,
"Data block is overlapped with Descriptor block");
/* Check validity of all stats data size */
TEST_ASSERT(size_data >= header.num_desc * sizeof(*stats_data),
"Data size is not correct");
/* Allocate memory for stats data */
stats_data = malloc(size_data);
TEST_ASSERT(stats_data, "Allocate memory for stats data");
/* Read kvm stats data as a bulk */
ret = pread(stats_fd, stats_data, size_data, header.data_offset);
TEST_ASSERT(ret == size_data, "Read KVM stats data");
/* Read kvm stats data one by one */
for (i = 0; i < header.num_desc; ++i) {
pdesc = get_stats_descriptor(stats_desc, i, &header);
read_stat_data(stats_fd, &header, pdesc, stats_data,
pdesc->size);
}
free(stats_data);
free(stats_desc);
free(id);
close(stats_fd);
TEST_ASSERT(fcntl(stats_fd, F_GETFD) == -1, "Stats fd not freed");
}
#define DEFAULT_NUM_VM 4
#define DEFAULT_NUM_VCPU 4
/*
* Usage: kvm_binary_stats_test [#vm] [#vcpu]
* The first parameter #vm sets the number of VMs to create.
* The second parameter #vcpu sets the number of vCPUs to create in each VM.
* By default, DEFAULT_NUM_VM VMs, each with DEFAULT_NUM_VCPU vCPUs, are
* created for testing.
*/
int main(int argc, char *argv[])
{
int vm_stats_fds, *vcpu_stats_fds;
int i, j;
struct kvm_vcpu **vcpus;
struct kvm_vm **vms;
int max_vm = DEFAULT_NUM_VM;
int max_vcpu = DEFAULT_NUM_VCPU;
/* Get the number of VMs and VCPUs that would be created for testing. */
if (argc > 1) {
max_vm = strtol(argv[1], NULL, 0);
if (max_vm <= 0)
max_vm = DEFAULT_NUM_VM;
}
if (argc > 2) {
max_vcpu = strtol(argv[2], NULL, 0);
if (max_vcpu <= 0)
max_vcpu = DEFAULT_NUM_VCPU;
}
ksft_print_header();
/* Check the extension for binary stats */
TEST_REQUIRE(kvm_has_cap(KVM_CAP_BINARY_STATS_FD));
ksft_set_plan(max_vm);
/* Create VMs and VCPUs */
vms = malloc(sizeof(vms[0]) * max_vm);
TEST_ASSERT(vms, "Allocate memory for storing VM pointers");
vcpus = malloc(sizeof(struct kvm_vcpu *) * max_vm * max_vcpu);
TEST_ASSERT(vcpus, "Allocate memory for storing vCPU pointers");
/*
* Not per-VM as the array is populated, used, and invalidated within a
* single for-loop iteration.
*/
vcpu_stats_fds = calloc(max_vcpu, sizeof(*vcpu_stats_fds));
TEST_ASSERT(vcpu_stats_fds, "Allocate memory for vCPU stats fds");
for (i = 0; i < max_vm; ++i) {
vms[i] = vm_create_barebones();
for (j = 0; j < max_vcpu; ++j)
vcpus[i * max_vcpu + j] = __vm_vcpu_add(vms[i], j);
}
/*
* Check stats read for every VM and vCPU, with a variety of flavors.
* Note, stats_test() closes the passed in stats fd.
*/
for (i = 0; i < max_vm; ++i) {
/*
* Verify that creating multiple userspace references to a
* single stats file works and doesn't cause explosions.
*/
vm_stats_fds = vm_get_stats_fd(vms[i]);
stats_test(dup(vm_stats_fds));
/* Verify userspace can instantiate multiple stats files. */
stats_test(vm_get_stats_fd(vms[i]));
for (j = 0; j < max_vcpu; ++j) {
vcpu_stats_fds[j] = vcpu_get_stats_fd(vcpus[i * max_vcpu + j]);
stats_test(dup(vcpu_stats_fds[j]));
stats_test(vcpu_get_stats_fd(vcpus[i * max_vcpu + j]));
}
/*
* Close the VM fd and redo the stats tests. KVM should gift a
* reference (to the VM) to each stats fd, i.e. stats should
* still be accessible even after userspace has put its last
* _direct_ reference to the VM.
*/
kvm_vm_free(vms[i]);
stats_test(vm_stats_fds);
for (j = 0; j < max_vcpu; ++j)
stats_test(vcpu_stats_fds[j]);
ksft_test_result_pass("vm%i\n", i);
}
free(vms);
free(vcpus);
free(vcpu_stats_fds);
ksft_finished(); /* Print results and exit() accordingly */
}
/* End of tools/testing/selftests/kvm/kvm_binary_stats_test.c (linux-master) */
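/*
 * Minimal sketch of the fd-based stats ABI exercised above, outside the
 * selftest harness. Assumes the caller already has a VM (or vCPU) fd; error
 * handling is abbreviated and the helper name is illustrative. Per the uapi,
 * the header sits at offset 0, the id string at header.id_offset, and the
 * descriptor/data blocks at the offsets the header reports.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void dump_stats_layout(int vm_fd)
{
	struct kvm_stats_header header;
	int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);

	if (stats_fd < 0)
		return;
	if (pread(stats_fd, &header, sizeof(header), 0) == sizeof(header)) {
		char *id = calloc(1, header.name_size);

		if (id && pread(stats_fd, id, header.name_size,
				header.id_offset) == header.name_size)
			printf("id=%s num_desc=%u desc@%u data@%u\n", id,
			       header.num_desc, header.desc_offset,
			       header.data_offset);
		free(id);
	}
	close(stats_fd);
}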
// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <syscall.h>
#include <sys/ioctl.h>
#include <sys/sysinfo.h>
#include <asm/barrier.h>
#include <linux/atomic.h>
#include <linux/rseq.h>
#include <linux/unistd.h>
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"
#include "../rseq/rseq.c"
/*
* Any bug related to task migration is likely to be timing-dependent; perform
* a large number of migrations to reduce the odds of a false negative.
*/
#define NR_TASK_MIGRATIONS 100000
static pthread_t migration_thread;
static cpu_set_t possible_mask;
static int min_cpu, max_cpu;
static bool done;
static atomic_t seq_cnt;
static void guest_code(void)
{
for (;;)
GUEST_SYNC(0);
}
static int next_cpu(int cpu)
{
/*
* Advance to the next CPU, skipping those that weren't in the original
* affinity set. Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's
* data storage is considered opaque. Note, if this task is pinned
* to a small set of discontiguous CPUs, e.g. 2 and 1023, this loop will
* burn a lot of cycles and the test will take longer than normal to
* complete.
*/
do {
cpu++;
if (cpu > max_cpu) {
cpu = min_cpu;
TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
"Min CPU = %d must always be usable", cpu);
break;
}
} while (!CPU_ISSET(cpu, &possible_mask));
return cpu;
}
static void *migration_worker(void *__rseq_tid)
{
pid_t rseq_tid = (pid_t)(unsigned long)__rseq_tid;
cpu_set_t allowed_mask;
int r, i, cpu;
CPU_ZERO(&allowed_mask);
for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
CPU_SET(cpu, &allowed_mask);
/*
* Bump the sequence count twice to allow the reader to detect
* that a migration may have occurred in between rseq and sched
* CPU ID reads. An odd sequence count indicates a migration
* is in-progress, while a completely different count indicates
* a migration occurred since the count was last read.
*/
atomic_inc(&seq_cnt);
/*
* Ensure the odd count is visible while getcpu() isn't
* stable, i.e. while changing affinity is in-progress.
*/
smp_wmb();
r = sched_setaffinity(rseq_tid, sizeof(allowed_mask), &allowed_mask);
TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
errno, strerror(errno));
smp_wmb();
atomic_inc(&seq_cnt);
CPU_CLR(cpu, &allowed_mask);
/*
* Wait 1-10us before proceeding to the next iteration and more
* specifically, before bumping seq_cnt again. A delay is
* needed on three fronts:
*
* 1. To allow sched_setaffinity() to prompt migration before
* ioctl(KVM_RUN) enters the guest so that TIF_NOTIFY_RESUME
* (or TIF_NEED_RESCHED, which indirectly leads to handling
* NOTIFY_RESUME) is handled in KVM context.
*
* If NOTIFY_RESUME/NEED_RESCHED is set after KVM enters
* the guest, the guest will trigger a IO/MMIO exit all the
* way to userspace and the TIF flags will be handled by
* the generic "exit to userspace" logic, not by KVM. The
* exit to userspace is necessary to give the test a chance
* to check the rseq CPU ID (see #2).
*
* Alternatively, guest_code() could include an instruction
* to trigger an exit that is handled by KVM, but any such
* exit requires architecture specific code.
*
* 2. To let ioctl(KVM_RUN) make its way back to the test
* before the next round of migration. The test's check on
* the rseq CPU ID must wait for migration to complete in
* order to avoid false positive, thus any kernel rseq bug
* will be missed if the next migration starts before the
* check completes.
*
* 3. To ensure the read-side makes efficient forward progress,
* e.g. if getcpu() involves a syscall. Stalling the read-side
* means the test will spend more time waiting for getcpu()
* to stabilize and less time trying to hit the timing-dependent
* bug.
*
* Because any bug in this area is likely to be timing-dependent,
* run with a range of delays at 1us intervals from 1us to 10us
* as a best effort to avoid tuning the test to the point where
* it can hit _only_ the original bug and not detect future
* regressions.
*
* The original bug can reproduce with a delay up to ~500us on
* x86-64, but starts to require more iterations to reproduce
* as the delay creeps above ~10us, and the average runtime of
* each iteration obviously increases as well. Cap the delay
* at 10us to keep test runtime reasonable while minimizing
* potential coverage loss.
*
* The lower bound for reproducing the bug is likely below 1us,
* e.g. failures occur on x86-64 with nanosleep(0), but at that
* point the overhead of the syscall likely dominates the delay.
* Use usleep() for simplicity and to avoid unnecessary kernel
* dependencies.
*/
usleep((i % 10) + 1);
}
done = true;
return NULL;
}
static void calc_min_max_cpu(void)
{
int i, cnt, nproc;
TEST_REQUIRE(CPU_COUNT(&possible_mask) >= 2);
/*
* CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
* this task is affined to in order to reduce the time spent querying
* unusable CPUs, e.g. if this task is pinned to a small percentage of
* total CPUs.
*/
nproc = get_nprocs_conf();
min_cpu = -1;
max_cpu = -1;
cnt = 0;
for (i = 0; i < nproc; i++) {
if (!CPU_ISSET(i, &possible_mask))
continue;
if (min_cpu == -1)
min_cpu = i;
max_cpu = i;
cnt++;
}
__TEST_REQUIRE(cnt >= 2,
"Only one usable CPU, task migration not possible");
}
int main(int argc, char *argv[])
{
int r, i, snapshot;
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
u32 cpu, rseq_cpu;
r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
strerror(errno));
calc_min_max_cpu();
r = rseq_register_current_thread();
TEST_ASSERT(!r, "rseq_register_current_thread failed, errno = %d (%s)",
errno, strerror(errno));
/*
* Create and run a dummy VM that immediately exits to userspace via
* GUEST_SYNC, while concurrently migrating the process by setting its
* CPU affinity.
*/
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
pthread_create(&migration_thread, NULL, migration_worker,
(void *)(unsigned long)syscall(SYS_gettid));
for (i = 0; !done; i++) {
vcpu_run(vcpu);
TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Guest failed?");
/*
* Verify rseq's CPU matches sched's CPU. Ensure migration
* doesn't occur between getcpu() and reading the rseq cpu_id
* by rereading both if the sequence count changes, or if the
* count is odd (migration in-progress).
*/
do {
/*
* Drop bit 0 to force a mismatch if the count is odd,
* i.e. if a migration is in-progress.
*/
snapshot = atomic_read(&seq_cnt) & ~1;
/*
* Ensure calling getcpu() and reading rseq.cpu_id complete
* in a single "no migration" window, i.e. are not reordered
* across the seq_cnt reads.
*/
smp_rmb();
r = sys_getcpu(&cpu, NULL);
TEST_ASSERT(!r, "getcpu failed, errno = %d (%s)",
errno, strerror(errno));
rseq_cpu = rseq_current_cpu_raw();
smp_rmb();
} while (snapshot != atomic_read(&seq_cnt));
TEST_ASSERT(rseq_cpu == cpu,
"rseq CPU = %d, sched CPU = %d\n", rseq_cpu, cpu);
}
/*
* Sanity check that the test was able to enter the guest a reasonable
* number of times, e.g. didn't get stalled too often/long waiting for
* getcpu() to stabilize. A 2:1 migration:KVM_RUN ratio is a fairly
* conservative ratio on x86-64, which can do _more_ KVM_RUNs than
* migrations given the 1us+ delay in the migration task.
*/
TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
"Only performed %d KVM_RUNs, task stalled too much?\n", i);
pthread_join(migration_thread, NULL);
kvm_vm_free(vm);
rseq_unregister_current_thread();
return 0;
}
/* End of tools/testing/selftests/kvm/rseq_test.c (linux-master) */
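/*
 * Distilled sketch of the sequence-count handshake used above, written with
 * C11 atomics instead of the kernel-style atomic_t helpers (the data fields
 * are illustrative). The writer holds an odd count while the protected data
 * is unstable; the reader clears bit 0 of its snapshot so an in-progress
 * update always forces a retry. A production seqlock would also make the
 * data accesses themselves atomic to avoid formal data races.
 */
#include <stdatomic.h>

static atomic_int seq_cnt;
static int data_a, data_b;

static void writer_update(int a, int b)
{
	atomic_fetch_add_explicit(&seq_cnt, 1, memory_order_relaxed); /* odd */
	atomic_thread_fence(memory_order_release);	/* like smp_wmb() */
	data_a = a;
	data_b = b;
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&seq_cnt, 1, memory_order_relaxed); /* even */
}

static void reader_snapshot(int *a, int *b)
{
	int snap;

	do {
		snap = atomic_load_explicit(&seq_cnt, memory_order_relaxed) & ~1;
		atomic_thread_fence(memory_order_acquire);	/* like smp_rmb() */
		*a = data_a;
		*b = data_b;
		atomic_thread_fence(memory_order_acquire);
	} while (snap != atomic_load_explicit(&seq_cnt, memory_order_relaxed));
}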
// SPDX-License-Identifier: GPL-2.0
/*
* steal/stolen time test
*
* Copyright (C) 2020, Red Hat, Inc.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>
#include <linux/kernel.h>
#include <asm/kvm.h>
#include <asm/kvm_para.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#define NR_VCPUS 4
#define ST_GPA_BASE (1 << 30)
static void *st_gva[NR_VCPUS];
static uint64_t guest_stolen_time[NR_VCPUS];
#if defined(__x86_64__)
/* steal_time must have 64-byte alignment */
#define STEAL_TIME_SIZE ((sizeof(struct kvm_steal_time) + 63) & ~63)
static void check_status(struct kvm_steal_time *st)
{
GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0);
GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0);
}
static void guest_code(int cpu)
{
struct kvm_steal_time *st = st_gva[cpu];
uint32_t version;
GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));
memset(st, 0, sizeof(*st));
GUEST_SYNC(0);
check_status(st);
WRITE_ONCE(guest_stolen_time[cpu], st->steal);
version = READ_ONCE(st->version);
check_status(st);
GUEST_SYNC(1);
check_status(st);
GUEST_ASSERT(version < READ_ONCE(st->version));
WRITE_ONCE(guest_stolen_time[cpu], st->steal);
check_status(st);
GUEST_DONE();
}
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
return kvm_cpu_has(X86_FEATURE_KVM_STEAL_TIME);
}
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
int ret;
/* ST_GPA_BASE is identity mapped */
st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
sync_global_to_guest(vcpu->vm, st_gva[i]);
ret = _vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME,
(ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
TEST_ASSERT(ret == 0, "Bad GPA didn't fail");
vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
}
static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
int i;
pr_info("VCPU%d:\n", vcpu_idx);
pr_info(" steal: %lld\n", st->steal);
pr_info(" version: %d\n", st->version);
pr_info(" flags: %d\n", st->flags);
pr_info(" preempted: %d\n", st->preempted);
pr_info(" u8_pad: ");
for (i = 0; i < 3; ++i)
pr_info("%d", st->u8_pad[i]);
pr_info("\n pad: ");
for (i = 0; i < 11; ++i)
pr_info("%d", st->pad[i]);
pr_info("\n");
}
#elif defined(__aarch64__)
/* PV_TIME_ST must have 64-byte alignment */
#define STEAL_TIME_SIZE ((sizeof(struct st_time) + 63) & ~63)
#define SMCCC_ARCH_FEATURES 0x80000001
#define PV_TIME_FEATURES 0xc5000020
#define PV_TIME_ST 0xc5000021
struct st_time {
uint32_t rev;
uint32_t attr;
uint64_t st_time;
};
static int64_t smccc(uint32_t func, uint64_t arg)
{
struct arm_smccc_res res;
smccc_hvc(func, arg, 0, 0, 0, 0, 0, 0, &res);
return res.a0;
}
static void check_status(struct st_time *st)
{
GUEST_ASSERT_EQ(READ_ONCE(st->rev), 0);
GUEST_ASSERT_EQ(READ_ONCE(st->attr), 0);
}
static void guest_code(int cpu)
{
struct st_time *st;
int64_t status;
status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
GUEST_ASSERT_EQ(status, 0);
status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES);
GUEST_ASSERT_EQ(status, 0);
status = smccc(PV_TIME_FEATURES, PV_TIME_ST);
GUEST_ASSERT_EQ(status, 0);
status = smccc(PV_TIME_ST, 0);
GUEST_ASSERT_NE(status, -1);
GUEST_ASSERT_EQ(status, (ulong)st_gva[cpu]);
st = (struct st_time *)status;
GUEST_SYNC(0);
check_status(st);
WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
GUEST_SYNC(1);
check_status(st);
WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
GUEST_DONE();
}
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
struct kvm_device_attr dev = {
.group = KVM_ARM_VCPU_PVTIME_CTRL,
.attr = KVM_ARM_VCPU_PVTIME_IPA,
};
return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
}
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
struct kvm_vm *vm = vcpu->vm;
uint64_t st_ipa;
int ret;
struct kvm_device_attr dev = {
.group = KVM_ARM_VCPU_PVTIME_CTRL,
.attr = KVM_ARM_VCPU_PVTIME_IPA,
.addr = (uint64_t)&st_ipa,
};
vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
/* ST_GPA_BASE is identity mapped */
st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
sync_global_to_guest(vm, st_gva[i]);
st_ipa = (ulong)st_gva[i] | 1;
ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");
st_ipa = (ulong)st_gva[i];
vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");
}
static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
pr_info("VCPU%d:\n", vcpu_idx);
pr_info(" rev: %d\n", st->rev);
pr_info(" attr: %d\n", st->attr);
pr_info(" st_time: %ld\n", st->st_time);
}
#endif
static void *do_steal_time(void *arg)
{
struct timespec ts, stop;
clock_gettime(CLOCK_MONOTONIC, &ts);
stop = timespec_add_ns(ts, MIN_RUN_DELAY_NS);
while (1) {
clock_gettime(CLOCK_MONOTONIC, &ts);
if (timespec_to_ns(timespec_sub(ts, stop)) >= 0)
break;
}
return NULL;
}
static void run_vcpu(struct kvm_vcpu *vcpu)
{
struct ucall uc;
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
case UCALL_DONE:
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
default:
TEST_ASSERT(false, "Unexpected exit: %s",
exit_reason_str(vcpu->run->exit_reason));
}
}
int main(int ac, char **av)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
struct kvm_vm *vm;
pthread_attr_t attr;
pthread_t thread;
cpu_set_t cpuset;
unsigned int gpages;
long stolen_time;
long run_delay;
bool verbose;
int i;
verbose = ac > 1 && (!strncmp(av[1], "-v", 3) || !strncmp(av[1], "--verbose", 10));
/* Set CPU affinity so we can force preemption of the VCPU */
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
pthread_attr_init(&attr);
pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
/* Create a VM and an identity mapped memslot for the steal time structure */
vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
TEST_REQUIRE(is_steal_time_supported(vcpus[0]));
/* Run test on each VCPU */
for (i = 0; i < NR_VCPUS; ++i) {
steal_time_init(vcpus[i], i);
vcpu_args_set(vcpus[i], 1, i);
/* First VCPU run initializes steal-time */
run_vcpu(vcpus[i]);
/* Second VCPU run, expect guest stolen time to be <= run_delay */
run_vcpu(vcpus[i]);
sync_global_from_guest(vm, guest_stolen_time[i]);
stolen_time = guest_stolen_time[i];
run_delay = get_run_delay();
TEST_ASSERT(stolen_time <= run_delay,
"Expected stolen time <= %ld, got %ld",
run_delay, stolen_time);
/* Steal time from the VCPU. The steal time thread has the same CPU affinity as the VCPUs. */
run_delay = get_run_delay();
pthread_create(&thread, &attr, do_steal_time, NULL);
do
sched_yield();
while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
pthread_join(thread, NULL);
run_delay = get_run_delay() - run_delay;
TEST_ASSERT(run_delay >= MIN_RUN_DELAY_NS,
"Expected run_delay >= %ld, got %ld",
MIN_RUN_DELAY_NS, run_delay);
/* Run VCPU again to confirm stolen time is consistent with run_delay */
run_vcpu(vcpus[i]);
sync_global_from_guest(vm, guest_stolen_time[i]);
stolen_time = guest_stolen_time[i] - stolen_time;
TEST_ASSERT(stolen_time >= run_delay,
"Expected stolen time >= %ld, got %ld",
run_delay, stolen_time);
if (verbose) {
pr_info("VCPU%d: total-stolen-time=%ld test-stolen-time=%ld", i,
guest_stolen_time[i], stolen_time);
if (stolen_time == run_delay)
pr_info(" (BONUS: guest test-stolen-time even exactly matches test-run_delay)");
pr_info("\n");
steal_time_dump(vm, i);
}
}
return 0;
}
/* End of tools/testing/selftests/kvm/steal_time.c (linux-master) */
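/*
 * Independent illustration of the get_run_delay() measurement the test above
 * leans on (the real helper lives in the selftests' lib/test_util.c).
 * /proc/<pid>/schedstat exposes three numbers: time spent on-CPU (ns), time
 * spent waiting on a runqueue (ns), and the number of timeslices run; the
 * stolen time reported to the guest should track the second field.
 */
#include <stdio.h>

static long read_run_delay_ns(void)
{
	long on_cpu, run_delay, slices;
	FILE *fp = fopen("/proc/self/schedstat", "r");

	if (!fp)
		return -1;
	if (fscanf(fp, "%ld %ld %ld", &on_cpu, &run_delay, &slices) != 3)
		run_delay = -1;
	fclose(fp);
	return run_delay;
}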
// SPDX-License-Identifier: GPL-2.0
/*
* access_tracking_perf_test
*
* Copyright (C) 2021, Google, Inc.
*
* This test measures the performance effects of KVM's access tracking.
* Access tracking is driven by the MMU notifiers test_young, clear_young, and
* clear_flush_young. These notifiers do not have a direct userspace API,
* however the clear_young notifier can be triggered by marking pages as idle
* in /sys/kernel/mm/page_idle/bitmap. This test leverages that mechanism to
* enable access tracking on guest memory.
*
* To measure performance this test runs a VM with a configurable number of
* vCPUs that each touch every page in disjoint regions of memory. Performance
* is measured in the time it takes all vCPUs to finish touching their
* predefined region.
*
* Note that a deterministic correctness test of access tracking is not possible
* by using page_idle as it exists today. This is for a few reasons:
*
* 1. page_idle only issues clear_young notifiers, which lack a TLB flush. This
* means subsequent guest accesses are not guaranteed to see page table
* updates made by KVM until some time in the future.
*
* 2. page_idle only operates on LRU pages. Newly allocated pages are not
* immediately added to LRU lists. Instead they are held in a "pagevec",
* which is drained to LRU lists some time in the future. There is no
* userspace API to force this drain to occur.
*
* These limitations are worked around in this test by using a large enough
* region of memory for each vCPU such that the number of translations cached in
* the TLB and the number of pages held in pagevecs are a small fraction of the
* overall workload. If either of those conditions does not hold (for example
* in nesting, where TLB size is unlimited) this test will print a warning
* rather than silently passing.
*/
#include <inttypes.h>
#include <limits.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "kvm_util.h"
#include "test_util.h"
#include "memstress.h"
#include "guest_modes.h"
#include "processor.h"
/* Global variable used to synchronize all of the vCPU threads. */
static int iteration;
/* Defines what vCPU threads should do during a given iteration. */
static enum {
/* Run the vCPU to access all its memory. */
ITERATION_ACCESS_MEMORY,
/* Mark the vCPU's memory idle in page_idle. */
ITERATION_MARK_IDLE,
} iteration_work;
/* The iteration that was last completed by each vCPU. */
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
/* Whether to overlap the regions of memory vCPUs access. */
static bool overlap_memory_access;
struct test_params {
/* The backing source for the region of memory. */
enum vm_mem_backing_src_type backing_src;
/* The amount of memory to allocate for each vCPU. */
uint64_t vcpu_memory_bytes;
/* The number of vCPUs to create in the VM. */
int nr_vcpus;
};
static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
{
uint64_t value;
off_t offset = index * sizeof(value);
TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value),
"pread from %s offset 0x%" PRIx64 " failed!",
filename, offset);
return value;
}
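/*
 * /proc/<pid>/pagemap entry layout: bits 0-54 hold the PFN of a present,
 * non-swapped page and bit 63 is the page-present flag. Note, PFNs read as
 * zero unless the reader has CAP_SYS_ADMIN.
 */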
#define PAGEMAP_PRESENT (1ULL << 63)
#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1)
static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
{
uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
uint64_t entry;
uint64_t pfn;
entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize());
if (!(entry & PAGEMAP_PRESENT))
return 0;
pfn = entry & PAGEMAP_PFN_MASK;
__TEST_REQUIRE(pfn, "Looking up PFNs requires CAP_SYS_ADMIN");
return pfn;
}
static bool is_page_idle(int page_idle_fd, uint64_t pfn)
{
uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);
return !!((bits >> (pfn % 64)) & 1);
}
static void mark_page_idle(int page_idle_fd, uint64_t pfn)
{
uint64_t bits = 1ULL << (pfn % 64);
TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
"Set page_idle bits for PFN 0x%" PRIx64, pfn);
}
static void mark_vcpu_memory_idle(struct kvm_vm *vm,
struct memstress_vcpu_args *vcpu_args)
{
int vcpu_idx = vcpu_args->vcpu_idx;
uint64_t base_gva = vcpu_args->gva;
uint64_t pages = vcpu_args->pages;
uint64_t page;
uint64_t still_idle = 0;
uint64_t no_pfn = 0;
int page_idle_fd;
int pagemap_fd;
/* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */
if (overlap_memory_access && vcpu_idx)
return;
page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
TEST_ASSERT(page_idle_fd > 0, "Failed to open page_idle.");
pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");
for (page = 0; page < pages; page++) {
uint64_t gva = base_gva + page * memstress_args.guest_page_size;
uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
if (!pfn) {
no_pfn++;
continue;
}
if (is_page_idle(page_idle_fd, pfn)) {
still_idle++;
continue;
}
mark_page_idle(page_idle_fd, pfn);
}
/*
* Assumption: Less than 1% of pages are going to be swapped out from
* under us during this test.
*/
TEST_ASSERT(no_pfn < pages / 100,
"vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.",
vcpu_idx, no_pfn, pages);
/*
* Check that at least 90% of memory has been marked idle (the rest
* might not be marked idle because the pages have not yet made it to an
* LRU list or the translations are still cached in the TLB). 90% is
* arbitrary; high enough that we ensure most memory access went through
* access tracking but low enough as to not make the test too brittle
* over time and across architectures.
*
* When running the guest as a nested VM, "warn" instead of asserting
* as the TLB size is effectively unlimited and KVM doesn't
* explicitly flush the TLB when aging SPTEs. As a result, more pages
* are cached and the guest won't see the "idle" bit cleared.
*/
if (still_idle >= pages / 10) {
#ifdef __x86_64__
TEST_ASSERT(this_cpu_has(X86_FEATURE_HYPERVISOR),
"vCPU%d: Too many pages still idle (%lu out of %lu)",
vcpu_idx, still_idle, pages);
#endif
printf("WARNING: vCPU%d: Too many pages still idle (%lu out of %lu), "
"this will affect performance results.\n",
vcpu_idx, still_idle, pages);
}
close(page_idle_fd);
close(pagemap_fd);
}
static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall)
{
struct ucall uc;
uint64_t actual_ucall = get_ucall(vcpu, &uc);
TEST_ASSERT(expected_ucall == actual_ucall,
"Guest exited unexpectedly (expected ucall %" PRIu64
", got %" PRIu64 ")",
expected_ucall, actual_ucall);
}
static bool spin_wait_for_next_iteration(int *current_iteration)
{
int last_iteration = *current_iteration;
do {
if (READ_ONCE(memstress_args.stop_vcpus))
return false;
*current_iteration = READ_ONCE(iteration);
} while (last_iteration == *current_iteration);
return true;
}
static void vcpu_thread_main(struct memstress_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
struct kvm_vm *vm = memstress_args.vm;
int vcpu_idx = vcpu_args->vcpu_idx;
int current_iteration = 0;
while (spin_wait_for_next_iteration(&current_iteration)) {
switch (READ_ONCE(iteration_work)) {
case ITERATION_ACCESS_MEMORY:
vcpu_run(vcpu);
assert_ucall(vcpu, UCALL_SYNC);
break;
case ITERATION_MARK_IDLE:
mark_vcpu_memory_idle(vm, vcpu_args);
break;
};
vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
}
}
static void spin_wait_for_vcpu(int vcpu_idx, int target_iteration)
{
while (READ_ONCE(vcpu_last_completed_iteration[vcpu_idx]) !=
target_iteration) {
continue;
}
}
/* The type of memory accesses to perform in the VM. */
enum access_type {
ACCESS_READ,
ACCESS_WRITE,
};
static void run_iteration(struct kvm_vm *vm, int nr_vcpus, const char *description)
{
struct timespec ts_start;
struct timespec ts_elapsed;
int next_iteration, i;
/* Kick off the vCPUs by incrementing iteration. */
next_iteration = ++iteration;
clock_gettime(CLOCK_MONOTONIC, &ts_start);
/* Wait for all vCPUs to finish the iteration. */
for (i = 0; i < nr_vcpus; i++)
spin_wait_for_vcpu(i, next_iteration);
ts_elapsed = timespec_elapsed(ts_start);
pr_info("%-30s: %ld.%09lds\n",
description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec);
}
static void access_memory(struct kvm_vm *vm, int nr_vcpus,
enum access_type access, const char *description)
{
memstress_set_write_percent(vm, (access == ACCESS_READ) ? 0 : 100);
iteration_work = ITERATION_ACCESS_MEMORY;
run_iteration(vm, nr_vcpus, description);
}
static void mark_memory_idle(struct kvm_vm *vm, int nr_vcpus)
{
/*
* Even though this parallelizes the work across vCPUs, this is still a
* very slow operation because page_idle forces the test to mark one pfn
* at a time and the clear_young notifier serializes on the KVM MMU
* lock.
*/
pr_debug("Marking VM memory idle (slow)...\n");
iteration_work = ITERATION_MARK_IDLE;
run_iteration(vm, nr_vcpus, "Mark memory idle");
}
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *params = arg;
struct kvm_vm *vm;
int nr_vcpus = params->nr_vcpus;
vm = memstress_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
params->backing_src, !overlap_memory_access);
memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
pr_info("\n");
access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");
/* As a control, read and write to the populated memory first. */
access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to populated memory");
access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from populated memory");
/* Repeat on memory that has been marked as idle. */
mark_memory_idle(vm, nr_vcpus);
access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to idle memory");
mark_memory_idle(vm, nr_vcpus);
access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from idle memory");
memstress_join_vcpu_threads(nr_vcpus);
memstress_destroy_vm(vm);
}
static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o] [-s mem_type]\n",
name);
puts("");
printf(" -h: Display this help message.");
guest_modes_help();
printf(" -b: specify the size of the memory region which should be\n"
" dirtied by each vCPU. e.g. 10M or 3G.\n"
" (default: 1G)\n");
printf(" -v: specify the number of vCPUs to run.\n");
printf(" -o: Overlap guest memory accesses instead of partitioning\n"
" them into a separate region of memory for each vCPU.\n");
backing_src_help("-s");
puts("");
exit(0);
}
int main(int argc, char *argv[])
{
struct test_params params = {
.backing_src = DEFAULT_VM_MEM_SRC,
.vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
.nr_vcpus = 1,
};
int page_idle_fd;
int opt;
guest_modes_append_default();
while ((opt = getopt(argc, argv, "hm:b:v:os:")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
break;
case 'b':
params.vcpu_memory_bytes = parse_size(optarg);
break;
case 'v':
params.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
break;
case 'o':
overlap_memory_access = true;
break;
case 's':
params.backing_src = parse_backing_src_type(optarg);
break;
case 'h':
default:
help(argv[0]);
break;
}
}
page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
__TEST_REQUIRE(page_idle_fd >= 0,
"CONFIG_IDLE_PAGE_TRACKING is not enabled");
close(page_idle_fd);
for_each_guest_mode(run_test, &params);
return 0;
}
/* End of tools/testing/selftests/kvm/access_tracking_perf_test.c (linux-master) */
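/*
 * Standalone sketch of the page_idle bitmap indexing used above (the helper
 * name is illustrative). The bitmap exposes one bit per PFN packed into
 * 8-byte words, so PFN p lives at bit (p % 64) of the u64 at file offset
 * 8 * (p / 64). Writing a set bit marks the page idle; reading it back later
 * reports whether the page has been accessed since (bit still set means the
 * page is still idle). Requires CAP_SYS_ADMIN.
 */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int mark_idle_and_recheck(uint64_t pfn)
{
	uint64_t bit = 1ULL << (pfn % 64), word = bit;
	off_t offset = 8 * (pfn / 64);
	int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
	int idle = -1;

	if (fd < 0)
		return -1;
	if (pwrite(fd, &word, 8, offset) == 8 &&	/* mark the page idle */
	    pread(fd, &word, 8, offset) == 8)		/* re-read the word */
		idle = !!(word & bit);
	close(fd);
	return idle;
}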
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <linux/sizes.h>
#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"
static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
{
uint64_t gpa;
for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
*((volatile uint64_t *)gpa) = gpa;
GUEST_DONE();
}
struct vcpu_info {
struct kvm_vcpu *vcpu;
uint64_t start_gpa;
uint64_t end_gpa;
};
static int nr_vcpus;
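/*
 * Rendezvous between the boss thread and all vCPU threads. The boss arms the
 * counter with +/-(nr_vcpus + 1); each vCPU moves it one step toward zero and
 * then spins until the boss flips the sign, which releases every waiter at
 * once. Alternating the sign each round lets the counter be rearmed without
 * a separate reset phase racing against late spinners.
 */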
static atomic_t rendezvous;
static void rendezvous_with_boss(void)
{
int orig = atomic_read(&rendezvous);
if (orig > 0) {
atomic_dec_and_test(&rendezvous);
while (atomic_read(&rendezvous) > 0)
cpu_relax();
} else {
atomic_inc(&rendezvous);
while (atomic_read(&rendezvous) < 0)
cpu_relax();
}
}
static void run_vcpu(struct kvm_vcpu *vcpu)
{
vcpu_run(vcpu);
TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
}
static void *vcpu_worker(void *data)
{
struct vcpu_info *info = data;
struct kvm_vcpu *vcpu = info->vcpu;
struct kvm_vm *vm = vcpu->vm;
struct kvm_sregs sregs;
struct kvm_regs regs;
vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);
/* Snapshot regs before the first run. */
vcpu_regs_get(vcpu, &regs);
rendezvous_with_boss();
run_vcpu(vcpu);
rendezvous_with_boss();
vcpu_regs_set(vcpu, &regs);
vcpu_sregs_get(vcpu, &sregs);
#ifdef __x86_64__
/* Toggle CR0.WP to trigger a MMU context reset. */
sregs.cr0 ^= X86_CR0_WP;
#endif
vcpu_sregs_set(vcpu, &sregs);
rendezvous_with_boss();
run_vcpu(vcpu);
rendezvous_with_boss();
return NULL;
}
static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
uint64_t start_gpa, uint64_t end_gpa)
{
struct vcpu_info *info;
uint64_t gpa, nr_bytes;
pthread_t *threads;
int i;
threads = malloc(nr_vcpus * sizeof(*threads));
TEST_ASSERT(threads, "Failed to allocate vCPU threads");
info = malloc(nr_vcpus * sizeof(*info));
TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");
nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
~((uint64_t)vm->page_size - 1);
TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);
for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
info[i].vcpu = vcpus[i];
info[i].start_gpa = gpa;
info[i].end_gpa = gpa + nr_bytes;
pthread_create(&threads[i], NULL, vcpu_worker, &info[i]);
}
return threads;
}
static void rendezvous_with_vcpus(struct timespec *time, const char *name)
{
int i, rendezvoused;
pr_info("Waiting for vCPUs to finish %s...\n", name);
rendezvoused = atomic_read(&rendezvous);
for (i = 0; abs(rendezvoused) != 1; i++) {
usleep(100);
if (!(i & 0x3f))
pr_info("\r%d vCPUs haven't rendezvoused...",
abs(rendezvoused) - 1);
rendezvoused = atomic_read(&rendezvous);
}
clock_gettime(CLOCK_MONOTONIC, time);
/* Release the vCPUs after getting the time of the previous action. */
pr_info("\rAll vCPUs finished %s, releasing...\n", name);
if (rendezvoused > 0)
atomic_set(&rendezvous, -nr_vcpus - 1);
else
atomic_set(&rendezvous, nr_vcpus + 1);
}
static void calc_default_nr_vcpus(void)
{
cpu_set_t possible_mask;
int r;
r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)",
errno, strerror(errno));
nr_vcpus = CPU_COUNT(&possible_mask) * 3/4;
TEST_ASSERT(nr_vcpus > 0, "Uh, no CPUs?");
}
int main(int argc, char *argv[])
{
/*
* Skip the first 4gb and slot0. slot0 maps <1gb and is used to back
* the guest's code, stack, and page tables. Because selftests creates
* an IRQCHIP, a.k.a. a local APIC, KVM creates an internal memslot
* just below the 4gb boundary. This test could create memory at
* 1gb-3gb, but it's simpler to skip straight to 4gb.
*/
const uint64_t start_gpa = SZ_4G;
const int first_slot = 1;
struct timespec time_start, time_run1, time_reset, time_run2;
uint64_t max_gpa, gpa, slot_size, max_mem, i;
int max_slots, slot, opt, fd;
bool hugepages = false;
struct kvm_vcpu **vcpus;
pthread_t *threads;
struct kvm_vm *vm;
void *mem;
/*
* Default to 2gb so that maxing out systems with MAXPHYADDR=46, which
* are quite common for x86, requires changing only max_mem (KVM allows
* 32k memslots, 32k * 2gb == ~64tb of guest memory).
*/
slot_size = SZ_2G;
max_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
TEST_ASSERT(max_slots > first_slot, "KVM is broken");
/* All KVM MMUs should be able to survive a 128gb guest. */
max_mem = 128ull * SZ_1G;
calc_default_nr_vcpus();
while ((opt = getopt(argc, argv, "c:h:m:s:H")) != -1) {
switch (opt) {
case 'c':
nr_vcpus = atoi_positive("Number of vCPUs", optarg);
break;
case 'm':
max_mem = 1ull * atoi_positive("Memory size", optarg) * SZ_1G;
break;
case 's':
slot_size = 1ull * atoi_positive("Slot size", optarg) * SZ_1G;
break;
case 'H':
hugepages = true;
break;
case 'h':
default:
printf("usage: %s [-c nr_vcpus] [-m max_mem_in_gb] [-s slot_size_in_gb] [-H]\n", argv[0]);
exit(1);
}
}
vcpus = malloc(nr_vcpus * sizeof(*vcpus));
TEST_ASSERT(vcpus, "Failed to allocate vCPU array");
vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
max_gpa = vm->max_gfn << vm->page_shift;
TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb ");
fd = kvm_memfd_alloc(slot_size, hugepages);
mem = mmap(NULL, slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");
TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");
/* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */
for (i = 0; i < slot_size; i += vm->page_size)
((uint8_t *)mem)[i] = 0xaa;
gpa = 0;
for (slot = first_slot; slot < max_slots; slot++) {
gpa = start_gpa + ((slot - first_slot) * slot_size);
if (gpa + slot_size > max_gpa)
break;
if ((gpa - start_gpa) >= max_mem)
break;
vm_set_user_memory_region(vm, slot, 0, gpa, slot_size, mem);
#ifdef __x86_64__
/* Identity map memory in the guest using 1gb pages. */
for (i = 0; i < slot_size; i += SZ_1G)
__virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G);
#else
for (i = 0; i < slot_size; i += vm->page_size)
virt_pg_map(vm, gpa + i, gpa + i);
#endif
}
atomic_set(&rendezvous, nr_vcpus + 1);
threads = spawn_workers(vm, vcpus, start_gpa, gpa);
free(vcpus);
vcpus = NULL;
pr_info("Running with %lugb of guest memory and %u vCPUs\n",
(gpa - start_gpa) / SZ_1G, nr_vcpus);
rendezvous_with_vcpus(&time_start, "spawning");
rendezvous_with_vcpus(&time_run1, "run 1");
rendezvous_with_vcpus(&time_reset, "reset");
rendezvous_with_vcpus(&time_run2, "run 2");
time_run2 = timespec_sub(time_run2, time_reset);
time_reset = timespec_sub(time_reset, time_run1);
time_run1 = timespec_sub(time_run1, time_start);
pr_info("run1 = %ld.%.9lds, reset = %ld.%.9lds, run2 = %ld.%.9lds\n",
time_run1.tv_sec, time_run1.tv_nsec,
time_reset.tv_sec, time_reset.tv_nsec,
time_run2.tv_sec, time_run2.tv_nsec);
/*
* Delete even numbered slots (arbitrary) and unmap the first half of
* the backing (also arbitrary) to verify KVM correctly drops all
* references to the removed regions.
*/
for (slot = (slot - 1) & ~1ull; slot >= first_slot; slot -= 2)
vm_set_user_memory_region(vm, slot, 0, 0, 0, NULL);
munmap(mem, slot_size / 2);
/* Sanity check that the vCPUs actually ran. */
for (i = 0; i < nr_vcpus; i++)
pthread_join(threads[i], NULL);
/*
* Deliberately exit without deleting the remaining memslots or closing
* kvm_fd to test cleanup via mmu_notifier.release.
*/
}
/* End of tools/testing/selftests/kvm/max_guest_memory_test.c (linux-master) */
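/*
 * Sketch of the memslot aliasing trick above reduced to the raw KVM ABI: one
 * host mapping backs many guest slots by reusing the same userspace address
 * with different guest physical addresses. VM setup is elided and the helper
 * name is illustrative; the struct and ioctl are the real uapi ones.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int alias_one_buffer(int vm_fd, void *mem, uint64_t slot_size,
			    uint32_t nr_slots, uint64_t start_gpa)
{
	struct kvm_userspace_memory_region region;
	uint32_t slot;

	for (slot = 0; slot < nr_slots; slot++) {
		memset(&region, 0, sizeof(region));
		region.slot = slot + 1;		/* slot 0 left for the harness */
		region.guest_phys_addr = start_gpa + (uint64_t)slot * slot_size;
		region.memory_size = slot_size;
		region.userspace_addr = (uint64_t)(uintptr_t)mem;
		if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region))
			return -1;
	}
	return 0;
}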
// SPDX-License-Identifier: GPL-2.0
/*
* A memslot-related performance benchmark.
*
* Copyright (C) 2021 Oracle and/or its affiliates.
*
* Basic guest setup / host vCPU thread code lifted from set_memory_region_test.
*/
#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>
#include <linux/compiler.h>
#include <linux/sizes.h>
#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>
#define MEM_EXTRA_SIZE SZ_64K
#define MEM_SIZE (SZ_512M + MEM_EXTRA_SIZE)
#define MEM_GPA SZ_256M
#define MEM_AUX_GPA MEM_GPA
#define MEM_SYNC_GPA MEM_AUX_GPA
#define MEM_TEST_GPA (MEM_AUX_GPA + MEM_EXTRA_SIZE)
#define MEM_TEST_SIZE (MEM_SIZE - MEM_EXTRA_SIZE)
/*
* 32 MiB is the maximum size that gets well over 100 iterations on 509 slots.
* Considering that each slot needs to have at least one page, up to
* 8194 slots in use can then be tested (although with slightly
* limited resolution).
*/
#define MEM_SIZE_MAP (SZ_32M + MEM_EXTRA_SIZE)
#define MEM_TEST_MAP_SIZE (MEM_SIZE_MAP - MEM_EXTRA_SIZE)
/*
* 128 MiB is the minimum size that fills 32k slots with at least one page
* each while still getting 100+ iterations in such a test.
*
* 2 MiB chunk size matches a typical huge page.
*/
#define MEM_TEST_UNMAP_SIZE SZ_128M
#define MEM_TEST_UNMAP_CHUNK_SIZE SZ_2M
/*
* For the move active test the middle of the test area is placed on
* a memslot boundary: half lies in the memslot being moved, half in
* other memslot(s).
*
* We have different number of memory slots, excluding the reserved
* memory slot 0, on various architectures and configurations. The
* memory size in this test is calculated by picking the maximal
* last memory slot's memory size, with alignment to the largest
* supported page size (64KB). In this way, the selected memory
* size for this test is compatible with test_memslot_move_prepare().
*
* architecture slots memory-per-slot memory-on-last-slot
* --------------------------------------------------------------
* x86-4KB 32763 16KB 160KB
* arm64-4KB 32766 16KB 112KB
* arm64-16KB 32766 16KB 112KB
* arm64-64KB 8192 64KB 128KB
*/
#define MEM_TEST_MOVE_SIZE (3 * SZ_64K)
#define MEM_TEST_MOVE_GPA_DEST (MEM_GPA + MEM_SIZE)
static_assert(MEM_TEST_MOVE_SIZE <= MEM_TEST_SIZE,
"invalid move test region size");
#define MEM_TEST_VAL_1 0x1122334455667788
#define MEM_TEST_VAL_2 0x99AABBCCDDEEFF00
struct vm_data {
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
pthread_t vcpu_thread;
uint32_t nslots;
uint64_t npages;
uint64_t pages_per_slot;
void **hva_slots;
bool mmio_ok;
uint64_t mmio_gpa_min;
uint64_t mmio_gpa_max;
};
struct sync_area {
uint32_t guest_page_size;
atomic_bool start_flag;
atomic_bool exit_flag;
atomic_bool sync_flag;
void *move_area_ptr;
};
/*
* Technically, we also need the atomic bool to be address-free, which
* is recommended, but not strictly required, by C11 for lockless
* implementations.
* However, in practice both GCC and Clang fulfill this requirement on
* all KVM-supported platforms.
*/
static_assert(ATOMIC_BOOL_LOCK_FREE == 2, "atomic bool is not lockless");
static sem_t vcpu_ready;
static bool map_unmap_verify;
static bool verbose;
#define pr_info_v(...) \
do { \
if (verbose) \
pr_info(__VA_ARGS__); \
} while (0)
static void check_mmio_access(struct vm_data *data, struct kvm_run *run)
{
TEST_ASSERT(data->mmio_ok, "Unexpected mmio exit");
TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
TEST_ASSERT(run->mmio.len == 8,
"Unexpected exit mmio size = %u", run->mmio.len);
TEST_ASSERT(run->mmio.phys_addr >= data->mmio_gpa_min &&
run->mmio.phys_addr <= data->mmio_gpa_max,
"Unexpected exit mmio address = 0x%llx",
run->mmio.phys_addr);
}
static void *vcpu_worker(void *__data)
{
struct vm_data *data = __data;
struct kvm_vcpu *vcpu = data->vcpu;
struct kvm_run *run = vcpu->run;
struct ucall uc;
while (1) {
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(uc.args[1] == 0,
"Unexpected sync ucall, got %lx",
(ulong)uc.args[1]);
sem_post(&vcpu_ready);
continue;
case UCALL_NONE:
if (run->exit_reason == KVM_EXIT_MMIO)
check_mmio_access(data, run);
else
goto done;
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
done:
return NULL;
}
static void wait_for_vcpu(void)
{
struct timespec ts;
TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
"clock_gettime() failed: %d\n", errno);
ts.tv_sec += 2;
TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
"sem_timedwait() failed: %d\n", errno);
}
static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
{
uint64_t gpage, pgoffs;
uint32_t slot, slotoffs;
void *base;
uint32_t guest_page_size = data->vm->page_size;
TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate");
TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size,
"Too high gpa to translate");
gpa -= MEM_GPA;
gpage = gpa / guest_page_size;
pgoffs = gpa % guest_page_size;
slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1);
slotoffs = gpage - (slot * data->pages_per_slot);
if (rempages) {
uint64_t slotpages;
if (slot == data->nslots - 1)
slotpages = data->npages - slot * data->pages_per_slot;
else
slotpages = data->pages_per_slot;
TEST_ASSERT(!pgoffs,
"Asking for remaining pages in slot but gpa not page aligned");
*rempages = slotpages - slotoffs;
}
base = data->hva_slots[slot];
return (uint8_t *)base + slotoffs * guest_page_size + pgoffs;
}
static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot)
{
uint32_t guest_page_size = data->vm->page_size;
TEST_ASSERT(slot < data->nslots, "Too high slot number");
return MEM_GPA + slot * data->pages_per_slot * guest_page_size;
}
static struct vm_data *alloc_vm(void)
{
struct vm_data *data;
data = malloc(sizeof(*data));
TEST_ASSERT(data, "malloc(vmdata) failed");
data->vm = NULL;
data->vcpu = NULL;
data->hva_slots = NULL;
return data;
}
static bool check_slot_pages(uint32_t host_page_size, uint32_t guest_page_size,
uint64_t pages_per_slot, uint64_t rempages)
{
if (!pages_per_slot)
return false;
if ((pages_per_slot * guest_page_size) % host_page_size)
return false;
if ((rempages * guest_page_size) % host_page_size)
return false;
return true;
}
static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size)
{
uint32_t guest_page_size = data->vm->page_size;
uint64_t mempages, pages_per_slot, rempages;
uint64_t slots;
mempages = data->npages;
slots = data->nslots;
while (--slots > 1) {
pages_per_slot = mempages / slots;
if (!pages_per_slot)
continue;
rempages = mempages % pages_per_slot;
if (check_slot_pages(host_page_size, guest_page_size,
pages_per_slot, rempages))
return slots + 1; /* slot 0 is reserved */
}
return 0;
}
static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
void *guest_code, uint64_t mem_size,
struct timespec *slot_runtime)
{
uint64_t mempages, rempages;
uint64_t guest_addr;
uint32_t slot, host_page_size, guest_page_size;
struct timespec tstart;
struct sync_area *sync;
host_page_size = getpagesize();
guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
mempages = mem_size / guest_page_size;
data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
TEST_ASSERT(data->vm->page_size == guest_page_size, "Invalid VM page size");
data->npages = mempages;
TEST_ASSERT(data->npages > 1, "Can't test without any memory");
data->nslots = nslots;
data->pages_per_slot = data->npages / data->nslots;
rempages = data->npages % data->nslots;
if (!check_slot_pages(host_page_size, guest_page_size,
data->pages_per_slot, rempages)) {
*maxslots = get_max_slots(data, host_page_size);
return false;
}
data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
TEST_ASSERT(data->hva_slots, "malloc() fail");
pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
data->nslots, data->pages_per_slot, rempages);
clock_gettime(CLOCK_MONOTONIC, &tstart);
for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
uint64_t npages;
npages = data->pages_per_slot;
if (slot == data->nslots)
npages += rempages;
vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS,
guest_addr, slot, npages,
0);
guest_addr += npages * guest_page_size;
}
*slot_runtime = timespec_elapsed(tstart);
for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
uint64_t npages;
uint64_t gpa;
npages = data->pages_per_slot;
if (slot == data->nslots)
npages += rempages;
gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, slot);
TEST_ASSERT(gpa == guest_addr,
"vm_phy_pages_alloc() failed\n");
data->hva_slots[slot - 1] = addr_gpa2hva(data->vm, guest_addr);
memset(data->hva_slots[slot - 1], 0, npages * guest_page_size);
guest_addr += npages * guest_page_size;
}
virt_map(data->vm, MEM_GPA, MEM_GPA, data->npages);
sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
sync->guest_page_size = data->vm->page_size;
atomic_init(&sync->start_flag, false);
atomic_init(&sync->exit_flag, false);
atomic_init(&sync->sync_flag, false);
data->mmio_ok = false;
return true;
}
static void launch_vm(struct vm_data *data)
{
pr_info_v("Launching the test VM\n");
pthread_create(&data->vcpu_thread, NULL, vcpu_worker, data);
/* Ensure the guest thread is spun up. */
wait_for_vcpu();
}
static void free_vm(struct vm_data *data)
{
kvm_vm_free(data->vm);
free(data->hva_slots);
free(data);
}
static void wait_guest_exit(struct vm_data *data)
{
pthread_join(data->vcpu_thread, NULL);
}
static void let_guest_run(struct sync_area *sync)
{
atomic_store_explicit(&sync->start_flag, true, memory_order_release);
}
static void guest_spin_until_start(void)
{
struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire))
;
}
static void make_guest_exit(struct sync_area *sync)
{
atomic_store_explicit(&sync->exit_flag, true, memory_order_release);
}
static bool _guest_should_exit(void)
{
struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
return atomic_load_explicit(&sync->exit_flag, memory_order_acquire);
}
#define guest_should_exit() unlikely(_guest_should_exit())
/*
* noinline so we can easily see how much time the host spends waiting
* for the guest.
* For the same reason use alarm() instead of polling clock_gettime()
* to implement a wait timeout.
*/
static noinline void host_perform_sync(struct sync_area *sync)
{
alarm(2);
atomic_store_explicit(&sync->sync_flag, true, memory_order_release);
while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire))
;
alarm(0);
}
static bool guest_perform_sync(void)
{
struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
bool expected;
do {
if (guest_should_exit())
return false;
expected = true;
} while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag,
&expected, false,
memory_order_acq_rel,
memory_order_relaxed));
return true;
}
static void guest_code_test_memslot_move(void)
{
struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr);
GUEST_SYNC(0);
guest_spin_until_start();
while (!guest_should_exit()) {
uintptr_t ptr;
for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE;
ptr += page_size)
*(uint64_t *)ptr = MEM_TEST_VAL_1;
/*
* No host sync here since the MMIO exits are so expensive
* that the host would spend most of its time waiting for
* the guest and so instead of measuring memslot move
* performance we would measure the performance and
* likelihood of MMIO exits
*/
}
GUEST_DONE();
}
static void guest_code_test_memslot_map(void)
{
struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
GUEST_SYNC(0);
guest_spin_until_start();
while (1) {
uintptr_t ptr;
for (ptr = MEM_TEST_GPA;
ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
ptr += page_size)
*(uint64_t *)ptr = MEM_TEST_VAL_1;
if (!guest_perform_sync())
break;
for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE;
ptr += page_size)
*(uint64_t *)ptr = MEM_TEST_VAL_2;
if (!guest_perform_sync())
break;
}
GUEST_DONE();
}
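/*
 * Guest body for the unmap tests: touch a single page in each half of
 * the test area per host sync so that host-side unmap operations, not
 * guest page mapping, dominate the measured cost.
 */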
static void guest_code_test_memslot_unmap(void)
{
struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
GUEST_SYNC(0);
guest_spin_until_start();
while (1) {
uintptr_t ptr = MEM_TEST_GPA;
/*
* We can afford to access (map) just a small number of pages
* per host sync as otherwise the host will spend
* a significant amount of its time waiting for the guest
* (instead of doing unmap operations), so this will
* effectively turn this test into a map performance test.
*
* Just access a single page to be on the safe side.
*/
*(uint64_t *)ptr = MEM_TEST_VAL_1;
if (!guest_perform_sync())
break;
ptr += MEM_TEST_UNMAP_SIZE / 2;
*(uint64_t *)ptr = MEM_TEST_VAL_2;
if (!guest_perform_sync())
break;
}
GUEST_DONE();
}
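/*
 * Guest body for the RW test: write MEM_TEST_VAL_1 at the start of
 * every page, then verify and clear the MEM_TEST_VAL_2 values the host
 * wrote at each page's mid-point.
 */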
static void guest_code_test_memslot_rw(void)
{
struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
GUEST_SYNC(0);
guest_spin_until_start();
while (1) {
uintptr_t ptr;
for (ptr = MEM_TEST_GPA;
ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size)
*(uint64_t *)ptr = MEM_TEST_VAL_1;
if (!guest_perform_sync())
break;
for (ptr = MEM_TEST_GPA + page_size / 2;
ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) {
uint64_t val = *(uint64_t *)ptr;
GUEST_ASSERT_EQ(val, MEM_TEST_VAL_2);
*(uint64_t *)ptr = 0;
}
if (!guest_perform_sync())
break;
}
GUEST_DONE();
}
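/*
 * Publish the address of the area the guest should write to during the
 * move test. The "active" variant overlaps the guest writes with the
 * moved (last) memslot and whitelists the resulting MMIO exits; the
 * "inactive" variant places the writes entirely below that memslot.
 */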
static bool test_memslot_move_prepare(struct vm_data *data,
struct sync_area *sync,
uint64_t *maxslots, bool isactive)
{
uint32_t guest_page_size = data->vm->page_size;
uint64_t movesrcgpa, movetestgpa;
movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
if (isactive) {
uint64_t lastpages;
vm_gpa2hva(data, movesrcgpa, &lastpages);
if (lastpages * guest_page_size < MEM_TEST_MOVE_SIZE / 2) {
*maxslots = 0;
return false;
}
}
movetestgpa = movesrcgpa - (MEM_TEST_MOVE_SIZE / (isactive ? 2 : 1));
sync->move_area_ptr = (void *)movetestgpa;
if (isactive) {
data->mmio_ok = true;
data->mmio_gpa_min = movesrcgpa;
data->mmio_gpa_max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1;
}
return true;
}
static bool test_memslot_move_prepare_active(struct vm_data *data,
struct sync_area *sync,
uint64_t *maxslots)
{
return test_memslot_move_prepare(data, sync, maxslots, true);
}
static bool test_memslot_move_prepare_inactive(struct vm_data *data,
struct sync_area *sync,
uint64_t *maxslots)
{
return test_memslot_move_prepare(data, sync, maxslots, false);
}
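/*
 * One iteration of the move test: bounce the last memslot to
 * MEM_TEST_MOVE_GPA_DEST and back. Note the "- 1 + 1": slot indices
 * within vm_data are zero-based while the KVM memslot ids start at 1
 * because memslot 0 is reserved.
 */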
static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync)
{
uint64_t movesrcgpa;
movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
vm_mem_region_move(data->vm, data->nslots - 1 + 1,
MEM_TEST_MOVE_GPA_DEST);
vm_mem_region_move(data->vm, data->nslots - 1 + 1, movesrcgpa);
}
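/*
 * madvise(MADV_DONTNEED) away "count" guest pages starting at page
 * offset "offsp" within the test area, walking chunk by chunk since
 * the range may span multiple memslots.
 */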
static void test_memslot_do_unmap(struct vm_data *data,
uint64_t offsp, uint64_t count)
{
uint64_t gpa, ctr;
uint32_t guest_page_size = data->vm->page_size;
for (gpa = MEM_TEST_GPA + offsp * guest_page_size, ctr = 0; ctr < count; ) {
uint64_t npages;
void *hva;
int ret;
hva = vm_gpa2hva(data, gpa, &npages);
TEST_ASSERT(npages, "Empty memory slot at gptr 0x%"PRIx64, gpa);
npages = min(npages, count - ctr);
ret = madvise(hva, npages * guest_page_size, MADV_DONTNEED);
TEST_ASSERT(!ret,
"madvise(%p, MADV_DONTNEED) on VM memory should not fail for gptr 0x%"PRIx64,
hva, gpa);
ctr += npages;
gpa += npages * guest_page_size;
}
TEST_ASSERT(ctr == count,
"madvise(MADV_DONTNEED) should exactly cover all of the requested area");
}
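/*
 * With map/unmap verification enabled (-d), check that the value the
 * guest wrote at the given page offset reads back as expected, then
 * clear it for the next iteration.
 */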
static void test_memslot_map_unmap_check(struct vm_data *data,
uint64_t offsp, uint64_t valexp)
{
uint64_t gpa;
uint64_t *val;
uint32_t guest_page_size = data->vm->page_size;
if (!map_unmap_verify)
return;
gpa = MEM_TEST_GPA + offsp * guest_page_size;
val = (typeof(val))vm_gpa2hva(data, gpa, NULL);
TEST_ASSERT(*val == valexp,
"Guest written values should read back correctly before unmap (%"PRIu64" vs %"PRIu64" @ %"PRIx64")",
*val, valexp, gpa);
*val = 0;
}
static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
{
uint32_t guest_page_size = data->vm->page_size;
uint64_t guest_pages = MEM_TEST_MAP_SIZE / guest_page_size;
/*
* Unmap the second half of the test area while guest writes to (maps)
* the first half.
*/
test_memslot_do_unmap(data, guest_pages / 2, guest_pages / 2);
/*
* Wait for the guest to finish writing the first half of the test
* area, verify the written value on the first and the last page of
* this area and then unmap it.
* Meanwhile, the guest is writing to (mapping) the second half of
* the test area.
*/
host_perform_sync(sync);
test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
test_memslot_map_unmap_check(data, guest_pages / 2 - 1, MEM_TEST_VAL_1);
test_memslot_do_unmap(data, 0, guest_pages / 2);
/*
* Wait for the guest to finish writing the second half of the test
* area and verify the written value on the first and the last page
* of this area.
* The area will be unmapped at the beginning of the next loop
* iteration.
* Meanwhile, the guest is writing to (mapping) the first half of
* the test area.
*/
host_perform_sync(sync);
test_memslot_map_unmap_check(data, guest_pages / 2, MEM_TEST_VAL_2);
test_memslot_map_unmap_check(data, guest_pages - 1, MEM_TEST_VAL_2);
}
static void test_memslot_unmap_loop_common(struct vm_data *data,
struct sync_area *sync,
uint64_t chunk)
{
uint32_t guest_page_size = data->vm->page_size;
uint64_t guest_pages = MEM_TEST_UNMAP_SIZE / guest_page_size;
uint64_t ctr;
/*
* Wait for the guest to finish mapping page(s) in the first half
* of the test area, verify the written value and then perform unmap
* of this area.
* Meanwhile, the guest is writing to (mapping) page(s) in the second
* half of the test area.
*/
host_perform_sync(sync);
test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
for (ctr = 0; ctr < guest_pages / 2; ctr += chunk)
test_memslot_do_unmap(data, ctr, chunk);
/* Likewise, but for the opposite host / guest areas */
host_perform_sync(sync);
test_memslot_map_unmap_check(data, guest_pages / 2, MEM_TEST_VAL_2);
for (ctr = guest_pages / 2; ctr < guest_pages; ctr += chunk)
test_memslot_do_unmap(data, ctr, chunk);
}
static void test_memslot_unmap_loop(struct vm_data *data,
struct sync_area *sync)
{
uint32_t host_page_size = getpagesize();
uint32_t guest_page_size = data->vm->page_size;
uint64_t guest_chunk_pages = guest_page_size >= host_page_size ?
1 : host_page_size / guest_page_size;
test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
}
static void test_memslot_unmap_loop_chunked(struct vm_data *data,
struct sync_area *sync)
{
uint32_t guest_page_size = data->vm->page_size;
uint64_t guest_chunk_pages = MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size;
test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
}
static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync)
{
uint64_t gptr;
uint32_t guest_page_size = data->vm->page_size;
for (gptr = MEM_TEST_GPA + guest_page_size / 2;
gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size)
*(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2;
host_perform_sync(sync);
for (gptr = MEM_TEST_GPA;
gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size) {
uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL);
uint64_t val = *vptr;
TEST_ASSERT(val == MEM_TEST_VAL_1,
"Guest written values should read back correctly (is %"PRIu64" @ %"PRIx64")",
val, gptr);
*vptr = 0;
}
host_perform_sync(sync);
}
struct test_data {
const char *name;
uint64_t mem_size;
void (*guest_code)(void);
bool (*prepare)(struct vm_data *data, struct sync_area *sync,
uint64_t *maxslots);
void (*loop)(struct vm_data *data, struct sync_area *sync);
};
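/*
 * Run a single test variant for up to "maxtime" seconds: create and
 * launch the VM, invoke the optional prepare hook, then repeatedly
 * call the host-side loop while the guest runs, counting full
 * iterations in *nloops.
 */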
static bool test_execute(int nslots, uint64_t *maxslots,
unsigned int maxtime,
const struct test_data *tdata,
uint64_t *nloops,
struct timespec *slot_runtime,
struct timespec *guest_runtime)
{
uint64_t mem_size = tdata->mem_size ? : MEM_SIZE;
struct vm_data *data;
struct sync_area *sync;
struct timespec tstart;
bool ret = true;
data = alloc_vm();
if (!prepare_vm(data, nslots, maxslots, tdata->guest_code,
mem_size, slot_runtime)) {
ret = false;
goto exit_free;
}
sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
if (tdata->prepare &&
!tdata->prepare(data, sync, maxslots)) {
ret = false;
goto exit_free;
}
launch_vm(data);
clock_gettime(CLOCK_MONOTONIC, &tstart);
let_guest_run(sync);
while (1) {
*guest_runtime = timespec_elapsed(tstart);
if (guest_runtime->tv_sec >= maxtime)
break;
tdata->loop(data, sync);
(*nloops)++;
}
make_guest_exit(sync);
wait_guest_exit(data);
exit_free:
free_vm(data);
return ret;
}
static const struct test_data tests[] = {
{
.name = "map",
.mem_size = MEM_SIZE_MAP,
.guest_code = guest_code_test_memslot_map,
.loop = test_memslot_map_loop,
},
{
.name = "unmap",
.mem_size = MEM_TEST_UNMAP_SIZE + MEM_EXTRA_SIZE,
.guest_code = guest_code_test_memslot_unmap,
.loop = test_memslot_unmap_loop,
},
{
.name = "unmap chunked",
.mem_size = MEM_TEST_UNMAP_SIZE + MEM_EXTRA_SIZE,
.guest_code = guest_code_test_memslot_unmap,
.loop = test_memslot_unmap_loop_chunked,
},
{
.name = "move active area",
.guest_code = guest_code_test_memslot_move,
.prepare = test_memslot_move_prepare_active,
.loop = test_memslot_move_loop,
},
{
.name = "move inactive area",
.guest_code = guest_code_test_memslot_move,
.prepare = test_memslot_move_prepare_inactive,
.loop = test_memslot_move_loop,
},
{
.name = "RW",
.guest_code = guest_code_test_memslot_rw,
.loop = test_memslot_rw_loop
},
};
#define NTESTS ARRAY_SIZE(tests)
struct test_args {
int tfirst;
int tlast;
int nslots;
int seconds;
int runs;
};
static void help(char *name, struct test_args *targs)
{
int ctr;
pr_info("usage: %s [-h] [-v] [-d] [-s slots] [-f first_test] [-e last_test] [-l test_length] [-r run_count]\n",
name);
pr_info(" -h: print this help screen.\n");
pr_info(" -v: enable verbose mode (not for benchmarking).\n");
pr_info(" -d: enable extra debug checks.\n");
pr_info(" -s: specify memslot count cap (-1 means no cap; currently: %i)\n",
targs->nslots);
pr_info(" -f: specify the first test to run (currently: %i; max %zu)\n",
targs->tfirst, NTESTS - 1);
pr_info(" -e: specify the last test to run (currently: %i; max %zu)\n",
targs->tlast, NTESTS - 1);
pr_info(" -l: specify the test length in seconds (currently: %i)\n",
targs->seconds);
pr_info(" -r: specify the number of runs per test (currently: %i)\n",
targs->runs);
pr_info("\nAvailable tests:\n");
for (ctr = 0; ctr < NTESTS; ctr++)
pr_info("%d: %s\n", ctr, tests[ctr].name);
}
static bool check_memory_sizes(void)
{
uint32_t host_page_size = getpagesize();
uint32_t guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
if (host_page_size > SZ_64K || guest_page_size > SZ_64K) {
pr_info("Unsupported page size on host (0x%x) or guest (0x%x)\n",
host_page_size, guest_page_size);
return false;
}
if (MEM_SIZE % guest_page_size ||
MEM_TEST_SIZE % guest_page_size) {
pr_info("invalid MEM_SIZE or MEM_TEST_SIZE\n");
return false;
}
if (MEM_SIZE_MAP % guest_page_size ||
MEM_TEST_MAP_SIZE % guest_page_size ||
(MEM_TEST_MAP_SIZE / guest_page_size) <= 2 ||
(MEM_TEST_MAP_SIZE / guest_page_size) % 2) {
pr_info("invalid MEM_SIZE_MAP or MEM_TEST_MAP_SIZE\n");
return false;
}
if (MEM_TEST_UNMAP_SIZE > MEM_TEST_SIZE ||
MEM_TEST_UNMAP_SIZE % guest_page_size ||
(MEM_TEST_UNMAP_SIZE / guest_page_size) %
(2 * MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size)) {
pr_info("invalid MEM_TEST_UNMAP_SIZE or MEM_TEST_UNMAP_CHUNK_SIZE\n");
return false;
}
return true;
}
static bool parse_args(int argc, char *argv[],
struct test_args *targs)
{
uint32_t max_mem_slots;
int opt;
while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) {
switch (opt) {
case 'h':
default:
help(argv[0], targs);
return false;
case 'v':
verbose = true;
break;
case 'd':
map_unmap_verify = true;
break;
case 's':
targs->nslots = atoi_paranoid(optarg);
if (targs->nslots <= 1 && targs->nslots != -1) {
pr_info("Slot count cap must be larger than 1 or -1 for no cap\n");
return false;
}
break;
case 'f':
targs->tfirst = atoi_non_negative("First test", optarg);
break;
case 'e':
targs->tlast = atoi_non_negative("Last test", optarg);
if (targs->tlast >= NTESTS) {
pr_info("Last test to run has to be non-negative and less than %zu\n",
NTESTS);
return false;
}
break;
case 'l':
targs->seconds = atoi_non_negative("Test length", optarg);
break;
case 'r':
targs->runs = atoi_positive("Runs per test", optarg);
break;
}
}
if (optind < argc) {
help(argv[0], targs);
return false;
}
if (targs->tfirst > targs->tlast) {
pr_info("First test to run cannot be greater than the last test to run\n");
return false;
}
max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
if (max_mem_slots <= 1) {
pr_info("KVM_CAP_NR_MEMSLOTS should be greater than 1\n");
return false;
}
/* Memory slot 0 is reserved */
if (targs->nslots == -1)
targs->nslots = max_mem_slots - 1;
else
targs->nslots = min_t(int, targs->nslots, max_mem_slots) - 1;
pr_info_v("Allowed Number of memory slots: %"PRIu32"\n",
targs->nslots + 1);
return true;
}
struct test_result {
struct timespec slot_runtime, guest_runtime, iter_runtime;
int64_t slottimens, runtimens;
uint64_t nloops;
};
static bool test_loop(const struct test_data *data,
const struct test_args *targs,
struct test_result *rbestslottime,
struct test_result *rbestruntime)
{
uint64_t maxslots;
struct test_result result;
result.nloops = 0;
if (!test_execute(targs->nslots, &maxslots, targs->seconds, data,
&result.nloops,
&result.slot_runtime, &result.guest_runtime)) {
if (maxslots)
pr_info("Memslot count too high for this test, decrease the cap (max is %"PRIu64")\n",
maxslots);
else
pr_info("Memslot count may be too high for this test, try adjusting the cap\n");
return false;
}
pr_info("Test took %ld.%.9lds for slot setup + %ld.%.9lds all iterations\n",
result.slot_runtime.tv_sec, result.slot_runtime.tv_nsec,
result.guest_runtime.tv_sec, result.guest_runtime.tv_nsec);
if (!result.nloops) {
pr_info("No full loops done - too short test time or system too loaded?\n");
return true;
}
result.iter_runtime = timespec_div(result.guest_runtime,
result.nloops);
pr_info("Done %"PRIu64" iterations, avg %ld.%.9lds each\n",
result.nloops,
result.iter_runtime.tv_sec,
result.iter_runtime.tv_nsec);
result.slottimens = timespec_to_ns(result.slot_runtime);
result.runtimens = timespec_to_ns(result.iter_runtime);
/*
* Only rank the slot setup time for tests using the whole test memory
* area so they are comparable
*/
if (!data->mem_size &&
(!rbestslottime->slottimens ||
result.slottimens < rbestslottime->slottimens))
*rbestslottime = result;
if (!rbestruntime->runtimens ||
result.runtimens < rbestruntime->runtimens)
*rbestruntime = result;
return true;
}
int main(int argc, char *argv[])
{
struct test_args targs = {
.tfirst = 0,
.tlast = NTESTS - 1,
.nslots = -1,
.seconds = 5,
.runs = 1,
};
struct test_result rbestslottime;
int tctr;
if (!check_memory_sizes())
return -1;
if (!parse_args(argc, argv, &targs))
return -1;
rbestslottime.slottimens = 0;
for (tctr = targs.tfirst; tctr <= targs.tlast; tctr++) {
const struct test_data *data = &tests[tctr];
unsigned int runctr;
struct test_result rbestruntime;
if (tctr > targs.tfirst)
pr_info("\n");
pr_info("Testing %s performance with %i runs, %d seconds each\n",
data->name, targs.runs, targs.seconds);
rbestruntime.runtimens = 0;
for (runctr = 0; runctr < targs.runs; runctr++)
if (!test_loop(data, &targs,
&rbestslottime, &rbestruntime))
break;
if (rbestruntime.runtimens)
pr_info("Best runtime result was %ld.%.9lds per iteration (with %"PRIu64" iterations)\n",
rbestruntime.iter_runtime.tv_sec,
rbestruntime.iter_runtime.tv_nsec,
rbestruntime.nloops);
}
if (rbestslottime.slottimens)
pr_info("Best slot setup time for the whole test area was %ld.%.9lds\n",
rbestslottime.slot_runtime.tv_sec,
rbestslottime.slot_runtime.tv_nsec);
return 0;
}
| linux-master | tools/testing/selftests/kvm/memslot_perf_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Check for KVM_GET_REG_LIST regressions.
*
* Copyright (C) 2020, Red Hat, Inc.
*
* When attempting to migrate from a host with an older kernel to a host
* with a newer kernel we allow the newer kernel on the destination to
* list new registers with get-reg-list. We assume they'll be unused, at
* least until the guest reboots, and so they're relatively harmless.
* However, if the destination host with the newer kernel is missing
* registers which the source host with the older kernel has, then that's
* a regression in get-reg-list. This test checks for that regression by
* checking the current list against a blessed list. We should never have
* missing registers, but if new ones appear then they can probably be
* added to the blessed list. A completely new blessed list can be created
* by running the test with the --list command line argument.
*
* The blessed list should be created from the oldest possible kernel.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"
static struct kvm_reg_list *reg_list;
static __u64 *blessed_reg, blessed_n;
extern struct vcpu_reg_list *vcpu_configs[];
extern int vcpu_configs_n;
#define for_each_reg(i) \
for ((i) = 0; (i) < reg_list->n; ++(i))
#define for_each_reg_filtered(i) \
for_each_reg(i) \
if (!filter_reg(reg_list->reg[i]))
#define for_each_missing_reg(i) \
for ((i) = 0; (i) < blessed_n; ++(i)) \
if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i])) \
if (check_supported_reg(vcpu, blessed_reg[i]))
#define for_each_new_reg(i) \
for_each_reg_filtered(i) \
if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
#define for_each_present_blessed_reg(i) \
for_each_reg(i) \
if (find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
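/*
 * Build the printable config name by joining the non-"base" sublist
 * names with '+', caching the result in c->name on first use.
 */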
static const char *config_name(struct vcpu_reg_list *c)
{
struct vcpu_reg_sublist *s;
int len = 0;
if (c->name)
return c->name;
for_each_sublist(c, s)
len += strlen(s->name) + 1;
c->name = malloc(len);
len = 0;
for_each_sublist(c, s) {
if (!strcmp(s->name, "base"))
continue;
		/* strcpy(), not strcat(): c->name points at uninitialized memory */
		strcpy(c->name + len, s->name);
len += strlen(s->name) + 1;
c->name[len - 1] = '+';
}
c->name[len - 1] = '\0';
return c->name;
}
bool __weak check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
return true;
}
bool __weak filter_reg(__u64 reg)
{
return false;
}
static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
{
int i;
for (i = 0; i < nr_regs; ++i)
if (reg == regs[i])
return true;
return false;
}
void __weak print_reg(const char *prefix, __u64 id)
{
printf("\t0x%llx,\n", id);
}
bool __weak check_reject_set(int err)
{
return true;
}
void __weak finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
}
#ifdef __aarch64__
static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
{
struct vcpu_reg_sublist *s;
for_each_sublist(c, s)
if (s->capability)
init->features[s->feature / 32] |= 1 << (s->feature % 32);
}
static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
{
struct kvm_vcpu_init init = { .target = -1, };
struct kvm_vcpu *vcpu;
prepare_vcpu_init(c, &init);
vcpu = __vm_vcpu_add(vm, 0);
aarch64_vcpu_setup(vcpu, &init);
return vcpu;
}
#else
static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
{
return __vm_vcpu_add(vm, 0);
}
#endif
static void check_supported(struct vcpu_reg_list *c)
{
struct vcpu_reg_sublist *s;
for_each_sublist(c, s) {
if (!s->capability)
continue;
__TEST_REQUIRE(kvm_has_cap(s->capability),
"%s: %s not available, skipping tests\n",
config_name(c), s->name);
}
}
static bool print_list;
static bool print_filtered;
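/*
 * Core of the test: fetch the register list for the given vcpu config,
 * optionally just print it, and otherwise diff it against the blessed
 * list while exercising get/set on every present blessed register.
 */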
static void run_test(struct vcpu_reg_list *c)
{
int new_regs = 0, missing_regs = 0, i, n;
int failed_get = 0, failed_set = 0, failed_reject = 0;
int skipped_set = 0;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct vcpu_reg_sublist *s;
check_supported(c);
vm = vm_create_barebones();
vcpu = vcpu_config_get_vcpu(c, vm);
finalize_vcpu(vcpu, c);
reg_list = vcpu_get_reg_list(vcpu);
if (print_list || print_filtered) {
putchar('\n');
for_each_reg(i) {
__u64 id = reg_list->reg[i];
if ((print_list && !filter_reg(id)) ||
(print_filtered && filter_reg(id)))
print_reg(config_name(c), id);
}
putchar('\n');
return;
}
for_each_sublist(c, s)
blessed_n += s->regs_n;
blessed_reg = calloc(blessed_n, sizeof(__u64));
n = 0;
for_each_sublist(c, s) {
for (i = 0; i < s->regs_n; ++i)
blessed_reg[n++] = s->regs[i];
}
/*
* We only test that we can get the register and then write back the
* same value. Some registers may allow other values to be written
* back, but others only allow some bits to be changed, and at least
* for ID registers set will fail if the value does not exactly match
* what was returned by get. If registers that allow other values to
* be written need to have the other values tested, then we should
* create a new set of tests for those in a new independent test
* executable.
*
* Only do the get/set tests on present, blessed list registers,
* since we don't know the capabilities of any new registers.
*/
for_each_present_blessed_reg(i) {
uint8_t addr[2048 / 8];
struct kvm_one_reg reg = {
.id = reg_list->reg[i],
.addr = (__u64)&addr,
};
bool reject_reg = false, skip_reg = false;
int ret;
ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
if (ret) {
printf("%s: Failed to get ", config_name(c));
print_reg(config_name(c), reg.id);
putchar('\n');
++failed_get;
}
for_each_sublist(c, s) {
/* rejects_set registers are rejected for set operation */
if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
reject_reg = true;
ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®);
if (ret != -1 || !check_reject_set(errno)) {
printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
print_reg(config_name(c), reg.id);
putchar('\n');
++failed_reject;
}
break;
}
/* skips_set registers are skipped for set operation */
if (s->skips_set && find_reg(s->skips_set, s->skips_set_n, reg.id)) {
skip_reg = true;
++skipped_set;
break;
}
}
if (!reject_reg && !skip_reg) {
ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®);
if (ret) {
printf("%s: Failed to set ", config_name(c));
print_reg(config_name(c), reg.id);
putchar('\n');
++failed_set;
}
}
}
for_each_new_reg(i)
++new_regs;
for_each_missing_reg(i)
++missing_regs;
if (new_regs || missing_regs) {
n = 0;
for_each_reg_filtered(i)
++n;
printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
printf("%s: Number registers: %5lld (includes %lld filtered registers)\n",
config_name(c), reg_list->n, reg_list->n - n);
}
if (new_regs) {
printf("\n%s: There are %d new registers.\n"
"Consider adding them to the blessed reg "
"list with the following lines:\n\n", config_name(c), new_regs);
for_each_new_reg(i)
print_reg(config_name(c), reg_list->reg[i]);
putchar('\n');
}
if (missing_regs) {
printf("\n%s: There are %d missing registers.\n"
"The following lines are missing registers:\n\n", config_name(c), missing_regs);
for_each_missing_reg(i)
print_reg(config_name(c), blessed_reg[i]);
putchar('\n');
}
TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
"%s: There are %d missing registers; %d registers failed get; "
"%d registers failed set; %d registers failed reject; %d registers skipped set",
config_name(c), missing_regs, failed_get, failed_set, failed_reject, skipped_set);
pr_info("%s: PASS\n", config_name(c));
blessed_n = 0;
free(blessed_reg);
free(reg_list);
kvm_vm_free(vm);
}
static void help(void)
{
struct vcpu_reg_list *c;
int i;
printf(
"\n"
"usage: get-reg-list [--config=<selection>] [--list] [--list-filtered]\n\n"
" --config=<selection> Used to select a specific vcpu configuration for the test/listing\n"
" '<selection>' may be\n");
for (i = 0; i < vcpu_configs_n; ++i) {
c = vcpu_configs[i];
printf(
" '%s'\n", config_name(c));
}
printf(
"\n"
" --list Print the register list rather than test it (requires --config)\n"
" --list-filtered Print registers that would normally be filtered out (requires --config)\n"
"\n"
);
}
static struct vcpu_reg_list *parse_config(const char *config)
{
struct vcpu_reg_list *c = NULL;
int i;
if (config[8] != '=')
help(), exit(1);
for (i = 0; i < vcpu_configs_n; ++i) {
c = vcpu_configs[i];
if (strcmp(config_name(c), &config[9]) == 0)
break;
}
if (i == vcpu_configs_n)
help(), exit(1);
return c;
}
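/*
 * Fork one child per (selected) vcpu config so that a failure or skip
 * in one config does not prevent the others from running.
 */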
int main(int ac, char **av)
{
struct vcpu_reg_list *c, *sel = NULL;
int i, ret = 0;
pid_t pid;
for (i = 1; i < ac; ++i) {
if (strncmp(av[i], "--config", 8) == 0)
sel = parse_config(av[i]);
else if (strcmp(av[i], "--list") == 0)
print_list = true;
else if (strcmp(av[i], "--list-filtered") == 0)
print_filtered = true;
else if (strcmp(av[i], "--help") == 0 || strcmp(av[1], "-h") == 0)
help(), exit(0);
else
help(), exit(1);
}
if (print_list || print_filtered) {
/*
* We only want to print the register list of a single config.
*/
if (!sel)
help(), exit(1);
}
for (i = 0; i < vcpu_configs_n; ++i) {
c = vcpu_configs[i];
if (sel && c != sel)
continue;
pid = fork();
if (!pid) {
run_test(c);
exit(0);
} else {
int wstatus;
pid_t wpid = wait(&wstatus);
TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return");
if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP)
ret = KSFT_FAIL;
}
}
return ret;
}
| linux-master | tools/testing/selftests/kvm/get-reg-list.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KVM demand paging test
* Adapted from dirty_log_test.c
*
* Copyright (C) 2018, Red Hat, Inc.
* Copyright (C) 2019, Google, Inc.
*/
#define _GNU_SOURCE /* for pipe2 */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <poll.h>
#include <pthread.h>
#include <linux/userfaultfd.h>
#include <sys/syscall.h>
#include "kvm_util.h"
#include "test_util.h"
#include "memstress.h"
#include "guest_modes.h"
#include "userfaultfd_util.h"
#ifdef __NR_userfaultfd
static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static size_t demand_paging_size;
static char *guest_data_prototype;
static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
int vcpu_idx = vcpu_args->vcpu_idx;
struct kvm_run *run = vcpu->run;
struct timespec start;
struct timespec ts_diff;
int ret;
clock_gettime(CLOCK_MONOTONIC, &start);
/* Let the guest access its memory */
ret = _vcpu_run(vcpu);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
if (get_ucall(vcpu, NULL) != UCALL_SYNC) {
TEST_ASSERT(false,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
}
ts_diff = timespec_elapsed(start);
PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_idx,
ts_diff.tv_sec, ts_diff.tv_nsec);
}
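/*
 * Resolve one userfaultfd event: UFFDIO_COPY the prototype pattern in
 * for MISSING mode, or UFFDIO_CONTINUE the already-populated page cache
 * contents for MINOR mode, timing each page-in for the debug output.
 */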
static int handle_uffd_page_request(int uffd_mode, int uffd,
struct uffd_msg *msg)
{
pid_t tid = syscall(__NR_gettid);
uint64_t addr = msg->arg.pagefault.address;
struct timespec start;
struct timespec ts_diff;
int r;
clock_gettime(CLOCK_MONOTONIC, &start);
if (uffd_mode == UFFDIO_REGISTER_MODE_MISSING) {
struct uffdio_copy copy;
copy.src = (uint64_t)guest_data_prototype;
copy.dst = addr;
copy.len = demand_paging_size;
copy.mode = 0;
r = ioctl(uffd, UFFDIO_COPY, ©);
if (r == -1) {
pr_info("Failed UFFDIO_COPY in 0x%lx from thread %d with errno: %d\n",
addr, tid, errno);
return r;
}
} else if (uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
struct uffdio_continue cont = {0};
cont.range.start = addr;
cont.range.len = demand_paging_size;
r = ioctl(uffd, UFFDIO_CONTINUE, &cont);
if (r == -1) {
pr_info("Failed UFFDIO_CONTINUE in 0x%lx from thread %d with errno: %d\n",
addr, tid, errno);
return r;
}
} else {
TEST_FAIL("Invalid uffd mode %d", uffd_mode);
}
ts_diff = timespec_elapsed(start);
PER_PAGE_DEBUG("UFFD page-in %d \t%ld ns\n", tid,
timespec_to_ns(ts_diff));
PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
demand_paging_size, addr, tid);
return 0;
}
struct test_params {
int uffd_mode;
useconds_t uffd_delay;
enum vm_mem_backing_src_type src_type;
bool partition_vcpu_memory_access;
};
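/*
 * MINOR faults require the page cache to be populated in advance, so
 * fill the backing memory through its alias mapping before starting.
 */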
static void prefault_mem(void *alias, uint64_t len)
{
size_t p;
TEST_ASSERT(alias != NULL, "Alias required for minor faults");
for (p = 0; p < (len / demand_paging_size); ++p) {
memcpy(alias + (p * demand_paging_size),
guest_data_prototype, demand_paging_size);
}
}
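/*
 * Set up the VM, prefault the alias mapping for MINOR mode, attach one
 * uffd handler per vCPU memory region, then measure how long the vCPUs
 * take to fault in all of their demand-paged memory.
 */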
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct memstress_vcpu_args *vcpu_args;
struct test_params *p = arg;
struct uffd_desc **uffd_descs = NULL;
struct timespec start;
struct timespec ts_diff;
struct kvm_vm *vm;
int i;
vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
p->src_type, p->partition_vcpu_memory_access);
demand_paging_size = get_backing_src_pagesz(p->src_type);
guest_data_prototype = malloc(demand_paging_size);
TEST_ASSERT(guest_data_prototype,
"Failed to allocate buffer for guest data pattern");
memset(guest_data_prototype, 0xAB, demand_paging_size);
if (p->uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
for (i = 0; i < nr_vcpus; i++) {
vcpu_args = &memstress_args.vcpu_args[i];
prefault_mem(addr_gpa2alias(vm, vcpu_args->gpa),
vcpu_args->pages * memstress_args.guest_page_size);
}
}
if (p->uffd_mode) {
uffd_descs = malloc(nr_vcpus * sizeof(struct uffd_desc *));
TEST_ASSERT(uffd_descs, "Memory allocation failed");
for (i = 0; i < nr_vcpus; i++) {
void *vcpu_hva;
vcpu_args = &memstress_args.vcpu_args[i];
/* Cache the host addresses of the region */
vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
/*
* Set up user fault fd to handle demand paging
* requests.
*/
uffd_descs[i] = uffd_setup_demand_paging(
p->uffd_mode, p->uffd_delay, vcpu_hva,
vcpu_args->pages * memstress_args.guest_page_size,
&handle_uffd_page_request);
}
}
pr_info("Finished creating vCPUs and starting uffd threads\n");
clock_gettime(CLOCK_MONOTONIC, &start);
memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
pr_info("Started all vCPUs\n");
memstress_join_vcpu_threads(nr_vcpus);
ts_diff = timespec_elapsed(start);
pr_info("All vCPU threads joined\n");
if (p->uffd_mode) {
/* Tell the user fault fd handler threads to quit */
for (i = 0; i < nr_vcpus; i++)
uffd_stop_demand_paging(uffd_descs[i]);
}
pr_info("Total guest execution time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
pr_info("Overall demand paging rate: %f pgs/sec\n",
memstress_args.vcpu_args[0].pages * nr_vcpus /
((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / NSEC_PER_SEC));
memstress_destroy_vm(vm);
free(guest_data_prototype);
if (p->uffd_mode)
free(uffd_descs);
}
static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"
" [-b memory] [-s type] [-v vcpus] [-c cpu_list] [-o]\n", name);
guest_modes_help();
printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
" UFFD registration mode: 'MISSING' or 'MINOR'.\n");
kvm_print_vcpu_pinning_help();
printf(" -d: add a delay in usec to the User Fault\n"
" FD handler to simulate demand paging\n"
" overheads. Ignored without -u.\n");
printf(" -b: specify the size of the memory region which should be\n"
" demand paged by each vCPU. e.g. 10M or 3G.\n"
" Default: 1G\n");
backing_src_help("-s");
printf(" -v: specify the number of vCPUs to run.\n");
printf(" -o: Overlap guest memory accesses instead of partitioning\n"
" them into a separate region of memory for each vCPU.\n");
puts("");
exit(0);
}
int main(int argc, char *argv[])
{
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
const char *cpulist = NULL;
struct test_params p = {
.src_type = DEFAULT_VM_MEM_SRC,
.partition_vcpu_memory_access = true,
};
int opt;
guest_modes_append_default();
while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:c:o")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
break;
case 'u':
if (!strcmp("MISSING", optarg))
p.uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
else if (!strcmp("MINOR", optarg))
p.uffd_mode = UFFDIO_REGISTER_MODE_MINOR;
TEST_ASSERT(p.uffd_mode, "UFFD mode must be 'MISSING' or 'MINOR'.");
break;
		case 'd':
			/* useconds_t is unsigned: reject a negative value before strtoul() wraps it */
			TEST_ASSERT(optarg[0] != '-', "A negative UFFD delay is not supported.");
			p.uffd_delay = strtoul(optarg, NULL, 0);
break;
case 'b':
guest_percpu_mem_size = parse_size(optarg);
break;
case 's':
p.src_type = parse_backing_src_type(optarg);
break;
case 'v':
nr_vcpus = atoi_positive("Number of vCPUs", optarg);
TEST_ASSERT(nr_vcpus <= max_vcpus,
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
break;
case 'c':
cpulist = optarg;
break;
case 'o':
p.partition_vcpu_memory_access = false;
break;
case 'h':
default:
help(argv[0]);
break;
}
}
if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
!backing_src_is_shared(p.src_type)) {
TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");
}
if (cpulist) {
kvm_parse_vcpu_pinning(cpulist, memstress_args.vcpu_to_pcpu,
nr_vcpus);
memstress_args.pin_vcpus = true;
}
for_each_guest_mode(run_test, &p);
return 0;
}
#else /* __NR_userfaultfd */
#warning "missing __NR_userfaultfd definition"
int main(void)
{
print_skip("__NR_userfaultfd must be present for userfaultfd test");
return KSFT_SKIP;
}
#endif /* __NR_userfaultfd */
| linux-master | tools/testing/selftests/kvm/demand_paging_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KVM memslot modification stress test
* Adapted from demand_paging_test.c
*
* Copyright (C) 2018, Red Hat, Inc.
* Copyright (C) 2020, Google, Inc.
*/
#define _GNU_SOURCE /* for program_invocation_name */
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <time.h>
#include <poll.h>
#include <pthread.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/userfaultfd.h>
#include "memstress.h"
#include "processor.h"
#include "test_util.h"
#include "guest_modes.h"
#define DUMMY_MEMSLOT_INDEX 7
#define DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS 10
static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
struct kvm_run *run;
int ret;
run = vcpu->run;
/* Let the guest access its memory until a stop signal is received */
while (!READ_ONCE(memstress_args.stop_vcpus)) {
ret = _vcpu_run(vcpu);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
if (get_ucall(vcpu, NULL) == UCALL_SYNC)
continue;
TEST_ASSERT(false,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
}
}
struct memslot_antagonist_args {
struct kvm_vm *vm;
useconds_t delay;
uint64_t nr_modifications;
};
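/*
 * The memslot antagonist: repeatedly add and delete a dummy memslot
 * just below the memstress region while the vCPUs are running, sleeping
 * "delay" usec before each iteration.
 */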
static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
uint64_t nr_modifications)
{
uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size;
uint64_t gpa;
int i;
/*
* Add the dummy memslot just below the memstress memslot, which is
* at the top of the guest physical address space.
*/
gpa = memstress_args.gpa - pages * vm->page_size;
for (i = 0; i < nr_modifications; i++) {
usleep(delay);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,
DUMMY_MEMSLOT_INDEX, pages, 0);
vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX);
}
}
struct test_params {
useconds_t delay;
uint64_t nr_iterations;
bool partition_vcpu_memory_access;
};
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
struct kvm_vm *vm;
vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
VM_MEM_SRC_ANONYMOUS,
p->partition_vcpu_memory_access);
pr_info("Finished creating vCPUs\n");
memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
pr_info("Started all vCPUs\n");
add_remove_memslot(vm, p->delay, p->nr_iterations);
memstress_join_vcpu_threads(nr_vcpus);
pr_info("All vCPU threads joined\n");
memstress_destroy_vm(vm);
}
static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-m mode] [-d delay_usec]\n"
" [-b memory] [-v vcpus] [-o] [-i iterations]\n", name);
guest_modes_help();
printf(" -d: add a delay between each iteration of adding and\n"
" deleting a memslot in usec.\n");
printf(" -b: specify the size of the memory region which should be\n"
" accessed by each vCPU. e.g. 10M or 3G.\n"
" Default: 1G\n");
printf(" -v: specify the number of vCPUs to run.\n");
printf(" -o: Overlap guest memory accesses instead of partitioning\n"
" them into a separate region of memory for each vCPU.\n");
printf(" -i: specify the number of iterations of adding and removing\n"
" a memslot.\n"
" Default: %d\n", DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS);
puts("");
exit(0);
}
int main(int argc, char *argv[])
{
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
int opt;
struct test_params p = {
.delay = 0,
.nr_iterations = DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS,
.partition_vcpu_memory_access = true
};
guest_modes_append_default();
while ((opt = getopt(argc, argv, "hm:d:b:v:oi:")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
break;
case 'd':
p.delay = atoi_non_negative("Delay", optarg);
break;
case 'b':
guest_percpu_mem_size = parse_size(optarg);
break;
case 'v':
nr_vcpus = atoi_positive("Number of vCPUs", optarg);
TEST_ASSERT(nr_vcpus <= max_vcpus,
"Invalid number of vcpus, must be between 1 and %d",
max_vcpus);
break;
case 'o':
p.partition_vcpu_memory_access = false;
break;
case 'i':
p.nr_iterations = atoi_positive("Number of iterations", optarg);
break;
case 'h':
default:
help(argv[0]);
break;
}
}
for_each_guest_mode(run_test, &p);
return 0;
}
| linux-master | tools/testing/selftests/kvm/memslot_modification_stress_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KVM dirty page logging test
*
* Copyright (C) 2018, Red Hat, Inc.
*/
#define _GNU_SOURCE /* for program_invocation_name */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"
#define DIRTY_MEM_BITS 30 /* 1G */
#define PAGE_SHIFT_4K 12
/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX 1
/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM 0xc0000000
/* How many pages to dirty for each guest loop */
#define TEST_PAGES_PER_LOOP 1024
/* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N 32UL
/* Interval for each host loop (ms) */
#define TEST_HOST_LOOP_INTERVAL 10UL
/* Dirty bitmaps are always little endian, so we need to swap on big endian */
#if defined(__s390x__)
# define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
# define test_bit_le(nr, addr) \
test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define __set_bit_le(nr, addr) \
__set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define __clear_bit_le(nr, addr) \
__clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define __test_and_set_bit_le(nr, addr) \
__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define __test_and_clear_bit_le(nr, addr) \
__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
#else
# define test_bit_le test_bit
# define __set_bit_le __set_bit
# define __clear_bit_le __clear_bit
# define __test_and_set_bit_le __test_and_set_bit
# define __test_and_clear_bit_le __test_and_clear_bit
#endif
#define TEST_DIRTY_RING_COUNT 65536
#define SIG_IPI SIGUSR1
/*
* Guest/Host shared variables. Ensure addr_gva2hva() and/or
* sync_global_to/from_guest() are used when accessing from
* the host. READ/WRITE_ONCE() should also be used with anything
* that may change.
*/
static uint64_t host_page_size;
static uint64_t guest_page_size;
static uint64_t guest_num_pages;
static uint64_t random_array[TEST_PAGES_PER_LOOP];
static uint64_t iteration;
/*
* Guest physical memory offset of the testing memory slot.
* This will be set to the topmost valid physical address minus
* the test memory size.
*/
static uint64_t guest_test_phys_mem;
/*
* Guest virtual memory offset of the testing memory slot.
* Must not conflict with identity mapped test code.
*/
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
/*
 * Continuously write to the first 8 bytes of random pages within
* the testing memory region.
*/
static void guest_code(void)
{
uint64_t addr;
int i;
/*
* On s390x, all pages of a 1M segment are initially marked as dirty
* when a page of the segment is written to for the very first time.
 * To compensate for this peculiarity in this test, we need to touch all
* pages during the first iteration.
*/
for (i = 0; i < guest_num_pages; i++) {
addr = guest_test_virt_mem + i * guest_page_size;
*(uint64_t *)addr = READ_ONCE(iteration);
}
while (true) {
for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
addr = guest_test_virt_mem;
addr += (READ_ONCE(random_array[i]) % guest_num_pages)
* guest_page_size;
addr = align_down(addr, host_page_size);
*(uint64_t *)addr = READ_ONCE(iteration);
}
/* Tell the host that we need more random numbers */
GUEST_SYNC(1);
}
}
/* Host variables */
static bool host_quit;
/* Points to the test VM memory region on which we track dirty logs */
static void *host_test_mem;
static uint64_t host_num_pages;
/* For statistics only */
static uint64_t host_dirty_count;
static uint64_t host_clear_count;
static uint64_t host_track_next_count;
/* Whether dirty ring reset is requested, or finished */
static sem_t sem_vcpu_stop;
static sem_t sem_vcpu_cont;
/*
* This is only set by main thread, and only cleared by vcpu thread. It is
* used to request vcpu thread to stop at the next GUEST_SYNC, since GUEST_SYNC
* is the only place that we'll guarantee both "dirty bit" and "dirty data"
* will match. E.g., SIG_IPI won't guarantee that if the vcpu is interrupted
* after setting dirty bit but before the data is written.
*/
static atomic_t vcpu_sync_stop_requested;
/*
* This is updated by the vcpu thread to tell the host whether it's a
 * ring-full event. It should only be read after a sem_wait() of
 * sem_vcpu_stop returns and before the vcpu continues to run.
*/
static bool dirty_ring_vcpu_ring_full;
/*
* This is only used for verifying the dirty pages. Dirty ring has a very
* tricky case when the ring just got full, kvm will do userspace exit due to
* ring full. When that happens, the very last PFN is set but actually the
* data is not changed (the guest WRITE is not really applied yet), because
* we found that the dirty ring is full, refused to continue the vcpu, and
* recorded the dirty gfn with the old contents.
*
* For this specific case, it's safe to skip checking this pfn for this
* bit, because it's a redundant bit, and when the write happens later the bit
* will be set again. We use this variable to always keep track of the latest
 * dirty gfn we've collected, so that if a data mismatch is found later
 * in the verification process, we let it pass.
*/
static uint64_t dirty_ring_last_page;
enum log_mode_t {
/* Only use KVM_GET_DIRTY_LOG for logging */
LOG_MODE_DIRTY_LOG = 0,
/* Use both KVM_[GET|CLEAR]_DIRTY_LOG for logging */
LOG_MODE_CLEAR_LOG = 1,
/* Use dirty ring for logging */
LOG_MODE_DIRTY_RING = 2,
LOG_MODE_NUM,
/* Run all supported modes */
LOG_MODE_ALL = LOG_MODE_NUM,
};
/* Mode of logging to test. Default is to run all supported modes */
static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
/* Logging mode for current run */
static enum log_mode_t host_log_mode;
static pthread_t vcpu_thread;
static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;
static void vcpu_kick(void)
{
pthread_kill(vcpu_thread, SIG_IPI);
}
/*
* In our test we do signal tricks, let's use a better version of
* sem_wait to avoid signal interrupts
*/
static void sem_wait_until(sem_t *sem)
{
int ret;
do
ret = sem_wait(sem);
while (ret == -1 && errno == EINTR);
}
static bool clear_log_supported(void)
{
return kvm_has_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
}
static void clear_log_create_vm_done(struct kvm_vm *vm)
{
u64 manual_caps;
manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
KVM_DIRTY_LOG_INITIALLY_SET);
vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, manual_caps);
}
static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
void *bitmap, uint32_t num_pages,
uint32_t *unused)
{
kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
}
static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
void *bitmap, uint32_t num_pages,
uint32_t *unused)
{
kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
}
/* Should only be called after a GUEST_SYNC */
static void vcpu_handle_sync_stop(void)
{
if (atomic_read(&vcpu_sync_stop_requested)) {
		/* The main thread is sleeping, waiting for us to stop */
atomic_set(&vcpu_sync_stop_requested, false);
sem_post(&sem_vcpu_stop);
sem_wait_until(&sem_vcpu_cont);
}
}
static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
struct kvm_run *run = vcpu->run;
TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
"vcpu run failed: errno=%d", err);
TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
vcpu_handle_sync_stop();
}
static bool dirty_ring_supported(void)
{
return (kvm_has_cap(KVM_CAP_DIRTY_LOG_RING) ||
kvm_has_cap(KVM_CAP_DIRTY_LOG_RING_ACQ_REL));
}
static void dirty_ring_create_vm_done(struct kvm_vm *vm)
{
uint64_t pages;
uint32_t limit;
/*
* We rely on vcpu exit due to full dirty ring state. Adjust
* the ring buffer size to ensure we're able to reach the
* full dirty ring state.
*/
pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
pages = vm_adjust_num_guest_pages(vm->mode, pages);
if (vm->page_size < getpagesize())
pages = vm_num_host_pages(vm->mode, pages);
limit = 1 << (31 - __builtin_clz(pages));
test_dirty_ring_count = 1 << (31 - __builtin_clz(test_dirty_ring_count));
test_dirty_ring_count = min(limit, test_dirty_ring_count);
pr_info("dirty ring count: 0x%x\n", test_dirty_ring_count);
/*
* Switch to dirty ring mode after VM creation but before any
* of the vcpu creation.
*/
vm_enable_dirty_ring(vm, test_dirty_ring_count *
sizeof(struct kvm_dirty_gfn));
}
static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
return smp_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}
static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}
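/*
 * Harvest all dirty GFNs published in the ring since *fetch_index: set
 * each one in the bitmap, mark the entry as collected so the kernel can
 * reset it, and return the number of entries consumed.
 */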
static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
int slot, void *bitmap,
uint32_t num_pages, uint32_t *fetch_index)
{
struct kvm_dirty_gfn *cur;
uint32_t count = 0;
while (true) {
cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
if (!dirty_gfn_is_dirtied(cur))
break;
TEST_ASSERT(cur->slot == slot, "Slot number didn't match: "
"%u != %u", cur->slot, slot);
TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
"0x%llx >= 0x%x", cur->offset, num_pages);
//pr_info("fetch 0x%x page %llu\n", *fetch_index, cur->offset);
__set_bit_le(cur->offset, bitmap);
dirty_ring_last_page = cur->offset;
dirty_gfn_set_collected(cur);
(*fetch_index)++;
count++;
}
return count;
}
static void dirty_ring_wait_vcpu(void)
{
	/* This makes sure that hardware PML cache is flushed */
vcpu_kick();
sem_wait_until(&sem_vcpu_stop);
}
static void dirty_ring_continue_vcpu(void)
{
pr_info("Notifying vcpu to continue\n");
sem_post(&sem_vcpu_cont);
}
static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
void *bitmap, uint32_t num_pages,
uint32_t *ring_buf_idx)
{
uint32_t count = 0, cleared;
bool continued_vcpu = false;
dirty_ring_wait_vcpu();
if (!dirty_ring_vcpu_ring_full) {
/*
* This is not a ring-full event, it's safe to allow
* vcpu to continue
*/
dirty_ring_continue_vcpu();
continued_vcpu = true;
}
/* Only have one vcpu */
count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
slot, bitmap, num_pages,
ring_buf_idx);
cleared = kvm_vm_reset_dirty_ring(vcpu->vm);
/* Cleared pages should be the same as collected */
TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
"with collected (%u)", cleared, count);
if (!continued_vcpu) {
TEST_ASSERT(dirty_ring_vcpu_ring_full,
"Didn't continue vcpu even without ring full");
dirty_ring_continue_vcpu();
}
pr_info("Iteration %ld collected %u pages\n", iteration, count);
}
static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
struct kvm_run *run = vcpu->run;
/* A ucall-sync or ring-full event is allowed */
if (get_ucall(vcpu, NULL) == UCALL_SYNC) {
/* We should allow this to continue */
;
} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
(ret == -1 && err == EINTR)) {
/* Update the flag first before pause */
WRITE_ONCE(dirty_ring_vcpu_ring_full,
run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
sem_post(&sem_vcpu_stop);
pr_info("vcpu stops because %s...\n",
dirty_ring_vcpu_ring_full ?
"dirty ring is full" : "vcpu is kicked out");
sem_wait_until(&sem_vcpu_cont);
pr_info("vcpu continues now.\n");
} else {
TEST_ASSERT(false, "Invalid guest sync status: "
"exit_reason=%s\n",
exit_reason_str(run->exit_reason));
}
}
static void dirty_ring_before_vcpu_join(void)
{
	/* Unblock the vcpu (it may be waiting on sem_vcpu_cont) so it can observe host_quit */
sem_post(&sem_vcpu_cont);
}
struct log_mode {
const char *name;
/* Return true if this mode is supported, otherwise false */
bool (*supported)(void);
/* Hook when the vm creation is done (before vcpu creation) */
void (*create_vm_done)(struct kvm_vm *vm);
/* Hook to collect the dirty pages into the bitmap provided */
void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot,
void *bitmap, uint32_t num_pages,
uint32_t *ring_buf_idx);
	/* Hook to call after each vcpu run */
	void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
	/* Hook to call right before waiting for the vcpu thread to join */
	void (*before_vcpu_join)(void);
} log_modes[LOG_MODE_NUM] = {
{
.name = "dirty-log",
.collect_dirty_pages = dirty_log_collect_dirty_pages,
.after_vcpu_run = default_after_vcpu_run,
},
{
.name = "clear-log",
.supported = clear_log_supported,
.create_vm_done = clear_log_create_vm_done,
.collect_dirty_pages = clear_log_collect_dirty_pages,
.after_vcpu_run = default_after_vcpu_run,
},
{
.name = "dirty-ring",
.supported = dirty_ring_supported,
.create_vm_done = dirty_ring_create_vm_done,
.collect_dirty_pages = dirty_ring_collect_dirty_pages,
.before_vcpu_join = dirty_ring_before_vcpu_join,
.after_vcpu_run = dirty_ring_after_vcpu_run,
},
};
/*
 * We use this bitmap to track pages that should have their dirty
 * bit set in the _next_ iteration. For example, if we detect that a
 * page's value changed to the current iteration number but at the same
 * time the page's bit is cleared in the latest bitmap, then the system
 * must report that write in the next get-dirty-log call.
*/
static unsigned long *host_bmap_track;
static void log_modes_dump(void)
{
int i;
printf("all");
for (i = 0; i < LOG_MODE_NUM; i++)
printf(", %s", log_modes[i].name);
printf("\n");
}
static bool log_mode_supported(void)
{
struct log_mode *mode = &log_modes[host_log_mode];
if (mode->supported)
return mode->supported();
return true;
}
static void log_mode_create_vm_done(struct kvm_vm *vm)
{
struct log_mode *mode = &log_modes[host_log_mode];
if (mode->create_vm_done)
mode->create_vm_done(vm);
}
static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
void *bitmap, uint32_t num_pages,
uint32_t *ring_buf_idx)
{
struct log_mode *mode = &log_modes[host_log_mode];
TEST_ASSERT(mode->collect_dirty_pages != NULL,
"collect_dirty_pages() is required for any log mode!");
mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages, ring_buf_idx);
}
static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
struct log_mode *mode = &log_modes[host_log_mode];
if (mode->after_vcpu_run)
mode->after_vcpu_run(vcpu, ret, err);
}
static void log_mode_before_vcpu_join(void)
{
struct log_mode *mode = &log_modes[host_log_mode];
if (mode->before_vcpu_join)
mode->before_vcpu_join();
}
static void generate_random_array(uint64_t *guest_array, uint64_t size)
{
uint64_t i;
for (i = 0; i < size; i++)
guest_array[i] = random();
}
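/*
 * The vCPU worker thread: keep re-entering KVM_RUN with a signal mask
 * that has SIG_IPI unblocked only inside the ioctl (see below), handle
 * the resulting -EINTR kicks from the main thread, and hand every exit
 * to the active log mode's after_vcpu_run hook.
 */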
static void *vcpu_worker(void *data)
{
int ret;
struct kvm_vcpu *vcpu = data;
struct kvm_vm *vm = vcpu->vm;
uint64_t *guest_array;
uint64_t pages_count = 0;
struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
+ sizeof(sigset_t));
sigset_t *sigset = (sigset_t *) &sigmask->sigset;
/*
* SIG_IPI is unblocked atomically while in KVM_RUN. It causes the
* ioctl to return with -EINTR, but it is still pending and we need
* to accept it with the sigwait.
*/
sigmask->len = 8;
pthread_sigmask(0, NULL, sigset);
sigdelset(sigset, SIG_IPI);
vcpu_ioctl(vcpu, KVM_SET_SIGNAL_MASK, sigmask);
sigemptyset(sigset);
sigaddset(sigset, SIG_IPI);
guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);
while (!READ_ONCE(host_quit)) {
		/* Refill the random page indices consumed by the guest */
generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
pages_count += TEST_PAGES_PER_LOOP;
/* Let the guest dirty the random pages */
ret = __vcpu_run(vcpu);
if (ret == -1 && errno == EINTR) {
int sig = -1;
sigwait(sigset, &sig);
assert(sig == SIG_IPI);
}
log_mode_after_vcpu_run(vcpu, ret, errno);
}
pr_info("Dirtied %"PRIu64" pages\n", pages_count);
return NULL;
}
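/*
 * Verify the collected dirty bitmap against the values the guest
 * actually wrote, remembering pages whose write must instead show up
 * as dirty in the next iteration (see host_bmap_track).
 */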
static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
{
uint64_t step = vm_num_host_pages(mode, 1);
uint64_t page;
uint64_t *value_ptr;
uint64_t min_iter = 0;
for (page = 0; page < host_num_pages; page += step) {
value_ptr = host_test_mem + page * host_page_size;
/* If this is a special page that we were tracking... */
if (__test_and_clear_bit_le(page, host_bmap_track)) {
host_track_next_count++;
TEST_ASSERT(test_bit_le(page, bmap),
"Page %"PRIu64" should have its dirty bit "
"set in this iteration but it is missing",
page);
}
if (__test_and_clear_bit_le(page, bmap)) {
bool matched;
host_dirty_count++;
/*
* If the bit is set, the value written onto
* the corresponding page should be either the
* previous iteration number or the current one.
*/
matched = (*value_ptr == iteration ||
*value_ptr == iteration - 1);
if (host_log_mode == LOG_MODE_DIRTY_RING && !matched) {
if (*value_ptr == iteration - 2 && min_iter <= iteration - 2) {
/*
* Short answer: this case is special
* only for dirty ring test where the
* page is the last page before a kvm
* dirty ring full in iteration N-2.
*
* Long answer: Assuming ring size R,
* one possible condition is:
*
* main thr vcpu thr
* -------- --------
* iter=1
* write 1 to page 0~(R-1)
* full, vmexit
* collect 0~(R-1)
* kick vcpu
* write 1 to (R-1)~(2R-2)
* full, vmexit
* iter=2
* collect (R-1)~(2R-2)
* kick vcpu
* write 1 to (2R-2)
* (NOTE!!! "1" cached in cpu reg)
* write 2 to (2R-1)~(3R-3)
* full, vmexit
* iter=3
* collect (2R-2)~(3R-3)
* (here if we read value on page
* "2R-2" is 1, while iter=3!!!)
*
* This however can only happen once per iteration.
*/
min_iter = iteration - 1;
continue;
} else if (page == dirty_ring_last_page) {
/*
* Please refer to comments in
* dirty_ring_last_page.
*/
continue;
}
}
TEST_ASSERT(matched,
"Set page %"PRIu64" value %"PRIu64
" incorrect (iteration=%"PRIu64")",
page, *value_ptr, iteration);
} else {
host_clear_count++;
/*
* If cleared, the value written can be any
* value smaller or equals to the iteration
* number. Note that the value can be exactly
* (iteration-1) if that write can happen
* like this:
*
* (1) increase loop count to "iteration-1"
* (2) write to page P happens (with value
* "iteration-1")
* (3) get dirty log for "iteration-1"; we'll
* see that page P bit is set (dirtied),
* and not set the bit in host_bmap_track
* (4) increase loop count to "iteration"
* (which is current iteration)
* (5) get dirty log for current iteration,
* we'll see that page P is cleared, with
* value "iteration-1".
*/
TEST_ASSERT(*value_ptr <= iteration,
"Clear page %"PRIu64" value %"PRIu64
" incorrect (iteration=%"PRIu64")",
page, *value_ptr, iteration);
if (*value_ptr == iteration) {
/*
* This page is _just_ modified; it
				 * should report its dirtiness in the
* next run
*/
__set_bit_le(page, host_bmap_track);
}
}
}
}
static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
uint64_t extra_mem_pages, void *guest_code)
{
struct kvm_vm *vm;
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
vm = __vm_create(mode, 1, extra_mem_pages);
log_mode_create_vm_done(vm);
*vcpu = vm_vcpu_add(vm, 0, guest_code);
return vm;
}
struct test_params {
unsigned long iterations;
unsigned long interval;
uint64_t phys_offset;
};
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
unsigned long *bmap;
uint32_t ring_buf_idx = 0;
if (!log_mode_supported()) {
print_skip("Log mode '%s' not supported",
log_modes[host_log_mode].name);
return;
}
/*
	 * We reserve page tables for twice the amount of extra dirty memory,
	 * which will definitely cover the original (1G+) test range. Here
	 * we do the calculation with a 4K page size, which is the smallest,
	 * so the page count will be enough for all archs (e.g., a 64K page
	 * size guest will need even less memory for page tables).
*/
vm = create_vm(mode, &vcpu,
2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K), guest_code);
guest_page_size = vm->page_size;
	/*
	 * A little more than 1G worth of guest pages. This also covers
	 * the case where the size is not aligned to 64 pages.
	 */
guest_num_pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
host_page_size = getpagesize();
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
if (!p->phys_offset) {
guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
guest_page_size;
guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size);
} else {
guest_test_phys_mem = p->phys_offset;
}
#ifdef __s390x__
/* Align to 1M (segment size) */
guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);
#endif
pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
bmap = bitmap_zalloc(host_num_pages);
host_bmap_track = bitmap_zalloc(host_num_pages);
/* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
guest_test_phys_mem,
TEST_MEM_SLOT_INDEX,
guest_num_pages,
KVM_MEM_LOG_DIRTY_PAGES);
/* Do mapping for the dirty track memory slot */
virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
/* Cache the HVA pointer of the region */
host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
/* Export the shared variables to the guest */
sync_global_to_guest(vm, host_page_size);
sync_global_to_guest(vm, guest_page_size);
sync_global_to_guest(vm, guest_test_virt_mem);
sync_global_to_guest(vm, guest_num_pages);
/* Start the iterations */
iteration = 1;
sync_global_to_guest(vm, iteration);
host_quit = false;
host_dirty_count = 0;
host_clear_count = 0;
host_track_next_count = 0;
WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
while (iteration < p->iterations) {
/* Give the vcpu thread some time to dirty some pages */
usleep(p->interval * 1000);
log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
bmap, host_num_pages,
&ring_buf_idx);
/*
* See vcpu_sync_stop_requested definition for details on why
		 * we need to stop the vCPU while verifying data.
*/
atomic_set(&vcpu_sync_stop_requested, true);
sem_wait_until(&sem_vcpu_stop);
/*
* NOTE: for dirty ring, it's possible that we didn't stop at
		 * GUEST_SYNC but instead stopped because the ring is full;
* that's okay too because ring full means we're only missing
* the flush of the last page, and since we handle the last
* page specially verification will succeed anyway.
*/
assert(host_log_mode == LOG_MODE_DIRTY_RING ||
atomic_read(&vcpu_sync_stop_requested) == false);
vm_dirty_log_verify(mode, bmap);
sem_post(&sem_vcpu_cont);
iteration++;
sync_global_to_guest(vm, iteration);
}
/* Tell the vcpu thread to quit */
host_quit = true;
log_mode_before_vcpu_join();
pthread_join(vcpu_thread, NULL);
pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
"track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
host_track_next_count);
free(bmap);
free(host_bmap_track);
kvm_vm_free(vm);
}
static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-i iterations] [-I interval] "
"[-p offset] [-m mode]\n", name);
puts("");
printf(" -c: hint to dirty ring size, in number of entries\n");
printf(" (only useful for dirty-ring test; default: %"PRIu32")\n",
TEST_DIRTY_RING_COUNT);
printf(" -i: specify iteration counts (default: %"PRIu64")\n",
TEST_HOST_LOOP_N);
printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
TEST_HOST_LOOP_INTERVAL);
printf(" -p: specify guest physical test memory offset\n"
" Warning: a low offset can conflict with the loaded test code.\n");
printf(" -M: specify the host logging mode "
"(default: run all log modes). Supported modes: \n\t");
log_modes_dump();
guest_modes_help();
puts("");
exit(0);
}
int main(int argc, char *argv[])
{
struct test_params p = {
.iterations = TEST_HOST_LOOP_N,
.interval = TEST_HOST_LOOP_INTERVAL,
};
int opt, i;
sigset_t sigset;
sem_init(&sem_vcpu_stop, 0, 0);
sem_init(&sem_vcpu_cont, 0, 0);
guest_modes_append_default();
while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
switch (opt) {
case 'c':
test_dirty_ring_count = strtol(optarg, NULL, 10);
break;
case 'i':
p.iterations = strtol(optarg, NULL, 10);
break;
case 'I':
p.interval = strtol(optarg, NULL, 10);
break;
case 'p':
p.phys_offset = strtoull(optarg, NULL, 0);
break;
case 'm':
guest_modes_cmdline(optarg);
break;
case 'M':
if (!strcmp(optarg, "all")) {
host_log_mode_option = LOG_MODE_ALL;
break;
}
for (i = 0; i < LOG_MODE_NUM; i++) {
if (!strcmp(optarg, log_modes[i].name)) {
pr_info("Setting log mode to: '%s'\n",
optarg);
host_log_mode_option = i;
break;
}
}
if (i == LOG_MODE_NUM) {
printf("Log mode '%s' invalid. Please choose "
"from: ", optarg);
log_modes_dump();
exit(1);
}
break;
case 'h':
default:
help(argv[0]);
break;
}
}
TEST_ASSERT(p.iterations > 2, "Iterations must be greater than two");
TEST_ASSERT(p.interval > 0, "Interval must be greater than zero");
pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
p.iterations, p.interval);
srandom(time(0));
/* Ensure that vCPU threads start with SIG_IPI blocked. */
sigemptyset(&sigset);
sigaddset(&sigset, SIG_IPI);
pthread_sigmask(SIG_BLOCK, &sigset, NULL);
if (host_log_mode_option == LOG_MODE_ALL) {
/* Run each log mode */
for (i = 0; i < LOG_MODE_NUM; i++) {
pr_info("Testing Log Mode '%s'\n", log_modes[i].name);
host_log_mode = i;
for_each_guest_mode(run_test, &p);
}
} else {
host_log_mode = host_log_mode_option;
for_each_guest_mode(run_test, &p);
}
return 0;
}
| linux-master | tools/testing/selftests/kvm/dirty_log_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test for s390x KVM_CAP_SYNC_REGS
*
* Based on the same test for x86:
* Copyright (C) 2018, Google LLC.
*
* Adaptions for s390x:
* Copyright (C) 2019, Red Hat, Inc.
*
* Test expected behavior of the KVM_CAP_SYNC_REGS functionality.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "diag318_test_handler.h"
#include "kselftest.h"
static void guest_code(void)
{
/*
	 * We embed diag 501 here instead of doing a ucall to avoid the
	 * compiler having messed with r11 by the time of the hypercall.
*/
asm volatile (
"0: diag 0,0,0x501\n"
" ahi 11,1\n"
" j 0b\n"
);
}
#define REG_COMPARE(reg) \
TEST_ASSERT(left->reg == right->reg, \
"Register " #reg \
" values did not match: 0x%llx, 0x%llx\n", \
left->reg, right->reg)
#define REG_COMPARE32(reg) \
TEST_ASSERT(left->reg == right->reg, \
"Register " #reg \
" values did not match: 0x%x, 0x%x\n", \
left->reg, right->reg)
static void compare_regs(struct kvm_regs *left, struct kvm_sync_regs *right)
{
int i;
for (i = 0; i < 16; i++)
REG_COMPARE(gprs[i]);
}
static void compare_sregs(struct kvm_sregs *left, struct kvm_sync_regs *right)
{
int i;
for (i = 0; i < 16; i++)
REG_COMPARE32(acrs[i]);
for (i = 0; i < 16; i++)
REG_COMPARE(crs[i]);
}
#undef REG_COMPARE
#undef REG_COMPARE32
#define TEST_SYNC_FIELDS (KVM_SYNC_GPRS|KVM_SYNC_ACRS|KVM_SYNC_CRS|KVM_SYNC_DIAG318)
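/* An arbitrary bit no KVM_SYNC_* flag uses; KVM must reject it with EINVAL. */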
#define INVALID_SYNC_FIELD 0x80000000
void test_read_invalid(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
int rv;
/* Request reading invalid register set from VCPU. */
run->kvm_valid_regs = INVALID_SYNC_FIELD;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
run->kvm_valid_regs = 0;
run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
run->kvm_valid_regs = 0;
}
void test_set_invalid(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
int rv;
/* Request setting invalid register set into VCPU. */
run->kvm_dirty_regs = INVALID_SYNC_FIELD;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
run->kvm_dirty_regs = 0;
run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
run->kvm_dirty_regs = 0;
}
void test_req_and_verify_all_valid_regs(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct kvm_sregs sregs;
struct kvm_regs regs;
int rv;
/* Request and verify all valid register sets. */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
TEST_ASSERT(run->s390_sieic.icptcode == 4 &&
(run->s390_sieic.ipa >> 8) == 0x83 &&
(run->s390_sieic.ipb >> 16) == 0x501,
"Unexpected interception code: ic=%u, ipa=0x%x, ipb=0x%x\n",
run->s390_sieic.icptcode, run->s390_sieic.ipa,
run->s390_sieic.ipb);
vcpu_regs_get(vcpu, ®s);
compare_regs(®s, &run->s.regs);
vcpu_sregs_get(vcpu, &sregs);
compare_sregs(&sregs, &run->s.regs);
}
void test_set_and_verify_various_reg_values(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct kvm_sregs sregs;
struct kvm_regs regs;
int rv;
/* Set and verify various register values */
run->s.regs.gprs[11] = 0xBAD1DEA;
run->s.regs.acrs[0] = 1 << 11;
run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = KVM_SYNC_GPRS | KVM_SYNC_ACRS;
if (get_diag318_info() > 0) {
run->s.regs.diag318 = get_diag318_info();
run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
}
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
TEST_ASSERT(run->s.regs.gprs[11] == 0xBAD1DEA + 1,
"r11 sync regs value incorrect 0x%llx.",
run->s.regs.gprs[11]);
TEST_ASSERT(run->s.regs.acrs[0] == 1 << 11,
"acr0 sync regs value incorrect 0x%x.",
run->s.regs.acrs[0]);
TEST_ASSERT(run->s.regs.diag318 == get_diag318_info(),
"diag318 sync regs value incorrect 0x%llx.",
run->s.regs.diag318);
vcpu_regs_get(vcpu, ®s);
compare_regs(®s, &run->s.regs);
vcpu_sregs_get(vcpu, &sregs);
compare_sregs(&sregs, &run->s.regs);
}
void test_clear_kvm_dirty_regs_bits(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
int rv;
/* Clear kvm_dirty_regs bits, verify new s.regs values are
* overwritten with existing guest values.
*/
run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = 0;
run->s.regs.gprs[11] = 0xDEADBEEF;
run->s.regs.diag318 = 0x4B1D;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
TEST_ASSERT(run->s.regs.gprs[11] != 0xDEADBEEF,
"r11 sync regs value incorrect 0x%llx.",
run->s.regs.gprs[11]);
TEST_ASSERT(run->s.regs.diag318 != 0x4B1D,
"diag318 sync regs value incorrect 0x%llx.",
run->s.regs.diag318);
}
struct testdef {
const char *name;
void (*test)(struct kvm_vcpu *vcpu);
} testlist[] = {
{ "read invalid", test_read_invalid },
{ "set invalid", test_set_invalid },
{ "request+verify all valid regs", test_req_and_verify_all_valid_regs },
{ "set+verify various regs", test_set_and_verify_various_reg_values },
{ "clear kvm_dirty_regs bits", test_clear_kvm_dirty_regs_bits },
};
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int idx;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_SYNC_REGS));
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(testlist));
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
testlist[idx].test(vcpu);
ksft_test_result_pass("%s\n", testlist[idx].name);
}
kvm_vm_free(vm);
ksft_finished(); /* Print results and exit() accordingly */
}
| linux-master | tools/testing/selftests/kvm/s390x/sync_regs_test.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test TEST PROTECTION emulation.
*
* Copyright IBM Corp. 2021
*/
#include <sys/mman.h>
#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE];
static uint8_t *const page_store_prot = pages[0];
static uint8_t *const page_fetch_prot = pages[1];
/* A nonzero return value indicates that the address is not mapped */
static int set_storage_key(void *addr, uint8_t key)
{
int not_mapped = 0;
asm volatile (
"lra %[addr], 0(0,%[addr])\n"
" jz 0f\n"
" llill %[not_mapped],1\n"
" j 1f\n"
"0: sske %[key], %[addr]\n"
"1:"
: [addr] "+&a" (addr), [not_mapped] "+r" (not_mapped)
: [key] "r" (key)
: "cc"
);
return -not_mapped;
}
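/*
 * Usage sketch (values as used by guest_code() below): install storage key
 * 0x10 (access-control key 1) and assert that the page was mapped:
 *
 *	GUEST_ASSERT_EQ(set_storage_key(page_store_prot, 0x10), 0);
 */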
enum permission {
READ_WRITE = 0,
READ = 1,
RW_PROTECTED = 2,
TRANSL_UNAVAIL = 3,
};
static enum permission test_protection(void *addr, uint8_t key)
{
uint64_t mask;
asm volatile (
"tprot %[addr], 0(%[key])\n"
" ipm %[mask]\n"
: [mask] "=r" (mask)
: [addr] "Q" (*(char *)addr),
[key] "a" (key)
: "cc"
);
return (enum permission)(mask >> 28);
}
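/*
 * Note: IPM places the condition code in bits 28-29 (counting from the LSB)
 * of the low word, so "mask >> 28" above yields the raw CC, which TEST
 * PROTECTION defines as the permission result.
 */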
enum stage {
STAGE_INIT_SIMPLE,
TEST_SIMPLE,
STAGE_INIT_FETCH_PROT_OVERRIDE,
TEST_FETCH_PROT_OVERRIDE,
TEST_STORAGE_PROT_OVERRIDE,
	STAGE_END /* must be the last entry (it's the number of tests) */
};
struct test {
enum stage stage;
void *addr;
uint8_t key;
enum permission expected;
} tests[] = {
/*
* We perform each test in the array by executing TEST PROTECTION on
* the specified addr with the specified key and checking if the returned
* permissions match the expected value.
* Both guest and host cooperate to set up the required test conditions.
* A central condition is that the page targeted by addr has to be DAT
* protected in the host mappings, in order for KVM to emulate the
* TEST PROTECTION instruction.
* Since the page tables are shared, the host uses mprotect to achieve
* this.
*
	 * Tests resulting in RW_PROTECTED/TRANSL_UNAVAIL will be interpreted
	 * by SIE, not KVM, but there is no harm in testing them as well.
* See Enhanced Suppression-on-Protection Facilities in the
* Interpretive-Execution Mode
*/
/*
* guest: set storage key of page_store_prot to 1
* storage key of page_fetch_prot to 9 and enable
* protection for it
* STAGE_INIT_SIMPLE
* host: write protect both via mprotect
*/
/* access key 0 matches any storage key -> RW */
{ TEST_SIMPLE, page_store_prot, 0x00, READ_WRITE },
/* access key matches storage key -> RW */
{ TEST_SIMPLE, page_store_prot, 0x10, READ_WRITE },
/* mismatched keys, but no fetch protection -> RO */
{ TEST_SIMPLE, page_store_prot, 0x20, READ },
/* access key 0 matches any storage key -> RW */
{ TEST_SIMPLE, page_fetch_prot, 0x00, READ_WRITE },
/* access key matches storage key -> RW */
{ TEST_SIMPLE, page_fetch_prot, 0x90, READ_WRITE },
/* mismatched keys, fetch protection -> inaccessible */
{ TEST_SIMPLE, page_fetch_prot, 0x10, RW_PROTECTED },
/* page 0 not mapped yet -> translation not available */
{ TEST_SIMPLE, (void *)0x00, 0x10, TRANSL_UNAVAIL },
/*
* host: try to map page 0
* guest: set storage key of page 0 to 9 and enable fetch protection
* STAGE_INIT_FETCH_PROT_OVERRIDE
* host: write protect page 0
* enable fetch protection override
*/
/* mismatched keys, fetch protection, but override applies -> RO */
{ TEST_FETCH_PROT_OVERRIDE, (void *)0x00, 0x10, READ },
/* mismatched keys, fetch protection, override applies to 0-2048 only -> inaccessible */
{ TEST_FETCH_PROT_OVERRIDE, (void *)2049, 0x10, RW_PROTECTED },
/*
* host: enable storage protection override
*/
/* mismatched keys, but override applies (storage key 9) -> RW */
{ TEST_STORAGE_PROT_OVERRIDE, page_fetch_prot, 0x10, READ_WRITE },
/* mismatched keys, no fetch protection, override doesn't apply -> RO */
{ TEST_STORAGE_PROT_OVERRIDE, page_store_prot, 0x20, READ },
/* mismatched keys, but override applies (storage key 9) -> RW */
{ TEST_STORAGE_PROT_OVERRIDE, (void *)2049, 0x10, READ_WRITE },
/* end marker */
{ STAGE_END, 0, 0, 0 },
};
static enum stage perform_next_stage(int *i, bool mapped_0)
{
enum stage stage = tests[*i].stage;
enum permission result;
bool skip;
for (; tests[*i].stage == stage; (*i)++) {
		/*
		 * Some fetch protection override tests require that page 0
		 * be mapped; however, when the host tries to map that page
		 * via vm_vaddr_alloc, some other page may get mapped
		 * instead.
		 * In order to skip these tests, we detect this inside the
		 * guest.
		 */
skip = tests[*i].addr < (void *)4096 &&
tests[*i].expected != TRANSL_UNAVAIL &&
!mapped_0;
if (!skip) {
result = test_protection(tests[*i].addr, tests[*i].key);
__GUEST_ASSERT(result == tests[*i].expected,
"Wanted %u, got %u, for i = %u",
tests[*i].expected, result, *i);
}
}
return stage;
}
static void guest_code(void)
{
bool mapped_0;
int i = 0;
GUEST_ASSERT_EQ(set_storage_key(page_store_prot, 0x10), 0);
GUEST_ASSERT_EQ(set_storage_key(page_fetch_prot, 0x98), 0);
GUEST_SYNC(STAGE_INIT_SIMPLE);
GUEST_SYNC(perform_next_stage(&i, false));
/* Fetch-protection override */
mapped_0 = !set_storage_key((void *)0, 0x98);
GUEST_SYNC(STAGE_INIT_FETCH_PROT_OVERRIDE);
GUEST_SYNC(perform_next_stage(&i, mapped_0));
/* Storage-protection override */
GUEST_SYNC(perform_next_stage(&i, mapped_0));
}
#define HOST_SYNC_NO_TAP(vcpup, stage) \
({ \
struct kvm_vcpu *__vcpu = (vcpup); \
struct ucall uc; \
int __stage = (stage); \
\
vcpu_run(__vcpu); \
get_ucall(__vcpu, &uc); \
if (uc.cmd == UCALL_ABORT) \
REPORT_GUEST_ASSERT(uc); \
TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC); \
TEST_ASSERT_EQ(uc.args[1], __stage); \
})
#define HOST_SYNC(vcpu, stage) \
({ \
HOST_SYNC_NO_TAP(vcpu, stage); \
ksft_test_result_pass("" #stage "\n"); \
})
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
vm_vaddr_t guest_0_page;
ksft_print_header();
ksft_set_plan(STAGE_END);
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
HOST_SYNC(vcpu, STAGE_INIT_SIMPLE);
mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ);
HOST_SYNC(vcpu, TEST_SIMPLE);
guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0);
if (guest_0_page != 0) {
/* Use NO_TAP so we don't get a PASS print */
HOST_SYNC_NO_TAP(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
ksft_test_result_skip("STAGE_INIT_FETCH_PROT_OVERRIDE - "
"Did not allocate page at 0\n");
} else {
HOST_SYNC(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
}
if (guest_0_page == 0)
mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ);
run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
run->kvm_dirty_regs = KVM_SYNC_CRS;
HOST_SYNC(vcpu, TEST_FETCH_PROT_OVERRIDE);
run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
run->kvm_dirty_regs = KVM_SYNC_CRS;
HOST_SYNC(vcpu, TEST_STORAGE_PROT_OVERRIDE);
kvm_vm_free(vm);
ksft_finished(); /* Print results and exit() accordingly */
}
| linux-master | tools/testing/selftests/kvm/s390x/tprot.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test for s390x CMMA migration
*
* Copyright IBM Corp. 2023
*
* Authors:
* Nico Boehr <[email protected]>
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
#define MAIN_PAGE_COUNT 512
#define TEST_DATA_PAGE_COUNT 512
#define TEST_DATA_MEMSLOT 1
#define TEST_DATA_START_GFN 4096
#define TEST_DATA_TWO_PAGE_COUNT 256
#define TEST_DATA_TWO_MEMSLOT 2
#define TEST_DATA_TWO_START_GFN 8192
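/* Sized to hold one CMMA value byte per page of both memslots below. */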
static char cmma_value_buf[MAIN_PAGE_COUNT + TEST_DATA_PAGE_COUNT];
/**
* Dirty CMMA attributes of exactly one page in the TEST_DATA memslot,
 * so that use_cmma gets enabled and the CMMA-related ioctls do something.
*/
static void guest_do_one_essa(void)
{
asm volatile(
/* load TEST_DATA_START_GFN into r1 */
" llilf 1,%[start_gfn]\n"
/* calculate the address from the gfn */
" sllg 1,1,12(0)\n"
/* set the first page in TEST_DATA memslot to STABLE */
" .insn rrf,0xb9ab0000,2,1,1,0\n"
/* hypercall */
" diag 0,0,0x501\n"
"0: j 0b"
:
: [start_gfn] "L"(TEST_DATA_START_GFN)
: "r1", "r2", "memory", "cc"
);
}
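/*
 * Note: the trailing diag 0x501 above is the hypercall the host side matches
 * in assert_exit_was_hypercall(); tests re-run the vCPU by resetting psw_addr.
 */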
/**
* Touch CMMA attributes of all pages in TEST_DATA memslot. Set them to stable
* state.
*/
static void guest_dirty_test_data(void)
{
asm volatile(
/* r1 = TEST_DATA_START_GFN */
" xgr 1,1\n"
" llilf 1,%[start_gfn]\n"
/* r5 = TEST_DATA_PAGE_COUNT */
" lghi 5,%[page_count]\n"
/* r5 += r1 */
"2: agfr 5,1\n"
/* r2 = r1 << 12 */
"1: sllg 2,1,12(0)\n"
/* essa(r4, r2, SET_STABLE) */
" .insn rrf,0xb9ab0000,4,2,1,0\n"
/* i++ */
" agfi 1,1\n"
/* if r1 < r5 goto 1 */
" cgrjl 1,5,1b\n"
/* hypercall */
" diag 0,0,0x501\n"
"0: j 0b"
:
: [start_gfn] "L"(TEST_DATA_START_GFN),
[page_count] "L"(TEST_DATA_PAGE_COUNT)
:
/* the counter in our loop over the pages */
"r1",
/* the calculated page physical address */
"r2",
/* ESSA output register */
"r4",
/* last page */
"r5",
"cc", "memory"
);
}
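/*
 * Rough C equivalent of the asm loop above (illustrative only; essa() and
 * hypercall() are not real helpers in this test):
 *
 *	for (u64 gfn = TEST_DATA_START_GFN;
 *	     gfn < TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT; gfn++)
 *		essa(SET_STABLE, gfn << 12);
 *	hypercall();
 */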
static struct kvm_vm *create_vm(void)
{
return ____vm_create(VM_MODE_DEFAULT);
}
static void create_main_memslot(struct kvm_vm *vm)
{
int i;
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, MAIN_PAGE_COUNT, 0);
/* set the array of memslots to zero like __vm_create does */
for (i = 0; i < NR_MEM_REGIONS; i++)
vm->memslots[i] = 0;
}
static void create_test_memslot(struct kvm_vm *vm)
{
vm_userspace_mem_region_add(vm,
VM_MEM_SRC_ANONYMOUS,
TEST_DATA_START_GFN << vm->page_shift,
TEST_DATA_MEMSLOT,
TEST_DATA_PAGE_COUNT,
0
);
vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
}
static void create_memslots(struct kvm_vm *vm)
{
/*
* Our VM has the following memory layout:
* +------+---------------------------+
* | GFN | Memslot |
* +------+---------------------------+
* | 0 | |
* | ... | MAIN (Code, Stack, ...) |
* | 511 | |
* +------+---------------------------+
* | 4096 | |
* | ... | TEST_DATA |
* | 4607 | |
* +------+---------------------------+
*/
create_main_memslot(vm);
create_test_memslot(vm);
}
static void finish_vm_setup(struct kvm_vm *vm)
{
struct userspace_mem_region *slot0;
kvm_vm_elf_load(vm, program_invocation_name);
slot0 = memslot2region(vm, 0);
ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);
kvm_arch_vm_post_create(vm);
}
static struct kvm_vm *create_vm_two_memslots(void)
{
struct kvm_vm *vm;
vm = create_vm();
create_memslots(vm);
finish_vm_setup(vm);
return vm;
}
static void enable_cmma(struct kvm_vm *vm)
{
int r;
r = __kvm_device_attr_set(vm->fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA, NULL);
TEST_ASSERT(!r, "enabling cmma failed r=%d errno=%d", r, errno);
}
static void enable_dirty_tracking(struct kvm_vm *vm)
{
vm_mem_region_set_flags(vm, 0, KVM_MEM_LOG_DIRTY_PAGES);
vm_mem_region_set_flags(vm, TEST_DATA_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
}
static int __enable_migration_mode(struct kvm_vm *vm)
{
return __kvm_device_attr_set(vm->fd,
KVM_S390_VM_MIGRATION,
KVM_S390_VM_MIGRATION_START,
NULL
);
}
static void enable_migration_mode(struct kvm_vm *vm)
{
int r = __enable_migration_mode(vm);
TEST_ASSERT(!r, "enabling migration mode failed r=%d errno=%d", r, errno);
}
static bool is_migration_mode_on(struct kvm_vm *vm)
{
u64 out;
int r;
r = __kvm_device_attr_get(vm->fd,
KVM_S390_VM_MIGRATION,
KVM_S390_VM_MIGRATION_STATUS,
&out
);
TEST_ASSERT(!r, "getting migration mode status failed r=%d errno=%d", r, errno);
return out;
}
static int vm_get_cmma_bits(struct kvm_vm *vm, u64 flags, int *errno_out)
{
struct kvm_s390_cmma_log args;
int rc;
errno = 0;
args = (struct kvm_s390_cmma_log){
.start_gfn = 0,
.count = sizeof(cmma_value_buf),
.flags = flags,
.values = (__u64)&cmma_value_buf[0]
};
rc = __vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
*errno_out = errno;
return rc;
}
static void test_get_cmma_basic(void)
{
struct kvm_vm *vm = create_vm_two_memslots();
struct kvm_vcpu *vcpu;
int rc, errno_out;
/* GET_CMMA_BITS without CMMA enabled should fail */
rc = vm_get_cmma_bits(vm, 0, &errno_out);
TEST_ASSERT_EQ(rc, -1);
TEST_ASSERT_EQ(errno_out, ENXIO);
enable_cmma(vm);
vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
vcpu_run(vcpu);
/* GET_CMMA_BITS without migration mode and without peeking should fail */
rc = vm_get_cmma_bits(vm, 0, &errno_out);
TEST_ASSERT_EQ(rc, -1);
TEST_ASSERT_EQ(errno_out, EINVAL);
/* GET_CMMA_BITS without migration mode and with peeking should work */
rc = vm_get_cmma_bits(vm, KVM_S390_CMMA_PEEK, &errno_out);
TEST_ASSERT_EQ(rc, 0);
TEST_ASSERT_EQ(errno_out, 0);
enable_dirty_tracking(vm);
enable_migration_mode(vm);
/* GET_CMMA_BITS with invalid flags */
rc = vm_get_cmma_bits(vm, 0xfeedc0fe, &errno_out);
TEST_ASSERT_EQ(rc, -1);
TEST_ASSERT_EQ(errno_out, EINVAL);
kvm_vm_free(vm);
}
static void assert_exit_was_hypercall(struct kvm_vcpu *vcpu)
{
TEST_ASSERT_EQ(vcpu->run->exit_reason, 13);
TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4);
TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300);
TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000);
}
static void test_migration_mode(void)
{
struct kvm_vm *vm = create_vm();
struct kvm_vcpu *vcpu;
u64 orig_psw;
int rc;
/* enabling migration mode on a VM without memory should fail */
rc = __enable_migration_mode(vm);
TEST_ASSERT_EQ(rc, -1);
TEST_ASSERT_EQ(errno, EINVAL);
TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
errno = 0;
create_memslots(vm);
finish_vm_setup(vm);
enable_cmma(vm);
vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
orig_psw = vcpu->run->psw_addr;
/*
* Execute one essa instruction in the guest. Otherwise the guest will
* not have use_cmm enabled and GET_CMMA_BITS will return no pages.
*/
vcpu_run(vcpu);
assert_exit_was_hypercall(vcpu);
/* migration mode when memslots have dirty tracking off should fail */
rc = __enable_migration_mode(vm);
TEST_ASSERT_EQ(rc, -1);
TEST_ASSERT_EQ(errno, EINVAL);
TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
errno = 0;
/* enable dirty tracking */
enable_dirty_tracking(vm);
/* enabling migration mode should work now */
rc = __enable_migration_mode(vm);
TEST_ASSERT_EQ(rc, 0);
TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
errno = 0;
/* execute another ESSA instruction to see this goes fine */
vcpu->run->psw_addr = orig_psw;
vcpu_run(vcpu);
assert_exit_was_hypercall(vcpu);
/*
* With migration mode on, create a new memslot with dirty tracking off.
* This should turn off migration mode.
*/
TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
vm_userspace_mem_region_add(vm,
VM_MEM_SRC_ANONYMOUS,
TEST_DATA_TWO_START_GFN << vm->page_shift,
TEST_DATA_TWO_MEMSLOT,
TEST_DATA_TWO_PAGE_COUNT,
0
);
TEST_ASSERT(!is_migration_mode_on(vm),
"creating memslot without dirty tracking turns off migration mode"
);
/* ESSA instructions should still execute fine */
vcpu->run->psw_addr = orig_psw;
vcpu_run(vcpu);
assert_exit_was_hypercall(vcpu);
/*
* Turn on dirty tracking on the new memslot.
* It should be possible to turn migration mode back on again.
*/
vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
rc = __enable_migration_mode(vm);
TEST_ASSERT_EQ(rc, 0);
TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
errno = 0;
/*
* Turn off dirty tracking again, this time with just a flag change.
* Again, migration mode should turn off.
*/
TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, 0);
TEST_ASSERT(!is_migration_mode_on(vm),
"disabling dirty tracking should turn off migration mode"
);
/* ESSA instructions should still execute fine */
vcpu->run->psw_addr = orig_psw;
vcpu_run(vcpu);
assert_exit_was_hypercall(vcpu);
kvm_vm_free(vm);
}
/**
 * Given a VM with the MAIN and TEST_DATA memslots, assert that the CMMA
 * attributes of all pages in both memslots are dirty, and that nothing
 * else is.
* This has the useful side effect of ensuring nothing is CMMA dirty after this
* function.
*/
static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
{
struct kvm_s390_cmma_log args;
/*
* First iteration - everything should be dirty.
* Start at the main memslot...
*/
args = (struct kvm_s390_cmma_log){
.start_gfn = 0,
.count = sizeof(cmma_value_buf),
.flags = 0,
.values = (__u64)&cmma_value_buf[0]
};
memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
TEST_ASSERT_EQ(args.count, MAIN_PAGE_COUNT);
TEST_ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT);
TEST_ASSERT_EQ(args.start_gfn, 0);
/* ...and then - after a hole - the TEST_DATA memslot should follow */
args = (struct kvm_s390_cmma_log){
.start_gfn = MAIN_PAGE_COUNT,
.count = sizeof(cmma_value_buf),
.flags = 0,
.values = (__u64)&cmma_value_buf[0]
};
memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
TEST_ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT);
TEST_ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN);
TEST_ASSERT_EQ(args.remaining, 0);
/* ...and nothing else should be there */
args = (struct kvm_s390_cmma_log){
.start_gfn = TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT,
.count = sizeof(cmma_value_buf),
.flags = 0,
.values = (__u64)&cmma_value_buf[0]
};
memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
TEST_ASSERT_EQ(args.count, 0);
TEST_ASSERT_EQ(args.start_gfn, 0);
TEST_ASSERT_EQ(args.remaining, 0);
}
/**
* Given a VM, assert no pages are CMMA dirty.
*/
static void assert_no_pages_cmma_dirty(struct kvm_vm *vm)
{
struct kvm_s390_cmma_log args;
/* If we start from GFN 0 again, nothing should be dirty. */
args = (struct kvm_s390_cmma_log){
.start_gfn = 0,
.count = sizeof(cmma_value_buf),
.flags = 0,
.values = (__u64)&cmma_value_buf[0]
};
memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
if (args.count || args.remaining || args.start_gfn)
TEST_FAIL("pages are still dirty start_gfn=0x%llx count=%u remaining=%llu",
args.start_gfn,
args.count,
args.remaining
);
}
static void test_get_initial_dirty(void)
{
struct kvm_vm *vm = create_vm_two_memslots();
struct kvm_vcpu *vcpu;
enable_cmma(vm);
vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
/*
* Execute one essa instruction in the guest. Otherwise the guest will
* not have use_cmm enabled and GET_CMMA_BITS will return no pages.
*/
vcpu_run(vcpu);
assert_exit_was_hypercall(vcpu);
enable_dirty_tracking(vm);
enable_migration_mode(vm);
assert_all_slots_cmma_dirty(vm);
/* Start from the beginning again and make sure nothing else is dirty */
assert_no_pages_cmma_dirty(vm);
kvm_vm_free(vm);
}
static void query_cmma_range(struct kvm_vm *vm,
u64 start_gfn, u64 gfn_count,
struct kvm_s390_cmma_log *res_out)
{
*res_out = (struct kvm_s390_cmma_log){
.start_gfn = start_gfn,
.count = gfn_count,
.flags = 0,
.values = (__u64)&cmma_value_buf[0]
};
memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, res_out);
}
/**
 * Assert that the given cmma_log struct, as filled in by query_cmma_range(),
 * indicates the first dirty gfn is first_dirty_gfn and contains exactly
* dirty_gfn_count CMMA values.
*/
static void assert_cmma_dirty(u64 first_dirty_gfn,
u64 dirty_gfn_count,
const struct kvm_s390_cmma_log *res)
{
TEST_ASSERT_EQ(res->start_gfn, first_dirty_gfn);
TEST_ASSERT_EQ(res->count, dirty_gfn_count);
for (size_t i = 0; i < dirty_gfn_count; i++)
		TEST_ASSERT_EQ(cmma_value_buf[i], 0x0); /* stable state */
TEST_ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff); /* not touched */
}
static void test_get_skip_holes(void)
{
size_t gfn_offset;
struct kvm_vm *vm = create_vm_two_memslots();
struct kvm_s390_cmma_log log;
struct kvm_vcpu *vcpu;
u64 orig_psw;
enable_cmma(vm);
vcpu = vm_vcpu_add(vm, 1, guest_dirty_test_data);
orig_psw = vcpu->run->psw_addr;
/*
* Execute some essa instructions in the guest. Otherwise the guest will
* not have use_cmm enabled and GET_CMMA_BITS will return no pages.
*/
vcpu_run(vcpu);
assert_exit_was_hypercall(vcpu);
enable_dirty_tracking(vm);
enable_migration_mode(vm);
/* un-dirty all pages */
assert_all_slots_cmma_dirty(vm);
/* Then, dirty just the TEST_DATA memslot */
vcpu->run->psw_addr = orig_psw;
vcpu_run(vcpu);
gfn_offset = TEST_DATA_START_GFN;
/**
* Query CMMA attributes of one page, starting at page 0. Since the
* main memslot was not touched by the VM, this should yield the first
* page of the TEST_DATA memslot.
* The dirty bitmap should now look like this:
* 0: not dirty
* [0x1, 0x200): dirty
*/
query_cmma_range(vm, 0, 1, &log);
assert_cmma_dirty(gfn_offset, 1, &log);
gfn_offset++;
/**
* Query CMMA attributes of 32 (0x20) pages past the end of the TEST_DATA
* memslot. This should wrap back to the beginning of the TEST_DATA
* memslot, page 1.
* The dirty bitmap should now look like this:
* [0, 0x21): not dirty
* [0x21, 0x200): dirty
*/
query_cmma_range(vm, TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT, 0x20, &log);
assert_cmma_dirty(gfn_offset, 0x20, &log);
gfn_offset += 0x20;
/* Skip 32 pages */
gfn_offset += 0x20;
/**
* After skipping 32 pages, query the next 32 (0x20) pages.
* The dirty bitmap should now look like this:
* [0, 0x21): not dirty
* [0x21, 0x41): dirty
* [0x41, 0x61): not dirty
* [0x61, 0x200): dirty
*/
query_cmma_range(vm, gfn_offset, 0x20, &log);
assert_cmma_dirty(gfn_offset, 0x20, &log);
gfn_offset += 0x20;
/**
* Query 1 page from the beginning of the TEST_DATA memslot. This should
* yield page 0x21.
* The dirty bitmap should now look like this:
* [0, 0x22): not dirty
* [0x22, 0x41): dirty
* [0x41, 0x61): not dirty
* [0x61, 0x200): dirty
*/
query_cmma_range(vm, TEST_DATA_START_GFN, 1, &log);
assert_cmma_dirty(TEST_DATA_START_GFN + 0x21, 1, &log);
gfn_offset++;
/**
* Query 15 (0xF) pages from page 0x23 in TEST_DATA memslot.
* This should yield pages [0x23, 0x33).
* The dirty bitmap should now look like this:
* [0, 0x22): not dirty
* 0x22: dirty
* [0x23, 0x33): not dirty
* [0x33, 0x41): dirty
* [0x41, 0x61): not dirty
* [0x61, 0x200): dirty
*/
gfn_offset = TEST_DATA_START_GFN + 0x23;
query_cmma_range(vm, gfn_offset, 15, &log);
assert_cmma_dirty(gfn_offset, 15, &log);
/**
* Query 17 (0x11) pages from page 0x22 in TEST_DATA memslot.
* This should yield page [0x22, 0x33)
* The dirty bitmap should now look like this:
* [0, 0x33): not dirty
* [0x33, 0x41): dirty
* [0x41, 0x61): not dirty
* [0x61, 0x200): dirty
*/
gfn_offset = TEST_DATA_START_GFN + 0x22;
query_cmma_range(vm, gfn_offset, 17, &log);
assert_cmma_dirty(gfn_offset, 17, &log);
/**
* Query 25 (0x19) pages from page 0x40 in TEST_DATA memslot.
* This should yield page 0x40 and nothing more, since there are more
* than 16 non-dirty pages after page 0x40.
* The dirty bitmap should now look like this:
* [0, 0x33): not dirty
* [0x33, 0x40): dirty
* [0x40, 0x61): not dirty
* [0x61, 0x200): dirty
*/
gfn_offset = TEST_DATA_START_GFN + 0x40;
query_cmma_range(vm, gfn_offset, 25, &log);
assert_cmma_dirty(gfn_offset, 1, &log);
/**
* Query pages [0x33, 0x40).
* The dirty bitmap should now look like this:
* [0, 0x61): not dirty
* [0x61, 0x200): dirty
*/
gfn_offset = TEST_DATA_START_GFN + 0x33;
query_cmma_range(vm, gfn_offset, 0x40 - 0x33, &log);
assert_cmma_dirty(gfn_offset, 0x40 - 0x33, &log);
/**
* Query the remaining pages [0x61, 0x200).
*/
gfn_offset = TEST_DATA_START_GFN;
query_cmma_range(vm, gfn_offset, TEST_DATA_PAGE_COUNT - 0x61, &log);
assert_cmma_dirty(TEST_DATA_START_GFN + 0x61, TEST_DATA_PAGE_COUNT - 0x61, &log);
assert_no_pages_cmma_dirty(vm);
}
struct testdef {
const char *name;
void (*test)(void);
} testlist[] = {
{ "migration mode and dirty tracking", test_migration_mode },
{ "GET_CMMA_BITS: basic calls", test_get_cmma_basic },
{ "GET_CMMA_BITS: all pages are dirty initally", test_get_inital_dirty },
{ "GET_CMMA_BITS: holes are skipped", test_get_skip_holes },
};
/**
 * The kernel may support CMMA, but the machine may not (e.g. when running as
* guest-3).
*
* In this case, the CMMA capabilities are all there, but the CMMA-related
* ioctls fail. To find out whether the machine supports CMMA, create a
* temporary VM and then query the CMMA feature of the VM.
*/
static int machine_has_cmma(void)
{
struct kvm_vm *vm = create_vm();
int r;
r = !__kvm_has_device_attr(vm->fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA);
kvm_vm_free(vm);
return r;
}
int main(int argc, char *argv[])
{
int idx;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_SYNC_REGS));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_CMMA_MIGRATION));
TEST_REQUIRE(machine_has_cmma());
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(testlist));
for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
testlist[idx].test();
ksft_test_result_pass("%s\n", testlist[idx].name);
}
ksft_finished(); /* Print results and exit() accordingly */
}
| linux-master | tools/testing/selftests/kvm/s390x/cmma_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Test KVM debugging features. */
#include "kvm_util.h"
#include "test_util.h"
#include <linux/kvm.h>
#define __LC_SVC_NEW_PSW 0x1c0
#define __LC_PGM_NEW_PSW 0x1d0
#define ICPT_INSTRUCTION 0x04
#define IPA0_DIAG 0x8300
#define PGM_SPECIFICATION 0x06
/* Common code for testing single-stepping interruptions. */
extern char int_handler[];
asm("int_handler:\n"
"j .\n");
static struct kvm_vm *test_step_int_1(struct kvm_vcpu **vcpu, void *guest_code,
size_t new_psw_off, uint64_t *new_psw)
{
struct kvm_guest_debug debug = {};
struct kvm_regs regs;
struct kvm_vm *vm;
char *lowcore;
vm = vm_create_with_one_vcpu(vcpu, guest_code);
lowcore = addr_gpa2hva(vm, 0);
new_psw[0] = (*vcpu)->run->psw_mask;
new_psw[1] = (uint64_t)int_handler;
memcpy(lowcore + new_psw_off, new_psw, 16);
vcpu_regs_get(*vcpu, ®s);
regs.gprs[2] = -1;
vcpu_regs_set(*vcpu, ®s);
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
vcpu_guest_debug_set(*vcpu, &debug);
vcpu_run(*vcpu);
return vm;
}
static void test_step_int(void *guest_code, size_t new_psw_off)
{
struct kvm_vcpu *vcpu;
uint64_t new_psw[2];
struct kvm_vm *vm;
vm = test_step_int_1(&vcpu, guest_code, new_psw_off, new_psw);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_DEBUG);
TEST_ASSERT_EQ(vcpu->run->psw_mask, new_psw[0]);
TEST_ASSERT_EQ(vcpu->run->psw_addr, new_psw[1]);
kvm_vm_free(vm);
}
/* Test single-stepping "boring" program interruptions. */
extern char test_step_pgm_guest_code[];
asm("test_step_pgm_guest_code:\n"
".insn rr,0x1d00,%r1,%r0 /* dr %r1,%r0 */\n"
"j .\n");
static void test_step_pgm(void)
{
test_step_int(test_step_pgm_guest_code, __LC_PGM_NEW_PSW);
}
/*
* Test single-stepping program interruptions caused by DIAG.
* Userspace emulation must not interfere with single-stepping.
*/
extern char test_step_pgm_diag_guest_code[];
asm("test_step_pgm_diag_guest_code:\n"
"diag %r0,%r0,0\n"
"j .\n");
static void test_step_pgm_diag(void)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_PROGRAM_INT,
.u.pgm.code = PGM_SPECIFICATION,
};
struct kvm_vcpu *vcpu;
uint64_t new_psw[2];
struct kvm_vm *vm;
vm = test_step_int_1(&vcpu, test_step_pgm_diag_guest_code,
__LC_PGM_NEW_PSW, new_psw);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_INSTRUCTION);
TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa & 0xff00, IPA0_DIAG);
vcpu_ioctl(vcpu, KVM_S390_IRQ, &irq);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_DEBUG);
TEST_ASSERT_EQ(vcpu->run->psw_mask, new_psw[0]);
TEST_ASSERT_EQ(vcpu->run->psw_addr, new_psw[1]);
kvm_vm_free(vm);
}
/*
* Test single-stepping program interruptions caused by ISKE.
* CPUSTAT_KSS handling must not interfere with single-stepping.
*/
extern char test_step_pgm_iske_guest_code[];
asm("test_step_pgm_iske_guest_code:\n"
"iske %r2,%r2\n"
"j .\n");
static void test_step_pgm_iske(void)
{
test_step_int(test_step_pgm_iske_guest_code, __LC_PGM_NEW_PSW);
}
/*
* Test single-stepping program interruptions caused by LCTL.
* KVM emulation must not interfere with single-stepping.
*/
extern char test_step_pgm_lctl_guest_code[];
asm("test_step_pgm_lctl_guest_code:\n"
"lctl %c0,%c0,1\n"
"j .\n");
static void test_step_pgm_lctl(void)
{
test_step_int(test_step_pgm_lctl_guest_code, __LC_PGM_NEW_PSW);
}
/* Test single-stepping supervisor-call interruptions. */
extern char test_step_svc_guest_code[];
asm("test_step_svc_guest_code:\n"
"svc 0\n"
"j .\n");
static void test_step_svc(void)
{
test_step_int(test_step_svc_guest_code, __LC_SVC_NEW_PSW);
}
/* Run all tests above. */
static struct testdef {
const char *name;
void (*test)(void);
} testlist[] = {
{ "single-step pgm", test_step_pgm },
{ "single-step pgm caused by diag", test_step_pgm_diag },
{ "single-step pgm caused by iske", test_step_pgm_iske },
{ "single-step pgm caused by lctl", test_step_pgm_lctl },
{ "single-step svc", test_step_svc },
};
int main(int argc, char *argv[])
{
int idx;
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(testlist));
for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
testlist[idx].test();
ksft_test_result_pass("%s\n", testlist[idx].name);
}
ksft_finished();
}
| linux-master | tools/testing/selftests/kvm/s390x/debug_test.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test for s390x CPU resets
*
* Copyright (C) 2020, IBM
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
#define LOCAL_IRQS 32
#define ARBITRARY_NON_ZERO_VCPU_ID 3
struct kvm_s390_irq buf[ARBITRARY_NON_ZERO_VCPU_ID + LOCAL_IRQS];
static uint8_t regs_null[512];
static void guest_code_initial(void)
{
/* set several CRs to "safe" value */
unsigned long cr2_59 = 0x10; /* enable guarded storage */
unsigned long cr8_63 = 0x1; /* monitor mask = 1 */
unsigned long cr10 = 1; /* PER START */
unsigned long cr11 = -1; /* PER END */
/* Dirty registers */
asm volatile (
" lghi 2,0x11\n" /* Round toward 0 */
" sfpc 2\n" /* set fpc to !=0 */
" lctlg 2,2,%0\n"
" lctlg 8,8,%1\n"
" lctlg 10,10,%2\n"
" lctlg 11,11,%3\n"
/* now clobber some general purpose regs */
" llihh 0,0xffff\n"
" llihl 1,0x5555\n"
" llilh 2,0xaaaa\n"
" llill 3,0x0000\n"
/* now clobber a floating point reg */
" lghi 4,0x1\n"
" cdgbr 0,4\n"
/* now clobber an access reg */
" sar 9,4\n"
/* We embed diag 501 here to control register content */
" diag 0,0,0x501\n"
:
: "m" (cr2_59), "m" (cr8_63), "m" (cr10), "m" (cr11)
/* no clobber list as this should not return */
);
}
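/*
 * The state dirtied above is exactly what the noclear assertions check:
 * gprs 0-3, fpr0 (== 1.0), ar9 == 1 and cr2/cr8/cr10/cr11 must survive
 * the respective reset flavors.
 */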
static void test_one_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t value)
{
uint64_t eval_reg;
vcpu_get_reg(vcpu, id, &eval_reg);
	TEST_ASSERT(eval_reg == value, "expected 0x%lx, got 0x%lx", value, eval_reg);
}
static void assert_noirq(struct kvm_vcpu *vcpu)
{
struct kvm_s390_irq_state irq_state;
int irqs;
irq_state.len = sizeof(buf);
irq_state.buf = (unsigned long)buf;
irqs = __vcpu_ioctl(vcpu, KVM_S390_GET_IRQ_STATE, &irq_state);
/*
* irqs contains the number of retrieved interrupts. Any interrupt
* (notably, the emergency call interrupt we have injected) should
* be cleared by the resets, so this should be 0.
*/
TEST_ASSERT(irqs >= 0, "Could not fetch IRQs: errno %d\n", errno);
TEST_ASSERT(!irqs, "IRQ pending");
}
static void assert_clear(struct kvm_vcpu *vcpu)
{
struct kvm_sync_regs *sync_regs = &vcpu->run->s.regs;
struct kvm_sregs sregs;
struct kvm_regs regs;
struct kvm_fpu fpu;
vcpu_regs_get(vcpu, ®s);
TEST_ASSERT(!memcmp(®s.gprs, regs_null, sizeof(regs.gprs)), "grs == 0");
vcpu_sregs_get(vcpu, &sregs);
TEST_ASSERT(!memcmp(&sregs.acrs, regs_null, sizeof(sregs.acrs)), "acrs == 0");
vcpu_fpu_get(vcpu, &fpu);
TEST_ASSERT(!memcmp(&fpu.fprs, regs_null, sizeof(fpu.fprs)), "fprs == 0");
/* sync regs */
TEST_ASSERT(!memcmp(sync_regs->gprs, regs_null, sizeof(sync_regs->gprs)),
"gprs0-15 == 0 (sync_regs)");
TEST_ASSERT(!memcmp(sync_regs->acrs, regs_null, sizeof(sync_regs->acrs)),
"acrs0-15 == 0 (sync_regs)");
TEST_ASSERT(!memcmp(sync_regs->vrs, regs_null, sizeof(sync_regs->vrs)),
"vrs0-15 == 0 (sync_regs)");
}
static void assert_initial_noclear(struct kvm_vcpu *vcpu)
{
struct kvm_sync_regs *sync_regs = &vcpu->run->s.regs;
TEST_ASSERT(sync_regs->gprs[0] == 0xffff000000000000UL,
"gpr0 == 0xffff000000000000 (sync_regs)");
TEST_ASSERT(sync_regs->gprs[1] == 0x0000555500000000UL,
"gpr1 == 0x0000555500000000 (sync_regs)");
TEST_ASSERT(sync_regs->gprs[2] == 0x00000000aaaa0000UL,
"gpr2 == 0x00000000aaaa0000 (sync_regs)");
TEST_ASSERT(sync_regs->gprs[3] == 0x0000000000000000UL,
"gpr3 == 0x0000000000000000 (sync_regs)");
TEST_ASSERT(sync_regs->fprs[0] == 0x3ff0000000000000UL,
"fpr0 == 0f1 (sync_regs)");
TEST_ASSERT(sync_regs->acrs[9] == 1, "ar9 == 1 (sync_regs)");
}
static void assert_initial(struct kvm_vcpu *vcpu)
{
struct kvm_sync_regs *sync_regs = &vcpu->run->s.regs;
struct kvm_sregs sregs;
struct kvm_fpu fpu;
/* KVM_GET_SREGS */
vcpu_sregs_get(vcpu, &sregs);
TEST_ASSERT(sregs.crs[0] == 0xE0UL, "cr0 == 0xE0 (KVM_GET_SREGS)");
TEST_ASSERT(sregs.crs[14] == 0xC2000000UL,
"cr14 == 0xC2000000 (KVM_GET_SREGS)");
TEST_ASSERT(!memcmp(&sregs.crs[1], regs_null, sizeof(sregs.crs[1]) * 12),
"cr1-13 == 0 (KVM_GET_SREGS)");
TEST_ASSERT(sregs.crs[15] == 0, "cr15 == 0 (KVM_GET_SREGS)");
/* sync regs */
TEST_ASSERT(sync_regs->crs[0] == 0xE0UL, "cr0 == 0xE0 (sync_regs)");
TEST_ASSERT(sync_regs->crs[14] == 0xC2000000UL,
"cr14 == 0xC2000000 (sync_regs)");
TEST_ASSERT(!memcmp(&sync_regs->crs[1], regs_null, 8 * 12),
"cr1-13 == 0 (sync_regs)");
TEST_ASSERT(sync_regs->crs[15] == 0, "cr15 == 0 (sync_regs)");
TEST_ASSERT(sync_regs->fpc == 0, "fpc == 0 (sync_regs)");
TEST_ASSERT(sync_regs->todpr == 0, "todpr == 0 (sync_regs)");
TEST_ASSERT(sync_regs->cputm == 0, "cputm == 0 (sync_regs)");
TEST_ASSERT(sync_regs->ckc == 0, "ckc == 0 (sync_regs)");
TEST_ASSERT(sync_regs->pp == 0, "pp == 0 (sync_regs)");
TEST_ASSERT(sync_regs->gbea == 1, "gbea == 1 (sync_regs)");
/* kvm_run */
TEST_ASSERT(vcpu->run->psw_addr == 0, "psw_addr == 0 (kvm_run)");
TEST_ASSERT(vcpu->run->psw_mask == 0, "psw_mask == 0 (kvm_run)");
vcpu_fpu_get(vcpu, &fpu);
TEST_ASSERT(!fpu.fpc, "fpc == 0");
test_one_reg(vcpu, KVM_REG_S390_GBEA, 1);
test_one_reg(vcpu, KVM_REG_S390_PP, 0);
test_one_reg(vcpu, KVM_REG_S390_TODPR, 0);
test_one_reg(vcpu, KVM_REG_S390_CPU_TIMER, 0);
test_one_reg(vcpu, KVM_REG_S390_CLOCK_COMP, 0);
}
static void assert_normal_noclear(struct kvm_vcpu *vcpu)
{
struct kvm_sync_regs *sync_regs = &vcpu->run->s.regs;
TEST_ASSERT(sync_regs->crs[2] == 0x10, "cr2 == 10 (sync_regs)");
	TEST_ASSERT(sync_regs->crs[8] == 1, "cr8 == 1 (sync_regs)");
TEST_ASSERT(sync_regs->crs[10] == 1, "cr10 == 1 (sync_regs)");
TEST_ASSERT(sync_regs->crs[11] == -1, "cr11 == -1 (sync_regs)");
}
static void assert_normal(struct kvm_vcpu *vcpu)
{
test_one_reg(vcpu, KVM_REG_S390_PFTOKEN, KVM_S390_PFAULT_TOKEN_INVALID);
TEST_ASSERT(vcpu->run->s.regs.pft == KVM_S390_PFAULT_TOKEN_INVALID,
"pft == 0xff..... (sync_regs)");
assert_noirq(vcpu);
}
static void inject_irq(struct kvm_vcpu *vcpu)
{
struct kvm_s390_irq_state irq_state;
struct kvm_s390_irq *irq = &buf[0];
int irqs;
/* Inject IRQ */
irq_state.len = sizeof(struct kvm_s390_irq);
irq_state.buf = (unsigned long)buf;
irq->type = KVM_S390_INT_EMERGENCY;
irq->u.emerg.code = vcpu->id;
irqs = __vcpu_ioctl(vcpu, KVM_S390_SET_IRQ_STATE, &irq_state);
TEST_ASSERT(irqs >= 0, "Error injecting EMERGENCY IRQ errno %d\n", errno);
}
static struct kvm_vm *create_vm(struct kvm_vcpu **vcpu)
{
struct kvm_vm *vm;
vm = vm_create(1);
*vcpu = vm_vcpu_add(vm, ARBITRARY_NON_ZERO_VCPU_ID, guest_code_initial);
return vm;
}
static void test_normal(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
ksft_print_msg("Testing normal reset\n");
vm = create_vm(&vcpu);
vcpu_run(vcpu);
inject_irq(vcpu);
vcpu_ioctl(vcpu, KVM_S390_NORMAL_RESET, NULL);
/* must clears */
assert_normal(vcpu);
/* must not clears */
assert_normal_noclear(vcpu);
assert_initial_noclear(vcpu);
kvm_vm_free(vm);
}
static void test_initial(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
ksft_print_msg("Testing initial reset\n");
vm = create_vm(&vcpu);
vcpu_run(vcpu);
inject_irq(vcpu);
vcpu_ioctl(vcpu, KVM_S390_INITIAL_RESET, NULL);
/* must clears */
assert_normal(vcpu);
assert_initial(vcpu);
/* must not clears */
assert_initial_noclear(vcpu);
kvm_vm_free(vm);
}
static void test_clear(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
ksft_print_msg("Testing clear reset\n");
vm = create_vm(&vcpu);
vcpu_run(vcpu);
inject_irq(vcpu);
vcpu_ioctl(vcpu, KVM_S390_CLEAR_RESET, NULL);
/* must clears */
assert_normal(vcpu);
assert_initial(vcpu);
assert_clear(vcpu);
kvm_vm_free(vm);
}
struct testdef {
const char *name;
void (*test)(void);
bool needs_cap;
} testlist[] = {
{ "initial", test_initial, false },
{ "normal", test_normal, true },
{ "clear", test_clear, true },
};
int main(int argc, char *argv[])
{
bool has_s390_vcpu_resets = kvm_check_cap(KVM_CAP_S390_VCPU_RESETS);
int idx;
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(testlist));
for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
if (!testlist[idx].needs_cap || has_s390_vcpu_resets) {
testlist[idx].test();
ksft_test_result_pass("%s\n", testlist[idx].name);
} else {
ksft_test_result_skip("%s - no VCPU_RESETS capability\n",
testlist[idx].name);
}
}
ksft_finished(); /* Print results and exit() accordingly */
}
| linux-master | tools/testing/selftests/kvm/s390x/resets.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test for s390x KVM_S390_MEM_OP
*
* Copyright (C) 2019, Red Hat, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <pthread.h>
#include <linux/bits.h>
#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
enum mop_target {
LOGICAL,
SIDA,
ABSOLUTE,
INVALID,
};
enum mop_access_mode {
READ,
WRITE,
CMPXCHG,
};
struct mop_desc {
uintptr_t gaddr;
uintptr_t gaddr_v;
uint64_t set_flags;
unsigned int f_check : 1;
unsigned int f_inject : 1;
unsigned int f_key : 1;
unsigned int _gaddr_v : 1;
unsigned int _set_flags : 1;
unsigned int _sida_offset : 1;
unsigned int _ar : 1;
uint32_t size;
enum mop_target target;
enum mop_access_mode mode;
void *buf;
uint32_t sida_offset;
void *old;
uint8_t old_value[16];
bool *cmpxchg_success;
uint8_t ar;
uint8_t key;
};
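/* 0xff is not a valid 4-bit access key, so it doubles as a "no key" marker. */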
const uint8_t NO_KEY = 0xff;
static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc)
{
struct kvm_s390_mem_op ksmo = {
.gaddr = (uintptr_t)desc->gaddr,
.size = desc->size,
.buf = ((uintptr_t)desc->buf),
.reserved = "ignored_ignored_ignored_ignored"
};
switch (desc->target) {
case LOGICAL:
if (desc->mode == READ)
ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
if (desc->mode == WRITE)
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
break;
case SIDA:
if (desc->mode == READ)
ksmo.op = KVM_S390_MEMOP_SIDA_READ;
if (desc->mode == WRITE)
ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
break;
case ABSOLUTE:
if (desc->mode == READ)
ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
if (desc->mode == WRITE)
ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
if (desc->mode == CMPXCHG) {
ksmo.op = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG;
ksmo.old_addr = (uint64_t)desc->old;
memcpy(desc->old_value, desc->old, desc->size);
}
break;
case INVALID:
ksmo.op = -1;
}
if (desc->f_check)
ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
if (desc->f_inject)
ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
if (desc->_set_flags)
ksmo.flags = desc->set_flags;
if (desc->f_key && desc->key != NO_KEY) {
ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
ksmo.key = desc->key;
}
if (desc->_ar)
ksmo.ar = desc->ar;
else
ksmo.ar = 0;
if (desc->_sida_offset)
ksmo.sida_offset = desc->sida_offset;
return ksmo;
}
struct test_info {
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
};
#define PRINT_MEMOP false
static void print_memop(struct kvm_vcpu *vcpu, const struct kvm_s390_mem_op *ksmo)
{
if (!PRINT_MEMOP)
return;
if (!vcpu)
printf("vm memop(");
else
printf("vcpu memop(");
switch (ksmo->op) {
case KVM_S390_MEMOP_LOGICAL_READ:
printf("LOGICAL, READ, ");
break;
case KVM_S390_MEMOP_LOGICAL_WRITE:
printf("LOGICAL, WRITE, ");
break;
case KVM_S390_MEMOP_SIDA_READ:
printf("SIDA, READ, ");
break;
case KVM_S390_MEMOP_SIDA_WRITE:
printf("SIDA, WRITE, ");
break;
case KVM_S390_MEMOP_ABSOLUTE_READ:
printf("ABSOLUTE, READ, ");
break;
case KVM_S390_MEMOP_ABSOLUTE_WRITE:
printf("ABSOLUTE, WRITE, ");
break;
case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
printf("ABSOLUTE, CMPXCHG, ");
break;
}
printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u, old_addr=%llx",
ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key,
ksmo->old_addr);
if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
printf(", CHECK_ONLY");
if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
printf(", INJECT_EXCEPTION");
if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
printf(", SKEY_PROTECTION");
puts(")");
}
static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
struct mop_desc *desc)
{
struct kvm_vcpu *vcpu = info.vcpu;
if (!vcpu)
return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
else
return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}
static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
struct mop_desc *desc)
{
int r;
r = err_memop_ioctl(info, ksmo, desc);
if (ksmo->op == KVM_S390_MEMOP_ABSOLUTE_CMPXCHG) {
if (desc->cmpxchg_success) {
int diff = memcmp(desc->old_value, desc->old, desc->size);
*desc->cmpxchg_success = !diff;
}
}
TEST_ASSERT(!r, __KVM_IOCTL_ERROR("KVM_S390_MEM_OP", r));
}
#define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...) \
({ \
struct test_info __info = (info_p); \
struct mop_desc __desc = { \
.target = (mop_target_p), \
.mode = (access_mode_p), \
.buf = (buf_p), \
.size = (size_p), \
__VA_ARGS__ \
}; \
struct kvm_s390_mem_op __ksmo; \
\
if (__desc._gaddr_v) { \
if (__desc.target == ABSOLUTE) \
__desc.gaddr = addr_gva2gpa(__info.vm, __desc.gaddr_v); \
else \
__desc.gaddr = __desc.gaddr_v; \
} \
__ksmo = ksmo_from_desc(&__desc); \
print_memop(__info.vcpu, &__ksmo); \
err##memop_ioctl(__info, &__ksmo, &__desc); \
})
#define MOP(...) MEMOP(, __VA_ARGS__)
#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)
#define GADDR(a) .gaddr = ((uintptr_t)a)
#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
#define CHECK_ONLY .f_check = 1
#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
#define AR(a) ._ar = 1, .ar = (a)
#define KEY(a) .f_key = 1, .key = (a)
#define INJECT .f_inject = 1
#define CMPXCHG_OLD(o) .old = (o)
#define CMPXCHG_SUCCESS(s) .cmpxchg_success = (s)
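/*
 * Example invocation (mirroring default_write_read() below): a logical vCPU
 * write of mem1 guarded by access key 9, dry-run first via CHECK_ONLY:
 *
 *	CHECK_N_DO(MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size,
 *		   GADDR_V(mem1), KEY(9));
 */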
#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })
#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
static uint8_t __aligned(PAGE_SIZE) mem1[65536];
static uint8_t __aligned(PAGE_SIZE) mem2[65536];
struct test_default {
struct kvm_vm *kvm_vm;
struct test_info vm;
struct test_info vcpu;
struct kvm_run *run;
int size;
};
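/*
 * KVM_CAP_S390_MEM_OP reports the maximum size a single MEM_OP ioctl can
 * transfer; clamp the default test size to that and to the static buffers.
 */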
static struct test_default test_default_init(void *guest_code)
{
struct kvm_vcpu *vcpu;
struct test_default t;
t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
t.kvm_vm = vm_create_with_one_vcpu(&vcpu, guest_code);
t.vm = (struct test_info) { t.kvm_vm, NULL };
t.vcpu = (struct test_info) { t.kvm_vm, vcpu };
t.run = vcpu->run;
return t;
}
enum stage {
/* Synced state set by host, e.g. DAT */
STAGE_INITED,
/* Guest did nothing */
STAGE_IDLED,
/* Guest set storage keys (specifics up to test case) */
STAGE_SKEYS_SET,
/* Guest copied memory (locations up to test case) */
STAGE_COPIED,
/* End of guest code reached */
STAGE_DONE,
};
#define HOST_SYNC(info_p, stage) \
({ \
struct test_info __info = (info_p); \
struct kvm_vcpu *__vcpu = __info.vcpu; \
struct ucall uc; \
int __stage = (stage); \
\
vcpu_run(__vcpu); \
get_ucall(__vcpu, &uc); \
if (uc.cmd == UCALL_ABORT) { \
REPORT_GUEST_ASSERT(uc); \
} \
TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC); \
TEST_ASSERT_EQ(uc.args[1], __stage); \
})
static void prepare_mem12(void)
{
int i;
for (i = 0; i < sizeof(mem1); i++)
mem1[i] = rand();
memset(mem2, 0xaa, sizeof(mem2));
}
#define ASSERT_MEM_EQ(p1, p2, size) \
TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")
static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu,
enum mop_target mop_target, uint32_t size, uint8_t key)
{
prepare_mem12();
CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size,
GADDR_V(mem1), KEY(key));
HOST_SYNC(copy_cpu, STAGE_COPIED);
CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
GADDR_V(mem2), KEY(key));
ASSERT_MEM_EQ(mem1, mem2, size);
}
static void default_read(struct test_info copy_cpu, struct test_info mop_cpu,
enum mop_target mop_target, uint32_t size, uint8_t key)
{
prepare_mem12();
CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1));
HOST_SYNC(copy_cpu, STAGE_COPIED);
CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
GADDR_V(mem2), KEY(key));
ASSERT_MEM_EQ(mem1, mem2, size);
}
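/*
 * For every power-of-two size up to 16 bytes and every size-aligned offset
 * within a 16-byte block, perform one cmpxchg that must succeed (the old
 * value matches) and one that must fail (the old value is off by one),
 * verifying both guest memory and the reported old value each time.
 */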
static void default_cmpxchg(struct test_default *test, uint8_t key)
{
for (int size = 1; size <= 16; size *= 2) {
for (int offset = 0; offset < 16; offset += size) {
uint8_t __aligned(16) new[16] = {};
uint8_t __aligned(16) old[16];
bool succ;
prepare_mem12();
default_write_read(test->vcpu, test->vcpu, LOGICAL, 16, NO_KEY);
memcpy(&old, mem1, 16);
MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
size, GADDR_V(mem1 + offset),
CMPXCHG_OLD(old + offset),
CMPXCHG_SUCCESS(&succ), KEY(key));
HOST_SYNC(test->vcpu, STAGE_COPIED);
MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
TEST_ASSERT(succ, "exchange of values should succeed");
memcpy(mem1 + offset, new + offset, size);
ASSERT_MEM_EQ(mem1, mem2, 16);
memcpy(&old, mem1, 16);
new[offset]++;
old[offset]++;
MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
size, GADDR_V(mem1 + offset),
CMPXCHG_OLD(old + offset),
CMPXCHG_SUCCESS(&succ), KEY(key));
HOST_SYNC(test->vcpu, STAGE_COPIED);
MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
TEST_ASSERT(!succ, "exchange of values should not succeed");
ASSERT_MEM_EQ(mem1, mem2, 16);
ASSERT_MEM_EQ(&old, mem1, 16);
}
}
}
static void guest_copy(void)
{
GUEST_SYNC(STAGE_INITED);
memcpy(&mem2, &mem1, sizeof(mem2));
GUEST_SYNC(STAGE_COPIED);
}
static void test_copy(void)
{
struct test_default t = test_default_init(guest_copy);
HOST_SYNC(t.vcpu, STAGE_INITED);
default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, NO_KEY);
kvm_vm_free(t.kvm_vm);
}
static void set_storage_key_range(void *addr, size_t len, uint8_t key)
{
uintptr_t _addr, abs, i;
int not_mapped = 0;
_addr = (uintptr_t)addr;
for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
abs = i;
asm volatile (
"lra %[abs], 0(0,%[abs])\n"
" jz 0f\n"
" llill %[not_mapped],1\n"
" j 1f\n"
"0: sske %[key], %[abs]\n"
"1:"
: [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
: [key] "r" (key)
: "cc"
);
GUEST_ASSERT_EQ(not_mapped, 0);
}
}
static void guest_copy_key(void)
{
set_storage_key_range(mem1, sizeof(mem1), 0x90);
set_storage_key_range(mem2, sizeof(mem2), 0x90);
GUEST_SYNC(STAGE_SKEYS_SET);
for (;;) {
memcpy(&mem2, &mem1, sizeof(mem2));
GUEST_SYNC(STAGE_COPIED);
}
}
static void test_copy_key(void)
{
struct test_default t = test_default_init(guest_copy_key);
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
/* vm, no key */
default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, NO_KEY);
	/* vm/vcpu, matching key or key 0 */
default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 0);
default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 0);
default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);
/*
* There used to be different code paths for key handling depending on
* if the region crossed a page boundary.
* There currently are not, but the more tests the merrier.
*/
default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 0);
default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 9);
default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 0);
default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 9);
/* vm/vcpu, mismatching keys on read, but no fetch protection */
default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);
default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 2);
kvm_vm_free(t.kvm_vm);
}
static void test_cmpxchg_key(void)
{
struct test_default t = test_default_init(guest_copy_key);
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
default_cmpxchg(&t, NO_KEY);
default_cmpxchg(&t, 0);
default_cmpxchg(&t, 9);
kvm_vm_free(t.kvm_vm);
}
static __uint128_t cut_to_size(int size, __uint128_t val)
{
switch (size) {
case 1:
return (uint8_t)val;
case 2:
return (uint16_t)val;
case 4:
return (uint32_t)val;
case 8:
return (uint64_t)val;
case 16:
return val;
}
GUEST_FAIL("Invalid size = %u", size);
return 0;
}
static bool popcount_eq(__uint128_t a, __uint128_t b)
{
unsigned int count_a, count_b;
count_a = __builtin_popcountl((uint64_t)(a >> 64)) +
__builtin_popcountl((uint64_t)a);
count_b = __builtin_popcountl((uint64_t)(b >> 64)) +
__builtin_popcountl((uint64_t)b);
return count_a == count_b;
}
static __uint128_t rotate(int size, __uint128_t val, int amount)
{
	unsigned int bits = size * 8;
	amount = (amount + bits) % bits;
	val = cut_to_size(size, val);
	/* Shifting by the full width is undefined; a zero rotate is a no-op. */
	if (!amount)
		return val;
	return (val << (bits - amount)) | (val >> amount);
}
const unsigned int max_block = 16;
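/*
 * choose_block() and permutate_bits() derive all "random" choices
 * deterministically from (guest, i) via small LCG-style steps, giving the
 * guest and host threads reproducible, independent parameter streams
 * without any shared state.
 */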
static void choose_block(bool guest, int i, int *size, int *offset)
{
unsigned int rand;
rand = i;
if (guest) {
rand = rand * 19 + 11;
*size = 1 << ((rand % 3) + 2);
rand = rand * 19 + 11;
*offset = (rand % max_block) & ~(*size - 1);
} else {
rand = rand * 17 + 5;
*size = 1 << (rand % 5);
rand = rand * 17 + 5;
*offset = (rand % max_block) & ~(*size - 1);
}
}
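/*
 * Derive a new value from @old by either swapping two of its bytes or
 * rotating it. Both transformations merely move bits around, so the
 * number of set bits is invariant; test_cmpxchg_key_concurrent() relies
 * on exactly this invariant to validate the concurrent exchanges.
 */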
static __uint128_t permutate_bits(bool guest, int i, int size, __uint128_t old)
{
unsigned int rand;
int amount;
bool swap;
rand = i;
rand = rand * 3 + 1;
if (guest)
rand = rand * 3 + 1;
swap = rand % 2 == 0;
if (swap) {
int i, j;
__uint128_t new;
uint8_t byte0, byte1;
rand = rand * 3 + 1;
i = rand % size;
rand = rand * 3 + 1;
j = rand % size;
if (i == j)
return old;
new = rotate(16, old, i * 8);
byte0 = new & 0xff;
new &= ~0xff;
new = rotate(16, new, -i * 8);
new = rotate(16, new, j * 8);
byte1 = new & 0xff;
new = (new & ~0xff) | byte0;
new = rotate(16, new, -j * 8);
new = rotate(16, new, i * 8);
new = new | byte1;
new = rotate(16, new, -i * 8);
return new;
}
rand = rand * 3 + 1;
amount = rand % (size * 8);
return rotate(size, old, amount);
}
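/*
 * Guest-side compare-and-swap built on the s390 CS/CSG/CDSG instructions.
 * On a mismatch these instructions load the current memory contents into
 * the first operand, which is why success is detected by checking whether
 * the local copy of *old_addr was left unchanged, and why the observed
 * value is written back for the caller to retry with.
 */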
static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t new)
{
bool ret;
switch (size) {
case 4: {
uint32_t old = *old_addr;
asm volatile ("cs %[old],%[new],%[address]"
: [old] "+d" (old),
[address] "+Q" (*(uint32_t *)(target))
: [new] "d" ((uint32_t)new)
: "cc"
);
ret = old == (uint32_t)*old_addr;
*old_addr = old;
return ret;
}
case 8: {
uint64_t old = *old_addr;
asm volatile ("csg %[old],%[new],%[address]"
: [old] "+d" (old),
[address] "+Q" (*(uint64_t *)(target))
: [new] "d" ((uint64_t)new)
: "cc"
);
ret = old == (uint64_t)*old_addr;
*old_addr = old;
return ret;
}
case 16: {
__uint128_t old = *old_addr;
asm volatile ("cdsg %[old],%[new],%[address]"
: [old] "+d" (old),
[address] "+Q" (*(__uint128_t *)(target))
: [new] "d" (new)
: "cc"
);
ret = old == *old_addr;
*old_addr = old;
return ret;
}
}
GUEST_FAIL("Invalid size = %u", size);
return 0;
}
const unsigned int cmpxchg_iter_outer = 100, cmpxchg_iter_inner = 10000;
static void guest_cmpxchg_key(void)
{
int size, offset;
__uint128_t old, new;
set_storage_key_range(mem1, max_block, 0x10);
set_storage_key_range(mem2, max_block, 0x10);
GUEST_SYNC(STAGE_SKEYS_SET);
for (int i = 0; i < cmpxchg_iter_outer; i++) {
do {
old = 1;
} while (!_cmpxchg(16, mem1, &old, 0));
for (int j = 0; j < cmpxchg_iter_inner; j++) {
choose_block(true, i + j, &size, &offset);
do {
new = permutate_bits(true, i + j, size, old);
} while (!_cmpxchg(size, mem2 + offset, &old, new));
}
}
GUEST_SYNC(STAGE_DONE);
}
static void *run_guest(void *data)
{
struct test_info *info = data;
HOST_SYNC(*info, STAGE_DONE);
return NULL;
}
static char *quad_to_char(__uint128_t *quad, int size)
{
return ((char *)quad) + (sizeof(*quad) - size);
}
static void test_cmpxchg_key_concurrent(void)
{
struct test_default t = test_default_init(guest_cmpxchg_key);
int size, offset;
__uint128_t old, new;
bool success;
pthread_t thread;
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
prepare_mem12();
MOP(t.vcpu, LOGICAL, WRITE, mem1, max_block, GADDR_V(mem2));
pthread_create(&thread, NULL, run_guest, &t.vcpu);
for (int i = 0; i < cmpxchg_iter_outer; i++) {
do {
old = 0;
new = 1;
MOP(t.vm, ABSOLUTE, CMPXCHG, &new,
sizeof(new), GADDR_V(mem1),
CMPXCHG_OLD(&old),
CMPXCHG_SUCCESS(&success), KEY(1));
} while (!success);
for (int j = 0; j < cmpxchg_iter_inner; j++) {
choose_block(false, i + j, &size, &offset);
do {
new = permutate_bits(false, i + j, size, old);
MOP(t.vm, ABSOLUTE, CMPXCHG, quad_to_char(&new, size),
size, GADDR_V(mem2 + offset),
CMPXCHG_OLD(quad_to_char(&old, size)),
CMPXCHG_SUCCESS(&success), KEY(1));
} while (!success);
}
}
pthread_join(thread, NULL);
MOP(t.vcpu, LOGICAL, READ, mem2, max_block, GADDR_V(mem2));
TEST_ASSERT(popcount_eq(*(__uint128_t *)mem1, *(__uint128_t *)mem2),
"Must retain number of set bits");
kvm_vm_free(t.kvm_vm);
}
static void guest_copy_key_fetch_prot(void)
{
/*
* For some reason combining the first sync with override enablement
* results in an exception when calling HOST_SYNC.
*/
GUEST_SYNC(STAGE_INITED);
/* Storage protection override applies to both store and fetch. */
set_storage_key_range(mem1, sizeof(mem1), 0x98);
set_storage_key_range(mem2, sizeof(mem2), 0x98);
GUEST_SYNC(STAGE_SKEYS_SET);
for (;;) {
memcpy(&mem2, &mem1, sizeof(mem2));
GUEST_SYNC(STAGE_COPIED);
}
}
static void test_copy_key_storage_prot_override(void)
{
struct test_default t = test_default_init(guest_copy_key_fetch_prot);
HOST_SYNC(t.vcpu, STAGE_INITED);
t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
t.run->kvm_dirty_regs = KVM_SYNC_CRS;
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
/* vcpu, mismatching keys, storage protection override in effect */
default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);
kvm_vm_free(t.kvm_vm);
}
static void test_copy_key_fetch_prot(void)
{
struct test_default t = test_default_init(guest_copy_key_fetch_prot);
HOST_SYNC(t.vcpu, STAGE_INITED);
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
/* vm/vcpu, matching key, fetch protection in effect */
default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);
kvm_vm_free(t.kvm_vm);
}
#define ERR_PROT_MOP(...) \
({ \
int rv; \
\
rv = ERR_MOP(__VA_ARGS__); \
TEST_ASSERT(rv == 4, "Should result in protection exception"); \
})
static void guest_error_key(void)
{
GUEST_SYNC(STAGE_INITED);
set_storage_key_range(mem1, PAGE_SIZE, 0x18);
set_storage_key_range(mem1 + PAGE_SIZE, sizeof(mem1) - PAGE_SIZE, 0x98);
GUEST_SYNC(STAGE_SKEYS_SET);
GUEST_SYNC(STAGE_IDLED);
}
static void test_errors_key(void)
{
struct test_default t = test_default_init(guest_error_key);
HOST_SYNC(t.vcpu, STAGE_INITED);
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
/* vm/vcpu, mismatching keys, fetch protection in effect */
CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem1), KEY(2));
CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem1), KEY(2));
kvm_vm_free(t.kvm_vm);
}
static void test_errors_cmpxchg_key(void)
{
struct test_default t = test_default_init(guest_copy_key_fetch_prot);
int i;
HOST_SYNC(t.vcpu, STAGE_INITED);
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
for (i = 1; i <= 16; i *= 2) {
__uint128_t old = 0;
ERR_PROT_MOP(t.vm, ABSOLUTE, CMPXCHG, mem2, i, GADDR_V(mem2),
CMPXCHG_OLD(&old), KEY(2));
}
kvm_vm_free(t.kvm_vm);
}
static void test_termination(void)
{
struct test_default t = test_default_init(guest_error_key);
uint64_t prefix;
uint64_t teid;
uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61);
uint64_t psw[2];
HOST_SYNC(t.vcpu, STAGE_INITED);
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
/* vcpu, mismatching keys after first page */
ERR_PROT_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(1), INJECT);
/*
* The memop injected a program exception and the test needs to check the
* Translation-Exception Identification (TEID). It is necessary to run
* the guest in order to be able to read the TEID from guest memory.
* Set the guest program new PSW, so the guest state is not clobbered.
*/
prefix = t.run->s.regs.prefix;
psw[0] = t.run->psw_mask;
psw[1] = t.run->psw_addr;
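	/*
	 * Per the s390 lowcore layout, offset 464 (0x1d0) relative to the
	 * prefix area holds the program new PSW and offset 168 (0xa8) the
	 * translation-exception identification.
	 */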
MOP(t.vm, ABSOLUTE, WRITE, psw, sizeof(psw), GADDR(prefix + 464));
HOST_SYNC(t.vcpu, STAGE_IDLED);
MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
/* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
TEST_ASSERT_EQ(teid & teid_mask, 0);
kvm_vm_free(t.kvm_vm);
}
static void test_errors_key_storage_prot_override(void)
{
struct test_default t = test_default_init(guest_copy_key_fetch_prot);
HOST_SYNC(t.vcpu, STAGE_INITED);
t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
t.run->kvm_dirty_regs = KVM_SYNC_CRS;
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
/* vm, mismatching keys, storage protection override not applicable to vm */
CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));
kvm_vm_free(t.kvm_vm);
}
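/*
 * -PAGE_SIZE read as an unsigned 64-bit value is 0xfffffffffffff000, i.e.
 * the start of the very last page of the address space; the fetch
 * protection override tests use it to exercise accesses that wrap around
 * to address 0.
 */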
const uint64_t last_page_addr = -PAGE_SIZE;
static void guest_copy_key_fetch_prot_override(void)
{
int i;
char *page_0 = 0;
GUEST_SYNC(STAGE_INITED);
set_storage_key_range(0, PAGE_SIZE, 0x18);
set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0L), [key] "r"(0x18) : "cc");
GUEST_SYNC(STAGE_SKEYS_SET);
for (;;) {
for (i = 0; i < PAGE_SIZE; i++)
page_0[i] = mem1[i];
GUEST_SYNC(STAGE_COPIED);
}
}
static void test_copy_key_fetch_prot_override(void)
{
struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
vm_vaddr_t guest_0_page, guest_last_page;
guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
if (guest_0_page != 0 || guest_last_page != last_page_addr) {
print_skip("did not allocate guest pages at required positions");
goto out;
}
HOST_SYNC(t.vcpu, STAGE_INITED);
t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
t.run->kvm_dirty_regs = KVM_SYNC_CRS;
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
/* vcpu, mismatching keys on fetch, fetch protection override applies */
prepare_mem12();
MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
HOST_SYNC(t.vcpu, STAGE_COPIED);
CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
ASSERT_MEM_EQ(mem1, mem2, 2048);
/*
* vcpu, mismatching keys on fetch, fetch protection override applies,
* wraparound
*/
prepare_mem12();
MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
HOST_SYNC(t.vcpu, STAGE_COPIED);
CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
GADDR_V(guest_last_page), KEY(2));
ASSERT_MEM_EQ(mem1, mem2, 2048);
out:
kvm_vm_free(t.kvm_vm);
}
static void test_errors_key_fetch_prot_override_not_enabled(void)
{
struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
vm_vaddr_t guest_0_page, guest_last_page;
guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
if (guest_0_page != 0 || guest_last_page != last_page_addr) {
print_skip("did not allocate guest pages at required positions");
goto out;
}
HOST_SYNC(t.vcpu, STAGE_INITED);
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
/* vcpu, mismatching keys on fetch, fetch protection override not enabled */
CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2));
out:
kvm_vm_free(t.kvm_vm);
}
static void test_errors_key_fetch_prot_override_enabled(void)
{
struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
vm_vaddr_t guest_0_page, guest_last_page;
guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
if (guest_0_page != 0 || guest_last_page != last_page_addr) {
print_skip("did not allocate guest pages at required positions");
goto out;
}
HOST_SYNC(t.vcpu, STAGE_INITED);
t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
t.run->kvm_dirty_regs = KVM_SYNC_CRS;
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
/*
	 * vcpu, mismatching keys on fetch; fetch protection override does not
	 * apply because the access exceeds the 2048-byte override range
*/
CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2));
CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
GADDR_V(guest_last_page), KEY(2));
	/* vm, fetch protection override does not apply */
CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2));
CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
out:
kvm_vm_free(t.kvm_vm);
}
static void guest_idle(void)
{
GUEST_SYNC(STAGE_INITED); /* for consistency's sake */
for (;;)
GUEST_SYNC(STAGE_IDLED);
}
static void _test_errors_common(struct test_info info, enum mop_target target, int size)
{
int rv;
/* Bad size: */
rv = ERR_MOP(info, target, WRITE, mem1, -1, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");
/* Zero size: */
rv = ERR_MOP(info, target, WRITE, mem1, 0, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
"ioctl allows 0 as size");
/* Bad flags: */
rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");
/* Bad guest address: */
rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address with CHECK_ONLY");
rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL));
TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address on write");
/* Bad host address: */
rv = ERR_MOP(info, target, WRITE, 0, size, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && errno == EFAULT,
"ioctl does not report bad host memory address");
/* Bad key: */
rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
}
static void test_errors(void)
{
struct test_default t = test_default_init(guest_idle);
int rv;
HOST_SYNC(t.vcpu, STAGE_INITED);
_test_errors_common(t.vcpu, LOGICAL, t.size);
_test_errors_common(t.vm, ABSOLUTE, t.size);
/* Bad operation: */
rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
/* virtual addresses are not translated when passing INVALID */
rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
/* Bad access register: */
t.run->psw_mask &= ~(3UL << (63 - 17));
t.run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */
HOST_SYNC(t.vcpu, STAGE_IDLED); /* To sync new state to SIE block */
rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
t.run->psw_mask &= ~(3UL << (63 - 17)); /* Disable AR mode */
HOST_SYNC(t.vcpu, STAGE_IDLED); /* Run to sync new state */
/* Check that the SIDA calls are rejected for non-protected guests */
rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
TEST_ASSERT(rv == -1 && errno == EINVAL,
"ioctl does not reject SIDA_READ in non-protected mode");
rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
TEST_ASSERT(rv == -1 && errno == EINVAL,
"ioctl does not reject SIDA_WRITE in non-protected mode");
kvm_vm_free(t.kvm_vm);
}
static void test_errors_cmpxchg(void)
{
struct test_default t = test_default_init(guest_idle);
__uint128_t old;
int rv, i, power = 1;
HOST_SYNC(t.vcpu, STAGE_INITED);
for (i = 0; i < 32; i++) {
if (i == power) {
power *= 2;
continue;
}
rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1),
CMPXCHG_OLD(&old));
TEST_ASSERT(rv == -1 && errno == EINVAL,
"ioctl allows bad size for cmpxchg");
}
for (i = 1; i <= 16; i *= 2) {
rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR((void *)~0xfffUL),
CMPXCHG_OLD(&old));
TEST_ASSERT(rv > 0, "ioctl allows bad guest address for cmpxchg");
}
for (i = 2; i <= 16; i *= 2) {
rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1 + 1),
CMPXCHG_OLD(&old));
TEST_ASSERT(rv == -1 && errno == EINVAL,
"ioctl allows bad alignment for cmpxchg");
}
kvm_vm_free(t.kvm_vm);
}
int main(int argc, char *argv[])
{
int extension_cap, idx;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP));
extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
struct testdef {
const char *name;
void (*test)(void);
bool requirements_met;
} testlist[] = {
{
.name = "simple copy",
.test = test_copy,
.requirements_met = true,
},
{
.name = "generic error checks",
.test = test_errors,
.requirements_met = true,
},
{
.name = "copy with storage keys",
.test = test_copy_key,
.requirements_met = extension_cap > 0,
},
{
.name = "cmpxchg with storage keys",
.test = test_cmpxchg_key,
.requirements_met = extension_cap & 0x2,
},
{
.name = "concurrently cmpxchg with storage keys",
.test = test_cmpxchg_key_concurrent,
.requirements_met = extension_cap & 0x2,
},
{
.name = "copy with key storage protection override",
.test = test_copy_key_storage_prot_override,
.requirements_met = extension_cap > 0,
},
{
.name = "copy with key fetch protection",
.test = test_copy_key_fetch_prot,
.requirements_met = extension_cap > 0,
},
{
.name = "copy with key fetch protection override",
.test = test_copy_key_fetch_prot_override,
.requirements_met = extension_cap > 0,
},
{
.name = "error checks with key",
.test = test_errors_key,
.requirements_met = extension_cap > 0,
},
{
.name = "error checks for cmpxchg with key",
.test = test_errors_cmpxchg_key,
.requirements_met = extension_cap & 0x2,
},
{
.name = "error checks for cmpxchg",
.test = test_errors_cmpxchg,
.requirements_met = extension_cap & 0x2,
},
{
.name = "termination",
.test = test_termination,
.requirements_met = extension_cap > 0,
},
{
.name = "error checks with key storage protection override",
.test = test_errors_key_storage_prot_override,
.requirements_met = extension_cap > 0,
},
{
.name = "error checks without key fetch prot override",
.test = test_errors_key_fetch_prot_override_not_enabled,
.requirements_met = extension_cap > 0,
},
{
.name = "error checks with key fetch prot override",
.test = test_errors_key_fetch_prot_override_enabled,
.requirements_met = extension_cap > 0,
},
};
ksft_print_header();
ksft_set_plan(ARRAY_SIZE(testlist));
for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
if (testlist[idx].requirements_met) {
testlist[idx].test();
ksft_test_result_pass("%s\n", testlist[idx].name);
} else {
ksft_test_result_skip("%s - requirements not met (kernel has extension cap %#x)\n",
testlist[idx].name, extension_cap);
}
}
ksft_finished(); /* Print results and exit() accordingly */
}
| linux-master | tools/testing/selftests/kvm/s390x/memop.c |
#include "../../../../lib/rbtree.c"
| linux-master | tools/testing/selftests/kvm/lib/rbtree.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "kvm_util.h"
#include "linux/types.h"
#include "linux/bitmap.h"
#include "linux/atomic.h"
#define GUEST_UCALL_FAILED -1
struct ucall_header {
DECLARE_BITMAP(in_use, KVM_MAX_VCPUS);
struct ucall ucalls[KVM_MAX_VCPUS];
};
int ucall_nr_pages_required(uint64_t page_size)
{
return align_up(sizeof(struct ucall_header), page_size) / page_size;
}
/*
 * ucall_pool holds per-VM values (global data is duplicated by each VM);
 * it must not be accessed from host code.
*/
static struct ucall_header *ucall_pool;
void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
struct ucall_header *hdr;
struct ucall *uc;
vm_vaddr_t vaddr;
int i;
vaddr = __vm_vaddr_alloc(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR, MEM_REGION_DATA);
hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr);
memset(hdr, 0, sizeof(*hdr));
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
uc = &hdr->ucalls[i];
uc->hva = uc;
}
write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr);
ucall_arch_init(vm, mmio_gpa);
}
static struct ucall *ucall_alloc(void)
{
struct ucall *uc;
int i;
if (!ucall_pool)
goto ucall_failed;
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (!test_and_set_bit(i, ucall_pool->in_use)) {
uc = &ucall_pool->ucalls[i];
memset(uc->args, 0, sizeof(uc->args));
return uc;
}
}
ucall_failed:
/*
* If the vCPU cannot grab a ucall structure, make a bare ucall with a
* magic value to signal to get_ucall() that things went sideways.
* GUEST_ASSERT() depends on ucall_alloc() and so cannot be used here.
*/
ucall_arch_do_ucall(GUEST_UCALL_FAILED);
return NULL;
}
static void ucall_free(struct ucall *uc)
{
/* Beware, here be pointer arithmetic. */
clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use);
}
void ucall_assert(uint64_t cmd, const char *exp, const char *file,
unsigned int line, const char *fmt, ...)
{
struct ucall *uc;
va_list va;
uc = ucall_alloc();
uc->cmd = cmd;
WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp));
WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file));
WRITE_ONCE(uc->args[GUEST_LINE], line);
va_start(va, fmt);
guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
va_end(va);
ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
ucall_free(uc);
}
void ucall_fmt(uint64_t cmd, const char *fmt, ...)
{
struct ucall *uc;
va_list va;
uc = ucall_alloc();
uc->cmd = cmd;
va_start(va, fmt);
guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
va_end(va);
ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
ucall_free(uc);
}
void ucall(uint64_t cmd, int nargs, ...)
{
struct ucall *uc;
va_list va;
int i;
uc = ucall_alloc();
WRITE_ONCE(uc->cmd, cmd);
nargs = min(nargs, UCALL_MAX_ARGS);
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
WRITE_ONCE(uc->args[i], va_arg(va, uint64_t));
va_end(va);
ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
ucall_free(uc);
}
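/*
 * Sketch of typical usage (the wrappers live in ucall_common.h): guest
 * code rarely calls ucall() directly, instead going through macros such
 * as GUEST_SYNC(stage) or GUEST_DONE(), which boil down to
 * ucall(UCALL_SYNC, ...) and ucall(UCALL_DONE, 0) and trap to the host
 * via the arch-specific ucall_arch_do_ucall().
 */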
uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
struct ucall ucall;
void *addr;
if (!uc)
uc = &ucall;
addr = ucall_arch_get_ucall(vcpu);
if (addr) {
TEST_ASSERT(addr != (void *)GUEST_UCALL_FAILED,
"Guest failed to allocate ucall struct");
memcpy(uc, addr, sizeof(*uc));
vcpu_run_complete_io(vcpu);
} else {
memset(uc, 0, sizeof(*uc));
}
return uc->cmd;
}
| linux-master | tools/testing/selftests/kvm/lib/ucall_common.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/kvm_util.c
*
* Copyright (C) 2018, Google LLC.
*/
#define _GNU_SOURCE /* for program_invocation_name */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include <assert.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/kernel.h>
#define KVM_UTIL_MIN_PFN 2
static int vcpu_mmap_sz(void);
int open_path_or_exit(const char *path, int flags)
{
int fd;
fd = open(path, flags);
__TEST_REQUIRE(fd >= 0, "%s not available (errno: %d)", path, errno);
return fd;
}
/*
* Open KVM_DEV_PATH if available, otherwise exit the entire program.
*
* Input Args:
* flags - The flags to pass when opening KVM_DEV_PATH.
*
* Return:
* The opened file descriptor of /dev/kvm.
*/
static int _open_kvm_dev_path_or_exit(int flags)
{
return open_path_or_exit(KVM_DEV_PATH, flags);
}
int open_kvm_dev_path_or_exit(void)
{
return _open_kvm_dev_path_or_exit(O_RDONLY);
}
static bool get_module_param_bool(const char *module_name, const char *param)
{
const int path_size = 128;
char path[path_size];
char value;
ssize_t r;
int fd;
r = snprintf(path, path_size, "/sys/module/%s/parameters/%s",
module_name, param);
TEST_ASSERT(r < path_size,
"Failed to construct sysfs path in %d bytes.", path_size);
fd = open_path_or_exit(path, O_RDONLY);
r = read(fd, &value, 1);
TEST_ASSERT(r == 1, "read(%s) failed", path);
r = close(fd);
TEST_ASSERT(!r, "close(%s) failed", path);
if (value == 'Y')
return true;
else if (value == 'N')
return false;
TEST_FAIL("Unrecognized value '%c' for boolean module param", value);
}
bool get_kvm_param_bool(const char *param)
{
return get_module_param_bool("kvm", param);
}
bool get_kvm_intel_param_bool(const char *param)
{
return get_module_param_bool("kvm_intel", param);
}
bool get_kvm_amd_param_bool(const char *param)
{
return get_module_param_bool("kvm_amd", param);
}
/*
* Capability
*
* Input Args:
* cap - Capability
*
* Output Args: None
*
* Return:
 *   On success, the value corresponding to the capability (KVM_CAP_*)
* specified by the value of cap. On failure a TEST_ASSERT failure
* is produced.
*
* Looks up and returns the value corresponding to the capability
* (KVM_CAP_*) given by cap.
*/
unsigned int kvm_check_cap(long cap)
{
int ret;
int kvm_fd;
kvm_fd = open_kvm_dev_path_or_exit();
ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap);
TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
close(kvm_fd);
return (unsigned int)ret;
}
void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
else
vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
vm->dirty_ring_size = ring_size;
}
static void vm_open(struct kvm_vm *vm)
{
vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);
TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT));
vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
}
const char *vm_guest_mode_string(uint32_t i)
{
static const char * const strings[] = {
[VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages",
[VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages",
[VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages",
[VM_MODE_P48V48_16K] = "PA-bits:48, VA-bits:48, 16K pages",
[VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages",
[VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages",
[VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages",
[VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
[VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
[VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages",
[VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages",
[VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages",
[VM_MODE_P36V48_16K] = "PA-bits:36, VA-bits:48, 16K pages",
[VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages",
[VM_MODE_P36V47_16K] = "PA-bits:36, VA-bits:47, 16K pages",
};
_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
"Missing new mode strings?");
TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i);
return strings[i];
}
const struct vm_guest_mode_params vm_guest_mode_params[] = {
[VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 },
[VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 },
[VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 },
[VM_MODE_P48V48_16K] = { 48, 48, 0x4000, 14 },
[VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 },
[VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 },
[VM_MODE_P40V48_16K] = { 40, 48, 0x4000, 14 },
[VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 },
[VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 },
[VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 },
[VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 },
[VM_MODE_P36V48_4K] = { 36, 48, 0x1000, 12 },
[VM_MODE_P36V48_16K] = { 36, 48, 0x4000, 14 },
[VM_MODE_P36V48_64K] = { 36, 48, 0x10000, 16 },
[VM_MODE_P36V47_16K] = { 36, 47, 0x4000, 14 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
/*
* Initializes vm->vpages_valid to match the canonical VA space of the
* architecture.
*
* The default implementation is valid for architectures which split the
* range addressed by a single page table into a low and high region
* based on the MSB of the VA. On architectures with this behavior
 * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1)), -1].
*/
__weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
sparsebit_set_num(vm->vpages_valid,
0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
sparsebit_set_num(vm->vpages_valid,
(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
(1ULL << (vm->va_bits - 1)) >> vm->page_shift);
}
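/*
 * Worked example for va_bits == 48: the valid ranges are
 * [0, 0x0000800000000000) and [0xffff800000000000, 2^64), i.e. the
 * canonical low and high halves of a 48-bit virtual address space.
 */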
struct kvm_vm *____vm_create(enum vm_guest_mode mode)
{
struct kvm_vm *vm;
vm = calloc(1, sizeof(*vm));
TEST_ASSERT(vm != NULL, "Insufficient Memory");
INIT_LIST_HEAD(&vm->vcpus);
vm->regions.gpa_tree = RB_ROOT;
vm->regions.hva_tree = RB_ROOT;
hash_init(vm->regions.slot_hash);
vm->mode = mode;
vm->type = 0;
vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
vm->va_bits = vm_guest_mode_params[mode].va_bits;
vm->page_size = vm_guest_mode_params[mode].page_size;
vm->page_shift = vm_guest_mode_params[mode].page_shift;
/* Setup mode specific traits. */
switch (vm->mode) {
case VM_MODE_P52V48_4K:
vm->pgtable_levels = 4;
break;
case VM_MODE_P52V48_64K:
vm->pgtable_levels = 3;
break;
case VM_MODE_P48V48_4K:
vm->pgtable_levels = 4;
break;
case VM_MODE_P48V48_64K:
vm->pgtable_levels = 3;
break;
case VM_MODE_P40V48_4K:
case VM_MODE_P36V48_4K:
vm->pgtable_levels = 4;
break;
case VM_MODE_P40V48_64K:
case VM_MODE_P36V48_64K:
vm->pgtable_levels = 3;
break;
case VM_MODE_P48V48_16K:
case VM_MODE_P40V48_16K:
case VM_MODE_P36V48_16K:
vm->pgtable_levels = 4;
break;
case VM_MODE_P36V47_16K:
vm->pgtable_levels = 3;
break;
case VM_MODE_PXXV48_4K:
#ifdef __x86_64__
kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
/*
* Ignore KVM support for 5-level paging (vm->va_bits == 57),
* it doesn't take effect unless a CR4.LA57 is set, which it
* isn't for this VM_MODE.
*/
TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
"Linear address width (%d bits) not supported",
vm->va_bits);
pr_debug("Guest physical address width detected: %d\n",
vm->pa_bits);
vm->pgtable_levels = 4;
vm->va_bits = 48;
#else
TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
break;
case VM_MODE_P47V64_4K:
vm->pgtable_levels = 5;
break;
case VM_MODE_P44V64_4K:
vm->pgtable_levels = 5;
break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
}
#ifdef __aarch64__
if (vm->pa_bits != 40)
vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif
vm_open(vm);
/* Limit to VA-bit canonical virtual addresses. */
vm->vpages_valid = sparsebit_alloc();
vm_vaddr_populate_bitmap(vm);
/* Limit physical addresses to PA-bits. */
vm->max_gfn = vm_compute_max_gfn(vm);
/* Allocate and setup memory for guest. */
vm->vpages_mapped = sparsebit_alloc();
return vm;
}
static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
uint32_t nr_runnable_vcpus,
uint64_t extra_mem_pages)
{
uint64_t page_size = vm_guest_mode_params[mode].page_size;
uint64_t nr_pages;
TEST_ASSERT(nr_runnable_vcpus,
"Use vm_create_barebones() for VMs that _never_ have vCPUs\n");
TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
"nr_vcpus = %d too large for host, max-vcpus = %d",
nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
/*
* Arbitrarily allocate 512 pages (2mb when page size is 4kb) for the
* test code and other per-VM assets that will be loaded into memslot0.
*/
nr_pages = 512;
/* Account for the per-vCPU stacks on behalf of the test. */
nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS;
/*
* Account for the number of pages needed for the page tables. The
* maximum page table size for a memory region will be when the
* smallest page size is used. Considering each page contains x page
* table descriptors, the total extra size for page tables (for extra
	 * N pages) will be: N/x + N/x^2 + N/x^3 + ..., a geometric series
	 * strictly less than 2*N/x for any x >= 2.
*/
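	/*
	 * Worked example: with 4 KiB pages and 8-byte descriptors, x = 512;
	 * for N = 262144 pages (1 GiB of guest memory) the bound is
	 * 2 * 262144 / 512 = 1024 page table pages, i.e. at most 4 MiB of
	 * overhead.
	 */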
nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;
/* Account for the number of pages needed by ucall. */
nr_pages += ucall_nr_pages_required(page_size);
return vm_adjust_num_guest_pages(mode, nr_pages);
}
struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
uint64_t nr_extra_pages)
{
uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus,
nr_extra_pages);
struct userspace_mem_region *slot0;
struct kvm_vm *vm;
int i;
pr_debug("%s: mode='%s' pages='%ld'\n", __func__,
vm_guest_mode_string(mode), nr_pages);
vm = ____vm_create(mode);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0);
for (i = 0; i < NR_MEM_REGIONS; i++)
vm->memslots[i] = 0;
kvm_vm_elf_load(vm, program_invocation_name);
/*
* TODO: Add proper defines to protect the library's memslots, and then
* carve out memslot1 for the ucall MMIO address. KVM treats writes to
* read-only memslots as MMIO, and creating a read-only memslot for the
* MMIO region would prevent silently clobbering the MMIO region.
*/
slot0 = memslot2region(vm, 0);
ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);
kvm_arch_vm_post_create(vm);
return vm;
}
/*
* VM Create with customized parameters
*
* Input Args:
* mode - VM Mode (e.g. VM_MODE_P52V48_4K)
* nr_vcpus - VCPU count
* extra_mem_pages - Non-slot0 physical memory total size
* guest_code - Guest entry point
 *   vcpus - Array that will be populated with pointers to the created vCPUs
*
* Output Args: None
*
* Return:
* Pointer to opaque structure that describes the created VM.
*
* Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
 * extra_mem_pages is only used to calculate the maximum page table size;
 * this function performs no actual memory allocation for non-slot0 memory.
*/
struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
uint64_t extra_mem_pages,
void *guest_code, struct kvm_vcpu *vcpus[])
{
struct kvm_vm *vm;
int i;
TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
vm = __vm_create(mode, nr_vcpus, extra_mem_pages);
for (i = 0; i < nr_vcpus; ++i)
vcpus[i] = vm_vcpu_add(vm, i, guest_code);
return vm;
}
struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
uint64_t extra_mem_pages,
void *guest_code)
{
struct kvm_vcpu *vcpus[1];
struct kvm_vm *vm;
vm = __vm_create_with_vcpus(VM_MODE_DEFAULT, 1, extra_mem_pages,
guest_code, vcpus);
*vcpu = vcpus[0];
return vm;
}
/*
* VM Restart
*
* Input Args:
* vm - VM that has been released before
*
* Output Args: None
*
 * Reopens the file descriptors associated with the VM and reinstates the
* global state, such as the irqchip and the memory regions that are mapped
* into the guest.
*/
void kvm_vm_restart(struct kvm_vm *vmp)
{
int ctr;
struct userspace_mem_region *region;
vm_open(vmp);
if (vmp->has_irqchip)
vm_create_irqchip(vmp);
hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
" rc: %i errno: %i\n"
" slot: %u flags: 0x%x\n"
" guest_phys_addr: 0x%llx size: 0x%llx",
ret, errno, region->region.slot,
region->region.flags,
region->region.guest_phys_addr,
region->region.memory_size);
}
}
__weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm,
uint32_t vcpu_id)
{
return __vm_vcpu_add(vm, vcpu_id);
}
struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
{
kvm_vm_restart(vm);
return vm_vcpu_recreate(vm, 0);
}
void kvm_pin_this_task_to_pcpu(uint32_t pcpu)
{
cpu_set_t mask;
int r;
CPU_ZERO(&mask);
CPU_SET(pcpu, &mask);
r = sched_setaffinity(0, sizeof(mask), &mask);
TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.\n", pcpu);
}
static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
{
uint32_t pcpu = atoi_non_negative("CPU number", cpu_str);
TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask),
"Not allowed to run on pCPU '%d', check cgroups?\n", pcpu);
return pcpu;
}
void kvm_print_vcpu_pinning_help(void)
{
const char *name = program_invocation_name;
printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n"
" values (target pCPU), one for each vCPU, plus an optional\n"
" entry for the main application task (specified via entry\n"
" <nr_vcpus + 1>). If used, entries must be provided for all\n"
" vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
" E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
" vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
" %s -v 3 -c 22,23,24,50\n\n"
" To leave the application task unpinned, drop the final entry:\n\n"
" %s -v 3 -c 22,23,24\n\n"
" (default: no pinning)\n", name, name);
}
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
int nr_vcpus)
{
cpu_set_t allowed_mask;
char *cpu, *cpu_list;
char delim[2] = ",";
int i, r;
cpu_list = strdup(pcpus_string);
TEST_ASSERT(cpu_list, "strdup() allocation failed.\n");
r = sched_getaffinity(0, sizeof(allowed_mask), &allowed_mask);
TEST_ASSERT(!r, "sched_getaffinity() failed");
cpu = strtok(cpu_list, delim);
/* 1. Get all pcpus for vcpus. */
for (i = 0; i < nr_vcpus; i++) {
TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'\n", i);
vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask);
cpu = strtok(NULL, delim);
}
/* 2. Check if the main worker needs to be pinned. */
if (cpu) {
kvm_pin_this_task_to_pcpu(parse_pcpu(cpu, &allowed_mask));
cpu = strtok(NULL, delim);
}
TEST_ASSERT(!cpu, "pCPU list contains trailing garbage characters '%s'", cpu);
free(cpu_list);
}
/*
* Userspace Memory Region Find
*
* Input Args:
* vm - Virtual Machine
* start - Starting VM physical address
* end - Ending VM physical address, inclusive.
*
* Output Args: None
*
* Return:
* Pointer to overlapping region, NULL if no such region.
*
* Searches for a region with any physical memory that overlaps with
* any portion of the guest physical addresses from start to end
* inclusive. If multiple overlapping regions exist, a pointer to any
* of the regions is returned. Null is returned only when no overlapping
* region exists.
*/
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
struct rb_node *node;
for (node = vm->regions.gpa_tree.rb_node; node; ) {
struct userspace_mem_region *region =
container_of(node, struct userspace_mem_region, gpa_node);
uint64_t existing_start = region->region.guest_phys_addr;
uint64_t existing_end = region->region.guest_phys_addr
+ region->region.memory_size - 1;
if (start <= existing_end && end >= existing_start)
return region;
if (start < existing_start)
node = node->rb_left;
else
node = node->rb_right;
}
return NULL;
}
/*
* KVM Userspace Memory Region Find
*
* Input Args:
* vm - Virtual Machine
* start - Starting VM physical address
* end - Ending VM physical address, inclusive.
*
* Output Args: None
*
* Return:
* Pointer to overlapping region, NULL if no such region.
*
* Public interface to userspace_mem_region_find. Allows tests to look up
* the memslot datastructure for a given range of guest physical memory.
*/
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
uint64_t end)
{
struct userspace_mem_region *region;
region = userspace_mem_region_find(vm, start, end);
if (!region)
return NULL;
return ®ion->region;
}
__weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
{
}
/*
* VM VCPU Remove
*
* Input Args:
* vcpu - VCPU to remove
*
* Output Args: None
*
* Return: None, TEST_ASSERT failures for all error conditions
*
* Removes a vCPU from a VM and frees its resources.
*/
static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
int ret;
if (vcpu->dirty_gfns) {
ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
vcpu->dirty_gfns = NULL;
}
ret = munmap(vcpu->run, vcpu_mmap_sz());
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
ret = close(vcpu->fd);
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
list_del(&vcpu->list);
vcpu_arch_free(vcpu);
free(vcpu);
}
void kvm_vm_release(struct kvm_vm *vmp)
{
struct kvm_vcpu *vcpu, *tmp;
int ret;
list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
vm_vcpu_rm(vmp, vcpu);
ret = close(vmp->fd);
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
ret = close(vmp->kvm_fd);
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
}
static void __vm_mem_region_delete(struct kvm_vm *vm,
struct userspace_mem_region *region,
bool unlink)
{
int ret;
if (unlink) {
rb_erase(®ion->gpa_node, &vm->regions.gpa_tree);
rb_erase(®ion->hva_node, &vm->regions.hva_tree);
hash_del(®ion->slot_node);
}
region->region.memory_size = 0;
vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, ®ion->region);
sparsebit_free(®ion->unused_phy_pages);
ret = munmap(region->mmap_start, region->mmap_size);
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
if (region->fd >= 0) {
/* There's an extra map when using shared memory. */
ret = munmap(region->mmap_alias, region->mmap_size);
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
close(region->fd);
}
free(region);
}
/*
* Destroys and frees the VM pointed to by vmp.
*/
void kvm_vm_free(struct kvm_vm *vmp)
{
int ctr;
struct hlist_node *node;
struct userspace_mem_region *region;
if (vmp == NULL)
return;
/* Free cached stats metadata and close FD */
if (vmp->stats_fd) {
free(vmp->stats_desc);
close(vmp->stats_fd);
}
/* Free userspace_mem_regions. */
hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
__vm_mem_region_delete(vmp, region, false);
/* Free sparsebit arrays. */
sparsebit_free(&vmp->vpages_valid);
sparsebit_free(&vmp->vpages_mapped);
kvm_vm_release(vmp);
/* Free the structure describing the VM. */
free(vmp);
}
int kvm_memfd_alloc(size_t size, bool hugepages)
{
int memfd_flags = MFD_CLOEXEC;
int fd, r;
if (hugepages)
memfd_flags |= MFD_HUGETLB;
fd = memfd_create("kvm_selftest", memfd_flags);
TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd));
r = ftruncate(fd, size);
TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r));
r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);
TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
return fd;
}
/*
* Memory Compare, host virtual to guest virtual
*
* Input Args:
* hva - Starting host virtual address
* vm - Virtual Machine
* gva - Starting guest virtual address
* len - number of bytes to compare
*
* Output Args: None
*
* Input/Output Args: None
*
* Return:
* Returns 0 if the bytes starting at hva for a length of len
 *   are equal to the guest virtual bytes starting at gva. Returns
* a value < 0, if bytes at hva are less than those at gva.
* Otherwise a value > 0 is returned.
*
* Compares the bytes starting at the host virtual address hva, for
* a length of len, to the guest bytes starting at the guest virtual
* address given by gva.
*/
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
size_t amt;
/*
	 * Compare a batch of bytes until either a mismatch is found
	 * or all the bytes have been compared.
*/
for (uintptr_t offset = 0; offset < len; offset += amt) {
uintptr_t ptr1 = (uintptr_t)hva + offset;
/*
* Determine host address for guest virtual address
* at offset.
*/
uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);
/*
* Determine amount to compare on this pass.
		 * Don't allow the comparison to cross a page boundary.
*/
amt = len - offset;
if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
amt = vm->page_size - (ptr1 % vm->page_size);
if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
amt = vm->page_size - (ptr2 % vm->page_size);
assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));
/*
* Perform the comparison. If there is a difference
* return that result to the caller, otherwise need
* to continue on looking for a mismatch.
*/
int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
if (ret != 0)
return ret;
}
/*
* No mismatch found. Let the caller know the two memory
* areas are equal.
*/
return 0;
}
static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
struct userspace_mem_region *region)
{
struct rb_node **cur, *parent;
for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
struct userspace_mem_region *cregion;
cregion = container_of(*cur, typeof(*cregion), gpa_node);
parent = *cur;
if (region->region.guest_phys_addr <
cregion->region.guest_phys_addr)
cur = &(*cur)->rb_left;
else {
TEST_ASSERT(region->region.guest_phys_addr !=
cregion->region.guest_phys_addr,
"Duplicate GPA in region tree");
cur = &(*cur)->rb_right;
}
}
rb_link_node(®ion->gpa_node, parent, cur);
rb_insert_color(®ion->gpa_node, gpa_tree);
}
static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
struct userspace_mem_region *region)
{
struct rb_node **cur, *parent;
for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
struct userspace_mem_region *cregion;
cregion = container_of(*cur, typeof(*cregion), hva_node);
parent = *cur;
if (region->host_mem < cregion->host_mem)
cur = &(*cur)->rb_left;
else {
TEST_ASSERT(region->host_mem !=
cregion->host_mem,
"Duplicate HVA in region tree");
cur = &(*cur)->rb_right;
}
}
rb_link_node(®ion->hva_node, parent, cur);
rb_insert_color(®ion->hva_node, hva_tree);
}
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
uint64_t gpa, uint64_t size, void *hva)
{
struct kvm_userspace_memory_region region = {
.slot = slot,
.flags = flags,
.guest_phys_addr = gpa,
.memory_size = size,
.userspace_addr = (uintptr_t)hva,
};
return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion);
}
void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
uint64_t gpa, uint64_t size, void *hva)
{
int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);
TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno = %d (%s)",
errno, strerror(errno));
}
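/*
 * Illustrative sketch (hypothetical slot and addresses): a test that wants
 * a raw memslot without the library's region tracking can do, e.g.:
 *
 *	void *hva = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	vm_set_user_memory_region(vm, 10, 0, 0x10000000, size, hva);
 *
 * Unlike vm_userspace_mem_region_add() below, this does not update the
 * library's GPA/HVA lookup trees.
 */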
/*
* VM Userspace Memory Region Add
*
* Input Args:
* vm - Virtual Machine
* src_type - Storage source for this region.
 *              VM_MEM_SRC_ANONYMOUS to use anonymous memory.
* guest_paddr - Starting guest physical address
* slot - KVM region slot
* npages - Number of physical pages
* flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
*
* Output Args: None
*
* Return: None
*
* Allocates a memory area of the number of pages specified by npages
* and maps it to the VM specified by vm, at a starting physical address
* given by guest_paddr. The region is created with a KVM region slot
* given by slot, which must be unique and < KVM_MEM_SLOTS_NUM. The
* region is created with the flags given by flags.
*/
void vm_userspace_mem_region_add(struct kvm_vm *vm,
enum vm_mem_backing_src_type src_type,
uint64_t guest_paddr, uint32_t slot, uint64_t npages,
uint32_t flags)
{
int ret;
struct userspace_mem_region *region;
size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
size_t alignment;
TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
"Number of guest pages is not compatible with the host. "
"Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
"address not on a page boundary.\n"
" guest_paddr: 0x%lx vm->page_size: 0x%x",
guest_paddr, vm->page_size);
TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
<= vm->max_gfn, "Physical range beyond maximum "
"supported physical address,\n"
" guest_paddr: 0x%lx npages: 0x%lx\n"
" vm->max_gfn: 0x%lx vm->page_size: 0x%x",
guest_paddr, npages, vm->max_gfn, vm->page_size);
/*
* Confirm a mem region with an overlapping address doesn't
* already exist.
*/
region = (struct userspace_mem_region *) userspace_mem_region_find(
vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
if (region != NULL)
TEST_FAIL("overlapping userspace_mem_region already "
"exists\n"
" requested guest_paddr: 0x%lx npages: 0x%lx "
"page_size: 0x%x\n"
" existing guest_paddr: 0x%lx size: 0x%lx",
guest_paddr, npages, vm->page_size,
(uint64_t) region->region.guest_phys_addr,
(uint64_t) region->region.memory_size);
/* Confirm no region with the requested slot already exists. */
hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
slot) {
if (region->region.slot != slot)
continue;
TEST_FAIL("A mem region with the requested slot "
"already exists.\n"
" requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
" existing slot: %u paddr: 0x%lx size: 0x%lx",
slot, guest_paddr, npages,
region->region.slot,
(uint64_t) region->region.guest_phys_addr,
(uint64_t) region->region.memory_size);
}
/* Allocate and initialize new mem region structure. */
region = calloc(1, sizeof(*region));
TEST_ASSERT(region != NULL, "Insufficient Memory");
region->mmap_size = npages * vm->page_size;
#ifdef __s390x__
/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
alignment = 0x100000;
#else
alignment = 1;
#endif
/*
	 * When using THP, mmap is not guaranteed to return a hugepage-aligned
* address so we have to pad the mmap. Padding is not needed for HugeTLB
* because mmap will always return an address aligned to the HugeTLB
* page size.
*/
if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
alignment = max(backing_src_pagesz, alignment);
TEST_ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
/* Add enough memory to align up if necessary */
if (alignment > 1)
region->mmap_size += alignment;
region->fd = -1;
if (backing_src_is_shared(src_type))
region->fd = kvm_memfd_alloc(region->mmap_size,
src_type == VM_MEM_SRC_SHARED_HUGETLB);
region->mmap_start = mmap(NULL, region->mmap_size,
PROT_READ | PROT_WRITE,
vm_mem_backing_src_alias(src_type)->flag,
region->fd, 0);
TEST_ASSERT(region->mmap_start != MAP_FAILED,
__KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
"mmap_start %p is not aligned to HugeTLB page size 0x%lx",
region->mmap_start, backing_src_pagesz);
/* Align host address */
region->host_mem = align_ptr_up(region->mmap_start, alignment);
/* As needed perform madvise */
if ((src_type == VM_MEM_SRC_ANONYMOUS ||
src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
ret = madvise(region->host_mem, npages * vm->page_size,
src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
region->host_mem, npages * vm->page_size,
vm_mem_backing_src_alias(src_type)->name);
}
region->backing_src_type = src_type;
region->unused_phy_pages = sparsebit_alloc();
sparsebit_set_num(region->unused_phy_pages,
guest_paddr >> vm->page_shift, npages);
region->region.slot = slot;
region->region.flags = flags;
region->region.guest_phys_addr = guest_paddr;
region->region.memory_size = npages * vm->page_size;
region->region.userspace_addr = (uintptr_t) region->host_mem;
ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, ®ion->region);
TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
" rc: %i errno: %i\n"
" slot: %u flags: 0x%x\n"
" guest_phys_addr: 0x%lx size: 0x%lx",
ret, errno, slot, flags,
guest_paddr, (uint64_t) region->region.memory_size);
/* Add to quick lookup data structures */
vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
hash_add(vm->regions.slot_hash, ®ion->slot_node, slot);
/* If shared memory, create an alias. */
if (region->fd >= 0) {
region->mmap_alias = mmap(NULL, region->mmap_size,
PROT_READ | PROT_WRITE,
vm_mem_backing_src_alias(src_type)->flag,
region->fd, 0);
TEST_ASSERT(region->mmap_alias != MAP_FAILED,
__KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
/* Align host alias address */
region->host_alias = align_ptr_up(region->mmap_alias, alignment);
}
}
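/*
 * Typical usage sketch (hypothetical GPA and slot values): back one extra
 * 2 MiB region with anonymous memory and dirty logging enabled, e.g.:
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0x10000000,
 *				    1, 512, KVM_MEM_LOG_DIRTY_PAGES);
 *
 * 512 pages of 4 KiB each is 2 MiB; the slot must not already be in use.
 */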
/*
* Memslot to region
*
* Input Args:
* vm - Virtual Machine
* memslot - KVM memory slot ID
*
* Output Args: None
*
* Return:
 *   Pointer to the memory region structure that describes the memory
 *   region using the KVM memory slot ID given by memslot. TEST_ASSERT
 *   failure on error (e.g. no memory region currently uses memslot as a
 *   KVM memory slot ID).
*/
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
struct userspace_mem_region *region;
hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
memslot)
if (region->region.slot == memslot)
return region;
fprintf(stderr, "No mem region with the requested slot found,\n"
" requested slot: %u\n", memslot);
fputs("---- vm dump ----\n", stderr);
vm_dump(stderr, vm, 2);
TEST_FAIL("Mem region not found");
return NULL;
}
/*
* VM Memory Region Flags Set
*
* Input Args:
* vm - Virtual Machine
 *   slot - KVM memory slot ID of the region to modify
 *   flags - New KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
*
* Output Args: None
*
* Return: None
*
* Sets the flags of the memory region specified by the value of slot,
* to the values given by flags.
*/
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
{
int ret;
struct userspace_mem_region *region;
region = memslot2region(vm, slot);
region->region.flags = flags;
ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, ®ion->region);
TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
" rc: %i errno: %i slot: %u flags: 0x%x",
ret, errno, slot, flags);
}
/*
* VM Memory Region Move
*
* Input Args:
* vm - Virtual Machine
* slot - Slot of the memory region to move
* new_gpa - Starting guest physical address
*
* Output Args: None
*
* Return: None
*
* Change the gpa of a memory region.
*/
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
{
struct userspace_mem_region *region;
int ret;
region = memslot2region(vm, slot);
region->region.guest_phys_addr = new_gpa;
ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, ®ion->region);
TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
"ret: %i errno: %i slot: %u new_gpa: 0x%lx",
ret, errno, slot, new_gpa);
}
/*
* VM Memory Region Delete
*
* Input Args:
* vm - Virtual Machine
* slot - Slot of the memory region to delete
*
* Output Args: None
*
* Return: None
*
* Delete a memory region.
*/
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
__vm_mem_region_delete(vm, memslot2region(vm, slot), true);
}
/* Returns the size of a vCPU's kvm_run structure. */
static int vcpu_mmap_sz(void)
{
int dev_fd, ret;
dev_fd = open_kvm_dev_path_or_exit();
ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
TEST_ASSERT(ret >= sizeof(struct kvm_run),
KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret));
close(dev_fd);
return ret;
}
static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
{
struct kvm_vcpu *vcpu;
list_for_each_entry(vcpu, &vm->vcpus, list) {
if (vcpu->id == vcpu_id)
return true;
}
return false;
}
/*
* Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
* No additional vCPU setup is done. Returns the vCPU.
*/
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
struct kvm_vcpu *vcpu;
/* Confirm a vcpu with the specified id doesn't already exist. */
TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id);
/* Allocate and initialize new vcpu structure. */
vcpu = calloc(1, sizeof(*vcpu));
TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
vcpu->vm = vm;
vcpu->id = vcpu_id;
vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);
TEST_ASSERT(vcpu->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu->fd));
TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
"smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
vcpu_mmap_sz(), sizeof(*vcpu->run));
vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
TEST_ASSERT(vcpu->run != MAP_FAILED,
__KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
/* Add to linked-list of VCPUs. */
list_add(&vcpu->list, &vm->vcpus);
return vcpu;
}
/*
* VM Virtual Address Unused Gap
*
* Input Args:
* vm - Virtual Machine
* sz - Size (bytes)
* vaddr_min - Minimum Virtual Address
*
* Output Args: None
*
* Return:
 *   Lowest virtual address at or above vaddr_min, with at least
* sz unused bytes. TEST_ASSERT failure if no area of at least
* size sz is available.
*
* Within the VM specified by vm, locates the lowest starting virtual
* address >= vaddr_min, that has at least sz unallocated bytes. A
* TEST_ASSERT failure occurs for invalid input or no area of at least
* sz unallocated bytes >= vaddr_min is available.
*/
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
vm_vaddr_t vaddr_min)
{
uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
/* Determine lowest permitted virtual page index. */
uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
if ((pgidx_start * vm->page_size) < vaddr_min)
goto no_va_found;
/* Loop over section with enough valid virtual page indexes. */
if (!sparsebit_is_set_num(vm->vpages_valid,
pgidx_start, pages))
pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
pgidx_start, pages);
do {
/*
* Are there enough unused virtual pages available at
* the currently proposed starting virtual page index.
* If not, adjust proposed starting index to next
* possible.
*/
if (sparsebit_is_clear_num(vm->vpages_mapped,
pgidx_start, pages))
goto va_found;
pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
pgidx_start, pages);
if (pgidx_start == 0)
goto no_va_found;
/*
* If needed, adjust proposed starting virtual address,
* to next range of valid virtual addresses.
*/
if (!sparsebit_is_set_num(vm->vpages_valid,
pgidx_start, pages)) {
pgidx_start = sparsebit_next_set_num(
vm->vpages_valid, pgidx_start, pages);
if (pgidx_start == 0)
goto no_va_found;
}
} while (pgidx_start != 0);
no_va_found:
TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);
/* NOT REACHED */
return -1;
va_found:
TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
pgidx_start, pages),
"Unexpected, invalid virtual page index range,\n"
" pgidx_start: 0x%lx\n"
" pages: 0x%lx",
pgidx_start, pages);
TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
pgidx_start, pages),
"Unexpected, pages already mapped,\n"
" pgidx_start: 0x%lx\n"
" pages: 0x%lx",
pgidx_start, pages);
return pgidx_start * vm->page_size;
}
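/*
 * Worked example of the search above, assuming 4K pages: if pages 0-3 are
 * already mapped and everything from page 4 up is free, then
 * vm_vaddr_unused_gap(vm, 0x2000, 0x1000) proposes page index 1, finds
 * pages 1-2 set in vpages_mapped, advances to the next clear run at page 4,
 * and returns 0x4000.
 */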
vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
enum kvm_mem_region_type type)
{
uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
virt_pgd_alloc(vm);
vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
KVM_UTIL_MIN_PFN * vm->page_size,
vm->memslots[type]);
/*
* Find an unused range of virtual page addresses of at least
* pages in length.
*/
vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
/* Map the virtual pages. */
for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
pages--, vaddr += vm->page_size, paddr += vm->page_size) {
virt_pg_map(vm, vaddr, paddr);
sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
}
return vaddr_start;
}
/*
* VM Virtual Address Allocate
*
* Input Args:
* vm - Virtual Machine
* sz - Size in bytes
* vaddr_min - Minimum starting virtual address
*
* Output Args: None
*
* Return:
* Starting guest virtual address
*
* Allocates at least sz bytes within the virtual address space of the vm
* given by vm. The allocated bytes are mapped to a virtual address >=
 * the address given by vaddr_min.  Note that each allocation uses a
 * unique set of pages, with the minimum real allocation being at least
* a page. The allocated physical space comes from the TEST_DATA memory region.
*/
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
{
return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
}
/*
* VM Virtual Address Allocate Pages
*
 * Input Args:
 *   vm - Virtual Machine
 *   nr_pages - Number of system pages to allocate
*
* Output Args: None
*
* Return:
* Starting guest virtual address
*
* Allocates at least N system pages worth of bytes within the virtual address
* space of the vm.
*/
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
{
return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
}
vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
{
return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
}
/*
* VM Virtual Address Allocate Page
*
* Input Args:
* vm - Virtual Machine
*
* Output Args: None
*
* Return:
* Starting guest virtual address
*
* Allocates at least one system page worth of bytes within the virtual address
* space of the vm.
*/
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
{
return vm_vaddr_alloc_pages(vm, 1);
}
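/*
 * Sketch tying the allocators above together (illustrative only): allocate
 * one guest page and initialize it from the host via the GVA->HVA
 * translation:
 *
 *	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);
 *
 *	memset(addr_gva2hva(vm, gva), 0xaa, getpagesize());
 */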
/*
 * Map a range of VM virtual addresses to the VM's physical addresses
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - Virtual address to map
 *   paddr - VM Physical Address
 *   npages - The number of pages to map
*
* Output Args: None
*
* Return: None
*
* Within the VM given by @vm, creates a virtual translation for
* @npages starting at @vaddr to the page range starting at @paddr.
*/
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
unsigned int npages)
{
size_t page_size = vm->page_size;
size_t size = npages * page_size;
TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
while (npages--) {
virt_pg_map(vm, vaddr, paddr);
sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
vaddr += page_size;
paddr += page_size;
}
}
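/*
 * Usage sketch for virt_map(); the addresses are arbitrary examples and the
 * GPA range must be backed by an existing memslot. This identity-maps four
 * pages:
 *
 *	virt_map(vm, 0x10000, 0x10000, 4);
 */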
/*
* Address VM Physical to Host Virtual
*
* Input Args:
* vm - Virtual Machine
* gpa - VM physical address
*
* Output Args: None
*
* Return:
* Equivalent host virtual address
*
* Locates the memory region containing the VM physical address given
* by gpa, within the VM given by vm. When found, the host virtual
* address providing the memory to the vm physical address is returned.
* A TEST_ASSERT failure occurs if no region containing gpa exists.
*/
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
{
struct userspace_mem_region *region;
region = userspace_mem_region_find(vm, gpa, gpa);
if (!region) {
TEST_FAIL("No vm physical memory at 0x%lx", gpa);
return NULL;
}
return (void *)((uintptr_t)region->host_mem
+ (gpa - region->region.guest_phys_addr));
}
/*
* Address Host Virtual to VM Physical
*
* Input Args:
* vm - Virtual Machine
* hva - Host virtual address
*
* Output Args: None
*
* Return:
* Equivalent VM physical address
*
* Locates the memory region containing the host virtual address given
* by hva, within the VM given by vm. When found, the equivalent
* VM physical address is returned. A TEST_ASSERT failure occurs if no
* region containing hva exists.
*/
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
struct rb_node *node;
for (node = vm->regions.hva_tree.rb_node; node; ) {
struct userspace_mem_region *region =
container_of(node, struct userspace_mem_region, hva_node);
if (hva >= region->host_mem) {
if (hva <= (region->host_mem
+ region->region.memory_size - 1))
return (vm_paddr_t)((uintptr_t)
region->region.guest_phys_addr
+ (hva - (uintptr_t)region->host_mem));
node = node->rb_right;
} else
node = node->rb_left;
}
TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
return -1;
}
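/*
 * Round-trip sketch for the two translations above; gpa can be any address
 * inside a registered region:
 *
 *	void *hva = addr_gpa2hva(vm, gpa);
 *
 *	TEST_ASSERT(addr_hva2gpa(vm, hva) == gpa, "translation round trip");
 */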
/*
* Address VM physical to Host Virtual *alias*.
*
* Input Args:
* vm - Virtual Machine
* gpa - VM physical address
*
* Output Args: None
*
* Return:
* Equivalent address within the host virtual *alias* area, or NULL
* (without failing the test) if the guest memory is not shared (so
* no alias exists).
*
* Create a writable, shared virtual=>physical alias for the specific GPA.
* The primary use case is to allow the host selftest to manipulate guest
* memory without mapping said memory in the guest's address space. And, for
* userfaultfd-based demand paging, to do so without triggering userfaults.
*/
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
{
struct userspace_mem_region *region;
uintptr_t offset;
region = userspace_mem_region_find(vm, gpa, gpa);
if (!region)
return NULL;
if (!region->host_alias)
return NULL;
offset = gpa - region->region.guest_phys_addr;
return (void *) ((uintptr_t) region->host_alias + offset);
}
/* Create an interrupt controller chip for the specified VM. */
void vm_create_irqchip(struct kvm_vm *vm)
{
vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);
vm->has_irqchip = true;
}
int _vcpu_run(struct kvm_vcpu *vcpu)
{
int rc;
do {
rc = __vcpu_run(vcpu);
} while (rc == -1 && errno == EINTR);
assert_on_unhandled_exception(vcpu);
return rc;
}
/*
* Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
 * Assert if KVM returns an error (other than -EINTR).
*/
void vcpu_run(struct kvm_vcpu *vcpu)
{
int ret = _vcpu_run(vcpu);
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
}
void vcpu_run_complete_io(struct kvm_vcpu *vcpu)
{
int ret;
vcpu->run->immediate_exit = 1;
ret = __vcpu_run(vcpu);
vcpu->run->immediate_exit = 0;
TEST_ASSERT(ret == -1 && errno == EINTR,
"KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
ret, errno);
}
/*
* Get the list of guest registers which are supported for
* KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Returns a kvm_reg_list pointer,
* it is the caller's responsibility to free the list.
*/
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
{
struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
int ret;
ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, ®_list_n);
TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
reg_list->n = reg_list_n.n;
vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list);
return reg_list;
}
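/*
 * Caller-side sketch for the helper above; the caller owns (and must free)
 * the returned list:
 *
 *	struct kvm_reg_list *list = vcpu_get_reg_list(vcpu);
 *	int i;
 *
 *	for (i = 0; i < list->n; i++)
 *		pr_info("reg[%d]: 0x%llx\n", i, list->reg[i]);
 *	free(list);
 */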
void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
{
uint32_t page_size = getpagesize();
uint32_t size = vcpu->vm->dirty_ring_size;
TEST_ASSERT(size > 0, "Should enable dirty ring first");
if (!vcpu->dirty_gfns) {
void *addr;
addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd,
page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");
addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd,
page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");
addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
vcpu->dirty_gfns = addr;
vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
}
return vcpu->dirty_gfns;
}
/*
* Device Ioctl
*/
int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
struct kvm_device_attr attribute = {
.group = group,
.attr = attr,
.flags = 0,
};
return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
}
int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
{
struct kvm_create_device create_dev = {
.type = type,
.flags = KVM_CREATE_DEVICE_TEST,
};
return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
}
int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
struct kvm_create_device create_dev = {
.type = type,
.fd = -1,
.flags = 0,
};
int err;
err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
TEST_ASSERT(err <= 0, "KVM_CREATE_DEVICE shouldn't return a positive value");
return err ? : create_dev.fd;
}
int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
{
struct kvm_device_attr kvmattr = {
.group = group,
.attr = attr,
.flags = 0,
.addr = (uintptr_t)val,
};
return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr);
}
int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
{
struct kvm_device_attr kvmattr = {
.group = group,
.attr = attr,
.flags = 0,
.addr = (uintptr_t)val,
};
return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr);
}
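/*
 * Typical call pattern for the attribute accessors above; "group" and
 * "attr" are placeholders here, the real values are device specific:
 *
 *	uint64_t val;
 *
 *	if (!__kvm_device_attr_get(dev_fd, group, attr, &val))
 *		__kvm_device_attr_set(dev_fd, group, attr, &val);
 */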
/*
* IRQ related functions.
*/
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
struct kvm_irq_level irq_level = {
.irq = irq,
.level = level,
};
return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
}
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
int ret = _kvm_irq_line(vm, irq, level);
TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
}
struct kvm_irq_routing *kvm_gsi_routing_create(void)
{
struct kvm_irq_routing *routing;
size_t size;
size = sizeof(struct kvm_irq_routing);
/* Allocate space for the max number of entries: this wastes 196 KBs. */
size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
routing = calloc(1, size);
assert(routing);
return routing;
}
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
uint32_t gsi, uint32_t pin)
{
int i;
assert(routing);
assert(routing->nr < KVM_MAX_IRQ_ROUTES);
i = routing->nr;
routing->entries[i].gsi = gsi;
routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
routing->entries[i].flags = 0;
routing->entries[i].u.irqchip.irqchip = 0;
routing->entries[i].u.irqchip.pin = pin;
routing->nr++;
}
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
int ret;
assert(routing);
ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
free(routing);
return ret;
}
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
int ret;
ret = _kvm_gsi_routing_write(vm, routing);
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_GSI_ROUTING, ret));
}
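/*
 * End-to-end sketch of the GSI routing helpers above; the GSI and pin
 * numbers are arbitrary examples. Note that kvm_gsi_routing_write() (via
 * _kvm_gsi_routing_write()) frees the routing table:
 *
 *	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
 *
 *	kvm_gsi_routing_irqchip_add(routing, 32, 0);
 *	kvm_gsi_routing_write(vm, routing);
 */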
/*
* VM Dump
*
* Input Args:
* vm - Virtual Machine
* indent - Left margin indent amount
*
* Output Args:
* stream - Output FILE stream
*
* Return: None
*
* Dumps the current state of the VM given by vm, to the FILE stream
* given by stream.
*/
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
int ctr;
struct userspace_mem_region *region;
struct kvm_vcpu *vcpu;
fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
fprintf(stream, "%*sMem Regions:\n", indent, "");
hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
"host_virt: %p\n", indent + 2, "",
(uint64_t) region->region.guest_phys_addr,
(uint64_t) region->region.memory_size,
region->host_mem);
fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
sparsebit_dump(stream, region->unused_phy_pages, 0);
}
fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
fprintf(stream, "%*spgd_created: %u\n", indent, "",
vm->pgd_created);
if (vm->pgd_created) {
fprintf(stream, "%*sVirtual Translation Tables:\n",
indent + 2, "");
virt_dump(stream, vm, indent + 4);
}
fprintf(stream, "%*sVCPUs:\n", indent, "");
list_for_each_entry(vcpu, &vm->vcpus, list)
vcpu_dump(stream, vcpu, indent + 2);
}
#define KVM_EXIT_STRING(x) {KVM_EXIT_##x, #x}
/* Known KVM exit reasons */
static struct exit_reason {
unsigned int reason;
const char *name;
} exit_reasons_known[] = {
KVM_EXIT_STRING(UNKNOWN),
KVM_EXIT_STRING(EXCEPTION),
KVM_EXIT_STRING(IO),
KVM_EXIT_STRING(HYPERCALL),
KVM_EXIT_STRING(DEBUG),
KVM_EXIT_STRING(HLT),
KVM_EXIT_STRING(MMIO),
KVM_EXIT_STRING(IRQ_WINDOW_OPEN),
KVM_EXIT_STRING(SHUTDOWN),
KVM_EXIT_STRING(FAIL_ENTRY),
KVM_EXIT_STRING(INTR),
KVM_EXIT_STRING(SET_TPR),
KVM_EXIT_STRING(TPR_ACCESS),
KVM_EXIT_STRING(S390_SIEIC),
KVM_EXIT_STRING(S390_RESET),
KVM_EXIT_STRING(DCR),
KVM_EXIT_STRING(NMI),
KVM_EXIT_STRING(INTERNAL_ERROR),
KVM_EXIT_STRING(OSI),
KVM_EXIT_STRING(PAPR_HCALL),
KVM_EXIT_STRING(S390_UCONTROL),
KVM_EXIT_STRING(WATCHDOG),
KVM_EXIT_STRING(S390_TSCH),
KVM_EXIT_STRING(EPR),
KVM_EXIT_STRING(SYSTEM_EVENT),
KVM_EXIT_STRING(S390_STSI),
KVM_EXIT_STRING(IOAPIC_EOI),
KVM_EXIT_STRING(HYPERV),
KVM_EXIT_STRING(ARM_NISV),
KVM_EXIT_STRING(X86_RDMSR),
KVM_EXIT_STRING(X86_WRMSR),
KVM_EXIT_STRING(DIRTY_RING_FULL),
KVM_EXIT_STRING(AP_RESET_HOLD),
KVM_EXIT_STRING(X86_BUS_LOCK),
KVM_EXIT_STRING(XEN),
KVM_EXIT_STRING(RISCV_SBI),
KVM_EXIT_STRING(RISCV_CSR),
KVM_EXIT_STRING(NOTIFY),
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
KVM_EXIT_STRING(MEMORY_NOT_PRESENT),
#endif
};
/*
* Exit Reason String
*
* Input Args:
* exit_reason - Exit reason
*
* Output Args: None
*
* Return:
* Constant string pointer describing the exit reason.
*
* Locates and returns a constant string that describes the KVM exit
* reason given by exit_reason. If no such string is found, a constant
* string of "Unknown" is returned.
*/
const char *exit_reason_str(unsigned int exit_reason)
{
unsigned int n1;
for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
if (exit_reason == exit_reasons_known[n1].reason)
return exit_reasons_known[n1].name;
}
return "Unknown";
}
/*
* Physical Contiguous Page Allocator
*
* Input Args:
* vm - Virtual Machine
* num - number of pages
* paddr_min - Physical address minimum
* memslot - Memory region to allocate page from
*
* Output Args: None
*
* Return:
* Starting physical address
*
* Within the VM specified by vm, locates a range of available physical
* pages at or above paddr_min. If found, the pages are marked as in use
* and their base address is returned. A TEST_ASSERT failure occurs if
* not enough pages are available at or above paddr_min.
*/
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
vm_paddr_t paddr_min, uint32_t memslot)
{
struct userspace_mem_region *region;
sparsebit_idx_t pg, base;
TEST_ASSERT(num > 0, "Must allocate at least one page");
TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
"not divisible by page size.\n"
" paddr_min: 0x%lx page_size: 0x%x",
paddr_min, vm->page_size);
region = memslot2region(vm, memslot);
base = pg = paddr_min >> vm->page_shift;
do {
for (; pg < base + num; ++pg) {
if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
break;
}
}
} while (pg && pg != base + num);
if (pg == 0) {
fprintf(stderr, "No guest physical page available, "
"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
paddr_min, vm->page_size, memslot);
fputs("---- vm dump ----\n", stderr);
vm_dump(stderr, vm, 2);
abort();
}
for (pg = base; pg < base + num; ++pg)
sparsebit_clear(region->unused_phy_pages, pg);
return base * vm->page_size;
}
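/*
 * Worked example of the search loop above: a request for 2 pages at
 * paddr_min 0, with page 0 in use and pages 1+ free, starts with
 * base = pg = 0, finds page 0 clear in unused_phy_pages, restarts at the
 * next set bit (page 1), and returns 1 * page_size once pages 1-2 are
 * confirmed free.
 */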
vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
uint32_t memslot)
{
return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
{
return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
vm->memslots[MEM_REGION_PT]);
}
/*
* Address Guest Virtual to Host Virtual
*
* Input Args:
* vm - Virtual Machine
* gva - VM virtual address
*
* Output Args: None
*
* Return:
* Equivalent host virtual address
*/
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}
unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm)
{
return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
}
static unsigned int vm_calc_num_pages(unsigned int num_pages,
unsigned int page_shift,
unsigned int new_page_shift,
bool ceil)
{
unsigned int n = 1 << (new_page_shift - page_shift);
if (page_shift >= new_page_shift)
return num_pages * (1 << (page_shift - new_page_shift));
return num_pages / n + !!(ceil && num_pages % n);
}
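/*
 * Worked example for the conversion above: 3 guest pages with a 64K page
 * size (page_shift 16) become 3 * (1 << (16 - 12)) = 48 host pages of 4K
 * (page_shift 12); the opposite direction divides, and the ceil flag
 * decides whether a partial page rounds up.
 */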
static inline int getpageshift(void)
{
return __builtin_ffs(getpagesize()) - 1;
}
unsigned int
vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
return vm_calc_num_pages(num_guest_pages,
vm_guest_mode_params[mode].page_shift,
getpageshift(), true);
}
unsigned int
vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
{
return vm_calc_num_pages(num_host_pages, getpageshift(),
vm_guest_mode_params[mode].page_shift, false);
}
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
{
unsigned int n;
n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
return vm_adjust_num_guest_pages(mode, n);
}
/*
* Read binary stats descriptors
*
* Input Args:
* stats_fd - the file descriptor for the binary stats file from which to read
* header - the binary stats metadata header corresponding to the given FD
*
* Output Args: None
*
* Return:
* A pointer to a newly allocated series of stat descriptors.
* Caller is responsible for freeing the returned kvm_stats_desc.
*
* Read the stats descriptors from the binary stats interface.
*/
struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
struct kvm_stats_header *header)
{
struct kvm_stats_desc *stats_desc;
ssize_t desc_size, total_size, ret;
desc_size = get_stats_descriptor_size(header);
total_size = header->num_desc * desc_size;
stats_desc = calloc(header->num_desc, desc_size);
TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors");
ret = pread(stats_fd, stats_desc, total_size, header->desc_offset);
TEST_ASSERT(ret == total_size, "Read KVM stats descriptors");
return stats_desc;
}
/*
* Read stat data for a particular stat
*
* Input Args:
* stats_fd - the file descriptor for the binary stats file from which to read
* header - the binary stats metadata header corresponding to the given FD
* desc - the binary stat metadata for the particular stat to be read
* max_elements - the maximum number of 8-byte values to read into data
*
* Output Args:
* data - the buffer into which stat data should be read
*
* Read the data values of a specified stat from the binary stats interface.
*/
void read_stat_data(int stats_fd, struct kvm_stats_header *header,
struct kvm_stats_desc *desc, uint64_t *data,
size_t max_elements)
{
size_t nr_elements = min_t(ssize_t, desc->size, max_elements);
size_t size = nr_elements * sizeof(*data);
ssize_t ret;
TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name);
TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name);
ret = pread(stats_fd, data, size,
header->data_offset + desc->offset);
TEST_ASSERT(ret >= 0, "pread() failed on stat '%s', errno: %i (%s)",
desc->name, errno, strerror(errno));
	TEST_ASSERT(ret == size,
		    "pread() on stat '%s' read %ld bytes, wanted %lu bytes",
		    desc->name, ret, size);
}
/*
* Read the data of the named stat
*
* Input Args:
* vm - the VM for which the stat should be read
* stat_name - the name of the stat to read
* max_elements - the maximum number of 8-byte values to read into data
*
* Output Args:
* data - the buffer into which stat data should be read
*
* Read the data values of a specified stat from the binary stats interface.
*/
void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
size_t max_elements)
{
struct kvm_stats_desc *desc;
size_t size_desc;
int i;
if (!vm->stats_fd) {
vm->stats_fd = vm_get_stats_fd(vm);
read_stats_header(vm->stats_fd, &vm->stats_header);
vm->stats_desc = read_stats_descriptors(vm->stats_fd,
&vm->stats_header);
}
size_desc = get_stats_descriptor_size(&vm->stats_header);
for (i = 0; i < vm->stats_header.num_desc; ++i) {
desc = (void *)vm->stats_desc + (i * size_desc);
if (strcmp(desc->name, stat_name))
continue;
read_stat_data(vm->stats_fd, &vm->stats_header, desc,
data, max_elements);
break;
}
}
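/*
 * Caller-side sketch for the stat reader above; "pages_4k" is just an
 * example name and must match a descriptor the kernel actually exposes:
 *
 *	uint64_t pages_4k;
 *
 *	__vm_get_stat(vm, "pages_4k", &pages_4k, 1);
 */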
__weak void kvm_arch_vm_post_create(struct kvm_vm *vm)
{
}
__weak void kvm_selftest_arch_init(void)
{
}
void __attribute((constructor)) kvm_selftest_init(void)
{
/* Tell stdout not to buffer its content. */
setbuf(stdout, NULL);
kvm_selftest_arch_init();
}
| linux-master | tools/testing/selftests/kvm/lib/kvm_util.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/io.c
*
* Copyright (C) 2018, Google LLC.
*/
#include "test_util.h"
/* Test Write
*
* A wrapper for write(2), that automatically handles the following
* special conditions:
*
* + Interrupted system call (EINTR)
* + Write of less than requested amount
* + Non-block return (EAGAIN)
*
* For each of the above, an additional write is performed to automatically
* continue writing the requested data.
* There are also many cases where write(2) can return an unexpected
* error (e.g. EIO). Such errors cause a TEST_ASSERT failure.
*
* Note, for function signature compatibility with write(2), this function
* returns the number of bytes written, but that value will always be equal
* to the number of requested bytes. All other conditions in this and
* future enhancements to this function either automatically issue another
* write(2) or cause a TEST_ASSERT failure.
*
 * Args:
 *  fd    - Opened file descriptor to file to be written.
 *  buf   - Starting address of data to be written.
 *  count - Number of bytes to write.
 *
 * Output: None
*
* Return:
* On success, number of bytes written.
* On failure, a TEST_ASSERT failure is caused.
*/
ssize_t test_write(int fd, const void *buf, size_t count)
{
ssize_t rc;
ssize_t num_written = 0;
size_t num_left = count;
const char *ptr = buf;
/* Note: Count of zero is allowed (see "RETURN VALUE" portion of
	 * write(2) manpage for details).
*/
TEST_ASSERT(count >= 0, "Unexpected count, count: %li", count);
do {
rc = write(fd, ptr, num_left);
switch (rc) {
case -1:
TEST_ASSERT(errno == EAGAIN || errno == EINTR,
"Unexpected write failure,\n"
" rc: %zi errno: %i", rc, errno);
continue;
case 0:
TEST_FAIL("Unexpected EOF,\n"
" rc: %zi num_written: %zi num_left: %zu",
rc, num_written, num_left);
break;
default:
TEST_ASSERT(rc >= 0, "Unexpected ret from write,\n"
" rc: %zi errno: %i", rc, errno);
num_written += rc;
num_left -= rc;
ptr += rc;
break;
}
} while (num_written < count);
return num_written;
}
/* Test Read
*
* A wrapper for read(2), that automatically handles the following
* special conditions:
*
* + Interrupted system call (EINTR)
* + Read of less than requested amount
* + Non-block return (EAGAIN)
*
* For each of the above, an additional read is performed to automatically
* continue reading the requested data.
* There are also many cases where read(2) can return an unexpected
* error (e.g. EIO). Such errors cause a TEST_ASSERT failure. Note,
* it is expected that the file opened by fd at the current file position
* contains at least the number of requested bytes to be read. A TEST_ASSERT
* failure is produced if an End-Of-File condition occurs, before all the
 * data is read.  It is the caller's responsibility to assure that sufficient
* data exists.
*
* Note, for function signature compatibility with read(2), this function
* returns the number of bytes read, but that value will always be equal
* to the number of requested bytes. All other conditions in this and
* future enhancements to this function either automatically issue another
* read(2) or cause a TEST_ASSERT failure.
*
* Args:
* fd - Opened file descriptor to file to be read.
* count - Number of bytes to read.
*
* Output:
* buf - Starting address of where to write the bytes read.
*
* Return:
* On success, number of bytes read.
* On failure, a TEST_ASSERT failure is caused.
*/
ssize_t test_read(int fd, void *buf, size_t count)
{
ssize_t rc;
ssize_t num_read = 0;
size_t num_left = count;
char *ptr = buf;
/* Note: Count of zero is allowed (see "If count is zero" portion of
	 * read(2) manpage for details).
*/
TEST_ASSERT(count >= 0, "Unexpected count, count: %li", count);
do {
rc = read(fd, ptr, num_left);
switch (rc) {
case -1:
TEST_ASSERT(errno == EAGAIN || errno == EINTR,
"Unexpected read failure,\n"
" rc: %zi errno: %i", rc, errno);
break;
case 0:
TEST_FAIL("Unexpected EOF,\n"
" rc: %zi num_read: %zi num_left: %zu",
rc, num_read, num_left);
break;
default:
TEST_ASSERT(rc > 0, "Unexpected ret from read,\n"
" rc: %zi errno: %i", rc, errno);
num_read += rc;
num_left -= rc;
ptr += rc;
break;
}
} while (num_read < count);
return num_read;
}
| linux-master | tools/testing/selftests/kvm/lib/io.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KVM userfaultfd util
* Adapted from demand_paging_test.c
*
* Copyright (C) 2018, Red Hat, Inc.
* Copyright (C) 2019-2022 Google LLC
*/
#define _GNU_SOURCE /* for pipe2 */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <poll.h>
#include <pthread.h>
#include <linux/userfaultfd.h>
#include <sys/syscall.h>
#include "kvm_util.h"
#include "test_util.h"
#include "memstress.h"
#include "userfaultfd_util.h"
#ifdef __NR_userfaultfd
static void *uffd_handler_thread_fn(void *arg)
{
struct uffd_desc *uffd_desc = (struct uffd_desc *)arg;
int uffd = uffd_desc->uffd;
int pipefd = uffd_desc->pipefds[0];
useconds_t delay = uffd_desc->delay;
int64_t pages = 0;
struct timespec start;
struct timespec ts_diff;
clock_gettime(CLOCK_MONOTONIC, &start);
while (1) {
struct uffd_msg msg;
struct pollfd pollfd[2];
char tmp_chr;
int r;
pollfd[0].fd = uffd;
pollfd[0].events = POLLIN;
pollfd[1].fd = pipefd;
pollfd[1].events = POLLIN;
r = poll(pollfd, 2, -1);
switch (r) {
case -1:
pr_info("poll err");
continue;
case 0:
continue;
case 1:
break;
default:
pr_info("Polling uffd returned %d", r);
return NULL;
}
if (pollfd[0].revents & POLLERR) {
pr_info("uffd revents has POLLERR");
return NULL;
}
if (pollfd[1].revents & POLLIN) {
r = read(pollfd[1].fd, &tmp_chr, 1);
TEST_ASSERT(r == 1,
"Error reading pipefd in UFFD thread\n");
break;
}
if (!(pollfd[0].revents & POLLIN))
continue;
r = read(uffd, &msg, sizeof(msg));
if (r == -1) {
if (errno == EAGAIN)
continue;
pr_info("Read of uffd got errno %d\n", errno);
return NULL;
}
if (r != sizeof(msg)) {
pr_info("Read on uffd returned unexpected size: %d bytes", r);
return NULL;
}
if (!(msg.event & UFFD_EVENT_PAGEFAULT))
continue;
if (delay)
usleep(delay);
r = uffd_desc->handler(uffd_desc->uffd_mode, uffd, &msg);
if (r < 0)
return NULL;
pages++;
}
ts_diff = timespec_elapsed(start);
PER_VCPU_DEBUG("userfaulted %ld pages over %ld.%.9lds. (%f/sec)\n",
pages, ts_diff.tv_sec, ts_diff.tv_nsec,
pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / NSEC_PER_SEC));
return NULL;
}
struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
void *hva, uint64_t len,
uffd_handler_t handler)
{
struct uffd_desc *uffd_desc;
bool is_minor = (uffd_mode == UFFDIO_REGISTER_MODE_MINOR);
int uffd;
struct uffdio_api uffdio_api;
struct uffdio_register uffdio_register;
uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY;
int ret;
PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
is_minor ? "MINOR" : "MISSING",
is_minor ? "UFFDIO_CONINUE" : "UFFDIO_COPY");
uffd_desc = malloc(sizeof(struct uffd_desc));
TEST_ASSERT(uffd_desc, "malloc failed");
/* In order to get minor faults, prefault via the alias. */
if (is_minor)
expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE;
uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno);
uffdio_api.api = UFFD_API;
uffdio_api.features = 0;
TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1,
"ioctl UFFDIO_API failed: %" PRIu64,
(uint64_t)uffdio_api.api);
uffdio_register.range.start = (uint64_t)hva;
uffdio_register.range.len = len;
uffdio_register.mode = uffd_mode;
TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1,
"ioctl UFFDIO_REGISTER failed");
TEST_ASSERT((uffdio_register.ioctls & expected_ioctls) ==
expected_ioctls, "missing userfaultfd ioctls");
ret = pipe2(uffd_desc->pipefds, O_CLOEXEC | O_NONBLOCK);
TEST_ASSERT(!ret, "Failed to set up pipefd");
uffd_desc->uffd_mode = uffd_mode;
uffd_desc->uffd = uffd;
uffd_desc->delay = delay;
uffd_desc->handler = handler;
pthread_create(&uffd_desc->thread, NULL, uffd_handler_thread_fn,
uffd_desc);
PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n",
hva, hva + len);
return uffd_desc;
}
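/*
 * Minimal caller sketch, assuming a MISSING-mode handler supplied by the
 * test ("my_handler" is hypothetical; a real handler resolves the fault,
 * e.g. with UFFDIO_COPY):
 *
 *	struct uffd_desc *desc;
 *
 *	desc = uffd_setup_demand_paging(UFFDIO_REGISTER_MODE_MISSING, 0,
 *					hva, len, my_handler);
 *	...
 *	uffd_stop_demand_paging(desc);
 */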
void uffd_stop_demand_paging(struct uffd_desc *uffd)
{
char c = 0;
int ret;
ret = write(uffd->pipefds[1], &c, 1);
TEST_ASSERT(ret == 1, "Unable to write to pipefd");
ret = pthread_join(uffd->thread, NULL);
TEST_ASSERT(ret == 0, "Pthread_join failed.");
close(uffd->uffd);
close(uffd->pipefds[1]);
close(uffd->pipefds[0]);
free(uffd);
}
#endif /* __NR_userfaultfd */
| linux-master | tools/testing/selftests/kvm/lib/userfaultfd_util.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "test_util.h"
#include "kvm_util.h"
#include "ucall_common.h"
#define APPEND_BUFFER_SAFE(str, end, v) \
do { \
GUEST_ASSERT(str < end); \
*str++ = (v); \
} while (0)
static int isdigit(int ch)
{
return (ch >= '0') && (ch <= '9');
}
static int skip_atoi(const char **s)
{
int i = 0;
while (isdigit(**s))
i = i * 10 + *((*s)++) - '0';
return i;
}
#define ZEROPAD 1 /* pad with zero */
#define SIGN 2 /* unsigned/signed long */
#define PLUS 4 /* show plus */
#define SPACE 8 /* space if plus */
#define LEFT 16 /* left justified */
#define SMALL 32 /* Must be 32 == 0x20 */
#define SPECIAL 64 /* 0x */
#define __do_div(n, base) \
({ \
int __res; \
\
__res = ((uint64_t) n) % (uint32_t) base; \
n = ((uint64_t) n) / (uint32_t) base; \
__res; \
})
static char *number(char *str, const char *end, long num, int base, int size,
int precision, int type)
{
/* we are called with base 8, 10 or 16, only, thus don't need "G..." */
static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
char tmp[66];
char c, sign, locase;
int i;
/*
* locase = 0 or 0x20. ORing digits or letters with 'locase'
* produces same digits or (maybe lowercased) letters
*/
locase = (type & SMALL);
if (type & LEFT)
type &= ~ZEROPAD;
if (base < 2 || base > 16)
return NULL;
c = (type & ZEROPAD) ? '0' : ' ';
sign = 0;
if (type & SIGN) {
if (num < 0) {
sign = '-';
num = -num;
size--;
} else if (type & PLUS) {
sign = '+';
size--;
} else if (type & SPACE) {
sign = ' ';
size--;
}
}
if (type & SPECIAL) {
if (base == 16)
size -= 2;
else if (base == 8)
size--;
}
i = 0;
if (num == 0)
tmp[i++] = '0';
else
while (num != 0)
tmp[i++] = (digits[__do_div(num, base)] | locase);
if (i > precision)
precision = i;
size -= precision;
if (!(type & (ZEROPAD + LEFT)))
while (size-- > 0)
APPEND_BUFFER_SAFE(str, end, ' ');
if (sign)
APPEND_BUFFER_SAFE(str, end, sign);
if (type & SPECIAL) {
if (base == 8)
APPEND_BUFFER_SAFE(str, end, '0');
else if (base == 16) {
APPEND_BUFFER_SAFE(str, end, '0');
APPEND_BUFFER_SAFE(str, end, 'x');
}
}
if (!(type & LEFT))
while (size-- > 0)
APPEND_BUFFER_SAFE(str, end, c);
while (i < precision--)
APPEND_BUFFER_SAFE(str, end, '0');
while (i-- > 0)
APPEND_BUFFER_SAFE(str, end, tmp[i]);
while (size-- > 0)
APPEND_BUFFER_SAFE(str, end, ' ');
return str;
}
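/*
 * Worked example for number() above: base 16, num 0xbeef, flags
 * SPECIAL | SMALL and field width 8 emit "  0xbeef" -- the "0x" prefix
 * consumes two columns of the field width and SMALL lowercases the hex
 * digits.
 */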
int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args)
{
char *str, *end;
const char *s;
uint64_t num;
int i, base;
int len;
int flags; /* flags to number() */
int field_width; /* width of output field */
int precision; /*
* min. # of digits for integers; max
				 * number of chars for string
*/
int qualifier; /* 'h', 'l', or 'L' for integer fields */
end = buf + n;
GUEST_ASSERT(buf < end);
GUEST_ASSERT(n > 0);
for (str = buf; *fmt; ++fmt) {
if (*fmt != '%') {
APPEND_BUFFER_SAFE(str, end, *fmt);
continue;
}
/* process flags */
flags = 0;
repeat:
++fmt; /* this also skips first '%' */
switch (*fmt) {
case '-':
flags |= LEFT;
goto repeat;
case '+':
flags |= PLUS;
goto repeat;
case ' ':
flags |= SPACE;
goto repeat;
case '#':
flags |= SPECIAL;
goto repeat;
case '0':
flags |= ZEROPAD;
goto repeat;
}
/* get field width */
field_width = -1;
if (isdigit(*fmt))
field_width = skip_atoi(&fmt);
else if (*fmt == '*') {
++fmt;
/* it's the next argument */
field_width = va_arg(args, int);
if (field_width < 0) {
field_width = -field_width;
flags |= LEFT;
}
}
/* get the precision */
precision = -1;
if (*fmt == '.') {
++fmt;
if (isdigit(*fmt))
precision = skip_atoi(&fmt);
else if (*fmt == '*') {
++fmt;
/* it's the next argument */
precision = va_arg(args, int);
}
if (precision < 0)
precision = 0;
}
/* get the conversion qualifier */
qualifier = -1;
if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L') {
qualifier = *fmt;
++fmt;
}
/* default base */
base = 10;
switch (*fmt) {
case 'c':
if (!(flags & LEFT))
while (--field_width > 0)
APPEND_BUFFER_SAFE(str, end, ' ');
APPEND_BUFFER_SAFE(str, end,
(uint8_t)va_arg(args, int));
while (--field_width > 0)
APPEND_BUFFER_SAFE(str, end, ' ');
continue;
case 's':
s = va_arg(args, char *);
len = strnlen(s, precision);
if (!(flags & LEFT))
while (len < field_width--)
APPEND_BUFFER_SAFE(str, end, ' ');
for (i = 0; i < len; ++i)
APPEND_BUFFER_SAFE(str, end, *s++);
while (len < field_width--)
APPEND_BUFFER_SAFE(str, end, ' ');
continue;
case 'p':
if (field_width == -1) {
field_width = 2 * sizeof(void *);
flags |= SPECIAL | SMALL | ZEROPAD;
}
str = number(str, end,
(uint64_t)va_arg(args, void *), 16,
field_width, precision, flags);
continue;
case 'n':
if (qualifier == 'l') {
long *ip = va_arg(args, long *);
*ip = (str - buf);
} else {
int *ip = va_arg(args, int *);
*ip = (str - buf);
}
continue;
case '%':
APPEND_BUFFER_SAFE(str, end, '%');
continue;
/* integer number formats - set up the flags and "break" */
case 'o':
base = 8;
break;
case 'x':
flags |= SMALL;
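			/* fall through - 'x' and 'X' share the base-16 path */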
case 'X':
base = 16;
break;
case 'd':
case 'i':
flags |= SIGN;
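			/* fall through - signed values join the common numeric path */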
case 'u':
break;
default:
APPEND_BUFFER_SAFE(str, end, '%');
if (*fmt)
APPEND_BUFFER_SAFE(str, end, *fmt);
else
--fmt;
continue;
}
if (qualifier == 'l')
num = va_arg(args, uint64_t);
else if (qualifier == 'h') {
num = (uint16_t)va_arg(args, int);
if (flags & SIGN)
num = (int16_t)num;
} else if (flags & SIGN)
num = va_arg(args, int);
else
num = va_arg(args, uint32_t);
str = number(str, end, num, base, field_width, precision, flags);
}
GUEST_ASSERT(str < end);
*str = '\0';
return str - buf;
}
int guest_snprintf(char *buf, int n, const char *fmt, ...)
{
va_list va;
int len;
va_start(va, fmt);
len = guest_vsnprintf(buf, n, fmt, va);
va_end(va);
return len;
}
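/*
 * Guest-side usage sketch (illustrative): format a message into a stack
 * buffer; the value printed is an arbitrary example:
 *
 *	char msg[64];
 *
 *	guest_snprintf(msg, sizeof(msg), "gva = 0x%lx", (unsigned long)gva);
 */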
| linux-master | tools/testing/selftests/kvm/lib/guest_sprintf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/test_util.c
*
* Copyright (C) 2020, Google LLC.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdarg.h>
#include <assert.h>
#include <ctype.h>
#include <limits.h>
#include <stdlib.h>
#include <time.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <linux/mman.h>
#include "linux/kernel.h"
#include "test_util.h"
/*
* Random number generator that is usable from guest code. This is the
* Park-Miller LCG using standard constants.
*/
struct guest_random_state new_guest_random_state(uint32_t seed)
{
struct guest_random_state s = {.seed = seed};
return s;
}
uint32_t guest_random_u32(struct guest_random_state *state)
{
state->seed = (uint64_t)state->seed * 48271 % ((uint32_t)(1 << 31) - 1);
return state->seed;
}
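/*
 * Usage sketch for the PRNG above; seeding with a fixed value keeps
 * failures reproducible across runs:
 *
 *	struct guest_random_state rng = new_guest_random_state(0x1234);
 *	uint32_t r = guest_random_u32(&rng) % 100;
 */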
/*
* Parses "[0-9]+[kmgt]?".
*/
size_t parse_size(const char *size)
{
size_t base;
char *scale;
int shift = 0;
TEST_ASSERT(size && isdigit(size[0]), "Need at least one digit in '%s'", size);
base = strtoull(size, &scale, 0);
TEST_ASSERT(base != ULLONG_MAX, "Overflow parsing size!");
switch (tolower(*scale)) {
case 't':
shift = 40;
break;
case 'g':
shift = 30;
break;
case 'm':
shift = 20;
break;
case 'k':
shift = 10;
break;
case 'b':
case '\0':
shift = 0;
break;
default:
TEST_ASSERT(false, "Unknown size letter %c", *scale);
}
TEST_ASSERT((base << shift) >> shift == base, "Overflow scaling size!");
return base << shift;
}
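/*
 * Examples of the accepted format: parse_size("512") == 512,
 * parse_size("4k") == 4096 and parse_size("2M") == 2097152; the scale
 * letter is case insensitive.
 */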
int64_t timespec_to_ns(struct timespec ts)
{
return (int64_t)ts.tv_nsec + 1000000000LL * (int64_t)ts.tv_sec;
}
struct timespec timespec_add_ns(struct timespec ts, int64_t ns)
{
struct timespec res;
res.tv_nsec = ts.tv_nsec + ns;
res.tv_sec = ts.tv_sec + res.tv_nsec / 1000000000LL;
res.tv_nsec %= 1000000000LL;
return res;
}
struct timespec timespec_add(struct timespec ts1, struct timespec ts2)
{
int64_t ns1 = timespec_to_ns(ts1);
int64_t ns2 = timespec_to_ns(ts2);
return timespec_add_ns((struct timespec){0}, ns1 + ns2);
}
struct timespec timespec_sub(struct timespec ts1, struct timespec ts2)
{
int64_t ns1 = timespec_to_ns(ts1);
int64_t ns2 = timespec_to_ns(ts2);
return timespec_add_ns((struct timespec){0}, ns1 - ns2);
}
struct timespec timespec_elapsed(struct timespec start)
{
struct timespec end;
clock_gettime(CLOCK_MONOTONIC, &end);
return timespec_sub(end, start);
}
struct timespec timespec_div(struct timespec ts, int divisor)
{
int64_t ns = timespec_to_ns(ts) / divisor;
return timespec_add_ns((struct timespec){0}, ns);
}
void print_skip(const char *fmt, ...)
{
va_list ap;
assert(fmt);
va_start(ap, fmt);
vprintf(fmt, ap);
va_end(ap);
puts(", skipping test");
}
bool thp_configured(void)
{
int ret;
struct stat statbuf;
ret = stat("/sys/kernel/mm/transparent_hugepage", &statbuf);
TEST_ASSERT(ret == 0 || (ret == -1 && errno == ENOENT),
"Error in stating /sys/kernel/mm/transparent_hugepage");
return ret == 0;
}
size_t get_trans_hugepagesz(void)
{
size_t size;
FILE *f;
int ret;
TEST_ASSERT(thp_configured(), "THP is not configured in host kernel");
f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r");
TEST_ASSERT(f != NULL, "Error in opening transparent_hugepage/hpage_pmd_size");
	ret = fscanf(f, "%ld", &size);
	/*
	 * The second fscanf() should hit EOF and return a value < 1; the
	 * assert below uses that to verify the file held exactly one value.
	 */
	ret = fscanf(f, "%ld", &size);
	TEST_ASSERT(ret < 1, "Error reading transparent_hugepage/hpage_pmd_size");
fclose(f);
return size;
}
size_t get_def_hugetlb_pagesz(void)
{
char buf[64];
const char *hugepagesize = "Hugepagesize:";
const char *hugepages_total = "HugePages_Total:";
FILE *f;
f = fopen("/proc/meminfo", "r");
TEST_ASSERT(f != NULL, "Error in opening /proc/meminfo");
while (fgets(buf, sizeof(buf), f) != NULL) {
if (strstr(buf, hugepages_total) == buf) {
unsigned long long total = strtoull(buf + strlen(hugepages_total), NULL, 10);
if (!total) {
fprintf(stderr, "HUGETLB is not enabled in /proc/sys/vm/nr_hugepages\n");
exit(KSFT_SKIP);
}
}
if (strstr(buf, hugepagesize) == buf) {
fclose(f);
return strtoull(buf + strlen(hugepagesize), NULL, 10) << 10;
}
}
if (feof(f)) {
fprintf(stderr, "HUGETLB is not configured in host kernel");
exit(KSFT_SKIP);
}
TEST_FAIL("Error in reading /proc/meminfo");
}
#define ANON_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
#define ANON_HUGE_FLAGS (ANON_FLAGS | MAP_HUGETLB)
const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
{
static const struct vm_mem_backing_src_alias aliases[] = {
[VM_MEM_SRC_ANONYMOUS] = {
.name = "anonymous",
.flag = ANON_FLAGS,
},
[VM_MEM_SRC_ANONYMOUS_THP] = {
.name = "anonymous_thp",
.flag = ANON_FLAGS,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB] = {
.name = "anonymous_hugetlb",
.flag = ANON_HUGE_FLAGS,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_16KB] = {
.name = "anonymous_hugetlb_16kb",
.flag = ANON_HUGE_FLAGS | MAP_HUGE_16KB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_64KB] = {
.name = "anonymous_hugetlb_64kb",
.flag = ANON_HUGE_FLAGS | MAP_HUGE_64KB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_512KB] = {
.name = "anonymous_hugetlb_512kb",
.flag = ANON_HUGE_FLAGS | MAP_HUGE_512KB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_1MB] = {
.name = "anonymous_hugetlb_1mb",
.flag = ANON_HUGE_FLAGS | MAP_HUGE_1MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB] = {
.name = "anonymous_hugetlb_2mb",
.flag = ANON_HUGE_FLAGS | MAP_HUGE_2MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_8MB] = {
.name = "anonymous_hugetlb_8mb",
.flag = ANON_HUGE_FLAGS | MAP_HUGE_8MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_16MB] = {
.name = "anonymous_hugetlb_16mb",
.flag = ANON_HUGE_FLAGS | MAP_HUGE_16MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_32MB] = {
.name = "anonymous_hugetlb_32mb",
.flag = ANON_HUGE_FLAGS | MAP_HUGE_32MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_256MB] = {
.name = "anonymous_hugetlb_256mb",
.flag = ANON_HUGE_FLAGS | MAP_HUGE_256MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_512MB] = {
.name = "anonymous_hugetlb_512mb",
.flag = ANON_HUGE_FLAGS | MAP_HUGE_512MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB] = {
.name = "anonymous_hugetlb_1gb",
.flag = ANON_HUGE_FLAGS | MAP_HUGE_1GB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB] = {
.name = "anonymous_hugetlb_2gb",
.flag = ANON_HUGE_FLAGS | MAP_HUGE_2GB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB] = {
.name = "anonymous_hugetlb_16gb",
.flag = ANON_HUGE_FLAGS | MAP_HUGE_16GB,
},
[VM_MEM_SRC_SHMEM] = {
.name = "shmem",
.flag = MAP_SHARED,
},
[VM_MEM_SRC_SHARED_HUGETLB] = {
.name = "shared_hugetlb",
/*
* No MAP_HUGETLB, we use MFD_HUGETLB instead. Since
* we're using "file backed" memory, we need to specify
* this when the FD is created, not when the area is
* mapped.
*/
.flag = MAP_SHARED,
},
};
_Static_assert(ARRAY_SIZE(aliases) == NUM_SRC_TYPES,
"Missing new backing src types?");
TEST_ASSERT(i < NUM_SRC_TYPES, "Backing src type ID %d too big", i);
return &aliases[i];
}
#define MAP_HUGE_PAGE_SIZE(x) (1ULL << ((x >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK))
size_t get_backing_src_pagesz(uint32_t i)
{
uint32_t flag = vm_mem_backing_src_alias(i)->flag;
switch (i) {
case VM_MEM_SRC_ANONYMOUS:
case VM_MEM_SRC_SHMEM:
return getpagesize();
case VM_MEM_SRC_ANONYMOUS_THP:
return get_trans_hugepagesz();
case VM_MEM_SRC_ANONYMOUS_HUGETLB:
case VM_MEM_SRC_SHARED_HUGETLB:
return get_def_hugetlb_pagesz();
default:
return MAP_HUGE_PAGE_SIZE(flag);
}
}
bool is_backing_src_hugetlb(uint32_t i)
{
return !!(vm_mem_backing_src_alias(i)->flag & MAP_HUGETLB);
}
static void print_available_backing_src_types(const char *prefix)
{
int i;
printf("%sAvailable backing src types:\n", prefix);
for (i = 0; i < NUM_SRC_TYPES; i++)
printf("%s %s\n", prefix, vm_mem_backing_src_alias(i)->name);
}
void backing_src_help(const char *flag)
{
printf(" %s: specify the type of memory that should be used to\n"
" back the guest data region. (default: %s)\n",
flag, vm_mem_backing_src_alias(DEFAULT_VM_MEM_SRC)->name);
print_available_backing_src_types(" ");
}
enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)
{
int i;
for (i = 0; i < NUM_SRC_TYPES; i++)
if (!strcmp(type_name, vm_mem_backing_src_alias(i)->name))
return i;
print_available_backing_src_types("");
TEST_FAIL("Unknown backing src type: %s", type_name);
return -1;
}
long get_run_delay(void)
{
char path[64];
long val[2];
FILE *fp;
sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
fp = fopen(path, "r");
/* Return MIN_RUN_DELAY_NS upon failure just to be safe */
if (fscanf(fp, "%ld %ld ", &val[0], &val[1]) < 2)
val[1] = MIN_RUN_DELAY_NS;
fclose(fp);
return val[1];
}
int atoi_paranoid(const char *num_str)
{
char *end_ptr;
long num;
errno = 0;
num = strtol(num_str, &end_ptr, 0);
TEST_ASSERT(!errno, "strtol(\"%s\") failed", num_str);
TEST_ASSERT(num_str != end_ptr,
"strtol(\"%s\") didn't find a valid integer.", num_str);
TEST_ASSERT(*end_ptr == '\0',
"strtol(\"%s\") failed to parse trailing characters \"%s\".",
num_str, end_ptr);
TEST_ASSERT(num >= INT_MIN && num <= INT_MAX,
"%ld not in range of [%d, %d]", num, INT_MIN, INT_MAX);
return num;
}
char *strdup_printf(const char *fmt, ...)
{
va_list ap;
char *str;
va_start(ap, fmt);
TEST_ASSERT(vasprintf(&str, fmt, ap) >= 0, "vasprintf() failed");
va_end(ap);
return str;
}
| linux-master | tools/testing/selftests/kvm/lib/test_util.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/assert.c
*
* Copyright (C) 2018, Google LLC.
*/
#define _GNU_SOURCE /* for getline(3) and strchrnul(3)*/
#include "test_util.h"
#include <execinfo.h>
#include <sys/syscall.h>
#include "kselftest.h"
/* Dumps the current stack trace to stderr. */
static void __attribute__((noinline)) test_dump_stack(void);
static void test_dump_stack(void)
{
/*
* Build and run this command:
*
* addr2line -s -e /proc/$PPID/exe -fpai {backtrace addresses} | \
* cat -n 1>&2
*
* Note that the spacing is different and there's no newline.
*/
size_t i;
size_t n = 20;
void *stack[n];
const char *addr2line = "addr2line -s -e /proc/$PPID/exe -fpai";
const char *pipeline = "|cat -n 1>&2";
char cmd[strlen(addr2line) + strlen(pipeline) +
/* N bytes per addr * 2 digits per byte + 1 space per addr: */
n * (((sizeof(void *)) * 2) + 1) +
/* Null terminator: */
1];
char *c = cmd;
n = backtrace(stack, n);
/*
* Skip the first 2 frames, which should be test_dump_stack() and
* test_assert(); both of which are declared noinline. Bail if the
* resulting stack trace would be empty. Otherwise, addr2line will block
* waiting for addresses to be passed in via stdin.
*/
if (n <= 2) {
fputs(" (stack trace empty)\n", stderr);
return;
}
c += sprintf(c, "%s", addr2line);
for (i = 2; i < n; i++)
c += sprintf(c, " %lx", ((unsigned long) stack[i]) - 1);
c += sprintf(c, "%s", pipeline);
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-result"
system(cmd);
#pragma GCC diagnostic pop
}
static pid_t _gettid(void)
{
return syscall(SYS_gettid);
}
void __attribute__((noinline))
test_assert(bool exp, const char *exp_str,
const char *file, unsigned int line, const char *fmt, ...)
{
va_list ap;
if (!(exp)) {
va_start(ap, fmt);
fprintf(stderr, "==== Test Assertion Failure ====\n"
" %s:%u: %s\n"
" pid=%d tid=%d errno=%d - %s\n",
file, line, exp_str, getpid(), _gettid(),
errno, strerror(errno));
test_dump_stack();
if (fmt) {
fputs(" ", stderr);
vfprintf(stderr, fmt, ap);
fputs("\n", stderr);
}
va_end(ap);
if (errno == EACCES) {
print_skip("Access denied - Exiting");
exit(KSFT_SKIP);
}
exit(254);
}
return;
}
| linux-master | tools/testing/selftests/kvm/lib/assert.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Sparse bit array
*
* Copyright (C) 2018, Google LLC.
* Copyright (C) 2018, Red Hat, Inc. (code style cleanup and fuzzing driver)
*
* This library provides functions to support a memory efficient bit array,
* with an index size of 2^64. A sparsebit array is allocated through
 * the use of sparsebit_alloc() and freed via sparsebit_free(),
* such as in the following:
*
* struct sparsebit *s;
* s = sparsebit_alloc();
* sparsebit_free(&s);
*
 * The struct sparsebit type is opaque to the caller.
 * Note that sparsebit_free() takes a pointer to the sparsebit
* structure. This is so that sparsebit_free() is able to poison
* the pointer (e.g. set it to NULL) to the struct sparsebit before
* returning to the caller.
*
* Between the return of sparsebit_alloc() and the call of
* sparsebit_free(), there are multiple query and modifying operations
* that can be performed on the allocated sparsebit array. All of
* these operations take as a parameter the value returned from
* sparsebit_alloc() and most also take a bit index. Frequently
* used routines include:
*
* ---- Query Operations
* sparsebit_is_set(s, idx)
* sparsebit_is_clear(s, idx)
* sparsebit_any_set(s)
* sparsebit_first_set(s)
* sparsebit_next_set(s, prev_idx)
*
* ---- Modifying Operations
* sparsebit_set(s, idx)
* sparsebit_clear(s, idx)
* sparsebit_set_num(s, idx, num);
* sparsebit_clear_num(s, idx, num);
*
 * A common operation is to iterate over all the bits set in a
* sparsebit array. This can be done via code with the following structure:
*
* sparsebit_idx_t idx;
* if (sparsebit_any_set(s)) {
* idx = sparsebit_first_set(s);
* do {
* ...
* idx = sparsebit_next_set(s, idx);
* } while (idx != 0);
* }
*
* The index of the first bit set needs to be obtained via
 * sparsebit_first_set(), because sparsebit_next_set() needs
 * the index of the previously set bit.  The sparsebit_idx_t type is
 * unsigned, so there is no previous index before 0 that is available.
 * Also, the call to sparsebit_first_set() is not made unless there
 * is at least 1 bit in the array set.  This is because sparsebit_first_set()
 * aborts if called with no bits set.
 * It is the caller's responsibility to assure that the
* sparsebit array has at least a single bit set before calling
* sparsebit_first_set().
*
* ==== Implementation Overview ====
* For the most part the internal implementation of sparsebit is
* opaque to the caller. One important implementation detail that the
* caller may need to be aware of is the spatial complexity of the
* implementation. This implementation of a sparsebit array is not
* only sparse, in that it uses memory proportional to the number of bits
* set. It is also efficient in memory usage when most of the bits are
* set.
*
* At a high-level the state of the bit settings are maintained through
* the use of a binary-search tree, where each node contains at least
* the following members:
*
* typedef uint64_t sparsebit_idx_t;
* typedef uint64_t sparsebit_num_t;
*
* sparsebit_idx_t idx;
* uint32_t mask;
* sparsebit_num_t num_after;
*
* The idx member contains the bit index of the first bit described by this
* node, while the mask member stores the setting of the first 32-bits.
* The setting of the bit at idx + n, where 0 <= n < 32, is located in the
* mask member at 1 << n.
*
* Nodes are sorted by idx and the bits described by two nodes will never
* overlap. The idx member is always aligned to the mask size, i.e. a
* multiple of 32.
*
* Beyond a typical implementation, the nodes in this implementation also
* contains a member named num_after. The num_after member holds the
* number of bits immediately after the mask bits that are contiguously set.
* The use of the num_after member allows this implementation to efficiently
* represent cases where most bits are set. For example, the case of all
* but the last two bits set, is represented by the following two nodes:
*
* node 0 - idx: 0x0 mask: 0xffffffff num_after: 0xffffffffffffffc0
* node 1 - idx: 0xffffffffffffffe0 mask: 0x3fffffff num_after: 0
*
* ==== Invariants ====
 * This implementation uses the following invariants:
 *
 * + Nodes are only used to represent bits that are set.
* Nodes with a mask of 0 and num_after of 0 are not allowed.
*
* + Sum of bits set in all the nodes is equal to the value of
* the struct sparsebit_pvt num_set member.
*
* + The setting of at least one bit is always described in a nodes
* mask (mask >= 1).
*
* + A node with all mask bits set only occurs when the last bit
 *   described by the previous node is not equal to this node's
 *   starting index - 1.  All such occurrences of this condition are
 *   avoided by moving the setting of the node's mask bits into
 *   the previous node's num_after setting.
*
* + Node starting index is evenly divisible by the number of bits
 *   within a node's mask member.
*
* + Nodes never represent a range of bits that wrap around the
* highest supported index.
*
 *      (idx + MASK_BITS + num_after - 1) <= (((sparsebit_idx_t) 0) - 1)
*
* As a consequence of the above, the num_after member of a node
* will always be <=:
*
* maximum_index - nodes_starting_index - number_of_mask_bits
*
* + Nodes within the binary search tree are sorted based on each
 *   node's starting index.
*
* + The range of bits described by any two nodes do not overlap. The
* range of bits described by a single node is:
*
* start: node->idx
* end (inclusive): node->idx + MASK_BITS + node->num_after - 1;
*
* Note, at times these invariants are temporarily violated for a
* specific portion of the code. For example, when setting a mask
* bit, there is a small delay between when the mask bit is set and the
* value in the struct sparsebit_pvt num_set member is updated. Other
* temporary violations occur when node_split() is called with a specified
 * index to ensure that a node whose mask represents the bit
 * at the specified index exists.  At times, to do this, node_split()
* must split an existing node into two nodes or create a node that
* has no bits set. Such temporary violations must be corrected before
* returning to the caller. These corrections are typically performed
* by the local function node_reduce().
*/
#include "test_util.h"
#include "sparsebit.h"
#include <limits.h>
#include <assert.h>
#define DUMP_LINE_MAX 100 /* Does not include indent amount */
typedef uint32_t mask_t;
#define MASK_BITS (sizeof(mask_t) * CHAR_BIT)
struct node {
struct node *parent;
struct node *left;
struct node *right;
sparsebit_idx_t idx; /* index of least-significant bit in mask */
sparsebit_num_t num_after; /* num contiguously set after mask */
mask_t mask;
};
struct sparsebit {
/*
* Points to root node of the binary search
* tree. Equal to NULL when no bits are set in
* the entire sparsebit array.
*/
struct node *root;
/*
* A redundant count of the total number of bits set. Used for
* diagnostic purposes and to change the time complexity of
* sparsebit_num_set() from O(n) to O(1).
* Note: Due to overflow, a value of 0 means none or all set.
*/
sparsebit_num_t num_set;
};
/* Returns the number of set bits described by the settings
* of the node pointed to by nodep.
*/
static sparsebit_num_t node_num_set(struct node *nodep)
{
return nodep->num_after + __builtin_popcount(nodep->mask);
}
/* Returns a pointer to the node that describes the
* lowest bit index.
*/
static struct node *node_first(struct sparsebit *s)
{
struct node *nodep;
for (nodep = s->root; nodep && nodep->left; nodep = nodep->left)
;
return nodep;
}
/* Returns a pointer to the node that describes the
* lowest bit index > the index of the node pointed to by np.
* Returns NULL if no node with a higher index exists.
*/
static struct node *node_next(struct sparsebit *s, struct node *np)
{
struct node *nodep = np;
/*
* If current node has a right child, next node is the left-most
* of the right child.
*/
if (nodep->right) {
for (nodep = nodep->right; nodep->left; nodep = nodep->left)
;
return nodep;
}
/*
* No right child. Go up until node is left child of a parent.
* That parent is then the next node.
*/
while (nodep->parent && nodep == nodep->parent->right)
nodep = nodep->parent;
return nodep->parent;
}
/* Searches for and returns a pointer to the node that describes the
* highest index < the index of the node pointed to by np.
* Returns NULL if no node with a lower index exists.
*/
static struct node *node_prev(struct sparsebit *s, struct node *np)
{
struct node *nodep = np;
	/*
	 * If current node has a left child, previous node is the right-most
	 * of the left child.
	 */
	if (nodep->left) {
		for (nodep = nodep->left; nodep->right; nodep = nodep->right)
			;
		return nodep;
	}
	/*
	 * No left child. Go up until node is right child of a parent.
	 * That parent is then the previous node.
	 */
	while (nodep->parent && nodep == nodep->parent->left)
		nodep = nodep->parent;
	return nodep->parent;
}
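/*
 * Together, node_first() and node_next() give an in-order walk of the
 * tree, visiting nodes in ascending index order. An illustrative
 * traversal, as used by sparsebit_dump() below (process() is a
 * hypothetical consumer, not defined in this file):
 *
 *	struct node *nodep;
 *
 *	for (nodep = node_first(s); nodep; nodep = node_next(s, nodep))
 *		process(nodep);
 */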
/* Allocates space to hold a copy of the node sub-tree pointed to by
* subtree and duplicates the bit settings to the newly allocated nodes.
* Returns the newly allocated copy of subtree.
*/
static struct node *node_copy_subtree(struct node *subtree)
{
struct node *root;
/* Duplicate the node at the root of the subtree */
root = calloc(1, sizeof(*root));
if (!root) {
perror("calloc");
abort();
}
root->idx = subtree->idx;
root->mask = subtree->mask;
root->num_after = subtree->num_after;
/* As needed, recursively duplicate the left and right subtrees */
if (subtree->left) {
root->left = node_copy_subtree(subtree->left);
root->left->parent = root;
}
if (subtree->right) {
root->right = node_copy_subtree(subtree->right);
root->right->parent = root;
}
return root;
}
/* Searches for and returns a pointer to the node that describes the setting
* of the bit given by idx. A node describes the setting of a bit if its
* index is within the bits described by the mask bits or the number of
* contiguous bits set after the mask. Returns NULL if there is no such node.
*/
static struct node *node_find(struct sparsebit *s, sparsebit_idx_t idx)
{
struct node *nodep;
/* Find the node that describes the setting of the bit at idx */
for (nodep = s->root; nodep;
nodep = nodep->idx > idx ? nodep->left : nodep->right) {
if (idx >= nodep->idx &&
idx <= nodep->idx + MASK_BITS + nodep->num_after - 1)
break;
}
return nodep;
}
/* Entry Requirements:
* + A node that describes the setting of idx is not already present.
*
* Adds a new node to describe the setting of the bit at the index given
* by idx. Returns a pointer to the newly added node.
*
 * TODO(lhuemill): Degenerate cases cause the tree to become unbalanced.
*/
static struct node *node_add(struct sparsebit *s, sparsebit_idx_t idx)
{
struct node *nodep, *parentp, *prev;
/* Allocate and initialize the new node. */
nodep = calloc(1, sizeof(*nodep));
if (!nodep) {
perror("calloc");
abort();
}
nodep->idx = idx & -MASK_BITS;
/* If no nodes, set it up as the root node. */
if (!s->root) {
s->root = nodep;
return nodep;
}
/*
* Find the parent where the new node should be attached
* and add the node there.
*/
parentp = s->root;
while (true) {
if (idx < parentp->idx) {
if (!parentp->left) {
parentp->left = nodep;
nodep->parent = parentp;
break;
}
parentp = parentp->left;
} else {
assert(idx > parentp->idx + MASK_BITS + parentp->num_after - 1);
if (!parentp->right) {
parentp->right = nodep;
nodep->parent = parentp;
break;
}
parentp = parentp->right;
}
}
/*
	 * Do num_after bits of the previous node overlap with the mask
	 * of the new node? If so, set those bits in the new node's mask
	 * and reduce the previous node's num_after.
*/
prev = node_prev(s, nodep);
while (prev && prev->idx + MASK_BITS + prev->num_after - 1 >= nodep->idx) {
unsigned int n1 = (prev->idx + MASK_BITS + prev->num_after - 1)
- nodep->idx;
assert(prev->num_after > 0);
assert(n1 < MASK_BITS);
assert(!(nodep->mask & (1 << n1)));
nodep->mask |= (1 << n1);
prev->num_after--;
}
return nodep;
}
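/*
 * Worked example of the overlap fixup above (illustrative): if the
 * previous node has idx 0x0, mask 0x1, num_after 0x30 (num_after thus
 * covers bits 0x20..0x4f) and the new node is added at idx 0x40, the
 * loop moves bits 0x40..0x4f out of prev's num_after and into the new
 * node's mask, leaving:
 *
 *	prev  - idx: 0x0  mask: 0x1        num_after: 0x20
 *	nodep - idx: 0x40 mask: 0x0000ffff num_after: 0
 */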
/* Returns whether all the bits in the sparsebit array are set. */
bool sparsebit_all_set(struct sparsebit *s)
{
/*
	 * If there are any nodes, there must be at least one bit set.
	 * The only case where a bit is set yet the total num set is 0
	 * is when all bits are set, because the count wraps to 0.
*/
return s->root && s->num_set == 0;
}
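/*
 * Illustrative consequence of the wrap-around convention (assuming
 * sparsebit_num_t is 64 bits wide, per sparsebit.h):
 *
 *	sparsebit_set_all(s);
 *	assert(sparsebit_all_set(s));	(root != NULL, num_set wrapped to 0)
 *	sparsebit_clear(s, 0);
 *	assert(!sparsebit_all_set(s));	(num_set is now 2^64 - 1)
 */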
/* Clears all bits described by the node pointed to by nodep, then
* removes the node.
*/
static void node_rm(struct sparsebit *s, struct node *nodep)
{
struct node *tmp;
sparsebit_num_t num_set;
num_set = node_num_set(nodep);
assert(s->num_set >= num_set || sparsebit_all_set(s));
s->num_set -= node_num_set(nodep);
/* Have both left and right child */
if (nodep->left && nodep->right) {
/*
		 * Move the left subtree to the left-most leaf node
* of the right child.
*/
for (tmp = nodep->right; tmp->left; tmp = tmp->left)
;
tmp->left = nodep->left;
nodep->left = NULL;
tmp->left->parent = tmp;
}
/* Left only child */
if (nodep->left) {
if (!nodep->parent) {
s->root = nodep->left;
nodep->left->parent = NULL;
} else {
nodep->left->parent = nodep->parent;
if (nodep == nodep->parent->left)
nodep->parent->left = nodep->left;
else {
assert(nodep == nodep->parent->right);
nodep->parent->right = nodep->left;
}
}
nodep->parent = nodep->left = nodep->right = NULL;
free(nodep);
return;
}
/* Right only child */
if (nodep->right) {
if (!nodep->parent) {
s->root = nodep->right;
nodep->right->parent = NULL;
} else {
nodep->right->parent = nodep->parent;
if (nodep == nodep->parent->left)
nodep->parent->left = nodep->right;
else {
assert(nodep == nodep->parent->right);
nodep->parent->right = nodep->right;
}
}
nodep->parent = nodep->left = nodep->right = NULL;
free(nodep);
return;
}
/* Leaf Node */
if (!nodep->parent) {
s->root = NULL;
} else {
if (nodep->parent->left == nodep)
nodep->parent->left = NULL;
else {
assert(nodep == nodep->parent->right);
nodep->parent->right = NULL;
}
}
nodep->parent = nodep->left = nodep->right = NULL;
free(nodep);
return;
}
/* Splits the node containing the bit at idx so that there is a node
* that starts at the specified index. If no such node exists, a new
* node at the specified index is created. Returns the new node.
*
 * idx must be on a mask boundary.
*/
static struct node *node_split(struct sparsebit *s, sparsebit_idx_t idx)
{
struct node *nodep1, *nodep2;
sparsebit_idx_t offset;
sparsebit_num_t orig_num_after;
assert(!(idx % MASK_BITS));
/*
* Is there a node that describes the setting of idx?
* If not, add it.
*/
nodep1 = node_find(s, idx);
if (!nodep1)
return node_add(s, idx);
/*
* All done if the starting index of the node is where the
* split should occur.
*/
if (nodep1->idx == idx)
return nodep1;
/*
* Split point not at start of mask, so it must be part of
* bits described by num_after.
*/
/*
* Calculate offset within num_after for where the split is
* to occur.
*/
offset = idx - (nodep1->idx + MASK_BITS);
orig_num_after = nodep1->num_after;
/*
* Add a new node to describe the bits starting at
* the split point.
*/
nodep1->num_after = offset;
nodep2 = node_add(s, idx);
/* Move bits after the split point into the new node */
nodep2->num_after = orig_num_after - offset;
if (nodep2->num_after >= MASK_BITS) {
nodep2->mask = ~(mask_t) 0;
nodep2->num_after -= MASK_BITS;
} else {
		nodep2->mask = (1u << nodep2->num_after) - 1;
nodep2->num_after = 0;
}
return nodep2;
}
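/*
 * Worked example (illustrative): splitting a node with idx 0x0,
 * mask 0xffffffff, num_after 0x80 (bits 0x0..0x9f) at idx 0x40 gives
 * offset = 0x40 - (0x0 + MASK_BITS) = 0x20, so the original node keeps
 * num_after 0x20 (bits 0x20..0x3f) and the new node receives the
 * remaining 0x60 bits:
 *
 *	nodep2 - idx: 0x40 mask: 0xffffffff num_after: 0x40
 */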
/* Iteratively reduces the node pointed to by nodep and its adjacent
 * nodes into a more compact form. For example, a node whose mask has all
 * bits set and that is adjacent to a previous node will get combined into
 * a single node with an increased num_after setting.
*
* After each reduction, a further check is made to see if additional
* reductions are possible with the new previous and next nodes. Note,
* a search for a reduction is only done across the nodes nearest nodep
* and those that became part of a reduction. Reductions beyond nodep
* and the adjacent nodes that are reduced are not discovered. It is the
* responsibility of the caller to pass a nodep that is within one node
* of each possible reduction.
*
* This function does not fix the temporary violation of all invariants.
* For example it does not fix the case where the bit settings described
* by two or more nodes overlap. Such a violation introduces the potential
* complication of a bit setting for a specific index having different settings
* in different nodes. This would then introduce the further complication
* of which node has the correct setting of the bit and thus such conditions
* are not allowed.
*
* This function is designed to fix invariant violations that are introduced
 * by node_split() and by changes to a node's mask or num_after members.
 * For example, when setting a bit within a node's mask, the function that
* sets the bit doesn't have to worry about whether the setting of that
* bit caused the mask to have leading only or trailing only bits set.
* Instead, the function can call node_reduce(), with nodep equal to the
* node address that it set a mask bit in, and node_reduce() will notice
* the cases of leading or trailing only bits and that there is an
* adjacent node that the bit settings could be merged into.
*
* This implementation specifically detects and corrects violation of the
* following invariants:
*
 * + Nodes are only used to represent bits that are set.
 *   Nodes with a mask of 0 and num_after of 0 are not allowed.
 *
 * + The setting of at least one bit is always described in a node's
 *   mask (mask >= 1).
 *
 * + A node with all mask bits set only occurs when the last bit
 *   described by the previous node is not equal to this node's
 *   starting index - 1. All such occurrences of this condition are
 *   avoided by moving the setting of the node's mask bits into
 *   the previous node's num_after setting.
*/
static void node_reduce(struct sparsebit *s, struct node *nodep)
{
bool reduction_performed;
do {
reduction_performed = false;
struct node *prev, *next, *tmp;
/* 1) Potential reductions within the current node. */
/* Nodes with all bits cleared may be removed. */
if (nodep->mask == 0 && nodep->num_after == 0) {
/*
* About to remove the node pointed to by
* nodep, which normally would cause a problem
* for the next pass through the reduction loop,
* because the node at the starting point no longer
* exists. This potential problem is handled
* by first remembering the location of the next
* or previous nodes. Doesn't matter which, because
* once the node at nodep is removed, there will be
* no other nodes between prev and next.
*
			 * Note, the checks performed on nodep against
			 * prev and next both look for an adjacent
			 * node that can be reduced into a single node. As
			 * such, after removing the node at nodep, it doesn't
			 * matter whether the nodep for the next pass
			 * through the loop is the previous pass's
			 * prev or next node. Either way, on the next pass
			 * the one not selected will become either the
			 * prev or next node.
*/
tmp = node_next(s, nodep);
if (!tmp)
tmp = node_prev(s, nodep);
node_rm(s, nodep);
nodep = tmp;
reduction_performed = true;
continue;
}
/*
		 * When the mask is 0, the number of num_after bits can be
		 * reduced by moving the initial num_after bits into the mask.
*/
if (nodep->mask == 0) {
assert(nodep->num_after != 0);
assert(nodep->idx + MASK_BITS > nodep->idx);
nodep->idx += MASK_BITS;
if (nodep->num_after >= MASK_BITS) {
				nodep->mask = ~(mask_t) 0;
nodep->num_after -= MASK_BITS;
} else {
nodep->mask = (1u << nodep->num_after) - 1;
nodep->num_after = 0;
}
reduction_performed = true;
continue;
}
/*
* 2) Potential reductions between the current and
* previous nodes.
*/
prev = node_prev(s, nodep);
if (prev) {
sparsebit_idx_t prev_highest_bit;
/* Nodes with no bits set can be removed. */
if (prev->mask == 0 && prev->num_after == 0) {
node_rm(s, prev);
reduction_performed = true;
continue;
}
/*
* All mask bits set and previous node has
* adjacent index.
*/
if (nodep->mask + 1 == 0 &&
prev->idx + MASK_BITS == nodep->idx) {
prev->num_after += MASK_BITS + nodep->num_after;
nodep->mask = 0;
nodep->num_after = 0;
reduction_performed = true;
continue;
}
/*
* Is node adjacent to previous node and the node
* contains a single contiguous range of bits
* starting from the beginning of the mask?
*/
prev_highest_bit = prev->idx + MASK_BITS - 1 + prev->num_after;
if (prev_highest_bit + 1 == nodep->idx &&
(nodep->mask | (nodep->mask >> 1)) == nodep->mask) {
/*
* How many contiguous bits are there?
				 * It equals the total number of set
* bits, due to an earlier check that
* there is a single contiguous range of
* set bits.
*/
unsigned int num_contiguous
= __builtin_popcount(nodep->mask);
assert((num_contiguous > 0) &&
((1ULL << num_contiguous) - 1) == nodep->mask);
prev->num_after += num_contiguous;
nodep->mask = 0;
/*
* For predictable performance, handle special
* case where all mask bits are set and there
* is a non-zero num_after setting. This code
* is functionally correct without the following
* conditionalized statements, but without them
* the value of num_after is only reduced by
* the number of mask bits per pass. There are
* cases where num_after can be close to 2^64.
* Without this code it could take nearly
* (2^64) / 32 passes to perform the full
* reduction.
*/
if (num_contiguous == MASK_BITS) {
prev->num_after += nodep->num_after;
nodep->num_after = 0;
}
reduction_performed = true;
continue;
}
}
/*
* 3) Potential reductions between the current and
* next nodes.
*/
next = node_next(s, nodep);
if (next) {
/* Nodes with no bits set can be removed. */
if (next->mask == 0 && next->num_after == 0) {
node_rm(s, next);
reduction_performed = true;
continue;
}
/*
* Is next node index adjacent to current node
* and has a mask with all bits set?
*/
if (next->idx == nodep->idx + MASK_BITS + nodep->num_after &&
next->mask == ~(mask_t) 0) {
nodep->num_after += MASK_BITS;
next->mask = 0;
nodep->num_after += next->num_after;
next->num_after = 0;
node_rm(s, next);
next = NULL;
reduction_performed = true;
continue;
}
}
} while (nodep && reduction_performed);
}
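/*
 * Reduction example (illustrative): given the adjacent nodes
 *
 *	prev  - idx: 0x00 mask: 0xffffffff num_after: 0
 *	nodep - idx: 0x20 mask: 0xffffffff num_after: 0x10
 *
 * case 2 above fires (all of nodep's mask bits set, prev adjacent),
 * yielding
 *
 *	prev  - idx: 0x00 mask: 0xffffffff num_after: 0x30
 *
 * and nodep, now empty, is removed on a later pass of the loop.
 */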
/* Returns whether the bit at the index given by idx within the
 * sparsebit array is set.
*/
bool sparsebit_is_set(struct sparsebit *s, sparsebit_idx_t idx)
{
struct node *nodep;
/* Find the node that describes the setting of the bit at idx */
for (nodep = s->root; nodep;
nodep = nodep->idx > idx ? nodep->left : nodep->right)
if (idx >= nodep->idx &&
idx <= nodep->idx + MASK_BITS + nodep->num_after - 1)
goto have_node;
return false;
have_node:
/* Bit is set if it is any of the bits described by num_after */
if (nodep->num_after && idx >= nodep->idx + MASK_BITS)
return true;
/* Is the corresponding mask bit set */
assert(idx >= nodep->idx && idx - nodep->idx < MASK_BITS);
return !!(nodep->mask & (1 << (idx - nodep->idx)));
}
/* Within the sparsebit array pointed to by s, sets the bit
* at the index given by idx.
*/
static void bit_set(struct sparsebit *s, sparsebit_idx_t idx)
{
struct node *nodep;
/* Skip bits that are already set */
if (sparsebit_is_set(s, idx))
return;
/*
* Get a node where the bit at idx is described by the mask.
	 * node_split() will also create a node if there isn't
	 * already a node that describes the setting of the bit.
*/
nodep = node_split(s, idx & -MASK_BITS);
/* Set the bit within the nodes mask */
assert(idx >= nodep->idx && idx <= nodep->idx + MASK_BITS - 1);
assert(!(nodep->mask & (1 << (idx - nodep->idx))));
nodep->mask |= 1 << (idx - nodep->idx);
s->num_set++;
node_reduce(s, nodep);
}
/* Within the sparsebit array pointed to by s, clears the bit
* at the index given by idx.
*/
static void bit_clear(struct sparsebit *s, sparsebit_idx_t idx)
{
struct node *nodep;
/* Skip bits that are already cleared */
if (!sparsebit_is_set(s, idx))
return;
/* Is there a node that describes the setting of this bit? */
nodep = node_find(s, idx);
if (!nodep)
return;
/*
	 * If the bit is a num_after bit, split the node so that
	 * the bit is part of a node's mask.
*/
if (idx >= nodep->idx + MASK_BITS)
nodep = node_split(s, idx & -MASK_BITS);
/*
* After node_split above, bit at idx should be within the mask.
* Clear that bit.
*/
assert(idx >= nodep->idx && idx <= nodep->idx + MASK_BITS - 1);
assert(nodep->mask & (1 << (idx - nodep->idx)));
nodep->mask &= ~(1 << (idx - nodep->idx));
assert(s->num_set > 0 || sparsebit_all_set(s));
s->num_set--;
node_reduce(s, nodep);
}
/* Recursively dumps to the FILE stream given by stream the contents
* of the sub-tree of nodes pointed to by nodep. Each line of output
* is prefixed by the number of spaces given by indent. On each
* recursion, the indent amount is increased by 2. This causes nodes
* at each level deeper into the binary search tree to be displayed
* with a greater indent.
*/
static void dump_nodes(FILE *stream, struct node *nodep,
unsigned int indent)
{
char *node_type;
/* Dump contents of node */
if (!nodep->parent)
node_type = "root";
else if (nodep == nodep->parent->left)
node_type = "left";
else {
assert(nodep == nodep->parent->right);
node_type = "right";
}
fprintf(stream, "%*s---- %s nodep: %p\n", indent, "", node_type, nodep);
fprintf(stream, "%*s parent: %p left: %p right: %p\n", indent, "",
nodep->parent, nodep->left, nodep->right);
fprintf(stream, "%*s idx: 0x%lx mask: 0x%x num_after: 0x%lx\n",
indent, "", nodep->idx, nodep->mask, nodep->num_after);
/* If present, dump contents of left child nodes */
if (nodep->left)
dump_nodes(stream, nodep->left, indent + 2);
/* If present, dump contents of right child nodes */
if (nodep->right)
dump_nodes(stream, nodep->right, indent + 2);
}
static inline sparsebit_idx_t node_first_set(struct node *nodep, int start)
{
mask_t leading = (mask_t)1 << start;
int n1 = __builtin_ctz(nodep->mask & -leading);
return nodep->idx + n1;
}
static inline sparsebit_idx_t node_first_clear(struct node *nodep, int start)
{
mask_t leading = (mask_t)1 << start;
int n1 = __builtin_ctz(~nodep->mask & -leading);
return nodep->idx + n1;
}
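/*
 * Bit-trick sketch for the helpers above (illustrative values): with
 * start == 3 and mask == 0b10110001, node_first_set() computes
 *
 *	leading         = 0b00001000
 *	-leading        = 0b...11111000	(isolates bits >= start)
 *	mask & -leading = 0b10110000
 *	ctz(0b10110000) = 4
 *
 * so the first set bit at or above mask bit 3 is mask bit 4, i.e. index
 * nodep->idx + 4. node_first_clear() applies the same trick to ~mask.
 */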
/* Dumps to the FILE stream specified by stream, the implementation dependent
* internal state of s. Each line of output is prefixed with the number
* of spaces given by indent. The output is completely implementation
* dependent and subject to change. Output from this function should only
* be used for diagnostic purposes. For example, this function can be
* used by test cases after they detect an unexpected condition, as a means
* to capture diagnostic information.
*/
static void sparsebit_dump_internal(FILE *stream, struct sparsebit *s,
unsigned int indent)
{
/* Dump the contents of s */
fprintf(stream, "%*sroot: %p\n", indent, "", s->root);
fprintf(stream, "%*snum_set: 0x%lx\n", indent, "", s->num_set);
if (s->root)
dump_nodes(stream, s->root, indent);
}
/* Allocates and returns a new sparsebit array. The initial state
* of the newly allocated sparsebit array has all bits cleared.
*/
struct sparsebit *sparsebit_alloc(void)
{
struct sparsebit *s;
/* Allocate top level structure. */
s = calloc(1, sizeof(*s));
if (!s) {
perror("calloc");
abort();
}
return s;
}
/* Frees the implementation dependent data for the sparsebit array
 * pointed to by s, and sets the caller's pointer to NULL.
*/
void sparsebit_free(struct sparsebit **sbitp)
{
struct sparsebit *s = *sbitp;
if (!s)
return;
sparsebit_clear_all(s);
free(s);
*sbitp = NULL;
}
/* Makes a copy of the sparsebit array given by s, to the sparsebit
* array given by d. Note, d must have already been allocated via
 * sparsebit_alloc(). It may already have bits set; any existing
 * bits in d are cleared before the copy.
*/
void sparsebit_copy(struct sparsebit *d, struct sparsebit *s)
{
/* First clear any bits already set in the destination */
sparsebit_clear_all(d);
if (s->root) {
d->root = node_copy_subtree(s->root);
d->num_set = s->num_set;
}
}
/* Returns whether num consecutive bits starting at idx are all set. */
bool sparsebit_is_set_num(struct sparsebit *s,
sparsebit_idx_t idx, sparsebit_num_t num)
{
sparsebit_idx_t next_cleared;
assert(num > 0);
assert(idx + num - 1 >= idx);
/* With num > 0, the first bit must be set. */
if (!sparsebit_is_set(s, idx))
return false;
/* Find the next cleared bit */
next_cleared = sparsebit_next_clear(s, idx);
/*
* If no cleared bits beyond idx, then there are at least num
* set bits. idx + num doesn't wrap. Otherwise check if
* there are enough set bits between idx and the next cleared bit.
*/
return next_cleared == 0 || next_cleared - idx >= num;
}
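/*
 * Example (illustrative): with only bits 0x10..0x1f set,
 * sparsebit_is_set_num(s, 0x10, 0x10) finds next_cleared == 0x20 and
 * 0x20 - 0x10 >= 0x10 holds, so it returns true; asking for 0x11 bits
 * fails because 0x20 - 0x10 < 0x11.
 */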
/* Returns whether the bit at the index given by idx is cleared. */
bool sparsebit_is_clear(struct sparsebit *s,
sparsebit_idx_t idx)
{
return !sparsebit_is_set(s, idx);
}
/* Returns whether num consecutive bits starting at idx are all cleared. */
bool sparsebit_is_clear_num(struct sparsebit *s,
sparsebit_idx_t idx, sparsebit_num_t num)
{
sparsebit_idx_t next_set;
assert(num > 0);
assert(idx + num - 1 >= idx);
/* With num > 0, the first bit must be cleared. */
if (!sparsebit_is_clear(s, idx))
return false;
/* Find the next set bit */
next_set = sparsebit_next_set(s, idx);
/*
* If no set bits beyond idx, then there are at least num
* cleared bits. idx + num doesn't wrap. Otherwise check if
* there are enough cleared bits between idx and the next set bit.
*/
return next_set == 0 || next_set - idx >= num;
}
/* Returns the total number of bits set. Note: 0 is also returned for
* the case of all bits set. This is because with all bits set, there
* is 1 additional bit set beyond what can be represented in the return
* value. Use sparsebit_any_set(), instead of sparsebit_num_set() > 0,
* to determine if the sparsebit array has any bits set.
*/
sparsebit_num_t sparsebit_num_set(struct sparsebit *s)
{
return s->num_set;
}
/* Returns whether any bit is set in the sparsebit array. */
bool sparsebit_any_set(struct sparsebit *s)
{
/*
	 * Nodes only describe set bits. If there are any nodes,
	 * then there is at least 1 bit set.
*/
if (!s->root)
return false;
/*
	 * Every node should have a non-zero mask. For now, just
	 * assure that the root node has a non-zero mask,
* which is a quick check that at least 1 bit is set.
*/
assert(s->root->mask != 0);
assert(s->num_set > 0 ||
(s->root->num_after == ((sparsebit_num_t) 0) - MASK_BITS &&
s->root->mask == ~(mask_t) 0));
return true;
}
/* Returns whether all the bits in the sparsebit array are cleared. */
bool sparsebit_all_clear(struct sparsebit *s)
{
return !sparsebit_any_set(s);
}
/* Returns whether any bit in the sparsebit array is cleared. */
bool sparsebit_any_clear(struct sparsebit *s)
{
return !sparsebit_all_set(s);
}
/* Returns the index of the first set bit. Aborts if no bits are set. */
sparsebit_idx_t sparsebit_first_set(struct sparsebit *s)
{
struct node *nodep;
/* Validate at least 1 bit is set */
assert(sparsebit_any_set(s));
nodep = node_first(s);
return node_first_set(nodep, 0);
}
/* Returns the index of the first cleared bit. Abort if
* no bits are cleared.
*/
sparsebit_idx_t sparsebit_first_clear(struct sparsebit *s)
{
struct node *nodep1, *nodep2;
/* Validate at least 1 bit is cleared. */
assert(sparsebit_any_clear(s));
/* If no nodes or first node index > 0 then lowest cleared is 0 */
nodep1 = node_first(s);
if (!nodep1 || nodep1->idx > 0)
return 0;
	/* Does the mask in the first node contain any cleared bits? */
if (nodep1->mask != ~(mask_t) 0)
return node_first_clear(nodep1, 0);
/*
* All mask bits set in first node. If there isn't a second node
* then the first cleared bit is the first bit after the bits
* described by the first node.
*/
nodep2 = node_next(s, nodep1);
if (!nodep2) {
/*
* No second node. First cleared bit is first bit beyond
* bits described by first node.
*/
assert(nodep1->mask == ~(mask_t) 0);
assert(nodep1->idx + MASK_BITS + nodep1->num_after != (sparsebit_idx_t) 0);
return nodep1->idx + MASK_BITS + nodep1->num_after;
}
/*
* There is a second node.
* If it is not adjacent to the first node, then there is a gap
* of cleared bits between the nodes, and the first cleared bit
* is the first bit within the gap.
*/
if (nodep1->idx + MASK_BITS + nodep1->num_after != nodep2->idx)
return nodep1->idx + MASK_BITS + nodep1->num_after;
/*
* Second node is adjacent to the first node.
* Because it is adjacent, its mask should be non-zero. If all
* its mask bits are set, then with it being adjacent, it should
* have had the mask bits moved into the num_after setting of the
* previous node.
*/
return node_first_clear(nodep2, 0);
}
/* Returns index of next bit set within s after the index given by prev.
* Returns 0 if there are no bits after prev that are set.
*/
sparsebit_idx_t sparsebit_next_set(struct sparsebit *s,
sparsebit_idx_t prev)
{
sparsebit_idx_t lowest_possible = prev + 1;
sparsebit_idx_t start;
struct node *nodep;
/* A bit after the highest index can't be set. */
if (lowest_possible == 0)
return 0;
/*
* Find the leftmost 'candidate' overlapping or to the right
* of lowest_possible.
*/
struct node *candidate = NULL;
/* True iff lowest_possible is within candidate */
bool contains = false;
/*
* Find node that describes setting of bit at lowest_possible.
* If such a node doesn't exist, find the node with the lowest
* starting index that is > lowest_possible.
*/
for (nodep = s->root; nodep;) {
if ((nodep->idx + MASK_BITS + nodep->num_after - 1)
>= lowest_possible) {
candidate = nodep;
if (candidate->idx <= lowest_possible) {
contains = true;
break;
}
nodep = nodep->left;
} else {
nodep = nodep->right;
}
}
if (!candidate)
return 0;
assert(candidate->mask != 0);
/* Does the candidate node describe the setting of lowest_possible? */
if (!contains) {
/*
* Candidate doesn't describe setting of bit at lowest_possible.
* Candidate points to the first node with a starting index
* > lowest_possible.
*/
assert(candidate->idx > lowest_possible);
return node_first_set(candidate, 0);
}
/*
* Candidate describes setting of bit at lowest_possible.
* Note: although the node describes the setting of the bit
	 * at lowest_possible, it's possible that its setting and the
* setting of all latter bits described by this node are 0.
* For now, just handle the cases where this node describes
* a bit at or after an index of lowest_possible that is set.
*/
start = lowest_possible - candidate->idx;
	if (start < MASK_BITS && candidate->mask >= ((mask_t) 1 << start))
return node_first_set(candidate, start);
if (candidate->num_after) {
sparsebit_idx_t first_num_after_idx = candidate->idx + MASK_BITS;
return lowest_possible < first_num_after_idx
? first_num_after_idx : lowest_possible;
}
/*
* Although candidate node describes setting of bit at
* the index of lowest_possible, all bits at that index and
	 * later that are described by candidate are cleared. With
* this, the next bit is the first bit in the next node, if
* such a node exists. If a next node doesn't exist, then
* there is no next set bit.
*/
candidate = node_next(s, candidate);
if (!candidate)
return 0;
return node_first_set(candidate, 0);
}
/* Returns index of next bit cleared within s after the index given by prev.
* Returns 0 if there are no bits after prev that are cleared.
*/
sparsebit_idx_t sparsebit_next_clear(struct sparsebit *s,
sparsebit_idx_t prev)
{
sparsebit_idx_t lowest_possible = prev + 1;
sparsebit_idx_t idx;
struct node *nodep1, *nodep2;
	/* A bit after the highest index can't be cleared. */
if (lowest_possible == 0)
return 0;
/*
* Does a node describing the setting of lowest_possible exist?
* If not, the bit at lowest_possible is cleared.
*/
nodep1 = node_find(s, lowest_possible);
if (!nodep1)
return lowest_possible;
	/* Does a mask bit in node 1 describe the next cleared bit? */
for (idx = lowest_possible - nodep1->idx; idx < MASK_BITS; idx++)
if (!(nodep1->mask & (1 << idx)))
return nodep1->idx + idx;
/*
* Next cleared bit is not described by node 1. If there
	 * isn't a next node, then the next cleared bit is the first
	 * bit after the bits described by the first node.
*/
nodep2 = node_next(s, nodep1);
if (!nodep2)
return nodep1->idx + MASK_BITS + nodep1->num_after;
/*
* There is a second node.
* If it is not adjacent to the first node, then there is a gap
* of cleared bits between the nodes, and the next cleared bit
* is the first bit within the gap.
*/
if (nodep1->idx + MASK_BITS + nodep1->num_after != nodep2->idx)
return nodep1->idx + MASK_BITS + nodep1->num_after;
/*
* Second node is adjacent to the first node.
* Because it is adjacent, its mask should be non-zero. If all
* its mask bits are set, then with it being adjacent, it should
* have had the mask bits moved into the num_after setting of the
* previous node.
*/
return node_first_clear(nodep2, 0);
}
/* Starting with the index 1 greater than the index given by start, finds
* and returns the index of the first sequence of num consecutively set
 * bits. Returns a value of 0 if no such sequence exists.
*/
sparsebit_idx_t sparsebit_next_set_num(struct sparsebit *s,
sparsebit_idx_t start, sparsebit_num_t num)
{
sparsebit_idx_t idx;
assert(num >= 1);
for (idx = sparsebit_next_set(s, start);
idx != 0 && idx + num - 1 >= idx;
idx = sparsebit_next_set(s, idx)) {
assert(sparsebit_is_set(s, idx));
/*
* Does the sequence of bits starting at idx consist of
* num set bits?
*/
if (sparsebit_is_set_num(s, idx, num))
return idx;
/*
* Sequence of set bits at idx isn't large enough.
* Skip this entire sequence of set bits.
*/
idx = sparsebit_next_clear(s, idx);
if (idx == 0)
return 0;
}
return 0;
}
/* Starting with the index 1 greater than the index given by start, finds
* and returns the index of the first sequence of num consecutively cleared
 * bits. Returns a value of 0 if no such sequence exists.
*/
sparsebit_idx_t sparsebit_next_clear_num(struct sparsebit *s,
sparsebit_idx_t start, sparsebit_num_t num)
{
sparsebit_idx_t idx;
assert(num >= 1);
for (idx = sparsebit_next_clear(s, start);
idx != 0 && idx + num - 1 >= idx;
idx = sparsebit_next_clear(s, idx)) {
assert(sparsebit_is_clear(s, idx));
/*
* Does the sequence of bits starting at idx consist of
* num cleared bits?
*/
if (sparsebit_is_clear_num(s, idx, num))
return idx;
/*
* Sequence of cleared bits at idx isn't large enough.
* Skip this entire sequence of cleared bits.
*/
idx = sparsebit_next_set(s, idx);
if (idx == 0)
return 0;
}
return 0;
}
/* Sets the bits in the inclusive range start through start + num - 1. */
void sparsebit_set_num(struct sparsebit *s,
sparsebit_idx_t start, sparsebit_num_t num)
{
struct node *nodep, *next;
unsigned int n1;
sparsebit_idx_t idx;
sparsebit_num_t n;
sparsebit_idx_t middle_start, middle_end;
assert(num > 0);
assert(start + num - 1 >= start);
/*
* Leading - bits before first mask boundary.
*
	 * TODO(lhuemill): With some effort it may be possible to
	 *   replace the following loop with a straight-line sequence
	 *   of statements. The high-level sequence would be:
	 *
	 *   1. Use node_split() to force the node that describes the
	 *      setting of idx to be within the mask portion of a node.
	 *   2. Form mask of bits to be set.
	 *   3. Determine number of mask bits already set in the node
	 *      and store in a local variable named num_already_set.
	 *   4. Set the appropriate mask bits within the node.
	 *   5. Increment the struct sparsebit num_set member
	 *      by the number of bits that were actually set.
	 *      Exclude from the counts bits that were already set.
	 *   6. Before returning to the caller, use node_reduce() to
	 *      handle the multiple corner cases that this method
	 *      introduces.
*/
for (idx = start, n = num; n > 0 && idx % MASK_BITS != 0; idx++, n--)
bit_set(s, idx);
	/* Middle - bits spanning one or more entire masks */
middle_start = idx;
middle_end = middle_start + (n & -MASK_BITS) - 1;
if (n >= MASK_BITS) {
nodep = node_split(s, middle_start);
/*
* As needed, split just after end of middle bits.
* No split needed if end of middle bits is at highest
* supported bit index.
*/
if (middle_end + 1 > middle_end)
(void) node_split(s, middle_end + 1);
/* Delete nodes that only describe bits within the middle. */
for (next = node_next(s, nodep);
next && (next->idx < middle_end);
next = node_next(s, nodep)) {
assert(next->idx + MASK_BITS + next->num_after - 1 <= middle_end);
node_rm(s, next);
next = NULL;
}
/* As needed set each of the mask bits */
for (n1 = 0; n1 < MASK_BITS; n1++) {
if (!(nodep->mask & (1 << n1))) {
nodep->mask |= 1 << n1;
s->num_set++;
}
}
s->num_set -= nodep->num_after;
nodep->num_after = middle_end - middle_start + 1 - MASK_BITS;
s->num_set += nodep->num_after;
node_reduce(s, nodep);
}
idx = middle_end + 1;
n -= middle_end - middle_start + 1;
/* Trailing - bits at and beyond last mask boundary */
assert(n < MASK_BITS);
for (; n > 0; idx++, n--)
bit_set(s, idx);
}
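/*
 * Decomposition example (illustrative, MASK_BITS == 32): the call
 * sparsebit_set_num(s, 0x1c, 0x50) is handled in three phases:
 *
 *	leading:  bits 0x1c..0x1f set one at a time, up to the mask
 *	          boundary (4 bits)
 *	middle:   bits 0x20..0x5f handled via node_split() and direct
 *	          node manipulation (0x40 bits, from n & -MASK_BITS)
 *	trailing: bits 0x60..0x6b set one at a time (0xc bits)
 */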
/* Clears the bits in the inclusive range start through start + num - 1. */
void sparsebit_clear_num(struct sparsebit *s,
sparsebit_idx_t start, sparsebit_num_t num)
{
struct node *nodep, *next;
unsigned int n1;
sparsebit_idx_t idx;
sparsebit_num_t n;
sparsebit_idx_t middle_start, middle_end;
assert(num > 0);
assert(start + num - 1 >= start);
/* Leading - bits before first mask boundary */
for (idx = start, n = num; n > 0 && idx % MASK_BITS != 0; idx++, n--)
bit_clear(s, idx);
	/* Middle - bits spanning one or more entire masks */
middle_start = idx;
middle_end = middle_start + (n & -MASK_BITS) - 1;
if (n >= MASK_BITS) {
nodep = node_split(s, middle_start);
/*
* As needed, split just after end of middle bits.
* No split needed if end of middle bits is at highest
* supported bit index.
*/
if (middle_end + 1 > middle_end)
(void) node_split(s, middle_end + 1);
/* Delete nodes that only describe bits within the middle. */
for (next = node_next(s, nodep);
next && (next->idx < middle_end);
next = node_next(s, nodep)) {
assert(next->idx + MASK_BITS + next->num_after - 1 <= middle_end);
node_rm(s, next);
next = NULL;
}
/* As needed clear each of the mask bits */
for (n1 = 0; n1 < MASK_BITS; n1++) {
if (nodep->mask & (1 << n1)) {
nodep->mask &= ~(1 << n1);
s->num_set--;
}
}
/* Clear any bits described by num_after */
s->num_set -= nodep->num_after;
nodep->num_after = 0;
/*
* Delete the node that describes the beginning of
* the middle bits and perform any allowed reductions
* with the nodes prev or next of nodep.
*/
node_reduce(s, nodep);
nodep = NULL;
}
idx = middle_end + 1;
n -= middle_end - middle_start + 1;
/* Trailing - bits at and beyond last mask boundary */
assert(n < MASK_BITS);
for (; n > 0; idx++, n--)
bit_clear(s, idx);
}
/* Sets the bit at the index given by idx. */
void sparsebit_set(struct sparsebit *s, sparsebit_idx_t idx)
{
sparsebit_set_num(s, idx, 1);
}
/* Clears the bit at the index given by idx. */
void sparsebit_clear(struct sparsebit *s, sparsebit_idx_t idx)
{
sparsebit_clear_num(s, idx, 1);
}
/* Sets the bits in the entire addressable range of the sparsebit array. */
void sparsebit_set_all(struct sparsebit *s)
{
sparsebit_set(s, 0);
sparsebit_set_num(s, 1, ~(sparsebit_idx_t) 0);
assert(sparsebit_all_set(s));
}
/* Clears the bits in the entire addressable range of the sparsebit array. */
void sparsebit_clear_all(struct sparsebit *s)
{
sparsebit_clear(s, 0);
sparsebit_clear_num(s, 1, ~(sparsebit_idx_t) 0);
assert(!sparsebit_any_set(s));
}
static size_t display_range(FILE *stream, sparsebit_idx_t low,
sparsebit_idx_t high, bool prepend_comma_space)
{
char *fmt_str;
size_t sz;
/* Determine the printf format string */
if (low == high)
fmt_str = prepend_comma_space ? ", 0x%lx" : "0x%lx";
else
fmt_str = prepend_comma_space ? ", 0x%lx:0x%lx" : "0x%lx:0x%lx";
/*
* When stream is NULL, just determine the size of what would
* have been printed, else print the range.
*/
if (!stream)
sz = snprintf(NULL, 0, fmt_str, low, high);
else
sz = fprintf(stream, fmt_str, low, high);
return sz;
}
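/*
 * The stream == NULL path uses the standard snprintf(NULL, 0, ...)
 * idiom, which returns the number of characters that would have been
 * written. Illustrative measure-then-print use, as in sparsebit_dump()
 * below:
 *
 *	sz = display_range(NULL, low, high, comma);	(measure only)
 *	if (current_line_len + sz > DUMP_LINE_MAX)
 *		... wrap the line ...
 *	sz = display_range(stream, low, high, comma);	(actually print)
 */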
/* Dumps to the FILE stream given by stream, the bit settings
* of s. Each line of output is prefixed with the number of
* spaces given by indent. The length of each line is implementation
* dependent and does not depend on the indent amount. The following
* is an example output of a sparsebit array that has bits:
*
* 0x5, 0x8, 0xa:0xe, 0x12
*
* This corresponds to a sparsebit whose bits 5, 8, 10, 11, 12, 13, 14, 18
* are set. Note that a ':', instead of a '-' is used to specify a range of
* contiguous bits. This is done because '-' is used to specify command-line
* options, and sometimes ranges are specified as command-line arguments.
*/
void sparsebit_dump(FILE *stream, struct sparsebit *s,
unsigned int indent)
{
size_t current_line_len = 0;
size_t sz;
struct node *nodep;
if (!sparsebit_any_set(s))
return;
/* Display initial indent */
fprintf(stream, "%*s", indent, "");
/* For each node */
for (nodep = node_first(s); nodep; nodep = node_next(s, nodep)) {
unsigned int n1;
sparsebit_idx_t low, high;
/* For each group of bits in the mask */
for (n1 = 0; n1 < MASK_BITS; n1++) {
if (nodep->mask & (1 << n1)) {
low = high = nodep->idx + n1;
for (; n1 < MASK_BITS; n1++) {
if (nodep->mask & (1 << n1))
high = nodep->idx + n1;
else
break;
}
if ((n1 == MASK_BITS) && nodep->num_after)
high += nodep->num_after;
/*
* How much room will it take to display
* this range.
*/
sz = display_range(NULL, low, high,
current_line_len != 0);
/*
* If there is not enough room, display
* a newline plus the indent of the next
* line.
*/
if (current_line_len + sz > DUMP_LINE_MAX) {
fputs("\n", stream);
fprintf(stream, "%*s", indent, "");
current_line_len = 0;
}
/* Display the range */
sz = display_range(stream, low, high,
current_line_len != 0);
current_line_len += sz;
}
}
/*
		 * If num_after is non-zero and the most-significant bit of
		 * mask is not set, then a range still needs to be displayed
		 * for the bits described by num_after.
*/
if (!(nodep->mask & (1 << (MASK_BITS - 1))) && nodep->num_after) {
low = nodep->idx + MASK_BITS;
high = nodep->idx + MASK_BITS + nodep->num_after - 1;
/*
* How much room will it take to display
* this range.
*/
sz = display_range(NULL, low, high,
current_line_len != 0);
/*
* If there is not enough room, display
* a newline plus the indent of the next
* line.
*/
if (current_line_len + sz > DUMP_LINE_MAX) {
fputs("\n", stream);
fprintf(stream, "%*s", indent, "");
current_line_len = 0;
}
/* Display the range */
sz = display_range(stream, low, high,
current_line_len != 0);
current_line_len += sz;
}
}
fputs("\n", stream);
}
/* Validates the internal state of the sparsebit array given by
* s. On error, diagnostic information is printed to stderr and
* abort is called.
*/
void sparsebit_validate_internal(struct sparsebit *s)
{
bool error_detected = false;
struct node *nodep, *prev = NULL;
sparsebit_num_t total_bits_set = 0;
unsigned int n1;
/* For each node */
for (nodep = node_first(s); nodep;
prev = nodep, nodep = node_next(s, nodep)) {
/*
* Increase total bits set by the number of bits set
* in this node.
*/
for (n1 = 0; n1 < MASK_BITS; n1++)
if (nodep->mask & (1 << n1))
total_bits_set++;
total_bits_set += nodep->num_after;
/*
		 * A mask of zero is disallowed by choice. For diagnostic
		 * purposes it is beneficial to have only one valid means
		 * to represent a given set of bits, and requiring a
		 * non-zero mask provides that.
*/
if (nodep->mask == 0) {
fprintf(stderr, "Node mask of zero, "
"nodep: %p nodep->mask: 0x%x",
nodep, nodep->mask);
error_detected = true;
break;
}
/*
* Validate num_after is not greater than the max index
* - the number of mask bits. The num_after member
* uses 0-based indexing and thus has no value that
* represents all bits set. This limitation is handled
* by requiring a non-zero mask. With a non-zero mask,
* MASK_BITS worth of bits are described by the mask,
* which makes the largest needed num_after equal to:
*
* (~(sparsebit_num_t) 0) - MASK_BITS + 1
*/
if (nodep->num_after
> (~(sparsebit_num_t) 0) - MASK_BITS + 1) {
fprintf(stderr, "num_after too large, "
"nodep: %p nodep->num_after: 0x%lx",
nodep, nodep->num_after);
error_detected = true;
break;
}
/* Validate node index is divisible by the mask size */
if (nodep->idx % MASK_BITS) {
fprintf(stderr, "Node index not divisible by "
"mask size,\n"
" nodep: %p nodep->idx: 0x%lx "
"MASK_BITS: %lu\n",
nodep, nodep->idx, MASK_BITS);
error_detected = true;
break;
}
/*
* Validate bits described by node don't wrap beyond the
* highest supported index.
*/
if ((nodep->idx + MASK_BITS + nodep->num_after - 1) < nodep->idx) {
fprintf(stderr, "Bits described by node wrap "
"beyond highest supported index,\n"
" nodep: %p nodep->idx: 0x%lx\n"
" MASK_BITS: %lu nodep->num_after: 0x%lx",
nodep, nodep->idx, MASK_BITS, nodep->num_after);
error_detected = true;
break;
}
/* Check parent pointers. */
if (nodep->left) {
if (nodep->left->parent != nodep) {
fprintf(stderr, "Left child parent pointer "
"doesn't point to this node,\n"
" nodep: %p nodep->left: %p "
"nodep->left->parent: %p",
nodep, nodep->left,
nodep->left->parent);
error_detected = true;
break;
}
}
if (nodep->right) {
if (nodep->right->parent != nodep) {
fprintf(stderr, "Right child parent pointer "
"doesn't point to this node,\n"
" nodep: %p nodep->right: %p "
"nodep->right->parent: %p",
nodep, nodep->right,
nodep->right->parent);
error_detected = true;
break;
}
}
if (!nodep->parent) {
if (s->root != nodep) {
fprintf(stderr, "Unexpected root node, "
"s->root: %p nodep: %p",
s->root, nodep);
error_detected = true;
break;
}
}
if (prev) {
/*
* Is index of previous node before index of
* current node?
*/
if (prev->idx >= nodep->idx) {
fprintf(stderr, "Previous node index "
">= current node index,\n"
" prev: %p prev->idx: 0x%lx\n"
" nodep: %p nodep->idx: 0x%lx",
prev, prev->idx, nodep, nodep->idx);
error_detected = true;
break;
}
/*
			 * Nodes occur in ascending order, based on each
			 * node's starting index.
*/
if ((prev->idx + MASK_BITS + prev->num_after - 1)
>= nodep->idx) {
fprintf(stderr, "Previous node bit range "
"overlap with current node bit range,\n"
" prev: %p prev->idx: 0x%lx "
"prev->num_after: 0x%lx\n"
" nodep: %p nodep->idx: 0x%lx "
"nodep->num_after: 0x%lx\n"
" MASK_BITS: %lu",
prev, prev->idx, prev->num_after,
nodep, nodep->idx, nodep->num_after,
MASK_BITS);
error_detected = true;
break;
}
/*
* When the node has all mask bits set, it shouldn't
* be adjacent to the last bit described by the
* previous node.
*/
if (nodep->mask == ~(mask_t) 0 &&
prev->idx + MASK_BITS + prev->num_after == nodep->idx) {
fprintf(stderr, "Current node has mask with "
"all bits set and is adjacent to the "
"previous node,\n"
" prev: %p prev->idx: 0x%lx "
"prev->num_after: 0x%lx\n"
" nodep: %p nodep->idx: 0x%lx "
"nodep->num_after: 0x%lx\n"
" MASK_BITS: %lu",
prev, prev->idx, prev->num_after,
nodep, nodep->idx, nodep->num_after,
MASK_BITS);
error_detected = true;
break;
}
}
}
if (!error_detected) {
/*
		 * Is the sum of bits set in each node equal to the count
		 * of total bits set?
*/
if (s->num_set != total_bits_set) {
fprintf(stderr, "Number of bits set mismatch,\n"
" s->num_set: 0x%lx total_bits_set: 0x%lx",
s->num_set, total_bits_set);
error_detected = true;
}
}
if (error_detected) {
fputs(" dump_internal:\n", stderr);
sparsebit_dump_internal(stderr, s, 4);
abort();
}
}
#ifdef FUZZ
/* A simple but effective fuzzing driver. Look for bugs with the help
* of some invariants and of a trivial representation of sparsebit.
* Just use 512 bytes of /dev/zero and /dev/urandom as inputs, and let
* afl-fuzz do the magic. :)
*/
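/* Illustrative build-and-run sketch; the compiler flags, include path
 * and corpus layout below are assumptions, not part of this file:
 *
 *	cc -DFUZZ -I../include -o sparsebit-fuzz sparsebit.c
 *	mkdir in && head -c 512 /dev/urandom > in/seed
 *	afl-fuzz -i in -o out -- ./sparsebit-fuzz
 *
 * The driver reads op/first/last triples from stdin until EOF.
 */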
#include <stdlib.h>
struct range {
sparsebit_idx_t first, last;
bool set;
};
struct sparsebit *s;
struct range ranges[1000];
int num_ranges;
static bool get_value(sparsebit_idx_t idx)
{
int i;
for (i = num_ranges; --i >= 0; )
if (ranges[i].first <= idx && idx <= ranges[i].last)
return ranges[i].set;
return false;
}
static void operate(int code, sparsebit_idx_t first, sparsebit_idx_t last)
{
sparsebit_num_t num;
sparsebit_idx_t next;
if (first < last) {
num = last - first + 1;
} else {
num = first - last + 1;
first = last;
last = first + num - 1;
}
switch (code) {
case 0:
sparsebit_set(s, first);
assert(sparsebit_is_set(s, first));
assert(!sparsebit_is_clear(s, first));
assert(sparsebit_any_set(s));
assert(!sparsebit_all_clear(s));
if (get_value(first))
return;
if (num_ranges == 1000)
exit(0);
ranges[num_ranges++] = (struct range)
{ .first = first, .last = first, .set = true };
break;
case 1:
sparsebit_clear(s, first);
assert(!sparsebit_is_set(s, first));
assert(sparsebit_is_clear(s, first));
assert(sparsebit_any_clear(s));
assert(!sparsebit_all_set(s));
if (!get_value(first))
return;
if (num_ranges == 1000)
exit(0);
ranges[num_ranges++] = (struct range)
{ .first = first, .last = first, .set = false };
break;
case 2:
assert(sparsebit_is_set(s, first) == get_value(first));
assert(sparsebit_is_clear(s, first) == !get_value(first));
break;
case 3:
if (sparsebit_any_set(s))
assert(get_value(sparsebit_first_set(s)));
if (sparsebit_any_clear(s))
assert(!get_value(sparsebit_first_clear(s)));
sparsebit_set_all(s);
assert(!sparsebit_any_clear(s));
assert(sparsebit_all_set(s));
num_ranges = 0;
ranges[num_ranges++] = (struct range)
{ .first = 0, .last = ~(sparsebit_idx_t)0, .set = true };
break;
case 4:
if (sparsebit_any_set(s))
assert(get_value(sparsebit_first_set(s)));
if (sparsebit_any_clear(s))
assert(!get_value(sparsebit_first_clear(s)));
sparsebit_clear_all(s);
assert(!sparsebit_any_set(s));
assert(sparsebit_all_clear(s));
num_ranges = 0;
break;
case 5:
next = sparsebit_next_set(s, first);
assert(next == 0 || next > first);
assert(next == 0 || get_value(next));
break;
case 6:
next = sparsebit_next_clear(s, first);
assert(next == 0 || next > first);
assert(next == 0 || !get_value(next));
break;
case 7:
next = sparsebit_next_clear(s, first);
if (sparsebit_is_set_num(s, first, num)) {
assert(next == 0 || next > last);
if (first)
next = sparsebit_next_set(s, first - 1);
else if (sparsebit_any_set(s))
next = sparsebit_first_set(s);
else
return;
assert(next == first);
} else {
assert(sparsebit_is_clear(s, first) || next <= last);
}
break;
case 8:
next = sparsebit_next_set(s, first);
if (sparsebit_is_clear_num(s, first, num)) {
assert(next == 0 || next > last);
if (first)
next = sparsebit_next_clear(s, first - 1);
else if (sparsebit_any_clear(s))
next = sparsebit_first_clear(s);
else
return;
assert(next == first);
} else {
assert(sparsebit_is_set(s, first) || next <= last);
}
break;
case 9:
sparsebit_set_num(s, first, num);
assert(sparsebit_is_set_num(s, first, num));
assert(!sparsebit_is_clear_num(s, first, num));
assert(sparsebit_any_set(s));
assert(!sparsebit_all_clear(s));
if (num_ranges == 1000)
exit(0);
ranges[num_ranges++] = (struct range)
{ .first = first, .last = last, .set = true };
break;
case 10:
sparsebit_clear_num(s, first, num);
assert(!sparsebit_is_set_num(s, first, num));
assert(sparsebit_is_clear_num(s, first, num));
assert(sparsebit_any_clear(s));
assert(!sparsebit_all_set(s));
if (num_ranges == 1000)
exit(0);
ranges[num_ranges++] = (struct range)
{ .first = first, .last = last, .set = false };
break;
case 11:
sparsebit_validate_internal(s);
break;
default:
break;
}
}
unsigned char get8(void)
{
int ch;
ch = getchar();
if (ch == EOF)
exit(0);
return ch;
}
uint64_t get64(void)
{
uint64_t x;
x = get8();
x = (x << 8) | get8();
x = (x << 8) | get8();
x = (x << 8) | get8();
x = (x << 8) | get8();
x = (x << 8) | get8();
x = (x << 8) | get8();
return (x << 8) | get8();
}
int main(void)
{
s = sparsebit_alloc();
for (;;) {
uint8_t op = get8() & 0xf;
uint64_t first = get64();
uint64_t last = get64();
operate(op, first, last);
}
}
#endif
| linux-master | tools/testing/selftests/kvm/lib/sparsebit.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <stddef.h>
/*
* Override the "basic" built-in string helpers so that they can be used in
* guest code. KVM selftests don't support dynamic loading in guest code and
* will jump into the weeds if the compiler decides to insert an out-of-line
* call via the PLT.
*/
int memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1, *su2;
int res = 0;
for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) {
if ((res = *su1 - *su2) != 0)
break;
}
return res;
}
void *memcpy(void *dest, const void *src, size_t count)
{
char *tmp = dest;
const char *s = src;
while (count--)
*tmp++ = *s++;
return dest;
}
void *memset(void *s, int c, size_t count)
{
char *xs = s;
while (count--)
*xs++ = c;
return s;
}
size_t strnlen(const char *s, size_t count)
{
const char *sc;
for (sc = s; count-- && *sc != '\0'; ++sc)
/* nothing */;
return sc - s;
}
| linux-master | tools/testing/selftests/kvm/lib/string_override.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020, Red Hat, Inc.
*/
#include "guest_modes.h"
#ifdef __aarch64__
#include "processor.h"
enum vm_guest_mode vm_mode_default;
#endif
struct guest_mode guest_modes[NUM_VM_MODES];
void guest_modes_append_default(void)
{
#ifndef __aarch64__
guest_mode_append(VM_MODE_DEFAULT, true, true);
#else
{
unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
bool ps4k, ps16k, ps64k;
int i;
aarch64_get_supported_page_sizes(limit, &ps4k, &ps16k, &ps64k);
vm_mode_default = NUM_VM_MODES;
if (limit >= 52)
guest_mode_append(VM_MODE_P52V48_64K, ps64k, ps64k);
if (limit >= 48) {
guest_mode_append(VM_MODE_P48V48_4K, ps4k, ps4k);
guest_mode_append(VM_MODE_P48V48_16K, ps16k, ps16k);
guest_mode_append(VM_MODE_P48V48_64K, ps64k, ps64k);
}
if (limit >= 40) {
guest_mode_append(VM_MODE_P40V48_4K, ps4k, ps4k);
guest_mode_append(VM_MODE_P40V48_16K, ps16k, ps16k);
guest_mode_append(VM_MODE_P40V48_64K, ps64k, ps64k);
if (ps4k)
vm_mode_default = VM_MODE_P40V48_4K;
}
if (limit >= 36) {
guest_mode_append(VM_MODE_P36V48_4K, ps4k, ps4k);
guest_mode_append(VM_MODE_P36V48_16K, ps16k, ps16k);
guest_mode_append(VM_MODE_P36V48_64K, ps64k, ps64k);
guest_mode_append(VM_MODE_P36V47_16K, ps16k, ps16k);
}
/*
* Pick the first supported IPA size if the default
* isn't available.
*/
for (i = 0; vm_mode_default == NUM_VM_MODES && i < NUM_VM_MODES; i++) {
if (guest_modes[i].supported && guest_modes[i].enabled)
vm_mode_default = i;
}
TEST_ASSERT(vm_mode_default != NUM_VM_MODES,
"No supported mode!");
}
#endif
#ifdef __s390x__
{
int kvm_fd, vm_fd;
struct kvm_s390_vm_cpu_processor info;
kvm_fd = open_kvm_dev_path_or_exit();
vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, NULL);
kvm_device_attr_get(vm_fd, KVM_S390_VM_CPU_MODEL,
KVM_S390_VM_CPU_PROCESSOR, &info);
close(vm_fd);
close(kvm_fd);
		/* Starting with z13 we have 47 bits of physical address */
if (info.ibc >= 0x30)
guest_mode_append(VM_MODE_P47V64_4K, true, true);
}
#endif
#ifdef __riscv
{
unsigned int sz = kvm_check_cap(KVM_CAP_VM_GPA_BITS);
if (sz >= 52)
guest_mode_append(VM_MODE_P52V48_4K, true, true);
if (sz >= 48)
guest_mode_append(VM_MODE_P48V48_4K, true, true);
}
#endif
}
void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg)
{
int i;
for (i = 0; i < NUM_VM_MODES; ++i) {
if (!guest_modes[i].enabled)
continue;
TEST_ASSERT(guest_modes[i].supported,
"Guest mode ID %d (%s) not supported.",
i, vm_guest_mode_string(i));
func(i, arg);
}
}
void guest_modes_help(void)
{
int i;
printf(" -m: specify the guest mode ID to test\n"
" (default: test all supported modes)\n"
" This option may be used multiple times.\n"
" Guest mode IDs:\n");
for (i = 0; i < NUM_VM_MODES; ++i) {
printf(" %d: %s%s\n", i, vm_guest_mode_string(i),
guest_modes[i].supported ? " (supported)" : "");
}
}
void guest_modes_cmdline(const char *arg)
{
static bool mode_selected;
unsigned int mode;
int i;
if (!mode_selected) {
for (i = 0; i < NUM_VM_MODES; ++i)
guest_modes[i].enabled = false;
mode_selected = true;
}
mode = atoi_non_negative("Guest mode ID", arg);
TEST_ASSERT(mode < NUM_VM_MODES, "Guest mode ID %d too big", mode);
guest_modes[mode].enabled = true;
}
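/*
 * Illustrative wiring in a test's main(); run_test and the option
 * parsing are assumptions sketching typical usage, not code from this
 * file:
 *
 *	static void run_test(enum vm_guest_mode mode, void *arg)
 *	{
 *		... create a VM in "mode" and exercise it ...
 *	}
 *
 *	guest_modes_append_default();
 *	while ((opt = getopt(argc, argv, "m:")) != -1) {
 *		if (opt == 'm')
 *			guest_modes_cmdline(optarg);
 *	}
 *	for_each_guest_mode(run_test, NULL);
 */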
| linux-master | tools/testing/selftests/kvm/lib/guest_modes.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/elf.c
*
* Copyright (C) 2018, Google LLC.
*/
#include "test_util.h"
#include <bits/endian.h>
#include <linux/elf.h>
#include "kvm_util.h"
static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp)
{
off_t offset_rv;
/* Open the ELF file. */
int fd;
fd = open(filename, O_RDONLY);
TEST_ASSERT(fd >= 0, "Failed to open ELF file,\n"
" filename: %s\n"
" rv: %i errno: %i", filename, fd, errno);
/* Read in and validate ELF Identification Record.
* The ELF Identification record is the first 16 (EI_NIDENT) bytes
* of the ELF header, which is at the beginning of the ELF file.
* For now it is only safe to read the first EI_NIDENT bytes. Once
* read and validated, the value of e_ehsize can be used to determine
* the real size of the ELF header.
*/
unsigned char ident[EI_NIDENT];
test_read(fd, ident, sizeof(ident));
TEST_ASSERT((ident[EI_MAG0] == ELFMAG0) && (ident[EI_MAG1] == ELFMAG1)
&& (ident[EI_MAG2] == ELFMAG2) && (ident[EI_MAG3] == ELFMAG3),
"ELF MAGIC Mismatch,\n"
" filename: %s\n"
" ident[EI_MAG0 - EI_MAG3]: %02x %02x %02x %02x\n"
" Expected: %02x %02x %02x %02x",
filename,
ident[EI_MAG0], ident[EI_MAG1], ident[EI_MAG2], ident[EI_MAG3],
ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3);
TEST_ASSERT(ident[EI_CLASS] == ELFCLASS64,
"Current implementation only able to handle ELFCLASS64,\n"
" filename: %s\n"
" ident[EI_CLASS]: %02x\n"
" expected: %02x",
filename,
ident[EI_CLASS], ELFCLASS64);
TEST_ASSERT(((BYTE_ORDER == LITTLE_ENDIAN)
&& (ident[EI_DATA] == ELFDATA2LSB))
|| ((BYTE_ORDER == BIG_ENDIAN)
&& (ident[EI_DATA] == ELFDATA2MSB)), "Current "
"implementation only able to handle\n"
"cases where the host and ELF file endianness\n"
"is the same:\n"
" host BYTE_ORDER: %u\n"
" host LITTLE_ENDIAN: %u\n"
" host BIG_ENDIAN: %u\n"
" ident[EI_DATA]: %u\n"
" ELFDATA2LSB: %u\n"
" ELFDATA2MSB: %u",
BYTE_ORDER, LITTLE_ENDIAN, BIG_ENDIAN,
ident[EI_DATA], ELFDATA2LSB, ELFDATA2MSB);
TEST_ASSERT(ident[EI_VERSION] == EV_CURRENT,
"Current implementation only able to handle current "
"ELF version,\n"
" filename: %s\n"
" ident[EI_VERSION]: %02x\n"
" expected: %02x",
filename, ident[EI_VERSION], EV_CURRENT);
/* Read in the ELF header.
* With the ELF Identification portion of the ELF header
* validated, especially that the value at EI_VERSION is
* as expected, it is now safe to read the entire ELF header.
*/
offset_rv = lseek(fd, 0, SEEK_SET);
TEST_ASSERT(offset_rv == 0, "Seek to ELF header failed,\n"
" rv: %zi expected: %i", offset_rv, 0);
test_read(fd, hdrp, sizeof(*hdrp));
TEST_ASSERT(hdrp->e_phentsize == sizeof(Elf64_Phdr),
"Unexpected physical header size,\n"
" hdrp->e_phentsize: %x\n"
" expected: %zx",
hdrp->e_phentsize, sizeof(Elf64_Phdr));
TEST_ASSERT(hdrp->e_shentsize == sizeof(Elf64_Shdr),
"Unexpected section header size,\n"
" hdrp->e_shentsize: %x\n"
" expected: %zx",
hdrp->e_shentsize, sizeof(Elf64_Shdr));
close(fd);
}
/* VM ELF Load
*
* Input Args:
* filename - Path to ELF file
*
* Output Args: None
*
* Input/Output Args:
* vm - Pointer to opaque type that describes the VM.
*
* Return: None, TEST_ASSERT failures for all error conditions
*
* Loads the program image of the ELF file specified by filename,
* into the virtual address space of the VM pointed to by vm. On entry
 * the VM must not be using any of the virtual address space used
 * by the image, and it must have sufficient available physical pages to
* back the virtual pages used to load the image.
*/
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
{
off_t offset, offset_rv;
Elf64_Ehdr hdr;
/* Open the ELF file. */
int fd;
fd = open(filename, O_RDONLY);
TEST_ASSERT(fd >= 0, "Failed to open ELF file,\n"
" filename: %s\n"
" rv: %i errno: %i", filename, fd, errno);
/* Read in the ELF header. */
elfhdr_get(filename, &hdr);
/* For each program header.
* The following ELF header members specify the location
* and size of the program headers:
*
* e_phoff - File offset to start of program headers
* e_phentsize - Size of each program header
* e_phnum - Number of program header entries
*/
for (unsigned int n1 = 0; n1 < hdr.e_phnum; n1++) {
/* Seek to the beginning of the program header. */
offset = hdr.e_phoff + (n1 * hdr.e_phentsize);
offset_rv = lseek(fd, offset, SEEK_SET);
TEST_ASSERT(offset_rv == offset,
"Failed to seek to beginning of program header %u,\n"
" filename: %s\n"
" rv: %jd errno: %i",
n1, filename, (intmax_t) offset_rv, errno);
/* Read in the program header. */
Elf64_Phdr phdr;
test_read(fd, &phdr, sizeof(phdr));
/* Skip if this header doesn't describe a loadable segment. */
if (phdr.p_type != PT_LOAD)
continue;
/* Allocate memory for this segment within the VM. */
TEST_ASSERT(phdr.p_memsz > 0, "Unexpected loadable segment "
"memsize of 0,\n"
" phdr index: %u p_memsz: 0x%" PRIx64,
n1, (uint64_t) phdr.p_memsz);
vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size);
vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1;
seg_vend |= vm->page_size - 1;
size_t seg_size = seg_vend - seg_vstart + 1;
vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart,
MEM_REGION_CODE);
TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
"virtual memory for segment at requested min addr,\n"
" segment idx: %u\n"
" seg_vstart: 0x%lx\n"
" vaddr: 0x%lx",
n1, seg_vstart, vaddr);
memset(addr_gva2hva(vm, vaddr), 0, seg_size);
/* TODO(lhuemill): Set permissions of each memory segment
* based on the least-significant 3 bits of phdr.p_flags.
*/
/* Load portion of initial state that is contained within
* the ELF file.
*/
if (phdr.p_filesz) {
offset_rv = lseek(fd, phdr.p_offset, SEEK_SET);
TEST_ASSERT(offset_rv == phdr.p_offset,
"Seek to program segment offset failed,\n"
" program header idx: %u errno: %i\n"
" offset_rv: 0x%jx\n"
" expected: 0x%jx\n",
n1, errno, (intmax_t) offset_rv,
(intmax_t) phdr.p_offset);
test_read(fd, addr_gva2hva(vm, phdr.p_vaddr),
phdr.p_filesz);
}
}
close(fd);
}
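/*
 * Illustrative sketch, not wired into any test: images are normally loaded
 * implicitly by vm_create() and friends, but kvm_vm_elf_load() can also be
 * called directly. The path below is hypothetical.
 */
static void __attribute__((unused)) example_elf_load(struct kvm_vm *vm)
{
	/*
	 * Worked example of the segment rounding above, assuming 4KB pages:
	 * p_vaddr 0x401234 with p_memsz 0x20 gives seg_vstart 0x401000,
	 * seg_vend 0x401fff and seg_size 0x1000.
	 */
	kvm_vm_elf_load(vm, "/path/to/guest_image.elf");
}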
| linux-master | tools/testing/selftests/kvm/lib/elf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020, Google LLC.
*/
#define _GNU_SOURCE
#include <inttypes.h>
#include <linux/bitmap.h>
#include "kvm_util.h"
#include "memstress.h"
#include "processor.h"
struct memstress_args memstress_args;
/*
* Guest virtual memory offset of the testing memory slot.
* Must not conflict with identity mapped test code.
*/
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
struct vcpu_thread {
/* The index of the vCPU. */
int vcpu_idx;
/* The pthread backing the vCPU. */
pthread_t thread;
/* Set to true once the vCPU thread is up and running. */
bool running;
};
/* The vCPU threads involved in this test. */
static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];
/* The function run by each vCPU thread, as provided by the test. */
static void (*vcpu_thread_fn)(struct memstress_vcpu_args *);
/* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running;
static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
/*
 * Continuously access the first 8 bytes of each page in the specified
 * region: each access is a read or a write per the configured
 * write_percent, in sequential or random order per random_access.
*/
void memstress_guest_code(uint32_t vcpu_idx)
{
struct memstress_args *args = &memstress_args;
struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
struct guest_random_state rand_state;
uint64_t gva;
uint64_t pages;
uint64_t addr;
uint64_t page;
int i;
rand_state = new_guest_random_state(args->random_seed + vcpu_idx);
gva = vcpu_args->gva;
pages = vcpu_args->pages;
/* Make sure vCPU args data structure is not corrupt. */
GUEST_ASSERT(vcpu_args->vcpu_idx == vcpu_idx);
while (true) {
for (i = 0; i < sizeof(memstress_args); i += args->guest_page_size)
(void) *((volatile char *)args + i);
for (i = 0; i < pages; i++) {
if (args->random_access)
page = guest_random_u32(&rand_state) % pages;
else
page = i;
addr = gva + (page * args->guest_page_size);
if (guest_random_u32(&rand_state) % 100 < args->write_percent)
*(uint64_t *)addr = 0x0123456789ABCDEF;
else
READ_ONCE(*(uint64_t *)addr);
}
GUEST_SYNC(1);
}
}
void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
struct kvm_vcpu *vcpus[],
uint64_t vcpu_memory_bytes,
bool partition_vcpu_memory_access)
{
struct memstress_args *args = &memstress_args;
struct memstress_vcpu_args *vcpu_args;
int i;
for (i = 0; i < nr_vcpus; i++) {
vcpu_args = &args->vcpu_args[i];
vcpu_args->vcpu = vcpus[i];
vcpu_args->vcpu_idx = i;
if (partition_vcpu_memory_access) {
vcpu_args->gva = guest_test_virt_mem +
(i * vcpu_memory_bytes);
vcpu_args->pages = vcpu_memory_bytes /
args->guest_page_size;
vcpu_args->gpa = args->gpa + (i * vcpu_memory_bytes);
} else {
vcpu_args->gva = guest_test_virt_mem;
vcpu_args->pages = (nr_vcpus * vcpu_memory_bytes) /
args->guest_page_size;
vcpu_args->gpa = args->gpa;
}
vcpu_args_set(vcpus[i], 1, i);
pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
i, vcpu_args->gpa, vcpu_args->gpa +
(vcpu_args->pages * args->guest_page_size));
}
}
struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
uint64_t vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access)
{
struct memstress_args *args = &memstress_args;
struct kvm_vm *vm;
uint64_t guest_num_pages, slot0_pages = 0;
uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
uint64_t region_end_gfn;
int i;
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
/* By default vCPUs will write to memory. */
args->write_percent = 100;
/*
* Snapshot the non-huge page size. This is used by the guest code to
* access/dirty pages at the logging granularity.
*/
args->guest_page_size = vm_guest_mode_params[mode].page_size;
guest_num_pages = vm_adjust_num_guest_pages(mode,
(nr_vcpus * vcpu_memory_bytes) / args->guest_page_size);
TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
"Guest memory size is not host page size aligned.");
TEST_ASSERT(vcpu_memory_bytes % args->guest_page_size == 0,
"Guest memory size is not guest page size aligned.");
TEST_ASSERT(guest_num_pages % slots == 0,
"Guest memory cannot be evenly divided into %d slots.",
slots);
/*
* If using nested, allocate extra pages for the nested page tables and
* in-memory data structures.
*/
if (args->nested)
slot0_pages += memstress_nested_pages(nr_vcpus);
/*
* Pass guest_num_pages to populate the page tables for test memory.
* The memory is also added to memslot 0, but that's a benign side
 * effect as KVM allows aliasing HVAs in memslots.
*/
vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages,
memstress_guest_code, vcpus);
args->vm = vm;
	/* Put the test region at the top of guest physical memory. */
region_end_gfn = vm->max_gfn + 1;
#ifdef __x86_64__
/*
* When running vCPUs in L2, restrict the test region to 48 bits to
* avoid needing 5-level page tables to identity map L2.
*/
if (args->nested)
region_end_gfn = min(region_end_gfn, (1UL << 48) / args->guest_page_size);
#endif
/*
* If there should be more memory in the guest test region than there
* can be pages in the guest, it will definitely cause problems.
*/
TEST_ASSERT(guest_num_pages < region_end_gfn,
"Requested more guest memory than address space allows.\n"
" guest pages: %" PRIx64 " max gfn: %" PRIx64
" nr_vcpus: %d wss: %" PRIx64 "]\n",
guest_num_pages, region_end_gfn - 1, nr_vcpus, vcpu_memory_bytes);
args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;
args->gpa = align_down(args->gpa, backing_src_pagesz);
#ifdef __s390x__
/* Align to 1M (segment size) */
args->gpa = align_down(args->gpa, 1 << 20);
#endif
args->size = guest_num_pages * args->guest_page_size;
pr_info("guest physical test memory: [0x%lx, 0x%lx)\n",
args->gpa, args->gpa + args->size);
/* Add extra memory slots for testing */
for (i = 0; i < slots; i++) {
uint64_t region_pages = guest_num_pages / slots;
vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;
vm_userspace_mem_region_add(vm, backing_src, region_start,
MEMSTRESS_MEM_SLOT_INDEX + i,
region_pages, 0);
}
/* Do mapping for the demand paging memory slot */
virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages);
memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
partition_vcpu_memory_access);
if (args->nested) {
pr_info("Configuring vCPUs to run in L2 (nested).\n");
memstress_setup_nested(vm, nr_vcpus, vcpus);
}
/* Export the shared variables to the guest. */
sync_global_to_guest(vm, memstress_args);
return vm;
}
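/*
 * Illustrative sketch, not part of the library: the lifecycle a test's
 * main() typically drives with the helpers in this file. The worker and
 * the sizing below are hypothetical stand-ins.
 */
static void example_vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
	/* A real worker would run the vCPU and handle ucalls here. */
}

static void __attribute__((unused)) example_memstress_lifecycle(void)
{
	/* 4 vCPUs with 1GiB each, one test memslot, anonymous backing. */
	struct kvm_vm *vm = memstress_create_vm(VM_MODE_DEFAULT, 4, 1ul << 30,
						1, VM_MEM_SRC_ANONYMOUS, true);

	memstress_start_vcpu_threads(4, example_vcpu_worker);
	/* ... measurement phase ... */
	memstress_join_vcpu_threads(4);
	memstress_destroy_vm(vm);
}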
void memstress_destroy_vm(struct kvm_vm *vm)
{
kvm_vm_free(vm);
}
void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
{
memstress_args.write_percent = write_percent;
sync_global_to_guest(vm, memstress_args.write_percent);
}
void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
{
memstress_args.random_seed = random_seed;
sync_global_to_guest(vm, memstress_args.random_seed);
}
void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
{
memstress_args.random_access = random_access;
sync_global_to_guest(vm, memstress_args.random_access);
}
uint64_t __weak memstress_nested_pages(int nr_vcpus)
{
return 0;
}
void __weak memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
{
pr_info("%s() not support on this architecture, skipping.\n", __func__);
exit(KSFT_SKIP);
}
static void *vcpu_thread_main(void *data)
{
struct vcpu_thread *vcpu = data;
int vcpu_idx = vcpu->vcpu_idx;
if (memstress_args.pin_vcpus)
kvm_pin_this_task_to_pcpu(memstress_args.vcpu_to_pcpu[vcpu_idx]);
WRITE_ONCE(vcpu->running, true);
/*
* Wait for all vCPU threads to be up and running before calling the test-
* provided vCPU thread function. This prevents thread creation (which
* requires taking the mmap_sem in write mode) from interfering with the
* guest faulting in its memory.
*/
while (!READ_ONCE(all_vcpu_threads_running))
;
vcpu_thread_fn(&memstress_args.vcpu_args[vcpu_idx]);
return NULL;
}
void memstress_start_vcpu_threads(int nr_vcpus,
void (*vcpu_fn)(struct memstress_vcpu_args *))
{
int i;
vcpu_thread_fn = vcpu_fn;
WRITE_ONCE(all_vcpu_threads_running, false);
WRITE_ONCE(memstress_args.stop_vcpus, false);
for (i = 0; i < nr_vcpus; i++) {
struct vcpu_thread *vcpu = &vcpu_threads[i];
vcpu->vcpu_idx = i;
WRITE_ONCE(vcpu->running, false);
pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
}
for (i = 0; i < nr_vcpus; i++) {
while (!READ_ONCE(vcpu_threads[i].running))
;
}
WRITE_ONCE(all_vcpu_threads_running, true);
}
void memstress_join_vcpu_threads(int nr_vcpus)
{
int i;
WRITE_ONCE(memstress_args.stop_vcpus, true);
for (i = 0; i < nr_vcpus; i++)
pthread_join(vcpu_threads[i].thread, NULL);
}
static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
{
int i;
for (i = 0; i < slots; i++) {
int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;
vm_mem_region_set_flags(vm, slot, flags);
}
}
void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots)
{
toggle_dirty_logging(vm, slots, true);
}
void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots)
{
toggle_dirty_logging(vm, slots, false);
}
void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
{
int i;
for (i = 0; i < slots; i++) {
int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
}
}
void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
int slots, uint64_t pages_per_slot)
{
int i;
for (i = 0; i < slots; i++) {
int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
}
}
unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot)
{
unsigned long **bitmaps;
int i;
bitmaps = malloc(slots * sizeof(bitmaps[0]));
TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");
for (i = 0; i < slots; i++) {
bitmaps[i] = bitmap_zalloc(pages_per_slot);
TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
}
return bitmaps;
}
void memstress_free_bitmaps(unsigned long *bitmaps[], int slots)
{
int i;
for (i = 0; i < slots; i++)
free(bitmaps[i]);
free(bitmaps);
}
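/*
 * Illustrative sketch (assuming "slots" test memslots of "pages_per_slot"
 * pages each): one dirty-logging round trip using the helpers above.
 */
static void __attribute__((unused))
example_dirty_log_round(struct kvm_vm *vm, int slots, uint64_t pages_per_slot)
{
	unsigned long **bitmaps = memstress_alloc_bitmaps(slots, pages_per_slot);

	memstress_enable_dirty_logging(vm, slots);
	/* ... let the vCPU workers dirty guest memory ... */
	memstress_get_dirty_log(vm, bitmaps, slots);
	memstress_clear_dirty_log(vm, bitmaps, slots, pages_per_slot);
	memstress_disable_dirty_logging(vm, slots);
	memstress_free_bitmaps(bitmaps, slots);
}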
| linux-master | tools/testing/selftests/kvm/lib/memstress.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test handler for the s390x DIAGNOSE 0x0318 instruction.
*
* Copyright (C) 2020, IBM
*/
#include "test_util.h"
#include "kvm_util.h"
#define ICPT_INSTRUCTION 0x04
#define IPA0_DIAG 0x8300
static void guest_code(void)
{
uint64_t diag318_info = 0x12345678;
asm volatile ("diag %0,0,0x318\n" : : "d" (diag318_info));
}
/*
* The DIAGNOSE 0x0318 instruction call must be handled via userspace. As such,
* we create an ad-hoc VM here to handle the instruction then extract the
* necessary data. It is up to the caller to decide what to do with that data.
*/
static uint64_t diag318_handler(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
uint64_t reg;
uint64_t diag318_info;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_run(vcpu);
run = vcpu->run;
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
TEST_ASSERT(run->s390_sieic.icptcode == ICPT_INSTRUCTION,
"Unexpected intercept code: 0x%x", run->s390_sieic.icptcode);
TEST_ASSERT((run->s390_sieic.ipa & 0xff00) == IPA0_DIAG,
"Unexpected IPA0 code: 0x%x", (run->s390_sieic.ipa & 0xff00));
reg = (run->s390_sieic.ipa & 0x00f0) >> 4;
diag318_info = run->s.regs.gprs[reg];
TEST_ASSERT(diag318_info != 0, "DIAGNOSE 0x0318 info not set");
kvm_vm_free(vm);
return diag318_info;
}
uint64_t get_diag318_info(void)
{
static uint64_t diag318_info;
static bool printed_skip;
/*
* If KVM does not support diag318, then return 0 to
* ensure tests do not break.
*/
if (!kvm_has_cap(KVM_CAP_S390_DIAG318)) {
if (!printed_skip) {
fprintf(stdout, "KVM_CAP_S390_DIAG318 not supported. "
"Skipping diag318 test.\n");
printed_skip = true;
}
return 0;
}
/*
* If a test has previously requested the diag318 info,
* then don't bother spinning up a temporary VM again.
*/
if (!diag318_info)
diag318_info = diag318_handler();
return diag318_info;
}
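/*
 * Illustrative sketch (hypothetical snippet): how a test might consume the
 * helper above. A zero return means KVM_CAP_S390_DIAG318 is unsupported
 * and the diag318 portion of the test should simply be skipped.
 */
static void __attribute__((unused)) example_use_diag318(void)
{
	uint64_t diag318_info = get_diag318_info();

	if (!diag318_info)
		return;
	/* ... program diag318_info into the VM under test and verify it ... */
}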
| linux-master | tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* KVM selftest s390x library code - CPU-related functions (page tables...)
*
* Copyright (C) 2019, Red Hat, Inc.
*/
#include "processor.h"
#include "kvm_util.h"
#define PAGES_PER_REGION 4
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
vm_paddr_t paddr;
TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
vm->page_size);
if (vm->pgd_created)
return;
paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
KVM_GUEST_PAGE_TABLE_MIN_PADDR,
vm->memslots[MEM_REGION_PT]);
memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
vm->pgd = paddr;
vm->pgd_created = true;
}
/*
* Allocate 4 pages for a region/segment table (ri < 4), or one page for
* a page table (ri == 4). Returns a suitable region/segment table entry
* which points to the freshly allocated pages.
*/
static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
{
uint64_t taddr;
taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size);
return (taddr & REGION_ENTRY_ORIGIN)
| (((4 - ri) << 2) & REGION_ENTRY_TYPE)
| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
{
int ri, idx;
uint64_t *entry;
TEST_ASSERT((gva % vm->page_size) == 0,
"Virtual address not on page boundary,\n"
" vaddr: 0x%lx vm->page_size: 0x%x",
gva, vm->page_size);
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
(gva >> vm->page_shift)),
"Invalid virtual address, vaddr: 0x%lx",
gva);
TEST_ASSERT((gpa % vm->page_size) == 0,
"Physical address not on page boundary,\n"
" paddr: 0x%lx vm->page_size: 0x%x",
gva, vm->page_size);
TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond beyond maximum supported,\n"
" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
gva, vm->max_gfn, vm->page_size);
/* Walk through region and segment tables */
entry = addr_gpa2hva(vm, vm->pgd);
for (ri = 1; ri <= 4; ri++) {
idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
if (entry[idx] & REGION_ENTRY_INVALID)
entry[idx] = virt_alloc_region(vm, ri);
entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
}
/* Fill in page table entry */
idx = (gva >> 12) & 0x0ffu; /* page index */
if (!(entry[idx] & PAGE_INVALID))
fprintf(stderr,
"WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
entry[idx] = gpa;
}
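/*
 * Informational sketch of the walk above: each of the four region/segment
 * levels consumes 11 bits of the virtual address, starting at bit 53, and
 * the final page index uses bits 19:12 (4KB pages assumed, as asserted
 * elsewhere in this file).
 */
static inline int __attribute__((unused))
example_s390_walk_index(uint64_t gva, int ri)
{
	if (ri <= 4)
		return (gva >> (64 - 11 * ri)) & 0x7ffu;	/* table level */
	return (gva >> 12) & 0x0ffu;				/* page index */
}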
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
int ri, idx;
uint64_t *entry;
TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
vm->page_size);
entry = addr_gpa2hva(vm, vm->pgd);
for (ri = 1; ri <= 4; ri++) {
idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
"No region mapping for vm virtual address 0x%lx",
gva);
entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
}
idx = (gva >> 12) & 0x0ffu; /* page index */
TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
"No page mapping for vm virtual address 0x%lx", gva);
return (entry[idx] & ~0xffful) + (gva & 0xffful);
}
static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
uint64_t ptea_start)
{
uint64_t *pte, ptea;
for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
pte = addr_gpa2hva(vm, ptea);
if (*pte & PAGE_INVALID)
continue;
fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
indent, "", ptea, *pte);
}
}
static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
uint64_t reg_tab_addr)
{
uint64_t addr, *entry;
for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) {
entry = addr_gpa2hva(vm, addr);
if (*entry & REGION_ENTRY_INVALID)
continue;
fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
addr, *entry);
if (*entry & REGION_ENTRY_TYPE) {
virt_dump_region(stream, vm, indent + 2,
*entry & REGION_ENTRY_ORIGIN);
} else {
virt_dump_ptes(stream, vm, indent + 2,
*entry & REGION_ENTRY_ORIGIN);
}
}
}
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
if (!vm->pgd_created)
return;
virt_dump_region(stream, vm, indent, vm->pgd);
}
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
void *guest_code)
{
size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
uint64_t stack_vaddr;
struct kvm_regs regs;
struct kvm_sregs sregs;
struct kvm_vcpu *vcpu;
struct kvm_run *run;
TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
vm->page_size);
stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
DEFAULT_GUEST_STACK_VADDR_MIN,
MEM_REGION_DATA);
vcpu = __vm_vcpu_add(vm, vcpu_id);
/* Setup guest registers */
vcpu_regs_get(vcpu, ®s);
regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160;
vcpu_regs_set(vcpu, ®s);
vcpu_sregs_get(vcpu, &sregs);
sregs.crs[0] |= 0x00040000; /* Enable floating point regs */
sregs.crs[1] = vm->pgd | 0xf; /* Primary region table */
vcpu_sregs_set(vcpu, &sregs);
run = vcpu->run;
run->psw_mask = 0x0400000180000000ULL; /* DAT enabled + 64 bit mode */
run->psw_addr = (uintptr_t)guest_code;
return vcpu;
}
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
struct kvm_regs regs;
int i;
TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
" num: %u\n",
num);
va_start(ap, num);
vcpu_regs_get(vcpu, ®s);
for (i = 0; i < num; i++)
regs.gprs[i + 2] = va_arg(ap, uint64_t);
vcpu_regs_set(vcpu, ®s);
va_end(ap);
}
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
}
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
}
| linux-master | tools/testing/selftests/kvm/lib/s390x/processor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ucall support. A ucall is a "hypercall to userspace".
*
* Copyright (C) 2019 Red Hat, Inc.
*/
#include "kvm_util.h"
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
if (run->exit_reason == KVM_EXIT_S390_SIEIC &&
run->s390_sieic.icptcode == 4 &&
(run->s390_sieic.ipa >> 8) == 0x83 && /* 0x83 means DIAGNOSE */
(run->s390_sieic.ipb >> 16) == 0x501) {
int reg = run->s390_sieic.ipa & 0xf;
return (void *)run->s.regs.gprs[reg];
}
return NULL;
}
| linux-master | tools/testing/selftests/kvm/lib/s390x/ucall.c |
// SPDX-License-Identifier: GPL-2.0
/*
* RISC-V code
*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
*/
#include <linux/compiler.h>
#include <assert.h>
#include "kvm_util.h"
#include "processor.h"
#define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
return (v + vm->page_size) & ~(vm->page_size - 1);
}
static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) <<
PGTBL_PAGE_SIZE_SHIFT;
}
static uint64_t ptrs_per_pte(struct kvm_vm *vm)
{
return PGTBL_PAGE_SIZE / sizeof(uint64_t);
}
static uint64_t pte_index_mask[] = {
PGTBL_L0_INDEX_MASK,
PGTBL_L1_INDEX_MASK,
PGTBL_L2_INDEX_MASK,
PGTBL_L3_INDEX_MASK,
};
static uint32_t pte_index_shift[] = {
PGTBL_L0_INDEX_SHIFT,
PGTBL_L1_INDEX_SHIFT,
PGTBL_L2_INDEX_SHIFT,
PGTBL_L3_INDEX_SHIFT,
};
static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
{
TEST_ASSERT(level > -1,
"Negative page table level (%d) not possible", level);
TEST_ASSERT(level < vm->pgtable_levels,
"Invalid page table level (%d)", level);
return (gva & pte_index_mask[level]) >> pte_index_shift[level];
}
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
size_t nr_pages = page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size;
if (vm->pgd_created)
return;
vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
KVM_GUEST_PAGE_TABLE_MIN_PADDR,
vm->memslots[MEM_REGION_PT]);
vm->pgd_created = true;
}
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
uint64_t *ptep, next_ppn;
int level = vm->pgtable_levels - 1;
TEST_ASSERT((vaddr % vm->page_size) == 0,
"Virtual address not on page boundary,\n"
" vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
(vaddr >> vm->page_shift)),
"Invalid virtual address, vaddr: 0x%lx", vaddr);
TEST_ASSERT((paddr % vm->page_size) == 0,
"Physical address not on page boundary,\n"
" paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond maximum supported,\n"
" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
paddr, vm->max_gfn, vm->page_size);
ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, vaddr, level) * 8;
if (!*ptep) {
next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT;
*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
PGTBL_PTE_VALID_MASK;
}
level--;
while (level > -1) {
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
pte_index(vm, vaddr, level) * 8;
if (!*ptep && level > 0) {
next_ppn = vm_alloc_page_table(vm) >>
PGTBL_PAGE_SIZE_SHIFT;
*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
PGTBL_PTE_VALID_MASK;
}
level--;
}
paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT;
*ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) |
PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;
}
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
uint64_t *ptep;
int level = vm->pgtable_levels - 1;
if (!vm->pgd_created)
goto unmapped_gva;
ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8;
if (!ptep)
goto unmapped_gva;
level--;
while (level > -1) {
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
pte_index(vm, gva, level) * 8;
if (!ptep)
goto unmapped_gva;
level--;
}
return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
unmapped_gva:
TEST_FAIL("No mapping for vm virtual address gva: 0x%lx level: %d",
gva, level);
exit(1);
}
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
uint64_t page, int level)
{
#ifdef DEBUG
static const char *const type[] = { "pte", "pmd", "pud", "p4d"};
uint64_t pte, *ptep;
if (level < 0)
return;
for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
ptep = addr_gpa2hva(vm, pte);
if (!*ptep)
continue;
fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "",
type[level], pte, *ptep, ptep);
pte_dump(stream, vm, indent + 1,
pte_addr(vm, *ptep), level - 1);
}
#endif
}
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
int level = vm->pgtable_levels - 1;
uint64_t pgd, *ptep;
if (!vm->pgd_created)
return;
for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pte(vm) * 8; pgd += 8) {
ptep = addr_gpa2hva(vm, pgd);
if (!*ptep)
continue;
fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "",
pgd, *ptep, ptep);
pte_dump(stream, vm, indent + 1,
pte_addr(vm, *ptep), level - 1);
}
}
void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu)
{
struct kvm_vm *vm = vcpu->vm;
unsigned long satp;
/*
	 * The RISC-V Sv48 MMU mode supports a 56-bit physical address
	 * space for a 48-bit virtual address space with a 4KB last-level
	 * page size.
*/
switch (vm->mode) {
case VM_MODE_P52V48_4K:
case VM_MODE_P48V48_4K:
case VM_MODE_P40V48_4K:
break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
satp |= SATP_MODE_48;
vcpu_set_reg(vcpu, RISCV_CSR_REG(satp), satp);
}
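/*
 * Worked example (informational): with the root table at GPA 0x80010000
 * and 4KB pages, the PPN field above is 0x80010000 >> 12 = 0x80010; OR-ing
 * in SATP_MODE_48 selects Sv48 translation for the guest.
 */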
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
struct kvm_riscv_core core;
vcpu_get_reg(vcpu, RISCV_CORE_REG(mode), &core.mode);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &core.regs.pc);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra), &core.regs.ra);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp), &core.regs.sp);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp), &core.regs.gp);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp), &core.regs.tp);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0), &core.regs.t0);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1), &core.regs.t1);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2), &core.regs.t2);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s0), &core.regs.s0);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s1), &core.regs.s1);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a0), &core.regs.a0);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a1), &core.regs.a1);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a2), &core.regs.a2);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a3), &core.regs.a3);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a4), &core.regs.a4);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a5), &core.regs.a5);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a6), &core.regs.a6);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a7), &core.regs.a7);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s2), &core.regs.s2);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s3), &core.regs.s3);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s4), &core.regs.s4);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s5), &core.regs.s5);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s6), &core.regs.s6);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s7), &core.regs.s7);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s8), &core.regs.s8);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s9), &core.regs.s9);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s10), &core.regs.s10);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s11), &core.regs.s11);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t3), &core.regs.t3);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4), &core.regs.t4);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t5), &core.regs.t5);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6), &core.regs.t6);
fprintf(stream,
" MODE: 0x%lx\n", core.mode);
fprintf(stream,
" PC: 0x%016lx RA: 0x%016lx SP: 0x%016lx GP: 0x%016lx\n",
core.regs.pc, core.regs.ra, core.regs.sp, core.regs.gp);
fprintf(stream,
" TP: 0x%016lx T0: 0x%016lx T1: 0x%016lx T2: 0x%016lx\n",
core.regs.tp, core.regs.t0, core.regs.t1, core.regs.t2);
fprintf(stream,
" S0: 0x%016lx S1: 0x%016lx A0: 0x%016lx A1: 0x%016lx\n",
core.regs.s0, core.regs.s1, core.regs.a0, core.regs.a1);
fprintf(stream,
" A2: 0x%016lx A3: 0x%016lx A4: 0x%016lx A5: 0x%016lx\n",
core.regs.a2, core.regs.a3, core.regs.a4, core.regs.a5);
fprintf(stream,
" A6: 0x%016lx A7: 0x%016lx S2: 0x%016lx S3: 0x%016lx\n",
core.regs.a6, core.regs.a7, core.regs.s2, core.regs.s3);
fprintf(stream,
" S4: 0x%016lx S5: 0x%016lx S6: 0x%016lx S7: 0x%016lx\n",
core.regs.s4, core.regs.s5, core.regs.s6, core.regs.s7);
fprintf(stream,
" S8: 0x%016lx S9: 0x%016lx S10: 0x%016lx S11: 0x%016lx\n",
core.regs.s8, core.regs.s9, core.regs.s10, core.regs.s11);
fprintf(stream,
" T3: 0x%016lx T4: 0x%016lx T5: 0x%016lx T6: 0x%016lx\n",
core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
}
static void __aligned(16) guest_unexp_trap(void)
{
sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
KVM_RISCV_SELFTESTS_SBI_UNEXP,
0, 0, 0, 0, 0, 0);
}
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
void *guest_code)
{
int r;
size_t stack_size;
unsigned long stack_vaddr;
unsigned long current_gp = 0;
struct kvm_mp_state mps;
struct kvm_vcpu *vcpu;
stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
vm->page_size;
stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
MEM_REGION_DATA);
vcpu = __vm_vcpu_add(vm, vcpu_id);
riscv_vcpu_mmu_setup(vcpu);
/*
* With SBI HSM support in KVM RISC-V, all secondary VCPUs are
* powered-off by default so we ensure that all secondary VCPUs
* are powered-on using KVM_SET_MP_STATE ioctl().
*/
mps.mp_state = KVM_MP_STATE_RUNNABLE;
r = __vcpu_ioctl(vcpu, KVM_SET_MP_STATE, &mps);
TEST_ASSERT(!r, "IOCTL KVM_SET_MP_STATE failed (error %d)", r);
/* Setup global pointer of guest to be same as the host */
asm volatile (
"add %0, gp, zero" : "=r" (current_gp) : : "memory");
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp);
/* Setup stack pointer and program counter of guest */
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
/* Setup default exception vector of guest */
vcpu_set_reg(vcpu, RISCV_CSR_REG(stvec), (unsigned long)guest_unexp_trap);
return vcpu;
}
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
uint64_t id = RISCV_CORE_REG(regs.a0);
int i;
TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
" num: %u\n", num);
va_start(ap, num);
for (i = 0; i < num; i++) {
switch (i) {
case 0:
id = RISCV_CORE_REG(regs.a0);
break;
case 1:
id = RISCV_CORE_REG(regs.a1);
break;
case 2:
id = RISCV_CORE_REG(regs.a2);
break;
case 3:
id = RISCV_CORE_REG(regs.a3);
break;
case 4:
id = RISCV_CORE_REG(regs.a4);
break;
case 5:
id = RISCV_CORE_REG(regs.a5);
break;
case 6:
id = RISCV_CORE_REG(regs.a6);
break;
case 7:
id = RISCV_CORE_REG(regs.a7);
break;
}
vcpu_set_reg(vcpu, id, va_arg(ap, uint64_t));
}
va_end(ap);
}
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
}
| linux-master | tools/testing/selftests/kvm/lib/riscv/processor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ucall support. A ucall is a "hypercall to userspace".
*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
*/
#include <linux/kvm.h>
#include "kvm_util.h"
#include "processor.h"
struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4,
unsigned long arg5)
{
register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
struct sbiret ret;
asm volatile (
"ecall"
: "+r" (a0), "+r" (a1)
: "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
: "memory");
ret.error = a0;
ret.value = a1;
return ret;
}
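/*
 * Illustrative sketch (hypothetical wrapper): issuing the selftests' ucall
 * SBI extension by hand from guest code. The ucall framework normally
 * wraps this, so tests rarely open-code it.
 */
static void __attribute__((unused)) example_sbi_ucall(void *uc)
{
	struct sbiret ret;

	ret = sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
			KVM_RISCV_SELFTESTS_SBI_UCALL,
			(unsigned long)uc, 0, 0, 0, 0, 0);
	(void)ret;	/* error/value are not used for ucalls */
}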
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
if (run->exit_reason == KVM_EXIT_RISCV_SBI &&
run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT) {
switch (run->riscv_sbi.function_id) {
case KVM_RISCV_SELFTESTS_SBI_UCALL:
return (void *)run->riscv_sbi.args[0];
case KVM_RISCV_SELFTESTS_SBI_UNEXP:
vcpu_dump(stderr, vcpu, 2);
TEST_ASSERT(0, "Unexpected trap taken by guest");
break;
default:
break;
}
}
return NULL;
}
| linux-master | tools/testing/selftests/kvm/lib/riscv/ucall.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ARM Generic Interrupt Controller (GIC) support
*/
#include <errno.h>
#include <linux/bits.h>
#include <linux/sizes.h>
#include "kvm_util.h"
#include <gic.h>
#include "gic_private.h"
#include "processor.h"
#include "spinlock.h"
static const struct gic_common_ops *gic_common_ops;
static struct spinlock gic_lock;
static void gic_cpu_init(unsigned int cpu, void *redist_base)
{
gic_common_ops->gic_cpu_init(cpu, redist_base);
}
static void
gic_dist_init(enum gic_type type, unsigned int nr_cpus, void *dist_base)
{
const struct gic_common_ops *gic_ops = NULL;
spin_lock(&gic_lock);
/* Distributor initialization is needed only once per VM */
if (gic_common_ops) {
spin_unlock(&gic_lock);
return;
}
if (type == GIC_V3)
gic_ops = &gicv3_ops;
GUEST_ASSERT(gic_ops);
gic_ops->gic_init(nr_cpus, dist_base);
gic_common_ops = gic_ops;
/* Make sure that the initialized data is visible to all the vCPUs */
dsb(sy);
spin_unlock(&gic_lock);
}
void gic_init(enum gic_type type, unsigned int nr_cpus,
void *dist_base, void *redist_base)
{
uint32_t cpu = guest_get_vcpuid();
GUEST_ASSERT(type < GIC_TYPE_MAX);
GUEST_ASSERT(dist_base);
GUEST_ASSERT(redist_base);
GUEST_ASSERT(nr_cpus);
gic_dist_init(type, nr_cpus, dist_base);
gic_cpu_init(cpu, redist_base);
}
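/*
 * Illustrative sketch (hypothetical base addresses): minimal guest-side
 * bring-up with the API above. The addresses must match whatever the host
 * mapped via vgic_v3_setup().
 */
static void __attribute__((unused)) example_guest_gic_bringup(void)
{
	gic_init(GIC_V3, 1, (void *)0x08000000UL, (void *)0x080a0000UL);
	gic_set_priority_mask(0xff);	/* unmask all priorities */
	gic_irq_enable(32);		/* first SPI */
}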
void gic_irq_enable(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_enable(intid);
}
void gic_irq_disable(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_disable(intid);
}
unsigned int gic_get_and_ack_irq(void)
{
uint64_t irqstat;
unsigned int intid;
GUEST_ASSERT(gic_common_ops);
irqstat = gic_common_ops->gic_read_iar();
intid = irqstat & GENMASK(23, 0);
return intid;
}
void gic_set_eoi(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_write_eoir(intid);
}
void gic_set_dir(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_write_dir(intid);
}
void gic_set_eoi_split(bool split)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_eoi_split(split);
}
void gic_set_priority_mask(uint64_t pmr)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_priority_mask(pmr);
}
void gic_set_priority(unsigned int intid, unsigned int prio)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_priority(intid, prio);
}
void gic_irq_set_active(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_set_active(intid);
}
void gic_irq_clear_active(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_clear_active(intid);
}
bool gic_irq_get_active(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
return gic_common_ops->gic_irq_get_active(intid);
}
void gic_irq_set_pending(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_set_pending(intid);
}
void gic_irq_clear_pending(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_clear_pending(intid);
}
bool gic_irq_get_pending(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
return gic_common_ops->gic_irq_get_pending(intid);
}
void gic_irq_set_config(unsigned int intid, bool is_edge)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_set_config(intid, is_edge);
}
| linux-master | tools/testing/selftests/kvm/lib/aarch64/gic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ARM Generic Interrupt Controller (GIC) v3 host support
*/
#include <linux/kvm.h>
#include <linux/sizes.h>
#include <asm/kvm_para.h>
#include <asm/kvm.h>
#include "kvm_util.h"
#include "vgic.h"
#include "gic.h"
#include "gic_v3.h"
/*
* vGIC-v3 default host setup
*
* Input args:
* vm - KVM VM
* nr_vcpus - Number of vCPUs supported by this VM
* gicd_base_gpa - Guest Physical Address of the Distributor region
* gicr_base_gpa - Guest Physical Address of the Redistributor region
*
* Output args: None
*
* Return: GIC file-descriptor or negative error code upon failure
*
* The function creates a vGIC-v3 device and maps the distributor and
* redistributor regions of the guest. Since it depends on the number of
* vCPUs for the VM, it must be called after all the vCPUs have been created.
*/
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
uint64_t gicd_base_gpa, uint64_t gicr_base_gpa)
{
int gic_fd;
uint64_t redist_attr;
struct list_head *iter;
unsigned int nr_gic_pages, nr_vcpus_created = 0;
TEST_ASSERT(nr_vcpus, "Number of vCPUs cannot be empty\n");
/*
	 * Make sure that the caller is in fact calling this
	 * function after all the vCPUs have been added.
*/
list_for_each(iter, &vm->vcpus)
nr_vcpus_created++;
TEST_ASSERT(nr_vcpus == nr_vcpus_created,
"Number of vCPUs requested (%u) doesn't match with the ones created for the VM (%u)\n",
nr_vcpus, nr_vcpus_created);
/* Distributor setup */
gic_fd = __kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
if (gic_fd < 0)
return gic_fd;
kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, &nr_irqs);
kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa);
nr_gic_pages = vm_calc_num_guest_pages(vm->mode, KVM_VGIC_V3_DIST_SIZE);
virt_map(vm, gicd_base_gpa, gicd_base_gpa, nr_gic_pages);
/* Redistributor setup */
redist_attr = REDIST_REGION_ATTR_ADDR(nr_vcpus, gicr_base_gpa, 0, 0);
kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &redist_attr);
nr_gic_pages = vm_calc_num_guest_pages(vm->mode,
KVM_VGIC_V3_REDIST_SIZE * nr_vcpus);
virt_map(vm, gicr_base_gpa, gicr_base_gpa, nr_gic_pages);
kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
return gic_fd;
}
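/*
 * Illustrative sketch (hypothetical GPAs and sizes): host-side setup once
 * all vCPUs have been added. The returned fd is what the kvm_irq_*()
 * helpers below operate on.
 */
static int __attribute__((unused)) example_vgic_setup(struct kvm_vm *vm)
{
	return vgic_v3_setup(vm, 1 /* nr_vcpus */, 64 /* nr_irqs */,
			     0x08000000ULL, 0x080a0000ULL);
}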
/* should only work for level sensitive interrupts */
int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
{
uint64_t attr = 32 * (intid / 32);
uint64_t index = intid % 32;
uint64_t val;
int ret;
ret = __kvm_device_attr_get(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
attr, &val);
if (ret != 0)
return ret;
val |= 1U << index;
ret = __kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
attr, &val);
return ret;
}
void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
{
int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, ret));
}
int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
{
uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK;
TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself "
"doesn't allow injecting SGIs. There's no mask for it.");
if (INTID_IS_PPI(intid))
irq |= KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT;
else
irq |= KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT;
return _kvm_irq_line(vm, irq, level);
}
void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
{
int ret = _kvm_arm_irq_line(vm, intid, level);
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
}
static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu,
uint64_t reg_off)
{
uint64_t reg = intid / 32;
uint64_t index = intid % 32;
uint64_t attr = reg_off + reg * 4;
uint64_t val;
bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid);
uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
: KVM_DEV_ARM_VGIC_GRP_DIST_REGS;
if (intid_is_private) {
/* TODO: only vcpu 0 implemented for now. */
assert(vcpu->id == 0);
attr += SZ_64K;
}
/* Check that the addr part of the attr is within 32 bits. */
assert((attr & ~KVM_DEV_ARM_VGIC_OFFSET_MASK) == 0);
/*
* All calls will succeed, even with invalid intid's, as long as the
* addr part of the attr is within 32 bits (checked above). An invalid
* intid will just make the read/writes point to above the intended
* register space (i.e., ICPENDR after ISPENDR).
*/
kvm_device_attr_get(gic_fd, group, attr, &val);
val |= 1ULL << index;
kvm_device_attr_set(gic_fd, group, attr, &val);
}
void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR);
}
void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER);
}
| linux-master | tools/testing/selftests/kvm/lib/aarch64/vgic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ARM64 Spinlock support
*/
#include <stdint.h>
#include "spinlock.h"
void spin_lock(struct spinlock *lock)
{
int val, res;
asm volatile(
"1: ldaxr %w0, [%2]\n"
" cbnz %w0, 1b\n"
" mov %w0, #1\n"
" stxr %w1, %w0, [%2]\n"
" cbnz %w1, 1b\n"
: "=&r" (val), "=&r" (res)
: "r" (&lock->v)
: "memory");
}
void spin_unlock(struct spinlock *lock)
{
asm volatile("stlr wzr, [%0]\n" : : "r" (&lock->v) : "memory");
}
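/*
 * Illustrative sketch: guarding a shared counter. spin_lock()/spin_unlock()
 * provide acquire/release ordering via LDAXR/STLR, so the increment cannot
 * be reordered outside the critical section.
 */
static struct spinlock example_lock;
static int example_counter;

static void __attribute__((unused)) example_locked_increment(void)
{
	spin_lock(&example_lock);
	example_counter++;
	spin_unlock(&example_lock);
}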
| linux-master | tools/testing/selftests/kvm/lib/aarch64/spinlock.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ARM Generic Interrupt Controller (GIC) v3 support
*/
#include <linux/sizes.h>
#include "kvm_util.h"
#include "processor.h"
#include "delay.h"
#include "gic_v3.h"
#include "gic_private.h"
struct gicv3_data {
void *dist_base;
void *redist_base[GICV3_MAX_CPUS];
unsigned int nr_cpus;
unsigned int nr_spis;
};
#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K)
#define DIST_BIT (1U << 31)
enum gicv3_intid_range {
SGI_RANGE,
PPI_RANGE,
SPI_RANGE,
INVALID_RANGE,
};
static struct gicv3_data gicv3_data;
static void gicv3_gicd_wait_for_rwp(void)
{
unsigned int count = 100000; /* 1s */
while (readl(gicv3_data.dist_base + GICD_CTLR) & GICD_CTLR_RWP) {
GUEST_ASSERT(count--);
udelay(10);
}
}
static void gicv3_gicr_wait_for_rwp(void *redist_base)
{
unsigned int count = 100000; /* 1s */
while (readl(redist_base + GICR_CTLR) & GICR_CTLR_RWP) {
GUEST_ASSERT(count--);
udelay(10);
}
}
static void gicv3_wait_for_rwp(uint32_t cpu_or_dist)
{
if (cpu_or_dist & DIST_BIT)
gicv3_gicd_wait_for_rwp();
else
gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu_or_dist]);
}
static enum gicv3_intid_range get_intid_range(unsigned int intid)
{
switch (intid) {
case 0 ... 15:
return SGI_RANGE;
case 16 ... 31:
return PPI_RANGE;
case 32 ... 1019:
return SPI_RANGE;
}
/* We should not be reaching here */
GUEST_ASSERT(0);
return INVALID_RANGE;
}
static uint64_t gicv3_read_iar(void)
{
uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);
dsb(sy);
return irqstat;
}
static void gicv3_write_eoir(uint32_t irq)
{
write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
isb();
}
static void gicv3_write_dir(uint32_t irq)
{
write_sysreg_s(irq, SYS_ICC_DIR_EL1);
isb();
}
static void gicv3_set_priority_mask(uint64_t mask)
{
write_sysreg_s(mask, SYS_ICC_PMR_EL1);
}
static void gicv3_set_eoi_split(bool split)
{
uint32_t val;
/*
* All other fields are read-only, so no need to read CTLR first. In
* fact, the kernel does the same.
*/
val = split ? (1U << 1) : 0;
write_sysreg_s(val, SYS_ICC_CTLR_EL1);
isb();
}
uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset)
{
void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
return readl(base + offset);
}
void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val)
{
void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
writel(reg_val, base + offset);
}
uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask)
{
return gicv3_reg_readl(cpu_or_dist, offset) & mask;
}
void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
uint32_t mask, uint32_t reg_val)
{
uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;
tmp |= (reg_val & mask);
gicv3_reg_writel(cpu_or_dist, offset, tmp);
}
/*
* We use a single offset for the distributor and redistributor maps as they
* have the same value in both. The only exceptions are registers that only
* exist in one and not the other, like GICR_WAKER that doesn't exist in the
* distributor map. Such registers are conveniently marked as reserved in the
* map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being
* marked as "Reserved" in the Distributor map.
*/
static void gicv3_access_reg(uint32_t intid, uint64_t offset,
uint32_t reg_bits, uint32_t bits_per_field,
bool write, uint32_t *val)
{
uint32_t cpu = guest_get_vcpuid();
enum gicv3_intid_range intid_range = get_intid_range(intid);
uint32_t fields_per_reg, index, mask, shift;
uint32_t cpu_or_dist;
GUEST_ASSERT(bits_per_field <= reg_bits);
GUEST_ASSERT(!write || *val < (1U << bits_per_field));
/*
* This function does not support 64 bit accesses. Just asserting here
* until we implement readq/writeq.
*/
GUEST_ASSERT(reg_bits == 32);
fields_per_reg = reg_bits / bits_per_field;
index = intid % fields_per_reg;
shift = index * bits_per_field;
mask = ((1U << bits_per_field) - 1) << shift;
/* Set offset to the actual register holding intid's config. */
offset += (intid / fields_per_reg) * (reg_bits / 8);
cpu_or_dist = (intid_range == SPI_RANGE) ? DIST_BIT : cpu;
if (write)
gicv3_setl_fields(cpu_or_dist, offset, mask, *val << shift);
*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
}
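/*
 * Worked example (informational): for intid 42 in GICD_IPRIORITYR (32-bit
 * registers, 8 bits per field) there are 4 fields per register, so the
 * access lands at byte offset 42 / 4 * 4 = 40 from GICD_IPRIORITYR, field
 * index 42 % 4 = 2, i.e. shift 16 and mask 0x00ff0000. Since intid 42 is
 * an SPI, the distributor map is used.
 */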
static void gicv3_write_reg(uint32_t intid, uint64_t offset,
uint32_t reg_bits, uint32_t bits_per_field, uint32_t val)
{
gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, true, &val);
}
static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset,
uint32_t reg_bits, uint32_t bits_per_field)
{
uint32_t val;
gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, false, &val);
return val;
}
static void gicv3_set_priority(uint32_t intid, uint32_t prio)
{
gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
}
/* Sets the intid to be level-sensitive or edge-triggered. */
static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
{
uint32_t val;
/* N/A for private interrupts. */
GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
val = is_edge ? 2 : 0;
gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
}
static void gicv3_irq_enable(uint32_t intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
uint32_t cpu = guest_get_vcpuid();
gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}
static void gicv3_irq_disable(uint32_t intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
uint32_t cpu = guest_get_vcpuid();
gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}
static void gicv3_irq_set_active(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
}
static void gicv3_irq_clear_active(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
}
static bool gicv3_irq_get_active(uint32_t intid)
{
return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
}
static void gicv3_irq_set_pending(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
}
static void gicv3_irq_clear_pending(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
}
static bool gicv3_irq_get_pending(uint32_t intid)
{
return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
}
static void gicv3_enable_redist(void *redist_base)
{
uint32_t val = readl(redist_base + GICR_WAKER);
unsigned int count = 100000; /* 1s */
val &= ~GICR_WAKER_ProcessorSleep;
writel(val, redist_base + GICR_WAKER);
/* Wait until the processor is 'active' */
while (readl(redist_base + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
GUEST_ASSERT(count--);
udelay(10);
}
}
static inline void *gicr_base_cpu(void *redist_base, uint32_t cpu)
{
/* Align all the redistributors sequentially */
return redist_base + cpu * SZ_64K * 2;
}
static void gicv3_cpu_init(unsigned int cpu, void *redist_base)
{
void *sgi_base;
unsigned int i;
void *redist_base_cpu;
GUEST_ASSERT(cpu < gicv3_data.nr_cpus);
redist_base_cpu = gicr_base_cpu(redist_base, cpu);
sgi_base = sgi_base_from_redist(redist_base_cpu);
gicv3_enable_redist(redist_base_cpu);
/*
* Mark all the SGI and PPI interrupts as non-secure Group-1.
* Also, deactivate and disable them.
*/
writel(~0, sgi_base + GICR_IGROUPR0);
writel(~0, sgi_base + GICR_ICACTIVER0);
writel(~0, sgi_base + GICR_ICENABLER0);
/* Set a default priority for all the SGIs and PPIs */
for (i = 0; i < 32; i += 4)
writel(GICD_INT_DEF_PRI_X4,
sgi_base + GICR_IPRIORITYR0 + i);
gicv3_gicr_wait_for_rwp(redist_base_cpu);
/* Enable the GIC system register (ICC_*) access */
write_sysreg_s(read_sysreg_s(SYS_ICC_SRE_EL1) | ICC_SRE_EL1_SRE,
SYS_ICC_SRE_EL1);
/* Set a default priority threshold */
write_sysreg_s(ICC_PMR_DEF_PRIO, SYS_ICC_PMR_EL1);
/* Enable non-secure Group-1 interrupts */
write_sysreg_s(ICC_IGRPEN1_EL1_ENABLE, SYS_ICC_GRPEN1_EL1);
gicv3_data.redist_base[cpu] = redist_base_cpu;
}
static void gicv3_dist_init(void)
{
void *dist_base = gicv3_data.dist_base;
unsigned int i;
/* Disable the distributor until we set things up */
writel(0, dist_base + GICD_CTLR);
gicv3_gicd_wait_for_rwp();
/*
* Mark all the SPI interrupts as non-secure Group-1.
* Also, deactivate and disable them.
*/
for (i = 32; i < gicv3_data.nr_spis; i += 32) {
writel(~0, dist_base + GICD_IGROUPR + i / 8);
writel(~0, dist_base + GICD_ICACTIVER + i / 8);
writel(~0, dist_base + GICD_ICENABLER + i / 8);
}
/* Set a default priority for all the SPIs */
for (i = 32; i < gicv3_data.nr_spis; i += 4)
writel(GICD_INT_DEF_PRI_X4,
dist_base + GICD_IPRIORITYR + i);
/* Wait for the settings to sync-in */
gicv3_gicd_wait_for_rwp();
/* Finally, enable the distributor globally with ARE */
writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A |
GICD_CTLR_ENABLE_G1, dist_base + GICD_CTLR);
gicv3_gicd_wait_for_rwp();
}
static void gicv3_init(unsigned int nr_cpus, void *dist_base)
{
GUEST_ASSERT(nr_cpus <= GICV3_MAX_CPUS);
gicv3_data.nr_cpus = nr_cpus;
gicv3_data.dist_base = dist_base;
gicv3_data.nr_spis = GICD_TYPER_SPIS(
readl(gicv3_data.dist_base + GICD_TYPER));
if (gicv3_data.nr_spis > 1020)
gicv3_data.nr_spis = 1020;
/*
* Initialize only the distributor for now.
* The redistributor and CPU interfaces are initialized
* later for every PE.
*/
gicv3_dist_init();
}
const struct gic_common_ops gicv3_ops = {
.gic_init = gicv3_init,
.gic_cpu_init = gicv3_cpu_init,
.gic_irq_enable = gicv3_irq_enable,
.gic_irq_disable = gicv3_irq_disable,
.gic_read_iar = gicv3_read_iar,
.gic_write_eoir = gicv3_write_eoir,
.gic_write_dir = gicv3_write_dir,
.gic_set_priority_mask = gicv3_set_priority_mask,
.gic_set_eoi_split = gicv3_set_eoi_split,
.gic_set_priority = gicv3_set_priority,
.gic_irq_set_active = gicv3_irq_set_active,
.gic_irq_clear_active = gicv3_irq_clear_active,
.gic_irq_get_active = gicv3_irq_get_active,
.gic_irq_set_pending = gicv3_irq_set_pending,
.gic_irq_clear_pending = gicv3_irq_clear_pending,
.gic_irq_get_pending = gicv3_irq_get_pending,
.gic_irq_set_config = gicv3_irq_set_config,
};
| linux-master | tools/testing/selftests/kvm/lib/aarch64/gic_v3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* AArch64 code
*
* Copyright (C) 2018, Red Hat, Inc.
*/
#include <linux/compiler.h>
#include <assert.h>
#include "guest_modes.h"
#include "kvm_util.h"
#include "processor.h"
#include <linux/bitfield.h>
#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000
static vm_vaddr_t exception_handlers;
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
return (v + vm->page_size) & ~(vm->page_size - 1);
}
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;
return (gva >> shift) & mask;
}
static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
TEST_ASSERT(vm->pgtable_levels == 4,
"Mode %d does not have 4 page table levels", vm->mode);
return (gva >> shift) & mask;
}
static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
TEST_ASSERT(vm->pgtable_levels >= 3,
"Mode %d does not have >= 3 page table levels", vm->mode);
return (gva >> shift) & mask;
}
static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
return (gva >> vm->page_shift) & mask;
}
static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
{
uint64_t pte;
pte = pa & GENMASK(47, vm->page_shift);
if (vm->page_shift == 16)
pte |= FIELD_GET(GENMASK(51, 48), pa) << 12;
pte |= attrs;
return pte;
}
static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
{
uint64_t pa;
pa = pte & GENMASK(47, vm->page_shift);
if (vm->page_shift == 16)
pa |= FIELD_GET(GENMASK(15, 12), pte) << 48;
return pa;
}
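/*
 * Worked example (informational): with 64K pages (page_shift == 16), a
 * 52-bit PA such as 0x000f000012340000 is encoded by addr_pte() as bits
 * 47:16 kept in place plus bits 51:48 (0xf here) packed into PTE bits
 * 15:12; pte_addr() reverses that packing.
 */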
static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
return 1 << (vm->va_bits - shift);
}
static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
return 1 << (vm->page_shift - 3);
}
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
size_t nr_pages = page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size;
if (vm->pgd_created)
return;
vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
KVM_GUEST_PAGE_TABLE_MIN_PADDR,
vm->memslots[MEM_REGION_PT]);
vm->pgd_created = true;
}
static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
uint64_t flags)
{
uint8_t attr_idx = flags & 7;
uint64_t *ptep;
TEST_ASSERT((vaddr % vm->page_size) == 0,
"Virtual address not on page boundary,\n"
" vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
(vaddr >> vm->page_shift)),
"Invalid virtual address, vaddr: 0x%lx", vaddr);
TEST_ASSERT((paddr % vm->page_size) == 0,
"Physical address not on page boundary,\n"
" paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond beyond maximum supported,\n"
" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
paddr, vm->max_gfn, vm->page_size);
ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
if (!*ptep)
*ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
switch (vm->pgtable_levels) {
case 4:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
if (!*ptep)
*ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
/* fall through */
case 3:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
if (!*ptep)
*ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
/* fall through */
case 2:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
break;
default:
TEST_FAIL("Page table levels must be 2, 3, or 4");
}
*ptep = addr_pte(vm, paddr, (attr_idx << 2) | (1 << 10) | 3); /* AF */
}
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
uint64_t attr_idx = MT_NORMAL;
_virt_pg_map(vm, vaddr, paddr, attr_idx);
}
uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
uint64_t *ptep;
if (!vm->pgd_created)
goto unmapped_gva;
ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
if (!ptep)
goto unmapped_gva;
switch (vm->pgtable_levels) {
case 4:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
if (!ptep)
goto unmapped_gva;
/* fall through */
case 3:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
if (!ptep)
goto unmapped_gva;
/* fall through */
case 2:
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
if (!ptep)
goto unmapped_gva;
break;
default:
TEST_FAIL("Page table levels must be 2, 3, or 4");
}
return ptep;
unmapped_gva:
TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
exit(EXIT_FAILURE);
}
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
uint64_t *ptep = virt_get_pte_hva(vm, gva);
return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG
static const char * const type[] = { "", "pud", "pmd", "pte" };
uint64_t pte, *ptep;
if (level == 4)
return;
for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
ptep = addr_gpa2hva(vm, pte);
if (!*ptep)
continue;
fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
}
#endif
}
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
int level = 4 - (vm->pgtable_levels - 1);
uint64_t pgd, *ptep;
if (!vm->pgd_created)
return;
for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
ptep = addr_gpa2hva(vm, pgd);
if (!*ptep)
continue;
fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
}
}
void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
struct kvm_vcpu_init default_init = { .target = -1, };
struct kvm_vm *vm = vcpu->vm;
uint64_t sctlr_el1, tcr_el1, ttbr0_el1;
if (!init)
init = &default_init;
if (init->target == -1) {
struct kvm_vcpu_init preferred;
vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
init->target = preferred.target;
}
vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);
/*
* Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
* registers, which the variable argument list macros do.
*/
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
/* Configure base granule size */
switch (vm->mode) {
case VM_MODE_P52V48_4K:
TEST_FAIL("AArch64 does not support 4K sized pages "
"with 52-bit physical address ranges");
case VM_MODE_PXXV48_4K:
TEST_FAIL("AArch64 does not support 4K sized pages "
"with ANY-bit physical address ranges");
case VM_MODE_P52V48_64K:
case VM_MODE_P48V48_64K:
case VM_MODE_P40V48_64K:
case VM_MODE_P36V48_64K:
tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
break;
case VM_MODE_P48V48_16K:
case VM_MODE_P40V48_16K:
case VM_MODE_P36V48_16K:
case VM_MODE_P36V47_16K:
tcr_el1 |= 2ul << 14; /* TG0 = 16KB */
break;
case VM_MODE_P48V48_4K:
case VM_MODE_P40V48_4K:
case VM_MODE_P36V48_4K:
tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
ttbr0_el1 = vm->pgd & GENMASK(47, vm->page_shift);
/* Configure output size */
switch (vm->mode) {
case VM_MODE_P52V48_64K:
tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->pgd) << 2;
break;
case VM_MODE_P48V48_4K:
case VM_MODE_P48V48_16K:
case VM_MODE_P48V48_64K:
tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
break;
case VM_MODE_P40V48_4K:
case VM_MODE_P40V48_16K:
case VM_MODE_P40V48_64K:
tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
break;
case VM_MODE_P36V48_4K:
case VM_MODE_P36V48_16K:
case VM_MODE_P36V48_64K:
case VM_MODE_P36V47_16K:
tcr_el1 |= 1ul << 32; /* IPS = 36 bits */
break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
	/* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */
tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), ttbr0_el1);
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
}
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
uint64_t pstate, pc;
vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate);
vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
indent, "", pstate, pc);
}
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
struct kvm_vcpu_init *init, void *guest_code)
{
size_t stack_size;
uint64_t stack_vaddr;
struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
vm->page_size;
stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
MEM_REGION_DATA);
aarch64_vcpu_setup(vcpu, init);
vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
return vcpu;
}
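/*
 * Example usage (sketch): tests that need non-default vCPU features pass
 * their own init instead of going through vm_arch_vcpu_add(), e.g. to
 * enable the PMU:
 *
 *	struct kvm_vcpu_init init = { .target = -1, };
 *
 *	init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
 *	vcpu = aarch64_vcpu_add(vm, 0, &init, guest_code);
 */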
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
void *guest_code)
{
return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
}
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
int i;
TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
" num: %u\n", num);
va_start(ap, num);
for (i = 0; i < num; i++) {
vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
va_arg(ap, uint64_t));
}
va_end(ap);
}
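/*
 * Sketch: per AAPCS64 the values land in x0-x7, so the guest entry point
 * receives them as ordinary C parameters:
 *
 *	static void guest_code(uint64_t a0, uint64_t a1) { ... }
 *
 *	vcpu_args_set(vcpu, 2, 0xcafe, 0xbeef);
 */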
void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
{
ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
while (1)
;
}
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
struct ucall uc;
if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
return;
if (uc.args[2]) /* valid_ec */ {
assert(VECTOR_IS_SYNC(uc.args[0]));
TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
uc.args[0], uc.args[1]);
} else {
assert(!VECTOR_IS_SYNC(uc.args[0]));
TEST_FAIL("Unexpected exception (vector:0x%lx)",
uc.args[0]);
}
}
struct handlers {
handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
};
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
extern char vectors;
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
}
void route_exception(struct ex_regs *regs, int vector)
{
struct handlers *handlers = (struct handlers *)exception_handlers;
bool valid_ec;
int ec = 0;
switch (vector) {
case VECTOR_SYNC_CURRENT:
case VECTOR_SYNC_LOWER_64:
ec = (read_sysreg(esr_el1) >> ESR_EC_SHIFT) & ESR_EC_MASK;
valid_ec = true;
break;
case VECTOR_IRQ_CURRENT:
case VECTOR_IRQ_LOWER_64:
case VECTOR_FIQ_CURRENT:
case VECTOR_FIQ_LOWER_64:
case VECTOR_ERROR_CURRENT:
case VECTOR_ERROR_LOWER_64:
ec = 0;
valid_ec = false;
break;
default:
valid_ec = false;
goto unexpected_exception;
}
if (handlers && handlers->exception_handlers[vector][ec])
return handlers->exception_handlers[vector][ec](regs);
unexpected_exception:
kvm_exit_unexpected_exception(vector, ec, valid_ec);
}
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
vm->page_size, MEM_REGION_DATA);
*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}
void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
void (*handler)(struct ex_regs *))
{
struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
assert(VECTOR_IS_SYNC(vector));
assert(vector < VECTOR_NUM);
assert(ec < ESR_EC_NUM);
handlers->exception_handlers[vector][ec] = handler;
}
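/*
 * Example (sketch): catching BRK instructions taken at the current EL;
 * ESR_EC_BRK_INS is assumed to be among the EC values in processor.h,
 * and the handler simply steps over the 4-byte BRK:
 *
 *	static void brk_handler(struct ex_regs *regs)
 *	{
 *		regs->pc += 4;
 *	}
 *
 *	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_EC_BRK_INS,
 *				brk_handler);
 */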
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
void (*handler)(struct ex_regs *))
{
struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
assert(!VECTOR_IS_SYNC(vector));
assert(vector < VECTOR_NUM);
handlers->exception_handlers[vector][0] = handler;
}
uint32_t guest_get_vcpuid(void)
{
return read_sysreg(tpidr_el1);
}
void aarch64_get_supported_page_sizes(uint32_t ipa,
bool *ps4k, bool *ps16k, bool *ps64k)
{
struct kvm_vcpu_init preferred_init;
int kvm_fd, vm_fd, vcpu_fd, err;
uint64_t val;
struct kvm_one_reg reg = {
.id = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
.addr = (uint64_t)&val,
};
kvm_fd = open_kvm_dev_path_or_exit();
vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, (void *)(unsigned long)ipa);
TEST_ASSERT(vm_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm_fd));
vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
TEST_ASSERT(vcpu_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu_fd));
err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_PREFERRED_TARGET, err));
err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_VCPU_INIT, err));
	err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd));
*ps4k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN4), val) != 0xf;
*ps64k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN64), val) == 0;
*ps16k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN16), val) != 0;
close(vcpu_fd);
close(vm_fd);
close(kvm_fd);
}
#define __smccc_call(insn, function_id, arg0, arg1, arg2, arg3, arg4, arg5, \
arg6, res) \
asm volatile("mov w0, %w[function_id]\n" \
"mov x1, %[arg0]\n" \
"mov x2, %[arg1]\n" \
"mov x3, %[arg2]\n" \
"mov x4, %[arg3]\n" \
"mov x5, %[arg4]\n" \
"mov x6, %[arg5]\n" \
"mov x7, %[arg6]\n" \
#insn "#0\n" \
"mov %[res0], x0\n" \
"mov %[res1], x1\n" \
"mov %[res2], x2\n" \
"mov %[res3], x3\n" \
: [res0] "=r"(res->a0), [res1] "=r"(res->a1), \
[res2] "=r"(res->a2), [res3] "=r"(res->a3) \
: [function_id] "r"(function_id), [arg0] "r"(arg0), \
[arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3), \
[arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6) \
: "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7")
void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
uint64_t arg6, struct arm_smccc_res *res)
{
__smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
arg6, res);
}
void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
uint64_t arg6, struct arm_smccc_res *res)
{
__smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
arg6, res);
}
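/*
 * Example (sketch): querying the PSCI version from guest code; 0x84000000
 * is PSCI_VERSION per the SMCCC/PSCI specs (PSCI_0_2_FN_PSCI_VERSION in
 * the UAPI headers):
 *
 *	struct arm_smccc_res res;
 *
 *	smccc_hvc(0x84000000, 0, 0, 0, 0, 0, 0, 0, &res);
 */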
void kvm_selftest_arch_init(void)
{
/*
* arm64 doesn't have a true default mode, so start by computing the
* available IPA space and page sizes early.
*/
guest_modes_append_default();
}
void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
/*
* arm64 selftests use only TTBR0_EL1, meaning that the valid VA space
* is [0, 2^(64 - TCR_EL1.T0SZ)).
*/
sparsebit_set_num(vm->vpages_valid, 0,
(1ULL << vm->va_bits) >> vm->page_shift);
}
| linux-master | tools/testing/selftests/kvm/lib/aarch64/processor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ucall support. A ucall is a "hypercall to userspace".
*
* Copyright (C) 2018, Red Hat, Inc.
*/
#include "kvm_util.h"
vm_vaddr_t *ucall_exit_mmio_addr;
void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
virt_map(vm, mmio_gva, mmio_gpa, 1);
vm->ucall_mmio_addr = mmio_gpa;
write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);
}
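/*
 * Guest-side counterpart (minimal sketch): a ucall is triggered by writing
 * the address of the ucall struct to the shared MMIO page, which the host
 * side below recognizes as a KVM_EXIT_MMIO on ucall_mmio_addr:
 *
 *	void ucall_arch_do_ucall(vm_vaddr_t uc)
 *	{
 *		WRITE_ONCE(*ucall_exit_mmio_addr, uc);
 *	}
 */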
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
if (run->exit_reason == KVM_EXIT_MMIO &&
run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {
TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t),
"Unexpected ucall exit mmio address access");
return (void *)(*((uint64_t *)run->mmio.data));
}
return NULL;
}
| linux-master | tools/testing/selftests/kvm/lib/aarch64/ucall.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hyper-V specific functions.
*
* Copyright (C) 2021, Red Hat Inc.
*/
#include <stdint.h>
#include "processor.h"
#include "hyperv.h"
struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
vm_vaddr_t *p_hv_pages_gva)
{
vm_vaddr_t hv_pages_gva = vm_vaddr_alloc_page(vm);
struct hyperv_test_pages *hv = addr_gva2hva(vm, hv_pages_gva);
/* Setup of a region of guest memory for the VP Assist page. */
hv->vp_assist = (void *)vm_vaddr_alloc_page(vm);
hv->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->vp_assist);
hv->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->vp_assist);
/* Setup of a region of guest memory for the partition assist page. */
hv->partition_assist = (void *)vm_vaddr_alloc_page(vm);
hv->partition_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->partition_assist);
hv->partition_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->partition_assist);
/* Setup of a region of guest memory for the enlightened VMCS. */
hv->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm);
hv->enlightened_vmcs_hva = addr_gva2hva(vm, (uintptr_t)hv->enlightened_vmcs);
hv->enlightened_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)hv->enlightened_vmcs);
*p_hv_pages_gva = hv_pages_gva;
return hv;
}
int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
{
uint64_t val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val);
current_vp_assist = vp_assist;
return 0;
}
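/*
 * Example usage (sketch): a guest that wants eVMCS enables the VP assist
 * page that vcpu_alloc_hyperv_test_pages() carved out for it:
 *
 *	enable_vp_assist(hv->vp_assist_gpa, hv->vp_assist);
 */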
| linux-master | tools/testing/selftests/kvm/lib/x86_64/hyperv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/x86_64/vmx.c
*
* Copyright (C) 2018, Google LLC.
*/
#include <asm/msr-index.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#define PAGE_SHIFT_4K 12
#define KVM_EPT_PAGE_TABLE_MIN_PADDR 0x1c0000
bool enable_evmcs;
struct hv_enlightened_vmcs *current_evmcs;
struct hv_vp_assist_page *current_vp_assist;
struct eptPageTableEntry {
uint64_t readable:1;
uint64_t writable:1;
uint64_t executable:1;
uint64_t memory_type:3;
uint64_t ignore_pat:1;
uint64_t page_size:1;
uint64_t accessed:1;
uint64_t dirty:1;
uint64_t ignored_11_10:2;
uint64_t address:40;
uint64_t ignored_62_52:11;
uint64_t suppress_ve:1;
};
struct eptPageTablePointer {
uint64_t memory_type:3;
uint64_t page_walk_length:3;
uint64_t ad_enabled:1;
uint64_t reserved_11_07:5;
uint64_t address:40;
uint64_t reserved_63_52:12;
};
int vcpu_enable_evmcs(struct kvm_vcpu *vcpu)
{
uint16_t evmcs_ver;
vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
(unsigned long)&evmcs_ver);
/* KVM should return supported EVMCS version range */
TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
(evmcs_ver & 0xff) > 0,
"Incorrect EVMCS version range: %x:%x\n",
evmcs_ver & 0xff, evmcs_ver >> 8);
return evmcs_ver;
}
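/*
 * Sketch: per the assertion above, the returned value packs the supported
 * range as (max << 8) | min, so a KVM that only speaks eVMCS version 1
 * hands back 0x0101:
 *
 *	uint16_t ver = vcpu_enable_evmcs(vcpu);
 *	uint8_t min = ver & 0xff, max = ver >> 8;
 */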
/* Allocate memory regions for nested VMX tests.
*
* Input Args:
* vm - The VM to allocate guest-virtual addresses in.
*
* Output Args:
* p_vmx_gva - The guest virtual address for the struct vmx_pages.
*
* Return:
* Pointer to structure with the addresses of the VMX areas.
*/
struct vmx_pages *
vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
{
vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
/* Setup of a region of guest memory for the vmxon region. */
vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
/* Setup of a region of guest memory for a vmcs. */
vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);
/* Setup of a region of guest memory for the MSR bitmap. */
vmx->msr = (void *)vm_vaddr_alloc_page(vm);
vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
memset(vmx->msr_hva, 0, getpagesize());
/* Setup of a region of guest memory for the shadow VMCS. */
vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm);
vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);
/* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */
vmx->vmread = (void *)vm_vaddr_alloc_page(vm);
vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
memset(vmx->vmread_hva, 0, getpagesize());
vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm);
vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
memset(vmx->vmwrite_hva, 0, getpagesize());
*p_vmx_gva = vmx_gva;
return vmx;
}
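/*
 * Typical usage (sketch): the returned HVA pointer is for host-side setup,
 * while the GVA is handed to the guest so it can reach the same pages:
 *
 *	vm_vaddr_t vmx_gva;
 *	struct vmx_pages *vmx = vcpu_alloc_vmx(vm, &vmx_gva);
 *
 *	vcpu_args_set(vcpu, 1, vmx_gva);
 */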
bool prepare_for_vmx_operation(struct vmx_pages *vmx)
{
uint64_t feature_control;
uint64_t required;
unsigned long cr0;
unsigned long cr4;
/*
* Ensure bits in CR0 and CR4 are valid in VMX operation:
* - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx.
* - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx.
*/
__asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory");
cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1);
cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0);
__asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory");
__asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory");
cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1);
cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0);
/* Enable VMX operation */
cr4 |= X86_CR4_VMXE;
__asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory");
/*
* Configure IA32_FEATURE_CONTROL MSR to allow VMXON:
* Bit 0: Lock bit. If clear, VMXON causes a #GP.
* Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON
* outside of SMX causes a #GP.
*/
required = FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
required |= FEAT_CTL_LOCKED;
feature_control = rdmsr(MSR_IA32_FEAT_CTL);
if ((feature_control & required) != required)
wrmsr(MSR_IA32_FEAT_CTL, feature_control | required);
/* Enter VMX root operation. */
*(uint32_t *)(vmx->vmxon) = vmcs_revision();
if (vmxon(vmx->vmxon_gpa))
return false;
return true;
}
bool load_vmcs(struct vmx_pages *vmx)
{
/* Load a VMCS. */
*(uint32_t *)(vmx->vmcs) = vmcs_revision();
if (vmclear(vmx->vmcs_gpa))
return false;
if (vmptrld(vmx->vmcs_gpa))
return false;
/* Setup shadow VMCS, do not load it yet. */
*(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
if (vmclear(vmx->shadow_vmcs_gpa))
return false;
return true;
}
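/*
 * Guest-side flow (sketch): a minimal L1 brings up L2 with the helpers
 * above plus prepare_vmcs() below; l2_guest_code, l2_stack and
 * L2_STACK_SIZE stand in for the test's own L2 entry point and stack:
 *
 *	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
 *	GUEST_ASSERT(load_vmcs(vmx));
 *	prepare_vmcs(vmx, l2_guest_code, &l2_stack[L2_STACK_SIZE]);
 *	GUEST_ASSERT(!vmlaunch());
 */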
static bool ept_vpid_cap_supported(uint64_t mask)
{
return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask;
}
bool ept_1g_pages_supported(void)
{
return ept_vpid_cap_supported(VMX_EPT_VPID_CAP_1G_PAGES);
}
/*
* Initialize the control fields to the most basic settings possible.
*/
static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
{
uint32_t sec_exec_ctl = 0;
vmwrite(VIRTUAL_PROCESSOR_ID, 0);
vmwrite(POSTED_INTR_NV, 0);
vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));
if (vmx->eptp_gpa) {
uint64_t ept_paddr;
struct eptPageTablePointer eptp = {
.memory_type = VMX_BASIC_MEM_TYPE_WB,
.page_walk_length = 3, /* + 1 */
.ad_enabled = ept_vpid_cap_supported(VMX_EPT_VPID_CAP_AD_BITS),
.address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
};
memcpy(&ept_paddr, &eptp, sizeof(ept_paddr));
vmwrite(EPT_POINTER, ept_paddr);
sec_exec_ctl |= SECONDARY_EXEC_ENABLE_EPT;
}
if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, sec_exec_ctl))
vmwrite(CPU_BASED_VM_EXEC_CONTROL,
rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
else {
vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS));
GUEST_ASSERT(!sec_exec_ctl);
}
vmwrite(EXCEPTION_BITMAP, 0);
vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
vmwrite(CR3_TARGET_COUNT, 0);
vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) |
VM_EXIT_HOST_ADDR_SPACE_SIZE); /* 64-bit host */
vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) |
VM_ENTRY_IA32E_MODE); /* 64-bit guest */
vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
vmwrite(TPR_THRESHOLD, 0);
vmwrite(CR0_GUEST_HOST_MASK, 0);
vmwrite(CR4_GUEST_HOST_MASK, 0);
vmwrite(CR0_READ_SHADOW, get_cr0());
vmwrite(CR4_READ_SHADOW, get_cr4());
vmwrite(MSR_BITMAP, vmx->msr_gpa);
vmwrite(VMREAD_BITMAP, vmx->vmread_gpa);
vmwrite(VMWRITE_BITMAP, vmx->vmwrite_gpa);
}
/*
* Initialize the host state fields based on the current host state, with
* the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch
* or vmresume.
*/
static inline void init_vmcs_host_state(void)
{
uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS);
vmwrite(HOST_ES_SELECTOR, get_es());
vmwrite(HOST_CS_SELECTOR, get_cs());
vmwrite(HOST_SS_SELECTOR, get_ss());
vmwrite(HOST_DS_SELECTOR, get_ds());
vmwrite(HOST_FS_SELECTOR, get_fs());
vmwrite(HOST_GS_SELECTOR, get_gs());
vmwrite(HOST_TR_SELECTOR, get_tr());
if (exit_controls & VM_EXIT_LOAD_IA32_PAT)
vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT));
if (exit_controls & VM_EXIT_LOAD_IA32_EFER)
vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER));
if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
vmwrite(HOST_IA32_PERF_GLOBAL_CTRL,
rdmsr(MSR_CORE_PERF_GLOBAL_CTRL));
vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS));
vmwrite(HOST_CR0, get_cr0());
vmwrite(HOST_CR3, get_cr3());
vmwrite(HOST_CR4, get_cr4());
vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE));
vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE));
vmwrite(HOST_TR_BASE,
get_desc64_base((struct desc64 *)(get_gdt().address + get_tr())));
vmwrite(HOST_GDTR_BASE, get_gdt().address);
vmwrite(HOST_IDTR_BASE, get_idt().address);
vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP));
vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP));
}
/*
* Initialize the guest state fields essentially as a clone of
* the host state fields. Some host state fields have fixed
* values, and we set the corresponding guest state fields accordingly.
*/
static inline void init_vmcs_guest_state(void *rip, void *rsp)
{
vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR));
vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR));
vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR));
vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR));
vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR));
vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR));
vmwrite(GUEST_LDTR_SELECTOR, 0);
vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR));
vmwrite(GUEST_INTR_STATUS, 0);
vmwrite(GUEST_PML_INDEX, 0);
vmwrite(VMCS_LINK_POINTER, -1ll);
vmwrite(GUEST_IA32_DEBUGCTL, 0);
vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT));
vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER));
vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL,
vmreadz(HOST_IA32_PERF_GLOBAL_CTRL));
vmwrite(GUEST_ES_LIMIT, -1);
vmwrite(GUEST_CS_LIMIT, -1);
vmwrite(GUEST_SS_LIMIT, -1);
vmwrite(GUEST_DS_LIMIT, -1);
vmwrite(GUEST_FS_LIMIT, -1);
vmwrite(GUEST_GS_LIMIT, -1);
vmwrite(GUEST_LDTR_LIMIT, -1);
vmwrite(GUEST_TR_LIMIT, 0x67);
vmwrite(GUEST_GDTR_LIMIT, 0xffff);
vmwrite(GUEST_IDTR_LIMIT, 0xffff);
vmwrite(GUEST_ES_AR_BYTES,
vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093);
vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
vmwrite(GUEST_SS_AR_BYTES, 0xc093);
vmwrite(GUEST_DS_AR_BYTES,
vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093);
vmwrite(GUEST_FS_AR_BYTES,
vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093);
vmwrite(GUEST_GS_AR_BYTES,
vmreadz(GUEST_GS_SELECTOR) == 0 ? 0x10000 : 0xc093);
vmwrite(GUEST_LDTR_AR_BYTES, 0x10000);
vmwrite(GUEST_TR_AR_BYTES, 0x8b);
vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
vmwrite(GUEST_ACTIVITY_STATE, 0);
vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS));
vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0);
vmwrite(GUEST_CR0, vmreadz(HOST_CR0));
vmwrite(GUEST_CR3, vmreadz(HOST_CR3));
vmwrite(GUEST_CR4, vmreadz(HOST_CR4));
vmwrite(GUEST_ES_BASE, 0);
vmwrite(GUEST_CS_BASE, 0);
vmwrite(GUEST_SS_BASE, 0);
vmwrite(GUEST_DS_BASE, 0);
vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE));
vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE));
vmwrite(GUEST_LDTR_BASE, 0);
vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE));
vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE));
vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE));
vmwrite(GUEST_DR7, 0x400);
vmwrite(GUEST_RSP, (uint64_t)rsp);
vmwrite(GUEST_RIP, (uint64_t)rip);
vmwrite(GUEST_RFLAGS, 2);
vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0);
vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP));
vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP));
}
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
{
init_vmcs_control_fields(vmx);
init_vmcs_host_state();
init_vmcs_guest_state(guest_rip, guest_rsp);
}
static void nested_create_pte(struct kvm_vm *vm,
struct eptPageTableEntry *pte,
uint64_t nested_paddr,
uint64_t paddr,
int current_level,
int target_level)
{
if (!pte->readable) {
pte->writable = true;
pte->readable = true;
pte->executable = true;
pte->page_size = (current_level == target_level);
if (pte->page_size)
pte->address = paddr >> vm->page_shift;
else
pte->address = vm_alloc_page_table(vm) >> vm->page_shift;
} else {
/*
* Entry already present. Assert that the caller doesn't want
* a hugepage at this level, and that there isn't a hugepage at
* this level.
*/
TEST_ASSERT(current_level != target_level,
"Cannot create hugepage at level: %u, nested_paddr: 0x%lx\n",
current_level, nested_paddr);
TEST_ASSERT(!pte->page_size,
"Cannot create page table at level: %u, nested_paddr: 0x%lx\n",
current_level, nested_paddr);
}
}
void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr, int target_level)
{
const uint64_t page_size = PG_LEVEL_SIZE(target_level);
struct eptPageTableEntry *pt = vmx->eptp_hva, *pte;
uint16_t index;
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
TEST_ASSERT((nested_paddr >> 48) == 0,
"Nested physical address 0x%lx requires 5-level paging",
nested_paddr);
TEST_ASSERT((nested_paddr % page_size) == 0,
"Nested physical address not on page boundary,\n"
" nested_paddr: 0x%lx page_size: 0x%lx",
nested_paddr, page_size);
TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond beyond maximum supported,\n"
" nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
paddr, vm->max_gfn, vm->page_size);
TEST_ASSERT((paddr % page_size) == 0,
"Physical address not on page boundary,\n"
" paddr: 0x%lx page_size: 0x%lx",
paddr, page_size);
TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond beyond maximum supported,\n"
" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
paddr, vm->max_gfn, vm->page_size);
for (int level = PG_LEVEL_512G; level >= PG_LEVEL_4K; level--) {
index = (nested_paddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
pte = &pt[index];
nested_create_pte(vm, pte, nested_paddr, paddr, level, target_level);
if (pte->page_size)
break;
pt = addr_gpa2hva(vm, pte->address * vm->page_size);
}
/*
* For now mark these as accessed and dirty because the only
* testcase we have needs that. Can be reconsidered later.
*/
pte->accessed = true;
pte->dirty = true;
}
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr)
{
__nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
}
/*
* Map a range of EPT guest physical addresses to the VM's physical address
*
* Input Args:
* vm - Virtual Machine
* nested_paddr - Nested guest physical address to map
* paddr - VM Physical Address
* size - The size of the range to map
* level - The level at which to map the range
*
* Output Args: None
*
* Return: None
*
* Within the VM given by vm, creates a nested guest translation for the
* page range starting at nested_paddr to the page range starting at paddr.
*/
void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr, uint64_t size,
int level)
{
size_t page_size = PG_LEVEL_SIZE(level);
size_t npages = size / page_size;
	TEST_ASSERT(nested_paddr + size > nested_paddr, "Nested paddr overflow");
TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
while (npages--) {
__nested_pg_map(vmx, vm, nested_paddr, paddr, level);
nested_paddr += page_size;
paddr += page_size;
}
}
void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr, uint64_t size)
{
__nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
}
/* Prepare an identity extended page table that maps all the
* physical pages in VM.
*/
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t memslot)
{
sparsebit_idx_t i, last;
struct userspace_mem_region *region =
memslot2region(vm, memslot);
i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
last = i + (region->region.memory_size >> vm->page_shift);
for (;;) {
i = sparsebit_next_clear(region->unused_phy_pages, i);
if (i > last)
break;
nested_map(vmx, vm,
(uint64_t)i << vm->page_shift,
(uint64_t)i << vm->page_shift,
1 << vm->page_shift);
}
}
/* Identity map a region with 1GiB Pages. */
void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t addr, uint64_t size)
{
__nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
}
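/*
 * Example (sketch): identity-mapping the first GiB of L2's physical
 * address space with a single 1GiB EPT entry:
 *
 *	prepare_eptp(vmx, vm, 0);
 *	nested_identity_map_1g(vmx, vm, 0, 1ULL << 30);
 */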
bool kvm_cpu_has_ept(void)
{
uint64_t ctrl;
ctrl = kvm_get_feature_msr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32;
if (!(ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
return false;
ctrl = kvm_get_feature_msr(MSR_IA32_VMX_PROCBASED_CTLS2) >> 32;
return ctrl & SECONDARY_EXEC_ENABLE_EPT;
}
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot)
{
TEST_ASSERT(kvm_cpu_has_ept(), "KVM doesn't support nested EPT");
vmx->eptp = (void *)vm_vaddr_alloc_page(vm);
vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
}
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm)
{
vmx->apic_access = (void *)vm_vaddr_alloc_page(vm);
vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access);
}
| linux-master | tools/testing/selftests/kvm/lib/x86_64/vmx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/x86_64/processor.c
*
* Copyright (C) 2018, Google LLC.
*/
#include "linux/bitmap.h"
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#ifndef NUM_INTERRUPTS
#define NUM_INTERRUPTS 256
#endif
#define DEFAULT_CODE_SELECTOR 0x8
#define DEFAULT_DATA_SELECTOR 0x10
#define MAX_NR_CPUID_ENTRIES 100
vm_vaddr_t exception_handlers;
bool host_cpu_is_amd;
bool host_cpu_is_intel;
static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
{
fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
"rcx: 0x%.16llx rdx: 0x%.16llx\n",
indent, "",
regs->rax, regs->rbx, regs->rcx, regs->rdx);
fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
"rsp: 0x%.16llx rbp: 0x%.16llx\n",
indent, "",
regs->rsi, regs->rdi, regs->rsp, regs->rbp);
fprintf(stream, "%*sr8: 0x%.16llx r9: 0x%.16llx "
"r10: 0x%.16llx r11: 0x%.16llx\n",
indent, "",
regs->r8, regs->r9, regs->r10, regs->r11);
fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
"r14: 0x%.16llx r15: 0x%.16llx\n",
indent, "",
regs->r12, regs->r13, regs->r14, regs->r15);
fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
indent, "",
regs->rip, regs->rflags);
}
static void segment_dump(FILE *stream, struct kvm_segment *segment,
uint8_t indent)
{
fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
"selector: 0x%.4x type: 0x%.2x\n",
indent, "", segment->base, segment->limit,
segment->selector, segment->type);
fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
"db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
indent, "", segment->present, segment->dpl,
segment->db, segment->s, segment->l);
fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
"unusable: 0x%.2x padding: 0x%.2x\n",
indent, "", segment->g, segment->avl,
segment->unusable, segment->padding);
}
static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
uint8_t indent)
{
fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
"padding: 0x%.4x 0x%.4x 0x%.4x\n",
indent, "", dtable->base, dtable->limit,
dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}
static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
{
unsigned int i;
fprintf(stream, "%*scs:\n", indent, "");
segment_dump(stream, &sregs->cs, indent + 2);
fprintf(stream, "%*sds:\n", indent, "");
segment_dump(stream, &sregs->ds, indent + 2);
fprintf(stream, "%*ses:\n", indent, "");
segment_dump(stream, &sregs->es, indent + 2);
fprintf(stream, "%*sfs:\n", indent, "");
segment_dump(stream, &sregs->fs, indent + 2);
fprintf(stream, "%*sgs:\n", indent, "");
segment_dump(stream, &sregs->gs, indent + 2);
fprintf(stream, "%*sss:\n", indent, "");
segment_dump(stream, &sregs->ss, indent + 2);
fprintf(stream, "%*str:\n", indent, "");
segment_dump(stream, &sregs->tr, indent + 2);
fprintf(stream, "%*sldt:\n", indent, "");
segment_dump(stream, &sregs->ldt, indent + 2);
fprintf(stream, "%*sgdt:\n", indent, "");
dtable_dump(stream, &sregs->gdt, indent + 2);
fprintf(stream, "%*sidt:\n", indent, "");
dtable_dump(stream, &sregs->idt, indent + 2);
fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
"cr3: 0x%.16llx cr4: 0x%.16llx\n",
indent, "",
sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
"apic_base: 0x%.16llx\n",
indent, "",
sregs->cr8, sregs->efer, sregs->apic_base);
fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
fprintf(stream, "%*s%.16llx\n", indent + 2, "",
sregs->interrupt_bitmap[i]);
}
}
bool kvm_is_tdp_enabled(void)
{
if (host_cpu_is_intel)
return get_kvm_intel_param_bool("ept");
else
return get_kvm_amd_param_bool("npt");
}
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
/* If needed, create page map l4 table. */
if (!vm->pgd_created) {
vm->pgd = vm_alloc_page_table(vm);
vm->pgd_created = true;
}
}
static void *virt_get_pte(struct kvm_vm *vm, uint64_t *parent_pte,
uint64_t vaddr, int level)
{
uint64_t pt_gpa = PTE_GET_PA(*parent_pte);
uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
TEST_ASSERT((*parent_pte & PTE_PRESENT_MASK) || parent_pte == &vm->pgd,
"Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
level + 1, vaddr);
return &page_table[index];
}
static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
uint64_t *parent_pte,
uint64_t vaddr,
uint64_t paddr,
int current_level,
int target_level)
{
uint64_t *pte = virt_get_pte(vm, parent_pte, vaddr, current_level);
if (!(*pte & PTE_PRESENT_MASK)) {
*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
if (current_level == target_level)
*pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
else
*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
} else {
/*
* Entry already present. Assert that the caller doesn't want
* a hugepage at this level, and that there isn't a hugepage at
* this level.
*/
TEST_ASSERT(current_level != target_level,
"Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
current_level, vaddr);
TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
"Cannot create page table at level: %u, vaddr: 0x%lx\n",
current_level, vaddr);
}
return pte;
}
void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
{
const uint64_t pg_size = PG_LEVEL_SIZE(level);
uint64_t *pml4e, *pdpe, *pde;
uint64_t *pte;
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
"Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
TEST_ASSERT((vaddr % pg_size) == 0,
"Virtual address not aligned,\n"
"vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
"Invalid virtual address, vaddr: 0x%lx", vaddr);
TEST_ASSERT((paddr % pg_size) == 0,
"Physical address not aligned,\n"
" paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond maximum supported,\n"
" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
paddr, vm->max_gfn, vm->page_size);
/*
* Allocate upper level page tables, if not already present. Return
* early if a hugepage was created.
*/
pml4e = virt_create_upper_pte(vm, &vm->pgd, vaddr, paddr, PG_LEVEL_512G, level);
if (*pml4e & PTE_LARGE_MASK)
return;
pdpe = virt_create_upper_pte(vm, pml4e, vaddr, paddr, PG_LEVEL_1G, level);
if (*pdpe & PTE_LARGE_MASK)
return;
pde = virt_create_upper_pte(vm, pdpe, vaddr, paddr, PG_LEVEL_2M, level);
if (*pde & PTE_LARGE_MASK)
return;
/* Fill in page table entry. */
pte = virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
"PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
}
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
__virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);
}
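/*
 * Sketch: hugepages just name the target level explicitly; a single 2MiB
 * mapping (vaddr and paddr both 2MiB aligned) is:
 *
 *	__virt_pg_map(vm, vaddr, paddr, PG_LEVEL_2M);
 */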
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
uint64_t nr_bytes, int level)
{
uint64_t pg_size = PG_LEVEL_SIZE(level);
uint64_t nr_pages = nr_bytes / pg_size;
int i;
TEST_ASSERT(nr_bytes % pg_size == 0,
"Region size not aligned: nr_bytes: 0x%lx, page size: 0x%lx",
nr_bytes, pg_size);
for (i = 0; i < nr_pages; i++) {
__virt_pg_map(vm, vaddr, paddr, level);
vaddr += pg_size;
paddr += pg_size;
}
}
static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
{
if (*pte & PTE_LARGE_MASK) {
TEST_ASSERT(*level == PG_LEVEL_NONE ||
*level == current_level,
"Unexpected hugepage at level %d\n", current_level);
*level = current_level;
}
return *level == current_level;
}
uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
int *level)
{
uint64_t *pml4e, *pdpe, *pde;
TEST_ASSERT(*level >= PG_LEVEL_NONE && *level < PG_LEVEL_NUM,
"Invalid PG_LEVEL_* '%d'", *level);
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
(vaddr >> vm->page_shift)),
"Invalid virtual address, vaddr: 0x%lx",
vaddr);
/*
* Based on the mode check above there are 48 bits in the vaddr, so
* shift 16 to sign extend the last bit (bit-47),
*/
TEST_ASSERT(vaddr == (((int64_t)vaddr << 16) >> 16),
"Canonical check failed. The virtual address is invalid.");
pml4e = virt_get_pte(vm, &vm->pgd, vaddr, PG_LEVEL_512G);
if (vm_is_target_pte(pml4e, level, PG_LEVEL_512G))
return pml4e;
pdpe = virt_get_pte(vm, pml4e, vaddr, PG_LEVEL_1G);
if (vm_is_target_pte(pdpe, level, PG_LEVEL_1G))
return pdpe;
pde = virt_get_pte(vm, pdpe, vaddr, PG_LEVEL_2M);
if (vm_is_target_pte(pde, level, PG_LEVEL_2M))
return pde;
return virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
}
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
{
int level = PG_LEVEL_4K;
return __vm_get_page_table_entry(vm, vaddr, &level);
}
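/*
 * Example (sketch): tests poke guest PTEs through the returned host
 * pointer, e.g. clearing PRESENT to force a page-walk failure on the
 * guest's next access:
 *
 *	uint64_t *pte = vm_get_page_table_entry(vm, gva);
 *
 *	*pte &= ~PTE_PRESENT_MASK;
 */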
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
uint64_t *pml4e, *pml4e_start;
uint64_t *pdpe, *pdpe_start;
uint64_t *pde, *pde_start;
uint64_t *pte, *pte_start;
if (!vm->pgd_created)
return;
fprintf(stream, "%*s "
" no\n", indent, "");
fprintf(stream, "%*s index hvaddr gpaddr "
"addr w exec dirty\n",
indent, "");
pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd);
for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
pml4e = &pml4e_start[n1];
if (!(*pml4e & PTE_PRESENT_MASK))
continue;
fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
" %u\n",
indent, "",
pml4e - pml4e_start, pml4e,
addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
!!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK));
pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
pdpe = &pdpe_start[n2];
if (!(*pdpe & PTE_PRESENT_MASK))
continue;
fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10llx "
"%u %u\n",
indent, "",
pdpe - pdpe_start, pdpe,
addr_hva2gpa(vm, pdpe),
PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK),
!!(*pdpe & PTE_NX_MASK));
pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
pde = &pde_start[n3];
if (!(*pde & PTE_PRESENT_MASK))
continue;
fprintf(stream, "%*spde 0x%-3zx %p "
"0x%-12lx 0x%-10llx %u %u\n",
indent, "", pde - pde_start, pde,
addr_hva2gpa(vm, pde),
PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK),
!!(*pde & PTE_NX_MASK));
pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
pte = &pte_start[n4];
if (!(*pte & PTE_PRESENT_MASK))
continue;
fprintf(stream, "%*spte 0x%-3zx %p "
"0x%-12lx 0x%-10llx %u %u "
" %u 0x%-10lx\n",
indent, "",
pte - pte_start, pte,
addr_hva2gpa(vm, pte),
PTE_GET_PFN(*pte),
!!(*pte & PTE_WRITABLE_MASK),
!!(*pte & PTE_NX_MASK),
!!(*pte & PTE_DIRTY_MASK),
((uint64_t) n1 << 27)
| ((uint64_t) n2 << 18)
| ((uint64_t) n3 << 9)
| ((uint64_t) n4));
}
}
}
}
}
/*
* Set Unusable Segment
*
* Input Args: None
*
* Output Args:
* segp - Pointer to segment register
*
* Return: None
*
* Sets the segment register pointed to by @segp to an unusable state.
*/
static void kvm_seg_set_unusable(struct kvm_segment *segp)
{
memset(segp, 0, sizeof(*segp));
segp->unusable = true;
}
static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
{
void *gdt = addr_gva2hva(vm, vm->gdt);
struct desc64 *desc = gdt + (segp->selector >> 3) * 8;
desc->limit0 = segp->limit & 0xFFFF;
desc->base0 = segp->base & 0xFFFF;
desc->base1 = segp->base >> 16;
desc->type = segp->type;
desc->s = segp->s;
desc->dpl = segp->dpl;
desc->p = segp->present;
desc->limit1 = segp->limit >> 16;
desc->avl = segp->avl;
desc->l = segp->l;
desc->db = segp->db;
desc->g = segp->g;
desc->base2 = segp->base >> 24;
if (!segp->s)
desc->base3 = segp->base >> 32;
}
/*
* Set Long Mode Flat Kernel Code Segment
*
* Input Args:
* vm - VM whose GDT is being filled, or NULL to only write segp
* selector - selector value
*
* Output Args:
* segp - Pointer to KVM segment
*
* Return: None
*
* Sets up the KVM segment pointed to by @segp, to be a code segment
* with the selector value given by @selector.
*/
static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector,
struct kvm_segment *segp)
{
memset(segp, 0, sizeof(*segp));
segp->selector = selector;
segp->limit = 0xFFFFFFFFu;
segp->s = 0x1; /* kTypeCodeData */
segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
* | kFlagCodeReadable
*/
segp->g = true;
segp->l = true;
segp->present = 1;
if (vm)
kvm_seg_fill_gdt_64bit(vm, segp);
}
/*
* Set Long Mode Flat Kernel Data Segment
*
* Input Args:
* vm - VM whose GDT is being filled, or NULL to only write segp
* selector - selector value
*
* Output Args:
* segp - Pointer to KVM segment
*
* Return: None
*
* Sets up the KVM segment pointed to by @segp, to be a data segment
* with the selector value given by @selector.
*/
static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
struct kvm_segment *segp)
{
memset(segp, 0, sizeof(*segp));
segp->selector = selector;
segp->limit = 0xFFFFFFFFu;
segp->s = 0x1; /* kTypeCodeData */
segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
* | kFlagDataWritable
*/
segp->g = true;
segp->present = true;
if (vm)
kvm_seg_fill_gdt_64bit(vm, segp);
}
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
int level = PG_LEVEL_NONE;
uint64_t *pte = __vm_get_page_table_entry(vm, gva, &level);
TEST_ASSERT(*pte & PTE_PRESENT_MASK,
"Leaf PTE not PRESENT for gva: 0x%08lx", gva);
/*
* No need for a hugepage mask on the PTE, x86-64 requires the "unused"
* address bits to be zero.
*/
return PTE_GET_PA(*pte) | (gva & ~HUGEPAGE_MASK(level));
}
static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
{
if (!vm->gdt)
vm->gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
dt->base = vm->gdt;
dt->limit = getpagesize();
}
static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
int selector)
{
if (!vm->tss)
vm->tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
memset(segp, 0, sizeof(*segp));
segp->base = vm->tss;
segp->limit = 0x67;
segp->selector = selector;
segp->type = 0xb;
segp->present = 1;
kvm_seg_fill_gdt_64bit(vm, segp);
}
static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
struct kvm_sregs sregs;
/* Set mode specific system register values. */
vcpu_sregs_get(vcpu, &sregs);
sregs.idt.limit = 0;
kvm_setup_gdt(vm, &sregs.gdt);
switch (vm->mode) {
case VM_MODE_PXXV48_4K:
sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
kvm_seg_set_unusable(&sregs.ldt);
kvm_seg_set_kernel_code_64bit(vm, DEFAULT_CODE_SELECTOR, &sregs.cs);
kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.ds);
kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.es);
kvm_setup_tss_64bit(vm, &sregs.tr, 0x18);
break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
sregs.cr3 = vm->pgd;
vcpu_sregs_set(vcpu, &sregs);
}
void kvm_arch_vm_post_create(struct kvm_vm *vm)
{
vm_create_irqchip(vm);
sync_global_to_guest(vm, host_cpu_is_intel);
sync_global_to_guest(vm, host_cpu_is_amd);
}
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
void *guest_code)
{
struct kvm_mp_state mp_state;
struct kvm_regs regs;
vm_vaddr_t stack_vaddr;
struct kvm_vcpu *vcpu;
stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
DEFAULT_GUEST_STACK_VADDR_MIN,
MEM_REGION_DATA);
stack_vaddr += DEFAULT_STACK_PGS * getpagesize();
/*
* Align stack to match calling sequence requirements in section "The
* Stack Frame" of the System V ABI AMD64 Architecture Processor
* Supplement, which requires the value (%rsp + 8) to be a multiple of
* 16 when control is transferred to the function entry point.
*
* If this code is ever used to launch a vCPU with 32-bit entry point it
* may need to subtract 4 bytes instead of 8 bytes.
*/
TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE),
"__vm_vaddr_alloc() did not provide a page-aligned address");
stack_vaddr -= 8;
vcpu = __vm_vcpu_add(vm, vcpu_id);
vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
vcpu_setup(vm, vcpu);
/* Setup guest general purpose registers */
	vcpu_regs_get(vcpu, &regs);
regs.rflags = regs.rflags | 0x2;
regs.rsp = stack_vaddr;
regs.rip = (unsigned long) guest_code;
	vcpu_regs_set(vcpu, &regs);
/* Setup the MP state */
mp_state.mp_state = 0;
vcpu_mp_state_set(vcpu, &mp_state);
return vcpu;
}
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)
{
struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
return vcpu;
}
void vcpu_arch_free(struct kvm_vcpu *vcpu)
{
if (vcpu->cpuid)
free(vcpu->cpuid);
}
/* Do not use kvm_supported_cpuid directly except for validity checks. */
static void *kvm_supported_cpuid;
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
{
int kvm_fd;
if (kvm_supported_cpuid)
return kvm_supported_cpuid;
kvm_supported_cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
kvm_fd = open_kvm_dev_path_or_exit();
kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID,
(struct kvm_cpuid2 *)kvm_supported_cpuid);
close(kvm_fd);
return kvm_supported_cpuid;
}
static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
uint32_t function, uint32_t index,
uint8_t reg, uint8_t lo, uint8_t hi)
{
const struct kvm_cpuid_entry2 *entry;
int i;
for (i = 0; i < cpuid->nent; i++) {
entry = &cpuid->entries[i];
/*
* The output registers in kvm_cpuid_entry2 are in alphabetical
* order, but kvm_x86_cpu_feature matches that mess, so yay
* pointer shenanigans!
*/
if (entry->function == function && entry->index == index)
return ((&entry->eax)[reg] & GENMASK(hi, lo)) >> lo;
}
return 0;
}
bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
struct kvm_x86_cpu_feature feature)
{
return __kvm_cpu_has(cpuid, feature.function, feature.index,
feature.reg, feature.bit, feature.bit);
}
uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
struct kvm_x86_cpu_property property)
{
return __kvm_cpu_has(cpuid, property.function, property.index,
property.reg, property.lo_bit, property.hi_bit);
}
uint64_t kvm_get_feature_msr(uint64_t msr_index)
{
struct {
struct kvm_msrs header;
struct kvm_msr_entry entry;
} buffer = {};
int r, kvm_fd;
buffer.header.nmsrs = 1;
buffer.entry.index = msr_index;
kvm_fd = open_kvm_dev_path_or_exit();
r = __kvm_ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_GET_MSRS, r));
close(kvm_fd);
return buffer.entry.data;
}
void __vm_xsave_require_permission(uint64_t xfeature, const char *name)
{
int kvm_fd;
u64 bitmask;
long rc;
struct kvm_device_attr attr = {
.group = 0,
.attr = KVM_X86_XCOMP_GUEST_SUPP,
.addr = (unsigned long) &bitmask,
};
TEST_ASSERT(!kvm_supported_cpuid,
"kvm_get_supported_cpuid() cannot be used before ARCH_REQ_XCOMP_GUEST_PERM");
TEST_ASSERT(is_power_of_2(xfeature),
"Dynamic XFeatures must be enabled one at a time");
kvm_fd = open_kvm_dev_path_or_exit();
rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
close(kvm_fd);
if (rc == -1 && (errno == ENXIO || errno == EINVAL))
__TEST_REQUIRE(0, "KVM_X86_XCOMP_GUEST_SUPP not supported");
TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);
__TEST_REQUIRE(bitmask & xfeature,
"Required XSAVE feature '%s' not supported", name);
TEST_REQUIRE(!syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, ilog2(xfeature)));
rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
TEST_ASSERT(bitmask & xfeature,
"'%s' (0x%lx) not permitted after prctl(ARCH_REQ_XCOMP_GUEST_PERM) permitted=0x%lx",
name, xfeature, bitmask);
}
void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid)
{
TEST_ASSERT(cpuid != vcpu->cpuid, "@cpuid can't be the vCPU's CPUID");
/* Allow overriding the default CPUID. */
if (vcpu->cpuid && vcpu->cpuid->nent < cpuid->nent) {
free(vcpu->cpuid);
vcpu->cpuid = NULL;
}
if (!vcpu->cpuid)
vcpu->cpuid = allocate_kvm_cpuid2(cpuid->nent);
memcpy(vcpu->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent));
vcpu_set_cpuid(vcpu);
}
void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr)
{
struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, 0x80000008);
entry->eax = (entry->eax & ~0xff) | maxphyaddr;
vcpu_set_cpuid(vcpu);
}
void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function)
{
struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function);
entry->eax = 0;
entry->ebx = 0;
entry->ecx = 0;
entry->edx = 0;
vcpu_set_cpuid(vcpu);
}
void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
struct kvm_x86_cpu_feature feature,
bool set)
{
struct kvm_cpuid_entry2 *entry;
u32 *reg;
entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
reg = (&entry->eax) + feature.reg;
if (set)
*reg |= BIT(feature.bit);
else
*reg &= ~BIT(feature.bit);
vcpu_set_cpuid(vcpu);
}
uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
{
struct {
struct kvm_msrs header;
struct kvm_msr_entry entry;
} buffer = {};
buffer.header.nmsrs = 1;
buffer.entry.index = msr_index;
vcpu_msrs_get(vcpu, &buffer.header);
return buffer.entry.data;
}
int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value)
{
struct {
struct kvm_msrs header;
struct kvm_msr_entry entry;
} buffer = {};
memset(&buffer, 0, sizeof(buffer));
buffer.header.nmsrs = 1;
buffer.entry.index = msr_index;
buffer.entry.data = msr_value;
return __vcpu_ioctl(vcpu, KVM_SET_MSRS, &buffer.header);
}
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
struct kvm_regs regs;
TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
" num: %u\n",
num);
va_start(ap, num);
	vcpu_regs_get(vcpu, &regs);
if (num >= 1)
regs.rdi = va_arg(ap, uint64_t);
if (num >= 2)
regs.rsi = va_arg(ap, uint64_t);
if (num >= 3)
regs.rdx = va_arg(ap, uint64_t);
if (num >= 4)
regs.rcx = va_arg(ap, uint64_t);
if (num >= 5)
regs.r8 = va_arg(ap, uint64_t);
if (num >= 6)
regs.r9 = va_arg(ap, uint64_t);
	vcpu_regs_set(vcpu, &regs);
va_end(ap);
}
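/*
 * Sketch: the six slots follow the System V AMD64 calling convention
 * (rdi, rsi, rdx, rcx, r8, r9), so the guest entry point sees them as
 * plain C arguments:
 *
 *	static void guest_code(uint64_t a, uint64_t b) { ... }
 *
 *	vcpu_args_set(vcpu, 2, 1ul, 2ul);
 */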
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
struct kvm_regs regs;
struct kvm_sregs sregs;
fprintf(stream, "%*svCPU ID: %u\n", indent, "", vcpu->id);
fprintf(stream, "%*sregs:\n", indent + 2, "");
	vcpu_regs_get(vcpu, &regs);
regs_dump(stream, ®s, indent + 4);
fprintf(stream, "%*ssregs:\n", indent + 2, "");
vcpu_sregs_get(vcpu, &sregs);
sregs_dump(stream, &sregs, indent + 4);
}
static struct kvm_msr_list *__kvm_get_msr_index_list(bool feature_msrs)
{
struct kvm_msr_list *list;
struct kvm_msr_list nmsrs;
int kvm_fd, r;
kvm_fd = open_kvm_dev_path_or_exit();
nmsrs.nmsrs = 0;
if (!feature_msrs)
r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
else
r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, &nmsrs);
TEST_ASSERT(r == -1 && errno == E2BIG,
"Expected -E2BIG, got rc: %i errno: %i (%s)",
r, errno, strerror(errno));
list = malloc(sizeof(*list) + nmsrs.nmsrs * sizeof(list->indices[0]));
TEST_ASSERT(list, "-ENOMEM when allocating MSR index list");
list->nmsrs = nmsrs.nmsrs;
if (!feature_msrs)
kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
else
kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);
close(kvm_fd);
TEST_ASSERT(list->nmsrs == nmsrs.nmsrs,
"Number of MSRs in list changed, was %d, now %d",
nmsrs.nmsrs, list->nmsrs);
return list;
}
const struct kvm_msr_list *kvm_get_msr_index_list(void)
{
static const struct kvm_msr_list *list;
if (!list)
list = __kvm_get_msr_index_list(false);
return list;
}
const struct kvm_msr_list *kvm_get_feature_msr_index_list(void)
{
static const struct kvm_msr_list *list;
if (!list)
list = __kvm_get_msr_index_list(true);
return list;
}
bool kvm_msr_is_in_save_restore_list(uint32_t msr_index)
{
const struct kvm_msr_list *list = kvm_get_msr_index_list();
int i;
for (i = 0; i < list->nmsrs; ++i) {
if (list->indices[i] == msr_index)
return true;
}
return false;
}
static void vcpu_save_xsave_state(struct kvm_vcpu *vcpu,
struct kvm_x86_state *state)
{
int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2);
if (size) {
state->xsave = malloc(size);
vcpu_xsave2_get(vcpu, state->xsave);
} else {
state->xsave = malloc(sizeof(struct kvm_xsave));
vcpu_xsave_get(vcpu, state->xsave);
}
}
struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu)
{
const struct kvm_msr_list *msr_list = kvm_get_msr_index_list();
struct kvm_x86_state *state;
int i;
static int nested_size = -1;
if (nested_size == -1) {
nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
TEST_ASSERT(nested_size <= sizeof(state->nested_),
"Nested state size too big, %i > %zi",
nested_size, sizeof(state->nested_));
}
/*
* When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
* guest state is consistent only after userspace re-enters the
* kernel with KVM_RUN. Complete IO prior to migrating state
* to a new VM.
*/
vcpu_run_complete_io(vcpu);
state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0]));
TEST_ASSERT(state, "-ENOMEM when allocating kvm state");
vcpu_events_get(vcpu, &state->events);
vcpu_mp_state_get(vcpu, &state->mp_state);
vcpu_regs_get(vcpu, &state->regs);
vcpu_save_xsave_state(vcpu, state);
if (kvm_has_cap(KVM_CAP_XCRS))
vcpu_xcrs_get(vcpu, &state->xcrs);
vcpu_sregs_get(vcpu, &state->sregs);
if (nested_size) {
state->nested.size = sizeof(state->nested_);
vcpu_nested_state_get(vcpu, &state->nested);
TEST_ASSERT(state->nested.size <= nested_size,
"Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
state->nested.size, nested_size);
} else {
state->nested.size = 0;
}
state->msrs.nmsrs = msr_list->nmsrs;
for (i = 0; i < msr_list->nmsrs; i++)
state->msrs.entries[i].index = msr_list->indices[i];
vcpu_msrs_get(vcpu, &state->msrs);
vcpu_debugregs_get(vcpu, &state->debugregs);
return state;
}
void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state)
{
vcpu_sregs_set(vcpu, &state->sregs);
vcpu_msrs_set(vcpu, &state->msrs);
if (kvm_has_cap(KVM_CAP_XCRS))
vcpu_xcrs_set(vcpu, &state->xcrs);
vcpu_xsave_set(vcpu, state->xsave);
vcpu_events_set(vcpu, &state->events);
vcpu_mp_state_set(vcpu, &state->mp_state);
vcpu_debugregs_set(vcpu, &state->debugregs);
vcpu_regs_set(vcpu, &state->regs);
if (state->nested.size)
vcpu_nested_state_set(vcpu, &state->nested);
}
void kvm_x86_state_cleanup(struct kvm_x86_state *state)
{
free(state->xsave);
free(state);
}
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
{
if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) {
*pa_bits = kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32;
*va_bits = 32;
} else {
*pa_bits = kvm_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
*va_bits = kvm_cpu_property(X86_PROPERTY_MAX_VIRT_ADDR);
}
}
static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
int dpl, unsigned short selector)
{
struct idt_entry *base =
(struct idt_entry *)addr_gva2hva(vm, vm->idt);
struct idt_entry *e = &base[vector];
memset(e, 0, sizeof(*e));
e->offset0 = addr;
e->selector = selector;
e->ist = 0;
e->type = 14;
e->dpl = dpl;
e->p = 1;
e->offset1 = addr >> 16;
e->offset2 = addr >> 32;
}
static bool kvm_fixup_exception(struct ex_regs *regs)
{
if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10)
return false;
if (regs->vector == DE_VECTOR)
return false;
regs->rip = regs->r11;
regs->r9 = regs->vector;
regs->r10 = regs->error_code;
return true;
}
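/*
 * Sketch: this fixup protocol pairs with the KVM_ASM_SAFE() wrappers in
 * processor.h, which stash KVM_EXCEPTION_MAGIC, the faulting RIP, and the
 * fixup target in r9/r10/r11 around a single instruction, letting guests
 * observe the vector instead of dying, e.g.:
 *
 *	uint8_t vector = rdmsr_safe(msr_index, &val);
 */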
void route_exception(struct ex_regs *regs)
{
typedef void(*handler)(struct ex_regs *);
handler *handlers = (handler *)exception_handlers;
if (handlers && handlers[regs->vector]) {
handlers[regs->vector](regs);
return;
}
if (kvm_fixup_exception(regs))
return;
ucall_assert(UCALL_UNHANDLED,
"Unhandled exception in guest", __FILE__, __LINE__,
"Unhandled exception '0x%lx' at guest RIP '0x%lx'",
regs->vector, regs->rip);
}
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
extern void *idt_handlers;
int i;
vm->idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
	/* Handlers have the same address in both address spaces. */
for (i = 0; i < NUM_INTERRUPTS; i++)
set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0,
DEFAULT_CODE_SELECTOR);
}
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
struct kvm_vm *vm = vcpu->vm;
struct kvm_sregs sregs;
vcpu_sregs_get(vcpu, &sregs);
sregs.idt.base = vm->idt;
sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
sregs.gdt.base = vm->gdt;
sregs.gdt.limit = getpagesize() - 1;
kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs);
vcpu_sregs_set(vcpu, &sregs);
*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
void (*handler)(struct ex_regs *))
{
vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);
handlers[vector] = (vm_vaddr_t)handler;
}
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
struct ucall uc;
if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED)
REPORT_GUEST_ASSERT(uc);
}
const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
uint32_t function, uint32_t index)
{
int i;
for (i = 0; i < cpuid->nent; i++) {
if (cpuid->entries[i].function == function &&
cpuid->entries[i].index == index)
return &cpuid->entries[i];
}
TEST_FAIL("CPUID function 0x%x index 0x%x not found ", function, index);
return NULL;
}
#define X86_HYPERCALL(inputs...) \
({ \
uint64_t r; \
\
asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" \
"jnz 1f\n\t" \
"vmcall\n\t" \
"jmp 2f\n\t" \
"1: vmmcall\n\t" \
"2:" \
: "=a"(r) \
: [use_vmmcall] "r" (host_cpu_is_amd), inputs); \
\
r; \
})
uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
uint64_t a3)
{
return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
}
uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
{
return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1));
}
void xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
{
GUEST_ASSERT(!__xen_hypercall(nr, a0, a1));
}
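/*
 * Hedged guest-side sketch: KVM_HC_SEND_IPI is a real KVM hypercall number,
 * but the argument names below are illustrative placeholders and the return
 * value semantics are hypercall-specific.
 *
 *	uint64_t r = kvm_hypercall(KVM_HC_SEND_IPI, ipi_bitmap_low,
 *				   ipi_bitmap_high, min_apic_id, icr);
 */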
const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
{
static struct kvm_cpuid2 *cpuid;
int kvm_fd;
if (cpuid)
return cpuid;
cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
kvm_fd = open_kvm_dev_path_or_exit();
kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
close(kvm_fd);
return cpuid;
}
void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu)
{
static struct kvm_cpuid2 *cpuid_full;
const struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
int i, nent = 0;
if (!cpuid_full) {
cpuid_sys = kvm_get_supported_cpuid();
cpuid_hv = kvm_get_supported_hv_cpuid();
cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent);
if (!cpuid_full) {
perror("malloc");
abort();
}
/* Need to skip KVM CPUID leaves 0x400000xx */
for (i = 0; i < cpuid_sys->nent; i++) {
if (cpuid_sys->entries[i].function >= 0x40000000 &&
cpuid_sys->entries[i].function < 0x40000100)
continue;
cpuid_full->entries[nent] = cpuid_sys->entries[i];
nent++;
}
memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
cpuid_full->nent = nent + cpuid_hv->nent;
}
vcpu_init_cpuid(vcpu, cpuid_full);
}
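/* Note: the returned cpuid is heap-allocated; the caller owns (and may free) it. */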
const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
return cpuid;
}
unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
{
const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
unsigned long ht_gfn, max_gfn, max_pfn;
uint8_t maxphyaddr;
max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
/* Avoid reserved HyperTransport region on AMD processors. */
if (!host_cpu_is_amd)
return max_gfn;
/* On parts with <40 physical address bits, the area is fully hidden */
if (vm->pa_bits < 40)
return max_gfn;
/* Before family 17h, the HyperTransport area is just below 1T. */
ht_gfn = (1 << 28) - num_ht_pages;
if (this_cpu_family() < 0x17)
goto done;
/*
* Otherwise it's at the top of the physical address space, possibly
* reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use
* the old conservative value if MAXPHYADDR is not enumerated.
*/
if (!this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR))
goto done;
maxphyaddr = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
max_pfn = (1ULL << (maxphyaddr - vm->page_shift)) - 1;
if (this_cpu_has_p(X86_PROPERTY_PHYS_ADDR_REDUCTION))
max_pfn >>= this_cpu_property(X86_PROPERTY_PHYS_ADDR_REDUCTION);
ht_gfn = max_pfn - num_ht_pages;
done:
return min(max_gfn, ht_gfn - 1);
}
/* Returns true if kvm_intel was loaded with unrestricted_guest=1. */
bool vm_is_unrestricted_guest(struct kvm_vm *vm)
{
/* Ensure that a KVM vendor-specific module is loaded. */
if (vm == NULL)
close(open_kvm_dev_path_or_exit());
return get_kvm_intel_param_bool("unrestricted_guest");
}
void kvm_selftest_arch_init(void)
{
host_cpu_is_intel = this_cpu_is_intel();
host_cpu_is_amd = this_cpu_is_amd();
}
| linux-master | tools/testing/selftests/kvm/lib/x86_64/processor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ucall support. A ucall is a "hypercall to userspace".
*
* Copyright (C) 2018, Red Hat, Inc.
*/
#include "kvm_util.h"
#define UCALL_PIO_PORT ((uint16_t)0x1000)
void ucall_arch_do_ucall(vm_vaddr_t uc)
{
/*
* FIXME: Revert this hack (the entire commit that added it) once nVMX
* preserves L2 GPRs across a nested VM-Exit. If a ucall from L2, e.g.
* to do a GUEST_SYNC(), lands the vCPU in L1, any and all GPRs can be
* clobbered by L1. Save and restore non-volatile GPRs (clobbering RBP
* in particular is problematic) along with RDX and RDI (which are
* inputs), and clobber volatile GPRs. *sigh*
*/
#define HORRIFIC_L2_UCALL_CLOBBER_HACK \
"rcx", "rsi", "r8", "r9", "r10", "r11"
asm volatile("push %%rbp\n\t"
"push %%r15\n\t"
"push %%r14\n\t"
"push %%r13\n\t"
"push %%r12\n\t"
"push %%rbx\n\t"
"push %%rdx\n\t"
"push %%rdi\n\t"
"in %[port], %%al\n\t"
"pop %%rdi\n\t"
"pop %%rdx\n\t"
"pop %%rbx\n\t"
"pop %%r12\n\t"
"pop %%r13\n\t"
"pop %%r14\n\t"
"pop %%r15\n\t"
"pop %%rbp\n\t"
: : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory",
HORRIFIC_L2_UCALL_CLOBBER_HACK);
}
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) {
struct kvm_regs regs;
vcpu_regs_get(vcpu, ®s);
return (void *)regs.rdi;
}
return NULL;
}
| linux-master | tools/testing/selftests/kvm/lib/x86_64/ucall.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/x86_64/svm.c
* Helpers used for nested SVM testing
 * Largely inspired by the KVM unit test svm.c
*
* Copyright (C) 2020, Red Hat, Inc.
*/
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#define SEV_DEV_PATH "/dev/sev"
struct gpr64_regs guest_regs;
u64 rflags;
/*
 * Allocate memory regions for nested SVM tests.
*
* Input Args:
* vm - The VM to allocate guest-virtual addresses in.
*
* Output Args:
* p_svm_gva - The guest virtual address for the struct svm_test_data.
*
* Return:
* Pointer to structure with the addresses of the SVM areas.
*/
struct svm_test_data *
vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva)
{
vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm);
struct svm_test_data *svm = addr_gva2hva(vm, svm_gva);
svm->vmcb = (void *)vm_vaddr_alloc_page(vm);
svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb);
svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb);
svm->save_area = (void *)vm_vaddr_alloc_page(vm);
svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area);
svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area);
svm->msr = (void *)vm_vaddr_alloc_page(vm);
svm->msr_hva = addr_gva2hva(vm, (uintptr_t)svm->msr);
svm->msr_gpa = addr_gva2gpa(vm, (uintptr_t)svm->msr);
memset(svm->msr_hva, 0, getpagesize());
*p_svm_gva = svm_gva;
return svm;
}
static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
u64 base, u32 limit, u32 attr)
{
seg->selector = selector;
seg->attrib = attr;
seg->limit = limit;
seg->base = base;
}
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp)
{
struct vmcb *vmcb = svm->vmcb;
uint64_t vmcb_gpa = svm->vmcb_gpa;
struct vmcb_save_area *save = &vmcb->save;
struct vmcb_control_area *ctrl = &vmcb->control;
u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
| SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
| SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
uint64_t efer;
efer = rdmsr(MSR_EFER);
wrmsr(MSR_EFER, efer | EFER_SVME);
wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa);
memset(vmcb, 0, sizeof(*vmcb));
asm volatile ("vmsave %0\n\t" : : "a" (vmcb_gpa) : "memory");
vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr);
vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr);
vmcb_set_seg(&save->ss, get_ss(), 0, -1U, data_seg_attr);
vmcb_set_seg(&save->ds, get_ds(), 0, -1U, data_seg_attr);
vmcb_set_seg(&save->gdtr, 0, get_gdt().address, get_gdt().size, 0);
vmcb_set_seg(&save->idtr, 0, get_idt().address, get_idt().size, 0);
ctrl->asid = 1;
save->cpl = 0;
save->efer = rdmsr(MSR_EFER);
asm volatile ("mov %%cr4, %0" : "=r"(save->cr4) : : "memory");
asm volatile ("mov %%cr3, %0" : "=r"(save->cr3) : : "memory");
asm volatile ("mov %%cr0, %0" : "=r"(save->cr0) : : "memory");
asm volatile ("mov %%dr7, %0" : "=r"(save->dr7) : : "memory");
asm volatile ("mov %%dr6, %0" : "=r"(save->dr6) : : "memory");
asm volatile ("mov %%cr2, %0" : "=r"(save->cr2) : : "memory");
save->g_pat = rdmsr(MSR_IA32_CR_PAT);
save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
ctrl->intercept = (1ULL << INTERCEPT_VMRUN) |
(1ULL << INTERCEPT_VMMCALL);
ctrl->msrpm_base_pa = svm->msr_gpa;
vmcb->save.rip = (u64)guest_rip;
vmcb->save.rsp = (u64)guest_rsp;
guest_regs.rdi = (u64)svm;
}
/*
 * Save/restore the 64-bit general purpose registers except rax, rip, and rsp,
 * which are handled directly through the VMCB guest processor state.  The
 * numeric operands below are byte offsets into guest_regs (struct gpr64_regs).
*/
#define SAVE_GPR_C \
"xchg %%rbx, guest_regs+0x20\n\t" \
"xchg %%rcx, guest_regs+0x10\n\t" \
"xchg %%rdx, guest_regs+0x18\n\t" \
"xchg %%rbp, guest_regs+0x30\n\t" \
"xchg %%rsi, guest_regs+0x38\n\t" \
"xchg %%rdi, guest_regs+0x40\n\t" \
"xchg %%r8, guest_regs+0x48\n\t" \
"xchg %%r9, guest_regs+0x50\n\t" \
"xchg %%r10, guest_regs+0x58\n\t" \
"xchg %%r11, guest_regs+0x60\n\t" \
"xchg %%r12, guest_regs+0x68\n\t" \
"xchg %%r13, guest_regs+0x70\n\t" \
"xchg %%r14, guest_regs+0x78\n\t" \
"xchg %%r15, guest_regs+0x80\n\t"
#define LOAD_GPR_C SAVE_GPR_C
/*
 * Selftests do not use interrupts, so clgi/sti/cli/stgi are dropped for
 * now.  Registers involved in LOAD/SAVE_GPR_C are ultimately left
 * unmodified, so they do not need to be in the clobber list.
*/
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
{
asm volatile (
"vmload %[vmcb_gpa]\n\t"
"mov rflags, %%r15\n\t" // rflags
"mov %%r15, 0x170(%[vmcb])\n\t"
"mov guest_regs, %%r15\n\t" // rax
"mov %%r15, 0x1f8(%[vmcb])\n\t"
LOAD_GPR_C
"vmrun %[vmcb_gpa]\n\t"
SAVE_GPR_C
"mov 0x170(%[vmcb]), %%r15\n\t" // rflags
"mov %%r15, rflags\n\t"
"mov 0x1f8(%[vmcb]), %%r15\n\t" // rax
"mov %%r15, guest_regs\n\t"
"vmsave %[vmcb_gpa]\n\t"
: : [vmcb] "r" (vmcb), [vmcb_gpa] "a" (vmcb_gpa)
: "r15", "memory");
}
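/*
 * Hedged guest-side sketch tying the helpers above together (l2_guest_code
 * and the stack size are illustrative):
 *
 *	struct svm_test_data *svm = (void *)svm_gva; // from vcpu_alloc_svm()
 *	unsigned long l2_guest_stack[64];
 *
 *	generic_svm_setup(svm, l2_guest_code, &l2_guest_stack[64]);
 *	run_guest(svm->vmcb, svm->vmcb_gpa);
 *	GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
 */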
/*
* Open SEV_DEV_PATH if available, otherwise exit the entire program.
*
* Return:
* The opened file descriptor of /dev/sev.
*/
int open_sev_dev_path_or_exit(void)
{
return open_path_or_exit(SEV_DEV_PATH, 0);
}
| linux-master | tools/testing/selftests/kvm/lib/x86_64/svm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/x86_64/apic.c
*
* Copyright (C) 2021, Google LLC.
*/
#include "apic.h"
void apic_disable(void)
{
wrmsr(MSR_IA32_APICBASE,
rdmsr(MSR_IA32_APICBASE) &
~(MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD));
}
void xapic_enable(void)
{
uint64_t val = rdmsr(MSR_IA32_APICBASE);
	/* Per the SDM: to enable xAPIC when in x2APIC mode, the APIC must first be disabled. */
if (val & MSR_IA32_APICBASE_EXTD) {
apic_disable();
wrmsr(MSR_IA32_APICBASE,
rdmsr(MSR_IA32_APICBASE) | MSR_IA32_APICBASE_ENABLE);
} else if (!(val & MSR_IA32_APICBASE_ENABLE)) {
wrmsr(MSR_IA32_APICBASE, val | MSR_IA32_APICBASE_ENABLE);
}
/*
* Per SDM: reset value of spurious interrupt vector register has the
* APIC software enabled bit=0. It must be enabled in addition to the
* enable bit in the MSR.
*/
val = xapic_read_reg(APIC_SPIV) | APIC_SPIV_APIC_ENABLED;
xapic_write_reg(APIC_SPIV, val);
}
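/*
 * Hedged usage sketch (the xAPIC ID lives in bits 31:24 of the APIC_ID
 * register, per the standard xAPIC layout):
 *
 *	xapic_enable();
 *	uint32_t id = xapic_read_reg(APIC_ID) >> 24;
 */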
void x2apic_enable(void)
{
wrmsr(MSR_IA32_APICBASE, rdmsr(MSR_IA32_APICBASE) |
MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD);
x2apic_write_reg(APIC_SPIV,
x2apic_read_reg(APIC_SPIV) | APIC_SPIV_APIC_ENABLED);
}
| linux-master | tools/testing/selftests/kvm/lib/x86_64/apic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* x86_64-specific extensions to memstress.c.
*
* Copyright (C) 2022, Google, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include "test_util.h"
#include "kvm_util.h"
#include "memstress.h"
#include "processor.h"
#include "vmx.h"
void memstress_l2_guest_code(uint64_t vcpu_id)
{
memstress_guest_code(vcpu_id);
vmcall();
}
extern char memstress_l2_guest_entry[];
__asm__(
"memstress_l2_guest_entry:"
" mov (%rsp), %rdi;"
" call memstress_l2_guest_code;"
" ud2;"
);
static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
unsigned long *rsp;
GUEST_ASSERT(vmx->vmcs_gpa);
GUEST_ASSERT(prepare_for_vmx_operation(vmx));
GUEST_ASSERT(load_vmcs(vmx));
GUEST_ASSERT(ept_1g_pages_supported());
rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
*rsp = vcpu_id;
prepare_vmcs(vmx, memstress_l2_guest_entry, rsp);
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
GUEST_DONE();
}
uint64_t memstress_nested_pages(int nr_vcpus)
{
/*
* 513 page tables is enough to identity-map 256 TiB of L2 with 1G
* pages and 4-level paging, plus a few pages per-vCPU for data
* structures such as the VMCS.
*/
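	/*
	 * Derivation: with 1GiB entries, one PDPT page maps 512 GiB, so
	 * 256 TiB needs 512 PDPT pages; the single PML4 page brings the
	 * total to 513.
	 */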
return 513 + 10 * nr_vcpus;
}
void memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
{
uint64_t start, end;
prepare_eptp(vmx, vm, 0);
/*
* Identity map the first 4G and the test region with 1G pages so that
* KVM can shadow the EPT12 with the maximum huge page size supported
* by the backing source.
*/
nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL);
start = align_down(memstress_args.gpa, PG_SIZE_1G);
end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G);
nested_identity_map_1g(vmx, vm, start, end - start);
}
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
{
struct vmx_pages *vmx, *vmx0 = NULL;
struct kvm_regs regs;
vm_vaddr_t vmx_gva;
int vcpu_id;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
TEST_REQUIRE(kvm_cpu_has_ept());
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
vmx = vcpu_alloc_vmx(vm, &vmx_gva);
if (vcpu_id == 0) {
memstress_setup_ept(vmx, vm);
vmx0 = vmx;
} else {
/* Share the same EPT table across all vCPUs. */
vmx->eptp = vmx0->eptp;
vmx->eptp_hva = vmx0->eptp_hva;
vmx->eptp_gpa = vmx0->eptp_gpa;
}
/*
* Override the vCPU to run memstress_l1_guest_code() which will
* bounce it into L2 before calling memstress_guest_code().
*/
vcpu_regs_get(vcpus[vcpu_id], ®s);
regs.rip = (unsigned long) memstress_l1_guest_code;
vcpu_regs_set(vcpus[vcpu_id], ®s);
vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id);
}
}
| linux-master | tools/testing/selftests/kvm/lib/x86_64/memstress.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Check for KVM_GET_REG_LIST regressions.
*
* Copyright (c) 2023 Intel Corporation
*
*/
#include <stdio.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"
#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX];
bool filter_reg(__u64 reg)
{
switch (reg & ~REG_MASK) {
/*
	 * The same set of ISA_EXT registers is not present on all hosts because
	 * ISA_EXT registers are visible to KVM user space based on the ISA
	 * extensions available on the host.  Also, disabling an ISA extension
	 * using the corresponding ISA_EXT register does not affect the
	 * visibility of the ISA_EXT register itself.
	 *
	 * Based on the above, we should filter out all ISA_EXT registers.
*/
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_V:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM:
return true;
/* AIA registers are always available when Ssaia can't be disabled */
case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect):
case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1):
case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2):
case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh):
case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph):
case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
return isa_ext_cant_disable[KVM_RISCV_ISA_EXT_SSAIA];
default:
break;
}
return false;
}
bool check_reject_set(int err)
{
return err == EINVAL;
}
static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext)
{
int ret;
unsigned long value;
ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
return (ret) ? false : !!value;
}
void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 };
struct vcpu_reg_sublist *s;
int rc;
for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
__vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(i), &isa_ext_state[i]);
/*
	 * Disable all extensions that were enabled by default, i.e., those
	 * available on the RISC-V host.
*/
for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
rc = __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
if (rc && isa_ext_state[i])
isa_ext_cant_disable[i] = true;
}
for_each_sublist(c, s) {
if (!s->feature)
continue;
/* Try to enable the desired extension */
__vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(s->feature), 1);
/* Double check whether the desired extension was enabled */
__TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature),
"%s not available, skipping tests\n", s->name);
}
}
static const char *config_id_to_str(__u64 id)
{
/* reg_off is the offset into struct kvm_riscv_config */
__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG);
switch (reg_off) {
case KVM_REG_RISCV_CONFIG_REG(isa):
return "KVM_REG_RISCV_CONFIG_REG(isa)";
case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
case KVM_REG_RISCV_CONFIG_REG(mvendorid):
return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
case KVM_REG_RISCV_CONFIG_REG(marchid):
return "KVM_REG_RISCV_CONFIG_REG(marchid)";
case KVM_REG_RISCV_CONFIG_REG(mimpid):
return "KVM_REG_RISCV_CONFIG_REG(mimpid)";
case KVM_REG_RISCV_CONFIG_REG(satp_mode):
return "KVM_REG_RISCV_CONFIG_REG(satp_mode)";
}
/*
	 * The config reg space grows as new pseudo regs are added, so just
	 * show the raw id to indicate a new pseudo config reg.
*/
return strdup_printf("KVM_REG_RISCV_CONFIG_REG(%lld) /* UNKNOWN */", reg_off);
}
static const char *core_id_to_str(const char *prefix, __u64 id)
{
/* reg_off is the offset into struct kvm_riscv_core */
__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE);
switch (reg_off) {
case KVM_REG_RISCV_CORE_REG(regs.pc):
return "KVM_REG_RISCV_CORE_REG(regs.pc)";
case KVM_REG_RISCV_CORE_REG(regs.ra):
return "KVM_REG_RISCV_CORE_REG(regs.ra)";
case KVM_REG_RISCV_CORE_REG(regs.sp):
return "KVM_REG_RISCV_CORE_REG(regs.sp)";
case KVM_REG_RISCV_CORE_REG(regs.gp):
return "KVM_REG_RISCV_CORE_REG(regs.gp)";
case KVM_REG_RISCV_CORE_REG(regs.tp):
return "KVM_REG_RISCV_CORE_REG(regs.tp)";
case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2):
return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
reg_off - KVM_REG_RISCV_CORE_REG(regs.t0));
case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1):
return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
reg_off - KVM_REG_RISCV_CORE_REG(regs.s0));
case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7):
return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)",
reg_off - KVM_REG_RISCV_CORE_REG(regs.a0));
case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11):
return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2);
case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3);
case KVM_REG_RISCV_CORE_REG(mode):
return "KVM_REG_RISCV_CORE_REG(mode)";
}
TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
return NULL;
}
#define RISCV_CSR_GENERAL(csr) \
"KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")"
#define RISCV_CSR_AIA(csr) \
"KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")"
static const char *general_csr_id_to_str(__u64 reg_off)
{
/* reg_off is the offset into struct kvm_riscv_csr */
switch (reg_off) {
case KVM_REG_RISCV_CSR_REG(sstatus):
return RISCV_CSR_GENERAL(sstatus);
case KVM_REG_RISCV_CSR_REG(sie):
return RISCV_CSR_GENERAL(sie);
case KVM_REG_RISCV_CSR_REG(stvec):
return RISCV_CSR_GENERAL(stvec);
case KVM_REG_RISCV_CSR_REG(sscratch):
return RISCV_CSR_GENERAL(sscratch);
case KVM_REG_RISCV_CSR_REG(sepc):
return RISCV_CSR_GENERAL(sepc);
case KVM_REG_RISCV_CSR_REG(scause):
return RISCV_CSR_GENERAL(scause);
case KVM_REG_RISCV_CSR_REG(stval):
return RISCV_CSR_GENERAL(stval);
case KVM_REG_RISCV_CSR_REG(sip):
return RISCV_CSR_GENERAL(sip);
case KVM_REG_RISCV_CSR_REG(satp):
return RISCV_CSR_GENERAL(satp);
case KVM_REG_RISCV_CSR_REG(scounteren):
return RISCV_CSR_GENERAL(scounteren);
}
TEST_FAIL("Unknown general csr reg: 0x%llx", reg_off);
return NULL;
}
static const char *aia_csr_id_to_str(__u64 reg_off)
{
/* reg_off is the offset into struct kvm_riscv_aia_csr */
switch (reg_off) {
case KVM_REG_RISCV_CSR_AIA_REG(siselect):
return RISCV_CSR_AIA(siselect);
case KVM_REG_RISCV_CSR_AIA_REG(iprio1):
return RISCV_CSR_AIA(iprio1);
case KVM_REG_RISCV_CSR_AIA_REG(iprio2):
return RISCV_CSR_AIA(iprio2);
case KVM_REG_RISCV_CSR_AIA_REG(sieh):
return RISCV_CSR_AIA(sieh);
case KVM_REG_RISCV_CSR_AIA_REG(siph):
return RISCV_CSR_AIA(siph);
case KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
return RISCV_CSR_AIA(iprio1h);
case KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
return RISCV_CSR_AIA(iprio2h);
}
TEST_FAIL("Unknown aia csr reg: 0x%llx", reg_off);
return NULL;
}
static const char *csr_id_to_str(const char *prefix, __u64 id)
{
__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR);
__u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
switch (reg_subtype) {
case KVM_REG_RISCV_CSR_GENERAL:
return general_csr_id_to_str(reg_off);
case KVM_REG_RISCV_CSR_AIA:
return aia_csr_id_to_str(reg_off);
}
TEST_FAIL("%s: Unknown csr subtype: 0x%llx", prefix, reg_subtype);
return NULL;
}
static const char *timer_id_to_str(const char *prefix, __u64 id)
{
/* reg_off is the offset into struct kvm_riscv_timer */
__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER);
switch (reg_off) {
case KVM_REG_RISCV_TIMER_REG(frequency):
return "KVM_REG_RISCV_TIMER_REG(frequency)";
case KVM_REG_RISCV_TIMER_REG(time):
return "KVM_REG_RISCV_TIMER_REG(time)";
case KVM_REG_RISCV_TIMER_REG(compare):
return "KVM_REG_RISCV_TIMER_REG(compare)";
case KVM_REG_RISCV_TIMER_REG(state):
return "KVM_REG_RISCV_TIMER_REG(state)";
}
TEST_FAIL("%s: Unknown timer reg id: 0x%llx", prefix, id);
return NULL;
}
static const char *fp_f_id_to_str(const char *prefix, __u64 id)
{
/* reg_off is the offset into struct __riscv_f_ext_state */
__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F);
switch (reg_off) {
case KVM_REG_RISCV_FP_F_REG(f[0]) ...
KVM_REG_RISCV_FP_F_REG(f[31]):
return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off);
case KVM_REG_RISCV_FP_F_REG(fcsr):
return "KVM_REG_RISCV_FP_F_REG(fcsr)";
}
TEST_FAIL("%s: Unknown fp_f reg id: 0x%llx", prefix, id);
return NULL;
}
static const char *fp_d_id_to_str(const char *prefix, __u64 id)
{
/* reg_off is the offset into struct __riscv_d_ext_state */
__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D);
switch (reg_off) {
case KVM_REG_RISCV_FP_D_REG(f[0]) ...
KVM_REG_RISCV_FP_D_REG(f[31]):
return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off);
case KVM_REG_RISCV_FP_D_REG(fcsr):
return "KVM_REG_RISCV_FP_D_REG(fcsr)";
}
TEST_FAIL("%s: Unknown fp_d reg id: 0x%llx", prefix, id);
return NULL;
}
static const char *isa_ext_id_to_str(__u64 id)
{
/* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */
__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);
static const char * const kvm_isa_ext_reg_name[] = {
"KVM_RISCV_ISA_EXT_A",
"KVM_RISCV_ISA_EXT_C",
"KVM_RISCV_ISA_EXT_D",
"KVM_RISCV_ISA_EXT_F",
"KVM_RISCV_ISA_EXT_H",
"KVM_RISCV_ISA_EXT_I",
"KVM_RISCV_ISA_EXT_M",
"KVM_RISCV_ISA_EXT_SVPBMT",
"KVM_RISCV_ISA_EXT_SSTC",
"KVM_RISCV_ISA_EXT_SVINVAL",
"KVM_RISCV_ISA_EXT_ZIHINTPAUSE",
"KVM_RISCV_ISA_EXT_ZICBOM",
"KVM_RISCV_ISA_EXT_ZICBOZ",
"KVM_RISCV_ISA_EXT_ZBB",
"KVM_RISCV_ISA_EXT_SSAIA",
"KVM_RISCV_ISA_EXT_V",
"KVM_RISCV_ISA_EXT_SVNAPOT",
"KVM_RISCV_ISA_EXT_ZBA",
"KVM_RISCV_ISA_EXT_ZBS",
"KVM_RISCV_ISA_EXT_ZICNTR",
"KVM_RISCV_ISA_EXT_ZICSR",
"KVM_RISCV_ISA_EXT_ZIFENCEI",
"KVM_RISCV_ISA_EXT_ZIHPM",
};
if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name)) {
/*
		 * The isa_ext reg space grows as new ISA extensions are added,
		 * so just show the raw id to indicate a new extension.
*/
return strdup_printf("%lld /* UNKNOWN */", reg_off);
}
return kvm_isa_ext_reg_name[reg_off];
}
static const char *sbi_ext_single_id_to_str(__u64 reg_off)
{
/* reg_off is KVM_RISCV_SBI_EXT_ID */
static const char * const kvm_sbi_ext_reg_name[] = {
"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01",
"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME",
"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI",
"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE",
"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST",
"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM",
"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU",
"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL",
"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR",
};
if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name)) {
/*
* sbi_ext regs would grow regularly with new sbi extension added, so
* just show "reg" to indicate a new extension.
*/
return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off);
}
return kvm_sbi_ext_reg_name[reg_off];
}
static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
{
if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST) {
/*
		 * The sbi_ext reg space grows as new SBI extensions are added,
		 * so just show the raw id to indicate a new extension.
*/
return strdup_printf("%lld /* UNKNOWN */", reg_off);
}
switch (reg_subtype) {
case KVM_REG_RISCV_SBI_MULTI_EN:
return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld", reg_off);
case KVM_REG_RISCV_SBI_MULTI_DIS:
return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld", reg_off);
}
return NULL;
}
static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
{
__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT);
__u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
switch (reg_subtype) {
case KVM_REG_RISCV_SBI_SINGLE:
return sbi_ext_single_id_to_str(reg_off);
case KVM_REG_RISCV_SBI_MULTI_EN:
case KVM_REG_RISCV_SBI_MULTI_DIS:
return sbi_ext_multi_id_to_str(reg_subtype, reg_off);
}
TEST_FAIL("%s: Unknown sbi ext subtype: 0x%llx", prefix, reg_subtype);
return NULL;
}
void print_reg(const char *prefix, __u64 id)
{
const char *reg_size = NULL;
TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV,
"%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id);
switch (id & KVM_REG_SIZE_MASK) {
case KVM_REG_SIZE_U32:
reg_size = "KVM_REG_SIZE_U32";
break;
case KVM_REG_SIZE_U64:
reg_size = "KVM_REG_SIZE_U64";
break;
case KVM_REG_SIZE_U128:
reg_size = "KVM_REG_SIZE_U128";
break;
default:
TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
}
switch (id & KVM_REG_RISCV_TYPE_MASK) {
case KVM_REG_RISCV_CONFIG:
printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n",
reg_size, config_id_to_str(id));
break;
case KVM_REG_RISCV_CORE:
printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n",
reg_size, core_id_to_str(prefix, id));
break;
case KVM_REG_RISCV_CSR:
printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n",
reg_size, csr_id_to_str(prefix, id));
break;
case KVM_REG_RISCV_TIMER:
printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n",
reg_size, timer_id_to_str(prefix, id));
break;
case KVM_REG_RISCV_FP_F:
printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n",
reg_size, fp_f_id_to_str(prefix, id));
break;
case KVM_REG_RISCV_FP_D:
printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n",
reg_size, fp_d_id_to_str(prefix, id));
break;
case KVM_REG_RISCV_ISA_EXT:
printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n",
reg_size, isa_ext_id_to_str(id));
break;
case KVM_REG_RISCV_SBI_EXT:
printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
reg_size, sbi_ext_id_to_str(prefix, id));
break;
default:
TEST_FAIL("%s: Unexpected reg type: 0x%llx in reg id: 0x%llx", prefix,
(id & KVM_REG_RISCV_TYPE_MASK) >> KVM_REG_RISCV_TYPE_SHIFT, id);
}
}
/*
* The current blessed list was primed with the output of kernel version
* v6.5-rc3 and then later updated with new registers.
*/
static __u64 base_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(satp_mode),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0,
};
/*
 * The skips_set list names registers that should skip the set test.
* - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly.
*/
static __u64 base_skips_set[] = {
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
};
static __u64 h_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H,
};
static __u64 zicbom_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM,
};
static __u64 zicboz_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ,
};
static __u64 svpbmt_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
};
static __u64 sstc_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC,
};
static __u64 svinval_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL,
};
static __u64 zihintpause_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE,
};
static __u64 zba_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA,
};
static __u64 zbb_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB,
};
static __u64 zbs_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS,
};
static __u64 zicntr_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR,
};
static __u64 zicsr_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR,
};
static __u64 zifencei_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI,
};
static __u64 zihpm_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM,
};
static __u64 aia_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
};
static __u64 fp_f_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F,
};
static __u64 fp_d_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D,
};
#define BASE_SUBLIST \
{"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
.skips_set = base_skips_set, .skips_set_n = ARRAY_SIZE(base_skips_set),}
#define H_REGS_SUBLIST \
{"h", .feature = KVM_RISCV_ISA_EXT_H, .regs = h_regs, .regs_n = ARRAY_SIZE(h_regs),}
#define ZICBOM_REGS_SUBLIST \
{"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),}
#define ZICBOZ_REGS_SUBLIST \
{"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),}
#define SVPBMT_REGS_SUBLIST \
{"svpbmt", .feature = KVM_RISCV_ISA_EXT_SVPBMT, .regs = svpbmt_regs, .regs_n = ARRAY_SIZE(svpbmt_regs),}
#define SSTC_REGS_SUBLIST \
{"sstc", .feature = KVM_RISCV_ISA_EXT_SSTC, .regs = sstc_regs, .regs_n = ARRAY_SIZE(sstc_regs),}
#define SVINVAL_REGS_SUBLIST \
{"svinval", .feature = KVM_RISCV_ISA_EXT_SVINVAL, .regs = svinval_regs, .regs_n = ARRAY_SIZE(svinval_regs),}
#define ZIHINTPAUSE_REGS_SUBLIST \
{"zihintpause", .feature = KVM_RISCV_ISA_EXT_ZIHINTPAUSE, .regs = zihintpause_regs, .regs_n = ARRAY_SIZE(zihintpause_regs),}
#define ZBA_REGS_SUBLIST \
{"zba", .feature = KVM_RISCV_ISA_EXT_ZBA, .regs = zba_regs, .regs_n = ARRAY_SIZE(zba_regs),}
#define ZBB_REGS_SUBLIST \
{"zbb", .feature = KVM_RISCV_ISA_EXT_ZBB, .regs = zbb_regs, .regs_n = ARRAY_SIZE(zbb_regs),}
#define ZBS_REGS_SUBLIST \
{"zbs", .feature = KVM_RISCV_ISA_EXT_ZBS, .regs = zbs_regs, .regs_n = ARRAY_SIZE(zbs_regs),}
#define ZICNTR_REGS_SUBLIST \
{"zicntr", .feature = KVM_RISCV_ISA_EXT_ZICNTR, .regs = zicntr_regs, .regs_n = ARRAY_SIZE(zicntr_regs),}
#define ZICSR_REGS_SUBLIST \
{"zicsr", .feature = KVM_RISCV_ISA_EXT_ZICSR, .regs = zicsr_regs, .regs_n = ARRAY_SIZE(zicsr_regs),}
#define ZIFENCEI_REGS_SUBLIST \
{"zifencei", .feature = KVM_RISCV_ISA_EXT_ZIFENCEI, .regs = zifencei_regs, .regs_n = ARRAY_SIZE(zifencei_regs),}
#define ZIHPM_REGS_SUBLIST \
{"zihpm", .feature = KVM_RISCV_ISA_EXT_ZIHPM, .regs = zihpm_regs, .regs_n = ARRAY_SIZE(zihpm_regs),}
#define AIA_REGS_SUBLIST \
{"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_regs, .regs_n = ARRAY_SIZE(aia_regs),}
#define FP_F_REGS_SUBLIST \
{"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
.regs_n = ARRAY_SIZE(fp_f_regs),}
#define FP_D_REGS_SUBLIST \
{"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
.regs_n = ARRAY_SIZE(fp_d_regs),}
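/*
 * Hedged sketch of how a new extension would be wired in ("zfoo" and
 * KVM_RISCV_ISA_EXT_ZFOO are made-up placeholders, not real extensions):
 *
 *	static __u64 zfoo_regs[] = {
 *		KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZFOO,
 *	};
 *	#define ZFOO_REGS_SUBLIST \
 *		{"zfoo", .feature = KVM_RISCV_ISA_EXT_ZFOO, .regs = zfoo_regs, .regs_n = ARRAY_SIZE(zfoo_regs),}
 *
 * plus a vcpu_reg_list config below and an entry in vcpu_configs[].
 */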
static struct vcpu_reg_list h_config = {
.sublists = {
BASE_SUBLIST,
H_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list zicbom_config = {
.sublists = {
BASE_SUBLIST,
ZICBOM_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list zicboz_config = {
.sublists = {
BASE_SUBLIST,
ZICBOZ_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list svpbmt_config = {
.sublists = {
BASE_SUBLIST,
SVPBMT_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list sstc_config = {
.sublists = {
BASE_SUBLIST,
SSTC_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list svinval_config = {
.sublists = {
BASE_SUBLIST,
SVINVAL_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list zihintpause_config = {
.sublists = {
BASE_SUBLIST,
ZIHINTPAUSE_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list zba_config = {
.sublists = {
BASE_SUBLIST,
ZBA_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list zbb_config = {
.sublists = {
BASE_SUBLIST,
ZBB_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list zbs_config = {
.sublists = {
BASE_SUBLIST,
ZBS_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list zicntr_config = {
.sublists = {
BASE_SUBLIST,
ZICNTR_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list zicsr_config = {
.sublists = {
BASE_SUBLIST,
ZICSR_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list zifencei_config = {
.sublists = {
BASE_SUBLIST,
ZIFENCEI_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list zihpm_config = {
.sublists = {
BASE_SUBLIST,
ZIHPM_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list aia_config = {
.sublists = {
BASE_SUBLIST,
AIA_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list fp_f_config = {
.sublists = {
BASE_SUBLIST,
FP_F_REGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list fp_d_config = {
.sublists = {
BASE_SUBLIST,
FP_D_REGS_SUBLIST,
{0},
},
};
struct vcpu_reg_list *vcpu_configs[] = {
&h_config,
&zicbom_config,
&zicboz_config,
&svpbmt_config,
&sstc_config,
&svinval_config,
&zihintpause_config,
&zba_config,
&zbb_config,
&zbs_config,
&zicntr_config,
&zicsr_config,
&zifencei_config,
&zihpm_config,
&aia_config,
&fp_f_config,
&fp_d_config,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
| linux-master | tools/testing/selftests/kvm/riscv/get-reg-list.c |
// SPDX-License-Identifier: GPL-2.0
/*
* page_fault_test.c - Test stage 2 faults.
*
* This test tries different combinations of guest accesses (e.g., write,
* S1PTW), backing source type (e.g., anon) and types of faults (e.g., read on
* hugetlbfs with a hole). It checks that the expected handling method is
* called (e.g., uffd faults with the right address and write/read flag).
*/
#define _GNU_SOURCE
#include <linux/bitmap.h>
#include <fcntl.h>
#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>
#include <asm/sysreg.h>
#include <linux/bitfield.h>
#include "guest_modes.h"
#include "userfaultfd_util.h"
/* Guest virtual addresses that point to the test page and its PTE. */
#define TEST_GVA 0xc0000000
#define TEST_EXEC_GVA (TEST_GVA + 0x8)
#define TEST_PTE_GVA 0xb0000000
#define TEST_DATA 0x0123456789ABCDEF
static uint64_t *guest_test_memory = (uint64_t *)TEST_GVA;
#define CMD_NONE (0)
#define CMD_SKIP_TEST (1ULL << 1)
#define CMD_HOLE_PT (1ULL << 2)
#define CMD_HOLE_DATA (1ULL << 3)
#define CMD_CHECK_WRITE_IN_DIRTY_LOG (1ULL << 4)
#define CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG (1ULL << 5)
#define CMD_CHECK_NO_WRITE_IN_DIRTY_LOG (1ULL << 6)
#define CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG (1ULL << 7)
#define CMD_SET_PTE_AF (1ULL << 8)
#define PREPARE_FN_NR 10
#define CHECK_FN_NR 10
static struct event_cnt {
int mmio_exits;
int fail_vcpu_runs;
int uffd_faults;
/* uffd_faults is incremented from multiple threads. */
pthread_mutex_t uffd_faults_mutex;
} events;
struct test_desc {
const char *name;
uint64_t mem_mark_cmd;
/* Skip the test if any prepare function returns false */
bool (*guest_prepare[PREPARE_FN_NR])(void);
void (*guest_test)(void);
void (*guest_test_check[CHECK_FN_NR])(void);
uffd_handler_t uffd_pt_handler;
uffd_handler_t uffd_data_handler;
void (*dabt_handler)(struct ex_regs *regs);
void (*iabt_handler)(struct ex_regs *regs);
void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run);
void (*fail_vcpu_run_handler)(int ret);
uint32_t pt_memslot_flags;
uint32_t data_memslot_flags;
bool skip;
struct event_cnt expected_events;
};
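/*
 * Hedged sketch of a minimal test description (field values illustrative;
 * guest_write64() is defined below):
 *
 *	static struct test_desc example = {
 *		.name = "write on anon memory",
 *		.guest_test = guest_write64,
 *		.expected_events = { 0 },
 *	};
 */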
struct test_params {
enum vm_mem_backing_src_type src_type;
struct test_desc *test_desc;
};
static inline void flush_tlb_page(uint64_t vaddr)
{
uint64_t page = vaddr >> 12;
dsb(ishst);
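	/* TLBI VAAE1IS takes the page number (VA[55:12]) in its low bits. */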
asm volatile("tlbi vaae1is, %0" :: "r" (page));
dsb(ish);
isb();
}
static void guest_write64(void)
{
uint64_t val;
WRITE_ONCE(*guest_test_memory, TEST_DATA);
val = READ_ONCE(*guest_test_memory);
GUEST_ASSERT_EQ(val, TEST_DATA);
}
/* Check the system for atomic instructions. */
static bool guest_check_lse(void)
{
uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
uint64_t atomic;
atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS), isar0);
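	/* ID_AA64ISAR0_EL1.Atomic >= 2 indicates FEAT_LSE. */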
return atomic >= 2;
}
static bool guest_check_dc_zva(void)
{
uint64_t dczid = read_sysreg(dczid_el0);
uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_DZP), dczid);
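	/* DCZID_EL0.DZP == 0 means "dc zva" is permitted. */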
return dzp == 0;
}
/* Compare and swap instruction. */
static void guest_cas(void)
{
uint64_t val;
GUEST_ASSERT(guest_check_lse());
asm volatile(".arch_extension lse\n"
"casal %0, %1, [%2]\n"
:: "r" (0ul), "r" (TEST_DATA), "r" (guest_test_memory));
val = READ_ONCE(*guest_test_memory);
GUEST_ASSERT_EQ(val, TEST_DATA);
}
static void guest_read64(void)
{
uint64_t val;
val = READ_ONCE(*guest_test_memory);
GUEST_ASSERT_EQ(val, 0);
}
/* Address translation instruction */
static void guest_at(void)
{
uint64_t par;
asm volatile("at s1e1r, %0" :: "r" (guest_test_memory));
par = read_sysreg(par_el1);
isb();
/* Bit 1 indicates whether the AT was successful */
GUEST_ASSERT_EQ(par & 1, 0);
}
/*
 * The block written by "dc zva" is 4 << DCZID_EL0.BS bytes, i.e., between 4
 * bytes and 2KiB, which is safe in our case as we need the write to happen
 * for at least a word, and not more than a page.
*/
static void guest_dc_zva(void)
{
uint16_t val;
asm volatile("dc zva, %0" :: "r" (guest_test_memory));
dsb(ish);
val = READ_ONCE(*guest_test_memory);
GUEST_ASSERT_EQ(val, 0);
}
/*
* Pre-indexing loads and stores don't have a valid syndrome (ESR_EL2.ISV==0).
* And that's special because KVM must take special care with those: they
* should still count as accesses for dirty logging or user-faulting, but
* should be handled differently on mmio.
*/
static void guest_ld_preidx(void)
{
uint64_t val;
uint64_t addr = TEST_GVA - 8;
/*
* This ends up accessing "TEST_GVA + 8 - 8", where "TEST_GVA - 8" is
	 * in a gap between memslots not backed by anything.
*/
asm volatile("ldr %0, [%1, #8]!"
: "=r" (val), "+r" (addr));
GUEST_ASSERT_EQ(val, 0);
GUEST_ASSERT_EQ(addr, TEST_GVA);
}
static void guest_st_preidx(void)
{
uint64_t val = TEST_DATA;
uint64_t addr = TEST_GVA - 8;
asm volatile("str %0, [%1, #8]!"
: "+r" (val), "+r" (addr));
GUEST_ASSERT_EQ(addr, TEST_GVA);
val = READ_ONCE(*guest_test_memory);
}
static bool guest_set_ha(void)
{
uint64_t mmfr1 = read_sysreg(id_aa64mmfr1_el1);
uint64_t hadbs, tcr;
/* Skip if HA is not supported. */
hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS), mmfr1);
if (hadbs == 0)
return false;
tcr = read_sysreg(tcr_el1) | TCR_EL1_HA;
write_sysreg(tcr, tcr_el1);
isb();
return true;
}
static bool guest_clear_pte_af(void)
{
*((uint64_t *)TEST_PTE_GVA) &= ~PTE_AF;
flush_tlb_page(TEST_GVA);
return true;
}
static void guest_check_pte_af(void)
{
dsb(ish);
GUEST_ASSERT_EQ(*((uint64_t *)TEST_PTE_GVA) & PTE_AF, PTE_AF);
}
static void guest_check_write_in_dirty_log(void)
{
GUEST_SYNC(CMD_CHECK_WRITE_IN_DIRTY_LOG);
}
static void guest_check_no_write_in_dirty_log(void)
{
GUEST_SYNC(CMD_CHECK_NO_WRITE_IN_DIRTY_LOG);
}
static void guest_check_s1ptw_wr_in_dirty_log(void)
{
GUEST_SYNC(CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG);
}
static void guest_check_no_s1ptw_wr_in_dirty_log(void)
{
GUEST_SYNC(CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG);
}
static void guest_exec(void)
{
int (*code)(void) = (int (*)(void))TEST_EXEC_GVA;
int ret;
ret = code();
GUEST_ASSERT_EQ(ret, 0x77);
}
static bool guest_prepare(struct test_desc *test)
{
bool (*prepare_fn)(void);
int i;
for (i = 0; i < PREPARE_FN_NR; i++) {
prepare_fn = test->guest_prepare[i];
if (prepare_fn && !prepare_fn())
return false;
}
return true;
}
static void guest_test_check(struct test_desc *test)
{
void (*check_fn)(void);
int i;
for (i = 0; i < CHECK_FN_NR; i++) {
check_fn = test->guest_test_check[i];
if (check_fn)
check_fn();
}
}
static void guest_code(struct test_desc *test)
{
if (!guest_prepare(test))
GUEST_SYNC(CMD_SKIP_TEST);
GUEST_SYNC(test->mem_mark_cmd);
if (test->guest_test)
test->guest_test();
guest_test_check(test);
GUEST_DONE();
}
static void no_dabt_handler(struct ex_regs *regs)
{
GUEST_FAIL("Unexpected dabt, far_el1 = 0x%llx", read_sysreg(far_el1));
}
static void no_iabt_handler(struct ex_regs *regs)
{
GUEST_FAIL("Unexpected iabt, pc = 0x%lx", regs->pc);
}
static struct uffd_args {
char *copy;
void *hva;
uint64_t paging_size;
} pt_args, data_args;
/* Returns 0 on success, or -1 (with errno set by UFFDIO_COPY) on failure. */
static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
struct uffd_args *args)
{
uint64_t addr = msg->arg.pagefault.address;
uint64_t flags = msg->arg.pagefault.flags;
struct uffdio_copy copy;
int ret;
TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING,
"The only expected UFFD mode is MISSING");
TEST_ASSERT_EQ(addr, (uint64_t)args->hva);
pr_debug("uffd fault: addr=%p write=%d\n",
(void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE));
copy.src = (uint64_t)args->copy;
copy.dst = addr;
copy.len = args->paging_size;
copy.mode = 0;
ret = ioctl(uffd, UFFDIO_COPY, ©);
if (ret == -1) {
pr_info("Failed UFFDIO_COPY in 0x%lx with errno: %d\n",
addr, errno);
return ret;
}
pthread_mutex_lock(&events.uffd_faults_mutex);
events.uffd_faults += 1;
pthread_mutex_unlock(&events.uffd_faults_mutex);
return 0;
}
static int uffd_pt_handler(int mode, int uffd, struct uffd_msg *msg)
{
return uffd_generic_handler(mode, uffd, msg, &pt_args);
}
static int uffd_data_handler(int mode, int uffd, struct uffd_msg *msg)
{
return uffd_generic_handler(mode, uffd, msg, &data_args);
}
static void setup_uffd_args(struct userspace_mem_region *region,
struct uffd_args *args)
{
args->hva = (void *)region->region.userspace_addr;
args->paging_size = region->region.memory_size;
args->copy = malloc(args->paging_size);
TEST_ASSERT(args->copy, "Failed to allocate data copy.");
memcpy(args->copy, args->hva, args->paging_size);
}
static void setup_uffd(struct kvm_vm *vm, struct test_params *p,
struct uffd_desc **pt_uffd, struct uffd_desc **data_uffd)
{
struct test_desc *test = p->test_desc;
int uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_PT), &pt_args);
setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_TEST_DATA), &data_args);
*pt_uffd = NULL;
if (test->uffd_pt_handler)
*pt_uffd = uffd_setup_demand_paging(uffd_mode, 0,
pt_args.hva,
pt_args.paging_size,
test->uffd_pt_handler);
*data_uffd = NULL;
if (test->uffd_data_handler)
*data_uffd = uffd_setup_demand_paging(uffd_mode, 0,
data_args.hva,
data_args.paging_size,
test->uffd_data_handler);
}
static void free_uffd(struct test_desc *test, struct uffd_desc *pt_uffd,
struct uffd_desc *data_uffd)
{
if (test->uffd_pt_handler)
uffd_stop_demand_paging(pt_uffd);
if (test->uffd_data_handler)
uffd_stop_demand_paging(data_uffd);
free(pt_args.copy);
free(data_args.copy);
}
static int uffd_no_handler(int mode, int uffd, struct uffd_msg *msg)
{
TEST_FAIL("There was no UFFD fault expected.");
return -1;
}
/* Returns true to continue the test; failures to punch the hole are fatal. */
static bool punch_hole_in_backing_store(struct kvm_vm *vm,
struct userspace_mem_region *region)
{
void *hva = (void *)region->region.userspace_addr;
uint64_t paging_size = region->region.memory_size;
int ret, fd = region->fd;
if (fd != -1) {
ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
0, paging_size);
TEST_ASSERT(ret == 0, "fallocate failed\n");
} else {
ret = madvise(hva, paging_size, MADV_DONTNEED);
TEST_ASSERT(ret == 0, "madvise failed\n");
}
return true;
}
static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run)
{
struct userspace_mem_region *region;
void *hva;
region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
hva = (void *)region->region.userspace_addr;
TEST_ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr);
memcpy(hva, run->mmio.data, run->mmio.len);
events.mmio_exits += 1;
}
static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run)
{
uint64_t data;
memcpy(&data, run->mmio.data, sizeof(data));
pr_debug("addr=%lld len=%d w=%d data=%lx\n",
run->mmio.phys_addr, run->mmio.len,
run->mmio.is_write, data);
TEST_FAIL("There was no MMIO exit expected.");
}
static bool check_write_in_dirty_log(struct kvm_vm *vm,
struct userspace_mem_region *region,
uint64_t host_pg_nr)
{
unsigned long *bmap;
bool first_page_dirty;
uint64_t size = region->region.memory_size;
	/* getpagesize() is not always equal to vm->page_size */
bmap = bitmap_zalloc(size / getpagesize());
kvm_vm_get_dirty_log(vm, region->region.slot, bmap);
first_page_dirty = test_bit(host_pg_nr, bmap);
free(bmap);
return first_page_dirty;
}
/* Returns true to continue the test, and false if it should be skipped. */
static bool handle_cmd(struct kvm_vm *vm, int cmd)
{
struct userspace_mem_region *data_region, *pt_region;
bool continue_test = true;
uint64_t pte_gpa, pte_pg;
data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
pt_region = vm_get_mem_region(vm, MEM_REGION_PT);
pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA));
pte_pg = (pte_gpa - pt_region->region.guest_phys_addr) / getpagesize();
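	/*
	 * pte_pg is the host-page index of TEST_GVA's PTE within the PT
	 * memslot; it selects the bit to check in the PT dirty bitmap.
	 */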
if (cmd == CMD_SKIP_TEST)
continue_test = false;
if (cmd & CMD_HOLE_PT)
continue_test = punch_hole_in_backing_store(vm, pt_region);
if (cmd & CMD_HOLE_DATA)
continue_test = punch_hole_in_backing_store(vm, data_region);
if (cmd & CMD_CHECK_WRITE_IN_DIRTY_LOG)
TEST_ASSERT(check_write_in_dirty_log(vm, data_region, 0),
"Missing write in dirty log");
if (cmd & CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG)
TEST_ASSERT(check_write_in_dirty_log(vm, pt_region, pte_pg),
"Missing s1ptw write in dirty log");
if (cmd & CMD_CHECK_NO_WRITE_IN_DIRTY_LOG)
TEST_ASSERT(!check_write_in_dirty_log(vm, data_region, 0),
"Unexpected write in dirty log");
if (cmd & CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG)
TEST_ASSERT(!check_write_in_dirty_log(vm, pt_region, pte_pg),
"Unexpected s1ptw write in dirty log");
return continue_test;
}
void fail_vcpu_run_no_handler(int ret)
{
TEST_FAIL("Unexpected vcpu run failure\n");
}
void fail_vcpu_run_mmio_no_syndrome_handler(int ret)
{
TEST_ASSERT(errno == ENOSYS,
"The mmio handler should have returned not implemented.");
events.fail_vcpu_runs += 1;
}
typedef uint32_t aarch64_insn_t;
extern aarch64_insn_t __exec_test[2];
noinline void __return_0x77(void)
{
asm volatile("__exec_test: mov x0, #0x77\n"
"ret\n");
}
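/*
 * __exec_test labels the two instructions above (mov x0, #0x77; ret).
 * load_exec_code_for_test() copies them into the test-data memslot so
 * that guest_exec() can run them from TEST_EXEC_GVA.
 */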
/*
* Note that this function runs on the host before the test VM starts: there's
* no need to sync the D$ and I$ caches.
*/
static void load_exec_code_for_test(struct kvm_vm *vm)
{
uint64_t *code;
struct userspace_mem_region *region;
void *hva;
region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
hva = (void *)region->region.userspace_addr;
assert(TEST_EXEC_GVA > TEST_GVA);
code = hva + TEST_EXEC_GVA - TEST_GVA;
memcpy(code, __exec_test, sizeof(__exec_test));
}
static void setup_abort_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
struct test_desc *test)
{
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_DABT, no_dabt_handler);
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_IABT, no_iabt_handler);
}
static void setup_gva_maps(struct kvm_vm *vm)
{
struct userspace_mem_region *region;
uint64_t pte_gpa;
region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
/* Map TEST_GVA first. This will install a new PTE. */
virt_pg_map(vm, TEST_GVA, region->region.guest_phys_addr);
/* Then map TEST_PTE_GVA to the above PTE. */
pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA));
virt_pg_map(vm, TEST_PTE_GVA, pte_gpa);
}
enum pf_test_memslots {
CODE_AND_DATA_MEMSLOT,
PAGE_TABLE_MEMSLOT,
TEST_DATA_MEMSLOT,
};
/*
* Create a memslot for code and data at pfn=0, and test-data and PT ones
* at max_gfn.
*/
static void setup_memslots(struct kvm_vm *vm, struct test_params *p)
{
uint64_t backing_src_pagesz = get_backing_src_pagesz(p->src_type);
uint64_t guest_page_size = vm->page_size;
uint64_t max_gfn = vm_compute_max_gfn(vm);
/* Enough for 2M of code when using 4K guest pages. */
uint64_t code_npages = 512;
uint64_t pt_size, data_size, data_gpa;
/*
* This test requires 1 pgd, 2 pud, 4 pmd, and 6 pte pages when using
	 * VM_MODE_P48V48_4K. Note that the .text takes ~1.6MB. That's 13
	 * pages. VM_MODE_P48V48_4K is the mode with the most PT pages; let's
	 * use twice that just in case.
*/
pt_size = 26 * guest_page_size;
/* memslot sizes and gpa's must be aligned to the backing page size */
pt_size = align_up(pt_size, backing_src_pagesz);
data_size = align_up(guest_page_size, backing_src_pagesz);
data_gpa = (max_gfn * guest_page_size) - data_size;
data_gpa = align_down(data_gpa, backing_src_pagesz);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0,
CODE_AND_DATA_MEMSLOT, code_npages, 0);
vm->memslots[MEM_REGION_CODE] = CODE_AND_DATA_MEMSLOT;
vm->memslots[MEM_REGION_DATA] = CODE_AND_DATA_MEMSLOT;
vm_userspace_mem_region_add(vm, p->src_type, data_gpa - pt_size,
PAGE_TABLE_MEMSLOT, pt_size / guest_page_size,
p->test_desc->pt_memslot_flags);
vm->memslots[MEM_REGION_PT] = PAGE_TABLE_MEMSLOT;
vm_userspace_mem_region_add(vm, p->src_type, data_gpa, TEST_DATA_MEMSLOT,
data_size / guest_page_size,
p->test_desc->data_memslot_flags);
vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
}
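/*
 * Place the ucall MMIO page right after the test-data memslot; that GPA is
 * not backed by any memslot, so ucall accesses exit to userspace as MMIO.
 */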
static void setup_ucall(struct kvm_vm *vm)
{
struct userspace_mem_region *region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
ucall_init(vm, region->region.guest_phys_addr + region->region.memory_size);
}
static void setup_default_handlers(struct test_desc *test)
{
if (!test->mmio_handler)
test->mmio_handler = mmio_no_handler;
if (!test->fail_vcpu_run_handler)
test->fail_vcpu_run_handler = fail_vcpu_run_no_handler;
}
static void check_event_counts(struct test_desc *test)
{
TEST_ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
TEST_ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
TEST_ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
}
static void print_test_banner(enum vm_guest_mode mode, struct test_params *p)
{
struct test_desc *test = p->test_desc;
pr_debug("Test: %s\n", test->name);
pr_debug("Testing guest mode: %s\n", vm_guest_mode_string(mode));
pr_debug("Testing memory backing src type: %s\n",
vm_mem_backing_src_alias(p->src_type)->name);
}
static void reset_event_counts(void)
{
memset(&events, 0, sizeof(events));
}
/*
* This function either succeeds, skips the test (after setting test->skip), or
* fails with a TEST_FAIL that aborts all tests.
*/
static void vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
struct test_desc *test)
{
struct kvm_run *run;
struct ucall uc;
int ret;
run = vcpu->run;
for (;;) {
ret = _vcpu_run(vcpu);
if (ret) {
test->fail_vcpu_run_handler(ret);
goto done;
}
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
if (!handle_cmd(vm, uc.args[1])) {
test->skip = true;
goto done;
}
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
goto done;
case UCALL_NONE:
if (run->exit_reason == KVM_EXIT_MMIO)
test->mmio_handler(vm, run);
break;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
done:
pr_debug(test->skip ? "Skipped.\n" : "Done.\n");
}
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = (struct test_params *)arg;
struct test_desc *test = p->test_desc;
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
struct uffd_desc *pt_uffd, *data_uffd;
print_test_banner(mode, p);
vm = ____vm_create(mode);
setup_memslots(vm, p);
kvm_vm_elf_load(vm, program_invocation_name);
setup_ucall(vm);
vcpu = vm_vcpu_add(vm, 0, guest_code);
setup_gva_maps(vm);
reset_event_counts();
/*
* Set some code in the data memslot for the guest to execute (only
* applicable to the EXEC tests). This has to be done before
* setup_uffd() as that function copies the memslot data for the uffd
* handler.
*/
load_exec_code_for_test(vm);
setup_uffd(vm, p, &pt_uffd, &data_uffd);
setup_abort_handlers(vm, vcpu, test);
setup_default_handlers(test);
vcpu_args_set(vcpu, 1, test);
vcpu_run_loop(vm, vcpu, test);
kvm_vm_free(vm);
free_uffd(test, pt_uffd, data_uffd);
/*
* Make sure we check the events after the uffd threads have exited,
* which means they updated their respective event counters.
*/
if (!test->skip)
check_event_counts(test);
}
static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-s mem-type]\n", name);
puts("");
guest_modes_help();
backing_src_help("-s");
puts("");
}
#define SNAME(s) #s
#define SCAT2(a, b) SNAME(a ## _ ## b)
#define SCAT3(a, b, c) SCAT2(a, SCAT2(b, c))
#define SCAT4(a, b, c, d) SCAT2(a, SCAT3(b, c, d))
#define _CHECK(_test) _CHECK_##_test
#define _PREPARE(_test) _PREPARE_##_test
#define _PREPARE_guest_read64 NULL
#define _PREPARE_guest_ld_preidx NULL
#define _PREPARE_guest_write64 NULL
#define _PREPARE_guest_st_preidx NULL
#define _PREPARE_guest_exec NULL
#define _PREPARE_guest_at NULL
#define _PREPARE_guest_dc_zva guest_check_dc_zva
#define _PREPARE_guest_cas guest_check_lse
/* With or without access flag checks */
#define _PREPARE_with_af guest_set_ha, guest_clear_pte_af
#define _PREPARE_no_af NULL
#define _CHECK_with_af guest_check_pte_af
#define _CHECK_no_af NULL
/* Performs an access and checks that no faults were triggered. */
#define TEST_ACCESS(_access, _with_af, _mark_cmd) \
{ \
.name = SCAT3(_access, _with_af, #_mark_cmd), \
.guest_prepare = { _PREPARE(_with_af), \
_PREPARE(_access) }, \
.mem_mark_cmd = _mark_cmd, \
.guest_test = _access, \
.guest_test_check = { _CHECK(_with_af) }, \
.expected_events = { 0 }, \
}
#define TEST_UFFD(_access, _with_af, _mark_cmd, \
_uffd_data_handler, _uffd_pt_handler, _uffd_faults) \
{ \
.name = SCAT4(uffd, _access, _with_af, #_mark_cmd), \
.guest_prepare = { _PREPARE(_with_af), \
_PREPARE(_access) }, \
.guest_test = _access, \
.mem_mark_cmd = _mark_cmd, \
.guest_test_check = { _CHECK(_with_af) }, \
.uffd_data_handler = _uffd_data_handler, \
.uffd_pt_handler = _uffd_pt_handler, \
.expected_events = { .uffd_faults = _uffd_faults, }, \
}
#define TEST_DIRTY_LOG(_access, _with_af, _test_check, _pt_check) \
{ \
.name = SCAT3(dirty_log, _access, _with_af), \
.data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
.pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
.guest_prepare = { _PREPARE(_with_af), \
_PREPARE(_access) }, \
.guest_test = _access, \
.guest_test_check = { _CHECK(_with_af), _test_check, _pt_check }, \
.expected_events = { 0 }, \
}
#define TEST_UFFD_AND_DIRTY_LOG(_access, _with_af, _uffd_data_handler, \
_uffd_faults, _test_check, _pt_check) \
{ \
.name = SCAT3(uffd_and_dirty_log, _access, _with_af), \
.data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
.pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
.guest_prepare = { _PREPARE(_with_af), \
_PREPARE(_access) }, \
.guest_test = _access, \
.mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
.guest_test_check = { _CHECK(_with_af), _test_check, _pt_check }, \
.uffd_data_handler = _uffd_data_handler, \
.uffd_pt_handler = uffd_pt_handler, \
.expected_events = { .uffd_faults = _uffd_faults, }, \
}
#define TEST_RO_MEMSLOT(_access, _mmio_handler, _mmio_exits) \
{ \
.name = SCAT2(ro_memslot, _access), \
.data_memslot_flags = KVM_MEM_READONLY, \
.pt_memslot_flags = KVM_MEM_READONLY, \
.guest_prepare = { _PREPARE(_access) }, \
.guest_test = _access, \
.mmio_handler = _mmio_handler, \
.expected_events = { .mmio_exits = _mmio_exits }, \
}
#define TEST_RO_MEMSLOT_NO_SYNDROME(_access) \
{ \
.name = SCAT2(ro_memslot_no_syndrome, _access), \
.data_memslot_flags = KVM_MEM_READONLY, \
.pt_memslot_flags = KVM_MEM_READONLY, \
.guest_test = _access, \
.fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
.expected_events = { .fail_vcpu_runs = 1 }, \
}
#define TEST_RO_MEMSLOT_AND_DIRTY_LOG(_access, _mmio_handler, _mmio_exits, \
_test_check) \
{ \
.name = SCAT2(ro_memslot, _access), \
.data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
.pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
.guest_prepare = { _PREPARE(_access) }, \
.guest_test = _access, \
.guest_test_check = { _test_check }, \
.mmio_handler = _mmio_handler, \
.expected_events = { .mmio_exits = _mmio_exits}, \
}
#define TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(_access, _test_check) \
{ \
.name = SCAT2(ro_memslot_no_syn_and_dlog, _access), \
.data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
.pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
.guest_test = _access, \
.guest_test_check = { _test_check }, \
.fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
.expected_events = { .fail_vcpu_runs = 1 }, \
}
#define TEST_RO_MEMSLOT_AND_UFFD(_access, _mmio_handler, _mmio_exits, \
_uffd_data_handler, _uffd_faults) \
{ \
.name = SCAT2(ro_memslot_uffd, _access), \
.data_memslot_flags = KVM_MEM_READONLY, \
.pt_memslot_flags = KVM_MEM_READONLY, \
.mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
.guest_prepare = { _PREPARE(_access) }, \
.guest_test = _access, \
.uffd_data_handler = _uffd_data_handler, \
.uffd_pt_handler = uffd_pt_handler, \
.mmio_handler = _mmio_handler, \
.expected_events = { .mmio_exits = _mmio_exits, \
.uffd_faults = _uffd_faults }, \
}
#define TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(_access, _uffd_data_handler, \
_uffd_faults) \
{ \
.name = SCAT2(ro_memslot_no_syndrome, _access), \
.data_memslot_flags = KVM_MEM_READONLY, \
.pt_memslot_flags = KVM_MEM_READONLY, \
.mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
.guest_test = _access, \
.uffd_data_handler = _uffd_data_handler, \
.uffd_pt_handler = uffd_pt_handler, \
.fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
.expected_events = { .fail_vcpu_runs = 1, \
.uffd_faults = _uffd_faults }, \
}
static struct test_desc tests[] = {
/* Check that HW is setting the Access Flag (AF) (sanity checks). */
TEST_ACCESS(guest_read64, with_af, CMD_NONE),
TEST_ACCESS(guest_ld_preidx, with_af, CMD_NONE),
TEST_ACCESS(guest_cas, with_af, CMD_NONE),
TEST_ACCESS(guest_write64, with_af, CMD_NONE),
TEST_ACCESS(guest_st_preidx, with_af, CMD_NONE),
TEST_ACCESS(guest_dc_zva, with_af, CMD_NONE),
TEST_ACCESS(guest_exec, with_af, CMD_NONE),
/*
* Punch a hole in the data backing store, and then try multiple
	 * accesses: reads should return zeroes, and writes should
	 * re-populate the page. Moreover, the test also checks that no
* exception was generated in the guest. Note that this
* reading/writing behavior is the same as reading/writing a
* punched page (with fallocate(FALLOC_FL_PUNCH_HOLE)) from
* userspace.
*/
TEST_ACCESS(guest_read64, no_af, CMD_HOLE_DATA),
TEST_ACCESS(guest_cas, no_af, CMD_HOLE_DATA),
TEST_ACCESS(guest_ld_preidx, no_af, CMD_HOLE_DATA),
TEST_ACCESS(guest_write64, no_af, CMD_HOLE_DATA),
TEST_ACCESS(guest_st_preidx, no_af, CMD_HOLE_DATA),
TEST_ACCESS(guest_at, no_af, CMD_HOLE_DATA),
TEST_ACCESS(guest_dc_zva, no_af, CMD_HOLE_DATA),
/*
* Punch holes in the data and PT backing stores and mark them for
* userfaultfd handling. This should result in 2 faults: the access
* on the data backing store, and its respective S1 page table walk
* (S1PTW).
*/
TEST_UFFD(guest_read64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_read64, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_cas, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_handler, uffd_pt_handler, 2),
/*
* Can't test guest_at with_af as it's IMPDEF whether the AF is set.
* The S1PTW fault should still be marked as a write.
*/
TEST_UFFD(guest_at, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_no_handler, uffd_pt_handler, 1),
TEST_UFFD(guest_ld_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_write64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_dc_zva, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_st_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_exec, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_handler, uffd_pt_handler, 2),
/*
* Try accesses when the data and PT memory regions are both
* tracked for dirty logging.
*/
TEST_DIRTY_LOG(guest_read64, with_af, guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_read64, no_af, guest_check_no_write_in_dirty_log,
guest_check_no_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_ld_preidx, with_af,
guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_at, no_af, guest_check_no_write_in_dirty_log,
guest_check_no_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_exec, with_af, guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_write64, with_af, guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_cas, with_af, guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
/*
* Access when the data and PT memory regions are both marked for
* dirty logging and UFFD at the same time. The expected result is
* that writes should mark the dirty log and trigger a userfaultfd
* write fault. Reads/execs should result in a read userfaultfd
* fault, and nothing in the dirty log. Any S1PTW should result in
* a write in the dirty log and a userfaultfd write.
*/
TEST_UFFD_AND_DIRTY_LOG(guest_read64, with_af,
uffd_data_handler, 2,
guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_read64, no_af,
uffd_data_handler, 2,
guest_check_no_write_in_dirty_log,
guest_check_no_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_ld_preidx, with_af,
uffd_data_handler,
2, guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_at, with_af, uffd_no_handler, 1,
guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_exec, with_af,
uffd_data_handler, 2,
guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af,
uffd_data_handler,
2, guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_cas, with_af,
uffd_data_handler, 2,
guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_dc_zva, with_af,
uffd_data_handler,
2, guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_st_preidx, with_af,
uffd_data_handler, 2,
guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
/*
* Access when both the PT and data regions are marked read-only
* (with KVM_MEM_READONLY). Writes with a syndrome result in an
* MMIO exit, writes with no syndrome (e.g., CAS) result in a
	 * failed vcpu run, and reads/execs with and without syndromes do
* not fault.
*/
TEST_RO_MEMSLOT(guest_read64, 0, 0),
TEST_RO_MEMSLOT(guest_ld_preidx, 0, 0),
TEST_RO_MEMSLOT(guest_at, 0, 0),
TEST_RO_MEMSLOT(guest_exec, 0, 0),
TEST_RO_MEMSLOT(guest_write64, mmio_on_test_gpa_handler, 1),
TEST_RO_MEMSLOT_NO_SYNDROME(guest_dc_zva),
TEST_RO_MEMSLOT_NO_SYNDROME(guest_cas),
TEST_RO_MEMSLOT_NO_SYNDROME(guest_st_preidx),
/*
* The PT and data regions are both read-only and marked
* for dirty logging at the same time. The expected result is that
* for writes there should be no write in the dirty log. The
* readonly handling is the same as if the memslot was not marked
* for dirty logging: writes with a syndrome result in an MMIO
* exit, and writes with no syndrome result in a failed vcpu run.
*/
TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_read64, 0, 0,
guest_check_no_write_in_dirty_log),
TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_ld_preidx, 0, 0,
guest_check_no_write_in_dirty_log),
TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_at, 0, 0,
guest_check_no_write_in_dirty_log),
TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_exec, 0, 0,
guest_check_no_write_in_dirty_log),
TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_write64, mmio_on_test_gpa_handler,
1, guest_check_no_write_in_dirty_log),
TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_dc_zva,
guest_check_no_write_in_dirty_log),
TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_cas,
guest_check_no_write_in_dirty_log),
TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_st_preidx,
guest_check_no_write_in_dirty_log),
/*
* The PT and data regions are both read-only and punched with
* holes tracked with userfaultfd. The expected result is the
* union of both userfaultfd and read-only behaviors. For example,
* write accesses result in a userfaultfd write fault and an MMIO
* exit. Writes with no syndrome result in a failed vcpu run and
* no userfaultfd write fault. Reads result in userfaultfd getting
* triggered.
*/
TEST_RO_MEMSLOT_AND_UFFD(guest_read64, 0, 0, uffd_data_handler, 2),
TEST_RO_MEMSLOT_AND_UFFD(guest_ld_preidx, 0, 0, uffd_data_handler, 2),
TEST_RO_MEMSLOT_AND_UFFD(guest_at, 0, 0, uffd_no_handler, 1),
TEST_RO_MEMSLOT_AND_UFFD(guest_exec, 0, 0, uffd_data_handler, 2),
TEST_RO_MEMSLOT_AND_UFFD(guest_write64, mmio_on_test_gpa_handler, 1,
uffd_data_handler, 2),
TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_cas, uffd_data_handler, 2),
TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_dc_zva, uffd_no_handler, 1),
TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_st_preidx, uffd_no_handler, 1),
{ 0 }
};
static void for_each_test_and_guest_mode(enum vm_mem_backing_src_type src_type)
{
struct test_desc *t;
for (t = &tests[0]; t->name; t++) {
if (t->skip)
continue;
struct test_params p = {
.src_type = src_type,
.test_desc = t,
};
for_each_guest_mode(run_test, &p);
}
}
int main(int argc, char *argv[])
{
enum vm_mem_backing_src_type src_type;
int opt;
src_type = DEFAULT_VM_MEM_SRC;
while ((opt = getopt(argc, argv, "hm:s:")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
break;
case 's':
src_type = parse_backing_src_type(optarg);
break;
case 'h':
default:
help(argv[0]);
exit(0);
}
}
for_each_test_and_guest_mode(src_type);
return 0;
}
| linux-master | tools/testing/selftests/kvm/aarch64/page_fault_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* vgic_irq.c - Test userspace injection of IRQs
*
* This test validates the injection of IRQs from userspace using various
* methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the
* host to inject a specific intid via a GUEST_SYNC call, and then checks that
* it received it.
*/
#include <asm/kvm.h>
#include <asm/kvm_para.h>
#include <sys/eventfd.h>
#include <linux/sizes.h>
#include "processor.h"
#include "test_util.h"
#include "kvm_util.h"
#include "gic.h"
#include "gic_v3.h"
#include "vgic.h"
#define GICD_BASE_GPA 0x08000000ULL
#define GICR_BASE_GPA 0x080A0000ULL
/*
* Stores the user specified args; it's passed to the guest and to every test
* function.
*/
struct test_args {
uint32_t nr_irqs; /* number of KVM supported IRQs. */
bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
bool level_sensitive; /* 1 is level, 0 is edge */
int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
};
/*
* KVM implements 32 priority levels:
* 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8
*
* Note that these macros will still be correct in the case that KVM implements
* more priority levels. Also note that 32 is the minimum for GICv3 and GICv2.
*/
#define KVM_NUM_PRIOS 32
#define KVM_PRIO_SHIFT 3 /* steps of 8 = 1 << 3 */
#define KVM_PRIO_STEPS (1 << KVM_PRIO_SHIFT) /* 8 */
#define LOWEST_PRIO (KVM_NUM_PRIOS - 1)
#define CPU_PRIO_MASK (LOWEST_PRIO << KVM_PRIO_SHIFT) /* 0xf8 */
#define IRQ_DEFAULT_PRIO (LOWEST_PRIO - 1)
#define IRQ_DEFAULT_PRIO_REG (IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */
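/*
 * For example, priority level 5 is programmed into the priority
 * registers as 5 << KVM_PRIO_SHIFT = 0x28; level 0 (0x00) is the
 * highest priority of all.
 */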
static void *dist = (void *)GICD_BASE_GPA;
static void *redist = (void *)GICR_BASE_GPA;
/*
* The kvm_inject_* utilities are used by the guest to ask the host to inject
* interrupts (e.g., using the KVM_IRQ_LINE ioctl).
*/
typedef enum {
KVM_INJECT_EDGE_IRQ_LINE = 1,
KVM_SET_IRQ_LINE,
KVM_SET_IRQ_LINE_HIGH,
KVM_SET_LEVEL_INFO_HIGH,
KVM_INJECT_IRQFD,
KVM_WRITE_ISPENDR,
KVM_WRITE_ISACTIVER,
} kvm_inject_cmd;
struct kvm_inject_args {
kvm_inject_cmd cmd;
uint32_t first_intid;
uint32_t num;
int level;
bool expect_failure;
};
/* Used on the guest side to perform the hypercall. */
static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
uint32_t num, int level, bool expect_failure);
/* Used on the host side to get the hypercall info. */
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
struct kvm_inject_args *args);
#define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure) \
kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure)
#define KVM_INJECT_MULTI(cmd, intid, num) \
_KVM_INJECT_MULTI(cmd, intid, num, false)
#define _KVM_INJECT(cmd, intid, expect_failure) \
_KVM_INJECT_MULTI(cmd, intid, 1, expect_failure)
#define KVM_INJECT(cmd, intid) \
_KVM_INJECT_MULTI(cmd, intid, 1, false)
#define KVM_ACTIVATE(cmd, intid) \
kvm_inject_call(cmd, intid, 1, 1, false);
struct kvm_inject_desc {
kvm_inject_cmd cmd;
	/* can inject SGIs, PPIs, and/or SPIs. */
bool sgi, ppi, spi;
};
static struct kvm_inject_desc inject_edge_fns[] = {
/* sgi ppi spi */
{ KVM_INJECT_EDGE_IRQ_LINE, false, false, true },
{ KVM_INJECT_IRQFD, false, false, true },
{ KVM_WRITE_ISPENDR, true, false, true },
{ 0, },
};
static struct kvm_inject_desc inject_level_fns[] = {
/* sgi ppi spi */
{ KVM_SET_IRQ_LINE_HIGH, false, true, true },
{ KVM_SET_LEVEL_INFO_HIGH, false, true, true },
{ KVM_INJECT_IRQFD, false, false, true },
{ KVM_WRITE_ISPENDR, false, true, true },
{ 0, },
};
static struct kvm_inject_desc set_active_fns[] = {
/* sgi ppi spi */
{ KVM_WRITE_ISACTIVER, true, true, true },
{ 0, },
};
#define for_each_inject_fn(t, f) \
for ((f) = (t); (f)->cmd; (f)++)
#define for_each_supported_inject_fn(args, t, f) \
for_each_inject_fn(t, f) \
if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)
#define for_each_supported_activate_fn(args, t, f) \
for_each_supported_inject_fn((args), (t), (f))
/* Shared between the guest main thread and the IRQ handlers. */
volatile uint64_t irq_handled;
volatile uint32_t irqnr_received[MAX_SPI + 1];
static void reset_stats(void)
{
int i;
irq_handled = 0;
for (i = 0; i <= MAX_SPI; i++)
irqnr_received[i] = 0;
}
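/*
 * ICV_AP1R0_EL1 has one bit set per active group-1 priority; it must
 * read as 0 once all interrupts have been deactivated.
 */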
static uint64_t gic_read_ap1r0(void)
{
uint64_t reg = read_sysreg_s(SYS_ICV_AP1R0_EL1);
dsb(sy);
return reg;
}
static void gic_write_ap1r0(uint64_t val)
{
write_sysreg_s(val, SYS_ICV_AP1R0_EL1);
isb();
}
static void guest_set_irq_line(uint32_t intid, uint32_t level);
static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
{
uint32_t intid = gic_get_and_ack_irq();
if (intid == IAR_SPURIOUS)
return;
GUEST_ASSERT(gic_irq_get_active(intid));
if (!level_sensitive)
GUEST_ASSERT(!gic_irq_get_pending(intid));
if (level_sensitive)
guest_set_irq_line(intid, 0);
GUEST_ASSERT(intid < MAX_SPI);
irqnr_received[intid] += 1;
irq_handled += 1;
gic_set_eoi(intid);
GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
if (eoi_split)
gic_set_dir(intid);
GUEST_ASSERT(!gic_irq_get_active(intid));
GUEST_ASSERT(!gic_irq_get_pending(intid));
}
static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
uint32_t num, int level, bool expect_failure)
{
struct kvm_inject_args args = {
.cmd = cmd,
.first_intid = first_intid,
.num = num,
.level = level,
.expect_failure = expect_failure,
};
GUEST_SYNC(&args);
}
#define GUEST_ASSERT_IAR_EMPTY() \
do { \
uint32_t _intid; \
_intid = gic_get_and_ack_irq(); \
GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS); \
} while (0)
#define CAT_HELPER(a, b) a ## b
#define CAT(a, b) CAT_HELPER(a, b)
#define PREFIX guest_irq_handler_
#define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev))
#define GENERATE_GUEST_IRQ_HANDLER(split, lev) \
static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs) \
{ \
guest_irq_generic_handler(split, lev); \
}
GENERATE_GUEST_IRQ_HANDLER(0, 0);
GENERATE_GUEST_IRQ_HANDLER(0, 1);
GENERATE_GUEST_IRQ_HANDLER(1, 0);
GENERATE_GUEST_IRQ_HANDLER(1, 1);
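/*
 * E.g., GENERATE_GUEST_IRQ_HANDLER(1, 0) defines guest_irq_handler_10(),
 * which calls guest_irq_generic_handler(eoi_split=1, level_sensitive=0).
 */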
static void (*guest_irq_handlers[2][2])(struct ex_regs *) = {
{GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),},
{GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),},
};
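/* Indexed as guest_irq_handlers[eoi_split][level_sensitive]; see test_vgic(). */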
static void reset_priorities(struct test_args *args)
{
int i;
for (i = 0; i < args->nr_irqs; i++)
gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
}
static void guest_set_irq_line(uint32_t intid, uint32_t level)
{
kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
}
static void test_inject_fail(struct test_args *args,
uint32_t intid, kvm_inject_cmd cmd)
{
reset_stats();
_KVM_INJECT(cmd, intid, true);
/* no IRQ to handle on entry */
GUEST_ASSERT_EQ(irq_handled, 0);
GUEST_ASSERT_IAR_EMPTY();
}
static void guest_inject(struct test_args *args,
uint32_t first_intid, uint32_t num,
kvm_inject_cmd cmd)
{
uint32_t i;
reset_stats();
/* Cycle over all priorities to make things more interesting. */
for (i = first_intid; i < num + first_intid; i++)
gic_set_priority(i, (i % (KVM_NUM_PRIOS - 1)) << 3);
asm volatile("msr daifset, #2" : : : "memory");
KVM_INJECT_MULTI(cmd, first_intid, num);
while (irq_handled < num) {
asm volatile("wfi\n"
"msr daifclr, #2\n"
/* handle IRQ */
"msr daifset, #2\n"
: : : "memory");
}
asm volatile("msr daifclr, #2" : : : "memory");
GUEST_ASSERT_EQ(irq_handled, num);
for (i = first_intid; i < num + first_intid; i++)
GUEST_ASSERT_EQ(irqnr_received[i], 1);
GUEST_ASSERT_IAR_EMPTY();
reset_priorities(args);
}
/*
* Restore the active state of multiple concurrent IRQs (given by
* concurrent_irqs). This does what a live-migration would do on the
* destination side assuming there are some active IRQs that were not
* deactivated yet.
*/
static void guest_restore_active(struct test_args *args,
uint32_t first_intid, uint32_t num,
kvm_inject_cmd cmd)
{
uint32_t prio, intid, ap1r;
int i;
/*
* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
* in descending order, so intid+1 can preempt intid.
*/
for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) {
GUEST_ASSERT(prio >= 0);
intid = i + first_intid;
gic_set_priority(intid, prio);
}
/*
* In a real migration, KVM would restore all GIC state before running
* guest code.
*/
for (i = 0; i < num; i++) {
intid = i + first_intid;
KVM_ACTIVATE(cmd, intid);
ap1r = gic_read_ap1r0();
ap1r |= 1U << i;
gic_write_ap1r0(ap1r);
}
/* This is where the "migration" would occur. */
/* finish handling the IRQs starting with the highest priority one. */
for (i = 0; i < num; i++) {
intid = num - i - 1 + first_intid;
gic_set_eoi(intid);
if (args->eoi_split)
gic_set_dir(intid);
}
for (i = 0; i < num; i++)
GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
GUEST_ASSERT_IAR_EMPTY();
}
/*
* Polls the IAR until it's not a spurious interrupt.
*
* This function should only be used in test_inject_preemption (with IRQs
* masked).
*/
static uint32_t wait_for_and_activate_irq(void)
{
uint32_t intid;
do {
asm volatile("wfi" : : : "memory");
intid = gic_get_and_ack_irq();
} while (intid == IAR_SPURIOUS);
return intid;
}
/*
* Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
* handle them without handling the actual exceptions. This is done by masking
* interrupts for the whole test.
*/
static void test_inject_preemption(struct test_args *args,
uint32_t first_intid, int num,
kvm_inject_cmd cmd)
{
uint32_t intid, prio, step = KVM_PRIO_STEPS;
int i;
	/*
	 * Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
	 * in descending order, so intid+1 can preempt intid.
	 */
for (i = 0, prio = (num - 1) * step; i < num; i++, prio -= step) {
GUEST_ASSERT(prio >= 0);
intid = i + first_intid;
gic_set_priority(intid, prio);
}
local_irq_disable();
for (i = 0; i < num; i++) {
uint32_t tmp;
intid = i + first_intid;
KVM_INJECT(cmd, intid);
/* Each successive IRQ will preempt the previous one. */
tmp = wait_for_and_activate_irq();
GUEST_ASSERT_EQ(tmp, intid);
if (args->level_sensitive)
guest_set_irq_line(intid, 0);
}
/* finish handling the IRQs starting with the highest priority one. */
for (i = 0; i < num; i++) {
intid = num - i - 1 + first_intid;
gic_set_eoi(intid);
if (args->eoi_split)
gic_set_dir(intid);
}
local_irq_enable();
for (i = 0; i < num; i++)
GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
GUEST_ASSERT_IAR_EMPTY();
reset_priorities(args);
}
static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
{
uint32_t nr_irqs = args->nr_irqs;
if (f->sgi) {
guest_inject(args, MIN_SGI, 1, f->cmd);
guest_inject(args, 0, 16, f->cmd);
}
if (f->ppi)
guest_inject(args, MIN_PPI, 1, f->cmd);
if (f->spi) {
guest_inject(args, MIN_SPI, 1, f->cmd);
guest_inject(args, nr_irqs - 1, 1, f->cmd);
guest_inject(args, MIN_SPI, nr_irqs - MIN_SPI, f->cmd);
}
}
static void test_injection_failure(struct test_args *args,
struct kvm_inject_desc *f)
{
uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
int i;
for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
test_inject_fail(args, bad_intid[i], f->cmd);
}
static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
{
/*
* Test up to 4 levels of preemption. The reason is that KVM doesn't
* currently implement the ability to have more than the number-of-LRs
* number of concurrently active IRQs. The number of LRs implemented is
* IMPLEMENTATION DEFINED, however, it seems that most implement 4.
*/
if (f->sgi)
test_inject_preemption(args, MIN_SGI, 4, f->cmd);
if (f->ppi)
test_inject_preemption(args, MIN_PPI, 4, f->cmd);
if (f->spi)
test_inject_preemption(args, MIN_SPI, 4, f->cmd);
}
static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
{
/* Test up to 4 active IRQs. Same reason as in test_preemption. */
if (f->sgi)
guest_restore_active(args, MIN_SGI, 4, f->cmd);
if (f->ppi)
guest_restore_active(args, MIN_PPI, 4, f->cmd);
if (f->spi)
guest_restore_active(args, MIN_SPI, 4, f->cmd);
}
static void guest_code(struct test_args *args)
{
uint32_t i, nr_irqs = args->nr_irqs;
bool level_sensitive = args->level_sensitive;
struct kvm_inject_desc *f, *inject_fns;
gic_init(GIC_V3, 1, dist, redist);
for (i = 0; i < nr_irqs; i++)
gic_irq_enable(i);
for (i = MIN_SPI; i < nr_irqs; i++)
gic_irq_set_config(i, !level_sensitive);
gic_set_eoi_split(args->eoi_split);
reset_priorities(args);
gic_set_priority_mask(CPU_PRIO_MASK);
inject_fns = level_sensitive ? inject_level_fns
: inject_edge_fns;
local_irq_enable();
/* Start the tests. */
for_each_supported_inject_fn(args, inject_fns, f) {
test_injection(args, f);
test_preemption(args, f);
test_injection_failure(args, f);
}
/*
* Restore the active state of IRQs. This would happen when live
* migrating IRQs in the middle of being handled.
*/
for_each_supported_activate_fn(args, set_active_fns, f)
test_restore_active(args, f);
GUEST_DONE();
}
static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
struct test_args *test_args, bool expect_failure)
{
int ret;
if (!expect_failure) {
kvm_arm_irq_line(vm, intid, level);
} else {
		/* The interface doesn't allow larger intids. */
if (intid > KVM_ARM_IRQ_NUM_MASK)
return;
ret = _kvm_arm_irq_line(vm, intid, level);
TEST_ASSERT(ret != 0 && errno == EINVAL,
"Bad intid %i did not cause KVM_IRQ_LINE "
"error: rc: %i errno: %i", intid, ret, errno);
}
}
void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
bool expect_failure)
{
if (!expect_failure) {
kvm_irq_set_level_info(gic_fd, intid, level);
} else {
int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
/*
* The kernel silently fails for invalid SPIs and SGIs (which
* are not level-sensitive). It only checks for intid to not
* spill over 1U << 10 (the max reserved SPI). Also, callers
* are supposed to mask the intid with 0x3ff (1023).
*/
if (intid > VGIC_MAX_RESERVED)
TEST_ASSERT(ret != 0 && errno == EINVAL,
"Bad intid %i did not cause VGIC_GRP_LEVEL_INFO "
"error: rc: %i errno: %i", intid, ret, errno);
else
TEST_ASSERT(!ret, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO "
"for intid %i failed, rc: %i errno: %i",
intid, ret, errno);
}
}
static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
bool expect_failure)
{
struct kvm_irq_routing *routing;
int ret;
uint64_t i;
assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);
routing = kvm_gsi_routing_create();
for (i = intid; i < (uint64_t)intid + num; i++)
kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);
if (!expect_failure) {
kvm_gsi_routing_write(vm, routing);
} else {
ret = _kvm_gsi_routing_write(vm, routing);
/* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */
if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS)
TEST_ASSERT(ret != 0 && errno == EINVAL,
"Bad intid %u did not cause KVM_SET_GSI_ROUTING "
"error: rc: %i errno: %i", intid, ret, errno);
else
TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING "
"for intid %i failed, rc: %i errno: %i",
intid, ret, errno);
}
}
static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
struct kvm_vcpu *vcpu,
bool expect_failure)
{
/*
* Ignore this when expecting failure as invalid intids will lead to
* either trying to inject SGIs when we configured the test to be
* level_sensitive (or the reverse), or inject large intids which
* will lead to writing above the ISPENDR register space (and we
* don't want to do that either).
*/
if (!expect_failure)
kvm_irq_write_ispendr(gic_fd, intid, vcpu);
}
static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
bool expect_failure)
{
int fd[MAX_SPI];
uint64_t val;
int ret, f;
uint64_t i;
/*
* There is no way to try injecting an SGI or PPI as the interface
* starts counting from the first SPI (above the private ones), so just
* exit.
*/
if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid))
return;
kvm_set_gsi_routing_irqchip_check(vm, intid, num,
kvm_max_routes, expect_failure);
/*
	 * If expect_failure, then just try to inject anyway. These
* will silently fail. And in any case, the guest will check
* that no actual interrupt was injected for those cases.
*/
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
fd[f] = eventfd(0, 0);
TEST_ASSERT(fd[f] != -1, __KVM_SYSCALL_ERROR("eventfd()", fd[f]));
}
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
struct kvm_irqfd irqfd = {
.fd = fd[f],
.gsi = i - MIN_SPI,
};
assert(i <= (uint64_t)UINT_MAX);
vm_ioctl(vm, KVM_IRQFD, &irqfd);
}
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
val = 1;
ret = write(fd[f], &val, sizeof(uint64_t));
TEST_ASSERT(ret == sizeof(uint64_t),
__KVM_SYSCALL_ERROR("write()", ret));
}
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
close(fd[f]);
}
/* handles the valid case: intid=0xffffffff num=1 */
#define for_each_intid(first, num, tmp, i) \
for ((tmp) = (i) = (first); \
(tmp) < (uint64_t)(first) + (uint64_t)(num); \
(tmp)++, (i)++)
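/*
 * tmp is 64-bit so that the (first + num) bound cannot wrap around when
 * first is close to UINT_MAX (e.g., intid=0xffffffff num=1 above).
 */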
static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd,
struct kvm_inject_args *inject_args,
struct test_args *test_args)
{
kvm_inject_cmd cmd = inject_args->cmd;
uint32_t intid = inject_args->first_intid;
uint32_t num = inject_args->num;
int level = inject_args->level;
bool expect_failure = inject_args->expect_failure;
struct kvm_vm *vm = vcpu->vm;
uint64_t tmp;
uint32_t i;
/* handles the valid case: intid=0xffffffff num=1 */
assert(intid < UINT_MAX - num || num == 1);
switch (cmd) {
case KVM_INJECT_EDGE_IRQ_LINE:
for_each_intid(intid, num, tmp, i)
kvm_irq_line_check(vm, i, 1, test_args,
expect_failure);
for_each_intid(intid, num, tmp, i)
kvm_irq_line_check(vm, i, 0, test_args,
expect_failure);
break;
case KVM_SET_IRQ_LINE:
for_each_intid(intid, num, tmp, i)
kvm_irq_line_check(vm, i, level, test_args,
expect_failure);
break;
case KVM_SET_IRQ_LINE_HIGH:
for_each_intid(intid, num, tmp, i)
kvm_irq_line_check(vm, i, 1, test_args,
expect_failure);
break;
case KVM_SET_LEVEL_INFO_HIGH:
for_each_intid(intid, num, tmp, i)
kvm_irq_set_level_info_check(gic_fd, i, 1,
expect_failure);
break;
case KVM_INJECT_IRQFD:
kvm_routing_and_irqfd_check(vm, intid, num,
test_args->kvm_max_routes,
expect_failure);
break;
case KVM_WRITE_ISPENDR:
for (i = intid; i < intid + num; i++)
kvm_irq_write_ispendr_check(gic_fd, i, vcpu,
expect_failure);
break;
case KVM_WRITE_ISACTIVER:
for (i = intid; i < intid + num; i++)
kvm_irq_write_isactiver(gic_fd, i, vcpu);
break;
default:
break;
}
}
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
struct kvm_inject_args *args)
{
struct kvm_inject_args *kvm_args_hva;
vm_vaddr_t kvm_args_gva;
kvm_args_gva = uc->args[1];
kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args));
}
static void print_args(struct test_args *args)
{
printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n",
args->nr_irqs, args->level_sensitive,
args->eoi_split);
}
static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
{
struct ucall uc;
int gic_fd;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_inject_args inject_args;
vm_vaddr_t args_gva;
struct test_args args = {
.nr_irqs = nr_irqs,
.level_sensitive = level_sensitive,
.eoi_split = eoi_split,
.kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING),
.kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD),
};
print_args(&args);
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
/* Setup the guest args page (so it gets the args). */
args_gva = vm_vaddr_alloc_page(vm);
memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
vcpu_args_set(vcpu, 1, args_gva);
gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
GICD_BASE_GPA, GICR_BASE_GPA);
__TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3, skipping");
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
guest_irq_handlers[args.eoi_split][args.level_sensitive]);
while (1) {
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
kvm_inject_get_call(vm, &uc, &inject_args);
run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
done:
close(gic_fd);
kvm_vm_free(vm);
}
static void help(const char *name)
{
printf(
"\n"
"usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n", name);
printf(" -n: specify number of IRQs to setup the vgic with. "
"It has to be a multiple of 32 and between 64 and 1024.\n");
printf(" -e: if 1 then EOI is split into a write to DIR on top "
"of writing EOI.\n");
printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0).");
puts("");
exit(1);
}
int main(int argc, char **argv)
{
uint32_t nr_irqs = 64;
bool default_args = true;
bool level_sensitive = false;
int opt;
bool eoi_split = false;
while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
switch (opt) {
case 'n':
nr_irqs = atoi_non_negative("Number of IRQs", optarg);
if (nr_irqs > 1024 || nr_irqs % 32)
help(argv[0]);
break;
case 'e':
eoi_split = (bool)atoi_paranoid(optarg);
default_args = false;
break;
case 'l':
level_sensitive = (bool)atoi_paranoid(optarg);
default_args = false;
break;
case 'h':
default:
help(argv[0]);
break;
}
}
/*
	 * If the user just specified nr_irqs, then run all combinations of
	 * level sensitivity and EOI mode.
*/
if (default_args) {
test_vgic(nr_irqs, false /* level */, false /* eoi_split */);
test_vgic(nr_irqs, false /* level */, true /* eoi_split */);
test_vgic(nr_irqs, true /* level */, false /* eoi_split */);
test_vgic(nr_irqs, true /* level */, true /* eoi_split */);
} else {
test_vgic(nr_irqs, level_sensitive, eoi_split);
}
return 0;
}
| linux-master | tools/testing/selftests/kvm/aarch64/vgic_irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* vgic init sequence tests
*
* Copyright (C) 2020, Red Hat, Inc.
*/
#define _GNU_SOURCE
#include <linux/kernel.h>
#include <sys/syscall.h>
#include <asm/kvm.h>
#include <asm/kvm_para.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vgic.h"
#define NR_VCPUS 4
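/*
 * REG_OFFSET() encodes a vcpu index and register offset into the attr
 * value used by the KVM_DEV_ARM_VGIC_GRP_REDIST_REGS accessors below.
 */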
#define REG_OFFSET(vcpu, offset) (((uint64_t)vcpu << 32) | offset)
#define GICR_TYPER 0x8
#define VGIC_DEV_IS_V2(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V2)
#define VGIC_DEV_IS_V3(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V3)
struct vm_gic {
struct kvm_vm *vm;
int gic_fd;
uint32_t gic_dev_type;
};
static uint64_t max_phys_size;
/*
* Helpers to access a redistributor register and verify the ioctl() failed or
* succeeded as expected, and provided the correct value on success.
*/
static void v3_redist_reg_get_errno(int gicv3_fd, int vcpu, int offset,
int want, const char *msg)
{
uint32_t ignored_val;
int ret = __kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
REG_OFFSET(vcpu, offset), &ignored_val);
TEST_ASSERT(ret && errno == want, "%s; want errno = %d", msg, want);
}
static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, uint32_t want,
const char *msg)
{
uint32_t val;
kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
REG_OFFSET(vcpu, offset), &val);
TEST_ASSERT(val == want, "%s; want '0x%x', got '0x%x'", msg, want, val);
}
/* dummy guest code */
static void guest_code(void)
{
GUEST_SYNC(0);
GUEST_SYNC(1);
GUEST_SYNC(2);
GUEST_DONE();
}
/* we don't want to assert on run execution, hence this helper */
static int run_vcpu(struct kvm_vcpu *vcpu)
{
return __vcpu_run(vcpu) ? -errno : 0;
}
static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
uint32_t nr_vcpus,
struct kvm_vcpu *vcpus[])
{
struct vm_gic v;
v.gic_dev_type = gic_dev_type;
v.vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
return v;
}
static void vm_gic_destroy(struct vm_gic *v)
{
close(v->gic_fd);
kvm_vm_free(v->vm);
}
struct vgic_region_attr {
uint64_t attr;
uint64_t size;
uint64_t alignment;
};
struct vgic_region_attr gic_v3_dist_region = {
.attr = KVM_VGIC_V3_ADDR_TYPE_DIST,
.size = 0x10000,
.alignment = 0x10000,
};
struct vgic_region_attr gic_v3_redist_region = {
.attr = KVM_VGIC_V3_ADDR_TYPE_REDIST,
.size = NR_VCPUS * 0x20000,
.alignment = 0x10000,
};
struct vgic_region_attr gic_v2_dist_region = {
.attr = KVM_VGIC_V2_ADDR_TYPE_DIST,
.size = 0x1000,
.alignment = 0x1000,
};
struct vgic_region_attr gic_v2_cpu_region = {
.attr = KVM_VGIC_V2_ADDR_TYPE_CPU,
.size = 0x2000,
.alignment = 0x1000,
};
/*
 * Helper routine that performs KVM device tests in general. Eventually the
 * ARM_VGIC (GICv2 or GICv3) device gets created with an overlapping
 * DIST/REDIST (or DIST/CPUIF for GICv2). The assumption is that 4 vcpus are
 * going to be used, hence the overlap. In the case of GICv3, a REDIST region
 * is set @0x0 and a DIST region is set @0x70000. The GICv2 case sets a CPUIF
 * @0x0 and a DIST region @0x1000.
 */
static void subtest_dist_rdist(struct vm_gic *v)
{
int ret;
uint64_t addr;
struct vgic_region_attr rdist; /* CPU interface in GICv2*/
struct vgic_region_attr dist;
rdist = VGIC_DEV_IS_V3(v->gic_dev_type) ? gic_v3_redist_region
: gic_v2_cpu_region;
dist = VGIC_DEV_IS_V3(v->gic_dev_type) ? gic_v3_dist_region
: gic_v2_dist_region;
/* Check existing group/attributes */
kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, dist.attr);
kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, rdist.attr);
/* check non existing attribute */
ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, -1);
TEST_ASSERT(ret && errno == ENXIO, "attribute not supported");
/* misaligned DIST and REDIST address settings */
addr = dist.alignment / 0x10;
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
dist.attr, &addr);
TEST_ASSERT(ret && errno == EINVAL, "GIC dist base not aligned");
addr = rdist.alignment / 0x10;
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
rdist.attr, &addr);
TEST_ASSERT(ret && errno == EINVAL, "GIC redist/cpu base not aligned");
/* out of range address */
addr = max_phys_size;
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
dist.attr, &addr);
TEST_ASSERT(ret && errno == E2BIG, "dist address beyond IPA limit");
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
rdist.attr, &addr);
TEST_ASSERT(ret && errno == E2BIG, "redist address beyond IPA limit");
/* Space for half a rdist (a rdist is: 2 * rdist.alignment). */
addr = max_phys_size - dist.alignment;
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
rdist.attr, &addr);
TEST_ASSERT(ret && errno == E2BIG,
"half of the redist is beyond IPA limit");
/* set REDIST base address @0x0*/
addr = 0x00000;
kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
rdist.attr, &addr);
/* Attempt to create a second legacy redistributor region */
addr = 0xE0000;
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
rdist.attr, &addr);
TEST_ASSERT(ret && errno == EEXIST, "GIC redist base set again");
ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST);
if (!ret) {
/* Attempt to mix legacy and new redistributor regions */
addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 0, 0);
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL,
"attempt to mix GICv3 REDIST and REDIST_REGION");
}
/*
* Set overlapping DIST / REDIST, cannot be detected here. Will be detected
* on first vcpu run instead.
*/
addr = rdist.size - rdist.alignment;
kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
dist.attr, &addr);
}
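/*
 * REDIST_REGION_ATTR_ADDR(count, base, flags, index) packs those four
 * fields into the 64-bit attr value that
 * KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION expects (the exact bit layout is
 * documented in Documentation/virt/kvm/devices/arm-vgic-v3.rst).
 */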
/* Test the new REDIST region API */
static void subtest_v3_redist_regions(struct vm_gic *v)
{
uint64_t addr, expected_addr;
int ret;
ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST);
TEST_ASSERT(!ret, "Multiple redist regions advertised");
addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 2, 0);
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "redist region attr value with flags != 0");
addr = REDIST_REGION_ATTR_ADDR(0, 0x100000, 0, 0);
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "redist region attr value with count== 0");
addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 1);
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL,
"attempt to register the first rdist region with index != 0");
addr = REDIST_REGION_ATTR_ADDR(2, 0x201000, 0, 1);
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "rdist region with misaligned address");
addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0);
kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 1);
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "register an rdist region with already used index");
addr = REDIST_REGION_ATTR_ADDR(1, 0x210000, 0, 2);
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL,
"register an rdist region overlapping with another one");
addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 2);
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "register redist region with index not +1");
addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 1);
kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
addr = REDIST_REGION_ATTR_ADDR(1, max_phys_size, 0, 2);
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == E2BIG,
"register redist region with base address beyond IPA range");
/* The last redist is above the pa range. */
addr = REDIST_REGION_ATTR_ADDR(2, max_phys_size - 0x30000, 0, 2);
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == E2BIG,
"register redist region with top address beyond IPA range");
addr = 0x260000;
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr);
TEST_ASSERT(ret && errno == EINVAL,
"Mix KVM_VGIC_V3_ADDR_TYPE_REDIST and REDIST_REGION");
/*
* Now there are 2 redist regions:
* region 0 @ 0x200000 2 redists
* region 1 @ 0x240000 1 redist
* Attempt to read their characteristics
*/
addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 0);
expected_addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0);
ret = __kvm_device_attr_get(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(!ret && addr == expected_addr, "read characteristics of region #0");
addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 1);
expected_addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 1);
ret = __kvm_device_attr_get(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(!ret && addr == expected_addr, "read characteristics of region #1");
addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 2);
ret = __kvm_device_attr_get(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == ENOENT, "read characteristics of non existing region");
addr = 0x260000;
kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_DIST, &addr);
addr = REDIST_REGION_ATTR_ADDR(1, 0x260000, 0, 2);
ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "register redist region colliding with dist");
}
/*
* VGIC KVM device is created and initialized before the secondary CPUs
* get created
*/
static void test_vgic_then_vcpus(uint32_t gic_dev_type)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
int ret, i;
v = vm_gic_create_with_vcpus(gic_dev_type, 1, vcpus);
subtest_dist_rdist(&v);
/* Add the rest of the VCPUs */
for (i = 1; i < NR_VCPUS; ++i)
vcpus[i] = vm_vcpu_add(v.vm, i, guest_code);
ret = run_vcpu(vcpus[3]);
TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run");
vm_gic_destroy(&v);
}
/* All the VCPUs are created before the VGIC KVM device gets initialized */
static void test_vcpus_then_vgic(uint32_t gic_dev_type)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
int ret;
v = vm_gic_create_with_vcpus(gic_dev_type, NR_VCPUS, vcpus);
subtest_dist_rdist(&v);
ret = run_vcpu(vcpus[3]);
TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run");
vm_gic_destroy(&v);
}
static void test_v3_new_redist_regions(void)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
void *dummy = NULL;
struct vm_gic v;
uint64_t addr;
int ret;
v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
subtest_v3_redist_regions(&v);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
ret = run_vcpu(vcpus[3]);
TEST_ASSERT(ret == -ENXIO, "running without sufficient number of rdists");
vm_gic_destroy(&v);
/* step 2: enough rdist capacity this time, but skip the explicit vgic init */
v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
subtest_v3_redist_regions(&v);
addr = REDIST_REGION_ATTR_ADDR(1, 0x280000, 0, 2);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
ret = run_vcpu(vcpus[3]);
TEST_ASSERT(ret == -EBUSY, "running without vgic explicit init");
vm_gic_destroy(&v);
/* step 3: check EFAULT on a NULL attr, then complete the setup and run */
v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
subtest_v3_redist_regions(&v);
ret = __kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, dummy);
TEST_ASSERT(ret && errno == EFAULT,
"register a third region allowing to cover the 4 vcpus");
addr = REDIST_REGION_ATTR_ADDR(1, 0x280000, 0, 2);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
ret = run_vcpu(vcpus[3]);
TEST_ASSERT(!ret, "vcpu run");
vm_gic_destroy(&v);
}
static void test_v3_typer_accesses(void)
{
struct vm_gic v;
uint64_t addr;
int ret, i;
v.vm = vm_create(NR_VCPUS);
(void)vm_vcpu_add(v.vm, 0, guest_code);
v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3);
(void)vm_vcpu_add(v.vm, 3, guest_code);
v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EINVAL,
"attempting to read GICR_TYPER of non created vcpu");
(void)vm_vcpu_add(v.vm, 1, guest_code);
v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EBUSY,
"read GICR_TYPER before GIC initialized");
(void)vm_vcpu_add(v.vm, 2, guest_code);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
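/*
* GICR_TYPER.Processor_Number (bits [23:8]) is set by KVM to the vcpu id,
* and GICR_TYPER.Last (bit 4) marks the final redistributor of a series;
* this is what the expected values below encode.
*/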
for (i = 0; i < NR_VCPUS; i++) {
v3_redist_reg_get(v.gic_fd, i, GICR_TYPER, i * 0x100,
"read GICR_TYPER before rdist region setting");
}
addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
/* The first 2 rdists should be put there (vcpus 0 and 3) */
v3_redist_reg_get(v.gic_fd, 0, GICR_TYPER, 0x0, "read typer of rdist #0");
v3_redist_reg_get(v.gic_fd, 3, GICR_TYPER, 0x310, "read typer of rdist #1");
addr = REDIST_REGION_ATTR_ADDR(10, 0x100000, 0, 1);
ret = __kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "collision with previous rdist region");
v3_redist_reg_get(v.gic_fd, 1, GICR_TYPER, 0x100,
"no redist region attached to vcpu #1 yet, last cannot be returned");
v3_redist_reg_get(v.gic_fd, 2, GICR_TYPER, 0x200,
"no redist region attached to vcpu #2, last cannot be returned");
addr = REDIST_REGION_ATTR_ADDR(10, 0x20000, 0, 1);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
v3_redist_reg_get(v.gic_fd, 1, GICR_TYPER, 0x100, "read typer of rdist #1");
v3_redist_reg_get(v.gic_fd, 2, GICR_TYPER, 0x210,
"read typer of rdist #1, last properly returned");
vm_gic_destroy(&v);
}
static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus,
uint32_t vcpuids[])
{
struct vm_gic v;
int i;
v.vm = vm_create(nr_vcpus);
for (i = 0; i < nr_vcpus; i++)
vm_vcpu_add(v.vm, vcpuids[i], guest_code);
v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3);
return v;
}
/*
* Test GICR_TYPER last bit with new redist regions
* rdist regions #1 and #2 are contiguous
* rdist region #0 @0x100000 2 rdist capacity
* rdists: 0, 3 (Last)
* rdist region #1 @0x240000 2 rdist capacity
* rdists: 5, 4 (Last)
* rdist region #2 @0x200000 2 rdist capacity
* rdists: 1, 2
*/
static void test_v3_last_bit_redist_regions(void)
{
uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
struct vm_gic v;
uint64_t addr;
v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
addr = REDIST_REGION_ATTR_ADDR(2, 0x100000, 0, 0);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
addr = REDIST_REGION_ATTR_ADDR(2, 0x240000, 0, 1);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 2);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
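/*
* Given the layout above, vcpu 3 ends region #0 and vcpu 4 ends region #1,
* so only their GICR_TYPER values carry the Last bit (0x10). Region #2 is
* contiguous with region #1, so vcpu 2 is not marked Last.
*/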
v3_redist_reg_get(v.gic_fd, 0, GICR_TYPER, 0x000, "read typer of rdist #0");
v3_redist_reg_get(v.gic_fd, 1, GICR_TYPER, 0x100, "read typer of rdist #1");
v3_redist_reg_get(v.gic_fd, 2, GICR_TYPER, 0x200, "read typer of rdist #2");
v3_redist_reg_get(v.gic_fd, 3, GICR_TYPER, 0x310, "read typer of rdist #3");
v3_redist_reg_get(v.gic_fd, 5, GICR_TYPER, 0x500, "read typer of rdist #5");
v3_redist_reg_get(v.gic_fd, 4, GICR_TYPER, 0x410, "read typer of rdist #4");
vm_gic_destroy(&v);
}
/* Test last bit with legacy region */
static void test_v3_last_bit_single_rdist(void)
{
uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
struct vm_gic v;
uint64_t addr;
v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
addr = 0x10000;
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr);
v3_redist_reg_get(v.gic_fd, 0, GICR_TYPER, 0x000, "read typer of rdist #0");
v3_redist_reg_get(v.gic_fd, 3, GICR_TYPER, 0x300, "read typer of rdist #1");
v3_redist_reg_get(v.gic_fd, 5, GICR_TYPER, 0x500, "read typer of rdist #2");
v3_redist_reg_get(v.gic_fd, 1, GICR_TYPER, 0x100, "read typer of rdist #3");
v3_redist_reg_get(v.gic_fd, 2, GICR_TYPER, 0x210, "read typer of rdist #3");
vm_gic_destroy(&v);
}
/* Uses the legacy REDIST region API. */
static void test_v3_redist_ipa_range_check_at_vcpu_run(void)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
int ret, i;
uint64_t addr;
v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, 1, vcpus);
/* Set space for 3 redists; we have 1 vcpu, so this succeeds. */
addr = max_phys_size - (3 * 2 * 0x10000);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr);
addr = 0x00000;
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_DIST, &addr);
/* Add the rest of the VCPUs */
for (i = 1; i < NR_VCPUS; ++i)
vcpus[i] = vm_vcpu_add(v.vm, i, guest_code);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
/* Attempt to run a vcpu without enough redist space. */
ret = run_vcpu(vcpus[2]);
TEST_ASSERT(ret && errno == EINVAL,
"redist base+size above PA range detected on 1st vcpu run");
vm_gic_destroy(&v);
}
static void test_v3_its_region(void)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
uint64_t addr;
int its_fd, ret;
v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
its_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_ITS);
addr = 0x401000;
ret = __kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_ITS_ADDR_TYPE, &addr);
TEST_ASSERT(ret && errno == EINVAL,
"ITS region with misaligned address");
addr = max_phys_size;
ret = __kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_ITS_ADDR_TYPE, &addr);
TEST_ASSERT(ret && errno == E2BIG,
"register ITS region with base address beyond IPA range");
addr = max_phys_size - 0x10000;
ret = __kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_ITS_ADDR_TYPE, &addr);
TEST_ASSERT(ret && errno == E2BIG,
"Half of ITS region is beyond IPA range");
/* This one succeeds in setting the ITS base */
addr = 0x400000;
kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_ITS_ADDR_TYPE, &addr);
addr = 0x300000;
ret = __kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_ITS_ADDR_TYPE, &addr);
TEST_ASSERT(ret && errno == EEXIST, "ITS base set again");
close(its_fd);
vm_gic_destroy(&v);
}
/*
* Returns 0 if it's possible to create a GIC device of the given type (V2 or V3).
*/
int test_kvm_device(uint32_t gic_dev_type)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
uint32_t other;
int ret;
v.vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
/* try to create a nonexistent KVM device */
ret = __kvm_test_create_device(v.vm, 0);
TEST_ASSERT(ret && errno == ENODEV, "unsupported device");
/* trial run: bail out if this GIC type cannot be created at all */
ret = __kvm_test_create_device(v.vm, gic_dev_type);
if (ret)
return ret;
v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
ret = __kvm_create_device(v.vm, gic_dev_type);
TEST_ASSERT(ret < 0 && errno == EEXIST, "create GIC device twice");
/* try to create the other gic_dev_type */
other = VGIC_DEV_IS_V2(gic_dev_type) ? KVM_DEV_TYPE_ARM_VGIC_V3
: KVM_DEV_TYPE_ARM_VGIC_V2;
if (!__kvm_test_create_device(v.vm, other)) {
ret = __kvm_create_device(v.vm, other);
TEST_ASSERT(ret < 0 && (errno == EINVAL || errno == EEXIST),
"create GIC device while other version exists");
}
vm_gic_destroy(&v);
return 0;
}
void run_tests(uint32_t gic_dev_type)
{
test_vcpus_then_vgic(gic_dev_type);
test_vgic_then_vcpus(gic_dev_type);
if (VGIC_DEV_IS_V3(gic_dev_type)) {
test_v3_new_redist_regions();
test_v3_typer_accesses();
test_v3_last_bit_redist_regions();
test_v3_last_bit_single_rdist();
test_v3_redist_ipa_range_check_at_vcpu_run();
test_v3_its_region();
}
}
int main(int ac, char **av)
{
int ret;
int pa_bits;
int cnt_impl = 0;
pa_bits = vm_guest_mode_params[VM_MODE_DEFAULT].pa_bits;
max_phys_size = 1ULL << pa_bits;
ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V3);
if (!ret) {
pr_info("Running GIC_v3 tests.\n");
run_tests(KVM_DEV_TYPE_ARM_VGIC_V3);
cnt_impl++;
}
ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V2);
if (!ret) {
pr_info("Running GIC_v2 tests.\n");
run_tests(KVM_DEV_TYPE_ARM_VGIC_V2);
cnt_impl++;
}
if (!cnt_impl) {
print_skip("No GICv2 nor GICv3 support");
exit(KSFT_SKIP);
}
return 0;
}
| linux-master | tools/testing/selftests/kvm/aarch64/vgic_init.c |
// SPDX-License-Identifier: GPL-2.0-only
/* hypercalls: Check ARM64's pseudo-firmware bitmap register interface.
*
* The test validates the basic hypercall functionalities that are exposed
* via the pseudo-firmware bitmap registers. This includes the registers'
* read/write behavior before and after the VM has started, and whether the
* hypercalls are properly masked or unmasked to the guest when disabled or
* enabled from the KVM userspace, respectively.
*/
#include <errno.h>
#include <linux/arm-smccc.h>
#include <asm/kvm.h>
#include <kvm_util.h>
#include "processor.h"
#define FW_REG_ULIMIT_VAL(max_feat_bit) (GENMASK(max_feat_bit, 0))
/* Last valid bits of the bitmapped firmware registers */
#define KVM_REG_ARM_STD_BMAP_BIT_MAX 0
#define KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX 0
#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX 1
struct kvm_fw_reg_info {
uint64_t reg; /* Register definition */
uint64_t max_feat_bit; /* Bit that represents the upper limit of the feature-map */
};
#define FW_REG_INFO(r) \
{ \
.reg = r, \
.max_feat_bit = r##_BIT_MAX, \
}
static const struct kvm_fw_reg_info fw_reg_info[] = {
FW_REG_INFO(KVM_REG_ARM_STD_BMAP),
FW_REG_INFO(KVM_REG_ARM_STD_HYP_BMAP),
FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP),
};
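/*
* For orientation (per the UAPI bit definitions): bit 0 of STD_BMAP gates
* the TRNG calls, bit 0 of STD_HYP_BMAP gates PV time, and bits 0-1 of
* VENDOR_HYP_BMAP gate the features/UID and PTP calls exercised below.
*/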
enum test_stage {
TEST_STAGE_REG_IFACE,
TEST_STAGE_HVC_IFACE_FEAT_DISABLED,
TEST_STAGE_HVC_IFACE_FEAT_ENABLED,
TEST_STAGE_HVC_IFACE_FALSE_INFO,
TEST_STAGE_END,
};
static int stage = TEST_STAGE_REG_IFACE;
struct test_hvc_info {
uint32_t func_id;
uint64_t arg1;
};
#define TEST_HVC_INFO(f, a1) \
{ \
.func_id = f, \
.arg1 = a1, \
}
static const struct test_hvc_info hvc_info[] = {
/* KVM_REG_ARM_STD_BMAP */
TEST_HVC_INFO(ARM_SMCCC_TRNG_VERSION, 0),
TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_TRNG_RND64),
TEST_HVC_INFO(ARM_SMCCC_TRNG_GET_UUID, 0),
TEST_HVC_INFO(ARM_SMCCC_TRNG_RND32, 0),
TEST_HVC_INFO(ARM_SMCCC_TRNG_RND64, 0),
/* KVM_REG_ARM_STD_HYP_BMAP */
TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_HV_PV_TIME_FEATURES),
TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_HV_PV_TIME_ST),
TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_ST, 0),
/* KVM_REG_ARM_VENDOR_HYP_BMAP */
TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID,
ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 0),
TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID, KVM_PTP_VIRT_COUNTER),
};
/* Feed false hypercall info to test the KVM behavior */
static const struct test_hvc_info false_hvc_info[] = {
/* Feature support check against a different family of hypercalls */
TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_TRNG_RND64),
TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_TRNG_RND64),
};
static void guest_test_hvc(const struct test_hvc_info *hc_info)
{
unsigned int i;
struct arm_smccc_res res;
unsigned int hvc_info_arr_sz;
hvc_info_arr_sz =
hc_info == hvc_info ? ARRAY_SIZE(hvc_info) : ARRAY_SIZE(false_hvc_info);
for (i = 0; i < hvc_info_arr_sz; i++, hc_info++) {
memset(&res, 0, sizeof(res));
smccc_hvc(hc_info->func_id, hc_info->arg1, 0, 0, 0, 0, 0, 0, &res);
switch (stage) {
case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
case TEST_STAGE_HVC_IFACE_FALSE_INFO:
__GUEST_ASSERT(res.a0 == SMCCC_RET_NOT_SUPPORTED,
"a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u",
res.a0, hc_info->func_id, hc_info->arg1, stage);
break;
case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
__GUEST_ASSERT(res.a0 != SMCCC_RET_NOT_SUPPORTED,
"a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u",
res.a0, hc_info->func_id, hc_info->arg1, stage);
break;
default:
GUEST_FAIL("Unexpected stage = %u", stage);
}
}
}
static void guest_code(void)
{
while (stage != TEST_STAGE_END) {
switch (stage) {
case TEST_STAGE_REG_IFACE:
break;
case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
guest_test_hvc(hvc_info);
break;
case TEST_STAGE_HVC_IFACE_FALSE_INFO:
guest_test_hvc(false_hvc_info);
break;
default:
GUEST_FAIL("Unexpected stage = %u", stage);
}
GUEST_SYNC(stage);
}
GUEST_DONE();
}
struct st_time {
uint32_t rev;
uint32_t attr;
uint64_t st_time;
};
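/* Round the shared stolen-time structure size up to a 64-byte multiple. */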
#define STEAL_TIME_SIZE ((sizeof(struct st_time) + 63) & ~63)
#define ST_GPA_BASE (1 << 30)
static void steal_time_init(struct kvm_vcpu *vcpu)
{
uint64_t st_ipa = (ulong)ST_GPA_BASE;
unsigned int gpages;
gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE);
vm_userspace_mem_region_add(vcpu->vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PVTIME_CTRL,
KVM_ARM_VCPU_PVTIME_IPA, &st_ipa);
}
static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
{
uint64_t val;
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
/* First 'read' should be an upper limit of the features supported */
vcpu_get_reg(vcpu, reg_info->reg, &val);
TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
"Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n",
reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);
/* Test a 'write' by disabling all the features of the register map */
ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
TEST_ASSERT(ret == 0,
"Failed to clear all the features of reg: 0x%lx; ret: %d\n",
reg_info->reg, errno);
vcpu_get_reg(vcpu, reg_info->reg, &val);
TEST_ASSERT(val == 0,
"Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg);
/*
* Test enabling a feature that's not supported.
* Avoid this check if all the bits are occupied.
*/
if (reg_info->max_feat_bit < 63) {
ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
TEST_ASSERT(ret != 0 && errno == EINVAL,
"Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n",
errno, reg_info->reg);
}
}
}
static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
{
uint64_t val;
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
/*
* Before starting the VM, the test clears all the bits.
* Check if that's still the case.
*/
vcpu_get_reg(vcpu, reg_info->reg, &val);
TEST_ASSERT(val == 0,
"Expected all the features to be cleared for reg: 0x%lx\n",
reg_info->reg);
/*
* Since the VM has run at least once, KVM shouldn't allow modification of
* the registers and should return EBUSY. Set the registers and check for
* the expected errno.
*/
ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
TEST_ASSERT(ret != 0 && errno == EBUSY,
"Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n",
errno, reg_info->reg);
}
}
static struct kvm_vm *test_vm_create(struct kvm_vcpu **vcpu)
{
struct kvm_vm *vm;
vm = vm_create_with_one_vcpu(vcpu, guest_code);
steal_time_init(*vcpu);
return vm;
}
static void test_guest_stage(struct kvm_vm **vm, struct kvm_vcpu **vcpu)
{
int prev_stage = stage;
pr_debug("Stage: %d\n", prev_stage);
/* Sync the stage early; the VM might be freed below. */
stage++;
sync_global_to_guest(*vm, stage);
switch (prev_stage) {
case TEST_STAGE_REG_IFACE:
test_fw_regs_after_vm_start(*vcpu);
break;
case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
/* Start a new VM so that all the features are now enabled by default */
kvm_vm_free(*vm);
*vm = test_vm_create(vcpu);
break;
case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
case TEST_STAGE_HVC_IFACE_FALSE_INFO:
break;
default:
TEST_FAIL("Unknown test stage: %d\n", prev_stage);
}
}
static void test_run(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
bool guest_done = false;
vm = test_vm_create(&vcpu);
test_fw_regs_before_vm_start(vcpu);
while (!guest_done) {
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
test_guest_stage(&vm, &vcpu);
break;
case UCALL_DONE:
guest_done = true;
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
default:
TEST_FAIL("Unexpected guest exit\n");
}
}
kvm_vm_free(vm);
}
int main(void)
{
test_run();
return 0;
}
| linux-master | tools/testing/selftests/kvm/aarch64/hypercalls.c |
// SPDX-License-Identifier: GPL-2.0
#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>
#include <linux/bitfield.h>
#define MDSCR_KDE (1 << 13)
#define MDSCR_MDE (1 << 15)
#define MDSCR_SS (1 << 0)
#define DBGBCR_LEN8 (0xff << 5)
#define DBGBCR_EXEC (0x0 << 3)
#define DBGBCR_EL1 (0x1 << 1)
#define DBGBCR_E (0x1 << 0)
#define DBGBCR_LBN_SHIFT 16
#define DBGBCR_BT_SHIFT 20
#define DBGBCR_BT_ADDR_LINK_CTX (0x1 << DBGBCR_BT_SHIFT)
#define DBGBCR_BT_CTX_LINK (0x3 << DBGBCR_BT_SHIFT)
#define DBGWCR_LEN8 (0xff << 5)
#define DBGWCR_RD (0x1 << 3)
#define DBGWCR_WR (0x2 << 3)
#define DBGWCR_EL1 (0x1 << 1)
#define DBGWCR_E (0x1 << 0)
#define DBGWCR_LBN_SHIFT 16
#define DBGWCR_WT_SHIFT 20
#define DBGWCR_WT_LINK (0x1 << DBGWCR_WT_SHIFT)
#define SPSR_D (1 << 9)
#define SPSR_SS (1 << 21)
extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start, hw_bp_ctx;
extern unsigned char iter_ss_begin, iter_ss_end;
static volatile uint64_t sw_bp_addr, hw_bp_addr;
static volatile uint64_t wp_addr, wp_data_addr;
static volatile uint64_t svc_addr;
static volatile uint64_t ss_addr[4], ss_idx;
#define PC(v) ((uint64_t)&(v))
#define GEN_DEBUG_WRITE_REG(reg_name) \
static void write_##reg_name(int num, uint64_t val) \
{ \
switch (num) { \
case 0: \
write_sysreg(val, reg_name##0_el1); \
break; \
case 1: \
write_sysreg(val, reg_name##1_el1); \
break; \
case 2: \
write_sysreg(val, reg_name##2_el1); \
break; \
case 3: \
write_sysreg(val, reg_name##3_el1); \
break; \
case 4: \
write_sysreg(val, reg_name##4_el1); \
break; \
case 5: \
write_sysreg(val, reg_name##5_el1); \
break; \
case 6: \
write_sysreg(val, reg_name##6_el1); \
break; \
case 7: \
write_sysreg(val, reg_name##7_el1); \
break; \
case 8: \
write_sysreg(val, reg_name##8_el1); \
break; \
case 9: \
write_sysreg(val, reg_name##9_el1); \
break; \
case 10: \
write_sysreg(val, reg_name##10_el1); \
break; \
case 11: \
write_sysreg(val, reg_name##11_el1); \
break; \
case 12: \
write_sysreg(val, reg_name##12_el1); \
break; \
case 13: \
write_sysreg(val, reg_name##13_el1); \
break; \
case 14: \
write_sysreg(val, reg_name##14_el1); \
break; \
case 15: \
write_sysreg(val, reg_name##15_el1); \
break; \
default: \
GUEST_ASSERT(0); \
} \
}
/* Define write_dbgbcr()/write_dbgbvr()/write_dbgwcr()/write_dbgwvr() */
GEN_DEBUG_WRITE_REG(dbgbcr)
GEN_DEBUG_WRITE_REG(dbgbvr)
GEN_DEBUG_WRITE_REG(dbgwcr)
GEN_DEBUG_WRITE_REG(dbgwvr)
static void reset_debug_state(void)
{
uint8_t brps, wrps, i;
uint64_t dfr0;
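/* Mask debug exceptions (PSTATE.D) while the debug registers are rewritten. */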
asm volatile("msr daifset, #8");
write_sysreg(0, osdlr_el1);
write_sysreg(0, oslar_el1);
isb();
write_sysreg(0, mdscr_el1);
write_sysreg(0, contextidr_el1);
/* Reset all bcr/bvr/wcr/wvr registers */
dfr0 = read_sysreg(id_aa64dfr0_el1);
brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_BRPS), dfr0);
for (i = 0; i <= brps; i++) {
write_dbgbcr(i, 0);
write_dbgbvr(i, 0);
}
wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_WRPS), dfr0);
for (i = 0; i <= wrps; i++) {
write_dbgwcr(i, 0);
write_dbgwvr(i, 0);
}
isb();
}
static void enable_os_lock(void)
{
write_sysreg(1, oslar_el1);
isb();
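/* OSLSR_EL1.OSLK (bit 1) reflects the OS Lock state. */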
GUEST_ASSERT(read_sysreg(oslsr_el1) & 2);
}
static void enable_monitor_debug_exceptions(void)
{
uint32_t mdscr;
asm volatile("msr daifclr, #8");
mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE;
write_sysreg(mdscr, mdscr_el1);
isb();
}
static void install_wp(uint8_t wpn, uint64_t addr)
{
uint32_t wcr;
wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E;
write_dbgwcr(wpn, wcr);
write_dbgwvr(wpn, addr);
isb();
enable_monitor_debug_exceptions();
}
static void install_hw_bp(uint8_t bpn, uint64_t addr)
{
uint32_t bcr;
bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E;
write_dbgbcr(bpn, bcr);
write_dbgbvr(bpn, addr);
isb();
enable_monitor_debug_exceptions();
}
static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr,
uint64_t ctx)
{
uint32_t wcr;
uint64_t ctx_bcr;
/* Setup a context-aware breakpoint for Linked Context ID Match */
ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
DBGBCR_BT_CTX_LINK;
write_dbgbcr(ctx_bp, ctx_bcr);
write_dbgbvr(ctx_bp, ctx);
/* Setup a linked watchpoint (linked to the context-aware breakpoint) */
wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E |
DBGWCR_WT_LINK | ((uint32_t)ctx_bp << DBGWCR_LBN_SHIFT);
write_dbgwcr(addr_wp, wcr);
write_dbgwvr(addr_wp, addr);
isb();
enable_monitor_debug_exceptions();
}
void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr,
uint64_t ctx)
{
uint32_t addr_bcr, ctx_bcr;
/* Setup a context-aware breakpoint for Linked Context ID Match */
ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
DBGBCR_BT_CTX_LINK;
write_dbgbcr(ctx_bp, ctx_bcr);
write_dbgbvr(ctx_bp, ctx);
/*
* Setup a normal breakpoint for Linked Address Match, and link it
* to the context-aware breakpoint.
*/
addr_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
DBGBCR_BT_ADDR_LINK_CTX |
((uint32_t)ctx_bp << DBGBCR_LBN_SHIFT);
write_dbgbcr(addr_bp, addr_bcr);
write_dbgbvr(addr_bp, addr);
isb();
enable_monitor_debug_exceptions();
}
static void install_ss(void)
{
uint32_t mdscr;
asm volatile("msr daifclr, #8");
mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_SS;
write_sysreg(mdscr, mdscr_el1);
isb();
}
static volatile char write_data;
static void guest_code(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
{
uint64_t ctx = 0xabcdef; /* a random context number */
/* Software-breakpoint */
reset_debug_state();
asm volatile("sw_bp: brk #0");
GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp));
/* Hardware-breakpoint */
reset_debug_state();
install_hw_bp(bpn, PC(hw_bp));
asm volatile("hw_bp: nop");
GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp));
/* Hardware-breakpoint + svc */
reset_debug_state();
install_hw_bp(bpn, PC(bp_svc));
asm volatile("bp_svc: svc #0");
GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_svc));
GUEST_ASSERT_EQ(svc_addr, PC(bp_svc) + 4);
/* Hardware-breakpoint + software-breakpoint */
reset_debug_state();
install_hw_bp(bpn, PC(bp_brk));
asm volatile("bp_brk: brk #0");
GUEST_ASSERT_EQ(sw_bp_addr, PC(bp_brk));
GUEST_ASSERT_EQ(hw_bp_addr, PC(bp_brk));
/* Watchpoint */
reset_debug_state();
install_wp(wpn, PC(write_data));
write_data = 'x';
GUEST_ASSERT_EQ(write_data, 'x');
GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));
/* Single-step */
reset_debug_state();
install_ss();
ss_idx = 0;
asm volatile("ss_start:\n"
"mrs x0, esr_el1\n"
"add x0, x0, #1\n"
"msr daifset, #8\n"
: : : "x0");
GUEST_ASSERT_EQ(ss_addr[0], PC(ss_start));
GUEST_ASSERT_EQ(ss_addr[1], PC(ss_start) + 4);
GUEST_ASSERT_EQ(ss_addr[2], PC(ss_start) + 8);
/* OS Lock does not block software-breakpoint */
reset_debug_state();
enable_os_lock();
sw_bp_addr = 0;
asm volatile("sw_bp2: brk #0");
GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp2));
/* OS Lock blocking hardware-breakpoint */
reset_debug_state();
enable_os_lock();
install_hw_bp(bpn, PC(hw_bp2));
hw_bp_addr = 0;
asm volatile("hw_bp2: nop");
GUEST_ASSERT_EQ(hw_bp_addr, 0);
/* OS Lock blocking watchpoint */
reset_debug_state();
enable_os_lock();
write_data = '\0';
wp_data_addr = 0;
install_wp(wpn, PC(write_data));
write_data = 'x';
GUEST_ASSERT_EQ(write_data, 'x');
GUEST_ASSERT_EQ(wp_data_addr, 0);
/* OS Lock blocking single-step */
reset_debug_state();
enable_os_lock();
ss_addr[0] = 0;
install_ss();
ss_idx = 0;
asm volatile("mrs x0, esr_el1\n\t"
"add x0, x0, #1\n\t"
"msr daifset, #8\n\t"
: : : "x0");
GUEST_ASSERT_EQ(ss_addr[0], 0);
/* Linked hardware-breakpoint */
hw_bp_addr = 0;
reset_debug_state();
install_hw_bp_ctx(bpn, ctx_bpn, PC(hw_bp_ctx), ctx);
/* Set context id */
write_sysreg(ctx, contextidr_el1);
isb();
asm volatile("hw_bp_ctx: nop");
write_sysreg(0, contextidr_el1);
GUEST_ASSERT_EQ(hw_bp_addr, PC(hw_bp_ctx));
/* Linked watchpoint */
reset_debug_state();
install_wp_ctx(wpn, ctx_bpn, PC(write_data), ctx);
/* Set context id */
write_sysreg(ctx, contextidr_el1);
isb();
write_data = 'x';
GUEST_ASSERT_EQ(write_data, 'x');
GUEST_ASSERT_EQ(wp_data_addr, PC(write_data));
GUEST_DONE();
}
static void guest_sw_bp_handler(struct ex_regs *regs)
{
sw_bp_addr = regs->pc;
regs->pc += 4;
}
static void guest_hw_bp_handler(struct ex_regs *regs)
{
hw_bp_addr = regs->pc;
regs->pstate |= SPSR_D;
}
static void guest_wp_handler(struct ex_regs *regs)
{
wp_data_addr = read_sysreg(far_el1);
wp_addr = regs->pc;
regs->pstate |= SPSR_D;
}
static void guest_ss_handler(struct ex_regs *regs)
{
__GUEST_ASSERT(ss_idx < 4, "Expected index < 4, got '%u'", ss_idx);
ss_addr[ss_idx++] = regs->pc;
regs->pstate |= SPSR_SS;
}
static void guest_svc_handler(struct ex_regs *regs)
{
svc_addr = regs->pc;
}
static void guest_code_ss(int test_cnt)
{
uint64_t i;
uint64_t bvr, wvr, w_bvr, w_wvr;
for (i = 0; i < test_cnt; i++) {
/* Bits [1:0] of dbg{b,w}vr are RES0 */
w_bvr = i << 2;
w_wvr = i << 2;
/*
* Enable Single Step execution. Note! This _must_ be a bare
* ucall as the ucall() path uses atomic operations to manage
* the ucall structures, and the built-in "atomics" are usually
* implemented via exclusive access instructions. The exclusive
* monitor is cleared on ERET, and so taking debug exceptions
* during a LDREX=>STREX sequence will prevent forward progress
* and hang the guest/test.
*/
GUEST_UCALL_NONE();
/*
* The userspace will verify that the pc is as expected during
* single step execution between iter_ss_begin and iter_ss_end.
*/
asm volatile("iter_ss_begin:nop\n");
write_sysreg(w_bvr, dbgbvr0_el1);
write_sysreg(w_wvr, dbgwvr0_el1);
bvr = read_sysreg(dbgbvr0_el1);
wvr = read_sysreg(dbgwvr0_el1);
/* Userspace disables Single Step when the end is nigh. */
asm volatile("iter_ss_end:\n");
GUEST_ASSERT_EQ(bvr, w_bvr);
GUEST_ASSERT_EQ(wvr, w_wvr);
}
GUEST_DONE();
}
static int debug_version(uint64_t id_aa64dfr0)
{
return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), id_aa64dfr0);
}
static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_BRK_INS, guest_sw_bp_handler);
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_HW_BP_CURRENT, guest_hw_bp_handler);
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_WP_CURRENT, guest_wp_handler);
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_SSTEP_CURRENT, guest_ss_handler);
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_SVC64, guest_svc_handler);
/* Specify bpn/wpn/ctx_bpn to be tested */
vcpu_args_set(vcpu, 3, bpn, wpn, ctx_bpn);
pr_debug("Use bpn#%d, wpn#%d and ctx_bpn#%d\n", bpn, wpn, ctx_bpn);
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
done:
kvm_vm_free(vm);
}
void test_single_step_from_userspace(int test_cnt)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
struct kvm_run *run;
uint64_t pc, cmd;
uint64_t test_pc = 0;
bool ss_enable = false;
struct kvm_guest_debug debug = {};
vm = vm_create_with_one_vcpu(&vcpu, guest_code_ss);
run = vcpu->run;
vcpu_args_set(vcpu, 1, test_cnt);
while (1) {
vcpu_run(vcpu);
if (run->exit_reason != KVM_EXIT_DEBUG) {
cmd = get_ucall(vcpu, &uc);
if (cmd == UCALL_ABORT) {
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
} else if (cmd == UCALL_DONE) {
break;
}
TEST_ASSERT(cmd == UCALL_NONE,
"Unexpected ucall cmd 0x%lx", cmd);
debug.control = KVM_GUESTDBG_ENABLE |
KVM_GUESTDBG_SINGLESTEP;
ss_enable = true;
vcpu_guest_debug_set(vcpu, &debug);
continue;
}
TEST_ASSERT(ss_enable, "Unexpected KVM_EXIT_DEBUG");
/* Check if the current pc is expected. */
vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
TEST_ASSERT(!test_pc || pc == test_pc,
"Unexpected pc 0x%lx (expected 0x%lx)",
pc, test_pc);
if ((pc + 4) == (uint64_t)&iter_ss_end) {
test_pc = 0;
debug.control = KVM_GUESTDBG_ENABLE;
ss_enable = false;
vcpu_guest_debug_set(vcpu, &debug);
continue;
}
/*
* If the current pc is between iter_ss_begin and
* iter_ss_end, the pc for the next KVM_EXIT_DEBUG should
* be the current pc + 4.
*/
if ((pc >= (uint64_t)&iter_ss_begin) &&
(pc < (uint64_t)&iter_ss_end))
test_pc = pc + 4;
else
test_pc = 0;
}
kvm_vm_free(vm);
}
/*
* Run debug testing using the various breakpoint#, watchpoint# and
* context-aware breakpoint# with the given ID_AA64DFR0_EL1 configuration.
*/
void test_guest_debug_exceptions_all(uint64_t aa64dfr0)
{
uint8_t brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base;
int b, w, c;
/* Number of breakpoints */
brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_BRPS), aa64dfr0) + 1;
__TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required");
/* Number of watchpoints */
wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_WRPS), aa64dfr0) + 1;
/* Number of context aware breakpoints */
ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_CTX_CMPS), aa64dfr0) + 1;
pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__,
brp_num, wrp_num, ctx_brp_num);
/* Number of normal (non-context aware) breakpoints */
normal_brp_num = brp_num - ctx_brp_num;
/* Lowest context aware breakpoint number */
ctx_brp_base = normal_brp_num;
/* Run tests with all supported breakpoints/watchpoints */
for (c = ctx_brp_base; c < ctx_brp_base + ctx_brp_num; c++) {
for (b = 0; b < normal_brp_num; b++) {
for (w = 0; w < wrp_num; w++)
test_guest_debug_exceptions(b, w, c);
}
}
}
static void help(char *name)
{
puts("");
printf("Usage: %s [-h] [-i iterations of the single step test]\n", name);
puts("");
exit(0);
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int opt;
int ss_iteration = 10000;
uint64_t aa64dfr0;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &aa64dfr0);
__TEST_REQUIRE(debug_version(aa64dfr0) >= 6,
"Armv8 debug architecture not supported.");
kvm_vm_free(vm);
while ((opt = getopt(argc, argv, "i:")) != -1) {
switch (opt) {
case 'i':
ss_iteration = atoi_positive("Number of iterations", optarg);
break;
case 'h':
default:
help(argv[0]);
break;
}
}
test_guest_debug_exceptions_all(aa64dfr0);
test_single_step_from_userspace(ss_iteration);
return 0;
}
| linux-master | tools/testing/selftests/kvm/aarch64/debug-exceptions.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch_timer.c - Tests the aarch64 timer IRQ functionality
*
* The test validates both the virtual and physical timer IRQs using
* CVAL and TVAL registers. This constitutes the four stages of the test.
* The guest's main thread configures the timer interrupt for a stage
* and waits for it to fire, allowing one timer period plus a small error
* margin. It asserts that the interrupt arrives within that window.
*
* On the other hand, upon receipt of an interrupt, the guest's interrupt
* handler validates the interrupt by checking if the architectural state
* is in compliance with the specifications.
*
* The test provides command-line options to configure the timer's
* period (-p), number of vCPUs (-n), and iterations per stage (-i).
* To stress-test the timer stack even more, an option to migrate the
* vCPUs across pCPUs (-m), at a particular rate, is also provided.
*
* Copyright (c) 2021, Google LLC.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <pthread.h>
#include <linux/kvm.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <sys/sysinfo.h>
#include "kvm_util.h"
#include "processor.h"
#include "delay.h"
#include "arch_timer.h"
#include "gic.h"
#include "vgic.h"
#define NR_VCPUS_DEF 4
#define NR_TEST_ITERS_DEF 5
#define TIMER_TEST_PERIOD_MS_DEF 10
#define TIMER_TEST_ERR_MARGIN_US 100
#define TIMER_TEST_MIGRATION_FREQ_MS 2
struct test_args {
int nr_vcpus;
int nr_iter;
int timer_period_ms;
int migration_freq_ms;
struct kvm_arm_counter_offset offset;
};
static struct test_args test_args = {
.nr_vcpus = NR_VCPUS_DEF,
.nr_iter = NR_TEST_ITERS_DEF,
.timer_period_ms = TIMER_TEST_PERIOD_MS_DEF,
.migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS,
.offset = { .reserved = 1 },
};
#define msecs_to_usecs(msec) ((msec) * 1000LL)
#define GICD_BASE_GPA 0x8000000ULL
#define GICR_BASE_GPA 0x80A0000ULL
enum guest_stage {
GUEST_STAGE_VTIMER_CVAL = 1,
GUEST_STAGE_VTIMER_TVAL,
GUEST_STAGE_PTIMER_CVAL,
GUEST_STAGE_PTIMER_TVAL,
GUEST_STAGE_MAX,
};
/* Shared variables between host and guest */
struct test_vcpu_shared_data {
int nr_iter;
enum guest_stage guest_stage;
uint64_t xcnt;
};
static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
static pthread_t pt_vcpu_run[KVM_MAX_VCPUS];
static struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS];
static int vtimer_irq, ptimer_irq;
static unsigned long *vcpu_done_map;
static pthread_mutex_t vcpu_done_map_lock;
static void
guest_configure_timer_action(struct test_vcpu_shared_data *shared_data)
{
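/*
* CVAL stages program an absolute compare value and TVAL stages a relative
* downcount; either way the timer fires one period from now, and xcnt
* snapshots the counter at setup time.
*/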
switch (shared_data->guest_stage) {
case GUEST_STAGE_VTIMER_CVAL:
timer_set_next_cval_ms(VIRTUAL, test_args.timer_period_ms);
shared_data->xcnt = timer_get_cntct(VIRTUAL);
timer_set_ctl(VIRTUAL, CTL_ENABLE);
break;
case GUEST_STAGE_VTIMER_TVAL:
timer_set_next_tval_ms(VIRTUAL, test_args.timer_period_ms);
shared_data->xcnt = timer_get_cntct(VIRTUAL);
timer_set_ctl(VIRTUAL, CTL_ENABLE);
break;
case GUEST_STAGE_PTIMER_CVAL:
timer_set_next_cval_ms(PHYSICAL, test_args.timer_period_ms);
shared_data->xcnt = timer_get_cntct(PHYSICAL);
timer_set_ctl(PHYSICAL, CTL_ENABLE);
break;
case GUEST_STAGE_PTIMER_TVAL:
timer_set_next_tval_ms(PHYSICAL, test_args.timer_period_ms);
shared_data->xcnt = timer_get_cntct(PHYSICAL);
timer_set_ctl(PHYSICAL, CTL_ENABLE);
break;
default:
GUEST_ASSERT(0);
}
}
static void guest_validate_irq(unsigned int intid,
struct test_vcpu_shared_data *shared_data)
{
enum guest_stage stage = shared_data->guest_stage;
uint64_t xcnt = 0, xcnt_diff_us, cval = 0;
unsigned long xctl = 0;
unsigned int timer_irq = 0;
unsigned int accessor;
if (intid == IAR_SPURIOUS)
return;
switch (stage) {
case GUEST_STAGE_VTIMER_CVAL:
case GUEST_STAGE_VTIMER_TVAL:
accessor = VIRTUAL;
timer_irq = vtimer_irq;
break;
case GUEST_STAGE_PTIMER_CVAL:
case GUEST_STAGE_PTIMER_TVAL:
accessor = PHYSICAL;
timer_irq = ptimer_irq;
break;
default:
GUEST_ASSERT(0);
return;
}
xctl = timer_get_ctl(accessor);
if ((xctl & CTL_IMASK) || !(xctl & CTL_ENABLE))
return;
timer_set_ctl(accessor, CTL_IMASK);
xcnt = timer_get_cntct(accessor);
cval = timer_get_cval(accessor);
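/* A TVAL write also updates CVAL (TVAL = CVAL - CNT), so this read is valid for the TVAL stages too. */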
xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt);
/* Make sure we are dealing with the correct timer IRQ */
GUEST_ASSERT_EQ(intid, timer_irq);
/* Basic 'timer condition met' check */
__GUEST_ASSERT(xcnt >= cval,
"xcnt = 0x%llx, cval = 0x%llx, xcnt_diff_us = 0x%llx",
xcnt, cval, xcnt_diff_us);
__GUEST_ASSERT(xctl & CTL_ISTATUS, "xcnt = 0x%llx", xcnt);
WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
}
static void guest_irq_handler(struct ex_regs *regs)
{
unsigned int intid = gic_get_and_ack_irq();
uint32_t cpu = guest_get_vcpuid();
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
guest_validate_irq(intid, shared_data);
gic_set_eoi(intid);
}
static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
enum guest_stage stage)
{
uint32_t irq_iter, config_iter;
shared_data->guest_stage = stage;
shared_data->nr_iter = 0;
for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
/* Setup the next interrupt */
guest_configure_timer_action(shared_data);
/* Setup a timeout for the interrupt to arrive */
udelay(msecs_to_usecs(test_args.timer_period_ms) +
TIMER_TEST_ERR_MARGIN_US);
irq_iter = READ_ONCE(shared_data->nr_iter);
GUEST_ASSERT_EQ(config_iter + 1, irq_iter);
}
}
static void guest_code(void)
{
uint32_t cpu = guest_get_vcpuid();
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
local_irq_disable();
gic_init(GIC_V3, test_args.nr_vcpus,
(void *)GICD_BASE_GPA, (void *)GICR_BASE_GPA);
timer_set_ctl(VIRTUAL, CTL_IMASK);
timer_set_ctl(PHYSICAL, CTL_IMASK);
gic_irq_enable(vtimer_irq);
gic_irq_enable(ptimer_irq);
local_irq_enable();
guest_run_stage(shared_data, GUEST_STAGE_VTIMER_CVAL);
guest_run_stage(shared_data, GUEST_STAGE_VTIMER_TVAL);
guest_run_stage(shared_data, GUEST_STAGE_PTIMER_CVAL);
guest_run_stage(shared_data, GUEST_STAGE_PTIMER_TVAL);
GUEST_DONE();
}
static void *test_vcpu_run(void *arg)
{
unsigned int vcpu_idx = (unsigned long)arg;
struct ucall uc;
struct kvm_vcpu *vcpu = vcpus[vcpu_idx];
struct kvm_vm *vm = vcpu->vm;
struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx];
vcpu_run(vcpu);
/* Currently, any exit from the guest is an indication of completion */
pthread_mutex_lock(&vcpu_done_map_lock);
__set_bit(vcpu_idx, vcpu_done_map);
pthread_mutex_unlock(&vcpu_done_map_lock);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
case UCALL_DONE:
break;
case UCALL_ABORT:
sync_global_from_guest(vm, *shared_data);
fprintf(stderr, "Guest assert failed, vcpu %u; stage; %u; iter: %u\n",
vcpu_idx, shared_data->guest_stage, shared_data->nr_iter);
REPORT_GUEST_ASSERT(uc);
break;
default:
TEST_FAIL("Unexpected guest exit\n");
}
return NULL;
}
static uint32_t test_get_pcpu(void)
{
uint32_t pcpu;
unsigned int nproc_conf;
cpu_set_t online_cpuset;
nproc_conf = get_nprocs_conf();
sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset);
/* Randomly find an available pCPU to place a vCPU on */
do {
pcpu = rand() % nproc_conf;
} while (!CPU_ISSET(pcpu, &online_cpuset));
return pcpu;
}
static int test_migrate_vcpu(unsigned int vcpu_idx)
{
int ret;
cpu_set_t cpuset;
uint32_t new_pcpu = test_get_pcpu();
CPU_ZERO(&cpuset);
CPU_SET(new_pcpu, &cpuset);
pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu);
ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx],
sizeof(cpuset), &cpuset);
/* Allow the error where the vCPU thread is already finished */
TEST_ASSERT(ret == 0 || ret == ESRCH,
"Failed to migrate the vCPU:%u to pCPU: %u; ret: %d\n",
vcpu_idx, new_pcpu, ret);
return ret;
}
static void *test_vcpu_migration(void *arg)
{
unsigned int i, n_done;
bool vcpu_done;
do {
usleep(msecs_to_usecs(test_args.migration_freq_ms));
for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) {
pthread_mutex_lock(&vcpu_done_map_lock);
vcpu_done = test_bit(i, vcpu_done_map);
pthread_mutex_unlock(&vcpu_done_map_lock);
if (vcpu_done) {
n_done++;
continue;
}
test_migrate_vcpu(i);
}
} while (test_args.nr_vcpus != n_done);
return NULL;
}
static void test_run(struct kvm_vm *vm)
{
pthread_t pt_vcpu_migration;
unsigned int i;
int ret;
pthread_mutex_init(&vcpu_done_map_lock, NULL);
vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap\n");
for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run,
(void *)(unsigned long)i);
TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread\n", i);
}
/* Spawn a thread to control the vCPU migrations */
if (test_args.migration_freq_ms) {
srand(time(NULL));
ret = pthread_create(&pt_vcpu_migration, NULL,
test_vcpu_migration, NULL);
TEST_ASSERT(!ret, "Failed to create the migration pthread\n");
}
for (i = 0; i < test_args.nr_vcpus; i++)
pthread_join(pt_vcpu_run[i], NULL);
if (test_args.migration_freq_ms)
pthread_join(pt_vcpu_migration, NULL);
bitmap_free(vcpu_done_map);
}
static void test_init_timer_irq(struct kvm_vm *vm)
{
/* The timer IRQ IDs should be the same for all vCPUs, so query only vCPU-0 */
vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq);
vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);
sync_global_to_guest(vm, ptimer_irq);
sync_global_to_guest(vm, vtimer_irq);
pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
}
static int gic_fd;
static struct kvm_vm *test_vm_create(void)
{
struct kvm_vm *vm;
unsigned int i;
int nr_vcpus = test_args.nr_vcpus;
vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
vm_init_descriptor_tables(vm);
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);
if (!test_args.offset.reserved) {
if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET))
vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &test_args.offset);
else
TEST_FAIL("no support for global offset\n");
}
for (i = 0; i < nr_vcpus; i++)
vcpu_init_descriptor_tables(vcpus[i]);
test_init_timer_irq(vm);
gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
__TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3");
/* Make all the test's cmdline args visible to the guest */
sync_global_to_guest(vm, test_args);
return vm;
}
static void test_vm_cleanup(struct kvm_vm *vm)
{
close(gic_fd);
kvm_vm_free(vm);
}
static void test_print_help(char *name)
{
pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n",
name);
pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n",
NR_VCPUS_DEF, KVM_MAX_VCPUS);
pr_info("\t-i: Number of iterations per stage (default: %u)\n",
NR_TEST_ITERS_DEF);
pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n",
TIMER_TEST_PERIOD_MS_DEF);
pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n",
TIMER_TEST_MIGRATION_FREQ_MS);
pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n");
pr_info("\t-h: print this help screen\n");
}
static bool parse_args(int argc, char *argv[])
{
int opt;
while ((opt = getopt(argc, argv, "hn:i:p:m:o:")) != -1) {
switch (opt) {
case 'n':
test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
if (test_args.nr_vcpus > KVM_MAX_VCPUS) {
pr_info("Max allowed vCPUs: %u\n",
KVM_MAX_VCPUS);
goto err;
}
break;
case 'i':
test_args.nr_iter = atoi_positive("Number of iterations", optarg);
break;
case 'p':
test_args.timer_period_ms = atoi_positive("Periodicity", optarg);
break;
case 'm':
test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg);
break;
case 'o':
test_args.offset.counter_offset = strtol(optarg, NULL, 0);
test_args.offset.reserved = 0;
break;
case 'h':
default:
goto err;
}
}
return true;
err:
test_print_help(argv[0]);
return false;
}
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
if (!parse_args(argc, argv))
exit(KSFT_SKIP);
__TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2,
"At least two physical CPUs needed for vCPU migration");
vm = test_vm_create();
test_run(vm);
test_vm_cleanup(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/aarch64/arch_timer.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* smccc_filter - Tests for the SMCCC filter UAPI.
*
* Copyright (c) 2023 Google LLC
*
* This test includes:
* - Tests that the UAPI constraints are upheld by KVM. For example, userspace
* is prevented from filtering the architecture range of SMCCC calls.
* - Test that the filter actions (DENIED, FWD_TO_USER) work as intended.
*/
#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <stdint.h>
#include "processor.h"
#include "test_util.h"
enum smccc_conduit {
HVC_INSN,
SMC_INSN,
};
#define for_each_conduit(conduit) \
for (conduit = HVC_INSN; conduit <= SMC_INSN; conduit++)
static void guest_main(uint32_t func_id, enum smccc_conduit conduit)
{
struct arm_smccc_res res;
if (conduit == SMC_INSN)
smccc_smc(func_id, 0, 0, 0, 0, 0, 0, 0, &res);
else
smccc_hvc(func_id, 0, 0, 0, 0, 0, 0, 0, &res);
GUEST_SYNC(res.a0);
}
static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
enum kvm_smccc_filter_action action)
{
struct kvm_smccc_filter filter = {
.base = start,
.nr_functions = nr_functions,
.action = action,
};
return __kvm_device_attr_set(vm->fd, KVM_ARM_VM_SMCCC_CTRL,
KVM_ARM_VM_SMCCC_FILTER, &filter);
}
static void set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
enum kvm_smccc_filter_action action)
{
int ret = __set_smccc_filter(vm, start, nr_functions, action);
TEST_ASSERT(!ret, "failed to configure SMCCC filter: %d", ret);
}
static struct kvm_vm *setup_vm(struct kvm_vcpu **vcpu)
{
struct kvm_vcpu_init init;
struct kvm_vm *vm;
vm = vm_create(1);
vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
/*
* Enable in-kernel emulation of PSCI to ensure that calls are denied
* due to the SMCCC filter, not because of KVM.
*/
init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
*vcpu = aarch64_vcpu_add(vm, 0, &init, guest_main);
return vm;
}
static void test_pad_must_be_zero(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = setup_vm(&vcpu);
struct kvm_smccc_filter filter = {
.base = PSCI_0_2_FN_PSCI_VERSION,
.nr_functions = 1,
.action = KVM_SMCCC_FILTER_DENY,
.pad = { -1 },
};
int r;
r = __kvm_device_attr_set(vm->fd, KVM_ARM_VM_SMCCC_CTRL,
KVM_ARM_VM_SMCCC_FILTER, &filter);
TEST_ASSERT(r < 0 && errno == EINVAL,
"Setting filter with nonzero padding should return EINVAL");
}
/* Ensure that userspace cannot filter the Arm Architecture SMCCC range */
static void test_filter_reserved_range(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = setup_vm(&vcpu);
uint32_t smc64_fn;
int r;
r = __set_smccc_filter(vm, ARM_SMCCC_ARCH_WORKAROUND_1,
1, KVM_SMCCC_FILTER_DENY);
TEST_ASSERT(r < 0 && errno == EEXIST,
"Attempt to filter reserved range should return EEXIST");
smc64_fn = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
0, 0);
r = __set_smccc_filter(vm, smc64_fn, 1, KVM_SMCCC_FILTER_DENY);
TEST_ASSERT(r < 0 && errno == EEXIST,
"Attempt to filter reserved range should return EEXIST");
kvm_vm_free(vm);
}
static void test_invalid_nr_functions(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = setup_vm(&vcpu);
int r;
r = __set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 0, KVM_SMCCC_FILTER_DENY);
TEST_ASSERT(r < 0 && errno == EINVAL,
"Attempt to filter 0 functions should return EINVAL");
kvm_vm_free(vm);
}
static void test_overflow_nr_functions(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = setup_vm(&vcpu);
int r;
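/* base = ~0 with nr_functions = ~0 overflows the function ID range, which KVM is expected to reject. */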
r = __set_smccc_filter(vm, ~0, ~0, KVM_SMCCC_FILTER_DENY);
TEST_ASSERT(r < 0 && errno == EINVAL,
"Attempt to overflow filter range should return EINVAL");
kvm_vm_free(vm);
}
static void test_reserved_action(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = setup_vm(&vcpu);
int r;
r = __set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 1, -1);
TEST_ASSERT(r < 0 && errno == EINVAL,
"Attempt to use reserved filter action should return EINVAL");
kvm_vm_free(vm);
}
/* Test that overlapping configurations of the SMCCC filter are rejected */
static void test_filter_overlap(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = setup_vm(&vcpu);
int r;
set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 1, KVM_SMCCC_FILTER_DENY);
r = __set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 1, KVM_SMCCC_FILTER_DENY);
TEST_ASSERT(r < 0 && errno == EEXIST,
"Attempt to filter already configured range should return EEXIST");
kvm_vm_free(vm);
}
static void expect_call_denied(struct kvm_vcpu *vcpu)
{
struct ucall uc;
if (get_ucall(vcpu, &uc) != UCALL_SYNC)
TEST_FAIL("Unexpected ucall: %lu\n", uc.cmd);
TEST_ASSERT(uc.args[1] == SMCCC_RET_NOT_SUPPORTED,
"Unexpected SMCCC return code: %lu", uc.args[1]);
}
/* Denied SMCCC calls have a return code of SMCCC_RET_NOT_SUPPORTED */
static void test_filter_denied(void)
{
enum smccc_conduit conduit;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
for_each_conduit(conduit) {
vm = setup_vm(&vcpu);
set_smccc_filter(vm, PSCI_0_2_FN_PSCI_VERSION, 1, KVM_SMCCC_FILTER_DENY);
vcpu_args_set(vcpu, 2, PSCI_0_2_FN_PSCI_VERSION, conduit);
vcpu_run(vcpu);
expect_call_denied(vcpu);
kvm_vm_free(vm);
}
}
static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, uint32_t func_id,
enum smccc_conduit conduit)
{
struct kvm_run *run = vcpu->run;
TEST_ASSERT(run->exit_reason == KVM_EXIT_HYPERCALL,
"Unexpected exit reason: %u", run->exit_reason);
TEST_ASSERT(run->hypercall.nr == func_id,
"Unexpected SMCCC function: %llu", run->hypercall.nr);
if (conduit == SMC_INSN)
TEST_ASSERT(run->hypercall.flags & KVM_HYPERCALL_EXIT_SMC,
"KVM_HYPERCALL_EXIT_SMC is not set");
else
TEST_ASSERT(!(run->hypercall.flags & KVM_HYPERCALL_EXIT_SMC),
"KVM_HYPERCALL_EXIT_SMC is set");
}
/* SMCCC calls forwarded to userspace cause KVM_EXIT_HYPERCALL exits */
static void test_filter_fwd_to_user(void)
{
enum smccc_conduit conduit;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
for_each_conduit(conduit) {
vm = setup_vm(&vcpu);
set_smccc_filter(vm, PSCI_0_2_FN_PSCI_VERSION, 1, KVM_SMCCC_FILTER_FWD_TO_USER);
vcpu_args_set(vcpu, 2, PSCI_0_2_FN_PSCI_VERSION, conduit);
vcpu_run(vcpu);
expect_call_fwd_to_user(vcpu, PSCI_0_2_FN_PSCI_VERSION, conduit);
kvm_vm_free(vm);
}
}
static bool kvm_supports_smccc_filter(void)
{
struct kvm_vm *vm = vm_create_barebones();
int r;
r = __kvm_has_device_attr(vm->fd, KVM_ARM_VM_SMCCC_CTRL, KVM_ARM_VM_SMCCC_FILTER);
kvm_vm_free(vm);
return !r;
}
int main(void)
{
TEST_REQUIRE(kvm_supports_smccc_filter());
test_pad_must_be_zero();
test_invalid_nr_functions();
test_overflow_nr_functions();
test_reserved_action();
test_filter_reserved_range();
test_filter_overlap();
test_filter_denied();
test_filter_fwd_to_user();
}
| linux-master | tools/testing/selftests/kvm/aarch64/smccc_filter.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* aarch32_id_regs - Test for ID register behavior on AArch64-only systems
*
* Copyright (c) 2022 Google LLC.
*
* Test that KVM handles the AArch64 views of the AArch32 ID registers as RAZ
* and WI from userspace.
*/
#include <stdint.h>
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"
#include <linux/bitfield.h>
#define BAD_ID_REG_VAL 0x1badc0deul
#define GUEST_ASSERT_REG_RAZ(reg) GUEST_ASSERT_EQ(read_sysreg_s(reg), 0)
static void guest_main(void)
{
GUEST_ASSERT_REG_RAZ(SYS_ID_PFR0_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_PFR1_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_DFR0_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_AFR0_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR0_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR1_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR2_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR3_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR0_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR1_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR2_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR3_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR4_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR5_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR4_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR6_EL1);
GUEST_ASSERT_REG_RAZ(SYS_MVFR0_EL1);
GUEST_ASSERT_REG_RAZ(SYS_MVFR1_EL1);
GUEST_ASSERT_REG_RAZ(SYS_MVFR2_EL1);
GUEST_ASSERT_REG_RAZ(sys_reg(3, 0, 0, 3, 3));
GUEST_ASSERT_REG_RAZ(SYS_ID_PFR2_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_DFR1_EL1);
GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR5_EL1);
GUEST_ASSERT_REG_RAZ(sys_reg(3, 0, 0, 3, 7));
GUEST_DONE();
}
static void test_guest_raz(struct kvm_vcpu *vcpu)
{
struct ucall uc;
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
break;
default:
TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
}
}
static uint64_t raz_wi_reg_ids[] = {
KVM_ARM64_SYS_REG(SYS_ID_PFR0_EL1),
KVM_ARM64_SYS_REG(SYS_ID_PFR1_EL1),
KVM_ARM64_SYS_REG(SYS_ID_DFR0_EL1),
KVM_ARM64_SYS_REG(SYS_ID_MMFR0_EL1),
KVM_ARM64_SYS_REG(SYS_ID_MMFR1_EL1),
KVM_ARM64_SYS_REG(SYS_ID_MMFR2_EL1),
KVM_ARM64_SYS_REG(SYS_ID_MMFR3_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR0_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR1_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR2_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR3_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR4_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR5_EL1),
KVM_ARM64_SYS_REG(SYS_ID_MMFR4_EL1),
KVM_ARM64_SYS_REG(SYS_ID_ISAR6_EL1),
KVM_ARM64_SYS_REG(SYS_MVFR0_EL1),
KVM_ARM64_SYS_REG(SYS_MVFR1_EL1),
KVM_ARM64_SYS_REG(SYS_MVFR2_EL1),
KVM_ARM64_SYS_REG(SYS_ID_PFR2_EL1),
KVM_ARM64_SYS_REG(SYS_ID_MMFR5_EL1),
};
static void test_user_raz_wi(struct kvm_vcpu *vcpu)
{
int i;
for (i = 0; i < ARRAY_SIZE(raz_wi_reg_ids); i++) {
uint64_t reg_id = raz_wi_reg_ids[i];
uint64_t val;
vcpu_get_reg(vcpu, reg_id, &val);
TEST_ASSERT_EQ(val, 0);
/*
* Expect the ioctl to succeed with no effect on the register
* value.
*/
vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
vcpu_get_reg(vcpu, reg_id, &val);
TEST_ASSERT_EQ(val, 0);
}
}
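/*
* Unlike the RAZ/WI registers above, these are treated as invariant:
* reads return zero and storing any other value is expected to fail with
* EINVAL. The sys_reg(3, 0, 0, 3, 3) and sys_reg(3, 0, 0, 3, 7) entries
* are unallocated encodings in the AArch32 ID register space.
*/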
static uint64_t raz_invariant_reg_ids[] = {
KVM_ARM64_SYS_REG(SYS_ID_AFR0_EL1),
KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 3)),
KVM_ARM64_SYS_REG(SYS_ID_DFR1_EL1),
KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 7)),
};
static void test_user_raz_invariant(struct kvm_vcpu *vcpu)
{
int i, r;
for (i = 0; i < ARRAY_SIZE(raz_invariant_reg_ids); i++) {
uint64_t reg_id = raz_invariant_reg_ids[i];
uint64_t val;
vcpu_get_reg(vcpu, reg_id, &val);
TEST_ASSERT_EQ(val, 0);
r = __vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
TEST_ASSERT(r < 0 && errno == EINVAL,
"unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
vcpu_get_reg(vcpu, reg_id, &val);
TEST_ASSERT_EQ(val, 0);
}
}
static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
{
uint64_t val, el0;
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), val);
return el0 == ID_AA64PFR0_ELx_64BIT_ONLY;
}
int main(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
TEST_REQUIRE(vcpu_aarch64_only(vcpu));
test_user_raz_wi(vcpu);
test_user_raz_invariant(vcpu);
test_guest_raz(vcpu);
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Check for KVM_GET_REG_LIST regressions.
*
* Copyright (C) 2020, Red Hat, Inc.
*
* While the blessed list should be created from the oldest possible
* kernel, we can't go older than v5.2, because that's the first
* release which includes df205b5c6328 ("KVM: arm64: Filter out invalid
* core register IDs in KVM_GET_REG_LIST"). Without that commit the core
* registers won't match expectations.
*/
#include <stdio.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"
struct feature_id_reg {
__u64 reg;
__u64 id_reg;
__u64 feat_shift;
__u64 feat_min;
};
static struct feature_id_reg feat_id_regs[] = {
{
ARM64_SYS_REG(3, 0, 2, 0, 3), /* TCR2_EL1 */
ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
0,
1
},
{
ARM64_SYS_REG(3, 0, 10, 2, 2), /* PIRE0_EL1 */
ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
4,
1
},
{
ARM64_SYS_REG(3, 0, 10, 2, 3), /* PIR_EL1 */
ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
4,
1
}
};
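/*
* Each entry above gates an optional register on a 4-bit ID register
* field: the register is only expected in KVM_GET_REG_LIST when bits
* [feat_shift + 3 : feat_shift] of id_reg read back as at least feat_min.
*/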
bool filter_reg(__u64 reg)
{
/*
* DEMUX register presence depends on the host's CLIDR_EL1.
* This means there's no set of them that we can bless.
*/
if ((reg & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
return true;
return false;
}
static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
int i, ret;
__u64 data, feat_val;
for (i = 0; i < ARRAY_SIZE(feat_id_regs); i++) {
if (feat_id_regs[i].reg == reg) {
ret = __vcpu_get_reg(vcpu, feat_id_regs[i].id_reg, &data);
if (ret < 0)
return false;
feat_val = ((data >> feat_id_regs[i].feat_shift) & 0xf);
return feat_val >= feat_id_regs[i].feat_min;
}
}
return true;
}
bool check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
return check_supported_feat_reg(vcpu, reg);
}
bool check_reject_set(int err)
{
return err == EPERM;
}
void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
struct vcpu_reg_sublist *s;
int feature;
for_each_sublist(c, s) {
if (s->finalize) {
feature = s->feature;
vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
}
}
}
#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)
#define CORE_REGS_XX_NR_WORDS 2
#define CORE_SPSR_XX_NR_WORDS 2
#define CORE_FPREGS_XX_NR_WORDS 4
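/* Core reg offsets count 32-bit words: 2 per 64-bit reg, 4 per 128-bit vreg. */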
static const char *core_id_to_str(const char *prefix, __u64 id)
{
__u64 core_off = id & ~REG_MASK, idx;
	/*
	 * core_off is the offset, in 32-bit words, into struct kvm_regs.
	 */
switch (core_off) {
case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
KVM_REG_ARM_CORE_REG(regs.regs[30]):
idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", prefix, idx);
return strdup_printf("KVM_REG_ARM_CORE_REG(regs.regs[%lld])", idx);
case KVM_REG_ARM_CORE_REG(regs.sp):
return "KVM_REG_ARM_CORE_REG(regs.sp)";
case KVM_REG_ARM_CORE_REG(regs.pc):
return "KVM_REG_ARM_CORE_REG(regs.pc)";
case KVM_REG_ARM_CORE_REG(regs.pstate):
return "KVM_REG_ARM_CORE_REG(regs.pstate)";
case KVM_REG_ARM_CORE_REG(sp_el1):
return "KVM_REG_ARM_CORE_REG(sp_el1)";
case KVM_REG_ARM_CORE_REG(elr_el1):
return "KVM_REG_ARM_CORE_REG(elr_el1)";
case KVM_REG_ARM_CORE_REG(spsr[0]) ...
KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", prefix, idx);
return strdup_printf("KVM_REG_ARM_CORE_REG(spsr[%lld])", idx);
case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", prefix, idx);
return strdup_printf("KVM_REG_ARM_CORE_REG(fp_regs.vregs[%lld])", idx);
case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
}
TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
return NULL;
}
static const char *sve_id_to_str(const char *prefix, __u64 id)
{
__u64 sve_off, n, i;
if (id == KVM_REG_ARM64_SVE_VLS)
return "KVM_REG_ARM64_SVE_VLS";
sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);
TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", prefix, id);
switch (sve_off) {
case KVM_REG_ARM64_SVE_ZREG_BASE ...
KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
"%s: Unexpected bits set in SVE ZREG id: 0x%llx", prefix, id);
return strdup_printf("KVM_REG_ARM64_SVE_ZREG(%lld, 0)", n);
case KVM_REG_ARM64_SVE_PREG_BASE ...
KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
"%s: Unexpected bits set in SVE PREG id: 0x%llx", prefix, id);
return strdup_printf("KVM_REG_ARM64_SVE_PREG(%lld, 0)", n);
case KVM_REG_ARM64_SVE_FFR_BASE:
TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
"%s: Unexpected bits set in SVE FFR id: 0x%llx", prefix, id);
return "KVM_REG_ARM64_SVE_FFR(0)";
}
	TEST_FAIL("%s: Unknown SVE reg id: 0x%llx", prefix, id);
	return NULL;
}
void print_reg(const char *prefix, __u64 id)
{
unsigned op0, op1, crn, crm, op2;
const char *reg_size = NULL;
TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
"%s: KVM_REG_ARM64 missing in reg id: 0x%llx", prefix, id);
switch (id & KVM_REG_SIZE_MASK) {
case KVM_REG_SIZE_U8:
reg_size = "KVM_REG_SIZE_U8";
break;
case KVM_REG_SIZE_U16:
reg_size = "KVM_REG_SIZE_U16";
break;
case KVM_REG_SIZE_U32:
reg_size = "KVM_REG_SIZE_U32";
break;
case KVM_REG_SIZE_U64:
reg_size = "KVM_REG_SIZE_U64";
break;
case KVM_REG_SIZE_U128:
reg_size = "KVM_REG_SIZE_U128";
break;
case KVM_REG_SIZE_U256:
reg_size = "KVM_REG_SIZE_U256";
break;
case KVM_REG_SIZE_U512:
reg_size = "KVM_REG_SIZE_U512";
break;
case KVM_REG_SIZE_U1024:
reg_size = "KVM_REG_SIZE_U1024";
break;
case KVM_REG_SIZE_U2048:
reg_size = "KVM_REG_SIZE_U2048";
break;
default:
TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
}
switch (id & KVM_REG_ARM_COPROC_MASK) {
case KVM_REG_ARM_CORE:
printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(prefix, id));
break;
case KVM_REG_ARM_DEMUX:
TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
"%s: Unexpected bits set in DEMUX reg id: 0x%llx", prefix, id);
printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
break;
case KVM_REG_ARM64_SYSREG:
op0 = (id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT;
op1 = (id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT;
crn = (id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT;
crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
"%s: Unexpected bits set in SYSREG reg id: 0x%llx", prefix, id);
printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
break;
case KVM_REG_ARM_FW:
TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
"%s: Unexpected bits set in FW reg id: 0x%llx", prefix, id);
printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
break;
case KVM_REG_ARM_FW_FEAT_BMAP:
TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff),
"%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", prefix, id);
printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff);
break;
case KVM_REG_ARM64_SVE:
printf("\t%s,\n", sve_id_to_str(prefix, id));
break;
default:
TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
prefix, (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
}
}
/*
* The original blessed list was primed with the output of kernel version
* v4.15 with --core-reg-fixup and then later updated with new registers.
 * (The --core-reg-fixup option and its fixup function have been removed
* from the test, as it's unlikely to use this type of test on a kernel
* older than v5.2.)
*
* The blessed list is up to date with kernel version v6.4 (or so we hope)
*/
static __u64 base_regs[] = {
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[1]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[2]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[3]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[4]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[5]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[6]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[7]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[8]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[9]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[10]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[11]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[12]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[13]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[14]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[15]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[16]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[17]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[18]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[19]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[20]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[21]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[22]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[23]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[24]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[25]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[26]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[27]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[28]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[29]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[30]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.sp),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pstate),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(sp_el1),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(elr_el1),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[0]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[1]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[2]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[3]),
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
KVM_REG_ARM_FW_REG(0), /* KVM_REG_ARM_PSCI_VERSION */
KVM_REG_ARM_FW_REG(1), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
KVM_REG_ARM_FW_REG(2), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
KVM_REG_ARM_FW_REG(3), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
KVM_REG_ARM_FW_FEAT_BMAP_REG(0), /* KVM_REG_ARM_STD_BMAP */
KVM_REG_ARM_FW_FEAT_BMAP_REG(1), /* KVM_REG_ARM_STD_HYP_BMAP */
KVM_REG_ARM_FW_FEAT_BMAP_REG(2), /* KVM_REG_ARM_VENDOR_HYP_BMAP */
ARM64_SYS_REG(3, 3, 14, 3, 1), /* CNTV_CTL_EL0 */
ARM64_SYS_REG(3, 3, 14, 3, 2), /* CNTV_CVAL_EL0 */
ARM64_SYS_REG(3, 3, 14, 0, 2),
ARM64_SYS_REG(3, 0, 0, 0, 0), /* MIDR_EL1 */
ARM64_SYS_REG(3, 0, 0, 0, 6), /* REVIDR_EL1 */
ARM64_SYS_REG(3, 1, 0, 0, 1), /* CLIDR_EL1 */
ARM64_SYS_REG(3, 1, 0, 0, 7), /* AIDR_EL1 */
ARM64_SYS_REG(3, 3, 0, 0, 1), /* CTR_EL0 */
ARM64_SYS_REG(2, 0, 0, 0, 4),
ARM64_SYS_REG(2, 0, 0, 0, 5),
ARM64_SYS_REG(2, 0, 0, 0, 6),
ARM64_SYS_REG(2, 0, 0, 0, 7),
ARM64_SYS_REG(2, 0, 0, 1, 4),
ARM64_SYS_REG(2, 0, 0, 1, 5),
ARM64_SYS_REG(2, 0, 0, 1, 6),
ARM64_SYS_REG(2, 0, 0, 1, 7),
ARM64_SYS_REG(2, 0, 0, 2, 0), /* MDCCINT_EL1 */
ARM64_SYS_REG(2, 0, 0, 2, 2), /* MDSCR_EL1 */
ARM64_SYS_REG(2, 0, 0, 2, 4),
ARM64_SYS_REG(2, 0, 0, 2, 5),
ARM64_SYS_REG(2, 0, 0, 2, 6),
ARM64_SYS_REG(2, 0, 0, 2, 7),
ARM64_SYS_REG(2, 0, 0, 3, 4),
ARM64_SYS_REG(2, 0, 0, 3, 5),
ARM64_SYS_REG(2, 0, 0, 3, 6),
ARM64_SYS_REG(2, 0, 0, 3, 7),
ARM64_SYS_REG(2, 0, 0, 4, 4),
ARM64_SYS_REG(2, 0, 0, 4, 5),
ARM64_SYS_REG(2, 0, 0, 4, 6),
ARM64_SYS_REG(2, 0, 0, 4, 7),
ARM64_SYS_REG(2, 0, 0, 5, 4),
ARM64_SYS_REG(2, 0, 0, 5, 5),
ARM64_SYS_REG(2, 0, 0, 5, 6),
ARM64_SYS_REG(2, 0, 0, 5, 7),
ARM64_SYS_REG(2, 0, 0, 6, 4),
ARM64_SYS_REG(2, 0, 0, 6, 5),
ARM64_SYS_REG(2, 0, 0, 6, 6),
ARM64_SYS_REG(2, 0, 0, 6, 7),
ARM64_SYS_REG(2, 0, 0, 7, 4),
ARM64_SYS_REG(2, 0, 0, 7, 5),
ARM64_SYS_REG(2, 0, 0, 7, 6),
ARM64_SYS_REG(2, 0, 0, 7, 7),
ARM64_SYS_REG(2, 0, 0, 8, 4),
ARM64_SYS_REG(2, 0, 0, 8, 5),
ARM64_SYS_REG(2, 0, 0, 8, 6),
ARM64_SYS_REG(2, 0, 0, 8, 7),
ARM64_SYS_REG(2, 0, 0, 9, 4),
ARM64_SYS_REG(2, 0, 0, 9, 5),
ARM64_SYS_REG(2, 0, 0, 9, 6),
ARM64_SYS_REG(2, 0, 0, 9, 7),
ARM64_SYS_REG(2, 0, 0, 10, 4),
ARM64_SYS_REG(2, 0, 0, 10, 5),
ARM64_SYS_REG(2, 0, 0, 10, 6),
ARM64_SYS_REG(2, 0, 0, 10, 7),
ARM64_SYS_REG(2, 0, 0, 11, 4),
ARM64_SYS_REG(2, 0, 0, 11, 5),
ARM64_SYS_REG(2, 0, 0, 11, 6),
ARM64_SYS_REG(2, 0, 0, 11, 7),
ARM64_SYS_REG(2, 0, 0, 12, 4),
ARM64_SYS_REG(2, 0, 0, 12, 5),
ARM64_SYS_REG(2, 0, 0, 12, 6),
ARM64_SYS_REG(2, 0, 0, 12, 7),
ARM64_SYS_REG(2, 0, 0, 13, 4),
ARM64_SYS_REG(2, 0, 0, 13, 5),
ARM64_SYS_REG(2, 0, 0, 13, 6),
ARM64_SYS_REG(2, 0, 0, 13, 7),
ARM64_SYS_REG(2, 0, 0, 14, 4),
ARM64_SYS_REG(2, 0, 0, 14, 5),
ARM64_SYS_REG(2, 0, 0, 14, 6),
ARM64_SYS_REG(2, 0, 0, 14, 7),
ARM64_SYS_REG(2, 0, 0, 15, 4),
ARM64_SYS_REG(2, 0, 0, 15, 5),
ARM64_SYS_REG(2, 0, 0, 15, 6),
ARM64_SYS_REG(2, 0, 0, 15, 7),
ARM64_SYS_REG(2, 0, 1, 1, 4), /* OSLSR_EL1 */
ARM64_SYS_REG(2, 4, 0, 7, 0), /* DBGVCR32_EL2 */
ARM64_SYS_REG(3, 0, 0, 0, 5), /* MPIDR_EL1 */
ARM64_SYS_REG(3, 0, 0, 1, 0), /* ID_PFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 1, 1), /* ID_PFR1_EL1 */
ARM64_SYS_REG(3, 0, 0, 1, 2), /* ID_DFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 1, 3), /* ID_AFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 1, 4), /* ID_MMFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 1, 5), /* ID_MMFR1_EL1 */
ARM64_SYS_REG(3, 0, 0, 1, 6), /* ID_MMFR2_EL1 */
ARM64_SYS_REG(3, 0, 0, 1, 7), /* ID_MMFR3_EL1 */
ARM64_SYS_REG(3, 0, 0, 2, 0), /* ID_ISAR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 2, 1), /* ID_ISAR1_EL1 */
ARM64_SYS_REG(3, 0, 0, 2, 2), /* ID_ISAR2_EL1 */
ARM64_SYS_REG(3, 0, 0, 2, 3), /* ID_ISAR3_EL1 */
ARM64_SYS_REG(3, 0, 0, 2, 4), /* ID_ISAR4_EL1 */
ARM64_SYS_REG(3, 0, 0, 2, 5), /* ID_ISAR5_EL1 */
ARM64_SYS_REG(3, 0, 0, 2, 6), /* ID_MMFR4_EL1 */
ARM64_SYS_REG(3, 0, 0, 2, 7), /* ID_ISAR6_EL1 */
ARM64_SYS_REG(3, 0, 0, 3, 0), /* MVFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 3, 1), /* MVFR1_EL1 */
ARM64_SYS_REG(3, 0, 0, 3, 2), /* MVFR2_EL1 */
ARM64_SYS_REG(3, 0, 0, 3, 3),
ARM64_SYS_REG(3, 0, 0, 3, 4), /* ID_PFR2_EL1 */
ARM64_SYS_REG(3, 0, 0, 3, 5), /* ID_DFR1_EL1 */
ARM64_SYS_REG(3, 0, 0, 3, 6), /* ID_MMFR5_EL1 */
ARM64_SYS_REG(3, 0, 0, 3, 7),
ARM64_SYS_REG(3, 0, 0, 4, 0), /* ID_AA64PFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 4, 1), /* ID_AA64PFR1_EL1 */
ARM64_SYS_REG(3, 0, 0, 4, 2), /* ID_AA64PFR2_EL1 */
ARM64_SYS_REG(3, 0, 0, 4, 3),
ARM64_SYS_REG(3, 0, 0, 4, 4), /* ID_AA64ZFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 4, 5), /* ID_AA64SMFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 4, 6),
ARM64_SYS_REG(3, 0, 0, 4, 7),
ARM64_SYS_REG(3, 0, 0, 5, 0), /* ID_AA64DFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 5, 1), /* ID_AA64DFR1_EL1 */
ARM64_SYS_REG(3, 0, 0, 5, 2),
ARM64_SYS_REG(3, 0, 0, 5, 3),
ARM64_SYS_REG(3, 0, 0, 5, 4), /* ID_AA64AFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 5, 5), /* ID_AA64AFR1_EL1 */
ARM64_SYS_REG(3, 0, 0, 5, 6),
ARM64_SYS_REG(3, 0, 0, 5, 7),
ARM64_SYS_REG(3, 0, 0, 6, 0), /* ID_AA64ISAR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 6, 1), /* ID_AA64ISAR1_EL1 */
ARM64_SYS_REG(3, 0, 0, 6, 2), /* ID_AA64ISAR2_EL1 */
ARM64_SYS_REG(3, 0, 0, 6, 3),
ARM64_SYS_REG(3, 0, 0, 6, 4),
ARM64_SYS_REG(3, 0, 0, 6, 5),
ARM64_SYS_REG(3, 0, 0, 6, 6),
ARM64_SYS_REG(3, 0, 0, 6, 7),
ARM64_SYS_REG(3, 0, 0, 7, 0), /* ID_AA64MMFR0_EL1 */
ARM64_SYS_REG(3, 0, 0, 7, 1), /* ID_AA64MMFR1_EL1 */
ARM64_SYS_REG(3, 0, 0, 7, 2), /* ID_AA64MMFR2_EL1 */
ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
ARM64_SYS_REG(3, 0, 0, 7, 4), /* ID_AA64MMFR4_EL1 */
ARM64_SYS_REG(3, 0, 0, 7, 5),
ARM64_SYS_REG(3, 0, 0, 7, 6),
ARM64_SYS_REG(3, 0, 0, 7, 7),
ARM64_SYS_REG(3, 0, 1, 0, 0), /* SCTLR_EL1 */
ARM64_SYS_REG(3, 0, 1, 0, 1), /* ACTLR_EL1 */
ARM64_SYS_REG(3, 0, 1, 0, 2), /* CPACR_EL1 */
ARM64_SYS_REG(3, 0, 2, 0, 0), /* TTBR0_EL1 */
ARM64_SYS_REG(3, 0, 2, 0, 1), /* TTBR1_EL1 */
ARM64_SYS_REG(3, 0, 2, 0, 2), /* TCR_EL1 */
ARM64_SYS_REG(3, 0, 2, 0, 3), /* TCR2_EL1 */
ARM64_SYS_REG(3, 0, 5, 1, 0), /* AFSR0_EL1 */
ARM64_SYS_REG(3, 0, 5, 1, 1), /* AFSR1_EL1 */
ARM64_SYS_REG(3, 0, 5, 2, 0), /* ESR_EL1 */
ARM64_SYS_REG(3, 0, 6, 0, 0), /* FAR_EL1 */
ARM64_SYS_REG(3, 0, 7, 4, 0), /* PAR_EL1 */
ARM64_SYS_REG(3, 0, 10, 2, 0), /* MAIR_EL1 */
ARM64_SYS_REG(3, 0, 10, 2, 2), /* PIRE0_EL1 */
ARM64_SYS_REG(3, 0, 10, 2, 3), /* PIR_EL1 */
ARM64_SYS_REG(3, 0, 10, 3, 0), /* AMAIR_EL1 */
ARM64_SYS_REG(3, 0, 12, 0, 0), /* VBAR_EL1 */
ARM64_SYS_REG(3, 0, 12, 1, 1), /* DISR_EL1 */
ARM64_SYS_REG(3, 0, 13, 0, 1), /* CONTEXTIDR_EL1 */
ARM64_SYS_REG(3, 0, 13, 0, 4), /* TPIDR_EL1 */
ARM64_SYS_REG(3, 0, 14, 1, 0), /* CNTKCTL_EL1 */
ARM64_SYS_REG(3, 2, 0, 0, 0), /* CSSELR_EL1 */
ARM64_SYS_REG(3, 3, 13, 0, 2), /* TPIDR_EL0 */
ARM64_SYS_REG(3, 3, 13, 0, 3), /* TPIDRRO_EL0 */
ARM64_SYS_REG(3, 3, 14, 0, 1), /* CNTPCT_EL0 */
ARM64_SYS_REG(3, 3, 14, 2, 1), /* CNTP_CTL_EL0 */
ARM64_SYS_REG(3, 3, 14, 2, 2), /* CNTP_CVAL_EL0 */
ARM64_SYS_REG(3, 4, 3, 0, 0), /* DACR32_EL2 */
ARM64_SYS_REG(3, 4, 5, 0, 1), /* IFSR32_EL2 */
ARM64_SYS_REG(3, 4, 5, 3, 0), /* FPEXC32_EL2 */
};
static __u64 pmu_regs[] = {
ARM64_SYS_REG(3, 0, 9, 14, 1), /* PMINTENSET_EL1 */
ARM64_SYS_REG(3, 0, 9, 14, 2), /* PMINTENCLR_EL1 */
ARM64_SYS_REG(3, 3, 9, 12, 0), /* PMCR_EL0 */
ARM64_SYS_REG(3, 3, 9, 12, 1), /* PMCNTENSET_EL0 */
ARM64_SYS_REG(3, 3, 9, 12, 2), /* PMCNTENCLR_EL0 */
ARM64_SYS_REG(3, 3, 9, 12, 3), /* PMOVSCLR_EL0 */
ARM64_SYS_REG(3, 3, 9, 12, 4), /* PMSWINC_EL0 */
ARM64_SYS_REG(3, 3, 9, 12, 5), /* PMSELR_EL0 */
ARM64_SYS_REG(3, 3, 9, 13, 0), /* PMCCNTR_EL0 */
ARM64_SYS_REG(3, 3, 9, 14, 0), /* PMUSERENR_EL0 */
ARM64_SYS_REG(3, 3, 9, 14, 3), /* PMOVSSET_EL0 */
ARM64_SYS_REG(3, 3, 14, 8, 0),
ARM64_SYS_REG(3, 3, 14, 8, 1),
ARM64_SYS_REG(3, 3, 14, 8, 2),
ARM64_SYS_REG(3, 3, 14, 8, 3),
ARM64_SYS_REG(3, 3, 14, 8, 4),
ARM64_SYS_REG(3, 3, 14, 8, 5),
ARM64_SYS_REG(3, 3, 14, 8, 6),
ARM64_SYS_REG(3, 3, 14, 8, 7),
ARM64_SYS_REG(3, 3, 14, 9, 0),
ARM64_SYS_REG(3, 3, 14, 9, 1),
ARM64_SYS_REG(3, 3, 14, 9, 2),
ARM64_SYS_REG(3, 3, 14, 9, 3),
ARM64_SYS_REG(3, 3, 14, 9, 4),
ARM64_SYS_REG(3, 3, 14, 9, 5),
ARM64_SYS_REG(3, 3, 14, 9, 6),
ARM64_SYS_REG(3, 3, 14, 9, 7),
ARM64_SYS_REG(3, 3, 14, 10, 0),
ARM64_SYS_REG(3, 3, 14, 10, 1),
ARM64_SYS_REG(3, 3, 14, 10, 2),
ARM64_SYS_REG(3, 3, 14, 10, 3),
ARM64_SYS_REG(3, 3, 14, 10, 4),
ARM64_SYS_REG(3, 3, 14, 10, 5),
ARM64_SYS_REG(3, 3, 14, 10, 6),
ARM64_SYS_REG(3, 3, 14, 10, 7),
ARM64_SYS_REG(3, 3, 14, 11, 0),
ARM64_SYS_REG(3, 3, 14, 11, 1),
ARM64_SYS_REG(3, 3, 14, 11, 2),
ARM64_SYS_REG(3, 3, 14, 11, 3),
ARM64_SYS_REG(3, 3, 14, 11, 4),
ARM64_SYS_REG(3, 3, 14, 11, 5),
ARM64_SYS_REG(3, 3, 14, 11, 6),
ARM64_SYS_REG(3, 3, 14, 12, 0),
ARM64_SYS_REG(3, 3, 14, 12, 1),
ARM64_SYS_REG(3, 3, 14, 12, 2),
ARM64_SYS_REG(3, 3, 14, 12, 3),
ARM64_SYS_REG(3, 3, 14, 12, 4),
ARM64_SYS_REG(3, 3, 14, 12, 5),
ARM64_SYS_REG(3, 3, 14, 12, 6),
ARM64_SYS_REG(3, 3, 14, 12, 7),
ARM64_SYS_REG(3, 3, 14, 13, 0),
ARM64_SYS_REG(3, 3, 14, 13, 1),
ARM64_SYS_REG(3, 3, 14, 13, 2),
ARM64_SYS_REG(3, 3, 14, 13, 3),
ARM64_SYS_REG(3, 3, 14, 13, 4),
ARM64_SYS_REG(3, 3, 14, 13, 5),
ARM64_SYS_REG(3, 3, 14, 13, 6),
ARM64_SYS_REG(3, 3, 14, 13, 7),
ARM64_SYS_REG(3, 3, 14, 14, 0),
ARM64_SYS_REG(3, 3, 14, 14, 1),
ARM64_SYS_REG(3, 3, 14, 14, 2),
ARM64_SYS_REG(3, 3, 14, 14, 3),
ARM64_SYS_REG(3, 3, 14, 14, 4),
ARM64_SYS_REG(3, 3, 14, 14, 5),
ARM64_SYS_REG(3, 3, 14, 14, 6),
ARM64_SYS_REG(3, 3, 14, 14, 7),
ARM64_SYS_REG(3, 3, 14, 15, 0),
ARM64_SYS_REG(3, 3, 14, 15, 1),
ARM64_SYS_REG(3, 3, 14, 15, 2),
ARM64_SYS_REG(3, 3, 14, 15, 3),
ARM64_SYS_REG(3, 3, 14, 15, 4),
ARM64_SYS_REG(3, 3, 14, 15, 5),
ARM64_SYS_REG(3, 3, 14, 15, 6),
ARM64_SYS_REG(3, 3, 14, 15, 7), /* PMCCFILTR_EL0 */
};
static __u64 vregs[] = {
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[2]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[3]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[4]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[5]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[6]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[7]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[8]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[9]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[10]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[11]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[12]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[13]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[14]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[15]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[16]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[17]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[18]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[19]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[20]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[21]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[22]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[23]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[24]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[25]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[26]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[27]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[28]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[29]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]),
KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]),
};
static __u64 sve_regs[] = {
KVM_REG_ARM64_SVE_VLS,
KVM_REG_ARM64_SVE_ZREG(0, 0),
KVM_REG_ARM64_SVE_ZREG(1, 0),
KVM_REG_ARM64_SVE_ZREG(2, 0),
KVM_REG_ARM64_SVE_ZREG(3, 0),
KVM_REG_ARM64_SVE_ZREG(4, 0),
KVM_REG_ARM64_SVE_ZREG(5, 0),
KVM_REG_ARM64_SVE_ZREG(6, 0),
KVM_REG_ARM64_SVE_ZREG(7, 0),
KVM_REG_ARM64_SVE_ZREG(8, 0),
KVM_REG_ARM64_SVE_ZREG(9, 0),
KVM_REG_ARM64_SVE_ZREG(10, 0),
KVM_REG_ARM64_SVE_ZREG(11, 0),
KVM_REG_ARM64_SVE_ZREG(12, 0),
KVM_REG_ARM64_SVE_ZREG(13, 0),
KVM_REG_ARM64_SVE_ZREG(14, 0),
KVM_REG_ARM64_SVE_ZREG(15, 0),
KVM_REG_ARM64_SVE_ZREG(16, 0),
KVM_REG_ARM64_SVE_ZREG(17, 0),
KVM_REG_ARM64_SVE_ZREG(18, 0),
KVM_REG_ARM64_SVE_ZREG(19, 0),
KVM_REG_ARM64_SVE_ZREG(20, 0),
KVM_REG_ARM64_SVE_ZREG(21, 0),
KVM_REG_ARM64_SVE_ZREG(22, 0),
KVM_REG_ARM64_SVE_ZREG(23, 0),
KVM_REG_ARM64_SVE_ZREG(24, 0),
KVM_REG_ARM64_SVE_ZREG(25, 0),
KVM_REG_ARM64_SVE_ZREG(26, 0),
KVM_REG_ARM64_SVE_ZREG(27, 0),
KVM_REG_ARM64_SVE_ZREG(28, 0),
KVM_REG_ARM64_SVE_ZREG(29, 0),
KVM_REG_ARM64_SVE_ZREG(30, 0),
KVM_REG_ARM64_SVE_ZREG(31, 0),
KVM_REG_ARM64_SVE_PREG(0, 0),
KVM_REG_ARM64_SVE_PREG(1, 0),
KVM_REG_ARM64_SVE_PREG(2, 0),
KVM_REG_ARM64_SVE_PREG(3, 0),
KVM_REG_ARM64_SVE_PREG(4, 0),
KVM_REG_ARM64_SVE_PREG(5, 0),
KVM_REG_ARM64_SVE_PREG(6, 0),
KVM_REG_ARM64_SVE_PREG(7, 0),
KVM_REG_ARM64_SVE_PREG(8, 0),
KVM_REG_ARM64_SVE_PREG(9, 0),
KVM_REG_ARM64_SVE_PREG(10, 0),
KVM_REG_ARM64_SVE_PREG(11, 0),
KVM_REG_ARM64_SVE_PREG(12, 0),
KVM_REG_ARM64_SVE_PREG(13, 0),
KVM_REG_ARM64_SVE_PREG(14, 0),
KVM_REG_ARM64_SVE_PREG(15, 0),
KVM_REG_ARM64_SVE_FFR(0),
ARM64_SYS_REG(3, 0, 1, 2, 0), /* ZCR_EL1 */
};
static __u64 sve_rejects_set[] = {
KVM_REG_ARM64_SVE_VLS,
};
static __u64 pauth_addr_regs[] = {
ARM64_SYS_REG(3, 0, 2, 1, 0), /* APIAKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 1, 1), /* APIAKEYHI_EL1 */
ARM64_SYS_REG(3, 0, 2, 1, 2), /* APIBKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 1, 3), /* APIBKEYHI_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 0), /* APDAKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 1), /* APDAKEYHI_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 2), /* APDBKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 3) /* APDBKEYHI_EL1 */
};
static __u64 pauth_generic_regs[] = {
ARM64_SYS_REG(3, 0, 2, 3, 0), /* APGAKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 3, 1), /* APGAKEYHI_EL1 */
};
#define BASE_SUBLIST \
{ "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \
{ "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
#define PMU_SUBLIST \
{ "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
.regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
#define SVE_SUBLIST \
{ "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
.regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
.rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), }
#define PAUTH_SUBLIST \
{ \
.name = "pauth_address", \
.capability = KVM_CAP_ARM_PTRAUTH_ADDRESS, \
.feature = KVM_ARM_VCPU_PTRAUTH_ADDRESS, \
.regs = pauth_addr_regs, \
.regs_n = ARRAY_SIZE(pauth_addr_regs), \
}, \
{ \
.name = "pauth_generic", \
.capability = KVM_CAP_ARM_PTRAUTH_GENERIC, \
.feature = KVM_ARM_VCPU_PTRAUTH_GENERIC, \
.regs = pauth_generic_regs, \
.regs_n = ARRAY_SIZE(pauth_generic_regs), \
}
static struct vcpu_reg_list vregs_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
{0},
},
};
static struct vcpu_reg_list vregs_pmu_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
PMU_SUBLIST,
{0},
},
};
static struct vcpu_reg_list sve_config = {
.sublists = {
BASE_SUBLIST,
SVE_SUBLIST,
{0},
},
};
static struct vcpu_reg_list sve_pmu_config = {
.sublists = {
BASE_SUBLIST,
SVE_SUBLIST,
PMU_SUBLIST,
{0},
},
};
static struct vcpu_reg_list pauth_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
PAUTH_SUBLIST,
{0},
},
};
static struct vcpu_reg_list pauth_pmu_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
PAUTH_SUBLIST,
PMU_SUBLIST,
{0},
},
};
struct vcpu_reg_list *vcpu_configs[] = {
&vregs_config,
&vregs_pmu_config,
&sve_config,
&sve_pmu_config,
&pauth_config,
&pauth_pmu_config,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
| linux-master | tools/testing/selftests/kvm/aarch64/get-reg-list.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* psci_test - Tests relating to KVM's PSCI implementation.
*
* Copyright (c) 2021 Google LLC.
*
* This test includes:
* - A regression test for a race between KVM servicing the PSCI CPU_ON call
* and userspace reading the targeted vCPU's registers.
* - A test for KVM's handling of PSCI SYSTEM_SUSPEND and the associated
* KVM_SYSTEM_EVENT_SUSPEND UAPI.
*/
#define _GNU_SOURCE
#include <linux/psci.h>
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"
#define CPU_ON_ENTRY_ADDR 0xfeedf00dul
#define CPU_ON_CONTEXT_ID 0xdeadc0deul
static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
uint64_t context_id)
{
struct arm_smccc_res res;
smccc_hvc(PSCI_0_2_FN64_CPU_ON, target_cpu, entry_addr, context_id,
0, 0, 0, 0, &res);
return res.a0;
}
static uint64_t psci_affinity_info(uint64_t target_affinity,
uint64_t lowest_affinity_level)
{
struct arm_smccc_res res;
smccc_hvc(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity, lowest_affinity_level,
0, 0, 0, 0, 0, &res);
return res.a0;
}
static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
{
struct arm_smccc_res res;
smccc_hvc(PSCI_1_0_FN64_SYSTEM_SUSPEND, entry_addr, context_id,
0, 0, 0, 0, 0, &res);
return res.a0;
}
static uint64_t psci_features(uint32_t func_id)
{
struct arm_smccc_res res;
smccc_hvc(PSCI_1_0_FN_PSCI_FEATURES, func_id, 0, 0, 0, 0, 0, 0, &res);
return res.a0;
}
static void vcpu_power_off(struct kvm_vcpu *vcpu)
{
struct kvm_mp_state mp_state = {
.mp_state = KVM_MP_STATE_STOPPED,
};
vcpu_mp_state_set(vcpu, &mp_state);
}
static struct kvm_vm *setup_vm(void *guest_code, struct kvm_vcpu **source,
struct kvm_vcpu **target)
{
struct kvm_vcpu_init init;
struct kvm_vm *vm;
vm = vm_create(2);
vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
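	/* Expose PSCI v0.2+ to both vCPUs. */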
init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
*source = aarch64_vcpu_add(vm, 0, &init, guest_code);
*target = aarch64_vcpu_add(vm, 1, &init, guest_code);
return vm;
}
static void enter_guest(struct kvm_vcpu *vcpu)
{
struct ucall uc;
vcpu_run(vcpu);
if (get_ucall(vcpu, &uc) == UCALL_ABORT)
REPORT_GUEST_ASSERT(uc);
}
static void assert_vcpu_reset(struct kvm_vcpu *vcpu)
{
uint64_t obs_pc, obs_x0;
vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &obs_pc);
vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]), &obs_x0);
TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
"unexpected target cpu pc: %lx (expected: %lx)",
obs_pc, CPU_ON_ENTRY_ADDR);
TEST_ASSERT(obs_x0 == CPU_ON_CONTEXT_ID,
"unexpected target context id: %lx (expected: %lx)",
obs_x0, CPU_ON_CONTEXT_ID);
}
static void guest_test_cpu_on(uint64_t target_cpu)
{
uint64_t target_state;
GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID));
do {
target_state = psci_affinity_info(target_cpu, 0);
GUEST_ASSERT((target_state == PSCI_0_2_AFFINITY_LEVEL_ON) ||
(target_state == PSCI_0_2_AFFINITY_LEVEL_OFF));
} while (target_state != PSCI_0_2_AFFINITY_LEVEL_ON);
GUEST_DONE();
}
static void host_test_cpu_on(void)
{
struct kvm_vcpu *source, *target;
uint64_t target_mpidr;
struct kvm_vm *vm;
struct ucall uc;
vm = setup_vm(guest_test_cpu_on, &source, &target);
	/*
	 * Make sure the target vCPU is already powered off when executing the
	 * test.
	 */
vcpu_power_off(target);
vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
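	/* The guest identifies the CPU_ON target by its MPIDR affinity fields. */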
vcpu_args_set(source, 1, target_mpidr & MPIDR_HWID_BITMASK);
enter_guest(source);
if (get_ucall(source, &uc) != UCALL_DONE)
TEST_FAIL("Unhandled ucall: %lu", uc.cmd);
assert_vcpu_reset(target);
kvm_vm_free(vm);
}
static void guest_test_system_suspend(void)
{
uint64_t ret;
/* assert that SYSTEM_SUSPEND is discoverable */
GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND));
GUEST_ASSERT(!psci_features(PSCI_1_0_FN64_SYSTEM_SUSPEND));
ret = psci_system_suspend(CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID);
GUEST_SYNC(ret);
}
static void host_test_system_suspend(void)
{
struct kvm_vcpu *source, *target;
struct kvm_run *run;
struct kvm_vm *vm;
vm = setup_vm(guest_test_system_suspend, &source, &target);
vm_enable_cap(vm, KVM_CAP_ARM_SYSTEM_SUSPEND, 0);
vcpu_power_off(target);
run = source->run;
enter_guest(source);
TEST_ASSERT_KVM_EXIT_REASON(source, KVM_EXIT_SYSTEM_EVENT);
TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SUSPEND,
"Unhandled system event: %u (expected: %u)",
run->system_event.type, KVM_SYSTEM_EVENT_SUSPEND);
kvm_vm_free(vm);
}
int main(void)
{
TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SYSTEM_SUSPEND));
host_test_cpu_on();
host_test_system_suspend();
return 0;
}
| linux-master | tools/testing/selftests/kvm/aarch64/psci_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vcpu_width_config - Test KVM_ARM_VCPU_INIT() with KVM_ARM_VCPU_EL1_32BIT.
*
* Copyright (c) 2022 Google LLC.
*
* This is a test that ensures that non-mixed-width vCPUs (all 64bit vCPUs
 * or all 32bit vCPUs) can be configured and mixed-width vCPUs cannot be
* configured.
*/
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"
/*
* Add a vCPU, run KVM_ARM_VCPU_INIT with @init0, and then
* add another vCPU, and run KVM_ARM_VCPU_INIT with @init1.
*/
static int add_init_2vcpus(struct kvm_vcpu_init *init0,
struct kvm_vcpu_init *init1)
{
struct kvm_vcpu *vcpu0, *vcpu1;
struct kvm_vm *vm;
int ret;
vm = vm_create_barebones();
vcpu0 = __vm_vcpu_add(vm, 0);
ret = __vcpu_ioctl(vcpu0, KVM_ARM_VCPU_INIT, init0);
if (ret)
goto free_exit;
vcpu1 = __vm_vcpu_add(vm, 1);
ret = __vcpu_ioctl(vcpu1, KVM_ARM_VCPU_INIT, init1);
free_exit:
kvm_vm_free(vm);
return ret;
}
/*
* Add two vCPUs, then run KVM_ARM_VCPU_INIT for one vCPU with @init0,
* and run KVM_ARM_VCPU_INIT for another vCPU with @init1.
*/
static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init0,
struct kvm_vcpu_init *init1)
{
struct kvm_vcpu *vcpu0, *vcpu1;
struct kvm_vm *vm;
int ret;
vm = vm_create_barebones();
vcpu0 = __vm_vcpu_add(vm, 0);
vcpu1 = __vm_vcpu_add(vm, 1);
ret = __vcpu_ioctl(vcpu0, KVM_ARM_VCPU_INIT, init0);
if (ret)
goto free_exit;
ret = __vcpu_ioctl(vcpu1, KVM_ARM_VCPU_INIT, init1);
free_exit:
kvm_vm_free(vm);
return ret;
}
/*
 * Tests that two 64bit vCPUs can be configured, two 32bit vCPUs can be
 * configured, and two mixed-width vCPUs cannot be configured.
 * Each of the three cases configures the vCPUs in two different orders.
 * The first runs KVM_CREATE_VCPU for both vCPUs and then runs
 * KVM_ARM_VCPU_INIT for them.
 * The second runs KVM_CREATE_VCPU and KVM_ARM_VCPU_INIT for one vCPU,
 * and then runs those commands for the other vCPU.
 */
int main(void)
{
struct kvm_vcpu_init init0, init1;
struct kvm_vm *vm;
int ret;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_EL1_32BIT));
/* Get the preferred target type and copy that to init1 for later use */
vm = vm_create_barebones();
vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init0);
kvm_vm_free(vm);
init1 = init0;
/* Test with 64bit vCPUs */
ret = add_init_2vcpus(&init0, &init0);
TEST_ASSERT(ret == 0,
"Configuring 64bit EL1 vCPUs failed unexpectedly");
ret = add_2vcpus_init_2vcpus(&init0, &init0);
TEST_ASSERT(ret == 0,
"Configuring 64bit EL1 vCPUs failed unexpectedly");
/* Test with 32bit vCPUs */
init0.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
ret = add_init_2vcpus(&init0, &init0);
TEST_ASSERT(ret == 0,
"Configuring 32bit EL1 vCPUs failed unexpectedly");
ret = add_2vcpus_init_2vcpus(&init0, &init0);
TEST_ASSERT(ret == 0,
"Configuring 32bit EL1 vCPUs failed unexpectedly");
/* Test with mixed-width vCPUs */
init0.features[0] = 0;
init1.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
ret = add_init_2vcpus(&init0, &init1);
TEST_ASSERT(ret != 0,
"Configuring mixed-width vCPUs worked unexpectedly");
ret = add_2vcpus_init_2vcpus(&init0, &init1);
TEST_ASSERT(ret != 0,
"Configuring mixed-width vCPUs worked unexpectedly");
return 0;
}
| linux-master | tools/testing/selftests/kvm/aarch64/vcpu_width_config.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test for x86 KVM_CAP_SYNC_REGS
*
* Copyright (C) 2018, Google LLC.
*
* Verifies expected behavior of x86 KVM_CAP_SYNC_REGS functionality,
* including requesting an invalid register set, updates to/from values
* in kvm_run.s.regs when kvm_valid_regs and kvm_dirty_regs are toggled.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <pthread.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#define UCALL_PIO_PORT ((uint16_t)0x1000)
struct ucall uc_none = {
.cmd = UCALL_NONE,
};
/*
 * ucall is embedded here to protect against the compiler reshuffling
 * registers before calling a function. In this test we only need to get a
 * KVM_EXIT_IO vmexit and preserve RBX; no additional information is needed.
 */
void guest_code(void)
{
asm volatile("1: in %[port], %%al\n"
"add $0x1, %%rbx\n"
"jmp 1b"
: : [port] "d" (UCALL_PIO_PORT), "D" (&uc_none)
: "rax", "rbx");
}
static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
{
#define REG_COMPARE(reg) \
TEST_ASSERT(left->reg == right->reg, \
"Register " #reg \
" values did not match: 0x%llx, 0x%llx\n", \
left->reg, right->reg)
REG_COMPARE(rax);
REG_COMPARE(rbx);
REG_COMPARE(rcx);
REG_COMPARE(rdx);
REG_COMPARE(rsi);
REG_COMPARE(rdi);
REG_COMPARE(rsp);
REG_COMPARE(rbp);
REG_COMPARE(r8);
REG_COMPARE(r9);
REG_COMPARE(r10);
REG_COMPARE(r11);
REG_COMPARE(r12);
REG_COMPARE(r13);
REG_COMPARE(r14);
REG_COMPARE(r15);
REG_COMPARE(rip);
REG_COMPARE(rflags);
#undef REG_COMPARE
}
static void compare_sregs(struct kvm_sregs *left, struct kvm_sregs *right)
{
}
static void compare_vcpu_events(struct kvm_vcpu_events *left,
struct kvm_vcpu_events *right)
{
}
#define TEST_SYNC_FIELDS (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
#define INVALID_SYNC_FIELD 0x80000000
/*
* Set an exception as pending *and* injected while KVM is processing events.
* KVM is supposed to ignore/drop pending exceptions if userspace is also
* requesting that an exception be injected.
*/
static void *race_events_inj_pen(void *arg)
{
struct kvm_run *run = (struct kvm_run *)arg;
struct kvm_vcpu_events *events = &run->s.regs.events;
WRITE_ONCE(events->exception.nr, UD_VECTOR);
for (;;) {
WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
WRITE_ONCE(events->flags, 0);
WRITE_ONCE(events->exception.injected, 1);
WRITE_ONCE(events->exception.pending, 1);
pthread_testcancel();
}
return NULL;
}
/*
* Set an invalid exception vector while KVM is processing events. KVM is
* supposed to reject any vector >= 32, as well as NMIs (vector 2).
*/
static void *race_events_exc(void *arg)
{
struct kvm_run *run = (struct kvm_run *)arg;
struct kvm_vcpu_events *events = &run->s.regs.events;
for (;;) {
WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
WRITE_ONCE(events->flags, 0);
WRITE_ONCE(events->exception.nr, UD_VECTOR);
WRITE_ONCE(events->exception.pending, 1);
WRITE_ONCE(events->exception.nr, 255);
pthread_testcancel();
}
return NULL;
}
/*
 * Toggle CR4.PAE while KVM is processing SREGS. EFER.LME=1 with CR4.PAE=0
 * is illegal, and KVM's MMU heavily relies on vCPU state being valid.
 */
static noinline void *race_sregs_cr4(void *arg)
{
struct kvm_run *run = (struct kvm_run *)arg;
__u64 *cr4 = &run->s.regs.sregs.cr4;
__u64 pae_enabled = *cr4;
__u64 pae_disabled = *cr4 & ~X86_CR4_PAE;
for (;;) {
WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_SREGS);
WRITE_ONCE(*cr4, pae_enabled);
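		/* NOP padding widens the window between the two CR4 writes. */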
asm volatile(".rept 512\n\t"
"nop\n\t"
".endr");
WRITE_ONCE(*cr4, pae_disabled);
pthread_testcancel();
}
return NULL;
}
static void race_sync_regs(void *racer)
{
const time_t TIMEOUT = 2; /* seconds, roughly */
struct kvm_x86_state *state;
struct kvm_translation tr;
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
pthread_t thread;
time_t t;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
run->kvm_valid_regs = KVM_SYNC_X86_SREGS;
vcpu_run(vcpu);
run->kvm_valid_regs = 0;
/* Save state *before* spawning the thread that mucks with vCPU state. */
state = vcpu_save_state(vcpu);
/*
* Selftests run 64-bit guests by default, both EFER.LME and CR4.PAE
* should already be set in guest state.
*/
TEST_ASSERT((run->s.regs.sregs.cr4 & X86_CR4_PAE) &&
(run->s.regs.sregs.efer & EFER_LME),
"vCPU should be in long mode, CR4.PAE=%d, EFER.LME=%d",
!!(run->s.regs.sregs.cr4 & X86_CR4_PAE),
!!(run->s.regs.sregs.efer & EFER_LME));
TEST_ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0);
for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
/*
* Reload known good state if the vCPU triple faults, e.g. due
* to the unhandled #GPs being injected. VMX preserves state
* on shutdown, but SVM synthesizes an INIT as the VMCB state
* is architecturally undefined on triple fault.
*/
if (!__vcpu_run(vcpu) && run->exit_reason == KVM_EXIT_SHUTDOWN)
vcpu_load_state(vcpu, state);
if (racer == race_sregs_cr4) {
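			/* Force a guest page table walk while CR4.PAE is racing. */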
tr = (struct kvm_translation) { .linear_address = 0 };
__vcpu_ioctl(vcpu, KVM_TRANSLATE, &tr);
}
}
TEST_ASSERT_EQ(pthread_cancel(thread), 0);
TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);
kvm_x86_state_cleanup(state);
kvm_vm_free(vm);
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_regs regs;
struct kvm_sregs sregs;
struct kvm_vcpu_events events;
int rv, cap;
cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS);
TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
/* Request reading invalid register set from VCPU. */
run->kvm_valid_regs = INVALID_SYNC_FIELD;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
run->kvm_valid_regs = 0;
run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
run->kvm_valid_regs = 0;
/* Request setting invalid register set into VCPU. */
run->kvm_dirty_regs = INVALID_SYNC_FIELD;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
run->kvm_dirty_regs = 0;
run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
run->kvm_dirty_regs = 0;
/* Request and verify all valid register sets. */
/* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
vcpu_regs_get(vcpu, ®s);
compare_regs(®s, &run->s.regs.regs);
vcpu_sregs_get(vcpu, &sregs);
compare_sregs(&sregs, &run->s.regs.sregs);
vcpu_events_get(vcpu, &events);
compare_vcpu_events(&events, &run->s.regs.events);
/* Set and verify various register values. */
run->s.regs.regs.rbx = 0xBAD1DEA;
run->s.regs.sregs.apic_base = 1 << 11;
/* TODO run->s.regs.events.XYZ = ABC; */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
rv = _vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1,
"rbx sync regs value incorrect 0x%llx.",
run->s.regs.regs.rbx);
TEST_ASSERT(run->s.regs.sregs.apic_base == 1 << 11,
"apic_base sync regs value incorrect 0x%llx.",
run->s.regs.sregs.apic_base);
vcpu_regs_get(vcpu, ®s);
compare_regs(®s, &run->s.regs.regs);
vcpu_sregs_get(vcpu, &sregs);
compare_sregs(&sregs, &run->s.regs.sregs);
vcpu_events_get(vcpu, &events);
compare_vcpu_events(&events, &run->s.regs.events);
/* Clear kvm_dirty_regs bits, verify new s.regs values are
* overwritten with existing guest values.
*/
run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = 0;
run->s.regs.regs.rbx = 0xDEADBEEF;
rv = _vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF,
"rbx sync regs value incorrect 0x%llx.",
run->s.regs.regs.rbx);
/* Clear kvm_valid_regs bits and kvm_dirty_bits.
* Verify s.regs values are not overwritten with existing guest values
* and that guest values are not overwritten with kvm_sync_regs values.
*/
run->kvm_valid_regs = 0;
run->kvm_dirty_regs = 0;
run->s.regs.regs.rbx = 0xAAAA;
regs.rbx = 0xBAC0;
vcpu_regs_set(vcpu, ®s);
rv = _vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
"rbx sync regs value incorrect 0x%llx.",
run->s.regs.regs.rbx);
vcpu_regs_get(vcpu, ®s);
TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
"rbx guest value incorrect 0x%llx.",
regs.rbx);
/* Clear kvm_valid_regs bits. Verify s.regs values are not overwritten
* with existing guest values but that guest values are overwritten
* with kvm_sync_regs values.
*/
run->kvm_valid_regs = 0;
run->kvm_dirty_regs = TEST_SYNC_FIELDS;
run->s.regs.regs.rbx = 0xBBBB;
rv = _vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
"rbx sync regs value incorrect 0x%llx.",
run->s.regs.regs.rbx);
vcpu_regs_get(vcpu, ®s);
TEST_ASSERT(regs.rbx == 0xBBBB + 1,
"rbx guest value incorrect 0x%llx.",
regs.rbx);
kvm_vm_free(vm);
race_sync_regs(race_sregs_cr4);
race_sync_regs(race_events_exc);
race_sync_regs(race_events_inj_pen);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/sync_regs_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018, Red Hat, Inc.
*
* Tests for SMM.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"
#include "svm_util.h"
#define SMRAM_SIZE 65536
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
#define SMRAM_GPA 0x1000000
#define SMRAM_STAGE 0xfe
#define STR(x) #x
#define XSTR(s) STR(s)
#define SYNC_PORT 0xe
#define DONE 0xff
/*
 * This is compiled as normal 64-bit code; however, the SMI handler is
 * executed in real-address mode. To stay simple we're limiting ourselves
 * to a mode-independent subset of asm here.
 * The SMI handler always reports back the fixed stage SMRAM_STAGE.
 */
uint8_t smi_handler[] = {
0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */
0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */
0x0f, 0xaa, /* rsm */
};
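/* Each sync is a port I/O access; the host reads the stage back from RAX. */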
static inline void sync_with_host(uint64_t phase)
{
asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
: "+a" (phase));
}
static void self_smi(void)
{
x2apic_write_reg(APIC_ICR,
APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}
static void l2_guest_code(void)
{
sync_with_host(8);
sync_with_host(10);
vmcall();
}
static void guest_code(void *arg)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
struct svm_test_data *svm = arg;
struct vmx_pages *vmx_pages = arg;
sync_with_host(1);
wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);
sync_with_host(2);
self_smi();
sync_with_host(4);
if (arg) {
if (this_cpu_has(X86_FEATURE_SVM)) {
generic_svm_setup(svm, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
} else {
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
GUEST_ASSERT(load_vmcs(vmx_pages));
prepare_vmcs(vmx_pages, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
}
sync_with_host(5);
self_smi();
sync_with_host(7);
if (this_cpu_has(X86_FEATURE_SVM)) {
run_guest(svm->vmcb, svm->vmcb_gpa);
run_guest(svm->vmcb, svm->vmcb_gpa);
} else {
vmlaunch();
vmresume();
}
/* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
sync_with_host(12);
}
sync_with_host(DONE);
}
void inject_smi(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_events events;
vcpu_events_get(vcpu, &events);
events.smi.pending = 1;
events.flags |= KVM_VCPUEVENT_VALID_SMM;
vcpu_events_set(vcpu, &events);
}
int main(int argc, char *argv[])
{
vm_vaddr_t nested_gva = 0;
struct kvm_vcpu *vcpu;
struct kvm_regs regs;
struct kvm_vm *vm;
struct kvm_x86_state *state;
int stage, stage_reported;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM));
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
SMRAM_MEMSLOT, SMRAM_PAGES, 0);
TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
== SMRAM_GPA, "could not allocate guest physical addresses?");
memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
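	/* The SMI handler entry point is SMBASE + 0x8000 per the SDM. */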
memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
sizeof(smi_handler));
vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA);
if (kvm_has_cap(KVM_CAP_NESTED_STATE)) {
if (kvm_cpu_has(X86_FEATURE_SVM))
vcpu_alloc_svm(vm, &nested_gva);
else if (kvm_cpu_has(X86_FEATURE_VMX))
vcpu_alloc_vmx(vm, &nested_gva);
}
if (!nested_gva)
pr_info("will skip SMM test with VMX enabled\n");
vcpu_args_set(vcpu, 1, nested_gva);
for (stage = 1;; stage++) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
memset(®s, 0, sizeof(regs));
vcpu_regs_get(vcpu, ®s);
stage_reported = regs.rax & 0xff;
if (stage_reported == DONE)
goto done;
TEST_ASSERT(stage_reported == stage ||
stage_reported == SMRAM_STAGE,
"Unexpected stage: #%x, got %x",
stage, stage_reported);
/*
* Enter SMM during L2 execution and check that we correctly
* return from it. Do not perform save/restore while in SMM yet.
*/
if (stage == 8) {
inject_smi(vcpu);
continue;
}
/*
* Perform save/restore while the guest is in SMM triggered
* during L2 execution.
*/
if (stage == 10)
inject_smi(vcpu);
state = vcpu_save_state(vcpu);
kvm_vm_release(vm);
vcpu = vm_recreate_with_one_vcpu(vm);
vcpu_load_state(vcpu, state);
kvm_x86_state_cleanup(state);
}
done:
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/smm_test.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kvm.h>
#include <linux/psp-sev.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "kselftest.h"
#define SEV_POLICY_ES 0b100
#define NR_MIGRATE_TEST_VCPUS 4
#define NR_MIGRATE_TEST_VMS 3
#define NR_LOCK_TESTING_THREADS 3
#define NR_LOCK_TESTING_ITERATIONS 10000
bool have_sev_es;
static int __sev_ioctl(int vm_fd, int cmd_id, void *data, __u32 *fw_error)
{
struct kvm_sev_cmd cmd = {
.id = cmd_id,
.data = (uint64_t)data,
.sev_fd = open_sev_dev_path_or_exit(),
};
int ret;
ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
*fw_error = cmd.error;
return ret;
}
static void sev_ioctl(int vm_fd, int cmd_id, void *data)
{
int ret;
__u32 fw_error;
ret = __sev_ioctl(vm_fd, cmd_id, data, &fw_error);
TEST_ASSERT(ret == 0 && fw_error == SEV_RET_SUCCESS,
"%d failed: return code: %d, errno: %d, fw error: %d",
cmd_id, ret, errno, fw_error);
}
static struct kvm_vm *sev_vm_create(bool es)
{
struct kvm_vm *vm;
struct kvm_sev_launch_start start = { 0 };
int i;
vm = vm_create_barebones();
sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
__vm_vcpu_add(vm, i);
if (es)
start.policy |= SEV_POLICY_ES;
sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
if (es)
sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
return vm;
}
static struct kvm_vm *aux_vm_create(bool with_vcpus)
{
struct kvm_vm *vm;
int i;
vm = vm_create_barebones();
if (!with_vcpus)
return vm;
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
__vm_vcpu_add(vm, i);
return vm;
}
static int __sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
{
return __vm_enable_cap(dst, KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM, src->fd);
}
static void sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
{
int ret;
ret = __sev_migrate_from(dst, src);
TEST_ASSERT(!ret, "Migration failed, ret: %d, errno: %d\n", ret, errno);
}
static void test_sev_migrate_from(bool es)
{
struct kvm_vm *src_vm;
struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
int i, ret;
src_vm = sev_vm_create(es);
for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
dst_vms[i] = aux_vm_create(true);
/* Initial migration from the src to the first dst. */
sev_migrate_from(dst_vms[0], src_vm);
for (i = 1; i < NR_MIGRATE_TEST_VMS; i++)
sev_migrate_from(dst_vms[i], dst_vms[i - 1]);
	/* Migrating back to the now-dead source VM must fail. */
ret = __sev_migrate_from(src_vm, dst_vms[NR_MIGRATE_TEST_VMS - 1]);
TEST_ASSERT(ret == -1 && errno == EIO,
"VM that was migrated from should be dead. ret %d, errno: %d\n", ret,
errno);
kvm_vm_free(src_vm);
for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
kvm_vm_free(dst_vms[i]);
}
struct locking_thread_input {
struct kvm_vm *vm;
struct kvm_vm *source_vms[NR_LOCK_TESTING_THREADS];
};
static void *locking_test_thread(void *arg)
{
int i, j;
struct locking_thread_input *input = (struct locking_thread_input *)arg;
for (i = 0; i < NR_LOCK_TESTING_ITERATIONS; ++i) {
j = i % NR_LOCK_TESTING_THREADS;
__sev_migrate_from(input->vm, input->source_vms[j]);
}
return NULL;
}
static void test_sev_migrate_locking(void)
{
struct locking_thread_input input[NR_LOCK_TESTING_THREADS];
pthread_t pt[NR_LOCK_TESTING_THREADS];
int i;
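	/* Every thread migrates over the same VMs to maximize lock contention. */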
for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) {
input[i].vm = sev_vm_create(/* es= */ false);
input[0].source_vms[i] = input[i].vm;
}
for (i = 1; i < NR_LOCK_TESTING_THREADS; ++i)
memcpy(input[i].source_vms, input[0].source_vms,
sizeof(input[i].source_vms));
for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
pthread_join(pt[i], NULL);
for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
kvm_vm_free(input[i].vm);
}
static void test_sev_migrate_parameters(void)
{
struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_no_sev,
*sev_es_vm_no_vmsa;
int ret;
vm_no_vcpu = vm_create_barebones();
vm_no_sev = aux_vm_create(true);
ret = __sev_migrate_from(vm_no_vcpu, vm_no_sev);
TEST_ASSERT(ret == -1 && errno == EINVAL,
"Migrations require SEV enabled. ret %d, errno: %d\n", ret,
errno);
if (!have_sev_es)
goto out;
sev_vm = sev_vm_create(/* es= */ false);
sev_es_vm = sev_vm_create(/* es= */ true);
sev_es_vm_no_vmsa = vm_create_barebones();
sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
__vm_vcpu_add(sev_es_vm_no_vmsa, 1);
ret = __sev_migrate_from(sev_vm, sev_es_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"Should not be able migrate to SEV enabled VM. ret: %d, errno: %d\n",
ret, errno);
ret = __sev_migrate_from(sev_es_vm, sev_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"Should not be able migrate to SEV-ES enabled VM. ret: %d, errno: %d\n",
ret, errno);
ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d\n",
ret, errno);
ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm_no_vmsa);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d\n",
ret, errno);
kvm_vm_free(sev_vm);
kvm_vm_free(sev_es_vm);
kvm_vm_free(sev_es_vm_no_vmsa);
out:
kvm_vm_free(vm_no_vcpu);
kvm_vm_free(vm_no_sev);
}
static int __sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
{
return __vm_enable_cap(dst, KVM_CAP_VM_COPY_ENC_CONTEXT_FROM, src->fd);
}
static void sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
{
int ret;
ret = __sev_mirror_create(dst, src);
TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno);
}
static void verify_mirror_allowed_cmds(int vm_fd)
{
struct kvm_sev_guest_status status;
for (int cmd_id = KVM_SEV_INIT; cmd_id < KVM_SEV_NR_MAX; ++cmd_id) {
int ret;
__u32 fw_error;
/*
* These commands are allowed for mirror VMs, all others are
* not.
*/
switch (cmd_id) {
case KVM_SEV_LAUNCH_UPDATE_VMSA:
case KVM_SEV_GUEST_STATUS:
case KVM_SEV_DBG_DECRYPT:
case KVM_SEV_DBG_ENCRYPT:
continue;
default:
break;
}
/*
* These commands should be disallowed before the data
* parameter is examined so NULL is OK here.
*/
ret = __sev_ioctl(vm_fd, cmd_id, NULL, &fw_error);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"Should not be able call command: %d. ret: %d, errno: %d\n",
cmd_id, ret, errno);
}
sev_ioctl(vm_fd, KVM_SEV_GUEST_STATUS, &status);
}
static void test_sev_mirror(bool es)
{
struct kvm_vm *src_vm, *dst_vm;
int i;
src_vm = sev_vm_create(es);
dst_vm = aux_vm_create(false);
sev_mirror_create(dst_vm, src_vm);
/* Check that we can complete creation of the mirror VM. */
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
__vm_vcpu_add(dst_vm, i);
if (es)
sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
verify_mirror_allowed_cmds(dst_vm->fd);
kvm_vm_free(src_vm);
kvm_vm_free(dst_vm);
}
static void test_sev_mirror_parameters(void)
{
struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu;
int ret;
sev_vm = sev_vm_create(/* es= */ false);
vm_with_vcpu = aux_vm_create(true);
vm_no_vcpu = aux_vm_create(false);
ret = __sev_mirror_create(sev_vm, sev_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"Should not be able copy context to self. ret: %d, errno: %d\n",
ret, errno);
ret = __sev_mirror_create(vm_no_vcpu, vm_with_vcpu);
TEST_ASSERT(ret == -1 && errno == EINVAL,
"Copy context requires SEV enabled. ret %d, errno: %d\n", ret,
errno);
ret = __sev_mirror_create(vm_with_vcpu, sev_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n",
ret, errno);
if (!have_sev_es)
goto out;
sev_es_vm = sev_vm_create(/* es= */ true);
ret = __sev_mirror_create(sev_vm, sev_es_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"Should not be able copy context to SEV enabled VM. ret: %d, errno: %d\n",
ret, errno);
ret = __sev_mirror_create(sev_es_vm, sev_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d\n",
ret, errno);
kvm_vm_free(sev_es_vm);
out:
kvm_vm_free(sev_vm);
kvm_vm_free(vm_with_vcpu);
kvm_vm_free(vm_no_vcpu);
}
static void test_sev_move_copy(void)
{
struct kvm_vm *dst_vm, *dst2_vm, *dst3_vm, *sev_vm, *mirror_vm,
*dst_mirror_vm, *dst2_mirror_vm, *dst3_mirror_vm;
sev_vm = sev_vm_create(/* es= */ false);
dst_vm = aux_vm_create(true);
dst2_vm = aux_vm_create(true);
dst3_vm = aux_vm_create(true);
mirror_vm = aux_vm_create(false);
dst_mirror_vm = aux_vm_create(false);
dst2_mirror_vm = aux_vm_create(false);
dst3_mirror_vm = aux_vm_create(false);
sev_mirror_create(mirror_vm, sev_vm);
sev_migrate_from(dst_mirror_vm, mirror_vm);
sev_migrate_from(dst_vm, sev_vm);
sev_migrate_from(dst2_vm, dst_vm);
sev_migrate_from(dst2_mirror_vm, dst_mirror_vm);
sev_migrate_from(dst3_mirror_vm, dst2_mirror_vm);
sev_migrate_from(dst3_vm, dst2_vm);
kvm_vm_free(dst_vm);
kvm_vm_free(sev_vm);
kvm_vm_free(dst2_vm);
kvm_vm_free(dst3_vm);
kvm_vm_free(mirror_vm);
kvm_vm_free(dst_mirror_vm);
kvm_vm_free(dst2_mirror_vm);
kvm_vm_free(dst3_mirror_vm);
/*
	 * Run a similar test, but destroy the mirrors before the mirrored VMs
	 * to ensure destruction is done safely.
*/
sev_vm = sev_vm_create(/* es= */ false);
dst_vm = aux_vm_create(true);
mirror_vm = aux_vm_create(false);
dst_mirror_vm = aux_vm_create(false);
sev_mirror_create(mirror_vm, sev_vm);
sev_migrate_from(dst_mirror_vm, mirror_vm);
sev_migrate_from(dst_vm, sev_vm);
kvm_vm_free(mirror_vm);
kvm_vm_free(dst_mirror_vm);
kvm_vm_free(dst_vm);
kvm_vm_free(sev_vm);
}
int main(int argc, char *argv[])
{
TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM));
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));
have_sev_es = kvm_cpu_has(X86_FEATURE_SEV_ES);
if (kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
test_sev_migrate_from(/* es= */ false);
if (have_sev_es)
test_sev_migrate_from(/* es= */ true);
test_sev_migrate_locking();
test_sev_migrate_parameters();
if (kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
test_sev_move_copy();
}
if (kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
test_sev_mirror(/* es= */ false);
if (have_sev_es)
test_sev_mirror(/* es= */ true);
test_sev_mirror_parameters();
}
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020, Google LLC.
*
* Tests for KVM paravirtual feature disablement
*/
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <linux/stringify.h>
#include <stdint.h>
#include "apic.h"
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
/* VMCALL and VMMCALL are both 3-byte opcodes. */
#define HYPERCALL_INSN_SIZE 3
static bool quirk_disabled;
static void guest_ud_handler(struct ex_regs *regs)
{
regs->rax = -EFAULT;
regs->rip += HYPERCALL_INSN_SIZE;
}
static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 };
static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 };
extern uint8_t hypercall_insn[HYPERCALL_INSN_SIZE];
static uint64_t do_sched_yield(uint8_t apic_id)
{
uint64_t ret;
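	/*
	 * The 0xcc (int3) bytes are placeholders: guest_main() copies the
	 * "other" VM type's hypercall instruction over them before this runs,
	 * so KVM either patches in the native instruction or the guest takes
	 * a #UD, depending on the quirk.
	 */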
asm volatile("hypercall_insn:\n\t"
".byte 0xcc,0xcc,0xcc\n\t"
: "=a"(ret)
: "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id)
: "memory");
return ret;
}
static void guest_main(void)
{
const uint8_t *native_hypercall_insn;
const uint8_t *other_hypercall_insn;
uint64_t ret;
if (host_cpu_is_intel) {
native_hypercall_insn = vmx_vmcall;
other_hypercall_insn = svm_vmmcall;
} else if (host_cpu_is_amd) {
native_hypercall_insn = svm_vmmcall;
other_hypercall_insn = vmx_vmcall;
} else {
GUEST_ASSERT(0);
/* unreachable */
return;
}
memcpy(hypercall_insn, other_hypercall_insn, HYPERCALL_INSN_SIZE);
ret = do_sched_yield(GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID)));
/*
* If the quirk is disabled, verify that guest_ud_handler() "returned"
* -EFAULT and that KVM did NOT patch the hypercall. If the quirk is
* enabled, verify that the hypercall succeeded and that KVM patched in
* the "right" hypercall.
*/
if (quirk_disabled) {
GUEST_ASSERT(ret == (uint64_t)-EFAULT);
GUEST_ASSERT(!memcmp(other_hypercall_insn, hypercall_insn,
HYPERCALL_INSN_SIZE));
} else {
GUEST_ASSERT(!ret);
GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn,
HYPERCALL_INSN_SIZE));
}
GUEST_DONE();
}
static void enter_guest(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct ucall uc;
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
break;
case UCALL_DONE:
return;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
default:
TEST_FAIL("Unhandled ucall: %ld\nexit_reason: %u (%s)",
uc.cmd, run->exit_reason, exit_reason_str(run->exit_reason));
}
}
static void test_fix_hypercall(bool disable_quirk)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
vm_init_descriptor_tables(vcpu->vm);
vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
if (disable_quirk)
vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
quirk_disabled = disable_quirk;
sync_global_to_guest(vm, quirk_disabled);
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
enter_guest(vcpu);
}
int main(void)
{
TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
test_fix_hypercall(false);
test_fix_hypercall(true);
}
| linux-master | tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 Oracle and/or its affiliates.
*
* Based on:
* svm_int_ctl_test
*
* Copyright (C) 2021, Red Hat, Inc.
*
*/
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>
#include "apic.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "test_util.h"
#define INT_NR 0x20
static_assert(ATOMIC_INT_LOCK_FREE == 2, "atomic int is not lockless");
static unsigned int bp_fired;
static void guest_bp_handler(struct ex_regs *regs)
{
bp_fired++;
}
static unsigned int int_fired;
static void l2_guest_code_int(void);
static void guest_int_handler(struct ex_regs *regs)
{
int_fired++;
GUEST_ASSERT_EQ(regs->rip, (unsigned long)l2_guest_code_int);
}
static void l2_guest_code_int(void)
{
GUEST_ASSERT_EQ(int_fired, 1);
/*
* Same as the vmmcall() function, but with a ud2 sneaked after the
* vmmcall. The caller injects an exception with the return address
* increased by 2, so the "pop rbp" must be after the ud2 and we cannot
* use vmmcall() directly.
*/
__asm__ __volatile__("push %%rbp; vmmcall; ud2; pop %%rbp"
: : "a"(0xdeadbeef), "c"(0xbeefdead)
: "rbx", "rdx", "rsi", "rdi", "r8", "r9",
"r10", "r11", "r12", "r13", "r14", "r15");
GUEST_ASSERT_EQ(bp_fired, 1);
hlt();
}
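/*
 * nmi_stage is updated from both the synchronous flow in l1_guest_code()
 * and the asynchronous NMI handler, hence the lock-free atomics below.
 */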
static atomic_int nmi_stage;
#define nmi_stage_get() atomic_load_explicit(&nmi_stage, memory_order_acquire)
#define nmi_stage_inc() atomic_fetch_add_explicit(&nmi_stage, 1, memory_order_acq_rel)
static void guest_nmi_handler(struct ex_regs *regs)
{
nmi_stage_inc();
if (nmi_stage_get() == 1) {
vmmcall();
GUEST_FAIL("Unexpected resume after VMMCALL");
} else {
GUEST_ASSERT_EQ(nmi_stage_get(), 3);
GUEST_DONE();
}
}
static void l2_guest_code_nmi(void)
{
ud2();
}
static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t idt_alt)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
struct vmcb *vmcb = svm->vmcb;
if (is_nmi)
x2apic_enable();
/* Prepare for L2 execution. */
generic_svm_setup(svm,
is_nmi ? l2_guest_code_nmi : l2_guest_code_int,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
vmcb->control.intercept_exceptions |= BIT(PF_VECTOR) | BIT(UD_VECTOR);
vmcb->control.intercept |= BIT(INTERCEPT_NMI) | BIT(INTERCEPT_HLT);
if (is_nmi) {
vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
} else {
vmcb->control.event_inj = INT_NR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_SOFT;
/* The return address pushed on stack */
vmcb->control.next_rip = vmcb->save.rip;
}
run_guest(vmcb, svm->vmcb_gpa);
__GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
"Expected VMMCAL #VMEXIT, got '0x%x', info1 = '0x%llx, info2 = '0x%llx'",
vmcb->control.exit_code,
vmcb->control.exit_info_1, vmcb->control.exit_info_2);
if (is_nmi) {
clgi();
x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_NMI);
GUEST_ASSERT_EQ(nmi_stage_get(), 1);
nmi_stage_inc();
stgi();
/* self-NMI happens here */
while (true)
cpu_relax();
}
/* Skip over VMMCALL */
vmcb->save.rip += 3;
/* Switch to alternate IDT to cause intervening NPF again */
vmcb->save.idtr.base = idt_alt;
vmcb->control.clean = 0; /* &= ~BIT(VMCB_DT) would be enough */
vmcb->control.event_inj = BP_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
/* The return address pushed on stack, skip over UD2 */
vmcb->control.next_rip = vmcb->save.rip + 2;
run_guest(vmcb, svm->vmcb_gpa);
__GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_HLT,
"Expected HLT #VMEXIT, got '0x%x', info1 = '0x%llx, info2 = '0x%llx'",
vmcb->control.exit_code,
vmcb->control.exit_info_1, vmcb->control.exit_info_2);
GUEST_DONE();
}
static void run_test(bool is_nmi)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
vm_vaddr_t svm_gva;
vm_vaddr_t idt_alt_vm;
struct kvm_guest_debug debug;
pr_info("Running %s test\n", is_nmi ? "NMI" : "soft int");
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
vm_install_exception_handler(vm, BP_VECTOR, guest_bp_handler);
vm_install_exception_handler(vm, INT_NR, guest_int_handler);
vcpu_alloc_svm(vm, &svm_gva);
if (!is_nmi) {
void *idt, *idt_alt;
idt_alt_vm = vm_vaddr_alloc_page(vm);
idt_alt = addr_gva2hva(vm, idt_alt_vm);
idt = addr_gva2hva(vm, vm->idt);
memcpy(idt_alt, idt, getpagesize());
} else {
idt_alt_vm = 0;
}
vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm);
memset(&debug, 0, sizeof(debug));
vcpu_guest_debug_set(vcpu, &debug);
struct ucall uc;
alarm(2);
vcpu_run(vcpu);
alarm(0);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
/* NOT REACHED */
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
}
done:
kvm_vm_free(vm);
}
int main(int argc, char *argv[])
{
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
TEST_ASSERT(kvm_cpu_has(X86_FEATURE_NRIPS),
"KVM with nSVM is supposed to unconditionally advertise nRIP Save");
atomic_init(&nmi_stage, 0);
run_test(false);
run_test(true);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vmx_apic_access_test
*
* Copyright (C) 2020, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* The first subtest simply checks to see that an L2 guest can be
* launched with a valid APIC-access address that is backed by a
* page of L1 physical memory.
*
* The second subtest sets the APIC-access address to a (valid) L1
* physical address that is not backed by memory. KVM can't handle
* this situation, so resuming L2 should result in a KVM exit for
* internal error (emulation). This is not an architectural
* requirement. It is just a shortcoming of KVM. The internal error
* is unfortunate, but it's better than what used to happen!
*/
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include <string.h>
#include <sys/ioctl.h>
#include "kselftest.h"
static void l2_guest_code(void)
{
/* Exit to L1 */
__asm__ __volatile__("vmcall");
}
static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
uint32_t control;
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
GUEST_ASSERT(load_vmcs(vmx_pages));
/* Prepare the VMCS for L2 execution. */
prepare_vmcs(vmx_pages, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
control |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
control = vmreadz(SECONDARY_VM_EXEC_CONTROL);
control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
vmwrite(SECONDARY_VM_EXEC_CONTROL, control);
vmwrite(APIC_ACCESS_ADDR, vmx_pages->apic_access_gpa);
/* Try to launch L2 with the memory-backed APIC-access address. */
GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
vmwrite(APIC_ACCESS_ADDR, high_gpa);
/* Try to resume L2 with the unbacked APIC-access address. */
GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
GUEST_DONE();
}
int main(int argc, char *argv[])
{
unsigned long apic_access_addr = ~0ul;
vm_vaddr_t vmx_pages_gva;
unsigned long high_gpa;
struct vmx_pages *vmx;
bool done = false;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
high_gpa = (vm->max_gfn - 1) << vm->page_shift;
vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
prepare_virtualize_apic_accesses(vmx, vm);
vcpu_args_set(vcpu, 2, vmx_pages_gva, high_gpa);
while (!done) {
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
vcpu_run(vcpu);
if (apic_access_addr == high_gpa) {
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
TEST_ASSERT(run->internal.suberror ==
KVM_INTERNAL_ERROR_EMULATION,
"Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u\n",
run->internal.suberror);
break;
}
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
apic_access_addr = uc.args[1];
break;
case UCALL_DONE:
done = true;
break;
default:
TEST_ASSERT(false, "Unknown ucall %lu", uc.cmd);
}
}
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021, Red Hat Inc.
*
* Generic tests for KVM CPUID set/get ioctls
*/
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
/* CPUIDs known to differ */
struct {
u32 function;
u32 index;
} mangled_cpuids[] = {
/*
* These entries depend on the vCPU's XCR0 register and IA32_XSS MSR,
* which are not controlled for by this test.
*/
{.function = 0xd, .index = 0},
{.function = 0xd, .index = 1},
};
static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
{
int i;
u32 eax, ebx, ecx, edx;
for (i = 0; i < guest_cpuid->nent; i++) {
__cpuid(guest_cpuid->entries[i].function,
guest_cpuid->entries[i].index,
&eax, &ebx, &ecx, &edx);
GUEST_ASSERT_EQ(eax, guest_cpuid->entries[i].eax);
GUEST_ASSERT_EQ(ebx, guest_cpuid->entries[i].ebx);
GUEST_ASSERT_EQ(ecx, guest_cpuid->entries[i].ecx);
GUEST_ASSERT_EQ(edx, guest_cpuid->entries[i].edx);
}
}
static void guest_main(struct kvm_cpuid2 *guest_cpuid)
{
GUEST_SYNC(1);
test_guest_cpuids(guest_cpuid);
GUEST_SYNC(2);
GUEST_ASSERT_EQ(this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF), 0x40000001);
GUEST_DONE();
}
static bool is_cpuid_mangled(const struct kvm_cpuid_entry2 *entry)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(mangled_cpuids); i++) {
		if (mangled_cpuids[i].function == entry->function &&
		    mangled_cpuids[i].index == entry->index)
return true;
}
return false;
}
static void compare_cpuids(const struct kvm_cpuid2 *cpuid1,
const struct kvm_cpuid2 *cpuid2)
{
const struct kvm_cpuid_entry2 *e1, *e2;
int i;
TEST_ASSERT(cpuid1->nent == cpuid2->nent,
"CPUID nent mismatch: %d vs. %d", cpuid1->nent, cpuid2->nent);
for (i = 0; i < cpuid1->nent; i++) {
e1 = &cpuid1->entries[i];
e2 = &cpuid2->entries[i];
TEST_ASSERT(e1->function == e2->function &&
e1->index == e2->index && e1->flags == e2->flags,
"CPUID entries[%d] mismtach: 0x%x.%d.%x vs. 0x%x.%d.%x\n",
i, e1->function, e1->index, e1->flags,
e2->function, e2->index, e2->flags);
if (is_cpuid_mangled(e1))
continue;
TEST_ASSERT(e1->eax == e2->eax && e1->ebx == e2->ebx &&
e1->ecx == e2->ecx && e1->edx == e2->edx,
"CPUID 0x%x.%x differ: 0x%x:0x%x:0x%x:0x%x vs 0x%x:0x%x:0x%x:0x%x",
e1->function, e1->index,
e1->eax, e1->ebx, e1->ecx, e1->edx,
e2->eax, e2->ebx, e2->ecx, e2->edx);
}
}
static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
{
struct ucall uc;
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage + 1,
"Stage %d: Unexpected register values vmexit, got %lx",
stage + 1, (ulong)uc.args[1]);
return;
case UCALL_DONE:
return;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
default:
TEST_ASSERT(false, "Unexpected exit: %s",
exit_reason_str(vcpu->run->exit_reason));
}
}
struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid)
{
int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]);
vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);
struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva);
memcpy(guest_cpuids, cpuid, size);
*p_gva = gva;
return guest_cpuids;
}
static void set_cpuid_after_run(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *ent;
int rc;
u32 eax, ebx, x;
/* Setting unmodified CPUID is allowed */
rc = __vcpu_set_cpuid(vcpu);
TEST_ASSERT(!rc, "Setting unmodified CPUID after KVM_RUN failed: %d", rc);
/* Changing CPU features is forbidden */
ent = vcpu_get_cpuid_entry(vcpu, 0x7);
ebx = ent->ebx;
ent->ebx--;
rc = __vcpu_set_cpuid(vcpu);
TEST_ASSERT(rc, "Changing CPU features should fail");
ent->ebx = ebx;
/* Changing MAXPHYADDR is forbidden */
ent = vcpu_get_cpuid_entry(vcpu, 0x80000008);
eax = ent->eax;
x = eax & 0xff;
ent->eax = (eax & ~0xffu) | (x - 1);
rc = __vcpu_set_cpuid(vcpu);
TEST_ASSERT(rc, "Changing MAXPHYADDR should fail");
ent->eax = eax;
}
static void test_get_cpuid2(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent + 1);
int i, r;
vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
TEST_ASSERT(cpuid->nent == vcpu->cpuid->nent,
"KVM didn't update nent on success, wanted %u, got %u\n",
vcpu->cpuid->nent, cpuid->nent);
for (i = 0; i < vcpu->cpuid->nent; i++) {
cpuid->nent = i;
r = __vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
TEST_ASSERT(r && errno == E2BIG, KVM_IOCTL_ERROR(KVM_GET_CPUID2, r));
TEST_ASSERT(cpuid->nent == i, "KVM modified nent on failure");
}
free(cpuid);
}
int main(void)
{
struct kvm_vcpu *vcpu;
vm_vaddr_t cpuid_gva;
struct kvm_vm *vm;
int stage;
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
compare_cpuids(kvm_get_supported_cpuid(), vcpu->cpuid);
vcpu_alloc_cpuid(vm, &cpuid_gva, vcpu->cpuid);
vcpu_args_set(vcpu, 1, cpuid_gva);
for (stage = 0; stage < 3; stage++)
run_vcpu(vcpu, stage);
set_cpuid_after_run(vcpu);
test_get_cpuid2(vcpu);
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/cpuid_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022, Google LLC.
*
* Test for KVM_CAP_EXIT_ON_EMULATION_FAILURE.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include "flds_emulation.h"
#include "test_util.h"
#define MMIO_GPA 0x700000000
#define MMIO_GVA MMIO_GPA
static void guest_code(void)
{
/* Execute flds with an MMIO address to force KVM to emulate it. */
flds(MMIO_GVA);
GUEST_DONE();
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vm_enable_cap(vm, KVM_CAP_EXIT_ON_EMULATION_FAILURE, 1);
virt_map(vm, MMIO_GVA, MMIO_GPA, 1);
vcpu_run(vcpu);
handle_flds_emulation_failure_exit(vcpu);
vcpu_run(vcpu);
TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* maximum APIC ID capability tests
*
* Copyright (C) 2022, Intel, Inc.
*
* Tests for getting/setting maximum APIC ID capability
*/
#include "kvm_util.h"
#define MAX_VCPU_ID 2
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
int ret;
vm = vm_create_barebones();
/* Get KVM_CAP_MAX_VCPU_ID cap supported in KVM */
ret = vm_check_cap(vm, KVM_CAP_MAX_VCPU_ID);
/* Try to set KVM_CAP_MAX_VCPU_ID beyond KVM cap */
ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, ret + 1);
TEST_ASSERT(ret < 0,
"Setting KVM_CAP_MAX_VCPU_ID beyond KVM cap should fail");
/* Set KVM_CAP_MAX_VCPU_ID */
vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID);
/* Try to set KVM_CAP_MAX_VCPU_ID again */
ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID + 1);
TEST_ASSERT(ret < 0,
"Setting KVM_CAP_MAX_VCPU_ID multiple times should fail");
/* Create vCPU with id beyond KVM_CAP_MAX_VCPU_ID cap*/
ret = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)MAX_VCPU_ID);
TEST_ASSERT(ret < 0, "Creating vCPU with ID > MAX_VCPU_ID should fail");
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/max_vcpuid_cap_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vmx_set_nested_state_test
*
* Copyright (C) 2019, Google LLC.
*
* This test verifies the integrity of calling the ioctl KVM_SET_NESTED_STATE.
*/
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include <errno.h>
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
/*
* Mirror of VMCS12_REVISION in arch/x86/kvm/vmx/vmcs12.h. If that value
* changes this should be updated.
*/
#define VMCS12_REVISION 0x11e57ed0
bool have_evmcs;
void test_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state *state)
{
vcpu_nested_state_set(vcpu, state);
}
void test_nested_state_expect_errno(struct kvm_vcpu *vcpu,
struct kvm_nested_state *state,
int expected_errno)
{
int rv;
rv = __vcpu_nested_state_set(vcpu, state);
TEST_ASSERT(rv == -1 && errno == expected_errno,
"Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
strerror(expected_errno), expected_errno, rv, strerror(errno),
errno);
}
void test_nested_state_expect_einval(struct kvm_vcpu *vcpu,
struct kvm_nested_state *state)
{
test_nested_state_expect_errno(vcpu, state, EINVAL);
}
void test_nested_state_expect_efault(struct kvm_vcpu *vcpu,
struct kvm_nested_state *state)
{
test_nested_state_expect_errno(vcpu, state, EFAULT);
}
void set_revision_id_for_vmcs12(struct kvm_nested_state *state,
u32 vmcs12_revision)
{
/* Set revision_id in vmcs12 to vmcs12_revision. */
memcpy(&state->data, &vmcs12_revision, sizeof(u32));
}
void set_default_state(struct kvm_nested_state *state)
{
memset(state, 0, sizeof(*state));
state->flags = KVM_STATE_NESTED_RUN_PENDING |
KVM_STATE_NESTED_GUEST_MODE;
state->format = 0;
state->size = sizeof(*state);
}
void set_default_vmx_state(struct kvm_nested_state *state, int size)
{
memset(state, 0, size);
if (have_evmcs)
state->flags = KVM_STATE_NESTED_EVMCS;
state->format = 0;
state->size = size;
state->hdr.vmx.vmxon_pa = 0x1000;
state->hdr.vmx.vmcs12_pa = 0x2000;
state->hdr.vmx.smm.flags = 0;
set_revision_id_for_vmcs12(state, VMCS12_REVISION);
}
void test_vmx_nested_state(struct kvm_vcpu *vcpu)
{
/* Add a page for VMCS12. */
const int state_sz = sizeof(struct kvm_nested_state) + getpagesize();
struct kvm_nested_state *state =
(struct kvm_nested_state *)malloc(state_sz);
	/* The format must be set to 0 (0 for VMX, 1 for SVM). */
set_default_vmx_state(state, state_sz);
state->format = 1;
test_nested_state_expect_einval(vcpu, state);
/*
* We cannot virtualize anything if the guest does not have VMX
* enabled.
*/
set_default_vmx_state(state, state_sz);
test_nested_state_expect_einval(vcpu, state);
/*
* We cannot virtualize anything if the guest does not have VMX
* enabled. We expect KVM_SET_NESTED_STATE to return 0 if vmxon_pa
* is set to -1ull, but the flags must be zero.
*/
set_default_vmx_state(state, state_sz);
state->hdr.vmx.vmxon_pa = -1ull;
test_nested_state_expect_einval(vcpu, state);
state->hdr.vmx.vmcs12_pa = -1ull;
state->flags = KVM_STATE_NESTED_EVMCS;
test_nested_state_expect_einval(vcpu, state);
state->flags = 0;
test_nested_state(vcpu, state);
/* Enable VMX in the guest CPUID. */
vcpu_set_cpuid_feature(vcpu, X86_FEATURE_VMX);
/*
* Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
* setting the nested state but flags other than eVMCS must be clear.
* The eVMCS flag can be set if the enlightened VMCS capability has
* been enabled.
*/
set_default_vmx_state(state, state_sz);
state->hdr.vmx.vmxon_pa = -1ull;
state->hdr.vmx.vmcs12_pa = -1ull;
test_nested_state_expect_einval(vcpu, state);
state->flags &= KVM_STATE_NESTED_EVMCS;
if (have_evmcs) {
test_nested_state_expect_einval(vcpu, state);
vcpu_enable_evmcs(vcpu);
}
test_nested_state(vcpu, state);
/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
state->hdr.vmx.smm.flags = 1;
test_nested_state_expect_einval(vcpu, state);
/* Invalid flags are rejected. */
set_default_vmx_state(state, state_sz);
state->hdr.vmx.flags = ~0;
test_nested_state_expect_einval(vcpu, state);
/* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
set_default_vmx_state(state, state_sz);
state->hdr.vmx.vmxon_pa = -1ull;
state->flags = 0;
test_nested_state_expect_einval(vcpu, state);
/* It is invalid to have vmxon_pa set to a non-page aligned address. */
set_default_vmx_state(state, state_sz);
state->hdr.vmx.vmxon_pa = 1;
test_nested_state_expect_einval(vcpu, state);
/*
* It is invalid to have KVM_STATE_NESTED_SMM_GUEST_MODE and
* KVM_STATE_NESTED_GUEST_MODE set together.
*/
set_default_vmx_state(state, state_sz);
state->flags = KVM_STATE_NESTED_GUEST_MODE |
KVM_STATE_NESTED_RUN_PENDING;
state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
test_nested_state_expect_einval(vcpu, state);
/*
* It is invalid to have any of the SMM flags set besides:
* KVM_STATE_NESTED_SMM_GUEST_MODE
* KVM_STATE_NESTED_SMM_VMXON
*/
set_default_vmx_state(state, state_sz);
state->hdr.vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
KVM_STATE_NESTED_SMM_VMXON);
test_nested_state_expect_einval(vcpu, state);
/* Outside SMM, SMM flags must be zero. */
set_default_vmx_state(state, state_sz);
state->flags = 0;
state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
test_nested_state_expect_einval(vcpu, state);
/*
* Size must be large enough to fit kvm_nested_state and vmcs12
* if VMCS12 physical address is set
*/
set_default_vmx_state(state, state_sz);
state->size = sizeof(*state);
state->flags = 0;
test_nested_state_expect_einval(vcpu, state);
set_default_vmx_state(state, state_sz);
state->size = sizeof(*state);
state->flags = 0;
state->hdr.vmx.vmcs12_pa = -1;
test_nested_state(vcpu, state);
/*
* KVM_SET_NESTED_STATE succeeds with invalid VMCS
* contents but L2 not running.
*/
set_default_vmx_state(state, state_sz);
state->flags = 0;
test_nested_state(vcpu, state);
/* Invalid flags are rejected, even if no VMCS loaded. */
set_default_vmx_state(state, state_sz);
state->size = sizeof(*state);
state->flags = 0;
state->hdr.vmx.vmcs12_pa = -1;
state->hdr.vmx.flags = ~0;
test_nested_state_expect_einval(vcpu, state);
/* vmxon_pa cannot be the same address as vmcs_pa. */
set_default_vmx_state(state, state_sz);
state->hdr.vmx.vmxon_pa = 0;
state->hdr.vmx.vmcs12_pa = 0;
test_nested_state_expect_einval(vcpu, state);
/*
* Test that if we leave nesting the state reflects that when we get
* it again.
*/
set_default_vmx_state(state, state_sz);
state->hdr.vmx.vmxon_pa = -1ull;
state->hdr.vmx.vmcs12_pa = -1ull;
state->flags = 0;
test_nested_state(vcpu, state);
vcpu_nested_state_get(vcpu, state);
TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
"Size must be between %ld and %d. The size returned was %d.",
sizeof(*state), state_sz, state->size);
TEST_ASSERT(state->hdr.vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
TEST_ASSERT(state->hdr.vmx.vmcs12_pa == -1ull, "vmcs_pa must be -1ull.");
free(state);
}
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
struct kvm_nested_state state;
struct kvm_vcpu *vcpu;
have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
/*
* AMD currently does not implement set_nested_state, so for now we
* just early out.
*/
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
vm = vm_create_with_one_vcpu(&vcpu, NULL);
/*
* First run tests with VMX disabled to check error handling.
*/
vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_VMX);
/* Passing a NULL kvm_nested_state causes a EFAULT. */
test_nested_state_expect_efault(vcpu, NULL);
/* 'size' cannot be smaller than sizeof(kvm_nested_state). */
set_default_state(&state);
state.size = 0;
test_nested_state_expect_einval(vcpu, &state);
/*
* Setting the flags 0xf fails the flags check. The only flags that
* can be used are:
* KVM_STATE_NESTED_GUEST_MODE
* KVM_STATE_NESTED_RUN_PENDING
* KVM_STATE_NESTED_EVMCS
*/
set_default_state(&state);
state.flags = 0xf;
test_nested_state_expect_einval(vcpu, &state);
/*
* If KVM_STATE_NESTED_RUN_PENDING is set then
* KVM_STATE_NESTED_GUEST_MODE has to be set as well.
*/
set_default_state(&state);
state.flags = KVM_STATE_NESTED_RUN_PENDING;
test_nested_state_expect_einval(vcpu, &state);
test_vmx_nested_state(vcpu);
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021, Red Hat, Inc.
*
* Tests for Hyper-V clocksources
*/
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"
struct ms_hyperv_tsc_page {
volatile u32 tsc_sequence;
u32 reserved1;
volatile u64 tsc_scale;
volatile s64 tsc_offset;
} __packed;
/*
 * Simplified mul_u64_u64_shr(): returns the high 64 bits of the 128-bit
 * product a * b, i.e. (a * b) >> 64, accumulated from 32x32->64 partials.
 */
static inline u64 mul_u64_u64_shr64(u64 a, u64 b)
{
union {
u64 ll;
struct {
u32 low, high;
} l;
} rm, rn, rh, a0, b0;
u64 c;
a0.ll = a;
b0.ll = b;
rm.ll = (u64)a0.l.low * b0.l.high;
rn.ll = (u64)a0.l.high * b0.l.low;
rh.ll = (u64)a0.l.high * b0.l.high;
rh.l.low = c = rm.l.high + rn.l.high + rh.l.low;
rh.l.high = (c >> 32) + rh.l.high;
return rh.ll;
}
static inline void nop_loop(void)
{
int i;
for (i = 0; i < 100000000; i++)
asm volatile("nop");
}
static inline void check_tsc_msr_rdtsc(void)
{
u64 tsc_freq, r1, r2, t1, t2;
s64 delta_ns;
tsc_freq = rdmsr(HV_X64_MSR_TSC_FREQUENCY);
GUEST_ASSERT(tsc_freq > 0);
	/* For increased accuracy, take the mean of rdtsc() before and after rdmsr() */
r1 = rdtsc();
t1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
r1 = (r1 + rdtsc()) / 2;
nop_loop();
r2 = rdtsc();
t2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
r2 = (r2 + rdtsc()) / 2;
GUEST_ASSERT(r2 > r1 && t2 > t1);
/* HV_X64_MSR_TIME_REF_COUNT is in 100ns */
delta_ns = ((t2 - t1) * 100) - ((r2 - r1) * 1000000000 / tsc_freq);
if (delta_ns < 0)
delta_ns = -delta_ns;
/* 1% tolerance */
GUEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100);
}
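/*
 * Per the Hyper-V TLFS, the reference time (in 100ns units) is derived
 * from the TSC page as:
 *
 *   ReferenceTime = ((VirtualTsc * TscScale) >> 64) + TscOffset
 */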
static inline u64 get_tscpage_ts(struct ms_hyperv_tsc_page *tsc_page)
{
return mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
}
static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page)
{
u64 r1, r2, t1, t2;
/* Compare TSC page clocksource with HV_X64_MSR_TIME_REF_COUNT */
t1 = get_tscpage_ts(tsc_page);
r1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
/* 10 ms tolerance */
GUEST_ASSERT(r1 >= t1 && r1 - t1 < 100000);
nop_loop();
t2 = get_tscpage_ts(tsc_page);
r2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
	GUEST_ASSERT(r2 >= t2 && r2 - t2 < 100000);
}
static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_gpa)
{
u64 tsc_scale, tsc_offset;
/* Set Guest OS id to enable Hyper-V emulation */
GUEST_SYNC(1);
wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
GUEST_SYNC(2);
check_tsc_msr_rdtsc();
GUEST_SYNC(3);
	/* Set up the TSC page in disabled state, check that it's clean */
wrmsr(HV_X64_MSR_REFERENCE_TSC, tsc_page_gpa);
GUEST_ASSERT(tsc_page->tsc_sequence == 0);
GUEST_ASSERT(tsc_page->tsc_scale == 0);
GUEST_ASSERT(tsc_page->tsc_offset == 0);
GUEST_SYNC(4);
	/* Set up the TSC page in enabled state */
wrmsr(HV_X64_MSR_REFERENCE_TSC, tsc_page_gpa | 0x1);
GUEST_ASSERT(tsc_page->tsc_sequence != 0);
GUEST_SYNC(5);
check_tsc_msr_tsc_page(tsc_page);
GUEST_SYNC(6);
tsc_offset = tsc_page->tsc_offset;
/* Call KVM_SET_CLOCK from userspace, check that TSC page was updated */
GUEST_SYNC(7);
/* Sanity check TSC page timestamp, it should be close to 0 */
GUEST_ASSERT(get_tscpage_ts(tsc_page) < 100000);
GUEST_ASSERT(tsc_page->tsc_offset != tsc_offset);
nop_loop();
/*
* Enable Re-enlightenment and check that TSC page stays constant across
* KVM_SET_CLOCK.
*/
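	/* Bit 16 is the enable bit, bits 7:0 the notification vector (0xff). */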
wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0x1 << 16 | 0xff);
wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL, 0x1);
tsc_offset = tsc_page->tsc_offset;
tsc_scale = tsc_page->tsc_scale;
GUEST_SYNC(8);
GUEST_ASSERT(tsc_page->tsc_offset == tsc_offset);
GUEST_ASSERT(tsc_page->tsc_scale == tsc_scale);
GUEST_SYNC(9);
check_tsc_msr_tsc_page(tsc_page);
/*
* Disable re-enlightenment and TSC page, check that KVM doesn't update
* it anymore.
*/
wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
wrmsr(HV_X64_MSR_REFERENCE_TSC, 0);
memset(tsc_page, 0, sizeof(*tsc_page));
GUEST_SYNC(10);
GUEST_ASSERT(tsc_page->tsc_sequence == 0);
GUEST_ASSERT(tsc_page->tsc_offset == 0);
GUEST_ASSERT(tsc_page->tsc_scale == 0);
GUEST_DONE();
}
static void host_check_tsc_msr_rdtsc(struct kvm_vcpu *vcpu)
{
u64 tsc_freq, r1, r2, t1, t2;
s64 delta_ns;
tsc_freq = vcpu_get_msr(vcpu, HV_X64_MSR_TSC_FREQUENCY);
TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero");
	/* For increased accuracy, take the mean of rdtsc() before and after the ioctl */
r1 = rdtsc();
t1 = vcpu_get_msr(vcpu, HV_X64_MSR_TIME_REF_COUNT);
r1 = (r1 + rdtsc()) / 2;
nop_loop();
r2 = rdtsc();
t2 = vcpu_get_msr(vcpu, HV_X64_MSR_TIME_REF_COUNT);
r2 = (r2 + rdtsc()) / 2;
TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2);
/* HV_X64_MSR_TIME_REF_COUNT is in 100ns */
delta_ns = ((t2 - t1) * 100) - ((r2 - r1) * 1000000000 / tsc_freq);
if (delta_ns < 0)
delta_ns = -delta_ns;
/* 1% tolerance */
TEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100,
"Elapsed time does not match (MSR=%ld, TSC=%ld)",
(t2 - t1) * 100, (r2 - r1) * 1000000000 / tsc_freq);
}
int main(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
vm_vaddr_t tsc_page_gva;
int stage;
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
vcpu_set_hv_cpuid(vcpu);
tsc_page_gva = vm_vaddr_alloc_page(vm);
memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
"TSC page has to be page aligned\n");
vcpu_args_set(vcpu, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
host_check_tsc_msr_rdtsc(vcpu);
for (stage = 1;; stage++) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
case UCALL_DONE:
/* Keep in sync with guest_main() */
TEST_ASSERT(stage == 11, "Testing ended prematurely, stage %d\n",
stage);
goto out;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage,
"Stage %d: Unexpected register values vmexit, got %lx",
stage, (ulong)uc.args[1]);
/* Reset kvmclock triggering TSC page update */
if (stage == 7 || stage == 8 || stage == 10) {
struct kvm_clock_data clock = {0};
vm_ioctl(vm, KVM_SET_CLOCK, &clock);
}
}
out:
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/hyperv_clock.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KVM dirty page logging test
*
* Copyright (C) 2018, Red Hat, Inc.
*/
#define _GNU_SOURCE /* for program_invocation_name */
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX 1
#define TEST_MEM_PAGES 3
/* L1 guest test virtual memory offset */
#define GUEST_TEST_MEM 0xc0000000
/* L2 guest test virtual memory offset */
#define NESTED_TEST_MEM1 0xc0001000
#define NESTED_TEST_MEM2 0xc0002000
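/*
 * The boolean passed to GUEST_SYNC() below tells the host whether page 0
 * of the test memslot is expected to be dirty at that sync point.
 */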
static void l2_guest_code(void)
{
*(volatile uint64_t *)NESTED_TEST_MEM1;
*(volatile uint64_t *)NESTED_TEST_MEM1 = 1;
GUEST_SYNC(true);
GUEST_SYNC(false);
*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
GUEST_SYNC(true);
*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
GUEST_SYNC(true);
GUEST_SYNC(false);
/* Exit to L1 and never come back. */
vmcall();
}
void l1_guest_code(struct vmx_pages *vmx)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
GUEST_ASSERT(vmx->vmcs_gpa);
GUEST_ASSERT(prepare_for_vmx_operation(vmx));
GUEST_ASSERT(load_vmcs(vmx));
prepare_vmcs(vmx, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
GUEST_SYNC(false);
GUEST_ASSERT(!vmlaunch());
GUEST_SYNC(false);
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
GUEST_DONE();
}
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva = 0;
struct vmx_pages *vmx;
unsigned long *bmap;
uint64_t *host_test_mem;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
bool done = false;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
TEST_REQUIRE(kvm_cpu_has_ept());
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vcpu, 1, vmx_pages_gva);
/* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
GUEST_TEST_MEM,
TEST_MEM_SLOT_INDEX,
TEST_MEM_PAGES,
KVM_MEM_LOG_DIRTY_PAGES);
/*
* Add an identity map for GVA range [0xc0000000, 0xc0002000). This
* affects both L1 and L2. However...
*/
virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES);
/*
* ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
* 0xc0000000.
*
	 * Note that prepare_eptp should be called only after L1's GPA map is done,
* meaning after the last call to virt_map.
*/
prepare_eptp(vmx, vm, 0);
nested_map_memslot(vmx, vm, 0);
nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
bmap = bitmap_zalloc(TEST_MEM_PAGES);
host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
while (!done) {
memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
/*
* The nested guest wrote at offset 0x1000 in the memslot, but the
* dirty bitmap must be filled in according to L1 GPA, not L2.
*/
kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
if (uc.args[1]) {
TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean\n");
TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest\n");
} else {
TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty\n");
TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest\n");
}
TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty\n");
TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest\n");
TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty\n");
TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest\n");
break;
case UCALL_DONE:
done = true;
break;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
}
| linux-master | tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test for x86 KVM_SET_PMU_EVENT_FILTER.
*
* Copyright (C) 2022, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Verifies the expected behavior of allow lists and deny lists for
* virtual PMU events.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
/*
* In lieu of copying perf_event.h into tools...
*/
#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
/* End of stuff taken from perf_event.h. */
/* Oddly, this isn't in perf_event.h. */
#define ARCH_PERFMON_BRANCHES_RETIRED 5
#define NUM_BRANCHES 42
#define INTEL_PMC_IDX_FIXED 32
/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
#define MAX_FILTER_EVENTS 300
#define MAX_TEST_EVENTS 10
#define PMU_EVENT_FILTER_INVALID_ACTION (KVM_PMU_EVENT_DENY + 1)
#define PMU_EVENT_FILTER_INVALID_FLAGS (KVM_PMU_EVENT_FLAGS_VALID_MASK << 1)
#define PMU_EVENT_FILTER_INVALID_NEVENTS (MAX_FILTER_EVENTS + 1)
/*
* This is how the event selector and unit mask are stored in an AMD
* core performance event-select register. Intel's format is similar,
* but the event selector is only 8 bits.
*/
#define EVENT(select, umask) ((select & 0xf00UL) << 24 | (select & 0xff) | \
(umask & 0xff) << 8)
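/*
 * Worked example: EVENT(0x1C2, 0) yields 0x1000000c2, i.e. event select
 * bits [11:8] land at bits [35:32], bits [7:0] at bits [7:0], and the
 * unit mask occupies bits [15:8].
 */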
/*
* "Branch instructions retired", from the Intel SDM, volume 3,
* "Pre-defined Architectural Performance Events."
*/
#define INTEL_BR_RETIRED EVENT(0xc4, 0)
/*
* "Retired branch instructions", from Processor Programming Reference
* (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
* Preliminary Processor Programming Reference (PPR) for AMD Family
* 17h Model 31h, Revision B0 Processors, and Preliminary Processor
* Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
* B1 Processors Volume 1 of 2.
*/
#define AMD_ZEN_BR_RETIRED EVENT(0xc2, 0)
/*
* "Retired instructions", from Processor Programming Reference
* (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
* Preliminary Processor Programming Reference (PPR) for AMD Family
* 17h Model 31h, Revision B0 Processors, and Preliminary Processor
* Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
* B1 Processors Volume 1 of 2.
* --- and ---
* "Instructions retired", from the Intel SDM, volume 3,
* "Pre-defined Architectural Performance Events."
*/
#define INST_RETIRED EVENT(0xc0, 0)
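/*
 * Local mirror of the UAPI struct kvm_pmu_event_filter, but with a
 * fixed-size events array so that filter instances can be statically
 * initialized and copied; test_with_filter() casts it back to the UAPI
 * type before handing it to KVM.
 */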
struct __kvm_pmu_event_filter {
__u32 action;
__u32 nevents;
__u32 fixed_counter_bitmap;
__u32 flags;
__u32 pad[4];
__u64 events[MAX_FILTER_EVENTS];
};
/*
* This event list comprises Intel's eight architectural events plus
* AMD's "retired branch instructions" for Zen[123] (and possibly
* other AMD CPUs).
*/
static const struct __kvm_pmu_event_filter base_event_filter = {
.nevents = ARRAY_SIZE(base_event_filter.events),
.events = {
EVENT(0x3c, 0),
INST_RETIRED,
EVENT(0x3c, 1),
EVENT(0x2e, 0x4f),
EVENT(0x2e, 0x41),
EVENT(0xc4, 0),
EVENT(0xc5, 0),
EVENT(0xa4, 1),
AMD_ZEN_BR_RETIRED,
},
};
struct {
uint64_t loads;
uint64_t stores;
uint64_t loads_stores;
uint64_t branches_retired;
uint64_t instructions_retired;
} pmc_results;
/*
* If we encounter a #GP during the guest PMU sanity check, then the guest
 * PMU is not functional. Inform the hypervisor via GUEST_SYNC(-EFAULT).
*/
static void guest_gp_handler(struct ex_regs *regs)
{
GUEST_SYNC(-EFAULT);
}
/*
* Check that we can write a new value to the given MSR and read it back.
* The caller should provide a non-empty set of bits that are safe to flip.
*
 * Return on success. GUEST_SYNC(-EIO) on error.
*/
static void check_msr(uint32_t msr, uint64_t bits_to_flip)
{
uint64_t v = rdmsr(msr) ^ bits_to_flip;
wrmsr(msr, v);
if (rdmsr(msr) != v)
GUEST_SYNC(-EIO);
v ^= bits_to_flip;
wrmsr(msr, v);
if (rdmsr(msr) != v)
GUEST_SYNC(-EIO);
}
static void run_and_measure_loop(uint32_t msr_base)
{
const uint64_t branches_retired = rdmsr(msr_base + 0);
const uint64_t insn_retired = rdmsr(msr_base + 1);
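	/* "loop ." executes the LOOP branch NUM_BRANCHES times (ECX counts down). */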
__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
pmc_results.branches_retired = rdmsr(msr_base + 0) - branches_retired;
pmc_results.instructions_retired = rdmsr(msr_base + 1) - insn_retired;
}
static void intel_guest_code(void)
{
check_msr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
check_msr(MSR_P6_EVNTSEL0, 0xffff);
check_msr(MSR_IA32_PMC0, 0xffff);
GUEST_SYNC(0);
for (;;) {
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
wrmsr(MSR_P6_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED);
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
run_and_measure_loop(MSR_IA32_PMC0);
GUEST_SYNC(0);
}
}
/*
* To avoid needing a check for CPUID.80000001:ECX.PerfCtrExtCore[bit 23],
* this code uses the always-available, legacy K7 PMU MSRs, which alias to
* the first four of the six extended core PMU MSRs.
*/
static void amd_guest_code(void)
{
check_msr(MSR_K7_EVNTSEL0, 0xffff);
check_msr(MSR_K7_PERFCTR0, 0xffff);
GUEST_SYNC(0);
for (;;) {
wrmsr(MSR_K7_EVNTSEL0, 0);
wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
wrmsr(MSR_K7_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED);
run_and_measure_loop(MSR_K7_PERFCTR0);
GUEST_SYNC(0);
}
}
/*
* Run the VM to the next GUEST_SYNC(value), and return the value passed
* to the sync. Any other exit from the guest is fatal.
*/
static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)
{
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
get_ucall(vcpu, &uc);
TEST_ASSERT(uc.cmd == UCALL_SYNC,
"Received ucall other than UCALL_SYNC: %lu", uc.cmd);
return uc.args[1];
}
static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu)
{
uint64_t r;
memset(&pmc_results, 0, sizeof(pmc_results));
sync_global_to_guest(vcpu->vm, pmc_results);
r = run_vcpu_to_sync(vcpu);
TEST_ASSERT(!r, "Unexpected sync value: 0x%lx", r);
sync_global_from_guest(vcpu->vm, pmc_results);
}
/*
* In a nested environment or if the vPMU is disabled, the guest PMU
* might not work as architected (accessing the PMU MSRs may raise
* #GP, or writes could simply be discarded). In those situations,
* there is no point in running these tests. The guest code will perform
* a sanity check and then GUEST_SYNC(success). In the case of failure,
* the behavior of the guest on resumption is undefined.
*/
static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
{
uint64_t r;
vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
r = run_vcpu_to_sync(vcpu);
vm_install_exception_handler(vcpu->vm, GP_VECTOR, NULL);
return !r;
}
/*
* Remove the first occurrence of 'event' (if any) from the filter's
* event list.
*/
static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
{
bool found = false;
int i;
for (i = 0; i < f->nevents; i++) {
if (found)
f->events[i - 1] = f->events[i];
else
found = f->events[i] == event;
}
if (found)
f->nevents--;
}
#define ASSERT_PMC_COUNTING_INSTRUCTIONS() \
do { \
uint64_t br = pmc_results.branches_retired; \
uint64_t ir = pmc_results.instructions_retired; \
\
if (br && br != NUM_BRANCHES) \
pr_info("%s: Branch instructions retired = %lu (expected %u)\n", \
__func__, br, NUM_BRANCHES); \
TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)", \
__func__, br); \
TEST_ASSERT(ir, "%s: Instructions retired = %lu (expected > 0)", \
__func__, ir); \
} while (0)
#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS() \
do { \
uint64_t br = pmc_results.branches_retired; \
uint64_t ir = pmc_results.instructions_retired; \
\
TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)", \
__func__, br); \
TEST_ASSERT(!ir, "%s: Instructions retired = %lu (expected 0)", \
__func__, ir); \
} while (0)
static void test_without_filter(struct kvm_vcpu *vcpu)
{
run_vcpu_and_sync_pmc_results(vcpu);
ASSERT_PMC_COUNTING_INSTRUCTIONS();
}
static void test_with_filter(struct kvm_vcpu *vcpu,
struct __kvm_pmu_event_filter *__f)
{
struct kvm_pmu_event_filter *f = (void *)__f;
vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
run_vcpu_and_sync_pmc_results(vcpu);
}
static void test_amd_deny_list(struct kvm_vcpu *vcpu)
{
struct __kvm_pmu_event_filter f = {
.action = KVM_PMU_EVENT_DENY,
.nevents = 1,
.events = {
EVENT(0x1C2, 0),
},
};
test_with_filter(vcpu, &f);
ASSERT_PMC_COUNTING_INSTRUCTIONS();
}
static void test_member_deny_list(struct kvm_vcpu *vcpu)
{
struct __kvm_pmu_event_filter f = base_event_filter;
f.action = KVM_PMU_EVENT_DENY;
test_with_filter(vcpu, &f);
ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}
static void test_member_allow_list(struct kvm_vcpu *vcpu)
{
struct __kvm_pmu_event_filter f = base_event_filter;
f.action = KVM_PMU_EVENT_ALLOW;
test_with_filter(vcpu, &f);
ASSERT_PMC_COUNTING_INSTRUCTIONS();
}
static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
{
struct __kvm_pmu_event_filter f = base_event_filter;
f.action = KVM_PMU_EVENT_DENY;
remove_event(&f, INST_RETIRED);
remove_event(&f, INTEL_BR_RETIRED);
remove_event(&f, AMD_ZEN_BR_RETIRED);
test_with_filter(vcpu, &f);
ASSERT_PMC_COUNTING_INSTRUCTIONS();
}
static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
{
struct __kvm_pmu_event_filter f = base_event_filter;
f.action = KVM_PMU_EVENT_ALLOW;
remove_event(&f, INST_RETIRED);
remove_event(&f, INTEL_BR_RETIRED);
remove_event(&f, AMD_ZEN_BR_RETIRED);
test_with_filter(vcpu, &f);
ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}
/*
* Verify that setting KVM_PMU_CAP_DISABLE prevents the use of the PMU.
*
 * Note that KVM_CAP_PMU_CAPABILITY must be enabled prior to creating vCPUs.
*/
static void test_pmu_config_disable(void (*guest_code)(void))
{
struct kvm_vcpu *vcpu;
int r;
struct kvm_vm *vm;
r = kvm_check_cap(KVM_CAP_PMU_CAPABILITY);
if (!(r & KVM_PMU_CAP_DISABLE))
return;
vm = vm_create(1);
vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE);
vcpu = vm_vcpu_add(vm, 0, guest_code);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
TEST_ASSERT(!sanity_check_pmu(vcpu),
"Guest should not be able to use disabled PMU.");
kvm_vm_free(vm);
}
/*
* On Intel, check for a non-zero PMU version, at least one general-purpose
* counter per logical processor, and support for counting the number of branch
* instructions retired.
*/
static bool use_intel_pmu(void)
{
return host_cpu_is_intel &&
kvm_cpu_property(X86_PROPERTY_PMU_VERSION) &&
kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) &&
kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED);
}
static bool is_zen1(uint32_t family, uint32_t model)
{
return family == 0x17 && model <= 0x0f;
}
static bool is_zen2(uint32_t family, uint32_t model)
{
return family == 0x17 && model >= 0x30 && model <= 0x3f;
}
static bool is_zen3(uint32_t family, uint32_t model)
{
return family == 0x19 && model <= 0x0f;
}
/*
* Determining AMD support for a PMU event requires consulting the AMD
* PPR for the CPU or reference material derived therefrom. The AMD
* test code herein has been verified to work on Zen1, Zen2, and Zen3.
*
* Feel free to add more AMD CPUs that are documented to support event
* select 0xc2 umask 0 as "retired branch instructions."
*/
static bool use_amd_pmu(void)
{
uint32_t family = kvm_cpu_family();
uint32_t model = kvm_cpu_model();
return host_cpu_is_amd &&
(is_zen1(family, model) ||
is_zen2(family, model) ||
is_zen3(family, model));
}
/*
* "MEM_INST_RETIRED.ALL_LOADS", "MEM_INST_RETIRED.ALL_STORES", and
* "MEM_INST_RETIRED.ANY" from https://perfmon-events.intel.com/
* supported on Intel Xeon processors:
* - Sapphire Rapids, Ice Lake, Cascade Lake, Skylake.
*/
#define MEM_INST_RETIRED 0xD0
#define MEM_INST_RETIRED_LOAD EVENT(MEM_INST_RETIRED, 0x81)
#define MEM_INST_RETIRED_STORE EVENT(MEM_INST_RETIRED, 0x82)
#define MEM_INST_RETIRED_LOAD_STORE EVENT(MEM_INST_RETIRED, 0x83)
static bool supports_event_mem_inst_retired(void)
{
uint32_t eax, ebx, ecx, edx;
cpuid(1, &eax, &ebx, &ecx, &edx);
if (x86_family(eax) == 0x6) {
switch (x86_model(eax)) {
/* Sapphire Rapids */
case 0x8F:
/* Ice Lake */
case 0x6A:
/* Skylake */
/* Cascade Lake */
case 0x55:
return true;
}
}
return false;
}
/*
* "LS Dispatch", from Processor Programming Reference
* (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
* Preliminary Processor Programming Reference (PPR) for AMD Family
* 17h Model 31h, Revision B0 Processors, and Preliminary Processor
* Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
* B1 Processors Volume 1 of 2.
*/
#define LS_DISPATCH 0x29
#define LS_DISPATCH_LOAD EVENT(LS_DISPATCH, BIT(0))
#define LS_DISPATCH_STORE EVENT(LS_DISPATCH, BIT(1))
#define LS_DISPATCH_LOAD_STORE EVENT(LS_DISPATCH, BIT(2))
#define INCLUDE_MASKED_ENTRY(event_select, mask, match) \
KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, false)
#define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \
KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true)
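/*
 * Per KVM's masked-events ABI, a guest event matches a masked entry when
 * the event selects are equal and (unit_mask & mask) == match; a matching
 * EXCLUDE entry overrides an otherwise matching INCLUDE entry.
 */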
static void masked_events_guest_test(uint32_t msr_base)
{
/*
	 * The actual values of the counters don't determine the outcome of
	 * the test; only whether they are zero or non-zero matters.
*/
const uint64_t loads = rdmsr(msr_base + 0);
const uint64_t stores = rdmsr(msr_base + 1);
const uint64_t loads_stores = rdmsr(msr_base + 2);
int val;
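	/* movl $0 is a store, movl to %eax is a load, incl is a load + store. */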
__asm__ __volatile__("movl $0, %[v];"
"movl %[v], %%eax;"
"incl %[v];"
: [v]"+m"(val) :: "eax");
pmc_results.loads = rdmsr(msr_base + 0) - loads;
pmc_results.stores = rdmsr(msr_base + 1) - stores;
pmc_results.loads_stores = rdmsr(msr_base + 2) - loads_stores;
}
static void intel_masked_events_guest_code(void)
{
for (;;) {
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
wrmsr(MSR_P6_EVNTSEL0 + 0, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_LOAD);
wrmsr(MSR_P6_EVNTSEL0 + 1, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_STORE);
wrmsr(MSR_P6_EVNTSEL0 + 2, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_LOAD_STORE);
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x7);
masked_events_guest_test(MSR_IA32_PMC0);
GUEST_SYNC(0);
}
}
static void amd_masked_events_guest_code(void)
{
for (;;) {
wrmsr(MSR_K7_EVNTSEL0, 0);
wrmsr(MSR_K7_EVNTSEL1, 0);
wrmsr(MSR_K7_EVNTSEL2, 0);
wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD);
wrmsr(MSR_K7_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_STORE);
wrmsr(MSR_K7_EVNTSEL2, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD_STORE);
masked_events_guest_test(MSR_K7_PERFCTR0);
GUEST_SYNC(0);
}
}
static void run_masked_events_test(struct kvm_vcpu *vcpu,
const uint64_t masked_events[],
const int nmasked_events)
{
struct __kvm_pmu_event_filter f = {
.nevents = nmasked_events,
.action = KVM_PMU_EVENT_ALLOW,
.flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
};
memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events);
test_with_filter(vcpu, &f);
}
#define ALLOW_LOADS BIT(0)
#define ALLOW_STORES BIT(1)
#define ALLOW_LOADS_STORES BIT(2)
struct masked_events_test {
uint64_t intel_events[MAX_TEST_EVENTS];
uint64_t intel_event_end;
uint64_t amd_events[MAX_TEST_EVENTS];
uint64_t amd_event_end;
const char *msg;
uint32_t flags;
};
/*
* These are the test cases for the masked events tests.
*
* For each test, the guest enables 3 PMU counters (loads, stores,
* loads + stores). The filter is then set in KVM with the masked events
* provided. The test then verifies that the counters agree with which
* ones should be counting and which ones should be filtered.
*/
const struct masked_events_test test_cases[] = {
{
.intel_events = {
INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x81),
},
.amd_events = {
INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(0)),
},
.msg = "Only allow loads.",
.flags = ALLOW_LOADS,
}, {
.intel_events = {
INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x82),
},
.amd_events = {
INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(1)),
},
.msg = "Only allow stores.",
.flags = ALLOW_STORES,
}, {
.intel_events = {
INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x83),
},
.amd_events = {
INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(2)),
},
.msg = "Only allow loads + stores.",
.flags = ALLOW_LOADS_STORES,
}, {
.intel_events = {
INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0),
EXCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x83),
},
.amd_events = {
INCLUDE_MASKED_ENTRY(LS_DISPATCH, ~(BIT(0) | BIT(1)), 0),
},
.msg = "Only allow loads and stores.",
.flags = ALLOW_LOADS | ALLOW_STORES,
}, {
.intel_events = {
INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0),
EXCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x82),
},
.amd_events = {
INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0),
EXCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(1)),
},
.msg = "Only allow loads and loads + stores.",
.flags = ALLOW_LOADS | ALLOW_LOADS_STORES
}, {
.intel_events = {
INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFE, 0x82),
},
.amd_events = {
INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0),
EXCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(0)),
},
.msg = "Only allow stores and loads + stores.",
.flags = ALLOW_STORES | ALLOW_LOADS_STORES
}, {
.intel_events = {
INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0),
},
.amd_events = {
INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0),
},
.msg = "Only allow loads, stores, and loads + stores.",
.flags = ALLOW_LOADS | ALLOW_STORES | ALLOW_LOADS_STORES
},
};
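/*
 * Append the current vendor's events for 'test' to 'events'.  The event
 * arrays are zero-terminated via the intel_event_end/amd_event_end
 * sentinels; the asserts in run_masked_events_tests() verify the sentinels
 * are still zero, i.e. that no test case overflowed MAX_TEST_EVENTS.
 */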
static int append_test_events(const struct masked_events_test *test,
uint64_t *events, int nevents)
{
const uint64_t *evts;
int i;
evts = use_intel_pmu() ? test->intel_events : test->amd_events;
for (i = 0; i < MAX_TEST_EVENTS; i++) {
if (evts[i] == 0)
break;
events[nevents + i] = evts[i];
}
return nevents + i;
}
static bool bool_eq(bool a, bool b)
{
return a == b;
}
static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events,
int nevents)
{
int ntests = ARRAY_SIZE(test_cases);
int i, n;
for (i = 0; i < ntests; i++) {
const struct masked_events_test *test = &test_cases[i];
/* Do any test case events overflow MAX_TEST_EVENTS? */
assert(test->intel_event_end == 0);
assert(test->amd_event_end == 0);
n = append_test_events(test, events, nevents);
run_masked_events_test(vcpu, events, n);
TEST_ASSERT(bool_eq(pmc_results.loads, test->flags & ALLOW_LOADS) &&
bool_eq(pmc_results.stores, test->flags & ALLOW_STORES) &&
bool_eq(pmc_results.loads_stores,
test->flags & ALLOW_LOADS_STORES),
"%s loads: %lu, stores: %lu, loads + stores: %lu",
test->msg, pmc_results.loads, pmc_results.stores,
pmc_results.loads_stores);
}
}
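/*
 * Pad the events array with filler entries to exercise a dense filter.
 * Bump the event select whenever it would collide with the real test
 * events so that the dummy entries never match the loads/stores being
 * counted.
 */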
static void add_dummy_events(uint64_t *events, int nevents)
{
int i;
for (i = 0; i < nevents; i++) {
int event_select = i % 0xFF;
bool exclude = ((i % 4) == 0);
if (event_select == MEM_INST_RETIRED ||
event_select == LS_DISPATCH)
event_select++;
events[i] = KVM_PMU_ENCODE_MASKED_ENTRY(event_select, 0,
0, exclude);
}
}
static void test_masked_events(struct kvm_vcpu *vcpu)
{
int nevents = MAX_FILTER_EVENTS - MAX_TEST_EVENTS;
uint64_t events[MAX_FILTER_EVENTS];
/* Run the test cases against a sparse PMU event filter. */
run_masked_events_tests(vcpu, events, 0);
/* Run the test cases against a dense PMU event filter. */
add_dummy_events(events, MAX_FILTER_EVENTS);
run_masked_events_tests(vcpu, events, nevents);
}
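/*
 * The selftest's __kvm_pmu_event_filter mirrors the variable-length UAPI
 * struct kvm_pmu_event_filter, but with a fixed-size events array, hence
 * the cast below.
 */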
static int set_pmu_event_filter(struct kvm_vcpu *vcpu,
struct __kvm_pmu_event_filter *__f)
{
struct kvm_pmu_event_filter *f = (void *)__f;
return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
}
static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
uint32_t flags, uint32_t action)
{
struct __kvm_pmu_event_filter f = {
.nevents = 1,
.flags = flags,
.action = action,
.events = {
event,
},
};
return set_pmu_event_filter(vcpu, &f);
}
static void test_filter_ioctl(struct kvm_vcpu *vcpu)
{
uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
struct __kvm_pmu_event_filter f;
uint64_t e = ~0ul;
int r;
	/*
	 * Unfortunately, when flags == 0, an event with invalid bits set
	 * (bits other than eventsel+umask) is expected to be accepted.
	 */
r = set_pmu_single_event_filter(vcpu, e, 0, KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
r = set_pmu_single_event_filter(vcpu, e,
KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");
e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
r = set_pmu_single_event_filter(vcpu, e,
KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
f = base_event_filter;
f.action = PMU_EVENT_FILTER_INVALID_ACTION;
r = set_pmu_event_filter(vcpu, &f);
TEST_ASSERT(r, "Set invalid action is expected to fail");
f = base_event_filter;
f.flags = PMU_EVENT_FILTER_INVALID_FLAGS;
r = set_pmu_event_filter(vcpu, &f);
TEST_ASSERT(r, "Set invalid flags is expected to fail");
f = base_event_filter;
f.nevents = PMU_EVENT_FILTER_INVALID_NEVENTS;
r = set_pmu_event_filter(vcpu, &f);
TEST_ASSERT(r, "Exceeding the max number of filter events should fail");
f = base_event_filter;
f.fixed_counter_bitmap = ~GENMASK_ULL(nr_fixed_counters, 0);
r = set_pmu_event_filter(vcpu, &f);
TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
}
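/*
 * Each fixed counter has a 4-bit control field in
 * MSR_CORE_PERF_FIXED_CTR_CTRL; bit 0 of a field (OS_EN) enables counting
 * at CPL0, hence the BIT_ULL(4 * fixed_ctr_idx) write below.
 */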
static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
{
for (;;) {
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0);
/* Only OS_EN bit is enabled for fixed counter[idx]. */
wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx));
}
}
static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
uint32_t action, uint32_t bitmap)
{
struct __kvm_pmu_event_filter f = {
.action = action,
.fixed_counter_bitmap = bitmap,
};
set_pmu_event_filter(vcpu, &f);
return run_vcpu_to_sync(vcpu);
}
static uint64_t test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu,
uint32_t action,
uint32_t bitmap)
{
struct __kvm_pmu_event_filter f = base_event_filter;
f.action = action;
f.fixed_counter_bitmap = bitmap;
set_pmu_event_filter(vcpu, &f);
return run_vcpu_to_sync(vcpu);
}
static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
uint8_t nr_fixed_counters)
{
unsigned int i;
uint32_t bitmap;
uint64_t count;
TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8,
"Invalid nr_fixed_counters");
/*
* Check the fixed performance counter can count normally when KVM
* userspace doesn't set any pmu filter.
*/
count = run_vcpu_to_sync(vcpu);
TEST_ASSERT(count, "Unexpected count value: %ld\n", count);
for (i = 0; i < BIT(nr_fixed_counters); i++) {
bitmap = BIT(i);
count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_ALLOW,
bitmap);
TEST_ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));
count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY,
bitmap);
TEST_ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
/*
* Check that fixed_counter_bitmap has higher priority than
* events[] when both are set.
*/
count = test_set_gp_and_fixed_event_filter(vcpu,
KVM_PMU_EVENT_ALLOW,
bitmap);
TEST_ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));
count = test_set_gp_and_fixed_event_filter(vcpu,
KVM_PMU_EVENT_DENY,
bitmap);
TEST_ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
}
}
static void test_fixed_counter_bitmap(void)
{
uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
uint8_t idx;
/*
* Check that pmu_event_filter works as expected when it's applied to
* fixed performance counters.
*/
for (idx = 0; idx < nr_fixed_counters; idx++) {
vm = vm_create_with_one_vcpu(&vcpu,
intel_run_fixed_counter_guest_code);
vcpu_args_set(vcpu, 1, idx);
__test_fixed_counter_bitmap(vcpu, idx, nr_fixed_counters);
kvm_vm_free(vm);
}
}
int main(int argc, char *argv[])
{
void (*guest_code)(void);
struct kvm_vcpu *vcpu, *vcpu2 = NULL;
struct kvm_vm *vm;
TEST_REQUIRE(get_kvm_param_bool("enable_pmu"));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_FILTER));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_MASKED_EVENTS));
TEST_REQUIRE(use_intel_pmu() || use_amd_pmu());
guest_code = use_intel_pmu() ? intel_guest_code : amd_guest_code;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
TEST_REQUIRE(sanity_check_pmu(vcpu));
if (use_amd_pmu())
test_amd_deny_list(vcpu);
test_without_filter(vcpu);
test_member_deny_list(vcpu);
test_member_allow_list(vcpu);
test_not_member_deny_list(vcpu);
test_not_member_allow_list(vcpu);
if (use_intel_pmu() &&
supports_event_mem_inst_retired() &&
kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) >= 3)
vcpu2 = vm_vcpu_add(vm, 2, intel_masked_events_guest_code);
else if (use_amd_pmu())
vcpu2 = vm_vcpu_add(vm, 2, amd_masked_events_guest_code);
if (vcpu2)
test_masked_events(vcpu2);
test_filter_ioctl(vcpu);
kvm_vm_free(vm);
test_pmu_config_disable(guest_code);
test_fixed_counter_bitmap();
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test Hyper-V extended hypercall, HV_EXT_CALL_QUERY_CAPABILITIES (0x8001),
* exit to userspace and receive result in guest.
*
* Negative tests are present in hyperv_features.c
*
* Copyright 2022 Google LLC
* Author: Vipin Sharma <[email protected]>
*/
#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"
/* Any value is fine */
#define EXT_CAPABILITIES 0xbull
static void guest_code(vm_paddr_t in_pg_gpa, vm_paddr_t out_pg_gpa,
vm_vaddr_t out_pg_gva)
{
uint64_t *output_gva;
wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
wrmsr(HV_X64_MSR_HYPERCALL, in_pg_gpa);
output_gva = (uint64_t *)out_pg_gva;
hyperv_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, in_pg_gpa, out_pg_gpa);
/* TLFS states output will be a uint64_t value */
GUEST_ASSERT_EQ(*output_gva, EXT_CAPABILITIES);
GUEST_DONE();
}
int main(void)
{
vm_vaddr_t hcall_out_page;
vm_vaddr_t hcall_in_page;
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
uint64_t *outval;
struct ucall uc;
/* Verify if extended hypercalls are supported */
if (!kvm_cpuid_has(kvm_get_supported_hv_cpuid(),
HV_ENABLE_EXTENDED_HYPERCALLS)) {
print_skip("Extended calls not supported by the kernel");
exit(KSFT_SKIP);
}
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
vcpu_set_hv_cpuid(vcpu);
/* Hypercall input */
hcall_in_page = vm_vaddr_alloc_pages(vm, 1);
memset(addr_gva2hva(vm, hcall_in_page), 0x0, vm->page_size);
/* Hypercall output */
hcall_out_page = vm_vaddr_alloc_pages(vm, 1);
memset(addr_gva2hva(vm, hcall_out_page), 0x0, vm->page_size);
vcpu_args_set(vcpu, 3, addr_gva2gpa(vm, hcall_in_page),
addr_gva2gpa(vm, hcall_out_page), hcall_out_page);
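	/*
	 * The extended hypercall exits to userspace (KVM_EXIT_HYPERV);
	 * emulate it below by writing EXT_CAPABILITIES to the guest's output
	 * page and setting the hypercall result before re-entering the guest.
	 */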
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_HYPERV,
"Unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
outval = addr_gpa2hva(vm, run->hyperv.u.hcall.params[1]);
*outval = EXT_CAPABILITIES;
run->hyperv.u.hcall.result = HV_STATUS_SUCCESS;
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
break;
default:
TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
}
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CR4 and CPUID sync test
*
* Copyright 2018, Red Hat, Inc. and/or its affiliates.
*
* Author:
* Wei Huang <[email protected]>
*/
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
static inline bool cr4_cpuid_is_sync(void)
{
uint64_t cr4 = get_cr4();
return (this_cpu_has(X86_FEATURE_OSXSAVE) == !!(cr4 & X86_CR4_OSXSAVE));
}
static void guest_code(void)
{
uint64_t cr4;
/* turn on CR4.OSXSAVE */
cr4 = get_cr4();
cr4 |= X86_CR4_OSXSAVE;
set_cr4(cr4);
/* verify CR4.OSXSAVE == CPUID.OSXSAVE */
GUEST_ASSERT(cr4_cpuid_is_sync());
/* notify hypervisor to change CR4 */
GUEST_SYNC(0);
/* check again */
GUEST_ASSERT(cr4_cpuid_is_sync());
GUEST_DONE();
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_sregs sregs;
struct ucall uc;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
while (1) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
/* emulate hypervisor clearing CR4.OSXSAVE */
vcpu_sregs_get(vcpu, &sregs);
sregs.cr4 &= ~X86_CR4_OSXSAVE;
vcpu_sregs_set(vcpu, &sregs);
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
done:
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019, Google LLC.
*
* Tests for the IA32_XSS MSR.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"
#define MSR_BITS 64
int main(int argc, char *argv[])
{
bool xss_in_msr_list;
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
uint64_t xss_val;
int i, r;
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, NULL);
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVES));
xss_val = vcpu_get_msr(vcpu, MSR_IA32_XSS);
TEST_ASSERT(xss_val == 0,
"MSR_IA32_XSS should be initialized to zero\n");
vcpu_set_msr(vcpu, MSR_IA32_XSS, xss_val);
/*
* At present, KVM only supports a guest IA32_XSS value of 0. Verify
* that trying to set the guest IA32_XSS to an unsupported value fails.
	 * Also, in the future, when a non-zero value succeeds, check that
	 * IA32_XSS is in the list of MSRs to save/restore.
*/
xss_in_msr_list = kvm_msr_is_in_save_restore_list(MSR_IA32_XSS);
for (i = 0; i < MSR_BITS; ++i) {
r = _vcpu_set_msr(vcpu, MSR_IA32_XSS, 1ull << i);
/*
* Setting a list of MSRs returns the entry that "faulted", or
* the last entry +1 if all MSRs were successfully written.
*/
TEST_ASSERT(!r || r == 1, KVM_IOCTL_ERROR(KVM_SET_MSRS, r));
TEST_ASSERT(r != 1 || xss_in_msr_list,
"IA32_XSS was able to be set, but was not in save/restore list");
}
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/xss_msr_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vmx_close_while_nested
*
* Copyright (C) 2019, Red Hat, Inc.
*
* Verify that nothing bad happens if a KVM user exits with open
* file descriptors while executing a nested guest.
*/
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include <string.h>
#include <sys/ioctl.h>
#include "kselftest.h"
enum {
PORT_L0_EXIT = 0x2000,
};
static void l2_guest_code(void)
{
/* Exit to L0 */
asm volatile("inb %%dx, %%al"
: : [port] "d" (PORT_L0_EXIT) : "rax");
}
static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
GUEST_ASSERT(load_vmcs(vmx_pages));
/* Prepare the VMCS for L2 execution. */
prepare_vmcs(vmx_pages, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(0);
}
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
/* Allocate VMX pages and shared descriptors (vmx_pages). */
vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vcpu, 1, vmx_pages_gva);
for (;;) {
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
if (run->io.port == PORT_L0_EXIT)
break;
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
}
| linux-master | tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hyper-V HvFlushVirtualAddress{List,Space}{,Ex} tests
*
* Copyright (C) 2022, Red Hat, Inc.
*
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <asm/barrier.h>
#include <pthread.h>
#include <inttypes.h>
#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"
#include "test_util.h"
#include "vmx.h"
#define WORKER_VCPU_ID_1 2
#define WORKER_VCPU_ID_2 65
#define NTRY 100
#define NTEST_PAGES 2
struct hv_vpset {
u64 format;
u64 valid_bank_mask;
u64 bank_contents[];
};
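/*
 * With the sparse 4K format, valid_bank_mask selects 64-VP "banks" and
 * bank_contents holds one u64 bitmap per set bank, hence the
 * BIT_ULL(vcpu_id / 64) / BIT_ULL(vcpu_id % 64) computations below.
 */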
enum HV_GENERIC_SET_FORMAT {
HV_GENERIC_SET_SPARSE_4K,
HV_GENERIC_SET_ALL,
};
#define HV_FLUSH_ALL_PROCESSORS BIT(0)
#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
struct hv_tlb_flush {
u64 address_space;
u64 flags;
u64 processor_mask;
u64 gva_list[];
} __packed;
/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
struct hv_tlb_flush_ex {
u64 address_space;
u64 flags;
struct hv_vpset hv_vp_set;
u64 gva_list[];
} __packed;
/*
* Pass the following info to 'workers' and 'sender'
* - Hypercall page's GVA
* - Hypercall page's GPA
* - Test pages GVA
* - GVAs of the test pages' PTEs
*/
struct test_data {
vm_vaddr_t hcall_gva;
vm_paddr_t hcall_gpa;
vm_vaddr_t test_pages;
vm_vaddr_t test_pages_pte[NTEST_PAGES];
};
/* 'Worker' vCPU code checking the contents of the test page */
static void worker_guest_code(vm_vaddr_t test_data)
{
struct test_data *data = (struct test_data *)test_data;
u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
void *exp_page = (void *)data->test_pages + PAGE_SIZE * NTEST_PAGES;
u64 *this_cpu = (u64 *)(exp_page + vcpu_id * sizeof(u64));
u64 expected, val;
x2apic_enable();
wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
for (;;) {
cpu_relax();
expected = READ_ONCE(*this_cpu);
/*
* Make sure the value in the test page is read after reading
* the expectation for the first time. Pairs with wmb() in
* prepare_to_test().
*/
rmb();
val = READ_ONCE(*(u64 *)data->test_pages);
/*
		 * Make sure the value in the test page is read before reading
		 * the expectation for the second time. Pairs with wmb() in
		 * post_test().
*/
rmb();
/*
* '0' indicates the sender is between iterations, wait until
* the sender is ready for this vCPU to start checking again.
*/
if (!expected)
continue;
/*
* Re-read the per-vCPU byte to ensure the sender didn't move
* onto a new iteration.
*/
if (expected != READ_ONCE(*this_cpu))
continue;
GUEST_ASSERT(val == expected);
}
}
/*
* Write per-CPU info indicating what each 'worker' CPU is supposed to see in
* test page. '0' means don't check.
*/
static void set_expected_val(void *addr, u64 val, int vcpu_id)
{
void *exp_page = addr + PAGE_SIZE * NTEST_PAGES;
*(u64 *)(exp_page + vcpu_id * sizeof(u64)) = val;
}
/*
* Update PTEs swapping two test pages.
* TODO: use swap()/xchg() when these are provided.
*/
static void swap_two_test_pages(vm_paddr_t pte_gva1, vm_paddr_t pte_gva2)
{
uint64_t tmp = *(uint64_t *)pte_gva1;
*(uint64_t *)pte_gva1 = *(uint64_t *)pte_gva2;
*(uint64_t *)pte_gva2 = tmp;
}
/*
* TODO: replace the silly NOP loop with a proper udelay() implementation.
*/
static inline void do_delay(void)
{
int i;
for (i = 0; i < 1000000; i++)
asm volatile("nop");
}
/*
* Prepare to test: 'disable' workers by setting the expectation to '0',
* clear hypercall input page and then swap two test pages.
*/
static inline void prepare_to_test(struct test_data *data)
{
/* Clear hypercall input page */
memset((void *)data->hcall_gva, 0, PAGE_SIZE);
/* 'Disable' workers */
set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_1);
set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_2);
/* Make sure workers are 'disabled' before we swap PTEs. */
wmb();
/* Make sure workers have enough time to notice */
do_delay();
/* Swap test page mappings */
swap_two_test_pages(data->test_pages_pte[0], data->test_pages_pte[1]);
}
/*
 * Finalize the test: check the hypercall result, set the expected val for
 * 'worker' CPUs and give them some time to test.
*/
static inline void post_test(struct test_data *data, u64 exp1, u64 exp2)
{
/* Make sure we change the expectation after swapping PTEs */
wmb();
/* Set the expectation for workers, '0' means don't test */
set_expected_val((void *)data->test_pages, exp1, WORKER_VCPU_ID_1);
set_expected_val((void *)data->test_pages, exp2, WORKER_VCPU_ID_2);
/* Make sure workers have enough time to test */
do_delay();
}
#define TESTVAL1 0x0101010101010101
#define TESTVAL2 0x0202020202020202
/* Main vCPU doing the test */
static void sender_guest_code(vm_vaddr_t test_data)
{
struct test_data *data = (struct test_data *)test_data;
struct hv_tlb_flush *flush = (struct hv_tlb_flush *)data->hcall_gva;
struct hv_tlb_flush_ex *flush_ex = (struct hv_tlb_flush_ex *)data->hcall_gva;
vm_paddr_t hcall_gpa = data->hcall_gpa;
int i, stage = 1;
wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
wrmsr(HV_X64_MSR_HYPERCALL, data->hcall_gpa);
/* "Slow" hypercalls */
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
flush->processor_mask = BIT(WORKER_VCPU_ID_1);
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, hcall_gpa,
hcall_gpa + PAGE_SIZE);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
flush->processor_mask = BIT(WORKER_VCPU_ID_1);
flush->gva_list[0] = (u64)data->test_pages;
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
(1UL << HV_HYPERCALL_REP_COMP_OFFSET),
hcall_gpa, hcall_gpa + PAGE_SIZE);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
HV_FLUSH_ALL_PROCESSORS;
flush->processor_mask = 0;
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, hcall_gpa,
hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
HV_FLUSH_ALL_PROCESSORS;
flush->gva_list[0] = (u64)data->test_pages;
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
(1UL << HV_HYPERCALL_REP_COMP_OFFSET),
hcall_gpa, hcall_gpa + PAGE_SIZE);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
(1 << HV_HYPERCALL_VARHEAD_OFFSET),
hcall_gpa, hcall_gpa + PAGE_SIZE);
post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
/* bank_contents and gva_list occupy the same space, thus [1] */
flush_ex->gva_list[1] = (u64)data->test_pages;
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
(1 << HV_HYPERCALL_VARHEAD_OFFSET) |
(1UL << HV_HYPERCALL_REP_COMP_OFFSET),
hcall_gpa, hcall_gpa + PAGE_SIZE);
post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
BIT_ULL(WORKER_VCPU_ID_1 / 64);
flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
(2 << HV_HYPERCALL_VARHEAD_OFFSET),
hcall_gpa, hcall_gpa + PAGE_SIZE);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
BIT_ULL(WORKER_VCPU_ID_2 / 64);
flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
/* bank_contents and gva_list occupy the same space, thus [2] */
flush_ex->gva_list[2] = (u64)data->test_pages;
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
(2 << HV_HYPERCALL_VARHEAD_OFFSET) |
(1UL << HV_HYPERCALL_REP_COMP_OFFSET),
hcall_gpa, hcall_gpa + PAGE_SIZE);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
hcall_gpa, hcall_gpa + PAGE_SIZE);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
flush_ex->gva_list[0] = (u64)data->test_pages;
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
(1UL << HV_HYPERCALL_REP_COMP_OFFSET),
hcall_gpa, hcall_gpa + PAGE_SIZE);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
i % 2 ? TESTVAL1 : TESTVAL2);
}
/* "Fast" hypercalls */
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush->processor_mask = BIT(WORKER_VCPU_ID_1);
hyperv_write_xmm_input(&flush->processor_mask, 1);
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
HV_HYPERCALL_FAST_BIT, 0x0,
HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush->processor_mask = BIT(WORKER_VCPU_ID_1);
flush->gva_list[0] = (u64)data->test_pages;
hyperv_write_xmm_input(&flush->processor_mask, 1);
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
HV_HYPERCALL_FAST_BIT |
(1UL << HV_HYPERCALL_REP_COMP_OFFSET),
0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
hyperv_write_xmm_input(&flush->processor_mask, 1);
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
HV_HYPERCALL_FAST_BIT, 0x0,
HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
HV_FLUSH_ALL_PROCESSORS);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush->gva_list[0] = (u64)data->test_pages;
hyperv_write_xmm_input(&flush->processor_mask, 1);
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
HV_HYPERCALL_FAST_BIT |
(1UL << HV_HYPERCALL_REP_COMP_OFFSET), 0x0,
HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
HV_FLUSH_ALL_PROCESSORS);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
HV_HYPERCALL_FAST_BIT |
(1 << HV_HYPERCALL_VARHEAD_OFFSET),
0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
/* bank_contents and gva_list occupy the same space, thus [1] */
flush_ex->gva_list[1] = (u64)data->test_pages;
hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
HV_HYPERCALL_FAST_BIT |
(1 << HV_HYPERCALL_VARHEAD_OFFSET) |
(1UL << HV_HYPERCALL_REP_COMP_OFFSET),
0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
BIT_ULL(WORKER_VCPU_ID_1 / 64);
flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
HV_HYPERCALL_FAST_BIT |
(2 << HV_HYPERCALL_VARHEAD_OFFSET),
0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
BIT_ULL(WORKER_VCPU_ID_2 / 64);
flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
/* bank_contents and gva_list occupy the same space, thus [2] */
flush_ex->gva_list[2] = (u64)data->test_pages;
hyperv_write_xmm_input(&flush_ex->hv_vp_set, 3);
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
HV_HYPERCALL_FAST_BIT |
(2 << HV_HYPERCALL_VARHEAD_OFFSET) |
(1UL << HV_HYPERCALL_REP_COMP_OFFSET),
0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
HV_HYPERCALL_FAST_BIT,
0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_SYNC(stage++);
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
for (i = 0; i < NTRY; i++) {
prepare_to_test(data);
flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
flush_ex->gva_list[0] = (u64)data->test_pages;
hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
HV_HYPERCALL_FAST_BIT |
(1UL << HV_HYPERCALL_REP_COMP_OFFSET),
0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
i % 2 ? TESTVAL1 : TESTVAL2);
}
GUEST_DONE();
}
static void *vcpu_thread(void *arg)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *)arg;
struct ucall uc;
int old;
int r;
r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
vcpu->id, r);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
default:
TEST_FAIL("Unexpected ucall %lu, vCPU %d", uc.cmd, vcpu->id);
}
return NULL;
}
static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
{
void *retval;
int r;
r = pthread_cancel(thread);
TEST_ASSERT(!r, "pthread_cancel on vcpu_id=%d failed with errno=%d",
vcpu->id, r);
r = pthread_join(thread, &retval);
TEST_ASSERT(!r, "pthread_join on vcpu_id=%d failed with errno=%d",
vcpu->id, r);
TEST_ASSERT(retval == PTHREAD_CANCELED,
"expected retval=%p, got %p", PTHREAD_CANCELED,
retval);
}
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
struct kvm_vcpu *vcpu[3];
pthread_t threads[2];
vm_vaddr_t test_data_page, gva;
vm_paddr_t gpa;
uint64_t *pte;
struct test_data *data;
struct ucall uc;
int stage = 1, r, i;
vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);
/* Test data page */
test_data_page = vm_vaddr_alloc_page(vm);
data = (struct test_data *)addr_gva2hva(vm, test_data_page);
/* Hypercall input/output */
data->hcall_gva = vm_vaddr_alloc_pages(vm, 2);
data->hcall_gpa = addr_gva2gpa(vm, data->hcall_gva);
memset(addr_gva2hva(vm, data->hcall_gva), 0x0, 2 * PAGE_SIZE);
/*
* Test pages: the first one is filled with '0x01's, the second with '0x02's
* and the test will swap their mappings. The third page keeps the indication
* about the current state of mappings.
*/
data->test_pages = vm_vaddr_alloc_pages(vm, NTEST_PAGES + 1);
for (i = 0; i < NTEST_PAGES; i++)
memset(addr_gva2hva(vm, data->test_pages + PAGE_SIZE * i),
(u8)(i + 1), PAGE_SIZE);
set_expected_val(addr_gva2hva(vm, data->test_pages), 0x0, WORKER_VCPU_ID_1);
set_expected_val(addr_gva2hva(vm, data->test_pages), 0x0, WORKER_VCPU_ID_2);
/*
* Get PTE pointers for test pages and map them inside the guest.
* Use separate page for each PTE for simplicity.
*/
gva = vm_vaddr_unused_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR);
for (i = 0; i < NTEST_PAGES; i++) {
pte = vm_get_page_table_entry(vm, data->test_pages + i * PAGE_SIZE);
gpa = addr_hva2gpa(vm, pte);
__virt_pg_map(vm, gva + PAGE_SIZE * i, gpa & PAGE_MASK, PG_LEVEL_4K);
data->test_pages_pte[i] = gva + (gpa & ~PAGE_MASK);
}
/*
* Sender vCPU which performs the test: swaps test pages, sets expectation
* for 'workers' and issues TLB flush hypercalls.
*/
vcpu_args_set(vcpu[0], 1, test_data_page);
vcpu_set_hv_cpuid(vcpu[0]);
/* Create worker vCPUs which check the contents of the test pages */
vcpu[1] = vm_vcpu_add(vm, WORKER_VCPU_ID_1, worker_guest_code);
vcpu_args_set(vcpu[1], 1, test_data_page);
vcpu_set_msr(vcpu[1], HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_1);
vcpu_set_hv_cpuid(vcpu[1]);
vcpu[2] = vm_vcpu_add(vm, WORKER_VCPU_ID_2, worker_guest_code);
vcpu_args_set(vcpu[2], 1, test_data_page);
vcpu_set_msr(vcpu[2], HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_2);
vcpu_set_hv_cpuid(vcpu[2]);
r = pthread_create(&threads[0], NULL, vcpu_thread, vcpu[1]);
TEST_ASSERT(!r, "pthread_create() failed");
r = pthread_create(&threads[1], NULL, vcpu_thread, vcpu[2]);
TEST_ASSERT(!r, "pthread_create() failed");
while (true) {
vcpu_run(vcpu[0]);
TEST_ASSERT_KVM_EXIT_REASON(vcpu[0], KVM_EXIT_IO);
switch (get_ucall(vcpu[0], &uc)) {
case UCALL_SYNC:
TEST_ASSERT(uc.args[1] == stage,
"Unexpected stage: %ld (%d expected)\n",
uc.args[1], stage);
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
stage++;
}
done:
cancel_join_vcpu_thread(threads[0], vcpu[1]);
cancel_join_vcpu_thread(threads[1], vcpu[2]);
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ucna_injection_test
*
* Copyright (C) 2022, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Test that user space can inject UnCorrectable No Action required (UCNA)
* memory errors to the guest.
*
 * The test starts one vCPU with MCG_CMCI_P enabled. It verifies that
 * proper UCNA errors can be injected to a vCPU with MCG_CMCI_P and
 * corresponding per-bank control register (MCI_CTL2) bit enabled.
 * The test also checks that the UCNA errors get recorded in the
 * Machine Check bank registers regardless of whether the error signal
 * interrupts are delivered to the guest.
*
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <pthread.h>
#include <inttypes.h>
#include <string.h>
#include <time.h>
#include "kvm_util_base.h"
#include "kvm_util.h"
#include "mce.h"
#include "processor.h"
#include "test_util.h"
#include "apic.h"
#define SYNC_FIRST_UCNA 9
#define SYNC_SECOND_UCNA 10
#define SYNC_GP 11
#define FIRST_UCNA_ADDR 0xdeadbeef
#define SECOND_UCNA_ADDR 0xcafeb0ba
/*
* Vector for the CMCI interrupt.
* Value is arbitrary. Any value in 0x20-0xFF should work:
* https://wiki.osdev.org/Interrupt_Vector_Table
*/
#define CMCI_VECTOR 0xa9
#define UCNA_BANK 0x7 // IMC0 bank
#define MCI_CTL2_RESERVED_BIT BIT_ULL(29)
static uint64_t supported_mcg_caps;
/*
* Record states about the injected UCNA.
* The variables started with the 'i_' prefixes are recorded in interrupt
* handler. Variables without the 'i_' prefixes are recorded in guest main
* execution thread.
*/
static volatile uint64_t i_ucna_rcvd;
static volatile uint64_t i_ucna_addr;
static volatile uint64_t ucna_addr;
static volatile uint64_t ucna_addr2;
struct thread_params {
struct kvm_vcpu *vcpu;
uint64_t *p_i_ucna_rcvd;
uint64_t *p_i_ucna_addr;
uint64_t *p_ucna_addr;
uint64_t *p_ucna_addr2;
};
static void verify_apic_base_addr(void)
{
uint64_t msr = rdmsr(MSR_IA32_APICBASE);
uint64_t base = GET_APIC_BASE(msr);
GUEST_ASSERT(base == APIC_DEFAULT_GPA);
}
static void ucna_injection_guest_code(void)
{
uint64_t ctl2;
verify_apic_base_addr();
xapic_enable();
	/* Sets up the interrupt vector and enables per-bank CMCI signaling. */
xapic_write_reg(APIC_LVTCMCI, CMCI_VECTOR | APIC_DM_FIXED);
ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK));
wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_CMCI_EN);
/* Enables interrupt in guest. */
asm volatile("sti");
/* Let user space inject the first UCNA */
GUEST_SYNC(SYNC_FIRST_UCNA);
ucna_addr = rdmsr(MSR_IA32_MCx_ADDR(UCNA_BANK));
/* Disables the per-bank CMCI signaling. */
ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK));
wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 & ~MCI_CTL2_CMCI_EN);
/* Let the user space inject the second UCNA */
GUEST_SYNC(SYNC_SECOND_UCNA);
ucna_addr2 = rdmsr(MSR_IA32_MCx_ADDR(UCNA_BANK));
GUEST_DONE();
}
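/*
 * This vCPU is created without MCG_CMCI_P; setting MCI_CTL2_CMCI_EN is
 * expected to #GP, which run_vcpu_expect_gp() verifies via the guest's
 * #GP handler.
 */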
static void cmci_disabled_guest_code(void)
{
uint64_t ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK));
wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_CMCI_EN);
GUEST_DONE();
}
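/*
 * This vCPU has MCG_CMCI_P enabled, but setting a reserved bit in
 * MCI_CTL2 should still #GP.
 */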
static void cmci_enabled_guest_code(void)
{
uint64_t ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK));
wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_RESERVED_BIT);
GUEST_DONE();
}
static void guest_cmci_handler(struct ex_regs *regs)
{
i_ucna_rcvd++;
i_ucna_addr = rdmsr(MSR_IA32_MCx_ADDR(UCNA_BANK));
xapic_write_reg(APIC_EOI, 0);
}
static void guest_gp_handler(struct ex_regs *regs)
{
GUEST_SYNC(SYNC_GP);
}
static void run_vcpu_expect_gp(struct kvm_vcpu *vcpu)
{
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_SYNC,
"Expect UCALL_SYNC\n");
TEST_ASSERT(uc.args[1] == SYNC_GP, "#GP is expected.");
printf("vCPU received GP in guest.\n");
}
static void inject_ucna(struct kvm_vcpu *vcpu, uint64_t addr)
{
/*
* A UCNA error is indicated with VAL=1, UC=1, PCC=0, S=0 and AR=0 in
* the IA32_MCi_STATUS register.
* MSCOD=1 (BIT[16] - MscodDataRdErr).
* MCACOD=0x0090 (Memory controller error format, channel 0)
*/
uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
MCI_STATUS_MISCV | MCI_STATUS_ADDRV | 0x10090;
struct kvm_x86_mce mce = {};
mce.status = status;
mce.mcg_status = 0;
/*
* MCM_ADDR_PHYS indicates the reported address is a physical address.
* Lowest 6 bits is the recoverable address LSB, i.e., the injected MCE
* is at 4KB granularity.
*/
mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
mce.addr = addr;
mce.bank = UCNA_BANK;
vcpu_ioctl(vcpu, KVM_X86_SET_MCE, &mce);
}
static void *run_ucna_injection(void *arg)
{
struct thread_params *params = (struct thread_params *)arg;
struct ucall uc;
int old;
int r;
r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
TEST_ASSERT(r == 0,
"pthread_setcanceltype failed with errno=%d",
r);
vcpu_run(params->vcpu);
TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC,
"Expect UCALL_SYNC\n");
TEST_ASSERT(uc.args[1] == SYNC_FIRST_UCNA, "Injecting first UCNA.");
printf("Injecting first UCNA at %#x.\n", FIRST_UCNA_ADDR);
inject_ucna(params->vcpu, FIRST_UCNA_ADDR);
vcpu_run(params->vcpu);
TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC,
"Expect UCALL_SYNC\n");
TEST_ASSERT(uc.args[1] == SYNC_SECOND_UCNA, "Injecting second UCNA.");
printf("Injecting second UCNA at %#x.\n", SECOND_UCNA_ADDR);
inject_ucna(params->vcpu, SECOND_UCNA_ADDR);
vcpu_run(params->vcpu);
TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO);
if (get_ucall(params->vcpu, &uc) == UCALL_ABORT) {
TEST_ASSERT(false, "vCPU assertion failure: %s.\n",
(const char *)uc.args[0]);
}
return NULL;
}
static void test_ucna_injection(struct kvm_vcpu *vcpu, struct thread_params *params)
{
struct kvm_vm *vm = vcpu->vm;
params->vcpu = vcpu;
params->p_i_ucna_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&i_ucna_rcvd);
params->p_i_ucna_addr = (uint64_t *)addr_gva2hva(vm, (uint64_t)&i_ucna_addr);
params->p_ucna_addr = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ucna_addr);
params->p_ucna_addr2 = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ucna_addr2);
run_ucna_injection(params);
TEST_ASSERT(*params->p_i_ucna_rcvd == 1, "Only first UCNA get signaled.");
TEST_ASSERT(*params->p_i_ucna_addr == FIRST_UCNA_ADDR,
"Only first UCNA reported addr get recorded via interrupt.");
TEST_ASSERT(*params->p_ucna_addr == FIRST_UCNA_ADDR,
"First injected UCNAs should get exposed via registers.");
TEST_ASSERT(*params->p_ucna_addr2 == SECOND_UCNA_ADDR,
"Second injected UCNAs should get exposed via registers.");
printf("Test successful.\n"
"UCNA CMCI interrupts received: %ld\n"
"Last UCNA address received via CMCI: %lx\n"
"First UCNA address in vCPU thread: %lx\n"
"Second UCNA address in vCPU thread: %lx\n",
*params->p_i_ucna_rcvd, *params->p_i_ucna_addr,
*params->p_ucna_addr, *params->p_ucna_addr2);
}
static void setup_mce_cap(struct kvm_vcpu *vcpu, bool enable_cmci_p)
{
uint64_t mcg_caps = MCG_CTL_P | MCG_SER_P | MCG_LMCE_P | KVM_MAX_MCE_BANKS;
if (enable_cmci_p)
mcg_caps |= MCG_CMCI_P;
mcg_caps &= supported_mcg_caps | MCG_CAP_BANKS_MASK;
vcpu_ioctl(vcpu, KVM_X86_SETUP_MCE, &mcg_caps);
}
static struct kvm_vcpu *create_vcpu_with_mce_cap(struct kvm_vm *vm, uint32_t vcpuid,
bool enable_cmci_p, void *guest_code)
{
struct kvm_vcpu *vcpu = vm_vcpu_add(vm, vcpuid, guest_code);
setup_mce_cap(vcpu, enable_cmci_p);
return vcpu;
}
int main(int argc, char *argv[])
{
struct thread_params params;
struct kvm_vm *vm;
struct kvm_vcpu *ucna_vcpu;
struct kvm_vcpu *cmcidis_vcpu;
struct kvm_vcpu *cmci_vcpu;
kvm_check_cap(KVM_CAP_MCE);
vm = __vm_create(VM_MODE_DEFAULT, 3, 0);
kvm_ioctl(vm->kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED,
&supported_mcg_caps);
if (!(supported_mcg_caps & MCG_CMCI_P)) {
print_skip("MCG_CMCI_P is not supported");
exit(KSFT_SKIP);
}
ucna_vcpu = create_vcpu_with_mce_cap(vm, 0, true, ucna_injection_guest_code);
cmcidis_vcpu = create_vcpu_with_mce_cap(vm, 1, false, cmci_disabled_guest_code);
cmci_vcpu = create_vcpu_with_mce_cap(vm, 2, true, cmci_enabled_guest_code);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(ucna_vcpu);
vcpu_init_descriptor_tables(cmcidis_vcpu);
vcpu_init_descriptor_tables(cmci_vcpu);
vm_install_exception_handler(vm, CMCI_VECTOR, guest_cmci_handler);
vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
test_ucna_injection(ucna_vcpu, ¶ms);
run_vcpu_expect_gp(cmcidis_vcpu);
run_vcpu_expect_gp(cmci_vcpu);
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/ucna_injection_test.c |
// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE /* for program_invocation_short_name */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"
#define L2_GUEST_STACK_SIZE 256
/*
* Arbitrary, never shoved into KVM/hardware, just need to avoid conflict with
* the "real" exceptions used, #SS/#GP/#DF (12/13/8).
*/
#define FAKE_TRIPLE_FAULT_VECTOR 0xaa
/* Arbitrary 32-bit error code injected by this test. */
#define SS_ERROR_CODE 0xdeadbeef
/*
* Bit '0' is set on Intel if the exception occurs while delivering a previous
* event/exception. AMD's wording is ambiguous, but presumably the bit is set
* if the exception occurs while delivering an external event, e.g. NMI or INTR,
* but not for exceptions that occur when delivering other exceptions or
* software interrupts.
*
* Note, Intel's name for it, "External event", is misleading and much more
* aligned with AMD's behavior, but the SDM is quite clear on its behavior.
*/
#define ERROR_CODE_EXT_FLAG BIT(0)
/*
* Bit '1' is set if the fault occurred when looking up a descriptor in the
* IDT, which is the case here as the IDT is empty/NULL.
*/
#define ERROR_CODE_IDT_FLAG BIT(1)
/*
* The #GP that occurs when vectoring #SS should show the index into the IDT
* for #SS, plus have the "IDT flag" set.
*/
#define GP_ERROR_CODE_AMD ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG)
#define GP_ERROR_CODE_INTEL ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG | ERROR_CODE_EXT_FLAG)
/*
* Intel and AMD both shove '0' into the error code on #DF, regardless of what
* led to the double fault.
*/
#define DF_ERROR_CODE 0
#define INTERCEPT_SS (BIT_ULL(SS_VECTOR))
#define INTERCEPT_SS_DF (INTERCEPT_SS | BIT_ULL(DF_VECTOR))
#define INTERCEPT_SS_GP_DF (INTERCEPT_SS_DF | BIT_ULL(GP_VECTOR))
static void l2_ss_pending_test(void)
{
GUEST_SYNC(SS_VECTOR);
}
static void l2_ss_injected_gp_test(void)
{
GUEST_SYNC(GP_VECTOR);
}
static void l2_ss_injected_df_test(void)
{
GUEST_SYNC(DF_VECTOR);
}
static void l2_ss_injected_tf_test(void)
{
GUEST_SYNC(FAKE_TRIPLE_FAULT_VECTOR);
}
static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector,
uint32_t error_code)
{
struct vmcb *vmcb = svm->vmcb;
struct vmcb_control_area *ctrl = &vmcb->control;
vmcb->save.rip = (u64)l2_code;
run_guest(vmcb, svm->vmcb_gpa);
if (vector == FAKE_TRIPLE_FAULT_VECTOR)
return;
GUEST_ASSERT_EQ(ctrl->exit_code, (SVM_EXIT_EXCP_BASE + vector));
GUEST_ASSERT_EQ(ctrl->exit_info_1, error_code);
}
static void l1_svm_code(struct svm_test_data *svm)
{
struct vmcb_control_area *ctrl = &svm->vmcb->control;
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
generic_svm_setup(svm, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
svm->vmcb->save.idtr.limit = 0;
ctrl->intercept |= BIT_ULL(INTERCEPT_SHUTDOWN);
ctrl->intercept_exceptions = INTERCEPT_SS_GP_DF;
svm_run_l2(svm, l2_ss_pending_test, SS_VECTOR, SS_ERROR_CODE);
svm_run_l2(svm, l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_AMD);
ctrl->intercept_exceptions = INTERCEPT_SS_DF;
svm_run_l2(svm, l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);
ctrl->intercept_exceptions = INTERCEPT_SS;
svm_run_l2(svm, l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
GUEST_ASSERT_EQ(ctrl->exit_code, SVM_EXIT_SHUTDOWN);
GUEST_DONE();
}
static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code)
{
GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code));
GUEST_ASSERT_EQ(vector == SS_VECTOR ? vmlaunch() : vmresume(), 0);
if (vector == FAKE_TRIPLE_FAULT_VECTOR)
return;
GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), vector);
GUEST_ASSERT_EQ(vmreadz(VM_EXIT_INTR_ERROR_CODE), error_code);
}
static void l1_vmx_code(struct vmx_pages *vmx)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);
GUEST_ASSERT_EQ(load_vmcs(vmx), true);
prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
GUEST_ASSERT_EQ(vmwrite(GUEST_IDTR_LIMIT, 0), 0);
/*
* VMX disallows injecting an exception with error_code[31:16] != 0,
* and hardware will never generate a VM-Exit with bits 31:16 set.
* KVM should likewise truncate the "bad" userspace value.
*/
GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_GP_DF), 0);
vmx_run_l2(l2_ss_pending_test, SS_VECTOR, (u16)SS_ERROR_CODE);
vmx_run_l2(l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_INTEL);
GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_DF), 0);
vmx_run_l2(l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);
GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS), 0);
vmx_run_l2(l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_TRIPLE_FAULT);
GUEST_DONE();
}
static void __attribute__((__flatten__)) l1_guest_code(void *test_data)
{
if (this_cpu_has(X86_FEATURE_SVM))
l1_svm_code(test_data);
else
l1_vmx_code(test_data);
}
static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector)
{
struct ucall uc;
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(vector == uc.args[1],
"Expected L2 to ask for %d, got %ld", vector, uc.args[1]);
break;
case UCALL_DONE:
TEST_ASSERT(vector == -1,
"Expected L2 to ask for %d, L2 says it's done", vector);
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
default:
TEST_FAIL("Expected L2 to ask for %d, got unexpected ucall %lu", vector, uc.cmd);
}
}
static void queue_ss_exception(struct kvm_vcpu *vcpu, bool inject)
{
struct kvm_vcpu_events events;
vcpu_events_get(vcpu, &events);
TEST_ASSERT(!events.exception.pending,
"Vector %d unexpectedlt pending", events.exception.nr);
TEST_ASSERT(!events.exception.injected,
"Vector %d unexpectedly injected", events.exception.nr);
events.flags = KVM_VCPUEVENT_VALID_PAYLOAD;
events.exception.pending = !inject;
events.exception.injected = inject;
events.exception.nr = SS_VECTOR;
events.exception.has_error_code = true;
events.exception.error_code = SS_ERROR_CODE;
vcpu_events_set(vcpu, &events);
}
/*
* Verify KVM_{G,S}ET_EVENTS play nice with pending vs. injected exceptions
* when an exception is being queued for L2. Specifically, verify that KVM
* honors L1 exception intercept controls when a #SS is pending/injected,
* triggers a #GP on vectoring the #SS, morphs to #DF if #GP isn't intercepted
* by L1, and finally causes (nested) SHUTDOWN if #DF isn't intercepted by L1.
*/
int main(int argc, char *argv[])
{
vm_vaddr_t nested_test_data_gva;
struct kvm_vcpu_events events;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXCEPTION_PAYLOAD));
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vm_enable_cap(vm, KVM_CAP_EXCEPTION_PAYLOAD, -2ul);
if (kvm_cpu_has(X86_FEATURE_SVM))
vcpu_alloc_svm(vm, &nested_test_data_gva);
else
vcpu_alloc_vmx(vm, &nested_test_data_gva);
vcpu_args_set(vcpu, 1, nested_test_data_gva);
/* Run L1 => L2. L2 should sync and request #SS. */
vcpu_run(vcpu);
assert_ucall_vector(vcpu, SS_VECTOR);
/* Pend #SS and request immediate exit. #SS should still be pending. */
queue_ss_exception(vcpu, false);
vcpu->run->immediate_exit = true;
vcpu_run_complete_io(vcpu);
	/* Verify the pending event comes back out the same as it went in. */
vcpu_events_get(vcpu, &events);
TEST_ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
KVM_VCPUEVENT_VALID_PAYLOAD);
TEST_ASSERT_EQ(events.exception.pending, true);
TEST_ASSERT_EQ(events.exception.nr, SS_VECTOR);
TEST_ASSERT_EQ(events.exception.has_error_code, true);
TEST_ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);
/*
* Run for real with the pending #SS, L1 should get a VM-Exit due to
* #SS interception and re-enter L2 to request #GP (via injected #SS).
*/
vcpu->run->immediate_exit = false;
vcpu_run(vcpu);
assert_ucall_vector(vcpu, GP_VECTOR);
/*
* Inject #SS, the #SS should bypass interception and cause #GP, which
* L1 should intercept before KVM morphs it to #DF. L1 should then
* disable #GP interception and run L2 to request #DF (via #SS => #GP).
*/
queue_ss_exception(vcpu, true);
vcpu_run(vcpu);
assert_ucall_vector(vcpu, DF_VECTOR);
/*
	 * Inject #SS, the #SS should bypass interception and cause #GP, which
	 * L1 is no longer intercepting, and so L1 should see a #DF VM-Exit.
	 * L1 should then signal that it is done.
*/
queue_ss_exception(vcpu, true);
vcpu_run(vcpu);
assert_ucall_vector(vcpu, FAKE_TRIPLE_FAULT_VECTOR);
/*
* Inject #SS yet again. L1 is not intercepting #GP or #DF, and so
* should see nested TRIPLE_FAULT / SHUTDOWN.
*/
queue_ss_exception(vcpu, true);
vcpu_run(vcpu);
assert_ucall_vector(vcpu, -1);
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test for VMX-pmu perf capability msr
*
* Copyright (C) 2021 Intel Corporation
*
 * Test the effect of various CPUID settings on the
 * MSR_IA32_PERF_CAPABILITIES MSR. Check that what we write with
 * KVM_SET_MSR is _not_ modified by the guest, that it can be retrieved
 * with KVM_GET_MSR, and that invalid LBR formats are rejected.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <sys/ioctl.h>
#include <linux/bitmap.h>
#include "kvm_util.h"
#include "vmx.h"
union perf_capabilities {
struct {
u64 lbr_format:6;
u64 pebs_trap:1;
u64 pebs_arch_reg:1;
u64 pebs_format:4;
u64 smm_freeze:1;
u64 full_width_write:1;
u64 pebs_baseline:1;
u64 perf_metrics:1;
u64 pebs_output_pt_available:1;
u64 anythread_deprecated:1;
};
u64 capabilities;
};
/*
* The LBR format and most PEBS features are immutable, all other features are
* fungible (if supported by the host and KVM).
*/
static const union perf_capabilities immutable_caps = {
.lbr_format = -1,
.pebs_trap = 1,
.pebs_arch_reg = 1,
.pebs_format = -1,
.pebs_baseline = 1,
};
static const union perf_capabilities format_caps = {
.lbr_format = -1,
.pebs_format = -1,
};
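/*
 * Note, assigning '-1' to a bitfield sets all of its bits, i.e. the
 * lbr_format and pebs_format initializers above cover the full multi-bit
 * fields.
 */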
static void guest_test_perf_capabilities_gp(uint64_t val)
{
uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val);
__GUEST_ASSERT(vector == GP_VECTOR,
"Expected #GP for value '0x%llx', got vector '0x%x'",
val, vector);
}
static void guest_code(uint64_t current_val)
{
int i;
guest_test_perf_capabilities_gp(current_val);
guest_test_perf_capabilities_gp(0);
for (i = 0; i < 64; i++)
guest_test_perf_capabilities_gp(current_val ^ BIT_ULL(i));
GUEST_DONE();
}
/*
* Verify that guest WRMSRs to PERF_CAPABILITIES #GP regardless of the value
* written, that the guest always sees the userspace controlled value, and that
* PERF_CAPABILITIES is immutable after KVM_RUN.
*/
static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
struct ucall uc;
int r, i;
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
vcpu_args_set(vcpu, 1, host_cap.capabilities);
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
break;
default:
TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
}
TEST_ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES),
host_cap.capabilities);
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0);
TEST_ASSERT(!r, "Post-KVM_RUN write '0' didn't fail");
for (i = 0; i < 64; i++) {
r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES,
host_cap.capabilities ^ BIT_ULL(i));
TEST_ASSERT(!r, "Post-KVM_RUN write '0x%llx'didn't fail",
host_cap.capabilities ^ BIT_ULL(i));
}
kvm_vm_free(vm);
}
/*
* Verify KVM allows writing PERF_CAPABILITIES with all KVM-supported features
* enabled, as well as '0' (to disable all features).
*/
static void test_basic_perf_capabilities(union perf_capabilities host_cap)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL);
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0);
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
kvm_vm_free(vm);
}
static void test_fungible_perf_capabilities(union perf_capabilities host_cap)
{
const uint64_t fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL);
int bit;
for_each_set_bit(bit, &fungible_caps, 64) {
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, BIT_ULL(bit));
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES,
host_cap.capabilities & ~BIT_ULL(bit));
}
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
kvm_vm_free(vm);
}
/*
* Verify KVM rejects attempts to set unsupported and/or immutable features in
* PERF_CAPABILITIES. Note, LBR format and PEBS format need to be validated
* separately as they are multi-bit values, e.g. toggling or setting a single
* bit can generate a false positive without dedicated safeguards.
*/
static void test_immutable_perf_capabilities(union perf_capabilities host_cap)
{
const uint64_t reserved_caps = (~host_cap.capabilities |
immutable_caps.capabilities) &
~format_caps.capabilities;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL);
union perf_capabilities val = host_cap;
int r, bit;
for_each_set_bit(bit, &reserved_caps, 64) {
r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES,
host_cap.capabilities ^ BIT_ULL(bit));
TEST_ASSERT(!r, "%s immutable feature 0x%llx (bit %d) didn't fail",
host_cap.capabilities & BIT_ULL(bit) ? "Setting" : "Clearing",
BIT_ULL(bit), bit);
}
/*
* KVM only supports the host's native LBR format, as well as '0' (to
* disable LBR support). Verify KVM rejects all other LBR formats.
*/
for (val.lbr_format = 1; val.lbr_format; val.lbr_format++) {
if (val.lbr_format == host_cap.lbr_format)
continue;
r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val.capabilities);
TEST_ASSERT(!r, "Bad LBR FMT = 0x%x didn't fail, host = 0x%x",
val.lbr_format, host_cap.lbr_format);
}
/* Ditto for the PEBS format. */
for (val.pebs_format = 1; val.pebs_format; val.pebs_format++) {
if (val.pebs_format == host_cap.pebs_format)
continue;
r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val.capabilities);
TEST_ASSERT(!r, "Bad PEBS FMT = 0x%x didn't fail, host = 0x%x",
val.pebs_format, host_cap.pebs_format);
}
kvm_vm_free(vm);
}
/*
* Test that LBR MSRs are writable when LBRs are enabled, and then verify that
* disabling the vPMU via CPUID also disables LBR support. Set bits 2:0 of
* LBR_TOS as those bits are writable across all uarch implementations (arch
* LBRs will need to poke a different MSR).
*/
static void test_lbr_perf_capabilities(union perf_capabilities host_cap)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int r;
if (!host_cap.lbr_format)
return;
vm = vm_create_with_one_vcpu(&vcpu, NULL);
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
vcpu_set_msr(vcpu, MSR_LBR_TOS, 7);
vcpu_clear_cpuid_entry(vcpu, X86_PROPERTY_PMU_VERSION.function);
r = _vcpu_set_msr(vcpu, MSR_LBR_TOS, 7);
TEST_ASSERT(!r, "Writing LBR_TOS should fail after disabling vPMU");
kvm_vm_free(vm);
}
int main(int argc, char *argv[])
{
union perf_capabilities host_cap;
TEST_REQUIRE(get_kvm_param_bool("enable_pmu"));
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));
TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);
host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES);
TEST_ASSERT(host_cap.full_width_write,
"Full-width writes should always be supported");
test_basic_perf_capabilities(host_cap);
test_fungible_perf_capabilities(host_cap);
test_immutable_perf_capabilities(host_cap);
test_guest_wrmsr_perf_capabilities(host_cap);
test_lbr_perf_capabilities(host_cap);
}
| linux-master | tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"
#include <string.h>
#include <sys/ioctl.h>
#include "kselftest.h"
#define ARBITRARY_IO_PORT 0x2000
/* The virtual machine object. */
static struct kvm_vm *vm;
static void l2_guest_code(void)
{
asm volatile("inb %%dx, %%al"
: : [port] "d" (ARBITRARY_IO_PORT) : "rax");
}
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
void l1_guest_code_vmx(struct vmx_pages *vmx)
{
GUEST_ASSERT(vmx->vmcs_gpa);
GUEST_ASSERT(prepare_for_vmx_operation(vmx));
GUEST_ASSERT(load_vmcs(vmx));
prepare_vmcs(vmx, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
GUEST_ASSERT(!vmlaunch());
	/* L2 should triple fault after a triple fault event is injected. */
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT);
GUEST_DONE();
}
void l1_guest_code_svm(struct svm_test_data *svm)
{
struct vmcb *vmcb = svm->vmcb;
generic_svm_setup(svm, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
	/* Don't intercept shutdown to test the case where SVM allows L1 to not intercept it. */
vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN));
run_guest(vmcb, svm->vmcb_gpa);
/* should not reach here, L1 should crash */
GUEST_ASSERT(0);
}
int main(void)
{
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vcpu_events events;
struct ucall uc;
bool has_vmx = kvm_cpu_has(X86_FEATURE_VMX);
bool has_svm = kvm_cpu_has(X86_FEATURE_SVM);
TEST_REQUIRE(has_vmx || has_svm);
TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT));
if (has_vmx) {
vm_vaddr_t vmx_pages_gva;
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_vmx);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vcpu, 1, vmx_pages_gva);
} else {
vm_vaddr_t svm_gva;
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_svm);
vcpu_alloc_svm(vm, &svm_gva);
vcpu_args_set(vcpu, 1, svm_gva);
}
vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
run = vcpu->run;
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
"Expected IN from port %d from L2, got port %d",
ARBITRARY_IO_PORT, run->io.port);
vcpu_events_get(vcpu, &events);
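	/* Inject a pending triple fault event from userspace. */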
events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
events.triple_fault.pending = true;
vcpu_events_set(vcpu, &events);
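	/* Complete the outstanding I/O without re-entering the guest. */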
run->immediate_exit = true;
vcpu_run_complete_io(vcpu);
vcpu_events_get(vcpu, &events);
TEST_ASSERT(events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT,
"Triple fault event invalid");
TEST_ASSERT(events.triple_fault.pending,
"No triple fault pending");
vcpu_run(vcpu);
if (has_svm) {
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
} else {
switch (get_ucall(vcpu, &uc)) {
case UCALL_DONE:
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
default:
TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
}
}
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* VMX control MSR test
*
* Copyright (C) 2022 Google LLC.
*
* Tests for KVM ownership of bits in the VMX entry/exit control MSRs. Checks
* that KVM will set owned bits where appropriate, and will not if
* KVM_X86_QUIRK_TWEAK_VMX_CTRL_MSRS is disabled.
*/
#include <linux/bitmap.h>
#include "kvm_util.h"
#include "vmx.h"
static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index,
uint64_t mask)
{
uint64_t val = vcpu_get_msr(vcpu, msr_index);
uint64_t bit;
mask &= val;
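	/* Verify each testable set bit can be cleared and then restored. */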
for_each_set_bit(bit, &mask, 64) {
vcpu_set_msr(vcpu, msr_index, val & ~BIT_ULL(bit));
vcpu_set_msr(vcpu, msr_index, val);
}
}
static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index,
uint64_t mask)
{
uint64_t val = vcpu_get_msr(vcpu, msr_index);
uint64_t bit;
mask = ~mask | val;
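	/* Verify each testable clear bit can be set and then restored. */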
for_each_clear_bit(bit, &mask, 64) {
vcpu_set_msr(vcpu, msr_index, val | BIT_ULL(bit));
vcpu_set_msr(vcpu, msr_index, val);
}
}
static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index)
{
vmx_fixed0_msr_test(vcpu, msr_index, GENMASK_ULL(31, 0));
vmx_fixed1_msr_test(vcpu, msr_index, GENMASK_ULL(63, 32));
}
static void vmx_save_restore_msrs_test(struct kvm_vcpu *vcpu)
{
vcpu_set_msr(vcpu, MSR_IA32_VMX_VMCS_ENUM, 0);
vcpu_set_msr(vcpu, MSR_IA32_VMX_VMCS_ENUM, -1ull);
vmx_fixed1_msr_test(vcpu, MSR_IA32_VMX_BASIC,
BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55));
vmx_fixed1_msr_test(vcpu, MSR_IA32_VMX_MISC,
BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) |
BIT_ULL(15) | BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30));
vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_PROCBASED_CTLS2);
vmx_fixed1_msr_test(vcpu, MSR_IA32_VMX_EPT_VPID_CAP, -1ull);
vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS);
vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_TRUE_EXIT_CTLS);
vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS);
vmx_fixed1_msr_test(vcpu, MSR_IA32_VMX_VMFUNC, -1ull);
}
static void __ia32_feature_control_msr_test(struct kvm_vcpu *vcpu,
uint64_t msr_bit,
struct kvm_x86_cpu_feature feature)
{
uint64_t val;
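	/*
	 * Clear the associated CPUID feature, verify the MSR bit can still be
	 * toggled with the MSR locked, then restore the feature at the end if
	 * it's actually supported.
	 */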
vcpu_clear_cpuid_feature(vcpu, feature);
val = vcpu_get_msr(vcpu, MSR_IA32_FEAT_CTL);
vcpu_set_msr(vcpu, MSR_IA32_FEAT_CTL, val | msr_bit | FEAT_CTL_LOCKED);
vcpu_set_msr(vcpu, MSR_IA32_FEAT_CTL, (val & ~msr_bit) | FEAT_CTL_LOCKED);
vcpu_set_msr(vcpu, MSR_IA32_FEAT_CTL, val | msr_bit | FEAT_CTL_LOCKED);
vcpu_set_msr(vcpu, MSR_IA32_FEAT_CTL, (val & ~msr_bit) | FEAT_CTL_LOCKED);
vcpu_set_msr(vcpu, MSR_IA32_FEAT_CTL, val);
if (!kvm_cpu_has(feature))
return;
vcpu_set_cpuid_feature(vcpu, feature);
}
static void ia32_feature_control_msr_test(struct kvm_vcpu *vcpu)
{
uint64_t supported_bits = FEAT_CTL_LOCKED |
FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX |
FEAT_CTL_SGX_LC_ENABLED |
FEAT_CTL_SGX_ENABLED |
FEAT_CTL_LMCE_ENABLED;
int bit, r;
__ia32_feature_control_msr_test(vcpu, FEAT_CTL_VMX_ENABLED_INSIDE_SMX, X86_FEATURE_SMX);
__ia32_feature_control_msr_test(vcpu, FEAT_CTL_VMX_ENABLED_INSIDE_SMX, X86_FEATURE_VMX);
__ia32_feature_control_msr_test(vcpu, FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX, X86_FEATURE_VMX);
__ia32_feature_control_msr_test(vcpu, FEAT_CTL_SGX_LC_ENABLED, X86_FEATURE_SGX_LC);
__ia32_feature_control_msr_test(vcpu, FEAT_CTL_SGX_LC_ENABLED, X86_FEATURE_SGX);
__ia32_feature_control_msr_test(vcpu, FEAT_CTL_SGX_ENABLED, X86_FEATURE_SGX);
__ia32_feature_control_msr_test(vcpu, FEAT_CTL_LMCE_ENABLED, X86_FEATURE_MCE);
for_each_clear_bit(bit, &supported_bits, 64) {
r = _vcpu_set_msr(vcpu, MSR_IA32_FEAT_CTL, BIT(bit));
TEST_ASSERT(r == 0,
"Setting reserved bit %d in IA32_FEATURE_CONTROL should fail", bit);
}
}
int main(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_DISABLE_QUIRKS2));
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
/* No need to actually do KVM_RUN, thus no guest code. */
vm = vm_create_with_one_vcpu(&vcpu, NULL);
vmx_save_restore_msrs_test(vcpu);
ia32_feature_control_msr_test(vcpu);
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/vmx_msrs_test.c |
// SPDX-License-Identifier: GPL-2.0
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
static void guest_ins_port80(uint8_t *buffer, unsigned int count)
{
unsigned long end;
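	/*
	 * main() stuffs RCX 2 => 1 and 3 => 8192, so expect a single byte
	 * transfer when count == 2 and a full 8192-byte fill otherwise.
	 */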
if (count == 2)
end = (unsigned long)buffer + 1;
else
end = (unsigned long)buffer + 8192;
asm volatile("cld; rep; insb" : "+D"(buffer), "+c"(count) : "d"(0x80) : "memory");
GUEST_ASSERT_EQ(count, 0);
GUEST_ASSERT_EQ((unsigned long)buffer, end);
}
static void guest_code(void)
{
uint8_t buffer[8192];
int i;
/*
* Special case tests. main() will adjust RCX 2 => 1 and 3 => 8192 to
* test that KVM doesn't explode when userspace modifies the "count" on
* a userspace I/O exit. KVM isn't required to play nice with the I/O
	 * itself as KVM doesn't support manipulating the count; it just needs
* to not explode or overflow a buffer.
*/
guest_ins_port80(buffer, 2);
guest_ins_port80(buffer, 3);
/* Verify KVM fills the buffer correctly when not stuffing RCX. */
memset(buffer, 0, sizeof(buffer));
guest_ins_port80(buffer, 8192);
for (i = 0; i < 8192; i++)
__GUEST_ASSERT(buffer[i] == 0xaa,
"Expected '0xaa', got '0x%x' at buffer[%u]",
buffer[i], i);
GUEST_DONE();
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_regs regs;
struct kvm_run *run;
struct kvm_vm *vm;
struct ucall uc;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
memset(®s, 0, sizeof(regs));
while (1) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
if (get_ucall(vcpu, &uc))
break;
TEST_ASSERT(run->io.port == 0x80,
"Expected I/O at port 0x80, got port 0x%x\n", run->io.port);
/*
* Modify the rep string count in RCX: 2 => 1 and 3 => 8192.
* Note, this abuses KVM's batching of rep string I/O to avoid
* getting stuck in an infinite loop. That behavior isn't in
* scope from a testing perspective as it's not ABI in any way,
* i.e. it really is abusing internal KVM knowledge.
*/
vcpu_regs_get(vcpu, ®s);
if (regs.rcx == 2)
regs.rcx = 1;
if (regs.rcx == 3)
regs.rcx = 8192;
memset((void *)run + run->io.data_offset, 0xaa, 4096);
vcpu_regs_set(vcpu, ®s);
}
switch (uc.cmd) {
case UCALL_DONE:
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/userspace_io_test.c |
// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "apic.h"
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"
struct xapic_vcpu {
struct kvm_vcpu *vcpu;
bool is_x2apic;
};
static void xapic_guest_code(void)
{
asm volatile("cli");
xapic_enable();
while (1) {
uint64_t val = (u64)xapic_read_reg(APIC_IRR) |
(u64)xapic_read_reg(APIC_IRR + 0x10) << 32;
xapic_write_reg(APIC_ICR2, val >> 32);
xapic_write_reg(APIC_ICR, val);
GUEST_SYNC(val);
}
}
static void x2apic_guest_code(void)
{
asm volatile("cli");
x2apic_enable();
do {
uint64_t val = x2apic_read_reg(APIC_IRR) |
x2apic_read_reg(APIC_IRR + 0x10) << 32;
x2apic_write_reg(APIC_ICR, val);
GUEST_SYNC(val);
} while (1);
}
static void ____test_icr(struct xapic_vcpu *x, uint64_t val)
{
struct kvm_vcpu *vcpu = x->vcpu;
struct kvm_lapic_state xapic;
struct ucall uc;
uint64_t icr;
/*
* Tell the guest what ICR value to write. Use the IRR to pass info,
* all bits are valid and should not be modified by KVM (ignoring the
* fact that vectors 0-15 are technically illegal).
*/
vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
*((u32 *)&xapic.regs[APIC_IRR]) = val;
*((u32 *)&xapic.regs[APIC_IRR + 0x10]) = val >> 32;
vcpu_ioctl(vcpu, KVM_SET_LAPIC, &xapic);
vcpu_run(vcpu);
TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
TEST_ASSERT_EQ(uc.args[1], val);
vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
icr = (u64)(*((u32 *)&xapic.regs[APIC_ICR])) |
(u64)(*((u32 *)&xapic.regs[APIC_ICR2])) << 32;
if (!x->is_x2apic) {
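		/*
		 * In xAPIC mode, only ICR's lower 32 bits and ICR2's
		 * destination field (bits 63:56) are preserved.
		 */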
val &= (-1u | (0xffull << (32 + 24)));
TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
} else {
TEST_ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY);
}
}
#define X2APIC_RSVD_BITS_MASK	(GENMASK_ULL(31, 20) | \
				 GENMASK_ULL(17, 16) | \
				 GENMASK_ULL(13, 13))
static void __test_icr(struct xapic_vcpu *x, uint64_t val)
{
	if (x->is_x2apic) {
		/*
		 * Writing the vICR in hardware requires reserved bits 31:20,
		 * 17:16 and 13 to be zero to avoid a #GP, so mask them out of
		 * the value written to the vICR.
		 */
		val &= ~X2APIC_RSVD_BITS_MASK;
	}
____test_icr(x, val | APIC_ICR_BUSY);
____test_icr(x, val & ~(u64)APIC_ICR_BUSY);
}
static void test_icr(struct xapic_vcpu *x)
{
struct kvm_vcpu *vcpu = x->vcpu;
uint64_t icr, i, j;
icr = APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_FIXED;
for (i = 0; i <= 0xff; i++)
__test_icr(x, icr | i);
icr = APIC_INT_ASSERT | APIC_DM_FIXED;
for (i = 0; i <= 0xff; i++)
__test_icr(x, icr | i);
/*
* Send all flavors of IPIs to non-existent vCPUs. TODO: use number of
* vCPUs, not vcpu.id + 1. Arbitrarily use vector 0xff.
*/
icr = APIC_INT_ASSERT | 0xff;
for (i = 0; i < 0xff; i++) {
if (i == vcpu->id)
continue;
for (j = 0; j < 8; j++)
__test_icr(x, i << (32 + 24) | icr | (j << 8));
}
/* And again with a shorthand destination for all types of IPIs. */
icr = APIC_DEST_ALLBUT | APIC_INT_ASSERT;
for (i = 0; i < 8; i++)
__test_icr(x, icr | (i << 8));
	/* And a few garbage values, just make sure each is an IRQ (blocked). */
__test_icr(x, 0xa5a5a5a5a5a5a5a5 & ~APIC_DM_FIXED_MASK);
__test_icr(x, 0x5a5a5a5a5a5a5a5a & ~APIC_DM_FIXED_MASK);
__test_icr(x, -1ull & ~APIC_DM_FIXED_MASK);
}
static void __test_apic_id(struct kvm_vcpu *vcpu, uint64_t apic_base)
{
uint32_t apic_id, expected;
struct kvm_lapic_state xapic;
vcpu_set_msr(vcpu, MSR_IA32_APICBASE, apic_base);
vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
expected = apic_base & X2APIC_ENABLE ? vcpu->id : vcpu->id << 24;
apic_id = *((u32 *)&xapic.regs[APIC_ID]);
TEST_ASSERT(apic_id == expected,
"APIC_ID not set back to %s format; wanted = %x, got = %x",
(apic_base & X2APIC_ENABLE) ? "x2APIC" : "xAPIC",
expected, apic_id);
}
/*
* Verify that KVM switches the APIC_ID between xAPIC and x2APIC when userspace
* stuffs MSR_IA32_APICBASE. Setting the APIC_ID when x2APIC is enabled and
 * when the APIC transitions from DISABLED to ENABLED is architectural behavior
 * (on Intel), whereas the x2APIC => xAPIC transition behavior is KVM ABI since
 * attempting to transition from x2APIC to xAPIC without disabling the APIC is
* architecturally disallowed.
*/
static void test_apic_id(void)
{
const uint32_t NR_VCPUS = 3;
struct kvm_vcpu *vcpus[NR_VCPUS];
uint64_t apic_base;
struct kvm_vm *vm;
int i;
vm = vm_create_with_vcpus(NR_VCPUS, NULL, vcpus);
vm_enable_cap(vm, KVM_CAP_X2APIC_API, KVM_X2APIC_API_USE_32BIT_IDS);
for (i = 0; i < NR_VCPUS; i++) {
apic_base = vcpu_get_msr(vcpus[i], MSR_IA32_APICBASE);
TEST_ASSERT(apic_base & MSR_IA32_APICBASE_ENABLE,
"APIC not in ENABLED state at vCPU RESET");
TEST_ASSERT(!(apic_base & X2APIC_ENABLE),
"APIC not in xAPIC mode at vCPU RESET");
__test_apic_id(vcpus[i], apic_base);
__test_apic_id(vcpus[i], apic_base | X2APIC_ENABLE);
__test_apic_id(vcpus[i], apic_base);
}
kvm_vm_free(vm);
}
int main(int argc, char *argv[])
{
struct xapic_vcpu x = {
.vcpu = NULL,
.is_x2apic = true,
};
struct kvm_vm *vm;
vm = vm_create_with_one_vcpu(&x.vcpu, x2apic_guest_code);
test_icr(&x);
kvm_vm_free(vm);
/*
* Use a second VM for the xAPIC test so that x2APIC can be hidden from
* the guest in order to test AVIC. KVM disallows changing CPUID after
* KVM_RUN and AVIC is disabled if _any_ vCPU is allowed to use x2APIC.
*/
vm = vm_create_with_one_vcpu(&x.vcpu, xapic_guest_code);
x.is_x2apic = false;
vcpu_clear_cpuid_feature(x.vcpu, X86_FEATURE_X2APIC);
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
test_icr(&x);
kvm_vm_free(vm);
test_apic_id();
}
| linux-master | tools/testing/selftests/kvm/x86_64/xapic_state_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* svm_nested_shutdown_test
*
* Copyright (C) 2022, Red Hat, Inc.
*
* Nested SVM testing: test that unintercepted shutdown in L2 doesn't crash the host
*/
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
static void l2_guest_code(struct svm_test_data *svm)
{
__asm__ __volatile__("ud2");
}
static void l1_guest_code(struct svm_test_data *svm, struct idt_entry *idt)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
struct vmcb *vmcb = svm->vmcb;
generic_svm_setup(svm, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN));
idt[6].p = 0; // #UD is intercepted but its injection will cause #NP
idt[11].p = 0; // #NP is not intercepted and will cause another
// #NP that will be converted to #DF
idt[8].p = 0; // #DF will cause #NP which will cause SHUTDOWN
run_guest(vmcb, svm->vmcb_gpa);
/* should not reach here */
GUEST_ASSERT(0);
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
vm_vaddr_t svm_gva;
struct kvm_vm *vm;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
vcpu_alloc_svm(vm, &svm_gva);
vcpu_args_set(vcpu, 2, svm_gva, vm->idt);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test that KVM_GET_MSR_INDEX_LIST and
* KVM_GET_MSR_FEATURE_INDEX_LIST work as intended
*
* Copyright (C) 2020, Red Hat, Inc.
*/
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
int main(int argc, char *argv[])
{
const struct kvm_msr_list *feature_list;
int i;
/*
* Skip the entire test if MSR_FEATURES isn't supported, other tests
* will cover the "regular" list of MSRs, the coverage here is purely
* opportunistic and not interesting on its own.
*/
TEST_REQUIRE(kvm_has_cap(KVM_CAP_GET_MSR_FEATURES));
(void)kvm_get_msr_index_list();
feature_list = kvm_get_feature_msr_index_list();
for (i = 0; i < feature_list->nmsrs; i++)
kvm_get_feature_msr(feature_list->indices[i]);
}
| linux-master | tools/testing/selftests/kvm/x86_64/get_msr_index_features.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* KVM_SET_SREGS tests
*
* Copyright (C) 2018, Google LLC.
*
* This is a regression test for the bug fixed by the following commit:
* d3802286fa0f ("kvm: x86: Disallow illegal IA32_APIC_BASE MSR values")
*
* That bug allowed a user-mode program that called the KVM_SET_SREGS
* ioctl to put a VCPU's local APIC into an invalid state.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#define TEST_INVALID_CR_BIT(vcpu, cr, orig, bit) \
do { \
struct kvm_sregs new; \
int rc; \
\
	/* Skip the sub-test if the feature/bit is supported. */		\
if (orig.cr & bit) \
break; \
\
memcpy(&new, &orig, sizeof(sregs)); \
new.cr |= bit; \
\
rc = _vcpu_sregs_set(vcpu, &new); \
TEST_ASSERT(rc, "KVM allowed invalid " #cr " bit (0x%lx)", bit); \
\
/* Sanity check that KVM didn't change anything. */ \
vcpu_sregs_get(vcpu, &new); \
TEST_ASSERT(!memcmp(&new, &orig, sizeof(new)), "KVM modified sregs"); \
} while (0)
static uint64_t calc_supported_cr4_feature_bits(void)
{
uint64_t cr4;
cr4 = X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE |
X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE | X86_CR4_PGE |
X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT;
if (kvm_cpu_has(X86_FEATURE_UMIP))
cr4 |= X86_CR4_UMIP;
if (kvm_cpu_has(X86_FEATURE_LA57))
cr4 |= X86_CR4_LA57;
if (kvm_cpu_has(X86_FEATURE_VMX))
cr4 |= X86_CR4_VMXE;
if (kvm_cpu_has(X86_FEATURE_SMX))
cr4 |= X86_CR4_SMXE;
if (kvm_cpu_has(X86_FEATURE_FSGSBASE))
cr4 |= X86_CR4_FSGSBASE;
if (kvm_cpu_has(X86_FEATURE_PCID))
cr4 |= X86_CR4_PCIDE;
if (kvm_cpu_has(X86_FEATURE_XSAVE))
cr4 |= X86_CR4_OSXSAVE;
if (kvm_cpu_has(X86_FEATURE_SMEP))
cr4 |= X86_CR4_SMEP;
if (kvm_cpu_has(X86_FEATURE_SMAP))
cr4 |= X86_CR4_SMAP;
if (kvm_cpu_has(X86_FEATURE_PKU))
cr4 |= X86_CR4_PKE;
return cr4;
}
int main(int argc, char *argv[])
{
struct kvm_sregs sregs;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
uint64_t cr4;
int rc, i;
/*
* Create a dummy VM, specifically to avoid doing KVM_SET_CPUID2, and
* use it to verify all supported CR4 bits can be set prior to defining
* the vCPU model, i.e. without doing KVM_SET_CPUID2.
*/
vm = vm_create_barebones();
vcpu = __vm_vcpu_add(vm, 0);
vcpu_sregs_get(vcpu, &sregs);
sregs.cr0 = 0;
sregs.cr4 |= calc_supported_cr4_feature_bits();
cr4 = sregs.cr4;
rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(!rc, "Failed to set supported CR4 bits (0x%lx)", cr4);
vcpu_sregs_get(vcpu, &sregs);
TEST_ASSERT(sregs.cr4 == cr4, "sregs.CR4 (0x%llx) != CR4 (0x%lx)",
sregs.cr4, cr4);
/* Verify all unsupported features are rejected by KVM. */
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_UMIP);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_LA57);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_VMXE);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_SMXE);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_FSGSBASE);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_PCIDE);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_OSXSAVE);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_SMEP);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_SMAP);
TEST_INVALID_CR_BIT(vcpu, cr4, sregs, X86_CR4_PKE);
for (i = 32; i < 64; i++)
TEST_INVALID_CR_BIT(vcpu, cr0, sregs, BIT(i));
/* NW without CD is illegal, as is PG without PE. */
TEST_INVALID_CR_BIT(vcpu, cr0, sregs, X86_CR0_NW);
TEST_INVALID_CR_BIT(vcpu, cr0, sregs, X86_CR0_PG);
kvm_vm_free(vm);
/* Create a "real" VM and verify APIC_BASE can be set. */
vm = vm_create_with_one_vcpu(&vcpu, NULL);
vcpu_sregs_get(vcpu, &sregs);
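	/*
	 * x2APIC enable (bit 10) without the global enable bit (bit 11) is an
	 * illegal combination; globally-enabled xAPIC (bit 11 only) is legal.
	 */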
sregs.apic_base = 1 << 10;
rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(rc, "Set IA32_APIC_BASE to %llx (invalid)",
sregs.apic_base);
sregs.apic_base = 1 << 11;
rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(!rc, "Couldn't set IA32_APIC_BASE to %llx (valid)",
sregs.apic_base);
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/set_sregs_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test edge cases and race conditions in kvm_recalculate_apic_map().
*/
#include <sys/ioctl.h>
#include <pthread.h>
#include <time.h>
#include "processor.h"
#include "test_util.h"
#include "kvm_util.h"
#include "apic.h"
#define TIMEOUT 5 /* seconds */
#define LAPIC_DISABLED 0
#define LAPIC_X2APIC (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)
#define MAX_XAPIC_ID 0xff
static void *race(void *arg)
{
struct kvm_lapic_state lapic = {};
struct kvm_vcpu *vcpu = arg;
while (1) {
/* Trigger kvm_recalculate_apic_map(). */
vcpu_ioctl(vcpu, KVM_SET_LAPIC, &lapic);
pthread_testcancel();
}
return NULL;
}
int main(void)
{
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
struct kvm_vcpu *vcpuN;
struct kvm_vm *vm;
pthread_t thread;
time_t t;
int i;
kvm_static_assert(KVM_MAX_VCPUS > MAX_XAPIC_ID);
/*
* Create the max number of vCPUs supported by selftests so that KVM
* has decent amount of work to do when recalculating the map, i.e. to
* make the problematic window large enough to hit.
*/
vm = vm_create_with_vcpus(KVM_MAX_VCPUS, NULL, vcpus);
/*
* Enable x2APIC on all vCPUs so that KVM doesn't bail from the recalc
* due to vCPUs having aliased xAPIC IDs (truncated to 8 bits).
*/
for (i = 0; i < KVM_MAX_VCPUS; i++)
vcpu_set_msr(vcpus[i], MSR_IA32_APICBASE, LAPIC_X2APIC);
TEST_ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0);
vcpuN = vcpus[KVM_MAX_VCPUS - 1];
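	/*
	 * Toggle the last vCPU between x2APIC and DISABLED to trigger APIC
	 * map recalculations that can race with the KVM_SET_LAPIC thread.
	 */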
for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_X2APIC);
vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_DISABLED);
}
TEST_ASSERT_EQ(pthread_cancel(thread), 0);
TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include <string.h>
#include <sys/ioctl.h>
#include "kselftest.h"
#define ARBITRARY_IO_PORT 0x2000
static struct kvm_vm *vm;
static void l2_guest_code(void)
{
/*
* Generate an exit to L0 userspace, i.e. main(), via I/O to an
* arbitrary port.
*/
asm volatile("inb %%dx, %%al"
: : [port] "d" (ARBITRARY_IO_PORT) : "rax");
}
static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
GUEST_ASSERT(load_vmcs(vmx_pages));
/* Prepare the VMCS for L2 execution. */
prepare_vmcs(vmx_pages, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
/*
	 * L2 must be run without unrestricted guest; verify that the selftests
* library hasn't enabled it. Because KVM selftests jump directly to
* 64-bit mode, unrestricted guest support isn't required.
*/
GUEST_ASSERT(!(vmreadz(CPU_BASED_VM_EXEC_CONTROL) & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) ||
!(vmreadz(SECONDARY_VM_EXEC_CONTROL) & SECONDARY_EXEC_UNRESTRICTED_GUEST));
GUEST_ASSERT(!vmlaunch());
/* L2 should triple fault after main() stuffs invalid guest state. */
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT);
GUEST_DONE();
}
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva;
struct kvm_sregs sregs;
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct ucall uc;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
/* Allocate VMX pages and shared descriptors (vmx_pages). */
vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vcpu, 1, vmx_pages_gva);
vcpu_run(vcpu);
run = vcpu->run;
/*
* The first exit to L0 userspace should be an I/O access from L2.
* Running L1 should launch L2 without triggering an exit to userspace.
*/
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
"Expected IN from port %d from L2, got port %d",
ARBITRARY_IO_PORT, run->io.port);
/*
	 * Stuff invalid guest state for L2 by making TR unusable.  The next
* KVM_RUN should induce a TRIPLE_FAULT in L2 as KVM doesn't support
* emulating invalid guest state for L2.
*/
memset(&sregs, 0, sizeof(sregs));
vcpu_sregs_get(vcpu, &sregs);
sregs.tr.unusable = 1;
vcpu_sregs_set(vcpu, &sregs);
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_DONE:
break;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
default:
TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
}
}
| linux-master | tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test for x86 KVM_CAP_HYPERV_CPUID
*
* Copyright (C) 2018, Red Hat, Inc.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
static void guest_code(void)
{
}
static bool smt_possible(void)
{
char buf[16];
FILE *f;
bool res = true;
f = fopen("/sys/devices/system/cpu/smt/control", "r");
if (f) {
if (fread(buf, sizeof(*buf), sizeof(buf), f) > 0) {
if (!strncmp(buf, "forceoff", 8) ||
!strncmp(buf, "notsupported", 12))
res = false;
}
fclose(f);
}
return res;
}
static void test_hv_cpuid(const struct kvm_cpuid2 *hv_cpuid_entries,
bool evmcs_expected)
{
int i;
int nent_expected = 10;
u32 test_val;
TEST_ASSERT(hv_cpuid_entries->nent == nent_expected,
"KVM_GET_SUPPORTED_HV_CPUID should return %d entries"
" (returned %d)",
nent_expected, hv_cpuid_entries->nent);
for (i = 0; i < hv_cpuid_entries->nent; i++) {
const struct kvm_cpuid_entry2 *entry = &hv_cpuid_entries->entries[i];
TEST_ASSERT((entry->function >= 0x40000000) &&
(entry->function <= 0x40000082),
"function %x is our of supported range",
entry->function);
TEST_ASSERT(entry->index == 0,
".index field should be zero");
TEST_ASSERT(entry->flags == 0,
".flags field should be zero");
TEST_ASSERT(!entry->padding[0] && !entry->padding[1] &&
!entry->padding[2], "padding should be zero");
switch (entry->function) {
case 0x40000000:
test_val = 0x40000082;
TEST_ASSERT(entry->eax == test_val,
"Wrong max leaf report in 0x40000000.EAX: %x"
" (evmcs=%d)",
entry->eax, evmcs_expected
);
break;
case 0x40000004:
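			/* Bit 18: NoNonArchitecturalCoreSharing */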
test_val = entry->eax & (1UL << 18);
TEST_ASSERT(!!test_val == !smt_possible(),
"NoNonArchitecturalCoreSharing bit"
" doesn't reflect SMT setting");
break;
case 0x4000000A:
TEST_ASSERT(entry->eax & (1UL << 19),
"Enlightened MSR-Bitmap should always be supported"
" 0x40000000.EAX: %x", entry->eax);
if (evmcs_expected)
TEST_ASSERT((entry->eax & 0xffff) == 0x101,
"Supported Enlightened VMCS version range is supposed to be 1:1"
" 0x40000000.EAX: %x", entry->eax);
break;
default:
break;
}
/*
* If needed for debug:
* fprintf(stdout,
* "CPUID%lx EAX=0x%lx EBX=0x%lx ECX=0x%lx EDX=0x%lx\n",
* entry->function, entry->eax, entry->ebx, entry->ecx,
* entry->edx);
*/
}
}
void test_hv_cpuid_e2big(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
static struct kvm_cpuid2 cpuid = {.nent = 0};
int ret;
if (vcpu)
ret = __vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
else
ret = __kvm_ioctl(vm->kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
TEST_ASSERT(ret == -1 && errno == E2BIG,
"%s KVM_GET_SUPPORTED_HV_CPUID didn't fail with -E2BIG when"
" it should have: %d %d", !vcpu ? "KVM" : "vCPU", ret, errno);
}
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
const struct kvm_cpuid2 *hv_cpuid_entries;
struct kvm_vcpu *vcpu;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_CPUID));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
/* Test vCPU ioctl version */
test_hv_cpuid_e2big(vm, vcpu);
hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vcpu);
test_hv_cpuid(hv_cpuid_entries, false);
free((void *)hv_cpuid_entries);
if (!kvm_cpu_has(X86_FEATURE_VMX) ||
!kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
print_skip("Enlightened VMCS is unsupported");
goto do_sys;
}
vcpu_enable_evmcs(vcpu);
hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vcpu);
test_hv_cpuid(hv_cpuid_entries, true);
free((void *)hv_cpuid_entries);
do_sys:
/* Test system ioctl version */
if (!kvm_has_cap(KVM_CAP_SYS_HYPERV_CPUID)) {
print_skip("KVM_CAP_SYS_HYPERV_CPUID not supported");
goto out;
}
test_hv_cpuid_e2big(vm, NULL);
hv_cpuid_entries = kvm_get_supported_hv_cpuid();
test_hv_cpuid(hv_cpuid_entries, kvm_cpu_has(X86_FEATURE_VMX));
out:
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hyper-V HvCallSendSyntheticClusterIpi{,Ex} tests
*
* Copyright (C) 2022, Red Hat, Inc.
*
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <pthread.h>
#include <inttypes.h>
#include "kvm_util.h"
#include "hyperv.h"
#include "test_util.h"
#include "vmx.h"
#define RECEIVER_VCPU_ID_1 2
#define RECEIVER_VCPU_ID_2 65
#define IPI_VECTOR 0xfe
static volatile uint64_t ipis_rcvd[RECEIVER_VCPU_ID_2 + 1];
struct hv_vpset {
u64 format;
u64 valid_bank_mask;
u64 bank_contents[2];
};
enum HV_GENERIC_SET_FORMAT {
HV_GENERIC_SET_SPARSE_4K,
HV_GENERIC_SET_ALL,
};
/* HvCallSendSyntheticClusterIpi hypercall */
struct hv_send_ipi {
u32 vector;
u32 reserved;
u64 cpu_mask;
};
/* HvCallSendSyntheticClusterIpiEx hypercall */
struct hv_send_ipi_ex {
u32 vector;
u32 reserved;
struct hv_vpset vp_set;
};
static inline void hv_init(vm_vaddr_t pgs_gpa)
{
wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
}
static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa)
{
u32 vcpu_id;
x2apic_enable();
hv_init(pgs_gpa);
vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
/* Signal sender vCPU we're ready */
ipis_rcvd[vcpu_id] = (u64)-1;
for (;;)
asm volatile("sti; hlt; cli");
}
static void guest_ipi_handler(struct ex_regs *regs)
{
u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
ipis_rcvd[vcpu_id]++;
wrmsr(HV_X64_MSR_EOI, 1);
}
static inline void nop_loop(void)
{
int i;
for (i = 0; i < 100000000; i++)
asm volatile("nop");
}
static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
{
struct hv_send_ipi *ipi = (struct hv_send_ipi *)hcall_page;
struct hv_send_ipi_ex *ipi_ex = (struct hv_send_ipi_ex *)hcall_page;
int stage = 1, ipis_expected[2] = {0};
hv_init(pgs_gpa);
GUEST_SYNC(stage++);
/* Wait for receiver vCPUs to come up */
while (!ipis_rcvd[RECEIVER_VCPU_ID_1] || !ipis_rcvd[RECEIVER_VCPU_ID_2])
nop_loop();
ipis_rcvd[RECEIVER_VCPU_ID_1] = ipis_rcvd[RECEIVER_VCPU_ID_2] = 0;
/* 'Slow' HvCallSendSyntheticClusterIpi to RECEIVER_VCPU_ID_1 */
ipi->vector = IPI_VECTOR;
ipi->cpu_mask = 1 << RECEIVER_VCPU_ID_1;
hyperv_hypercall(HVCALL_SEND_IPI, pgs_gpa, pgs_gpa + 4096);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
GUEST_SYNC(stage++);
/* 'Fast' HvCallSendSyntheticClusterIpi to RECEIVER_VCPU_ID_1 */
hyperv_hypercall(HVCALL_SEND_IPI | HV_HYPERCALL_FAST_BIT,
IPI_VECTOR, 1 << RECEIVER_VCPU_ID_1);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
GUEST_SYNC(stage++);
/* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_1 */
memset(hcall_page, 0, 4096);
ipi_ex->vector = IPI_VECTOR;
ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
ipi_ex->vp_set.valid_bank_mask = 1 << 0;
ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
pgs_gpa, pgs_gpa + 4096);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
GUEST_SYNC(stage++);
/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_1 */
hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 1);
hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
(1 << HV_HYPERCALL_VARHEAD_OFFSET),
IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);
GUEST_SYNC(stage++);
/* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_2 */
memset(hcall_page, 0, 4096);
ipi_ex->vector = IPI_VECTOR;
ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
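	/*
	 * RECEIVER_VCPU_ID_2 (65) lives in sparse bank 1; with only bank 1
	 * valid, bank_contents[0] holds that bank's bits.
	 */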
ipi_ex->vp_set.valid_bank_mask = 1 << 1;
ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_2 - 64);
hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
pgs_gpa, pgs_gpa + 4096);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
GUEST_SYNC(stage++);
/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_2 */
hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 1);
hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
(1 << HV_HYPERCALL_VARHEAD_OFFSET),
IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
GUEST_SYNC(stage++);
/* 'Slow' HvCallSendSyntheticClusterIpiEx to both RECEIVER_VCPU_ID_{1,2} */
memset(hcall_page, 0, 4096);
ipi_ex->vector = IPI_VECTOR;
ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
ipi_ex->vp_set.valid_bank_mask = 1 << 1 | 1;
ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
ipi_ex->vp_set.bank_contents[1] = BIT(RECEIVER_VCPU_ID_2 - 64);
hyperv_hypercall(HVCALL_SEND_IPI_EX | (2 << HV_HYPERCALL_VARHEAD_OFFSET),
pgs_gpa, pgs_gpa + 4096);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
GUEST_SYNC(stage++);
/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to both RECEIVER_VCPU_ID_{1, 2} */
hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 2);
hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
(2 << HV_HYPERCALL_VARHEAD_OFFSET),
IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
GUEST_SYNC(stage++);
/* 'Slow' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL */
memset(hcall_page, 0, 4096);
ipi_ex->vector = IPI_VECTOR;
ipi_ex->vp_set.format = HV_GENERIC_SET_ALL;
hyperv_hypercall(HVCALL_SEND_IPI_EX, pgs_gpa, pgs_gpa + 4096);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
GUEST_SYNC(stage++);
/*
* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL.
*/
ipi_ex->vp_set.valid_bank_mask = 0;
hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 2);
hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT,
IPI_VECTOR, HV_GENERIC_SET_ALL);
nop_loop();
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
GUEST_SYNC(stage++);
GUEST_DONE();
}
static void *vcpu_thread(void *arg)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *)arg;
int old, r;
r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
vcpu->id, r);
vcpu_run(vcpu);
TEST_FAIL("vCPU %u exited unexpectedly", vcpu->id);
return NULL;
}
static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
{
void *retval;
int r;
r = pthread_cancel(thread);
TEST_ASSERT(!r, "pthread_cancel on vcpu_id=%d failed with errno=%d",
vcpu->id, r);
r = pthread_join(thread, &retval);
TEST_ASSERT(!r, "pthread_join on vcpu_id=%d failed with errno=%d",
vcpu->id, r);
TEST_ASSERT(retval == PTHREAD_CANCELED,
"expected retval=%p, got %p", PTHREAD_CANCELED,
retval);
}
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
struct kvm_vcpu *vcpu[3];
vm_vaddr_t hcall_page;
pthread_t threads[2];
int stage = 1, r;
struct ucall uc;
vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);
/* Hypercall input/output */
hcall_page = vm_vaddr_alloc_pages(vm, 2);
memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
vm_init_descriptor_tables(vm);
vcpu[1] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_1, receiver_code);
vcpu_init_descriptor_tables(vcpu[1]);
vcpu_args_set(vcpu[1], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
vcpu_set_msr(vcpu[1], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_1);
vcpu_set_hv_cpuid(vcpu[1]);
vcpu[2] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_2, receiver_code);
vcpu_init_descriptor_tables(vcpu[2]);
vcpu_args_set(vcpu[2], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
vcpu_set_msr(vcpu[2], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_2);
vcpu_set_hv_cpuid(vcpu[2]);
vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler);
vcpu_args_set(vcpu[0], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
vcpu_set_hv_cpuid(vcpu[0]);
r = pthread_create(&threads[0], NULL, vcpu_thread, vcpu[1]);
TEST_ASSERT(!r, "pthread_create failed errno=%d", r);
r = pthread_create(&threads[1], NULL, vcpu_thread, vcpu[2]);
TEST_ASSERT(!r, "pthread_create failed errno=%d", errno);
while (true) {
vcpu_run(vcpu[0]);
TEST_ASSERT_KVM_EXIT_REASON(vcpu[0], KVM_EXIT_IO);
switch (get_ucall(vcpu[0], &uc)) {
case UCALL_SYNC:
TEST_ASSERT(uc.args[1] == stage,
"Unexpected stage: %ld (%d expected)\n",
uc.args[1], stage);
break;
case UCALL_DONE:
goto done;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
stage++;
}
done:
cancel_join_vcpu_thread(threads[0], vcpu[1]);
cancel_join_vcpu_thread(threads[1], vcpu[2]);
kvm_vm_free(vm);
return r;
}
| linux-master | tools/testing/selftests/kvm/x86_64/hyperv_ipi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* svm_int_ctl_test
*
* Copyright (C) 2021, Red Hat, Inc.
*
* Nested SVM testing: test simultaneous use of V_IRQ from L1 and L0.
*/
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "apic.h"
bool vintr_irq_called;
bool intr_irq_called;
#define VINTR_IRQ_NUMBER 0x20
#define INTR_IRQ_NUMBER 0x30
static void vintr_irq_handler(struct ex_regs *regs)
{
vintr_irq_called = true;
}
static void intr_irq_handler(struct ex_regs *regs)
{
x2apic_write_reg(APIC_EOI, 0x00);
intr_irq_called = true;
}
static void l2_guest_code(struct svm_test_data *svm)
{
	/*
	 * This code raises interrupt INTR_IRQ_NUMBER in the L1's LAPIC,
* and since L1 didn't enable virtual interrupt masking,
* L2 should receive it and not L1.
*
* L2 also has virtual interrupt 'VINTR_IRQ_NUMBER' pending in V_IRQ
* so it should also receive it after the following 'sti'.
*/
x2apic_write_reg(APIC_ICR,
APIC_DEST_SELF | APIC_INT_ASSERT | INTR_IRQ_NUMBER);
__asm__ __volatile__(
"sti\n"
"nop\n"
);
GUEST_ASSERT(vintr_irq_called);
GUEST_ASSERT(intr_irq_called);
__asm__ __volatile__(
"vmcall\n"
);
}
static void l1_guest_code(struct svm_test_data *svm)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
struct vmcb *vmcb = svm->vmcb;
x2apic_enable();
/* Prepare for L2 execution. */
generic_svm_setup(svm, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
/* No virtual interrupt masking */
vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
/* No intercepts for real and virtual interrupts */
vmcb->control.intercept &= ~(BIT(INTERCEPT_INTR) | BIT(INTERCEPT_VINTR));
/* Make a virtual interrupt VINTR_IRQ_NUMBER pending */
vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);
vmcb->control.int_vector = VINTR_IRQ_NUMBER;
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
GUEST_DONE();
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
vm_vaddr_t svm_gva;
struct kvm_vm *vm;
struct ucall uc;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, VINTR_IRQ_NUMBER, vintr_irq_handler);
vm_install_exception_handler(vm, INTR_IRQ_NUMBER, intr_irq_handler);
vcpu_alloc_svm(vm, &svm_gva);
vcpu_args_set(vcpu, 1, svm_gva);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
/* NOT REACHED */
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
}
done:
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/nx_huge_page_test.c
*
* Usage: to be run via nx_huge_page_test.sh, which does the necessary
* environment setup and teardown
*
* Copyright (C) 2022, Google LLC.
*/
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <time.h>
#include <test_util.h>
#include "kvm_util.h"
#include "processor.h"
#define HPAGE_SLOT 10
#define HPAGE_GPA (4UL << 30) /* 4G prevents collision w/ slot 0 */
#define HPAGE_GVA HPAGE_GPA /* GVA is arbitrary, so use GPA. */
#define PAGES_PER_2MB_HUGE_PAGE 512
#define HPAGE_SLOT_NPAGES (3 * PAGES_PER_2MB_HUGE_PAGE)
/*
* Passed by nx_huge_pages_test.sh to provide an easy warning if this test is
* being run without it.
*/
#define MAGIC_TOKEN 887563923
/*
* x86 opcode for the return instruction. Used to call into, and then
* immediately return from, memory backed with hugepages.
*/
#define RETURN_OPCODE 0xC3
/* Call the specified memory address. */
static void guest_do_CALL(uint64_t target)
{
((void (*)(void)) target)();
}
/*
* Exit the VM after each memory access so that the userspace component of the
* test can make assertions about the pages backing the VM.
*
 * See below for an explanation of how each access should affect the
* backing mappings.
*/
void guest_code(void)
{
uint64_t hpage_1 = HPAGE_GVA;
uint64_t hpage_2 = hpage_1 + (PAGE_SIZE * 512);
uint64_t hpage_3 = hpage_2 + (PAGE_SIZE * 512);
READ_ONCE(*(uint64_t *)hpage_1);
GUEST_SYNC(1);
READ_ONCE(*(uint64_t *)hpage_2);
GUEST_SYNC(2);
guest_do_CALL(hpage_1);
GUEST_SYNC(3);
guest_do_CALL(hpage_3);
GUEST_SYNC(4);
READ_ONCE(*(uint64_t *)hpage_1);
GUEST_SYNC(5);
READ_ONCE(*(uint64_t *)hpage_3);
GUEST_SYNC(6);
}
static void check_2m_page_count(struct kvm_vm *vm, int expected_pages_2m)
{
int actual_pages_2m;
actual_pages_2m = vm_get_stat(vm, "pages_2m");
TEST_ASSERT(actual_pages_2m == expected_pages_2m,
"Unexpected 2m page count. Expected %d, got %d",
expected_pages_2m, actual_pages_2m);
}
static void check_split_count(struct kvm_vm *vm, int expected_splits)
{
int actual_splits;
actual_splits = vm_get_stat(vm, "nx_lpage_splits");
TEST_ASSERT(actual_splits == expected_splits,
"Unexpected NX huge page split count. Expected %d, got %d",
expected_splits, actual_splits);
}
static void wait_for_reclaim(int reclaim_period_ms)
{
long reclaim_wait_ms;
struct timespec ts;
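	/* Wait 5x the reclaim period to give the recovery worker ample time. */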
reclaim_wait_ms = reclaim_period_ms * 5;
ts.tv_sec = reclaim_wait_ms / 1000;
ts.tv_nsec = (reclaim_wait_ms - (ts.tv_sec * 1000)) * 1000000;
nanosleep(&ts, NULL);
}
void run_test(int reclaim_period_ms, bool disable_nx_huge_pages,
bool reboot_permissions)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
uint64_t nr_bytes;
void *hva;
int r;
vm = vm_create(1);
if (disable_nx_huge_pages) {
r = __vm_disable_nx_huge_pages(vm);
if (reboot_permissions) {
TEST_ASSERT(!r, "Disabling NX huge pages should succeed if process has reboot permissions");
} else {
TEST_ASSERT(r == -1 && errno == EPERM,
"This process should not have permission to disable NX huge pages");
return;
}
}
vcpu = vm_vcpu_add(vm, 0, guest_code);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_HUGETLB,
HPAGE_GPA, HPAGE_SLOT,
HPAGE_SLOT_NPAGES, 0);
nr_bytes = HPAGE_SLOT_NPAGES * vm->page_size;
/*
* Ensure that KVM can map HPAGE_SLOT with huge pages by mapping the
* region into the guest with 2MiB pages whenever TDP is disabled (i.e.
* whenever KVM is shadowing the guest page tables).
*
* When TDP is enabled, KVM should be able to map HPAGE_SLOT with huge
* pages irrespective of the guest page size, so map with 4KiB pages
* to test that that is the case.
*/
if (kvm_is_tdp_enabled())
virt_map_level(vm, HPAGE_GVA, HPAGE_GPA, nr_bytes, PG_LEVEL_4K);
else
virt_map_level(vm, HPAGE_GVA, HPAGE_GPA, nr_bytes, PG_LEVEL_2M);
hva = addr_gpa2hva(vm, HPAGE_GPA);
memset(hva, RETURN_OPCODE, nr_bytes);
check_2m_page_count(vm, 0);
check_split_count(vm, 0);
/*
* The guest code will first read from the first hugepage, resulting
* in a huge page mapping being created.
*/
vcpu_run(vcpu);
check_2m_page_count(vm, 1);
check_split_count(vm, 0);
/*
* Then the guest code will read from the second hugepage, resulting
* in another huge page mapping being created.
*/
vcpu_run(vcpu);
check_2m_page_count(vm, 2);
check_split_count(vm, 0);
/*
* Next, the guest will execute from the first huge page, causing it
* to be remapped at 4k.
*
* If NX huge pages are disabled, this should have no effect.
*/
vcpu_run(vcpu);
check_2m_page_count(vm, disable_nx_huge_pages ? 2 : 1);
check_split_count(vm, disable_nx_huge_pages ? 0 : 1);
/*
* Executing from the third huge page (previously unaccessed) will
* cause part to be mapped at 4k.
*
* If NX huge pages are disabled, it should be mapped at 2M.
*/
vcpu_run(vcpu);
check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 1);
check_split_count(vm, disable_nx_huge_pages ? 0 : 2);
/* Reading from the first huge page again should have no effect. */
vcpu_run(vcpu);
check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 1);
check_split_count(vm, disable_nx_huge_pages ? 0 : 2);
/* Give recovery thread time to run. */
wait_for_reclaim(reclaim_period_ms);
/*
* Now that the reclaimer has run, all the split pages should be gone.
*
	 * If NX huge pages are disabled, the reclaimer will not run, so
* nothing should change from here on.
*/
check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 1);
check_split_count(vm, 0);
/*
* The 4k mapping on hpage 3 should have been removed, so check that
* reading from it causes a huge page mapping to be installed.
*/
vcpu_run(vcpu);
check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 2);
check_split_count(vm, 0);
kvm_vm_free(vm);
}
static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-p period_ms] [-t token]\n", name);
puts("");
printf(" -p: The NX reclaim period in milliseconds.\n");
printf(" -t: The magic token to indicate environment setup is done.\n");
printf(" -r: The test has reboot permissions and can disable NX huge pages.\n");
puts("");
exit(0);
}
int main(int argc, char **argv)
{
int reclaim_period_ms = 0, token = 0, opt;
bool reboot_permissions = false;
while ((opt = getopt(argc, argv, "hp:t:r")) != -1) {
switch (opt) {
case 'p':
reclaim_period_ms = atoi_positive("Reclaim period", optarg);
break;
case 't':
token = atoi_paranoid(optarg);
break;
case 'r':
reboot_permissions = true;
break;
case 'h':
default:
help(argv[0]);
break;
}
}
TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_DISABLE_NX_HUGE_PAGES));
	__TEST_REQUIRE(token == MAGIC_TOKEN,
		       "This test must be run with the magic token %d.\n"
		       "This is done by nx_huge_pages_test.sh, which\n"
		       "also handles environment setup for the test.", MAGIC_TOKEN);
run_test(reclaim_period_ms, false, reboot_permissions);
run_test(reclaim_period_ms, true, reboot_permissions);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include <signal.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include "kselftest.h"
static void guest_ud_handler(struct ex_regs *regs)
{
/* Loop on the ud2 until guest state is made invalid. */
}
static void guest_code(void)
{
asm volatile("ud2");
}
static void __run_vcpu_with_invalid_state(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION,
"Expected emulation failure, got %d\n",
run->emulation_failure.suberror);
}
static void run_vcpu_with_invalid_state(struct kvm_vcpu *vcpu)
{
/*
* Always run twice to verify KVM handles the case where _KVM_ queues
* an exception with invalid state and then exits to userspace, i.e.
* that KVM doesn't explode if userspace ignores the initial error.
*/
__run_vcpu_with_invalid_state(vcpu);
__run_vcpu_with_invalid_state(vcpu);
}
static void set_timer(void)
{
struct itimerval timer;
timer.it_value.tv_sec = 0;
timer.it_value.tv_usec = 200;
timer.it_interval = timer.it_value;
TEST_ASSERT_EQ(setitimer(ITIMER_REAL, &timer, NULL), 0);
}
static void set_or_clear_invalid_guest_state(struct kvm_vcpu *vcpu, bool set)
{
static struct kvm_sregs sregs;
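	/* CR0 is always non-zero for a 64-bit guest, so fetch sregs only once. */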
if (!sregs.cr0)
vcpu_sregs_get(vcpu, &sregs);
sregs.tr.unusable = !!set;
vcpu_sregs_set(vcpu, &sregs);
}
static void set_invalid_guest_state(struct kvm_vcpu *vcpu)
{
set_or_clear_invalid_guest_state(vcpu, true);
}
static void clear_invalid_guest_state(struct kvm_vcpu *vcpu)
{
set_or_clear_invalid_guest_state(vcpu, false);
}
static struct kvm_vcpu *get_set_sigalrm_vcpu(struct kvm_vcpu *__vcpu)
{
static struct kvm_vcpu *vcpu = NULL;
if (__vcpu)
vcpu = __vcpu;
return vcpu;
}
static void sigalrm_handler(int sig)
{
struct kvm_vcpu *vcpu = get_set_sigalrm_vcpu(NULL);
struct kvm_vcpu_events events;
TEST_ASSERT(sig == SIGALRM, "Unexpected signal = %d", sig);
vcpu_events_get(vcpu, &events);
/*
* If an exception is pending, attempt KVM_RUN with invalid guest,
* otherwise rearm the timer and keep doing so until the timer fires
* between KVM queueing an exception and re-entering the guest.
*/
if (events.exception.pending) {
set_invalid_guest_state(vcpu);
run_vcpu_with_invalid_state(vcpu);
} else {
set_timer();
}
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
TEST_REQUIRE(host_cpu_is_intel);
TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
get_set_sigalrm_vcpu(vcpu);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
/*
	 * Stuff invalid guest state by making TR unusable.  The next KVM_RUN
	 * should induce an emulation failure as KVM doesn't support emulating
	 * invalid guest state.
*/
set_invalid_guest_state(vcpu);
run_vcpu_with_invalid_state(vcpu);
/*
* Verify KVM also handles the case where userspace gains control while
* an exception is pending and stuffs invalid state. Run with valid
* guest state and a timer firing every 200us, and attempt to enter the
* guest with invalid state when the handler interrupts KVM with an
* exception pending.
*/
clear_invalid_guest_state(vcpu);
TEST_ASSERT(signal(SIGALRM, sigalrm_handler) != SIG_ERR,
"Failed to register SIGALRM handler, errno = %d (%s)",
errno, strerror(errno));
set_timer();
run_vcpu_with_invalid_state(vcpu);
}
| linux-master | tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xapic_ipi_test
*
* Copyright (C) 2020, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Test that when the APIC is in xAPIC mode, a vCPU can send an IPI to wake
* another vCPU that is halted when KVM's backing page for the APIC access
* address has been moved by mm.
*
* The test starts two vCPUs: one that sends IPIs and one that continually
* executes HLT. The sender checks that the halter has woken from the HLT and
* has reentered HLT before sending the next IPI. While the vCPUs are running,
* the host continually calls migrate_pages to move all of the process' pages
* amongst the available numa nodes on the machine.
*
 * Migration is a command line option; when used on non-numa machines the test
 * will exit with an error.  The test is still useful on non-numa machines for
 * testing IPIs.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <getopt.h>
#include <pthread.h>
#include <inttypes.h>
#include <string.h>
#include <time.h>
#include "kvm_util.h"
#include "numaif.h"
#include "processor.h"
#include "test_util.h"
#include "vmx.h"
/* Default running time for the test */
#define DEFAULT_RUN_SECS 3
/* Default delay between migrate_pages calls (microseconds) */
#define DEFAULT_DELAY_USECS 500000
/*
* Vector for IPI from sender vCPU to halting vCPU.
* Value is arbitrary and was chosen for the alternating bit pattern. Any
* value should work.
*/
#define IPI_VECTOR 0xa5
/*
* Incremented in the IPI handler. Provides evidence to the sender that the IPI
* arrived at the destination
*/
static volatile uint64_t ipis_rcvd;
/* Data struct shared between host main thread and vCPUs */
struct test_data_page {
uint32_t halter_apic_id;
volatile uint64_t hlt_count;
volatile uint64_t wake_count;
uint64_t ipis_sent;
uint64_t migrations_attempted;
uint64_t migrations_completed;
uint32_t icr;
uint32_t icr2;
uint32_t halter_tpr;
uint32_t halter_ppr;
/*
* Record local version register as a cross-check that APIC access
* worked. Value should match what KVM reports (APIC_VERSION in
* arch/x86/kvm/lapic.c). If test is failing, check that values match
* to determine whether APIC access exits are working.
*/
uint32_t halter_lvr;
};
struct thread_params {
struct test_data_page *data;
struct kvm_vcpu *vcpu;
uint64_t *pipis_rcvd; /* host address of ipis_rcvd global */
};
void verify_apic_base_addr(void)
{
uint64_t msr = rdmsr(MSR_IA32_APICBASE);
uint64_t base = GET_APIC_BASE(msr);
GUEST_ASSERT(base == APIC_DEFAULT_GPA);
}
static void halter_guest_code(struct test_data_page *data)
{
verify_apic_base_addr();
xapic_enable();
data->halter_apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));
data->halter_lvr = xapic_read_reg(APIC_LVR);
/*
	 * Loop forever HLTing and recording halts & wakes. Disable interrupts
	 * each time around to minimize the window between signaling the
	 * pending halt to the sender vCPU and executing the halt. No need to
	 * disable on the first run as this vCPU executes first and the host
	 * waits for it to signal going into the first halt before starting
	 * the sender vCPU. Record TPR and PPR for diagnostic purposes in case
	 * the test fails.
*/
for (;;) {
data->halter_tpr = xapic_read_reg(APIC_TASKPRI);
data->halter_ppr = xapic_read_reg(APIC_PROCPRI);
data->hlt_count++;
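		/*
		 * STI's one-instruction interrupt shadow keeps interrupts
		 * masked until HLT executes, so the wake-up IPI cannot be
		 * delivered (and effectively lost) between STI and HLT.
		 */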
asm volatile("sti; hlt; cli");
data->wake_count++;
}
}
/*
* Runs on halter vCPU when IPI arrives. Write an arbitrary non-zero value to
* enable diagnosing errant writes to the APIC access address backing page in
* case of test failure.
*/
static void guest_ipi_handler(struct ex_regs *regs)
{
ipis_rcvd++;
xapic_write_reg(APIC_EOI, 77);
}
static void sender_guest_code(struct test_data_page *data)
{
uint64_t last_wake_count;
uint64_t last_hlt_count;
uint64_t last_ipis_rcvd_count;
uint32_t icr_val;
uint32_t icr2_val;
uint64_t tsc_start;
verify_apic_base_addr();
xapic_enable();
/*
* Init interrupt command register for sending IPIs
*
* Delivery mode=fixed, per SDM:
* "Delivers the interrupt specified in the vector field to the target
* processor."
*
* Destination mode=physical i.e. specify target by its local APIC
* ID. This vCPU assumes that the halter vCPU has already started and
* set data->halter_apic_id.
*/
icr_val = (APIC_DEST_PHYSICAL | APIC_DM_FIXED | IPI_VECTOR);
icr2_val = SET_APIC_DEST_FIELD(data->halter_apic_id);
data->icr = icr_val;
data->icr2 = icr2_val;
last_wake_count = data->wake_count;
last_hlt_count = data->hlt_count;
last_ipis_rcvd_count = ipis_rcvd;
for (;;) {
/*
* Send IPI to halter vCPU.
* First IPI can be sent unconditionally because halter vCPU
* starts earlier.
*/
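		/*
		 * Write ICR2 (destination) before ICR; in xAPIC mode the
		 * write to ICR is what actually sends the IPI.
		 */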
xapic_write_reg(APIC_ICR2, icr2_val);
xapic_write_reg(APIC_ICR, icr_val);
data->ipis_sent++;
/*
* Wait up to ~1 sec for halter to indicate that it has:
* 1. Received the IPI
* 2. Woken up from the halt
* 3. Gone back into halt
		 * Current CPUs typically run at 2.x GHz, which is ~2
		 * billion ticks per second.
*/
tsc_start = rdtsc();
while (rdtsc() - tsc_start < 2000000000) {
if ((ipis_rcvd != last_ipis_rcvd_count) &&
(data->wake_count != last_wake_count) &&
(data->hlt_count != last_hlt_count))
break;
}
GUEST_ASSERT((ipis_rcvd != last_ipis_rcvd_count) &&
(data->wake_count != last_wake_count) &&
(data->hlt_count != last_hlt_count));
last_wake_count = data->wake_count;
last_hlt_count = data->hlt_count;
last_ipis_rcvd_count = ipis_rcvd;
}
}
static void *vcpu_thread(void *arg)
{
struct thread_params *params = (struct thread_params *)arg;
struct kvm_vcpu *vcpu = params->vcpu;
struct ucall uc;
int old;
int r;
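	/*
	 * Allow asynchronous cancellation so that pthread_cancel() can
	 * terminate this thread even while it is blocked in the KVM_RUN
	 * ioctl.
	 */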
r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
TEST_ASSERT(r == 0,
"pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
vcpu->id, r);
fprintf(stderr, "vCPU thread running vCPU %u\n", vcpu->id);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
if (get_ucall(vcpu, &uc) == UCALL_ABORT) {
TEST_ASSERT(false,
"vCPU %u exited with error: %s.\n"
"Sending vCPU sent %lu IPIs to halting vCPU\n"
"Halting vCPU halted %lu times, woke %lu times, received %lu IPIs.\n"
"Halter TPR=%#x PPR=%#x LVR=%#x\n"
"Migrations attempted: %lu\n"
"Migrations completed: %lu\n",
vcpu->id, (const char *)uc.args[0],
params->data->ipis_sent, params->data->hlt_count,
params->data->wake_count,
*params->pipis_rcvd, params->data->halter_tpr,
params->data->halter_ppr, params->data->halter_lvr,
params->data->migrations_attempted,
params->data->migrations_completed);
}
return NULL;
}
static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
{
void *retval;
int r;
r = pthread_cancel(thread);
TEST_ASSERT(r == 0,
"pthread_cancel on vcpu_id=%d failed with errno=%d",
vcpu->id, r);
r = pthread_join(thread, &retval);
TEST_ASSERT(r == 0,
"pthread_join on vcpu_id=%d failed with errno=%d",
vcpu->id, r);
TEST_ASSERT(retval == PTHREAD_CANCELED,
"expected retval=%p, got %p", PTHREAD_CANCELED,
retval);
}
void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs,
uint64_t *pipis_rcvd)
{
long pages_not_moved;
unsigned long nodemask = 0;
unsigned long nodemasks[sizeof(nodemask) * 8];
int nodes = 0;
time_t start_time, last_update, now;
time_t interval_secs = 1;
int i, r;
int from, to;
unsigned long bit;
uint64_t hlt_count;
uint64_t wake_count;
uint64_t ipis_sent;
fprintf(stderr, "Calling migrate_pages every %d microseconds\n",
delay_usecs);
/* Get set of first 64 numa nodes available */
r = get_mempolicy(NULL, &nodemask, sizeof(nodemask) * 8,
0, MPOL_F_MEMS_ALLOWED);
TEST_ASSERT(r == 0, "get_mempolicy failed errno=%d", errno);
fprintf(stderr, "Numa nodes found amongst first %lu possible nodes "
"(each 1-bit indicates node is present): %#lx\n",
sizeof(nodemask) * 8, nodemask);
	/*
	 * Init array of masks containing a single bit each, one for each
	 * available node. migrate_pages called below requires specifying
	 * nodes as bit masks.
	 */
for (i = 0, bit = 1; i < sizeof(nodemask) * 8; i++, bit <<= 1) {
if (nodemask & bit) {
nodemasks[nodes] = nodemask & bit;
nodes++;
}
}
TEST_ASSERT(nodes > 1,
"Did not find at least 2 numa nodes. Can't do migration\n");
fprintf(stderr, "Migrating amongst %d nodes found\n", nodes);
from = 0;
to = 1;
start_time = time(NULL);
last_update = start_time;
ipis_sent = data->ipis_sent;
hlt_count = data->hlt_count;
wake_count = data->wake_count;
while ((int)(time(NULL) - start_time) < run_secs) {
data->migrations_attempted++;
/*
* migrate_pages with PID=0 will migrate all pages of this
* process between the nodes specified as bitmasks. The page
* backing the APIC access address belongs to this process
* because it is allocated by KVM in the context of the
* KVM_CREATE_VCPU ioctl. If that assumption ever changes this
* test may break or give a false positive signal.
*/
pages_not_moved = migrate_pages(0, sizeof(nodemasks[from]),
&nodemasks[from],
&nodemasks[to]);
if (pages_not_moved < 0)
fprintf(stderr,
"migrate_pages failed, errno=%d\n", errno);
else if (pages_not_moved > 0)
fprintf(stderr,
"migrate_pages could not move %ld pages\n",
pages_not_moved);
else
data->migrations_completed++;
from = to;
to++;
if (to == nodes)
to = 0;
now = time(NULL);
if (((now - start_time) % interval_secs == 0) &&
(now != last_update)) {
last_update = now;
fprintf(stderr,
"%lu seconds: Migrations attempted=%lu completed=%lu, "
"IPIs sent=%lu received=%lu, HLTs=%lu wakes=%lu\n",
now - start_time, data->migrations_attempted,
data->migrations_completed,
data->ipis_sent, *pipis_rcvd,
data->hlt_count, data->wake_count);
TEST_ASSERT(ipis_sent != data->ipis_sent &&
hlt_count != data->hlt_count &&
wake_count != data->wake_count,
"IPI, HLT and wake count have not increased "
"in the last %lu seconds. "
"HLTer is likely hung.\n", interval_secs);
ipis_sent = data->ipis_sent;
hlt_count = data->hlt_count;
wake_count = data->wake_count;
}
usleep(delay_usecs);
}
}
void get_cmdline_args(int argc, char *argv[], int *run_secs,
bool *migrate, int *delay_usecs)
{
for (;;) {
int opt = getopt(argc, argv, "s:d:m");
if (opt == -1)
break;
switch (opt) {
case 's':
*run_secs = parse_size(optarg);
break;
case 'm':
*migrate = true;
break;
case 'd':
*delay_usecs = parse_size(optarg);
break;
default:
TEST_ASSERT(false,
"Usage: -s <runtime seconds>. Default is %d seconds.\n"
"-m adds calls to migrate_pages while vCPUs are running."
" Default is no migrations.\n"
"-d <delay microseconds> - delay between migrate_pages() calls."
" Default is %d microseconds.\n",
DEFAULT_RUN_SECS, DEFAULT_DELAY_USECS);
}
}
}
int main(int argc, char *argv[])
{
int r;
int wait_secs;
const int max_halter_wait = 10;
int run_secs = 0;
int delay_usecs = 0;
struct test_data_page *data;
vm_vaddr_t test_data_page_vaddr;
bool migrate = false;
pthread_t threads[2];
struct thread_params params[2];
struct kvm_vm *vm;
uint64_t *pipis_rcvd;
get_cmdline_args(argc, argv, &run_secs, &migrate, &delay_usecs);
if (run_secs <= 0)
run_secs = DEFAULT_RUN_SECS;
if (delay_usecs <= 0)
delay_usecs = DEFAULT_DELAY_USECS;
vm = vm_create_with_one_vcpu(¶ms[0].vcpu, halter_guest_code);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(params[0].vcpu);
vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler);
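	/*
	 * Identity map the APIC base address so guest virtual accesses reach
	 * the xAPIC MMIO page.
	 */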
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
params[1].vcpu = vm_vcpu_add(vm, 1, sender_guest_code);
test_data_page_vaddr = vm_vaddr_alloc_page(vm);
data = addr_gva2hva(vm, test_data_page_vaddr);
memset(data, 0, sizeof(*data));
params[0].data = data;
params[1].data = data;
vcpu_args_set(params[0].vcpu, 1, test_data_page_vaddr);
vcpu_args_set(params[1].vcpu, 1, test_data_page_vaddr);
pipis_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ipis_rcvd);
params[0].pipis_rcvd = pipis_rcvd;
params[1].pipis_rcvd = pipis_rcvd;
/* Start halter vCPU thread and wait for it to execute first HLT. */
r = pthread_create(&threads[0], NULL, vcpu_thread, ¶ms[0]);
TEST_ASSERT(r == 0,
"pthread_create halter failed errno=%d", errno);
fprintf(stderr, "Halter vCPU thread started\n");
wait_secs = 0;
while ((wait_secs < max_halter_wait) && !data->hlt_count) {
sleep(1);
wait_secs++;
}
TEST_ASSERT(data->hlt_count,
"Halter vCPU did not execute first HLT within %d seconds",
max_halter_wait);
fprintf(stderr,
"Halter vCPU thread reported its APIC ID: %u after %d seconds.\n",
data->halter_apic_id, wait_secs);
r = pthread_create(&threads[1], NULL, vcpu_thread, ¶ms[1]);
TEST_ASSERT(r == 0, "pthread_create sender failed errno=%d", errno);
fprintf(stderr,
"IPI sender vCPU thread started. Letting vCPUs run for %d seconds.\n",
run_secs);
if (!migrate)
sleep(run_secs);
else
do_migrations(data, run_secs, delay_usecs, pipis_rcvd);
/*
* Cancel threads and wait for them to stop.
*/
cancel_join_vcpu_thread(threads[0], params[0].vcpu);
cancel_join_vcpu_thread(threads[1], params[1].vcpu);
fprintf(stderr,
"Test successful after running for %d seconds.\n"
"Sending vCPU sent %lu IPIs to halting vCPU\n"
"Halting vCPU halted %lu times, woke %lu times, received %lu IPIs.\n"
"Halter APIC ID=%#x\n"
"Sender ICR value=%#x ICR2 value=%#x\n"
"Halter TPR=%#x PPR=%#x LVR=%#x\n"
"Migrations attempted: %lu\n"
"Migrations completed: %lu\n",
run_secs, data->ipis_sent,
data->hlt_count, data->wake_count, *pipis_rcvd,
data->halter_apic_id,
data->icr, data->icr2,
data->halter_tpr, data->halter_ppr, data->halter_lvr,
data->migrations_attempted, data->migrations_completed);
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020, Google LLC.
*
* Test that KVM emulates instructions in response to EPT violations when
* allow_smaller_maxphyaddr is enabled and guest.MAXPHYADDR < host.MAXPHYADDR.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include "flds_emulation.h"
#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"
#define MAXPHYADDR 36
#define MEM_REGION_GVA 0x0000123456789000
#define MEM_REGION_GPA 0x0000000700000000
#define MEM_REGION_SLOT 10
#define MEM_REGION_SIZE PAGE_SIZE
static void guest_code(bool tdp_enabled)
{
uint64_t error_code;
uint64_t vector;
vector = kvm_asm_safe_ec(FLDS_MEM_EAX, error_code, "a"(MEM_REGION_GVA));
/*
* When TDP is enabled, flds will trigger an emulation failure, exit to
* userspace, and then the selftest host "VMM" skips the instruction.
*
* When TDP is disabled, no instruction emulation is required so flds
* should generate #PF(RSVD).
*/
if (tdp_enabled) {
GUEST_ASSERT(!vector);
} else {
GUEST_ASSERT_EQ(vector, PF_VECTOR);
GUEST_ASSERT(error_code & PFERR_RSVD_MASK);
}
GUEST_DONE();
}
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
uint64_t *pte;
uint64_t *hva;
uint64_t gpa;
int rc;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_args_set(vcpu, 1, kvm_is_tdp_enabled());
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
vcpu_set_cpuid_maxphyaddr(vcpu, MAXPHYADDR);
rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE);
TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
vm_enable_cap(vm, KVM_CAP_EXIT_ON_EMULATION_FAILURE, 1);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
MEM_REGION_GPA, MEM_REGION_SLOT,
MEM_REGION_SIZE / PAGE_SIZE, 0);
gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE,
MEM_REGION_GPA, MEM_REGION_SLOT);
TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1);
hva = addr_gpa2hva(vm, MEM_REGION_GPA);
memset(hva, 0, PAGE_SIZE);
pte = vm_get_page_table_entry(vm, MEM_REGION_GVA);
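	/*
	 * Set the physical address bit at the guest's MAXPHYADDR (bit 36) in
	 * the PTE. The bit is reserved from the guest's perspective, but is a
	 * legal GPA bit for the host, whose MAXPHYADDR is larger.
	 */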
*pte |= BIT_ULL(MAXPHYADDR);
vcpu_run(vcpu);
/*
	 * When TDP is enabled, KVM must emulate in response to a guest
	 * physical address that is illegal from the guest's perspective, but
	 * legal from hardware's perspective. This should result in an
	 * emulation failure exit to userspace since KVM doesn't support
	 * emulating flds.
*/
if (kvm_is_tdp_enabled()) {
handle_flds_emulation_failure_exit(vcpu);
vcpu_run(vcpu);
}
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
break;
default:
TEST_FAIL("Unrecognized ucall: %lu\n", uc.cmd);
}
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xen_vmcall_test
*
* Copyright © 2020 Amazon.com, Inc. or its affiliates.
*
* Userspace hypercall testing
*/
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#define HCALL_REGION_GPA 0xc0000000ULL
#define HCALL_REGION_SLOT 10
#define INPUTVALUE 17
#define ARGVALUE(x) (0xdeadbeef5a5a0000UL + x)
#define RETVALUE 0xcafef00dfbfbffffUL
#define XEN_HYPERCALL_MSR 0x40000200
#define HV_GUEST_OS_ID_MSR 0x40000000
#define HV_HYPERCALL_MSR 0x40000001
#define HVCALL_SIGNAL_EVENT 0x005d
#define HV_STATUS_INVALID_ALIGNMENT 4
static void guest_code(void)
{
unsigned long rax = INPUTVALUE;
unsigned long rdi = ARGVALUE(1);
unsigned long rsi = ARGVALUE(2);
unsigned long rdx = ARGVALUE(3);
unsigned long rcx;
register unsigned long r10 __asm__("r10") = ARGVALUE(4);
register unsigned long r8 __asm__("r8") = ARGVALUE(5);
register unsigned long r9 __asm__("r9") = ARGVALUE(6);
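	/*
	 * Hypercall args 4-6 live in r10, r8 and r9, which have no dedicated
	 * inline-asm constraint letters, hence the explicit register
	 * variables above.
	 */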
/* First a direct invocation of 'vmcall' */
__asm__ __volatile__("vmcall" :
"=a"(rax) :
"a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
"r"(r10), "r"(r8), "r"(r9));
GUEST_ASSERT(rax == RETVALUE);
/* Fill in the Xen hypercall page */
__asm__ __volatile__("wrmsr" : : "c" (XEN_HYPERCALL_MSR),
"a" (HCALL_REGION_GPA & 0xffffffff),
"d" (HCALL_REGION_GPA >> 32));
/* Set Hyper-V Guest OS ID */
__asm__ __volatile__("wrmsr" : : "c" (HV_GUEST_OS_ID_MSR),
"a" (0x5a), "d" (0));
/* Hyper-V hypercall page */
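	/*
	 * Bit 0 of the value written to HV_HYPERCALL_MSR is the enable bit;
	 * the hypercall page itself occupies the second page of the region.
	 */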
u64 msrval = HCALL_REGION_GPA + PAGE_SIZE + 1;
__asm__ __volatile__("wrmsr" : : "c" (HV_HYPERCALL_MSR),
"a" (msrval & 0xffffffff),
"d" (msrval >> 32));
/* Invoke a Xen hypercall */
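	/*
	 * Each hypercall occupies a 32-byte slot in the Xen hypercall page,
	 * so hypercall N is invoked by calling into offset N * 32.
	 */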
__asm__ __volatile__("call *%1" : "=a"(rax) :
"r"(HCALL_REGION_GPA + INPUTVALUE * 32),
"a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
"r"(r10), "r"(r8), "r"(r9));
GUEST_ASSERT(rax == RETVALUE);
/* Invoke a Hyper-V hypercall */
rax = 0;
rcx = HVCALL_SIGNAL_EVENT; /* code */
rdx = 0x5a5a5a5a; /* ingpa (badly aligned) */
__asm__ __volatile__("call *%1" : "=a"(rax) :
"r"(HCALL_REGION_GPA + PAGE_SIZE),
"a"(rax), "c"(rcx), "d"(rdx),
"r"(r8));
GUEST_ASSERT(rax == HV_STATUS_INVALID_ALIGNMENT);
GUEST_DONE();
}
int main(int argc, char *argv[])
{
unsigned int xen_caps;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_set_hv_cpuid(vcpu);
struct kvm_xen_hvm_config hvmc = {
.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
.msr = XEN_HYPERCALL_MSR,
};
vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc);
/* Map a region for the hypercall pages */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
HCALL_REGION_GPA, HCALL_REGION_SLOT, 2, 0);
virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2);
for (;;) {
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
vcpu_run(vcpu);
if (run->exit_reason == KVM_EXIT_XEN) {
TEST_ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
TEST_ASSERT_EQ(run->xen.u.hcall.cpl, 0);
TEST_ASSERT_EQ(run->xen.u.hcall.longmode, 1);
TEST_ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE);
TEST_ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1));
TEST_ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2));
TEST_ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3));
TEST_ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4));
TEST_ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5));
TEST_ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6));
run->xen.u.hcall.result = RETVALUE;
continue;
}
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
}
}
done:
kvm_vm_free(vm);
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018, Red Hat, Inc.
*
* Tests for Enlightened VMCS, including nested guest state.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/bitmap.h>
#include "test_util.h"
#include "kvm_util.h"
#include "hyperv.h"
#include "vmx.h"
static int ud_count;
static void guest_ud_handler(struct ex_regs *regs)
{
ud_count++;
regs->rip += 3; /* VMLAUNCH */
}
static void guest_nmi_handler(struct ex_regs *regs)
{
}
static inline void rdmsr_from_l2(uint32_t msr)
{
/* Currently, L1 doesn't preserve GPRs during vmexits. */
__asm__ __volatile__ ("rdmsr" : : "c"(msr) :
"rax", "rbx", "rdx", "rsi", "rdi", "r8", "r9",
"r10", "r11", "r12", "r13", "r14", "r15");
}
/* Exit to L1 from L2 with RDMSR instruction */
void l2_guest_code(void)
{
u64 unused;
GUEST_SYNC(7);
GUEST_SYNC(8);
/* Forced exit to L1 upon restore */
GUEST_SYNC(9);
vmcall();
/* MSR-Bitmap tests */
rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
rdmsr_from_l2(MSR_GS_BASE); /* not intercepted */
vmcall();
rdmsr_from_l2(MSR_GS_BASE); /* intercepted */
/* L2 TLB flush tests */
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT, 0x0,
HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS);
rdmsr_from_l2(MSR_FS_BASE);
/*
	 * Note: hypercall status (RAX) is not preserved correctly by L1 after
	 * a synthetic vmexit, so use the unchecked version.
*/
__hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT, 0x0,
HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS,
&unused);
/* Done, exit to L1 and never come back. */
vmcall();
}
void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages,
vm_vaddr_t hv_hcall_page_gpa)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
wrmsr(HV_X64_MSR_HYPERCALL, hv_hcall_page_gpa);
x2apic_enable();
GUEST_SYNC(1);
GUEST_SYNC(2);
enable_vp_assist(hv_pages->vp_assist_gpa, hv_pages->vp_assist);
evmcs_enable();
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
GUEST_SYNC(3);
GUEST_ASSERT(load_evmcs(hv_pages));
GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);
GUEST_SYNC(4);
GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);
prepare_vmcs(vmx_pages, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
GUEST_SYNC(5);
GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);
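	/* VMLAUNCH must fail when the eVMCS revision id is invalid. */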
current_evmcs->revision_id = -1u;
GUEST_ASSERT(vmlaunch());
current_evmcs->revision_id = EVMCS_VERSION;
GUEST_SYNC(6);
vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
PIN_BASED_NMI_EXITING);
/* L2 TLB flush setup */
current_evmcs->partition_assist_page = hv_pages->partition_assist_gpa;
current_evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
current_evmcs->hv_vm_id = 1;
current_evmcs->hv_vp_id = 1;
current_vp_assist->nested_control.features.directhypercall = 1;
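	/*
	 * The first u32 of the partition assist page controls whether L0
	 * injects a synthetic vmexit after directly handling an L2 TLB flush
	 * hypercall; start with synthetic vmexits disabled.
	 */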
*(u32 *)(hv_pages->partition_assist) = 0;
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), NMI_VECTOR);
GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);
/*
	 * NMI forces an L2->L1 exit; resume L2 and hope that the eVMCS is
	 * up-to-date (RIP points where it should and not at the beginning
	 * of l2_guest_code()). GUEST_SYNC(9) checks that.
*/
GUEST_ASSERT(!vmresume());
GUEST_SYNC(10);
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
current_evmcs->guest_rip += 3; /* vmcall */
/* Intercept RDMSR 0xc0000100 */
vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmreadz(CPU_BASED_VM_EXEC_CONTROL) |
CPU_BASED_USE_MSR_BITMAPS);
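	/*
	 * The read bitmap for high MSRs (0xc0000000 - 0xc0001fff) starts at
	 * byte offset 0x400 of the 4KiB MSR bitmap; one bit per MSR.
	 */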
__set_bit(MSR_FS_BASE & 0x1fff, vmx_pages->msr + 0x400);
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_MSR_READ);
current_evmcs->guest_rip += 2; /* rdmsr */
/* Enable enlightened MSR bitmap */
current_evmcs->hv_enlightenments_control.msr_bitmap = 1;
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_MSR_READ);
current_evmcs->guest_rip += 2; /* rdmsr */
/* Intercept RDMSR 0xc0000101 without telling KVM about it */
__set_bit(MSR_GS_BASE & 0x1fff, vmx_pages->msr + 0x400);
/* Make sure HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP is set */
current_evmcs->hv_clean_fields |= HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
GUEST_ASSERT(!vmresume());
/* Make sure we don't see EXIT_REASON_MSR_READ here so eMSR bitmap works */
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
current_evmcs->guest_rip += 3; /* vmcall */
/* Now tell KVM we've changed MSR-Bitmap */
current_evmcs->hv_clean_fields &= ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_MSR_READ);
current_evmcs->guest_rip += 2; /* rdmsr */
/*
	 * L2 TLB flush test. The first flush hypercall (VMCALL) should be
	 * handled directly by L0, so no VMCALL exit to L1 is expected.
*/
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_MSR_READ);
current_evmcs->guest_rip += 2; /* rdmsr */
/* Enable synthetic vmexit */
*(u32 *)(hv_pages->partition_assist) = 1;
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH);
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
GUEST_SYNC(11);
/* Try enlightened vmptrld with an incorrect GPA */
evmcs_vmptrld(0xdeadbeef, hv_pages->enlightened_vmcs);
GUEST_ASSERT(vmlaunch());
GUEST_ASSERT(ud_count == 1);
GUEST_DONE();
}
void inject_nmi(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_events events;
vcpu_events_get(vcpu, &events);
events.nmi.pending = 1;
events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
vcpu_events_set(vcpu, &events);
}
static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm,
struct kvm_vcpu *vcpu)
{
struct kvm_regs regs1, regs2;
struct kvm_x86_state *state;
state = vcpu_save_state(vcpu);
memset(®s1, 0, sizeof(regs1));
vcpu_regs_get(vcpu, ®s1);
kvm_vm_release(vm);
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
vcpu_set_hv_cpuid(vcpu);
vcpu_enable_evmcs(vcpu);
vcpu_load_state(vcpu, state);
kvm_x86_state_cleanup(state);
memset(®s2, 0, sizeof(regs2));
vcpu_regs_get(vcpu, ®s2);
TEST_ASSERT(!memcmp(®s1, ®s2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
return vcpu;
}
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0;
vm_vaddr_t hcall_page;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
int stage;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
hcall_page = vm_vaddr_alloc_pages(vm, 1);
memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize());
vcpu_set_hv_cpuid(vcpu);
vcpu_enable_evmcs(vcpu);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
vcpu_args_set(vcpu, 3, vmx_pages_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page));
vcpu_set_msr(vcpu, HV_X64_MSR_VP_INDEX, vcpu->id);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
pr_info("Running L1 which uses EVMCS to run L2\n");
for (stage = 1;; stage++) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
/* UCALL_SYNC is handled here. */
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
stage, (ulong)uc.args[1]);
vcpu = save_restore_vm(vm, vcpu);
/* Force immediate L2->L1 exit before resuming */
if (stage == 8) {
pr_info("Injecting NMI into L1 before L2 had a chance to run after restore\n");
inject_nmi(vcpu);
}
/*
* Do KVM_GET_NESTED_STATE/KVM_SET_NESTED_STATE for a freshly
* restored VM (before the first KVM_RUN) to check that
* KVM_STATE_NESTED_EVMCS is not lost.
*/
if (stage == 9) {
pr_info("Trying extra KVM_GET_NESTED_STATE/KVM_SET_NESTED_STATE cycle\n");
vcpu = save_restore_vm(vm, vcpu);
}
}
done:
kvm_vm_free(vm);
}
| linux-master | tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c |
/*
* mmio_warning_test
*
* Copyright (C) 2019, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Test that we don't get a kernel warning when we call KVM_RUN after a
* triple fault occurs. To get the triple fault to occur we call KVM_RUN
 * on a VCPU that hasn't been properly set up.
*
*/
#define _GNU_SOURCE
#include <fcntl.h>
#include <kvm_util.h>
#include <linux/kvm.h>
#include <processor.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <test_util.h>
#include <unistd.h>
#define NTHREAD 4
#define NPROCESS 5
struct thread_context {
int kvmcpu;
struct kvm_run *run;
};
void *thr(void *arg)
{
struct thread_context *tc = (struct thread_context *)arg;
int res;
int kvmcpu = tc->kvmcpu;
struct kvm_run *run = tc->run;
res = ioctl(kvmcpu, KVM_RUN, 0);
pr_info("ret1=%d exit_reason=%d suberror=%d\n",
res, run->exit_reason, run->internal.suberror);
return 0;
}
void test(void)
{
int i, kvm, kvmvm, kvmcpu;
pthread_t th[NTHREAD];
struct kvm_run *run;
struct thread_context tc;
kvm = open("/dev/kvm", O_RDWR);
TEST_ASSERT(kvm != -1, "failed to open /dev/kvm");
kvmvm = __kvm_ioctl(kvm, KVM_CREATE_VM, NULL);
TEST_ASSERT(kvmvm > 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, kvmvm));
kvmcpu = ioctl(kvmvm, KVM_CREATE_VCPU, 0);
TEST_ASSERT(kvmcpu != -1, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, kvmcpu));
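	/*
	 * Map the vCPU's shared kvm_run structure. Note: a robust caller
	 * would size this mapping via KVM_GET_VCPU_MMAP_SIZE rather than
	 * assuming 4096 bytes.
	 */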
run = (struct kvm_run *)mmap(0, 4096, PROT_READ|PROT_WRITE, MAP_SHARED,
kvmcpu, 0);
tc.kvmcpu = kvmcpu;
tc.run = run;
srand(getpid());
for (i = 0; i < NTHREAD; i++) {
pthread_create(&th[i], NULL, thr, (void *)(uintptr_t)&tc);
usleep(rand() % 10000);
}
for (i = 0; i < NTHREAD; i++)
pthread_join(th[i], NULL);
}
int get_warnings_count(void)
{
int warnings;
FILE *f;
f = popen("dmesg | grep \"WARNING:\" | wc -l", "r");
if (fscanf(f, "%d", &warnings) < 1)
warnings = 0;
pclose(f);
return warnings;
}
int main(void)
{
int warnings_before, warnings_after;
TEST_REQUIRE(host_cpu_is_intel);
TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
warnings_before = get_warnings_count();
for (int i = 0; i < NPROCESS; ++i) {
int status;
int pid = fork();
if (pid < 0)
exit(1);
if (pid == 0) {
test();
exit(0);
}
while (waitpid(pid, &status, __WALL) != pid)
;
}
warnings_after = get_warnings_count();
TEST_ASSERT(warnings_before == warnings_after,
"Warnings found in kernel. Run 'dmesg' to inspect them.");
return 0;
}
| linux-master | tools/testing/selftests/kvm/x86_64/mmio_warning_test.c |