| python_code | repo_name | file_path |
|---|---|---|
| 
	#include "../../boot/video-mode.c"
 | 
	linux-master | 
	arch/x86/realmode/rm/video-mode.c | 
| 
	#include "../../boot/regs.c"
 | 
	linux-master | 
	arch/x86/realmode/rm/regs.c | 
| 
	#include "../../boot/video-vesa.c"
 | 
	linux-master | 
	arch/x86/realmode/rm/video-vesa.c | 
| 
	#include "../../boot/video-vga.c"
 | 
	linux-master | 
	arch/x86/realmode/rm/video-vga.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
#include "wakeup.h"
#include "boot.h"
static void udelay(int loops)
{
	while (loops--)
		io_delay();	/* Approximately 1 us */
}
static void beep(unsigned int hz)
{
	u8 enable;
	if (!hz) {
		enable = 0x00;		/* Turn off speaker */
	} else {
		u16 div = 1193181/hz;
		outb(0xb6, 0x43);	/* Ctr 2, squarewave, load, binary */
		io_delay();
		outb(div, 0x42);	/* LSB of counter */
		io_delay();
		outb(div >> 8, 0x42);	/* MSB of counter */
		io_delay();
		enable = 0x03;		/* Turn on speaker */
	}
	inb(0x61);		/* Dummy read of System Control Port B */
	io_delay();
	outb(enable, 0x61);	/* Enable timer 2 output to speaker */
	io_delay();
}
#define DOT_HZ		880
#define DASH_HZ		587
#define US_PER_DOT	125000
/* Okay, this is totally silly, but it's kind of fun. */
static void send_morse(const char *pattern)
{
	char s;
	while ((s = *pattern++)) {
		switch (s) {
		case '.':
			beep(DOT_HZ);
			udelay(US_PER_DOT);
			beep(0);
			udelay(US_PER_DOT);
			break;
		case '-':
			beep(DASH_HZ);
			udelay(US_PER_DOT * 3);
			beep(0);
			udelay(US_PER_DOT);
			break;
		default:	/* Assume it's a space */
			udelay(US_PER_DOT * 3);
			break;
		}
	}
}
struct port_io_ops pio_ops;
void main(void)
{
	init_default_io_ops();
	/* Kill machine if structures are wrong */
	if (wakeup_header.real_magic != 0x12345678)
		while (1)
			;
	if (wakeup_header.realmode_flags & 4)
		send_morse("...-");
	if (wakeup_header.realmode_flags & 1)
		asm volatile("lcallw   $0xc000,$3");
	if (wakeup_header.realmode_flags & 2) {
		/* Need to call BIOS */
		probe_cards(0);
		set_mode(wakeup_header.video_mode);
	}
}
 | 
	linux-master | 
	arch/x86/realmode/rm/wakemain.c | 
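The `beep()` routine above programs PIT channel 2 with a divisor of the 1,193,181 Hz PIT input clock over the requested tone frequency. Below is a minimal userspace sketch of that divisor arithmetic; the PIT constant and tone frequencies come from the code above, while `pit_divisor()` is a hypothetical helper and no port I/O is performed.

```c
#include <stdio.h>
#include <stdint.h>

#define PIT_HZ   1193181u   /* PIT input clock, as used by beep() above */
#define DOT_HZ   880
#define DASH_HZ  587

/* Hypothetical helper: compute the 16-bit divisor beep() would load. */
static uint16_t pit_divisor(unsigned int hz)
{
	return (uint16_t)(PIT_HZ / hz);
}

int main(void)
{
	/* 1193181/880 = 1355 (dot), 1193181/587 = 2032 (dash) */
	printf("dot:  div=%u\n", pit_divisor(DOT_HZ));
	printf("dash: div=%u\n", pit_divisor(DASH_HZ));
	return 0;
}
```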
| 
	#include "../../boot/video-bios.c"
 | 
	linux-master | 
	arch/x86/realmode/rm/video-bios.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/entry-common.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/nospec.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#ifdef CONFIG_XEN_PV
#include <xen/xen-ops.h>
#include <xen/events.h>
#endif
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/syscall.h>
#include <asm/irq_stack.h>
#ifdef CONFIG_X86_64
static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
{
	/*
	 * Convert negative numbers to very high and thus out of range
	 * numbers for comparisons.
	 */
	unsigned int unr = nr;
	if (likely(unr < NR_syscalls)) {
		unr = array_index_nospec(unr, NR_syscalls);
		regs->ax = sys_call_table[unr](regs);
		return true;
	}
	return false;
}
static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
{
	/*
	 * Adjust the starting offset of the table, and convert numbers
	 * < __X32_SYSCALL_BIT to very high and thus out of range
	 * numbers for comparisons.
	 */
	unsigned int xnr = nr - __X32_SYSCALL_BIT;
	if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
		xnr = array_index_nospec(xnr, X32_NR_syscalls);
		regs->ax = x32_sys_call_table[xnr](regs);
		return true;
	}
	return false;
}
__visible noinstr void do_syscall_64(struct pt_regs *regs, int nr)
{
	add_random_kstack_offset();
	nr = syscall_enter_from_user_mode(regs, nr);
	instrumentation_begin();
	if (!do_syscall_x64(regs, nr) && !do_syscall_x32(regs, nr) && nr != -1) {
		/* Invalid system call, but still a system call. */
		regs->ax = __x64_sys_ni_syscall(regs);
	}
	instrumentation_end();
	syscall_exit_to_user_mode(regs);
}
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static __always_inline int syscall_32_enter(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_IA32_EMULATION))
		current_thread_info()->status |= TS_COMPAT;
	return (int)regs->orig_ax;
}
/*
 * Invoke a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
{
	/*
	 * Convert negative numbers to very high and thus out of range
	 * numbers for comparisons.
	 */
	unsigned int unr = nr;
	if (likely(unr < IA32_NR_syscalls)) {
		unr = array_index_nospec(unr, IA32_NR_syscalls);
		regs->ax = ia32_sys_call_table[unr](regs);
	} else if (nr != -1) {
		regs->ax = __ia32_sys_ni_syscall(regs);
	}
}
/* Handles int $0x80 */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
	int nr = syscall_32_enter(regs);
	add_random_kstack_offset();
	/*
	 * Subtlety here: if ptrace pokes something larger than 2^31-1 into
	 * orig_ax, the int return value truncates it. This matches
	 * the semantics of syscall_get_nr().
	 */
	nr = syscall_enter_from_user_mode(regs, nr);
	instrumentation_begin();
	do_syscall_32_irqs_on(regs, nr);
	instrumentation_end();
	syscall_exit_to_user_mode(regs);
}
static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
	int nr = syscall_32_enter(regs);
	int res;
	add_random_kstack_offset();
	/*
	 * This cannot use syscall_enter_from_user_mode() as it has to
	 * fetch EBP before invoking any of the syscall entry work
	 * functions.
	 */
	syscall_enter_from_user_mode_prepare(regs);
	instrumentation_begin();
	/* Fetch EBP from where the vDSO stashed it. */
	if (IS_ENABLED(CONFIG_X86_64)) {
		/*
		 * Micro-optimization: the pointer we're following is
		 * explicitly 32 bits, so it can't be out of range.
		 */
		res = __get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp);
	} else {
		res = get_user(*(u32 *)&regs->bp,
		       (u32 __user __force *)(unsigned long)(u32)regs->sp);
	}
	if (res) {
		/* User code screwed up. */
		regs->ax = -EFAULT;
		local_irq_disable();
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
		return false;
	}
	nr = syscall_enter_from_user_mode_work(regs, nr);
	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs, nr);
	instrumentation_end();
	syscall_exit_to_user_mode(regs);
	return true;
}
/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered using int80.
	 */
	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
					vdso_image_32.sym_int80_landing_pad;
	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;
	/* Invoke the syscall. If it failed, keep it simple: use IRET. */
	if (!__do_fast_syscall_32(regs))
		return 0;
#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible noinstr long do_SYSENTER_32(struct pt_regs *regs)
{
	/* SYSENTER loses RSP, but the vDSO saved it in RBP. */
	regs->sp = regs->bp;
	/* SYSENTER clobbers EFLAGS.IF.  Assume it was set in usermode. */
	regs->flags |= X86_EFLAGS_IF;
	return do_fast_syscall_32(regs);
}
#endif
SYSCALL_DEFINE0(ni_syscall)
{
	return -ENOSYS;
}
#ifdef CONFIG_XEN_PV
#ifndef CONFIG_PREEMPTION
/*
 * Some hypercalls issued by the toolstack can take many 10s of
 * seconds. Allow tasks running hypercalls via the privcmd driver to
 * be voluntarily preempted even if full kernel preemption is
 * disabled.
 *
 * Such preemptible hypercalls are bracketed by
 * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
 * calls.
 */
DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
/*
 * In case of scheduling the flag must be cleared and restored after
 * returning from schedule as the task might move to a different CPU.
 */
static __always_inline bool get_and_clear_inhcall(void)
{
	bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
	__this_cpu_write(xen_in_preemptible_hcall, false);
	return inhcall;
}
static __always_inline void restore_inhcall(bool inhcall)
{
	__this_cpu_write(xen_in_preemptible_hcall, inhcall);
}
#else
static __always_inline bool get_and_clear_inhcall(void) { return false; }
static __always_inline void restore_inhcall(bool inhcall) { }
#endif
static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	inc_irq_stat(irq_hv_callback_count);
	xen_evtchn_do_upcall();
	set_irq_regs(old_regs);
}
__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);
	bool inhcall;
	instrumentation_begin();
	run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
	inhcall = get_and_clear_inhcall();
	if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
		irqentry_exit_cond_resched();
		instrumentation_end();
		restore_inhcall(inhcall);
	} else {
		instrumentation_end();
		irqentry_exit(regs, state);
	}
}
#endif /* CONFIG_XEN_PV */
 | 
	linux-master | 
	arch/x86/entry/common.c | 
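`do_syscall_x64()` and `do_syscall_32_irqs_on()` above rely on casting the signed syscall number to `unsigned int`, so a single comparison rejects both negative and too-large numbers. A self-contained sketch of that trick (the table size here is illustrative, not the real `NR_syscalls`):

```c
#include <stdio.h>
#include <stdbool.h>

#define NR_SYSCALLS 451   /* illustrative table size */

/* Same trick as do_syscall_x64(): one unsigned compare handles both
 * "negative" and "too large", because e.g. -1 becomes 0xffffffff. */
static bool nr_in_range(int nr)
{
	unsigned int unr = nr;
	return unr < NR_SYSCALLS;
}

int main(void)
{
	printf("%d -> %d\n",  -1, nr_in_range(-1));   /* 0: rejected */
	printf("%d -> %d\n",   0, nr_in_range(0));    /* 1: accepted */
	printf("%d -> %d\n", 450, nr_in_range(450));  /* 1: accepted */
	printf("%d -> %d\n", 451, nr_in_range(451));  /* 0: rejected */
	return 0;
}
```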
| 
	// SPDX-License-Identifier: GPL-2.0
/* System call table for x86-64. */
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
#include <asm/syscall.h>
#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *);
#include <asm/syscalls_64.h>
#undef __SYSCALL
#define __SYSCALL(nr, sym) __x64_##sym,
asmlinkage const sys_call_ptr_t sys_call_table[] = {
#include <asm/syscalls_64.h>
};
 | 
	linux-master | 
	arch/x86/entry/syscall_64.c | 
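The table above is built with the classic X-macro pattern: `<asm/syscalls_64.h>` is included twice, first with `__SYSCALL` expanding to a prototype, then to a table entry. A self-contained sketch of the same pattern, with a hypothetical two-entry list standing in for the generated header:

```c
#include <stdio.h>

/* Stand-in for <asm/syscalls_64.h>: a list of __SYSCALL(nr, sym)
 * entries, inlined as a macro so the sketch is self-contained. */
#define SYSCALL_LIST		\
	__SYSCALL(0, sys_read)	\
	__SYSCALL(1, sys_write)

typedef long (*sys_call_ptr_t)(void);

/* First expansion: one definition per entry (stubs here). */
#define __SYSCALL(nr, sym) static long __x64_##sym(void) { return nr; }
SYSCALL_LIST
#undef __SYSCALL

/* Second expansion: one table initializer per entry. */
#define __SYSCALL(nr, sym) __x64_##sym,
static const sys_call_ptr_t sys_call_table[] = {
	SYSCALL_LIST
};
#undef __SYSCALL

int main(void)
{
	printf("%ld %ld\n", sys_call_table[0](), sys_call_table[1]());
	return 0;
}
```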
| 
	// SPDX-License-Identifier: GPL-2.0
/* System call table for i386. */
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
#include <asm/syscall.h>
#ifdef CONFIG_IA32_EMULATION
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, compat)
#else
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
#endif
#define __SYSCALL(nr, sym) extern long __ia32_##sym(const struct pt_regs *);
#include <asm/syscalls_32.h>
#undef __SYSCALL
#define __SYSCALL(nr, sym) __ia32_##sym,
__visible const sys_call_ptr_t ia32_sys_call_table[] = {
#include <asm/syscalls_32.h>
};
 | 
	linux-master | 
	arch/x86/entry/syscall_32.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/* System call table for x32 ABI. */
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
#include <asm/syscall.h>
#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *);
#include <asm/syscalls_x32.h>
#undef __SYSCALL
#define __SYSCALL(nr, sym) __x64_##sym,
asmlinkage const sys_call_ptr_t x32_sys_call_table[] = {
#include <asm/syscalls_x32.h>
};
 | 
	linux-master | 
	arch/x86/entry/syscall_x32.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Copyright 2019 ARM Limited
 *
 * 32 Bit compat layer by Stefani Seibold <[email protected]>
 *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 */
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include "../../../../lib/vdso/gettimeofday.c"
extern int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
extern __kernel_old_time_t __vdso_time(__kernel_old_time_t *t);
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	return __cvdso_gettimeofday(tv, tz);
}
int gettimeofday(struct __kernel_old_timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
__kernel_old_time_t __vdso_time(__kernel_old_time_t *t)
{
	return __cvdso_time(t);
}
__kernel_old_time_t time(__kernel_old_time_t *t)	__attribute__((weak, alias("__vdso_time")));
#if defined(CONFIG_X86_64) && !defined(BUILD_VDSO32_64)
/* both 64-bit and x32 use these */
extern int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
extern int __vdso_clock_getres(clockid_t clock, struct __kernel_timespec *res);
int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
	return __cvdso_clock_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct __kernel_timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));
int __vdso_clock_getres(clockid_t clock,
			struct __kernel_timespec *res)
{
	return __cvdso_clock_getres(clock, res);
}
int clock_getres(clockid_t, struct __kernel_timespec *)
	__attribute__((weak, alias("__vdso_clock_getres")));
#else
/* i386 only */
extern int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts);
extern int __vdso_clock_getres(clockid_t clock, struct old_timespec32 *res);
int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts)
{
	return __cvdso_clock_gettime32(clock, ts);
}
int clock_gettime(clockid_t, struct old_timespec32 *)
	__attribute__((weak, alias("__vdso_clock_gettime")));
int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts)
{
	return __cvdso_clock_gettime(clock, ts);
}
int clock_gettime64(clockid_t, struct __kernel_timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime64")));
int __vdso_clock_getres(clockid_t clock, struct old_timespec32 *res)
{
	return __cvdso_clock_getres_time32(clock, res);
}
int clock_getres(clockid_t, struct old_timespec32 *)
	__attribute__((weak, alias("__vdso_clock_getres")));
#endif
 | 
	linux-master | 
	arch/x86/entry/vdso/vclock_gettime.c | 
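Each exported vDSO function above is defined once under its `__vdso_` name and then exported under the plain name through a weak alias. A minimal sketch of that pattern with hypothetical names (works with GCC/Clang on ELF targets):

```c
#include <stdio.h>

/* The strong, "internal" symbol, like __vdso_gettimeofday above. */
int __vdso_answer(void)
{
	return 42;
}

/* Second exported name resolving to the same code, like
 * gettimeofday -> __vdso_gettimeofday in the vDSO. */
int answer(void) __attribute__((weak, alias("__vdso_answer")));

int main(void)
{
	/* Both names call the one definition. */
	printf("%d %d\n", answer(), __vdso_answer());
	return 0;
}
```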
| 
	// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/mm.h>
#include <asm/current.h>
#include <asm/traps.h>
#include <asm/vdso.h>
struct vdso_exception_table_entry {
	int insn, fixup;
};
bool fixup_vdso_exception(struct pt_regs *regs, int trapnr,
			  unsigned long error_code, unsigned long fault_addr)
{
	const struct vdso_image *image = current->mm->context.vdso_image;
	const struct vdso_exception_table_entry *extable;
	unsigned int nr_entries, i;
	unsigned long base;
	/*
	 * Do not attempt to fixup #DB or #BP.  It's impossible to identify
	 * whether or not a #DB/#BP originated from within an SGX enclave and
	 * SGX enclaves are currently the only use case for vDSO fixup.
	 */
	if (trapnr == X86_TRAP_DB || trapnr == X86_TRAP_BP)
		return false;
	if (!current->mm->context.vdso)
		return false;
	base =  (unsigned long)current->mm->context.vdso + image->extable_base;
	nr_entries = image->extable_len / (sizeof(*extable));
	extable = image->extable;
	for (i = 0; i < nr_entries; i++) {
		if (regs->ip == base + extable[i].insn) {
			regs->ip = base + extable[i].fixup;
			regs->di = trapnr;
			regs->si = error_code;
			regs->dx = fault_addr;
			return true;
		}
	}
	return false;
}
 | 
	linux-master | 
	arch/x86/entry/vdso/extable.c | 
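`fixup_vdso_exception()` stores `insn`/`fixup` as offsets from the vDSO load address, so the table stays valid wherever the image is mapped. A userspace sketch of the same offset-based lookup, with hypothetical entries:

```c
#include <stdio.h>
#include <stdint.h>

/* Entries hold offsets from a load base, as in
 * struct vdso_exception_table_entry above. */
struct extable_entry { int insn, fixup; };

static const struct extable_entry extable[] = {
	{ 0x100, 0x180 },	/* hypothetical: fault at base+0x100 -> base+0x180 */
	{ 0x200, 0x280 },
};

static uintptr_t fixup(uintptr_t base, uintptr_t ip)
{
	for (unsigned int i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
		if (ip == base + extable[i].insn)
			return base + extable[i].fixup;
	return 0;	/* no fixup: let the fault proceed */
}

int main(void)
{
	uintptr_t base = 0x400000;	/* pretend load address */
	printf("%#lx\n", (unsigned long)fixup(base, base + 0x100)); /* base+0x180 */
	printf("%#lx\n", (unsigned long)fixup(base, base + 0x300)); /* 0 */
	return 0;
}
```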
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 *
 * Fast user context implementation of getcpu()
 */
#include <linux/kernel.h>
#include <linux/getcpu.h>
#include <asm/segment.h>
#include <vdso/processor.h>
notrace long
__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
{
	vdso_read_cpunode(cpu, node);
	return 0;
}
long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
	__attribute__((weak, alias("__vdso_getcpu")));
 | 
	linux-master | 
	arch/x86/entry/vdso/vgetcpu.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * vdso2c - A vdso image preparation tool
 * Copyright (c) 2014 Andy Lutomirski and others
 *
 * vdso2c requires stripped and unstripped input.  It would be trivial
 * to fully strip the input in here, but, for reasons described below,
 * we need to write a section table.  Doing this is more or less
 * equivalent to dropping all non-allocatable sections, but it's
 * easier to let objcopy handle that instead of doing it ourselves.
 * If we ever need to do something fancier than what objcopy provides,
 * it would be straightforward to add here.
 *
 * We keep a section table for a few reasons:
 *
 * The Go runtime had a couple of bugs: it would read the section
 * table to try to figure out how many dynamic symbols there were (it
 * shouldn't have looked at the section table at all) and, if there
 * were no SHT_DYNSYM section table entry, it would use an
 * uninitialized value for the number of symbols.  An empty DYNSYM
 * table would work, but I see no reason not to write a valid one (and
 * keep full performance for old Go programs).  This hack is only
 * needed on x86_64.
 *
 * The bug was introduced on 2012-08-31 by:
 * https://code.google.com/p/go/source/detail?r=56ea40aac72b
 * and was fixed on 2014-06-13 by:
 * https://code.google.com/p/go/source/detail?r=fc1cd5e12595
 *
 * Binutils has issues debugging the vDSO: it reads the section table to
 * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
 * would break build-id if we removed the section table.  Binutils
 * also requires that shstrndx != 0.  See:
 * https://sourceware.org/bugzilla/show_bug.cgi?id=17064
 *
 * elfutils might not look for PT_NOTE if there is a section table at
 * all.  I don't know whether this matters for any practical purpose.
 *
 * For simplicity, rather than hacking up a partial section table, we
 * just write a mostly complete one.  We omit non-dynamic symbols,
 * though, since they're rather large.
 *
 * Once binutils gets fixed, we might be able to drop this for all but
 * the 64-bit vdso, since build-id only works in kernel RPMs, and
 * systems that update to new enough kernel RPMs will likely update
 * binutils in sync.  build-id has never worked for home-built kernel
 * RPMs without manual symlinking, and I suspect that no one ever does
 * that.
 */
#include <inttypes.h>
#include <stdint.h>
#include <unistd.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <err.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <tools/le_byteshift.h>
#include <linux/elf.h>
#include <linux/types.h>
#include <linux/kernel.h>
const char *outfilename;
/* Symbols that we need in vdso2c. */
enum {
	sym_vvar_start,
	sym_vvar_page,
	sym_pvclock_page,
	sym_hvclock_page,
	sym_timens_page,
};
const int special_pages[] = {
	sym_vvar_page,
	sym_pvclock_page,
	sym_hvclock_page,
	sym_timens_page,
};
struct vdso_sym {
	const char *name;
	bool export;
};
struct vdso_sym required_syms[] = {
	[sym_vvar_start] = {"vvar_start", true},
	[sym_vvar_page] = {"vvar_page", true},
	[sym_pvclock_page] = {"pvclock_page", true},
	[sym_hvclock_page] = {"hvclock_page", true},
	[sym_timens_page] = {"timens_page", true},
	{"VDSO32_NOTE_MASK", true},
	{"__kernel_vsyscall", true},
	{"__kernel_sigreturn", true},
	{"__kernel_rt_sigreturn", true},
	{"int80_landing_pad", true},
	{"vdso32_rt_sigreturn_landing_pad", true},
	{"vdso32_sigreturn_landing_pad", true},
};
__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
static void fail(const char *format, ...)
{
	va_list ap;
	va_start(ap, format);
	fprintf(stderr, "Error: ");
	vfprintf(stderr, format, ap);
	if (outfilename)
		unlink(outfilename);
	exit(1);
	va_end(ap);
}
/*
 * Evil macros for little-endian reads and writes
 */
#define GLE(x, bits, ifnot)						\
	__builtin_choose_expr(						\
		(sizeof(*(x)) == bits/8),				\
		(__typeof__(*(x)))get_unaligned_le##bits(x), ifnot)
extern void bad_get_le(void);
#define LAST_GLE(x)							\
	__builtin_choose_expr(sizeof(*(x)) == 1, *(x), bad_get_le())
#define GET_LE(x)							\
	GLE(x, 64, GLE(x, 32, GLE(x, 16, LAST_GLE(x))))
#define PLE(x, val, bits, ifnot)					\
	__builtin_choose_expr(						\
		(sizeof(*(x)) == bits/8),				\
		put_unaligned_le##bits((val), (x)), ifnot)
extern void bad_put_le(void);
#define LAST_PLE(x, val)						\
	__builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), bad_put_le())
#define PUT_LE(x, val)					\
	PLE(x, val, 64, PLE(x, val, 32, PLE(x, val, 16, LAST_PLE(x, val))))
#define NSYMS ARRAY_SIZE(required_syms)
#define BITSFUNC3(name, bits, suffix) name##bits##suffix
#define BITSFUNC2(name, bits, suffix) BITSFUNC3(name, bits, suffix)
#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS, )
#define INT_BITS BITSFUNC2(int, ELF_BITS, _t)
#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
#define ELF_BITS 64
#include "vdso2c.h"
#undef ELF_BITS
#define ELF_BITS 32
#include "vdso2c.h"
#undef ELF_BITS
static void go(void *raw_addr, size_t raw_len,
	       void *stripped_addr, size_t stripped_len,
	       FILE *outfile, const char *name)
{
	Elf64_Ehdr *hdr = (Elf64_Ehdr *)raw_addr;
	if (hdr->e_ident[EI_CLASS] == ELFCLASS64) {
		go64(raw_addr, raw_len, stripped_addr, stripped_len,
		     outfile, name);
	} else if (hdr->e_ident[EI_CLASS] == ELFCLASS32) {
		go32(raw_addr, raw_len, stripped_addr, stripped_len,
		     outfile, name);
	} else {
		fail("unknown ELF class\n");
	}
}
static void map_input(const char *name, void **addr, size_t *len, int prot)
{
	off_t tmp_len;
	int fd = open(name, O_RDONLY);
	if (fd == -1)
		err(1, "open(%s)", name);
	tmp_len = lseek(fd, 0, SEEK_END);
	if (tmp_len == (off_t)-1)
		err(1, "lseek");
	*len = (size_t)tmp_len;
	*addr = mmap(NULL, tmp_len, prot, MAP_PRIVATE, fd, 0);
	if (*addr == MAP_FAILED)
		err(1, "mmap");
	close(fd);
}
int main(int argc, char **argv)
{
	size_t raw_len, stripped_len;
	void *raw_addr, *stripped_addr;
	FILE *outfile;
	char *name, *tmp;
	int namelen;
	if (argc != 4) {
		printf("Usage: vdso2c RAW_INPUT STRIPPED_INPUT OUTPUT\n");
		return 1;
	}
	/*
	 * Figure out the struct name.  If we're writing to a .so file,
	 * generate raw output instead.
	 */
	name = strdup(argv[3]);
	namelen = strlen(name);
	if (namelen >= 3 && !strcmp(name + namelen - 3, ".so")) {
		name = NULL;
	} else {
		tmp = strrchr(name, '/');
		if (tmp)
			name = tmp + 1;
		tmp = strchr(name, '.');
		if (tmp)
			*tmp = '\0';
		for (tmp = name; *tmp; tmp++)
			if (*tmp == '-')
				*tmp = '_';
	}
	map_input(argv[1], &raw_addr, &raw_len, PROT_READ);
	map_input(argv[2], &stripped_addr, &stripped_len, PROT_READ);
	outfilename = argv[3];
	outfile = fopen(outfilename, "w");
	if (!outfile)
		err(1, "fopen(%s)", outfilename);
	go(raw_addr, raw_len, stripped_addr, stripped_len, outfile, name);
	munmap(raw_addr, raw_len);
	munmap(stripped_addr, stripped_len);
	fclose(outfile);
	return 0;
}
 | 
	linux-master | 
	arch/x86/entry/vdso/vdso2c.c | 
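The `GET_LE`/`PUT_LE` macros above use `__builtin_choose_expr` to pick an accessor by operand size at compile time. A simplified, self-contained sketch of the `GET_LE` side, with stub readers that just report the selected width instead of doing unaligned little-endian loads:

```c
#include <stdio.h>
#include <stdint.h>

static unsigned get_le16(const void *p) { (void)p; return 16; }
static unsigned get_le32(const void *p) { (void)p; return 32; }
static unsigned get_le64(const void *p) { (void)p; return 64; }

/* Same dispatch shape as vdso2c's GLE/GET_LE macros. */
#define GLE(x, bits, ifnot)						\
	__builtin_choose_expr(sizeof(*(x)) == bits/8,			\
			      get_le##bits(x), ifnot)

#define GET_LE(x) GLE(x, 64, GLE(x, 32, GLE(x, 16, (unsigned)0)))

int main(void)
{
	uint16_t a = 0; uint32_t b = 0; uint64_t c = 0;
	/* Each call statically selects the matching reader: 16 32 64. */
	printf("%u %u %u\n", GET_LE(&a), GET_LE(&b), GET_LE(&c));
	return 0;
}
```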
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002 Linus Torvalds
 * Portions based on the vdso-randomization code from exec-shield:
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file contains the needed initializations to support sysenter.
 */
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <linux/elf.h>
#include <asm/processor.h>
#include <asm/vdso.h>
#ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT	0
#else
#define VDSO_DEFAULT	1
#endif
/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso32_enabled = VDSO_DEFAULT;
static int __init vdso32_setup(char *s)
{
	vdso32_enabled = simple_strtoul(s, NULL, 0);
	if (vdso32_enabled > 1) {
		pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
		vdso32_enabled = 0;
	}
	return 1;
}
/*
 * For consistency, the argument vdso32=[012] affects the 32-bit vDSO
 * behavior on both 64-bit and 32-bit kernels.
 * On 32-bit kernels, vdso=[012] means the same thing.
 */
__setup("vdso32=", vdso32_setup);
#ifdef CONFIG_X86_32
__setup_param("vdso=", vdso_setup, vdso32_setup, 0);
#endif
#ifdef CONFIG_X86_64
#ifdef CONFIG_SYSCTL
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>
static struct ctl_table abi_table2[] = {
	{
		.procname	= "vsyscall32",
		.data		= &vdso32_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};
static __init int ia32_binfmt_init(void)
{
	register_sysctl("abi", abi_table2);
	return 0;
}
__initcall(ia32_binfmt_init);
#endif /* CONFIG_SYSCTL */
#endif	/* CONFIG_X86_64 */
 | 
	linux-master | 
	arch/x86/entry/vdso/vdso32-setup.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <clocksource/hyperv_timer.h>
#undef _ASM_X86_VVAR_H
#define EMIT_VVAR(name, offset)	\
	const size_t name ## _offset = offset;
#include <asm/vvar.h>
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page + _vdso_data_offset);
}
#undef EMIT_VVAR
unsigned int vclocks_used __read_mostly;
#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif
int __init init_vdso_image(const struct vdso_image *image)
{
	BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);
	BUG_ON(image->size % PAGE_SIZE != 0);
	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
	return 0;
}
static const struct vm_special_mapping vvar_mapping;
struct linux_binprm;
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;
	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}
static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;
		/* Fixing userspace landing - look at do_fast_syscall_32 */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = current->mm->context.vdso_image;
	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;
	return 0;
}
#ifdef CONFIG_TIME_NS
/*
 * The vvar page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);
	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vvar_mapping))
			zap_vma_pages(vma);
	}
	mmap_read_unlock(mm);
	return 0;
}
#endif
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	unsigned long pfn;
	long sym_offset;
	if (!image)
		return VM_FAULT_SIGBUS;
	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;
	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;
	if (sym_offset == image->sym_vvar_page) {
		struct page *timens_page = find_timens_vvar_page(vma);
		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the sym_vvar_page offset and
		 * the real VVAR page is mapped with the sym_timens_page
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (timens_page) {
			unsigned long addr;
			vm_fault_t err;
			/*
			 * Optimization: inside a time namespace, pre-fault
			 * the real VVAR page too. The timens page only holds
			 * clock offsets into VVAR, so the vDSO code will
			 * fault the VVAR page in shortly anyway.
			 */
			addr = vmf->address + (image->sym_timens_page - sym_offset);
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;
			pfn = page_to_pfn(timens_page);
		}
		return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		pfn = hv_get_tsc_pfn();
		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_timens_page) {
		struct page *timens_page = find_timens_vvar_page(vma);
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
		return vmf_insert_pfn(vma, vmf->address, pfn);
	}
	return VM_FAULT_SIGBUS;
}
static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};
/*
 * Add vdso and vvar mappings to current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;
	if (mmap_write_lock_killable(mm))
		return -EINTR;
	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}
	text_start = addr - image->sym_vvar_start;
	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}
	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}
up_fail:
	mmap_write_unlock(mm);
	return ret;
}
#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);
	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= DEFAULT_MAP_WINDOW)
		end = DEFAULT_MAP_WINDOW;
	end -= len;
	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);
	return addr;
}
static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
	return map_vdso(image, addr);
}
#endif
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);
	mmap_write_lock(mm);
	/*
	 * Check if the vdso blob has already been mapped and fail if so,
	 * to keep userspace from abusing install_special_mapping(), which
	 * may not do accounting and rlimits right.
	 * We could search for a VMA near context.vdso, but this is a slow
	 * path, so explicitly check all VMAs to be completely sure.
	 */
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
	}
	mmap_write_unlock(mm);
	return map_vdso(image, addr);
}
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;
	return map_vdso(&vdso_image_32, 0);
}
#endif
#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;
	return map_vdso_randomized(&vdso_image_64);
}
#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp, bool x32)
{
#ifdef CONFIG_X86_X32_ABI
	if (x32) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif
bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
{
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
	const struct vdso_image *image = current->mm->context.vdso_image;
	unsigned long vdso = (unsigned long) current->mm->context.vdso;
	if (in_ia32_syscall() && image == &vdso_image_32) {
		if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
		    regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
			return true;
	}
#endif
	return false;
}
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("vdso=", vdso_setup);
#endif /* CONFIG_X86_64 */
 | 
	linux-master | 
	arch/x86/entry/vdso/vma.c | 
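In `map_vdso()` above, `image->sym_vvar_start` is negative: the vvar pages sit just below the vdso text, so the reservation spans `image->size - image->sym_vvar_start` bytes and the text begins at `addr - image->sym_vvar_start`. A sketch of that arithmetic with hypothetical page counts:

```c
#include <stdio.h>

#define PAGE_SIZE	4096L

/* Hypothetical image: 2 text pages, vvar area 4 pages below the text
 * (sym_vvar_start is negative, as in struct vdso_image). */
static const long image_size     = 2 * PAGE_SIZE;
static const long sym_vvar_start = -4 * PAGE_SIZE;

int main(void)
{
	unsigned long addr = 0x100000;	/* start of the whole reservation */
	/* Same arithmetic as map_vdso(): total span, then text placement. */
	long span = image_size - sym_vvar_start;		/* 6 pages */
	unsigned long text_start = addr - sym_vvar_start;	/* vvar pages first */
	printf("span=%ld pages, vvar@%#lx, text@%#lx\n",
	       span / PAGE_SIZE, addr, text_start);
	return 0;
}
```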
| 
	// SPDX-License-Identifier: GPL-2.0
#define BUILD_VDSO32
#include "fake_32bit_build.h"
#include "../vclock_gettime.c"
 | 
	linux-master | 
	arch/x86/entry/vdso/vdso32/vclock_gettime.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
#include "fake_32bit_build.h"
#include "../vgetcpu.c"
 | 
	linux-master | 
	arch/x86/entry/vdso/vdso32/vgetcpu.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2014 Andy Lutomirski <[email protected]>
 *
 * Based on the original implementation which is:
 *  Copyright (C) 2001 Andrea Arcangeli <[email protected]> SuSE
 *  Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *  Parts of the original code have been moved to arch/x86/vdso/vma.c
 *
 * This file implements vsyscall emulation.  vsyscalls are a legacy ABI:
 * Userspace can request certain kernel services by calling fixed
 * addresses.  This concept is problematic:
 *
 * - It interferes with ASLR.
 * - It's awkward to write code that lives in kernel addresses but is
 *   callable by userspace at fixed addresses.
 * - The whole concept is impossible for 32-bit compat userspace.
 * - UML cannot easily virtualize a vsyscall.
 *
 * As of mid-2014, I believe that there is no new userspace code that
 * will use a vsyscall if the vDSO is present.  I hope that there will
 * soon be no new userspace code that will ever use a vsyscall.
 *
 * The code in this file emulates vsyscalls when notified of a page
 * fault to a vsyscall address.
 */
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>
#include <asm/vsyscall.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/traps.h>
#include <asm/paravirt.h>
#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"
static enum { EMULATE, XONLY, NONE } vsyscall_mode __ro_after_init =
#ifdef CONFIG_LEGACY_VSYSCALL_NONE
	NONE;
#elif defined(CONFIG_LEGACY_VSYSCALL_XONLY)
	XONLY;
#else
	#error VSYSCALL config is broken
#endif
static int __init vsyscall_setup(char *str)
{
	if (str) {
		if (!strcmp("emulate", str))
			vsyscall_mode = EMULATE;
		else if (!strcmp("xonly", str))
			vsyscall_mode = XONLY;
		else if (!strcmp("none", str))
			vsyscall_mode = NONE;
		else
			return -EINVAL;
		return 0;
	}
	return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	if (!show_unhandled_signals)
		return;
	printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
			   level, current->comm, task_pid_nr(current),
			   message, regs->ip, regs->cs,
			   regs->sp, regs->ax, regs->si, regs->di);
}
static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;
	if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
		return -EINVAL;
	nr = (addr & 0xC00UL) >> 10;
	if (nr >= 3)
		return -EINVAL;
	return nr;
}
static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
	/*
	 * XXX: if access_ok, get_user, and put_user handled
	 * sig_on_uaccess_err, this could go away.
	 */
	if (!access_ok((void __user *)ptr, size)) {
		struct thread_struct *thread = &current->thread;
		thread->error_code	= X86_PF_USER | X86_PF_WRITE;
		thread->cr2		= ptr;
		thread->trap_nr		= X86_TRAP_PF;
		force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)ptr);
		return false;
	} else {
		return true;
	}
}
bool emulate_vsyscall(unsigned long error_code,
		      struct pt_regs *regs, unsigned long address)
{
	struct task_struct *tsk;
	unsigned long caller;
	int vsyscall_nr, syscall_nr, tmp;
	int prev_sig_on_uaccess_err;
	long ret;
	unsigned long orig_dx;
	/* Write faults or kernel-privilege faults never get fixed up. */
	if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
		return false;
	if (!(error_code & X86_PF_INSTR)) {
		/* Failed vsyscall read */
		if (vsyscall_mode == EMULATE)
			return false;
		/*
		 * User code tried and failed to read the vsyscall page.
		 */
		warn_bad_vsyscall(KERN_INFO, regs, "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
		return false;
	}
	/*
	 * No point in checking CS -- the only way to get here is a user mode
	 * trap to a high address, which means that we're in 64-bit user code.
	 */
	WARN_ON_ONCE(address != regs->ip);
	if (vsyscall_mode == NONE) {
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall attempted with vsyscall=none");
		return false;
	}
	vsyscall_nr = addr_to_vsyscall_nr(address);
	trace_emulate_vsyscall(vsyscall_nr);
	if (vsyscall_nr < 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
		goto sigsegv;
	}
	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "vsyscall with bad stack (exploit attempt?)");
		goto sigsegv;
	}
	tsk = current;
	/*
	 * Check for access_ok violations and find the syscall nr.
	 *
	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
	 * 64-bit, so we don't need to special-case it here.  For all the
	 * vsyscalls, NULL means "don't write anything" not "write it at
	 * address 0".
	 */
	switch (vsyscall_nr) {
	case 0:
		if (!write_ok_or_segv(regs->di, sizeof(struct __kernel_old_timeval)) ||
		    !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
			ret = -EFAULT;
			goto check_fault;
		}
		syscall_nr = __NR_gettimeofday;
		break;
	case 1:
		if (!write_ok_or_segv(regs->di, sizeof(__kernel_old_time_t))) {
			ret = -EFAULT;
			goto check_fault;
		}
		syscall_nr = __NR_time;
		break;
	case 2:
		if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
		    !write_ok_or_segv(regs->si, sizeof(unsigned))) {
			ret = -EFAULT;
			goto check_fault;
		}
		syscall_nr = __NR_getcpu;
		break;
	}
	/*
	 * Handle seccomp.  regs->ip must be the original value.
	 * See seccomp_send_sigsys and Documentation/userspace-api/seccomp_filter.rst.
	 *
	 * We could optimize the seccomp disabled case, but performance
	 * here doesn't matter.
	 */
	regs->orig_ax = syscall_nr;
	regs->ax = -ENOSYS;
	tmp = secure_computing();
	if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
		warn_bad_vsyscall(KERN_DEBUG, regs,
				  "seccomp tried to change syscall nr or ip");
		force_exit_sig(SIGSYS);
		return true;
	}
	regs->orig_ax = -1;
	if (tmp)
		goto do_ret;  /* skip requested */
	/*
	 * With a real vsyscall, page faults cause SIGSEGV.  We want to
	 * preserve that behavior to make writing exploits harder.
	 */
	prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
	current->thread.sig_on_uaccess_err = 1;
	ret = -EFAULT;
	switch (vsyscall_nr) {
	case 0:
		/* this decodes regs->di and regs->si on its own */
		ret = __x64_sys_gettimeofday(regs);
		break;
	case 1:
		/* this decodes regs->di on its own */
		ret = __x64_sys_time(regs);
		break;
	case 2:
		/* while we could clobber regs->dx, we didn't in the past... */
		orig_dx = regs->dx;
		regs->dx = 0;
		/* this decodes regs->di, regs->si and regs->dx on its own */
		ret = __x64_sys_getcpu(regs);
		regs->dx = orig_dx;
		break;
	}
	current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;
check_fault:
	if (ret == -EFAULT) {
		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall fault (exploit attempt?)");
		/*
		 * If we failed to generate a signal for any reason,
		 * generate one here.  (This should be impossible.)
		 */
		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
				 !sigismember(&tsk->pending.signal, SIGSEGV)))
			goto sigsegv;
		return true;  /* Don't emulate the ret. */
	}
	regs->ax = ret;
do_ret:
	/* Emulate a ret instruction. */
	regs->ip = caller;
	regs->sp += 8;
	return true;
sigsegv:
	force_sig(SIGSEGV);
	return true;
}
/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static const char *gate_vma_name(struct vm_area_struct *vma)
{
	return "[vsyscall]";
}
static const struct vm_operations_struct gate_vma_ops = {
	.name = gate_vma_name,
};
static struct vm_area_struct gate_vma __ro_after_init = {
	.vm_start	= VSYSCALL_ADDR,
	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC,
	.vm_ops		= &gate_vma_ops,
};
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_COMPAT
	if (!mm || !test_bit(MM_CONTEXT_HAS_VSYSCALL, &mm->context.flags))
		return NULL;
#endif
	if (vsyscall_mode == NONE)
		return NULL;
	return &gate_vma;
}
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(mm);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}
/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
}
/*
 * The VSYSCALL page is the only user-accessible page in the kernel address
 * range.  Normally, the kernel page tables can have _PAGE_USER clear, but
 * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
 * are enabled.
 *
 * Some day we may create a "minimal" vsyscall mode in which we emulate
 * vsyscalls but leave the page not present.  If so, we skip calling
 * this.
 */
void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
#if CONFIG_PGTABLE_LEVELS >= 5
	set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
#endif
	pud = pud_offset(p4d, VSYSCALL_ADDR);
	set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
	pmd = pmd_offset(pud, VSYSCALL_ADDR);
	set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
}
void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
	/*
	 * For full emulation, the page needs to exist for real.  In
	 * execute-only mode, there is no PTE at all backing the vsyscall
	 * page.
	 */
	if (vsyscall_mode == EMULATE) {
		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
			     PAGE_KERNEL_VVAR);
		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
	}
	if (vsyscall_mode == XONLY)
		vm_flags_init(&gate_vma, VM_EXEC);
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
		     (unsigned long)VSYSCALL_ADDR);
}
 | 
	linux-master | 
	arch/x86/entry/vsyscall/vsyscall_64.c | 
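`addr_to_vsyscall_nr()` above decodes which of the three legacy entry points, spaced 1024 bytes apart in the vsyscall page, was called. A standalone sketch of the same decoding (returning -1 instead of -EINVAL; `VSYSCALL_ADDR` is the fixed ABI address, so compile this as 64-bit):

```c
#include <stdio.h>

#define VSYSCALL_ADDR 0xffffffffff600000UL

/* Mirrors addr_to_vsyscall_nr() above. */
static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
		return -1;
	nr = (addr & 0xC00UL) >> 10;
	return nr < 3 ? nr : -1;
}

int main(void)
{
	/* The three legacy entry points sit 1024 bytes apart. */
	printf("%d\n", addr_to_vsyscall_nr(VSYSCALL_ADDR + 0x000)); /*  0: gettimeofday */
	printf("%d\n", addr_to_vsyscall_nr(VSYSCALL_ADDR + 0x400)); /*  1: time */
	printf("%d\n", addr_to_vsyscall_nr(VSYSCALL_ADDR + 0x800)); /*  2: getcpu */
	printf("%d\n", addr_to_vsyscall_nr(VSYSCALL_ADDR + 0xC00)); /* -1: out of range */
	printf("%d\n", addr_to_vsyscall_nr(VSYSCALL_ADDR + 0x123)); /* -1: misaligned */
	return 0;
}
```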
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOSF-SB MailBox Interface Driver
 * Copyright (c) 2013, Intel Corporation.
 *
 * The IOSF-SB is a fabric bus available on Atom-based SoCs that uses a
 * mailbox interface (MBI) to communicate with multiple devices. This
 * driver implements access to this interface for those platforms that can
 * enumerate the device using PCI.
 */
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/capability.h>
#include <linux/pm_qos.h>
#include <linux/wait.h>
#include <asm/iosf_mbi.h>
#define PCI_DEVICE_ID_INTEL_BAYTRAIL		0x0F00
#define PCI_DEVICE_ID_INTEL_BRASWELL		0x2280
#define PCI_DEVICE_ID_INTEL_QUARK_X1000		0x0958
#define PCI_DEVICE_ID_INTEL_TANGIER		0x1170
static struct pci_dev *mbi_pdev;
static DEFINE_SPINLOCK(iosf_mbi_lock);
/**************** Generic iosf_mbi access helpers ****************/
static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
{
	return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE;
}
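/*
 * Example with illustrative numbers: op=0x10, port=0x04, offset=0x84
 * packs to 0x10048400 | MBI_ENABLE -- opcode in bits 31:24, port in
 * 23:16, low offset byte in 15:8; offset bits above the low byte go
 * into the separate MCRX register (see the MBI_MASK_HI/LO split below).
 */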
static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
{
	int result;
	if (!mbi_pdev)
		return -ENODEV;
	if (mcrx) {
		result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
						mcrx);
		if (result < 0)
			goto fail_read;
	}
	result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
	if (result < 0)
		goto fail_read;
	result = pci_read_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
	if (result < 0)
		goto fail_read;
	return 0;
fail_read:
	dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
	return result;
}
static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
{
	int result;
	if (!mbi_pdev)
		return -ENODEV;
	result = pci_write_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
	if (result < 0)
		goto fail_write;
	if (mcrx) {
		result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
						mcrx);
		if (result < 0)
			goto fail_write;
	}
	result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
	if (result < 0)
		goto fail_write;
	return 0;
fail_write:
	dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
	return result;
}
int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
{
	u32 mcr, mcrx;
	unsigned long flags;
	int ret;
	/* Access to the GFX unit is handled by GPU code */
	if (port == BT_MBI_UNIT_GFX) {
		WARN_ON(1);
		return -EPERM;
	}
	mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
	mcrx = offset & MBI_MASK_HI;
	spin_lock_irqsave(&iosf_mbi_lock, flags);
	ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
	return ret;
}
EXPORT_SYMBOL(iosf_mbi_read);
int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
{
	u32 mcr, mcrx;
	unsigned long flags;
	int ret;
	/* Access to the GFX unit is handled by GPU code */
	if (port == BT_MBI_UNIT_GFX) {
		WARN_ON(1);
		return -EPERM;
	}
	mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
	mcrx = offset & MBI_MASK_HI;
	spin_lock_irqsave(&iosf_mbi_lock, flags);
	ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
	return ret;
}
EXPORT_SYMBOL(iosf_mbi_write);
int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
{
	u32 mcr, mcrx;
	u32 value;
	unsigned long flags;
	int ret;
	/* Access to the GFX unit is handled by GPU code */
	if (port == BT_MBI_UNIT_GFX) {
		WARN_ON(1);
		return -EPERM;
	}
	mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
	mcrx = offset & MBI_MASK_HI;
	spin_lock_irqsave(&iosf_mbi_lock, flags);
	/* Read current mdr value */
	ret = iosf_mbi_pci_read_mdr(mcrx, mcr & MBI_RD_MASK, &value);
	if (ret < 0) {
		spin_unlock_irqrestore(&iosf_mbi_lock, flags);
		return ret;
	}
	/* Apply mask */
	value &= ~mask;
	mdr &= mask;
	value |= mdr;
	/* Write back */
	ret = iosf_mbi_pci_write_mdr(mcrx, mcr | MBI_WR_MASK, value);
	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
	return ret;
}
EXPORT_SYMBOL(iosf_mbi_modify);
bool iosf_mbi_available(void)
{
	/* The MBI isn't hot-pluggable. No remove routine is provided. */
	return mbi_pdev;
}
EXPORT_SYMBOL(iosf_mbi_available);
/*
 **************** P-Unit/kernel shared I2C bus arbitration ****************
 *
 * On some Bay Trail and Cherry Trail devices, the P-Unit and the kernel
 * share a single I2C bus to the PMIC. Below are helpers to arbitrate the
 * accesses between the kernel and the P-Unit.
 *
 * See arch/x86/include/asm/iosf_mbi.h for kernel-doc text for each function.
 */
#define SEMAPHORE_TIMEOUT		500
#define PUNIT_SEMAPHORE_BYT		0x7
#define PUNIT_SEMAPHORE_CHT		0x10e
#define PUNIT_SEMAPHORE_BIT		BIT(0)
#define PUNIT_SEMAPHORE_ACQUIRE		BIT(1)
static DEFINE_MUTEX(iosf_mbi_pmic_access_mutex);
static BLOCKING_NOTIFIER_HEAD(iosf_mbi_pmic_bus_access_notifier);
static DECLARE_WAIT_QUEUE_HEAD(iosf_mbi_pmic_access_waitq);
static u32 iosf_mbi_pmic_punit_access_count;
static u32 iosf_mbi_pmic_i2c_access_count;
static u32 iosf_mbi_sem_address;
static unsigned long iosf_mbi_sem_acquired;
static struct pm_qos_request iosf_mbi_pm_qos;
void iosf_mbi_punit_acquire(void)
{
	/* Wait for any I2C PMIC accesses from in kernel drivers to finish. */
	mutex_lock(&iosf_mbi_pmic_access_mutex);
	while (iosf_mbi_pmic_i2c_access_count != 0) {
		mutex_unlock(&iosf_mbi_pmic_access_mutex);
		wait_event(iosf_mbi_pmic_access_waitq,
			   iosf_mbi_pmic_i2c_access_count == 0);
		mutex_lock(&iosf_mbi_pmic_access_mutex);
	}
	/*
	 * We do not need to do anything to allow the PUNIT to safely access
	 * the PMIC, other than blocking in-kernel accesses to the PMIC.
	 */
	iosf_mbi_pmic_punit_access_count++;
	mutex_unlock(&iosf_mbi_pmic_access_mutex);
}
EXPORT_SYMBOL(iosf_mbi_punit_acquire);
void iosf_mbi_punit_release(void)
{
	bool do_wakeup;
	mutex_lock(&iosf_mbi_pmic_access_mutex);
	iosf_mbi_pmic_punit_access_count--;
	do_wakeup = iosf_mbi_pmic_punit_access_count == 0;
	mutex_unlock(&iosf_mbi_pmic_access_mutex);
	if (do_wakeup)
		wake_up(&iosf_mbi_pmic_access_waitq);
}
EXPORT_SYMBOL(iosf_mbi_punit_release);
static int iosf_mbi_get_sem(u32 *sem)
{
	int ret;
	ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
			    iosf_mbi_sem_address, sem);
	if (ret) {
		dev_err(&mbi_pdev->dev, "Error P-Unit semaphore read failed\n");
		return ret;
	}
	*sem &= PUNIT_SEMAPHORE_BIT;
	return 0;
}
static void iosf_mbi_reset_semaphore(void)
{
	if (iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ,
			    iosf_mbi_sem_address, 0, PUNIT_SEMAPHORE_BIT))
		dev_err(&mbi_pdev->dev, "Error P-Unit semaphore reset failed\n");
	cpu_latency_qos_update_request(&iosf_mbi_pm_qos, PM_QOS_DEFAULT_VALUE);
	blocking_notifier_call_chain(&iosf_mbi_pmic_bus_access_notifier,
				     MBI_PMIC_BUS_ACCESS_END, NULL);
}
/*
 * This function blocks P-Unit accesses to the PMIC I2C bus, so that kernel
 * I2C code, e.g. a fuel-gauge driver, can access it safely.
 *
 * This function may be called by I2C controller code while an I2C driver has
 * already blocked P-Unit accesses because it wants them blocked over multiple
 * i2c-transfers, e.g. for a read-modify-write of an I2C client register.
 *
 * To allow safe PMIC i2c bus accesses this function takes the following steps:
 *
 * 1) Some code sends requests to the P-Unit which make it access the PMIC
 *    I2C bus. Testing has shown that the P-Unit does not check its internal
 *    PMIC bus semaphore for these requests. Callers of these requests call
 *    iosf_mbi_punit_acquire()/_release() around their P-Unit accesses, these
 *    functions increase/decrease iosf_mbi_pmic_punit_access_count, so first
 *    we wait for iosf_mbi_pmic_punit_access_count to become 0.
 *
 * 2) Check iosf_mbi_pmic_i2c_access_count, if access has already
 *    been blocked by another caller, we only need to increment
 *    iosf_mbi_pmic_i2c_access_count and we can skip the other steps.
 *
 * 3) Some code makes such P-Unit requests from atomic contexts where it
 *    cannot call iosf_mbi_punit_acquire() as that may sleep.
 *    As the second step we call a notifier chain which allows any code
 *    needing P-Unit resources from atomic context to acquire them before
 *    we take control over the PMIC I2C bus.
 *
 * 4) When CPU cores enter C6 or C7 the P-Unit needs to talk to the PMIC
 *    if this happens while the kernel itself is accessing the PMIC I2C bus
 *    the SoC hangs.
 *    As the third step we call cpu_latency_qos_update_request() to disallow the
 *    CPU to enter C6 or C7.
 *
 * 5) The P-Unit has a PMIC bus semaphore which we can request to stop
 *    autonomous P-Unit tasks from accessing the PMIC I2C bus while we hold it.
 *    As the fourth and final step we request this semaphore and wait for our
 *    request to be acknowledged.
 */
int iosf_mbi_block_punit_i2c_access(void)
{
	unsigned long start, end;
	int ret = 0;
	u32 sem;
	if (WARN_ON(!mbi_pdev || !iosf_mbi_sem_address))
		return -ENXIO;
	mutex_lock(&iosf_mbi_pmic_access_mutex);
	while (iosf_mbi_pmic_punit_access_count != 0) {
		mutex_unlock(&iosf_mbi_pmic_access_mutex);
		wait_event(iosf_mbi_pmic_access_waitq,
			   iosf_mbi_pmic_punit_access_count == 0);
		mutex_lock(&iosf_mbi_pmic_access_mutex);
	}
	if (iosf_mbi_pmic_i2c_access_count > 0)
		goto success;
	blocking_notifier_call_chain(&iosf_mbi_pmic_bus_access_notifier,
				     MBI_PMIC_BUS_ACCESS_BEGIN, NULL);
	/*
	 * Disallow the CPU to enter C6 or C7 state, entering these states
	 * requires the P-Unit to talk to the PMIC and if this happens while
	 * we're holding the semaphore, the SoC hangs.
	 */
	cpu_latency_qos_update_request(&iosf_mbi_pm_qos, 0);
	/* host driver writes to side band semaphore register */
	ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
			     iosf_mbi_sem_address, PUNIT_SEMAPHORE_ACQUIRE);
	if (ret) {
		dev_err(&mbi_pdev->dev, "Error P-Unit semaphore request failed\n");
		goto error;
	}
	/* host driver waits for bit 0 to be set in semaphore register */
	start = jiffies;
	end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
	do {
		ret = iosf_mbi_get_sem(&sem);
		if (!ret && sem) {
			iosf_mbi_sem_acquired = jiffies;
			dev_dbg(&mbi_pdev->dev, "P-Unit semaphore acquired after %ums\n",
				jiffies_to_msecs(jiffies - start));
			goto success;
		}
		usleep_range(1000, 2000);
	} while (time_before(jiffies, end));
	ret = -ETIMEDOUT;
	dev_err(&mbi_pdev->dev, "Error P-Unit semaphore timed out, resetting\n");
error:
	iosf_mbi_reset_semaphore();
	if (!iosf_mbi_get_sem(&sem))
		dev_err(&mbi_pdev->dev, "P-Unit semaphore: %d\n", sem);
success:
	if (!WARN_ON(ret))
		iosf_mbi_pmic_i2c_access_count++;
	mutex_unlock(&iosf_mbi_pmic_access_mutex);
	return ret;
}
EXPORT_SYMBOL(iosf_mbi_block_punit_i2c_access);
void iosf_mbi_unblock_punit_i2c_access(void)
{
	bool do_wakeup = false;
	mutex_lock(&iosf_mbi_pmic_access_mutex);
	iosf_mbi_pmic_i2c_access_count--;
	if (iosf_mbi_pmic_i2c_access_count == 0) {
		iosf_mbi_reset_semaphore();
		dev_dbg(&mbi_pdev->dev, "punit semaphore held for %ums\n",
			jiffies_to_msecs(jiffies - iosf_mbi_sem_acquired));
		do_wakeup = true;
	}
	mutex_unlock(&iosf_mbi_pmic_access_mutex);
	if (do_wakeup)
		wake_up(&iosf_mbi_pmic_access_waitq);
}
EXPORT_SYMBOL(iosf_mbi_unblock_punit_i2c_access);
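/*
 * Usage sketch (illustrative only, not part of this file): an I2C
 * controller driver on these SoCs brackets each PMIC bus transfer with the
 * block/unblock calls above. The example_* names below are hypothetical.
 */
#if 0
static int example_pmic_xfer(struct i2c_adapter *adap,
			     struct i2c_msg *msgs, int num)
{
	int ret = iosf_mbi_block_punit_i2c_access();

	if (ret)
		return ret;

	ret = example_do_transfer(adap, msgs, num);	/* hypothetical */

	iosf_mbi_unblock_punit_i2c_access();
	return ret;
}
#endif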
int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
{
	int ret;
	/* Wait for the bus to go inactive before registering */
	iosf_mbi_punit_acquire();
	ret = blocking_notifier_chain_register(
				&iosf_mbi_pmic_bus_access_notifier, nb);
	iosf_mbi_punit_release();
	return ret;
}
EXPORT_SYMBOL(iosf_mbi_register_pmic_bus_access_notifier);
int iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
	struct notifier_block *nb)
{
	iosf_mbi_assert_punit_acquired();
	return blocking_notifier_chain_unregister(
				&iosf_mbi_pmic_bus_access_notifier, nb);
}
EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier_unlocked);
int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
{
	int ret;
	/* Wait for the bus to go inactive before unregistering */
	iosf_mbi_punit_acquire();
	ret = iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(nb);
	iosf_mbi_punit_release();
	return ret;
}
EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier);
void iosf_mbi_assert_punit_acquired(void)
{
	WARN_ON(iosf_mbi_pmic_punit_access_count == 0);
}
EXPORT_SYMBOL(iosf_mbi_assert_punit_acquired);
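/*
 * Usage sketch (illustrative): code that needs P-Unit resources from atomic
 * context can register a notifier and take/release those resources around
 * kernel PMIC bus accesses. The example_* names are hypothetical.
 */
#if 0
static int example_pmic_bus_notify(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	if (action == MBI_PMIC_BUS_ACCESS_BEGIN)
		example_get_resources();	/* hypothetical */
	else if (action == MBI_PMIC_BUS_ACCESS_END)
		example_put_resources();	/* hypothetical */

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_pmic_bus_notify,
};
/* ... iosf_mbi_register_pmic_bus_access_notifier(&example_nb); ... */
#endif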
/**************** iosf_mbi debug code ****************/
#ifdef CONFIG_IOSF_MBI_DEBUG
static u32	dbg_mdr;
static u32	dbg_mcr;
static u32	dbg_mcrx;
static int mcr_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
static int mcr_set(void *data, u64 val)
{
	u8 command = ((u32)val & 0xFF000000) >> 24,
	   port	   = ((u32)val & 0x00FF0000) >> 16,
	   offset  = ((u32)val & 0x0000FF00) >> 8;
	int err;
	*(u32 *)data = val;
	if (!capable(CAP_SYS_RAWIO))
		return -EACCES;
	if (command & 1u)
		err = iosf_mbi_write(port,
			       command,
			       dbg_mcrx | offset,
			       dbg_mdr);
	else
		err = iosf_mbi_read(port,
			      command,
			      dbg_mcrx | offset,
			      &dbg_mdr);
	return err;
}
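/*
 * Value layout expected by mcr_set(), as decoded above: bits [31:24] hold
 * the opcode, [23:16] the port and [15:8] the register offset. Odd opcodes
 * (e.g. MBI_REG_WRITE) write dbg_mdr to the target; even opcodes (e.g.
 * MBI_REG_READ) read it back into dbg_mdr. dbg_mcrx supplies any high
 * offset bits.
 */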
DEFINE_SIMPLE_ATTRIBUTE(iosf_mcr_fops, mcr_get, mcr_set, "%llx\n");
static struct dentry *iosf_dbg;
static void iosf_sideband_debug_init(void)
{
	iosf_dbg = debugfs_create_dir("iosf_sb", NULL);
	/* mdr */
	debugfs_create_x32("mdr", 0660, iosf_dbg, &dbg_mdr);
	/* mcrx */
	debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx);
	/* mcr - initiates mailbox transaction */
	debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops);
}
static void iosf_debugfs_init(void)
{
	iosf_sideband_debug_init();
}
static void iosf_debugfs_remove(void)
{
	debugfs_remove_recursive(iosf_dbg);
}
#else
static inline void iosf_debugfs_init(void) { }
static inline void iosf_debugfs_remove(void) { }
#endif /* CONFIG_IOSF_MBI_DEBUG */
static int iosf_mbi_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	int ret;
	ret = pci_enable_device(pdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "error: could not enable device\n");
		return ret;
	}
	mbi_pdev = pci_dev_get(pdev);
	iosf_mbi_sem_address = dev_id->driver_data;
	return 0;
}
static const struct pci_device_id iosf_mbi_pci_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, BAYTRAIL, PUNIT_SEMAPHORE_BYT) },
	{ PCI_DEVICE_DATA(INTEL, BRASWELL, PUNIT_SEMAPHORE_CHT) },
	{ PCI_DEVICE_DATA(INTEL, QUARK_X1000, 0) },
	{ PCI_DEVICE_DATA(INTEL, TANGIER, 0) },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
static struct pci_driver iosf_mbi_pci_driver = {
	.name		= "iosf_mbi_pci",
	.probe		= iosf_mbi_probe,
	.id_table	= iosf_mbi_pci_ids,
};
static int __init iosf_mbi_init(void)
{
	iosf_debugfs_init();
	cpu_latency_qos_add_request(&iosf_mbi_pm_qos, PM_QOS_DEFAULT_VALUE);
	return pci_register_driver(&iosf_mbi_pci_driver);
}
static void __exit iosf_mbi_exit(void)
{
	iosf_debugfs_remove();
	pci_unregister_driver(&iosf_mbi_pci_driver);
	pci_dev_put(mbi_pdev);
	mbi_pdev = NULL;
	cpu_latency_qos_remove_request(&iosf_mbi_pm_qos);
}
module_init(iosf_mbi_init);
module_exit(iosf_mbi_exit);
MODULE_AUTHOR("David E. Box <[email protected]>");
MODULE_DESCRIPTION("IOSF Mailbox Interface accessor");
MODULE_LICENSE("GPL v2");
 | 
	linux-master | 
	arch/x86/platform/intel/iosf_mbi.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * System Specific setup for PCEngines ALIX.
 * At the moment this means setup of GPIO control of LEDs
 * on Alix.2/3/6 boards.
 *
 * Copyright (C) 2008 Constantin Baranov <[email protected]>
 * Copyright (C) 2011 Ed Wildgoose <[email protected]>
 *                and Philip Prindeville <[email protected]>
 *
 * TODO: There are large similarities with leds-net5501.c
 * by Alessandro Zummo <[email protected]>
 * In the future leds-net5501.c should be migrated over to the
 * platform-device model used here as well.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/moduleparam.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <linux/gpio/machine.h>
#include <linux/dmi.h>
#include <asm/geode.h>
#define BIOS_SIGNATURE_TINYBIOS		0xf0000
#define BIOS_SIGNATURE_COREBOOT		0x500
#define BIOS_REGION_SIZE		0x10000
/*
 * This driver is not modular, but to keep backward compatibility
 * with existing use cases, continuing with module_param is the
 * easiest way forward (for built-in code the parameter can still be
 * set on the kernel command line, e.g. alix.force=1).
 */
static bool force = false;
module_param(force, bool, 0444);
/* FIXME: Award BIOS is not automatically detected as an ALIX platform */
MODULE_PARM_DESC(force, "Force detection as ALIX.2/ALIX.3 platform");
static struct gpio_keys_button alix_gpio_buttons[] = {
	{
		.code			= KEY_RESTART,
		.gpio			= 24,
		.active_low		= 1,
		.desc			= "Reset button",
		.type			= EV_KEY,
		.wakeup			= 0,
		.debounce_interval	= 100,
		.can_disable		= 0,
	}
};
static struct gpio_keys_platform_data alix_buttons_data = {
	.buttons			= alix_gpio_buttons,
	.nbuttons			= ARRAY_SIZE(alix_gpio_buttons),
	.poll_interval			= 20,
};
static struct platform_device alix_buttons_dev = {
	.name				= "gpio-keys-polled",
	.id				= 1,
	.dev = {
		.platform_data		= &alix_buttons_data,
	}
};
static struct gpio_led alix_leds[] = {
	{
		.name = "alix:1",
		.default_trigger = "default-on",
	},
	{
		.name = "alix:2",
		.default_trigger = "default-off",
	},
	{
		.name = "alix:3",
		.default_trigger = "default-off",
	},
};
static struct gpio_led_platform_data alix_leds_data = {
	.num_leds = ARRAY_SIZE(alix_leds),
	.leds = alix_leds,
};
static struct gpiod_lookup_table alix_leds_gpio_table = {
	.dev_id = "leds-gpio",
	.table = {
		/* The Geode GPIOs should be on the CS5535 companion chip */
		GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_LOW),
		GPIO_LOOKUP_IDX("cs5535-gpio", 25, NULL, 1, GPIO_ACTIVE_LOW),
		GPIO_LOOKUP_IDX("cs5535-gpio", 27, NULL, 2, GPIO_ACTIVE_LOW),
		{ }
	},
};
static struct platform_device alix_leds_dev = {
	.name = "leds-gpio",
	.id = -1,
	.dev.platform_data = &alix_leds_data,
};
static struct platform_device *alix_devs[] __initdata = {
	&alix_buttons_dev,
	&alix_leds_dev,
};
static void __init register_alix(void)
{
	/* Setup LED control through leds-gpio driver */
	gpiod_add_lookup_table(&alix_leds_gpio_table);
	platform_add_devices(alix_devs, ARRAY_SIZE(alix_devs));
}
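/*
 * Note: the gpiod lookup table above maps CS5535 GPIO lines 6/25/27 to
 * leds-gpio indices 0/1/2, which is why alix_leds[] carries no per-LED
 * GPIO numbers; the leds-gpio driver resolves them through the
 * "leds-gpio" dev_id at probe time.
 */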
static bool __init alix_present(unsigned long bios_phys,
				const char *alix_sig,
				size_t alix_sig_len)
{
	const size_t bios_len = BIOS_REGION_SIZE;
	const char *bios_virt;
	const char *scan_end;
	const char *p;
	char name[64];
	if (force) {
		printk(KERN_NOTICE "%s: forced to skip BIOS test, "
		       "assuming system is ALIX.2/ALIX.3\n",
		       KBUILD_MODNAME);
		return true;
	}
	bios_virt = phys_to_virt(bios_phys);
	scan_end = bios_virt + bios_len - (alix_sig_len + 2);
	for (p = bios_virt; p < scan_end; p++) {
		const char *tail;
		char *a;
		if (memcmp(p, alix_sig, alix_sig_len) != 0)
			continue;
		memcpy(name, p, sizeof(name));
		/* replace the first '\0' (e.g. the one embedded in the
		 * coreboot signature) with a space so the name prints whole */
		a = strchr(name, '\0');
		if (a)
			*a = ' ';
		/* cut the string at a newline */
		a = strchr(name, '\r');
		if (a)
			*a = '\0';
		tail = p + alix_sig_len;
		if ((tail[0] == '2' || tail[0] == '3' || tail[0] == '6')) {
			printk(KERN_INFO
			       "%s: system is recognized as \"%s\"\n",
			       KBUILD_MODNAME, name);
			return true;
		}
	}
	return false;
}
static bool __init alix_present_dmi(void)
{
	const char *vendor, *product;
	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!vendor || strcmp(vendor, "PC Engines"))
		return false;
	product = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!product || (strcmp(product, "ALIX.2D") && strcmp(product, "ALIX.6")))
		return false;
	printk(KERN_INFO "%s: system is recognized as \"%s %s\"\n",
	       KBUILD_MODNAME, vendor, product);
	return true;
}
static int __init alix_init(void)
{
	const char tinybios_sig[] = "PC Engines ALIX.";
	const char coreboot_sig[] = "PC Engines\0ALIX.";
	if (!is_geode())
		return 0;
	if (alix_present(BIOS_SIGNATURE_TINYBIOS, tinybios_sig, sizeof(tinybios_sig) - 1) ||
	    alix_present(BIOS_SIGNATURE_COREBOOT, coreboot_sig, sizeof(coreboot_sig) - 1) ||
	    alix_present_dmi())
		register_alix();
	return 0;
}
device_initcall(alix_init);
 | 
	linux-master | 
	arch/x86/platform/geode/alix.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * System Specific setup for Soekris net5501
 * At the moment this means setup of GPIO control of LEDs and buttons
 * on net5501 boards.
 *
 * Copyright (C) 2008-2009 Tower Technologies
 * Written by Alessandro Zummo <[email protected]>
 *
 * Copyright (C) 2008 Constantin Baranov <[email protected]>
 * Copyright (C) 2011 Ed Wildgoose <[email protected]>
 *                and Philip Prindeville <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <linux/gpio/machine.h>
#include <asm/geode.h>
#define BIOS_REGION_BASE		0xffff0000
#define BIOS_REGION_SIZE		0x00010000
static struct gpio_keys_button net5501_gpio_buttons[] = {
	{
		.code = KEY_RESTART,
		.gpio = 24,
		.active_low = 1,
		.desc = "Reset button",
		.type = EV_KEY,
		.wakeup = 0,
		.debounce_interval = 100,
		.can_disable = 0,
	}
};
static struct gpio_keys_platform_data net5501_buttons_data = {
	.buttons = net5501_gpio_buttons,
	.nbuttons = ARRAY_SIZE(net5501_gpio_buttons),
	.poll_interval = 20,
};
static struct platform_device net5501_buttons_dev = {
	.name = "gpio-keys-polled",
	.id = 1,
	.dev = {
		.platform_data = &net5501_buttons_data,
	}
};
static struct gpio_led net5501_leds[] = {
	{
		.name = "net5501:1",
		.default_trigger = "default-on",
	},
};
static struct gpio_led_platform_data net5501_leds_data = {
	.num_leds = ARRAY_SIZE(net5501_leds),
	.leds = net5501_leds,
};
static struct gpiod_lookup_table net5501_leds_gpio_table = {
	.dev_id = "leds-gpio",
	.table = {
		/* The Geode GPIOs should be on the CS5535 companion chip */
		GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_HIGH),
		{ }
	},
};
static struct platform_device net5501_leds_dev = {
	.name = "leds-gpio",
	.id = -1,
	.dev.platform_data = &net5501_leds_data,
};
static struct platform_device *net5501_devs[] __initdata = {
	&net5501_buttons_dev,
	&net5501_leds_dev,
};
static void __init register_net5501(void)
{
	/* Setup LED control through leds-gpio driver */
	gpiod_add_lookup_table(&net5501_leds_gpio_table);
	platform_add_devices(net5501_devs, ARRAY_SIZE(net5501_devs));
}
struct net5501_board {
	u16	offset;
	u16	len;
	char	*sig;
};
static struct net5501_board __initdata boards[] = {
	{ 0xb7b, 7, "net5501" },	/* net5501 v1.33/1.33c */
	{ 0xb1f, 7, "net5501" },	/* net5501 v1.32i */
};
static bool __init net5501_present(void)
{
	int i;
	unsigned char *rombase, *bios;
	bool found = false;
	rombase = ioremap(BIOS_REGION_BASE, BIOS_REGION_SIZE - 1);
	if (!rombase) {
		printk(KERN_ERR "%s: failed to get rombase\n", KBUILD_MODNAME);
		return found;
	}
	bios = rombase + 0x20;	/* null terminated */
	if (memcmp(bios, "comBIOS", 7))
		goto unmap;
	for (i = 0; i < ARRAY_SIZE(boards); i++) {
		unsigned char *model = rombase + boards[i].offset;
		if (!memcmp(model, boards[i].sig, boards[i].len)) {
			printk(KERN_INFO "%s: system is recognized as \"%s\"\n",
			       KBUILD_MODNAME, model);
			found = true;
			break;
		}
	}
unmap:
	iounmap(rombase);
	return found;
}
static int __init net5501_init(void)
{
	if (!is_geode())
		return 0;
	if (!net5501_present())
		return 0;
	register_net5501();
	return 0;
}
device_initcall(net5501_init);
 | 
	linux-master | 
	arch/x86/platform/geode/net5501.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * System Specific setup for Traverse Technologies GEOS.
 * At the moment this means setup of GPIO control of LEDs.
 *
 * Copyright (C) 2008 Constantin Baranov <[email protected]>
 * Copyright (C) 2011 Ed Wildgoose <[email protected]>
 *                and Philip Prindeville <[email protected]>
 *
 * TODO: There are large similarities with leds-net5501.c
 * by Alessandro Zummo <[email protected]>
 * In the future leds-net5501.c should be migrated over to the
 * platform-device model used here as well.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <linux/gpio/machine.h>
#include <linux/dmi.h>
#include <asm/geode.h>
static struct gpio_keys_button geos_gpio_buttons[] = {
	{
		.code = KEY_RESTART,
		.gpio = 3,
		.active_low = 1,
		.desc = "Reset button",
		.type = EV_KEY,
		.wakeup = 0,
		.debounce_interval = 100,
		.can_disable = 0,
	}
};
static struct gpio_keys_platform_data geos_buttons_data = {
	.buttons = geos_gpio_buttons,
	.nbuttons = ARRAY_SIZE(geos_gpio_buttons),
	.poll_interval = 20,
};
static struct platform_device geos_buttons_dev = {
	.name = "gpio-keys-polled",
	.id = 1,
	.dev = {
		.platform_data = &geos_buttons_data,
	}
};
static struct gpio_led geos_leds[] = {
	{
		.name = "geos:1",
		.default_trigger = "default-on",
	},
	{
		.name = "geos:2",
		.default_trigger = "default-off",
	},
	{
		.name = "geos:3",
		.default_trigger = "default-off",
	},
};
static struct gpio_led_platform_data geos_leds_data = {
	.num_leds = ARRAY_SIZE(geos_leds),
	.leds = geos_leds,
};
static struct gpiod_lookup_table geos_leds_gpio_table = {
	.dev_id = "leds-gpio",
	.table = {
		/* The Geode GPIOs should be on the CS5535 companion chip */
		GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_LOW),
		GPIO_LOOKUP_IDX("cs5535-gpio", 25, NULL, 1, GPIO_ACTIVE_LOW),
		GPIO_LOOKUP_IDX("cs5535-gpio", 27, NULL, 2, GPIO_ACTIVE_LOW),
		{ }
	},
};
static struct platform_device geos_leds_dev = {
	.name = "leds-gpio",
	.id = -1,
	.dev.platform_data = &geos_leds_data,
};
static struct platform_device *geos_devs[] __initdata = {
	&geos_buttons_dev,
	&geos_leds_dev,
};
static void __init register_geos(void)
{
	/* Setup LED control through leds-gpio driver */
	gpiod_add_lookup_table(&geos_leds_gpio_table);
	platform_add_devices(geos_devs, ARRAY_SIZE(geos_devs));
}
static int __init geos_init(void)
{
	const char *vendor, *product;
	if (!is_geode())
		return 0;
	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!vendor || strcmp(vendor, "Traverse Technologies"))
		return 0;
	product = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!product || strcmp(product, "Geos"))
		return 0;
	printk(KERN_INFO "%s: system is recognized as \"%s %s\"\n",
	       KBUILD_MODNAME, vendor, product);
	register_geos();
	return 0;
}
device_initcall(geos_init);
 | 
	linux-master | 
	arch/x86/platform/geode/geos.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Technologic Systems TS-5500 Single Board Computer support
 *
 * Copyright (C) 2013-2014 Savoir-faire Linux Inc.
 *	Vivien Didelot <[email protected]>
 *
 * This driver registers the Technologic Systems TS-5500 Single Board Computer
 * (SBC) and its devices, and exposes information to userspace such as jumpers'
 * state or available options. For further information about sysfs entries, see
 * Documentation/ABI/testing/sysfs-platform-ts5500.
 *
 * This code may be extended to support similar x86-based platforms.
 * Currently, the TS-5500 and TS-5400 are supported.
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/init.h>
#include <linux/platform_data/max197.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/* Product code register */
#define TS5500_PRODUCT_CODE_ADDR	0x74
#define TS5500_PRODUCT_CODE		0x60	/* TS-5500 product code */
#define TS5400_PRODUCT_CODE		0x40	/* TS-5400 product code */
/* SRAM/RS-485/ADC options, and RS-485 RTS/Automatic RS-485 flags register */
#define TS5500_SRAM_RS485_ADC_ADDR	0x75
#define TS5500_SRAM			BIT(0)	/* SRAM option */
#define TS5500_RS485			BIT(1)	/* RS-485 option */
#define TS5500_ADC			BIT(2)	/* A/D converter option */
#define TS5500_RS485_RTS		BIT(6)	/* RTS for RS-485 */
#define TS5500_RS485_AUTO		BIT(7)	/* Automatic RS-485 */
/* External Reset/Industrial Temperature Range options register */
#define TS5500_ERESET_ITR_ADDR		0x76
#define TS5500_ERESET			BIT(0)	/* External Reset option */
#define TS5500_ITR			BIT(1)	/* Indust. Temp. Range option */
/* LED/Jumpers register */
#define TS5500_LED_JP_ADDR		0x77
#define TS5500_LED			BIT(0)	/* LED flag */
#define TS5500_JP1			BIT(1)	/* Automatic CMOS */
#define TS5500_JP2			BIT(2)	/* Enable Serial Console */
#define TS5500_JP3			BIT(3)	/* Write Enable Drive A */
#define TS5500_JP4			BIT(4)	/* Fast Console (115K baud) */
#define TS5500_JP5			BIT(5)	/* User Jumper */
#define TS5500_JP6			BIT(6)	/* Console on COM1 (req. JP2) */
#define TS5500_JP7			BIT(7)	/* Undocumented (Unused) */
/* A/D Converter registers */
#define TS5500_ADC_CONV_BUSY_ADDR	0x195	/* Conversion state register */
#define TS5500_ADC_CONV_BUSY		BIT(0)
#define TS5500_ADC_CONV_INIT_LSB_ADDR	0x196	/* Start conv. / LSB register */
#define TS5500_ADC_CONV_MSB_ADDR	0x197	/* MSB register */
#define TS5500_ADC_CONV_DELAY		12	/* usec */
/**
 * struct ts5500_sbc - TS-5500 board description
 * @name:	Board model name.
 * @id:		Board product ID.
 * @sram:	Flag for SRAM option.
 * @rs485:	Flag for RS-485 option.
 * @adc:	Flag for Analog/Digital converter option.
 * @ereset:	Flag for External Reset option.
 * @itr:	Flag for Industrial Temperature Range option.
 * @jumpers:	Bitfield for jumpers' state.
 */
struct ts5500_sbc {
	const char *name;
	int	id;
	bool	sram;
	bool	rs485;
	bool	adc;
	bool	ereset;
	bool	itr;
	u8	jumpers;
};
/* Board signatures in BIOS shadow RAM */
static const struct {
	const char * const string;
	const ssize_t offset;
} ts5500_signatures[] __initconst = {
	{ "TS-5x00 AMD Elan", 0xb14 },
};
static int __init ts5500_check_signature(void)
{
	void __iomem *bios;
	int i, ret = -ENODEV;
	bios = ioremap(0xf0000, 0x10000);
	if (!bios)
		return -ENOMEM;
	for (i = 0; i < ARRAY_SIZE(ts5500_signatures); i++) {
		if (check_signature(bios + ts5500_signatures[i].offset,
				    ts5500_signatures[i].string,
				    strlen(ts5500_signatures[i].string))) {
			ret = 0;
			break;
		}
	}
	iounmap(bios);
	return ret;
}
static int __init ts5500_detect_config(struct ts5500_sbc *sbc)
{
	u8 tmp;
	int ret = 0;
	if (!request_region(TS5500_PRODUCT_CODE_ADDR, 4, "ts5500"))
		return -EBUSY;
	sbc->id = inb(TS5500_PRODUCT_CODE_ADDR);
	if (sbc->id == TS5500_PRODUCT_CODE) {
		sbc->name = "TS-5500";
	} else if (sbc->id == TS5400_PRODUCT_CODE) {
		sbc->name = "TS-5400";
	} else {
		pr_err("ts5500: unknown product code 0x%x\n", sbc->id);
		ret = -ENODEV;
		goto cleanup;
	}
	tmp = inb(TS5500_SRAM_RS485_ADC_ADDR);
	sbc->sram = tmp & TS5500_SRAM;
	sbc->rs485 = tmp & TS5500_RS485;
	sbc->adc = tmp & TS5500_ADC;
	tmp = inb(TS5500_ERESET_ITR_ADDR);
	sbc->ereset = tmp & TS5500_ERESET;
	sbc->itr = tmp & TS5500_ITR;
	tmp = inb(TS5500_LED_JP_ADDR);
	sbc->jumpers = tmp & ~TS5500_LED;
cleanup:
	release_region(TS5500_PRODUCT_CODE_ADDR, 4);
	return ret;
}
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct ts5500_sbc *sbc = dev_get_drvdata(dev);
	return sprintf(buf, "%s\n", sbc->name);
}
static DEVICE_ATTR_RO(name);
static ssize_t id_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct ts5500_sbc *sbc = dev_get_drvdata(dev);
	return sprintf(buf, "0x%.2x\n", sbc->id);
}
static DEVICE_ATTR_RO(id);
static ssize_t jumpers_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct ts5500_sbc *sbc = dev_get_drvdata(dev);
	return sprintf(buf, "0x%.2x\n", sbc->jumpers >> 1);
}
static DEVICE_ATTR_RO(jumpers);
#define TS5500_ATTR_BOOL(_field)					\
	static ssize_t _field##_show(struct device *dev,		\
			struct device_attribute *attr, char *buf)	\
	{								\
		struct ts5500_sbc *sbc = dev_get_drvdata(dev);		\
									\
		return sprintf(buf, "%d\n", sbc->_field);		\
	}								\
	static DEVICE_ATTR_RO(_field)
TS5500_ATTR_BOOL(sram);
TS5500_ATTR_BOOL(rs485);
TS5500_ATTR_BOOL(adc);
TS5500_ATTR_BOOL(ereset);
TS5500_ATTR_BOOL(itr);
static struct attribute *ts5500_attributes[] = {
	&dev_attr_id.attr,
	&dev_attr_name.attr,
	&dev_attr_jumpers.attr,
	&dev_attr_sram.attr,
	&dev_attr_rs485.attr,
	&dev_attr_adc.attr,
	&dev_attr_ereset.attr,
	&dev_attr_itr.attr,
	NULL
};
static const struct attribute_group ts5500_attr_group = {
	.attrs = ts5500_attributes,
};
static struct resource ts5500_dio1_resource[] = {
	DEFINE_RES_IRQ_NAMED(7, "DIO1 interrupt"),
};
static struct platform_device ts5500_dio1_pdev = {
	.name = "ts5500-dio1",
	.id = -1,
	.resource = ts5500_dio1_resource,
	.num_resources = 1,
};
static struct resource ts5500_dio2_resource[] = {
	DEFINE_RES_IRQ_NAMED(6, "DIO2 interrupt"),
};
static struct platform_device ts5500_dio2_pdev = {
	.name = "ts5500-dio2",
	.id = -1,
	.resource = ts5500_dio2_resource,
	.num_resources = 1,
};
static void ts5500_led_set(struct led_classdev *led_cdev,
			   enum led_brightness brightness)
{
	outb(!!brightness, TS5500_LED_JP_ADDR);
}
static enum led_brightness ts5500_led_get(struct led_classdev *led_cdev)
{
	return (inb(TS5500_LED_JP_ADDR) & TS5500_LED) ? LED_FULL : LED_OFF;
}
static struct led_classdev ts5500_led_cdev = {
	.name = "ts5500:green:",
	.brightness_set = ts5500_led_set,
	.brightness_get = ts5500_led_get,
};
static int ts5500_adc_convert(u8 ctrl)
{
	u8 lsb, msb;
	/* Start conversion (ensure the 3 MSB are set to 0) */
	outb(ctrl & 0x1f, TS5500_ADC_CONV_INIT_LSB_ADDR);
	/*
	 * The platform has CPLD logic driving the A/D converter.
	 * The conversion must complete within 11 microseconds,
	 * otherwise we have to re-initiate a conversion.
	 */
	udelay(TS5500_ADC_CONV_DELAY);
	if (inb(TS5500_ADC_CONV_BUSY_ADDR) & TS5500_ADC_CONV_BUSY)
		return -EBUSY;
	/* Read the raw data */
	lsb = inb(TS5500_ADC_CONV_INIT_LSB_ADDR);
	msb = inb(TS5500_ADC_CONV_MSB_ADDR);
	return (msb << 8) | lsb;
}
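/*
 * Example (illustrative): a conversion that completes in time and returns
 * lsb=0x34, msb=0x02 yields 0x0234. The max197 driver below consumes this
 * routine through its ->convert() platform-data hook.
 */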
static struct max197_platform_data ts5500_adc_pdata = {
	.convert = ts5500_adc_convert,
};
static struct platform_device ts5500_adc_pdev = {
	.name = "max197",
	.id = -1,
	.dev = {
		.platform_data = &ts5500_adc_pdata,
	},
};
static int __init ts5500_init(void)
{
	struct platform_device *pdev;
	struct ts5500_sbc *sbc;
	int err;
	/*
	 * There is no DMI available or PCI bridge subvendor info,
	 * only the BIOS provides a 16-bit identification call.
	 * It is safer to find a signature in the BIOS shadow RAM.
	 */
	err = ts5500_check_signature();
	if (err)
		return err;
	pdev = platform_device_register_simple("ts5500", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);
	sbc = devm_kzalloc(&pdev->dev, sizeof(struct ts5500_sbc), GFP_KERNEL);
	if (!sbc) {
		err = -ENOMEM;
		goto error;
	}
	err = ts5500_detect_config(sbc);
	if (err)
		goto error;
	platform_set_drvdata(pdev, sbc);
	err = sysfs_create_group(&pdev->dev.kobj, &ts5500_attr_group);
	if (err)
		goto error;
	if (sbc->id == TS5500_PRODUCT_CODE) {
		ts5500_dio1_pdev.dev.parent = &pdev->dev;
		if (platform_device_register(&ts5500_dio1_pdev))
			dev_warn(&pdev->dev, "DIO1 block registration failed\n");
		ts5500_dio2_pdev.dev.parent = &pdev->dev;
		if (platform_device_register(&ts5500_dio2_pdev))
			dev_warn(&pdev->dev, "DIO2 block registration failed\n");
	}
	if (led_classdev_register(&pdev->dev, &ts5500_led_cdev))
		dev_warn(&pdev->dev, "LED registration failed\n");
	if (sbc->adc) {
		ts5500_adc_pdev.dev.parent = &pdev->dev;
		if (platform_device_register(&ts5500_adc_pdev))
			dev_warn(&pdev->dev, "ADC registration failed\n");
	}
	return 0;
error:
	platform_device_unregister(pdev);
	return err;
}
device_initcall(ts5500_init);
 | 
	linux-master | 
	arch/x86/platform/ts5500/ts5500.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for OLPC XO-1.5 System Control Interrupts (SCI)
 *
 * Copyright (C) 2009-2010 One Laptop per Child
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/power_supply.h>
#include <linux/olpc-ec.h>
#include <linux/acpi.h>
#include <asm/olpc.h>
#define DRV_NAME			"olpc-xo15-sci"
#define PFX				DRV_NAME ": "
#define XO15_SCI_CLASS			DRV_NAME
#define XO15_SCI_DEVICE_NAME		"OLPC XO-1.5 SCI"
static unsigned long			xo15_sci_gpe;
static bool				lid_wake_on_close;
/*
 * The normal ACPI LID wakeup behavior is wake-on-open, but not
 * wake-on-close. This is implemented as standard by the XO-1.5 DSDT.
 *
 * We provide here a sysfs attribute that will additionally enable
 * wake-on-close behavior. This is useful, for example, when we
 * opportunistically suspend with the display running; if the lid is then
 * closed, we want to wake up to turn the display off.
 *
 * This is controlled through a custom method in the XO-1.5 DSDT.
 */
static int set_lid_wake_behavior(bool wake_on_close)
{
	acpi_status status;
	status = acpi_execute_simple_method(NULL, "\\_SB.PCI0.LID.LIDW", wake_on_close);
	if (ACPI_FAILURE(status)) {
		pr_warn(PFX "failed to set lid behavior\n");
		return 1;
	}
	lid_wake_on_close = wake_on_close;
	return 0;
}
static ssize_t
lid_wake_on_close_show(struct kobject *s, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", lid_wake_on_close);
}
static ssize_t lid_wake_on_close_store(struct kobject *s,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	unsigned int val;
	if (sscanf(buf, "%u", &val) != 1)
		return -EINVAL;
	set_lid_wake_behavior(!!val);
	return n;
}
static struct kobj_attribute lid_wake_on_close_attr =
	__ATTR(lid_wake_on_close, 0644,
	       lid_wake_on_close_show,
	       lid_wake_on_close_store);
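/*
 * Usage sketch (the exact sysfs path depends on where the ACPI device is
 * bound; something like /sys/bus/acpi/devices/XO15EC:00/lid_wake_on_close
 * is assumed here):
 *
 *   echo 1 > .../lid_wake_on_close   # also wake when the lid is closed
 *   echo 0 > .../lid_wake_on_close   # default: wake-on-open only
 */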
static void battery_status_changed(void)
{
	struct power_supply *psy = power_supply_get_by_name("olpc_battery");
	if (psy) {
		power_supply_changed(psy);
		power_supply_put(psy);
	}
}
static void ac_status_changed(void)
{
	struct power_supply *psy = power_supply_get_by_name("olpc_ac");
	if (psy) {
		power_supply_changed(psy);
		power_supply_put(psy);
	}
}
static void process_sci_queue(void)
{
	u16 data;
	int r;
	do {
		r = olpc_ec_sci_query(&data);
		if (r || !data)
			break;
		pr_debug(PFX "SCI 0x%x received\n", data);
		switch (data) {
		case EC_SCI_SRC_BATERR:
		case EC_SCI_SRC_BATSOC:
		case EC_SCI_SRC_BATTERY:
		case EC_SCI_SRC_BATCRIT:
			battery_status_changed();
			break;
		case EC_SCI_SRC_ACPWR:
			ac_status_changed();
			break;
		}
	} while (data);
	if (r)
		pr_err(PFX "Failed to clear SCI queue");
}
static void process_sci_queue_work(struct work_struct *work)
{
	process_sci_queue();
}
static DECLARE_WORK(sci_work, process_sci_queue_work);
static u32 xo15_sci_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
{
	schedule_work(&sci_work);
	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}
static int xo15_sci_add(struct acpi_device *device)
{
	unsigned long long tmp;
	acpi_status status;
	int r;
	if (!device)
		return -EINVAL;
	strcpy(acpi_device_name(device), XO15_SCI_DEVICE_NAME);
	strcpy(acpi_device_class(device), XO15_SCI_CLASS);
	/* Get GPE bit assignment (EC events). */
	status = acpi_evaluate_integer(device->handle, "_GPE", NULL, &tmp);
	if (ACPI_FAILURE(status))
		return -EINVAL;
	xo15_sci_gpe = tmp;
	status = acpi_install_gpe_handler(NULL, xo15_sci_gpe,
					  ACPI_GPE_EDGE_TRIGGERED,
					  xo15_sci_gpe_handler, device);
	if (ACPI_FAILURE(status))
		return -ENODEV;
	dev_info(&device->dev, "Initialized, GPE = 0x%lx\n", xo15_sci_gpe);
	r = sysfs_create_file(&device->dev.kobj, &lid_wake_on_close_attr.attr);
	if (r)
		goto err_sysfs;
	/* Flush queue, and enable all SCI events */
	process_sci_queue();
	olpc_ec_mask_write(EC_SCI_SRC_ALL);
	acpi_enable_gpe(NULL, xo15_sci_gpe);
	/* Enable wake-on-EC */
	if (device->wakeup.flags.valid)
		device_init_wakeup(&device->dev, true);
	return 0;
err_sysfs:
	acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler);
	cancel_work_sync(&sci_work);
	return r;
}
static void xo15_sci_remove(struct acpi_device *device)
{
	acpi_disable_gpe(NULL, xo15_sci_gpe);
	acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler);
	cancel_work_sync(&sci_work);
	sysfs_remove_file(&device->dev.kobj, &lid_wake_on_close_attr.attr);
}
#ifdef CONFIG_PM_SLEEP
static int xo15_sci_resume(struct device *dev)
{
	/* Enable all EC events */
	olpc_ec_mask_write(EC_SCI_SRC_ALL);
	/* Power/battery status might have changed */
	battery_status_changed();
	ac_status_changed();
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(xo15_sci_pm, NULL, xo15_sci_resume);
static const struct acpi_device_id xo15_sci_device_ids[] = {
	{"XO15EC", 0},
	{"", 0},
};
static struct acpi_driver xo15_sci_drv = {
	.name = DRV_NAME,
	.class = XO15_SCI_CLASS,
	.ids = xo15_sci_device_ids,
	.ops = {
		.add = xo15_sci_add,
		.remove = xo15_sci_remove,
	},
	.drv.pm = &xo15_sci_pm,
};
static int __init xo15_sci_init(void)
{
	return acpi_bus_register_driver(&xo15_sci_drv);
}
device_initcall(xo15_sci_init);
 | 
	linux-master | 
	arch/x86/platform/olpc/olpc-xo15-sci.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for power management features of the OLPC XO-1 laptop
 *
 * Copyright (C) 2010 Andres Salomon <[email protected]>
 * Copyright (C) 2010 One Laptop per Child
 * Copyright (C) 2006 Red Hat, Inc.
 * Copyright (C) 2006 Advanced Micro Devices, Inc.
 */
#include <linux/cs5535.h>
#include <linux/platform_device.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/olpc-ec.h>
#include <asm/io.h>
#include <asm/olpc.h>
#define DRV_NAME "olpc-xo1-pm"
static unsigned long acpi_base;
static unsigned long pms_base;
static u16 wakeup_mask = CS5536_PM_PWRBTN;
static struct {
	unsigned long address;
	unsigned short segment;
} ofw_bios_entry = { 0xF0000 + PAGE_OFFSET, __KERNEL_CS };
/* Set bits in the wakeup mask */
void olpc_xo1_pm_wakeup_set(u16 value)
{
	wakeup_mask |= value;
}
EXPORT_SYMBOL_GPL(olpc_xo1_pm_wakeup_set);
/* Clear bits in the wakeup mask */
void olpc_xo1_pm_wakeup_clear(u16 value)
{
	wakeup_mask &= ~value;
}
EXPORT_SYMBOL_GPL(olpc_xo1_pm_wakeup_clear);
static int xo1_power_state_enter(suspend_state_t pm_state)
{
	unsigned long saved_sci_mask;
	/* Only STR is supported */
	if (pm_state != PM_SUSPEND_MEM)
		return -EINVAL;
	/*
	 * Save SCI mask (this gets lost since PM1_EN is used as a mask for
	 * wakeup events, which is not necessarily the same event set)
	 */
	saved_sci_mask = inl(acpi_base + CS5536_PM1_STS);
	saved_sci_mask &= 0xffff0000;
	/* Save CPU state */
	do_olpc_suspend_lowlevel();
	/* Resume path starts here */
	/* Restore SCI mask (using dword access to CS5536_PM1_EN) */
	outl(saved_sci_mask, acpi_base + CS5536_PM1_STS);
	return 0;
}
asmlinkage __visible int xo1_do_sleep(u8 sleep_state)
{
	void *pgd_addr = __va(read_cr3_pa());
	/* Program wakeup mask (using dword access to CS5536_PM1_EN) */
	outl(wakeup_mask << 16, acpi_base + CS5536_PM1_STS);
	__asm__("movl %0,%%eax" : : "r" (pgd_addr));
	__asm__("call *(%%edi); cld"
		: : "D" (&ofw_bios_entry));
	__asm__("movb $0x34, %al\n\t"
		"outb %al, $0x70\n\t"
		"movb $0x30, %al\n\t"
		"outb %al, $0x71\n\t");
	return 0;
}
static void xo1_power_off(void)
{
	printk(KERN_INFO "OLPC XO-1 power off sequence...\n");
	/* Enable all of these controls with 0 delay */
	outl(0x40000000, pms_base + CS5536_PM_SCLK);
	outl(0x40000000, pms_base + CS5536_PM_IN_SLPCTL);
	outl(0x40000000, pms_base + CS5536_PM_WKXD);
	outl(0x40000000, pms_base + CS5536_PM_WKD);
	/* Clear status bits (possibly unnecessary) */
	outl(0x0002ffff, pms_base  + CS5536_PM_SSC);
	outl(0xffffffff, acpi_base + CS5536_PM_GPE0_STS);
	/* Write SLP_EN bit to start the machinery */
	outl(0x00002000, acpi_base + CS5536_PM1_CNT);
}
static int xo1_power_state_valid(suspend_state_t pm_state)
{
	/* suspend-to-RAM only */
	return pm_state == PM_SUSPEND_MEM;
}
static const struct platform_suspend_ops xo1_suspend_ops = {
	.valid = xo1_power_state_valid,
	.enter = xo1_power_state_enter,
};
static int xo1_pm_probe(struct platform_device *pdev)
{
	struct resource *res;
	/* don't run on non-XOs */
	if (!machine_is_olpc())
		return -ENODEV;
	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res) {
		dev_err(&pdev->dev, "can't fetch device resource info\n");
		return -EIO;
	}
	if (strcmp(pdev->name, "cs5535-pms") == 0)
		pms_base = res->start;
	else if (strcmp(pdev->name, "olpc-xo1-pm-acpi") == 0)
		acpi_base = res->start;
	/* If we have both addresses, we can override the poweroff hook */
	if (pms_base && acpi_base) {
		suspend_set_ops(&xo1_suspend_ops);
		pm_power_off = xo1_power_off;
		printk(KERN_INFO "OLPC XO-1 support registered\n");
	}
	return 0;
}
static int xo1_pm_remove(struct platform_device *pdev)
{
	if (strcmp(pdev->name, "cs5535-pms") == 0)
		pms_base = 0;
	else if (strcmp(pdev->name, "olpc-xo1-pm-acpi") == 0)
		acpi_base = 0;
	pm_power_off = NULL;
	return 0;
}
static struct platform_driver cs5535_pms_driver = {
	.driver = {
		.name = "cs5535-pms",
	},
	.probe = xo1_pm_probe,
	.remove = xo1_pm_remove,
};
static struct platform_driver cs5535_acpi_driver = {
	.driver = {
		.name = "olpc-xo1-pm-acpi",
	},
	.probe = xo1_pm_probe,
	.remove = xo1_pm_remove,
};
static int __init xo1_pm_init(void)
{
	int r;
	r = platform_driver_register(&cs5535_pms_driver);
	if (r)
		return r;
	r = platform_driver_register(&cs5535_acpi_driver);
	if (r)
		platform_driver_unregister(&cs5535_pms_driver);
	return r;
}
arch_initcall(xo1_pm_init);
 | 
	linux-master | 
	arch/x86/platform/olpc/olpc-xo1-pm.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OLPC-specific OFW device tree support code.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *    {engebret|bergner}@us.ibm.com
 *
 *  Adapted for sparc by David S. Miller [email protected]
 *  Adapted for x86/OLPC by Andres Salomon <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_pdt.h>
#include <asm/olpc.h>
#include <asm/olpc_ofw.h>
static phandle __init olpc_dt_getsibling(phandle node)
{
	const void *args[] = { (void *)node };
	void *res[] = { &node };
	if ((s32)node == -1)
		return 0;
	if (olpc_ofw("peer", args, res) || (s32)node == -1)
		return 0;
	return node;
}
static phandle __init olpc_dt_getchild(phandle node)
{
	const void *args[] = { (void *)node };
	void *res[] = { &node };
	if ((s32)node == -1)
		return 0;
	if (olpc_ofw("child", args, res) || (s32)node == -1) {
		pr_err("PROM: %s: fetching child failed!\n", __func__);
		return 0;
	}
	return node;
}
static int __init olpc_dt_getproplen(phandle node, const char *prop)
{
	const void *args[] = { (void *)node, prop };
	int len;
	void *res[] = { &len };
	if ((s32)node == -1)
		return -1;
	if (olpc_ofw("getproplen", args, res)) {
		pr_err("PROM: %s: getproplen failed!\n", __func__);
		return -1;
	}
	return len;
}
static int __init olpc_dt_getproperty(phandle node, const char *prop,
		char *buf, int bufsize)
{
	int plen;
	plen = olpc_dt_getproplen(node, prop);
	if (plen > bufsize || plen < 1) {
		return -1;
	} else {
		const void *args[] = { (void *)node, prop, buf, (void *)plen };
		void *res[] = { &plen };
		if (olpc_ofw("getprop", args, res)) {
			pr_err("PROM: %s: getprop failed!\n", __func__);
			return -1;
		}
	}
	return plen;
}
static int __init olpc_dt_nextprop(phandle node, char *prev, char *buf)
{
	const void *args[] = { (void *)node, prev, buf };
	int success;
	void *res[] = { &success };
	buf[0] = '\0';
	if ((s32)node == -1)
		return -1;
	if (olpc_ofw("nextprop", args, res) || success != 1)
		return -1;
	return 0;
}
static int __init olpc_dt_pkg2path(phandle node, char *buf,
		const int buflen, int *len)
{
	const void *args[] = { (void *)node, buf, (void *)buflen };
	void *res[] = { len };
	if ((s32)node == -1)
		return -1;
	if (olpc_ofw("package-to-path", args, res) || *len < 1)
		return -1;
	return 0;
}
static unsigned int prom_early_allocated __initdata;
void * __init prom_early_alloc(unsigned long size)
{
	static u8 *mem;
	static size_t free_mem;
	void *res;
	if (free_mem < size) {
		const size_t chunk_size = max(PAGE_SIZE, size);
		/*
		 * To minimize the number of allocations, grab at least
		 * PAGE_SIZE of memory (that's an arbitrary choice that's
		 * fast enough on the platforms we care about while minimizing
		 * wasted bootmem) and hand off chunks of it to callers.
		 */
		res = memblock_alloc(chunk_size, SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      chunk_size);
		BUG_ON(!res);
		prom_early_allocated += chunk_size;
		memset(res, 0, chunk_size);
		free_mem = chunk_size;
		mem = res;
	}
	/* allocate from the local cache */
	free_mem -= size;
	res = mem;
	mem += size;
	return res;
}
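/*
 * Example (illustrative): a first prom_early_alloc(32) grabs a PAGE_SIZE
 * chunk from memblock, returns its start and keeps PAGE_SIZE - 32 bytes
 * cached; later small requests are carved from that cache, and only a
 * request larger than the remainder triggers another memblock allocation.
 */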
static struct of_pdt_ops prom_olpc_ops __initdata = {
	.nextprop = olpc_dt_nextprop,
	.getproplen = olpc_dt_getproplen,
	.getproperty = olpc_dt_getproperty,
	.getchild = olpc_dt_getchild,
	.getsibling = olpc_dt_getsibling,
	.pkg2path = olpc_dt_pkg2path,
};
static phandle __init olpc_dt_finddevice(const char *path)
{
	phandle node;
	const void *args[] = { path };
	void *res[] = { &node };
	if (olpc_ofw("finddevice", args, res)) {
		pr_err("olpc_dt: finddevice failed!\n");
		return 0;
	}
	if ((s32) node == -1)
		return 0;
	return node;
}
static int __init olpc_dt_interpret(const char *words)
{
	int result;
	const void *args[] = { words };
	void *res[] = { &result };
	if (olpc_ofw("interpret", args, res)) {
		pr_err("olpc_dt: interpret failed!\n");
		return -1;
	}
	return result;
}
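/*
 * The strings handed to olpc_dt_interpret() below are Forth fragments run
 * by Open Firmware: e.g. '" /battery@0" find-device' opens the battery
 * node and '" olpc,xo1-battery" +compatible' appends that string to the
 * node's "compatible" property.
 */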
/*
 * Extract board revision directly from OFW device tree.
 * We can't use olpc_platform_info because that hasn't been set up yet.
 */
static u32 __init olpc_dt_get_board_revision(void)
{
	phandle node;
	__be32 rev;
	int r;
	node = olpc_dt_finddevice("/");
	if (!node)
		return 0;
	r = olpc_dt_getproperty(node, "board-revision-int",
				(char *) &rev, sizeof(rev));
	if (r < 0)
		return 0;
	return be32_to_cpu(rev);
}
static int __init olpc_dt_compatible_match(phandle node, const char *compat)
{
	char buf[64], *p;
	int plen, len;
	plen = olpc_dt_getproperty(node, "compatible", buf, sizeof(buf));
	if (plen <= 0)
		return 0;
	len = strlen(compat);
	for (p = buf; p < buf + plen; p += strlen(p) + 1) {
		if (strcmp(p, compat) == 0)
			return 1;
	}
	return 0;
}
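/*
 * Example (illustrative): after the fixup below has run on an XO-1.5, the
 * battery node's "compatible" property is a list of NUL-separated strings
 * such as "olpc,xo1.5-battery\0olpc,xo1-battery\0"; the loop above matches
 * each string in turn.
 */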
static void __init olpc_dt_fixup(void)
{
	phandle node;
	u32 board_rev;
	node = olpc_dt_finddevice("/battery@0");
	if (!node)
		return;
	board_rev = olpc_dt_get_board_revision();
	if (!board_rev)
		return;
	if (board_rev >= olpc_board_pre(0xd0)) {
		/* XO-1.5 */
		if (olpc_dt_compatible_match(node, "olpc,xo1.5-battery"))
			return;
		/* Add olpc,xo1.5-battery compatible marker to battery node */
		olpc_dt_interpret("\" /battery@0\" find-device");
		olpc_dt_interpret("  \" olpc,xo1.5-battery\" +compatible");
		olpc_dt_interpret("device-end");
		if (olpc_dt_compatible_match(node, "olpc,xo1-battery")) {
			/*
			 * If we have an olpc,xo1-battery compatible, then we're
			 * running a new enough firmware that already has
			 * the dcon node.
			 */
			return;
		}
		/* Add dcon device */
		olpc_dt_interpret("\" /pci/display@1\" find-device");
		olpc_dt_interpret("  new-device");
		olpc_dt_interpret("    \" dcon\" device-name");
		olpc_dt_interpret("    \" olpc,xo1-dcon\" +compatible");
		olpc_dt_interpret("  finish-device");
		olpc_dt_interpret("device-end");
	} else {
		/* XO-1 */
		if (olpc_dt_compatible_match(node, "olpc,xo1-battery")) {
			/*
			 * If we have an olpc,xo1-battery compatible, then we're
			 * running a new enough firmware that already has
			 * the dcon and RTC nodes.
			 */
			return;
		}
		/* Add dcon device, mark RTC as olpc,xo1-rtc */
		olpc_dt_interpret("\" /pci/display@1,1\" find-device");
		olpc_dt_interpret("  new-device");
		olpc_dt_interpret("    \" dcon\" device-name");
		olpc_dt_interpret("    \" olpc,xo1-dcon\" +compatible");
		olpc_dt_interpret("  finish-device");
		olpc_dt_interpret("device-end");
		olpc_dt_interpret("\" /rtc\" find-device");
		olpc_dt_interpret(" \" olpc,xo1-rtc\" +compatible");
		olpc_dt_interpret("device-end");
	}
	/* Add olpc,xo1-battery compatible marker to battery node */
	olpc_dt_interpret("\" /battery@0\" find-device");
	olpc_dt_interpret("  \" olpc,xo1-battery\" +compatible");
	olpc_dt_interpret("device-end");
}
void __init olpc_dt_build_devicetree(void)
{
	phandle root;
	if (!olpc_ofw_is_installed())
		return;
	olpc_dt_fixup();
	root = olpc_dt_getsibling(0);
	if (!root) {
		pr_err("PROM: unable to get root node from OFW!\n");
		return;
	}
	of_pdt_build_devicetree(root, &prom_olpc_ops);
	pr_info("PROM DT: Built device tree with %u bytes of memory.\n",
			prom_early_allocated);
}
 | 
	linux-master | 
	arch/x86/platform/olpc/olpc_dt.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock_types.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/olpc_ofw.h>
/* address of OFW callback interface; will be NULL if OFW isn't found */
static int (*olpc_ofw_cif)(int *);
/* page dir entry containing OFW's pgdir table; filled in by head_32.S */
u32 olpc_ofw_pgd __initdata;
static DEFINE_SPINLOCK(ofw_lock);
#define MAXARGS 10
void __init setup_olpc_ofw_pgd(void)
{
	pgd_t *base, *ofw_pde;
	if (!olpc_ofw_cif)
		return;
	/* fetch OFW's PDE */
	base = early_ioremap(olpc_ofw_pgd, sizeof(olpc_ofw_pgd) * PTRS_PER_PGD);
	if (!base) {
		printk(KERN_ERR "failed to remap OFW's pgd - disabling OFW!\n");
		olpc_ofw_cif = NULL;
		return;
	}
	ofw_pde = &base[OLPC_OFW_PDE_NR];
	/* install OFW's PDE permanently into the kernel's pgtable */
	set_pgd(&swapper_pg_dir[OLPC_OFW_PDE_NR], *ofw_pde);
	/* implicit optimization barrier here due to the non-inline function return */
	early_iounmap(base, sizeof(olpc_ofw_pgd) * PTRS_PER_PGD);
}
int __olpc_ofw(const char *name, int nr_args, const void **args, int nr_res,
		void **res)
{
	int ofw_args[MAXARGS + 3];
	unsigned long flags;
	int ret, i, *p;
	BUG_ON(nr_args + nr_res > MAXARGS);
	if (!olpc_ofw_cif)
		return -EIO;
	ofw_args[0] = (int)name;
	ofw_args[1] = nr_args;
	ofw_args[2] = nr_res;
	p = &ofw_args[3];
	for (i = 0; i < nr_args; i++, p++)
		*p = (int)args[i];
	/* call into ofw */
	spin_lock_irqsave(&ofw_lock, flags);
	ret = olpc_ofw_cif(ofw_args);
	spin_unlock_irqrestore(&ofw_lock, flags);
	if (!ret) {
		for (i = 0; i < nr_res; i++, p++)
			*((int *)res[i]) = *p;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__olpc_ofw);
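/*
 * Layout of the array passed to the OFW client interface, e.g. for a
 * "getprop" call with four in-arguments and one result (the standard OFW
 * CIF convention, as used by the olpc_dt.c callers):
 *
 *   ofw_args[0] = "getprop"        service name
 *   ofw_args[1] = 4                nr_args
 *   ofw_args[2] = 1                nr_res
 *   ofw_args[3..6]                 node, prop, buf, len (in-arguments)
 *   ofw_args[7]                    property length, filled in by OFW
 */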
bool olpc_ofw_present(void)
{
	return olpc_ofw_cif != NULL;
}
EXPORT_SYMBOL_GPL(olpc_ofw_present);
/* OFW cif _should_ be above this address */
#define OFW_MIN 0xff000000
/* OFW starts on a 1MB boundary */
#define OFW_BOUND (1<<20)
void __init olpc_ofw_detect(void)
{
	struct olpc_ofw_header *hdr = &boot_params.olpc_ofw_header;
	unsigned long start;
	/* ensure OFW booted us by checking for "OFW " string */
	if (hdr->ofw_magic != OLPC_OFW_SIG)
		return;
	olpc_ofw_cif = (int (*)(int *))hdr->cif_handler;
	if ((unsigned long)olpc_ofw_cif < OFW_MIN) {
		printk(KERN_ERR "OFW detected, but cif has invalid address 0x%lx - disabling.\n",
				(unsigned long)olpc_ofw_cif);
		olpc_ofw_cif = NULL;
		return;
	}
	/* determine where OFW starts in memory */
	start = round_down((unsigned long)olpc_ofw_cif, OFW_BOUND);
	printk(KERN_INFO "OFW detected in memory, cif @ 0x%lx (reserving top %ldMB)\n",
			(unsigned long)olpc_ofw_cif, (-start) >> 20);
	reserve_top_address(-start);
}
bool __init olpc_ofw_is_installed(void)
{
	return olpc_ofw_cif != NULL;
}
 | 
	linux-master | 
	arch/x86/platform/olpc/olpc_ofw.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for OLPC XO-1 System Control Interrupts (SCI)
 *
 * Copyright (C) 2010 One Laptop per Child
 * Copyright (C) 2006 Red Hat, Inc.
 * Copyright (C) 2006 Advanced Micro Devices, Inc.
 */
#include <linux/cs5535.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_wakeup.h>
#include <linux/power_supply.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/olpc-ec.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/olpc.h>
#define DRV_NAME	"olpc-xo1-sci"
#define PFX		DRV_NAME ": "
static unsigned long acpi_base;
static struct input_dev *power_button_idev;
static struct input_dev *ebook_switch_idev;
static struct input_dev *lid_switch_idev;
static int sci_irq;
static bool lid_open;
static bool lid_inverted;
static int lid_wake_mode;
enum lid_wake_modes {
	LID_WAKE_ALWAYS,
	LID_WAKE_OPEN,
	LID_WAKE_CLOSE,
};
static const char * const lid_wake_mode_names[] = {
	[LID_WAKE_ALWAYS] = "always",
	[LID_WAKE_OPEN] = "open",
	[LID_WAKE_CLOSE] = "close",
};
static void battery_status_changed(void)
{
	struct power_supply *psy = power_supply_get_by_name("olpc_battery");
	if (psy) {
		power_supply_changed(psy);
		power_supply_put(psy);
	}
}
static void ac_status_changed(void)
{
	struct power_supply *psy = power_supply_get_by_name("olpc_ac");
	if (psy) {
		power_supply_changed(psy);
		power_supply_put(psy);
	}
}
/* Report current ebook switch state through input layer */
static void send_ebook_state(void)
{
	unsigned char state;
	if (olpc_ec_cmd(EC_READ_EB_MODE, NULL, 0, &state, 1)) {
		pr_err(PFX "failed to get ebook state\n");
		return;
	}
	if (test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == !!state)
		return; /* Nothing new to report. */
	input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state);
	input_sync(ebook_switch_idev);
	pm_wakeup_event(&ebook_switch_idev->dev, 0);
}
static void flip_lid_inverter(void)
{
	/* gpio is high; invert so we'll get l->h event interrupt */
	if (lid_inverted)
		cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_INPUT_INVERT);
	else
		cs5535_gpio_set(OLPC_GPIO_LID, GPIO_INPUT_INVERT);
	lid_inverted = !lid_inverted;
}
static void detect_lid_state(void)
{
	/*
	 * the edge detector hookup on the gpio inputs on the geode is
	 * odd, to say the least.  See http://dev.laptop.org/ticket/5703
	 * for details, but in a nutshell:  we don't use the edge
	 * detectors.  instead, we make use of an anomaly:  with both
	 * edge detectors turned off, we still get an edge event on a
	 * positive edge transition.  to take advantage of this, we use the
	 * front-end inverter to ensure that that's the edge we're always
	 * going to see next.
	 */
	int state;
	state = cs5535_gpio_isset(OLPC_GPIO_LID, GPIO_READ_BACK);
	lid_open = !state ^ !lid_inverted; /* logical XOR of state and inverter */
	if (!state)
		return;
	flip_lid_inverter();
}
/* Report current lid switch state through input layer */
static void send_lid_state(void)
{
	if (!!test_bit(SW_LID, lid_switch_idev->sw) == !lid_open)
		return; /* Nothing new to report. */
	input_report_switch(lid_switch_idev, SW_LID, !lid_open);
	input_sync(lid_switch_idev);
	pm_wakeup_event(&lid_switch_idev->dev, 0);
}
static ssize_t lid_wake_mode_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	const char *mode = lid_wake_mode_names[lid_wake_mode];
	return sprintf(buf, "%s\n", mode);
}
static ssize_t lid_wake_mode_set(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(lid_wake_mode_names); i++) {
		const char *mode = lid_wake_mode_names[i];
		if (strlen(mode) != count || strncasecmp(mode, buf, count))
			continue;
		lid_wake_mode = i;
		return count;
	}
	return -EINVAL;
}
static DEVICE_ATTR(lid_wake_mode, S_IWUSR | S_IRUGO, lid_wake_mode_show,
		   lid_wake_mode_set);
static struct attribute *lid_attrs[] = {
	&dev_attr_lid_wake_mode.attr,
	NULL,
};
ATTRIBUTE_GROUPS(lid);
/*
 * Process all items in the EC's SCI queue.
 *
 * This is handled in a workqueue because olpc_ec_cmd can be slow (and
 * can even timeout).
 *
 * If propagate_events is false, the queue is drained without events being
 * generated for the interrupts.
 */
static void process_sci_queue(bool propagate_events)
{
	int r;
	u16 data;
	do {
		r = olpc_ec_sci_query(&data);
		if (r || !data)
			break;
		pr_debug(PFX "SCI 0x%x received\n", data);
		switch (data) {
		case EC_SCI_SRC_BATERR:
		case EC_SCI_SRC_BATSOC:
		case EC_SCI_SRC_BATTERY:
		case EC_SCI_SRC_BATCRIT:
			battery_status_changed();
			break;
		case EC_SCI_SRC_ACPWR:
			ac_status_changed();
			break;
		}
		if (data == EC_SCI_SRC_EBOOK && propagate_events)
			send_ebook_state();
	} while (data);
	if (r)
		pr_err(PFX "Failed to clear SCI queue");
}
static void process_sci_queue_work(struct work_struct *work)
{
	process_sci_queue(true);
}
static DECLARE_WORK(sci_work, process_sci_queue_work);
static irqreturn_t xo1_sci_intr(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	u32 sts;
	u32 gpe;
	sts = inl(acpi_base + CS5536_PM1_STS);
	outl(sts | 0xffff, acpi_base + CS5536_PM1_STS);
	gpe = inl(acpi_base + CS5536_PM_GPE0_STS);
	outl(0xffffffff, acpi_base + CS5536_PM_GPE0_STS);
	dev_dbg(&pdev->dev, "sts %x gpe %x\n", sts, gpe);
	if (sts & CS5536_PWRBTN_FLAG) {
		if (!(sts & CS5536_WAK_FLAG)) {
			/* Only report power button input when it was pressed
			 * during regular operation (as opposed to when it
			 * was used to wake the system). */
			input_report_key(power_button_idev, KEY_POWER, 1);
			input_sync(power_button_idev);
			input_report_key(power_button_idev, KEY_POWER, 0);
			input_sync(power_button_idev);
		}
		/* Report the wakeup event in all cases. */
		pm_wakeup_event(&power_button_idev->dev, 0);
	}
	if ((sts & (CS5536_RTC_FLAG | CS5536_WAK_FLAG)) ==
			(CS5536_RTC_FLAG | CS5536_WAK_FLAG)) {
		/* When the system is woken by the RTC alarm, report the
		 * event on the rtc device. */
		struct device *rtc = bus_find_device_by_name(
			&platform_bus_type, NULL, "rtc_cmos");
		if (rtc) {
			pm_wakeup_event(rtc, 0);
			put_device(rtc);
		}
	}
	if (gpe & CS5536_GPIOM7_PME_FLAG) { /* EC GPIO */
		cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_NEGATIVE_EDGE_STS);
		schedule_work(&sci_work);
	}
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS);
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS);
	detect_lid_state();
	send_lid_state();
	return IRQ_HANDLED;
}
static int xo1_sci_suspend(struct platform_device *pdev, pm_message_t state)
{
	if (device_may_wakeup(&power_button_idev->dev))
		olpc_xo1_pm_wakeup_set(CS5536_PM_PWRBTN);
	else
		olpc_xo1_pm_wakeup_clear(CS5536_PM_PWRBTN);
	if (device_may_wakeup(&ebook_switch_idev->dev))
		olpc_ec_wakeup_set(EC_SCI_SRC_EBOOK);
	else
		olpc_ec_wakeup_clear(EC_SCI_SRC_EBOOK);
	if (!device_may_wakeup(&lid_switch_idev->dev)) {
		cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
	} else if ((lid_open && lid_wake_mode == LID_WAKE_OPEN) ||
		   (!lid_open && lid_wake_mode == LID_WAKE_CLOSE)) {
		flip_lid_inverter();
		/* we may have just caused an event */
		cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS);
		cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS);
		cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
	}
	return 0;
}
static int xo1_sci_resume(struct platform_device *pdev)
{
	/*
	 * We don't know what may have happened while we were asleep.
	 * Reestablish our lid setup so we're sure to catch all transitions.
	 */
	detect_lid_state();
	send_lid_state();
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
	/* Enable all EC events */
	olpc_ec_mask_write(EC_SCI_SRC_ALL);
	/* Power/battery status might have changed too */
	battery_status_changed();
	ac_status_changed();
	return 0;
}
static int setup_sci_interrupt(struct platform_device *pdev)
{
	u32 lo, hi;
	u32 sts;
	int r;
	rdmsr(0x51400020, lo, hi);
	sci_irq = (lo >> 20) & 15;
	if (sci_irq) {
		dev_info(&pdev->dev, "SCI is mapped to IRQ %d\n", sci_irq);
	} else {
		/* Zero means masked */
		dev_info(&pdev->dev, "SCI unmapped. Mapping to IRQ 3\n");
		sci_irq = 3;
		lo |= 0x00300000;
		wrmsrl(0x51400020, lo);
	}
	/* Select level triggered in PIC */
	if (sci_irq < 8) {
		lo = inb(CS5536_PIC_INT_SEL1);
		lo |= 1 << sci_irq;
		outb(lo, CS5536_PIC_INT_SEL1);
	} else {
		lo = inb(CS5536_PIC_INT_SEL2);
		lo |= 1 << (sci_irq - 8);
		outb(lo, CS5536_PIC_INT_SEL2);
	}
	/* Enable interesting SCI events, and clear pending interrupts */
	sts = inl(acpi_base + CS5536_PM1_STS);
	outl(((CS5536_PM_PWRBTN | CS5536_PM_RTC) << 16) | 0xffff,
	     acpi_base + CS5536_PM1_STS);
	r = request_irq(sci_irq, xo1_sci_intr, 0, DRV_NAME, pdev);
	if (r)
		dev_err(&pdev->dev, "can't request interrupt\n");
	return r;
}
static int setup_ec_sci(void)
{
	int r;
	r = gpio_request(OLPC_GPIO_ECSCI, "OLPC-ECSCI");
	if (r)
		return r;
	gpio_direction_input(OLPC_GPIO_ECSCI);
	/* Clear pending EC SCI events */
	cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_NEGATIVE_EDGE_STS);
	cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_POSITIVE_EDGE_STS);
	/*
	 * Enable EC SCI events, and map them to both a PME and the SCI
	 * interrupt.
	 *
	 * Ordinarily, in addition to functioning as GPIOs, Geode GPIOs can
	 * be mapped to regular interrupts *or* Geode-specific Power
	 * Management Events (PMEs) - events that bring the system out of
	 * suspend. In this case, we want both of those things - the system
	 * wakeup, *and* the ability to get an interrupt when an event occurs.
	 *
	 * To achieve this, we map the GPIO to a PME, and then we use one
	 * of the many generic knobs on the CS5535 PIC to additionally map the
	 * PME to the regular SCI interrupt line.
	 */
	cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_EVENTS_ENABLE);
	/* Set the SCI to cause a PME event on group 7 */
	cs5535_gpio_setup_event(OLPC_GPIO_ECSCI, 7, 1);
	/* And have group 7 also fire the SCI interrupt */
	cs5535_pic_unreqz_select_high(7, sci_irq);
	return 0;
}
static void free_ec_sci(void)
{
	gpio_free(OLPC_GPIO_ECSCI);
}
static int setup_lid_events(void)
{
	int r;
	r = gpio_request(OLPC_GPIO_LID, "OLPC-LID");
	if (r)
		return r;
	gpio_direction_input(OLPC_GPIO_LID);
	cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_INPUT_INVERT);
	lid_inverted = 0;
	/* Clear edge detection and event enable for now */
	cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
	cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_EN);
	cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_EN);
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS);
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS);
	/* Set the LID to cause an PME event on group 6 */
	cs5535_gpio_setup_event(OLPC_GPIO_LID, 6, 1);
	/* Set PME group 6 to fire the SCI interrupt */
	cs5535_gpio_set_irq(6, sci_irq);
	/* Enable the event */
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
	return 0;
}
static void free_lid_events(void)
{
	gpio_free(OLPC_GPIO_LID);
}
static int setup_power_button(struct platform_device *pdev)
{
	int r;
	power_button_idev = input_allocate_device();
	if (!power_button_idev)
		return -ENOMEM;
	power_button_idev->name = "Power Button";
	power_button_idev->phys = DRV_NAME "/input0";
	set_bit(EV_KEY, power_button_idev->evbit);
	set_bit(KEY_POWER, power_button_idev->keybit);
	power_button_idev->dev.parent = &pdev->dev;
	device_init_wakeup(&power_button_idev->dev, 1);
	r = input_register_device(power_button_idev);
	if (r) {
		dev_err(&pdev->dev, "failed to register power button: %d\n", r);
		input_free_device(power_button_idev);
	}
	return r;
}
static void free_power_button(void)
{
	input_unregister_device(power_button_idev);
}
static int setup_ebook_switch(struct platform_device *pdev)
{
	int r;
	ebook_switch_idev = input_allocate_device();
	if (!ebook_switch_idev)
		return -ENOMEM;
	ebook_switch_idev->name = "EBook Switch";
	ebook_switch_idev->phys = DRV_NAME "/input1";
	set_bit(EV_SW, ebook_switch_idev->evbit);
	set_bit(SW_TABLET_MODE, ebook_switch_idev->swbit);
	ebook_switch_idev->dev.parent = &pdev->dev;
	device_set_wakeup_capable(&ebook_switch_idev->dev, true);
	r = input_register_device(ebook_switch_idev);
	if (r) {
		dev_err(&pdev->dev, "failed to register ebook switch: %d\n", r);
		input_free_device(ebook_switch_idev);
	}
	return r;
}
static void free_ebook_switch(void)
{
	input_unregister_device(ebook_switch_idev);
}
static int setup_lid_switch(struct platform_device *pdev)
{
	int r;
	lid_switch_idev = input_allocate_device();
	if (!lid_switch_idev)
		return -ENOMEM;
	lid_switch_idev->name = "Lid Switch";
	lid_switch_idev->phys = DRV_NAME "/input2";
	set_bit(EV_SW, lid_switch_idev->evbit);
	set_bit(SW_LID, lid_switch_idev->swbit);
	lid_switch_idev->dev.parent = &pdev->dev;
	device_set_wakeup_capable(&lid_switch_idev->dev, true);
	r = input_register_device(lid_switch_idev);
	if (r) {
		dev_err(&pdev->dev, "failed to register lid switch: %d\n", r);
		goto err_register;
	}
	return 0;
err_register:
	input_free_device(lid_switch_idev);
	return r;
}
static void free_lid_switch(void)
{
	input_unregister_device(lid_switch_idev);
}
static int xo1_sci_probe(struct platform_device *pdev)
{
	struct resource *res;
	int r;
	/* don't run on non-XOs */
	if (!machine_is_olpc())
		return -ENODEV;
	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res) {
		dev_err(&pdev->dev, "can't fetch device resource info\n");
		return -EIO;
	}
	acpi_base = res->start;
	r = setup_power_button(pdev);
	if (r)
		return r;
	r = setup_ebook_switch(pdev);
	if (r)
		goto err_ebook;
	r = setup_lid_switch(pdev);
	if (r)
		goto err_lid;
	r = setup_lid_events();
	if (r)
		goto err_lidevt;
	r = setup_ec_sci();
	if (r)
		goto err_ecsci;
	/* Enable PME generation for EC-generated events */
	outl(CS5536_GPIOM6_PME_EN | CS5536_GPIOM7_PME_EN,
		acpi_base + CS5536_PM_GPE0_EN);
	/* Clear pending events */
	outl(0xffffffff, acpi_base + CS5536_PM_GPE0_STS);
	process_sci_queue(false);
	/* Initial sync */
	send_ebook_state();
	detect_lid_state();
	send_lid_state();
	r = setup_sci_interrupt(pdev);
	if (r)
		goto err_sci;
	/* Enable all EC events */
	olpc_ec_mask_write(EC_SCI_SRC_ALL);
	return r;
err_sci:
	free_ec_sci();
err_ecsci:
	free_lid_events();
err_lidevt:
	free_lid_switch();
err_lid:
	free_ebook_switch();
err_ebook:
	free_power_button();
	return r;
}
static int xo1_sci_remove(struct platform_device *pdev)
{
	free_irq(sci_irq, pdev);
	cancel_work_sync(&sci_work);
	free_ec_sci();
	free_lid_events();
	free_lid_switch();
	free_ebook_switch();
	free_power_button();
	acpi_base = 0;
	return 0;
}
static struct platform_driver xo1_sci_driver = {
	.driver = {
		.name = "olpc-xo1-sci-acpi",
		.dev_groups = lid_groups,
	},
	.probe = xo1_sci_probe,
	.remove = xo1_sci_remove,
	.suspend = xo1_sci_suspend,
	.resume = xo1_sci_resume,
};
static int __init xo1_sci_init(void)
{
	return platform_driver_register(&xo1_sci_driver);
}
arch_initcall(xo1_sci_init);
 | 
	linux-master | 
	arch/x86/platform/olpc/olpc-xo1-sci.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for the OLPC DCON and OLPC EC access
 *
 * Copyright © 2006  Advanced Micro Devices, Inc.
 * Copyright © 2007-2008  Andres Salomon <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <linux/mutex.h>
#include <linux/olpc-ec.h>
#include <asm/geode.h>
#include <asm/setup.h>
#include <asm/olpc.h>
#include <asm/olpc_ofw.h>
struct olpc_platform_t olpc_platform_info;
EXPORT_SYMBOL_GPL(olpc_platform_info);
/* what the timeout *should* be (in ms) */
#define EC_BASE_TIMEOUT 20
/* the timeout that bugs in the EC might force us to actually use */
static int ec_timeout = EC_BASE_TIMEOUT;
static int __init olpc_ec_timeout_set(char *str)
{
	if (get_option(&str, &ec_timeout) != 1) {
		ec_timeout = EC_BASE_TIMEOUT;
		printk(KERN_ERR "olpc-ec:  invalid argument to "
				"'olpc_ec_timeout=', ignoring!\n");
	}
	printk(KERN_DEBUG "olpc-ec:  using %d ms delay for EC commands.\n",
			ec_timeout);
	return 1;
}
__setup("olpc_ec_timeout=", olpc_ec_timeout_set);
/*
 * These {i,o}bf_status functions return whether the buffers are full or not.
 */
static inline unsigned int ibf_status(unsigned int port)
{
	return !!(inb(port) & 0x02);
}
static inline unsigned int obf_status(unsigned int port)
{
	return inb(port) & 0x01;
}
#define wait_on_ibf(p, d) __wait_on_ibf(__LINE__, (p), (d))
static int __wait_on_ibf(unsigned int line, unsigned int port, int desired)
{
	unsigned int timeo;
	int state = ibf_status(port);
	for (timeo = ec_timeout; state != desired && timeo; timeo--) {
		mdelay(1);
		state = ibf_status(port);
	}
	if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) &&
			timeo < (ec_timeout - EC_BASE_TIMEOUT)) {
		printk(KERN_WARNING "olpc-ec:  %d: waited %u ms for IBF!\n",
				line, ec_timeout - timeo);
	}
	return !(state == desired);
}
#define wait_on_obf(p, d) __wait_on_obf(__LINE__, (p), (d))
static int __wait_on_obf(unsigned int line, unsigned int port, int desired)
{
	unsigned int timeo;
	int state = obf_status(port);
	for (timeo = ec_timeout; state != desired && timeo; timeo--) {
		mdelay(1);
		state = obf_status(port);
	}
	if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) &&
			timeo < (ec_timeout - EC_BASE_TIMEOUT)) {
		printk(KERN_WARNING "olpc-ec:  %d: waited %u ms for OBF!\n",
				line, ec_timeout - timeo);
	}
	return !(state == desired);
}
/*
 * This allows the kernel to run Embedded Controller commands.  The EC is
 * documented at <http://wiki.laptop.org/go/Embedded_controller>, and the
 * available EC commands are here:
 * <http://wiki.laptop.org/go/Ec_specification>.  Unfortunately, while
 * OpenFirmware's source is available, the EC's is not.
 */
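/*
 * Port usage in the function below (inferred from the code, not from the
 * wiki pages above): 0x6c is the EC command/status port and 0x68 the data
 * port, an 8042-style handshake.  A caller-side sketch using the generic
 * wrapper from <linux/olpc-ec.h> might look like:
 *
 *	u8 ver[2];
 *
 *	if (!olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, ver, sizeof(ver)))
 *		pr_info("EC firmware rev %u.%u\n", ver[0], ver[1]);
 */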
static int olpc_xo1_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf,
		size_t outlen, void *arg)
{
	int ret = -EIO;
	int i;
	int restarts = 0;
	/* Clear OBF */
	for (i = 0; i < 10 && (obf_status(0x6c) == 1); i++)
		inb(0x68);
	if (i == 10) {
		printk(KERN_ERR "olpc-ec:  timeout while attempting to "
				"clear OBF flag!\n");
		goto err;
	}
	if (wait_on_ibf(0x6c, 0)) {
		printk(KERN_ERR "olpc-ec:  timeout waiting for EC to "
				"quiesce!\n");
		goto err;
	}
restart:
	/*
	 * Note that if we time out during any IBF checks, that's a failure;
	 * we have to return.  There's no way for the kernel to clear that.
	 *
	 * If we time out during an OBF check, we can restart the command;
	 * reissuing it will clear the OBF flag, and we should be alright.
	 * The OBF flag will sometimes misbehave due to what we believe
	 * is a hardware quirk.
	 */
	pr_devel("olpc-ec:  running cmd 0x%x\n", cmd);
	outb(cmd, 0x6c);
	if (wait_on_ibf(0x6c, 0)) {
		printk(KERN_ERR "olpc-ec:  timeout waiting for EC to read "
				"command!\n");
		goto err;
	}
	if (inbuf && inlen) {
		/* write data to EC */
		for (i = 0; i < inlen; i++) {
			pr_devel("olpc-ec:  sending cmd arg 0x%x\n", inbuf[i]);
			outb(inbuf[i], 0x68);
			if (wait_on_ibf(0x6c, 0)) {
				printk(KERN_ERR "olpc-ec:  timeout waiting for"
						" EC accept data!\n");
				goto err;
			}
		}
	}
	if (outbuf && outlen) {
		/* read data from EC */
		for (i = 0; i < outlen; i++) {
			if (wait_on_obf(0x6c, 1)) {
				printk(KERN_ERR "olpc-ec:  timeout waiting for"
						" EC to provide data!\n");
				if (restarts++ < 10)
					goto restart;
				goto err;
			}
			outbuf[i] = inb(0x68);
			pr_devel("olpc-ec:  received 0x%x\n", outbuf[i]);
		}
	}
	ret = 0;
err:
	return ret;
}
static bool __init check_ofw_architecture(struct device_node *root)
{
	const char *olpc_arch;
	int propsize;
	olpc_arch = of_get_property(root, "architecture", &propsize);
	return propsize == 5 && strncmp("OLPC", olpc_arch, 5) == 0;
}
static u32 __init get_board_revision(struct device_node *root)
{
	int propsize;
	const __be32 *rev;
	rev = of_get_property(root, "board-revision-int", &propsize);
	if (propsize != 4)
		return 0;
	return be32_to_cpu(*rev);
}
static bool __init platform_detect(void)
{
	struct device_node *root = of_find_node_by_path("/");
	bool success;
	if (!root)
		return false;
	success = check_ofw_architecture(root);
	if (success) {
		olpc_platform_info.boardrev = get_board_revision(root);
		olpc_platform_info.flags |= OLPC_F_PRESENT;
		pr_info("OLPC board revision %s%X\n",
			((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "",
			olpc_platform_info.boardrev >> 4);
	}
	of_node_put(root);
	return success;
}
static int __init add_xo1_platform_devices(void)
{
	struct platform_device *pdev;
	pdev = platform_device_register_simple("xo1-rfkill", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);
	pdev = platform_device_register_simple("olpc-xo1", -1, NULL, 0);
	return PTR_ERR_OR_ZERO(pdev);
}
static int olpc_xo1_ec_suspend(struct platform_device *pdev)
{
	/*
	 * Squelch SCIs while suspended.  This is a fix for
	 * <http://dev.laptop.org/ticket/1835>.
	 */
	return olpc_ec_cmd(EC_SET_SCI_INHIBIT, NULL, 0, NULL, 0);
}
static int olpc_xo1_ec_resume(struct platform_device *pdev)
{
	/* Tell the EC to stop inhibiting SCIs */
	olpc_ec_cmd(EC_SET_SCI_INHIBIT_RELEASE, NULL, 0, NULL, 0);
	/*
	 * Tell the wireless module to restart USB communication.
	 * Must be done twice.
	 */
	olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0);
	olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0);
	return 0;
}
static struct olpc_ec_driver ec_xo1_driver = {
	.suspend = olpc_xo1_ec_suspend,
	.resume = olpc_xo1_ec_resume,
	.ec_cmd = olpc_xo1_ec_cmd,
#ifdef CONFIG_OLPC_XO1_SCI
	/*
	 * XO-1 EC wakeups are available when the olpc-xo1-sci driver is
	 * compiled in
	 */
	.wakeup_available = true,
#endif
};
static struct olpc_ec_driver ec_xo1_5_driver = {
	.ec_cmd = olpc_xo1_ec_cmd,
#ifdef CONFIG_OLPC_XO15_SCI
	/*
	 * XO-1.5 EC wakeups are available when the olpc-xo15-sci driver is
	 * compiled in
	 */
	.wakeup_available = true,
#endif
};
static int __init olpc_init(void)
{
	int r = 0;
	if (!olpc_ofw_present() || !platform_detect())
		return 0;
	/* register the XO-1 and 1.5-specific EC handler */
	if (olpc_platform_info.boardrev < olpc_board_pre(0xd0))	/* XO-1 */
		olpc_ec_driver_register(&ec_xo1_driver, NULL);
	else
		olpc_ec_driver_register(&ec_xo1_5_driver, NULL);
	platform_device_register_simple("olpc-ec", -1, NULL, 0);
	/* assume B1 and above models always have a DCON */
	if (olpc_board_at_least(olpc_board(0xb1)))
		olpc_platform_info.flags |= OLPC_F_DCON;
#ifdef CONFIG_PCI_OLPC
	/* If the VSA exists let it emulate PCI, if not emulate in kernel.
	 * XO-1 only. */
	if (olpc_platform_info.boardrev < olpc_board_pre(0xd0) &&
			!cs5535_has_vsa2())
		x86_init.pci.arch_init = pci_olpc_init;
#endif
	if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) { /* XO-1 */
		r = add_xo1_platform_devices();
		if (r)
			return r;
	}
	return 0;
}
postcore_initcall(olpc_init);
 | 
	linux-master | 
	arch/x86/platform/olpc/olpc.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for OLPC XO-1 Real Time Clock (RTC)
 *
 * Copyright (C) 2011 One Laptop per Child
 */
#include <linux/mc146818rtc.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/of.h>
#include <asm/msr.h>
#include <asm/olpc.h>
#include <asm/x86_init.h>
static void rtc_wake_on(struct device *dev)
{
	olpc_xo1_pm_wakeup_set(CS5536_PM_RTC);
}
static void rtc_wake_off(struct device *dev)
{
	olpc_xo1_pm_wakeup_clear(CS5536_PM_RTC);
}
static struct resource rtc_platform_resource[] = {
	[0] = {
		.start	= RTC_PORT(0),
		.end	= RTC_PORT(1),
		.flags	= IORESOURCE_IO,
	},
	[1] = {
		.start	= RTC_IRQ,
		.end	= RTC_IRQ,
		.flags	= IORESOURCE_IRQ,
	}
};
static struct cmos_rtc_board_info rtc_info = {
	.rtc_day_alarm = 0,
	.rtc_mon_alarm = 0,
	.rtc_century = 0,
	.wake_on = rtc_wake_on,
	.wake_off = rtc_wake_off,
};
static struct platform_device xo1_rtc_device = {
	.name = "rtc_cmos",
	.id = -1,
	.num_resources = ARRAY_SIZE(rtc_platform_resource),
	.dev.platform_data = &rtc_info,
	.resource = rtc_platform_resource,
};
static int __init xo1_rtc_init(void)
{
	int r;
	struct device_node *node;
	node = of_find_compatible_node(NULL, NULL, "olpc,xo1-rtc");
	if (!node)
		return 0;
	of_node_put(node);
	pr_info("olpc-xo1-rtc: Initializing OLPC XO-1 RTC\n");
	rdmsrl(MSR_RTC_DOMA_OFFSET, rtc_info.rtc_day_alarm);
	rdmsrl(MSR_RTC_MONA_OFFSET, rtc_info.rtc_mon_alarm);
	rdmsrl(MSR_RTC_CEN_OFFSET, rtc_info.rtc_century);
	r = platform_device_register(&xo1_rtc_device);
	if (r)
		return r;
	x86_platform.legacy.rtc = 0;
	device_init_wakeup(&xo1_rtc_device.dev, 1);
	return 0;
}
arch_initcall(xo1_rtc_init);
 | 
	linux-master | 
	arch/x86/platform/olpc/olpc-xo1-rtc.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Eurobraille/Iris power off support.
 *
 * Eurobraille's Iris machine is a PC with no APM or ACPI support.
 * It is shutdown by a special I/O sequence which this module provides.
 *
 *  Copyright (C) Shérab <[email protected]>
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <asm/io.h>
#define IRIS_GIO_BASE		0x340
#define IRIS_GIO_INPUT		IRIS_GIO_BASE
#define IRIS_GIO_OUTPUT		(IRIS_GIO_BASE + 1)
#define IRIS_GIO_PULSE		0x80 /* First byte to send */
#define IRIS_GIO_REST		0x00 /* Second byte to send */
#define IRIS_GIO_NODEV		0xff /* Likely not an Iris */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sébastien Hinderer <[email protected]>");
MODULE_DESCRIPTION("A power_off handler for Iris devices from EuroBraille");
static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Set to one to force poweroff handler installation.");
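/*
 * Usage sketch (assumed): because the probe sequence pokes raw I/O ports
 * and could misbehave on non-Iris hardware, the handler is only installed
 * when explicitly requested, e.g. "modprobe iris force=1", or
 * "iris.force=1" on the kernel command line when built in.
 */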
static void (*old_pm_power_off)(void);
static void iris_power_off(void)
{
	outb(IRIS_GIO_PULSE, IRIS_GIO_OUTPUT);
	msleep(850);
	outb(IRIS_GIO_REST, IRIS_GIO_OUTPUT);
}
/*
 * Before installing the power_off handler, try to make sure the OS is
 * running on an Iris.  Since Iris does not support DMI, this is done
 * by reading its input port and seeing whether the read value is
 * meaningful.
 */
static int iris_probe(struct platform_device *pdev)
{
	unsigned char status = inb(IRIS_GIO_INPUT);
	if (status == IRIS_GIO_NODEV) {
		printk(KERN_ERR "This machine does not seem to be an Iris. "
			"Power off handler not installed.\n");
		return -ENODEV;
	}
	old_pm_power_off = pm_power_off;
	pm_power_off = &iris_power_off;
	printk(KERN_INFO "Iris power_off handler installed.\n");
	return 0;
}
static int iris_remove(struct platform_device *pdev)
{
	pm_power_off = old_pm_power_off;
	printk(KERN_INFO "Iris power_off handler uninstalled.\n");
	return 0;
}
static struct platform_driver iris_driver = {
	.driver		= {
		.name   = "iris",
	},
	.probe          = iris_probe,
	.remove         = iris_remove,
};
static struct resource iris_resources[] = {
	{
		.start  = IRIS_GIO_BASE,
		.end    = IRIS_GIO_OUTPUT,
		.flags  = IORESOURCE_IO,
		.name   = "address"
	}
};
static struct platform_device *iris_device;
static int iris_init(void)
{
	int ret;
	if (force != 1) {
		printk(KERN_ERR "The force parameter has not been set to 1."
			" The Iris poweroff handler will not be installed.\n");
		return -ENODEV;
	}
	ret = platform_driver_register(&iris_driver);
	if (ret < 0) {
		printk(KERN_ERR "Failed to register iris platform driver: %d\n",
			ret);
		return ret;
	}
	iris_device = platform_device_register_simple("iris", (-1),
				iris_resources, ARRAY_SIZE(iris_resources));
	if (IS_ERR(iris_device)) {
		printk(KERN_ERR "Failed to register iris platform device\n");
		platform_driver_unregister(&iris_driver);
		return PTR_ERR(iris_device);
	}
	return 0;
}
static void iris_exit(void)
{
	platform_device_unregister(iris_device);
	platform_driver_unregister(&iris_driver);
}
module_init(iris_init);
module_exit(iris_exit);
 | 
	linux-master | 
	arch/x86/platform/iris/iris.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel SOC Punit device state debug driver
 * Punit controls power management for North Complex devices (Graphics
 * blocks, Image Signal Processing, video processing, display, DSP etc.)
 *
 * Copyright (c) 2015, Intel Corporation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/io.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
/* Subsystem config/status Video processor */
#define VED_SS_PM0		0x32
/* Subsystem config/status ISP (Image Signal Processor) */
#define ISP_SS_PM0		0x39
/* Subsystem config/status Input/output controller */
#define MIO_SS_PM		0x3B
/* Shift bits for getting status for video, isp and i/o */
#define SSS_SHIFT		24
/* Power gate status reg */
#define PWRGT_STATUS		0x61
/* Shift bits for getting status for graphics rendering */
#define RENDER_POS		0
/* Shift bits for getting status for media control */
#define MEDIA_POS		2
/* Shift bits for getting status for Valley View/Baytrail display */
#define VLV_DISPLAY_POS		6
/* Subsystem config/status display for Cherry Trail SOC */
#define CHT_DSP_SSS		0x36
/* Shift bits for getting status for display */
#define CHT_DSP_SSS_POS		16
struct punit_device {
	char *name;
	int reg;
	int sss_pos;
};
static const struct punit_device punit_device_tng[] = {
	{ "DISPLAY",	CHT_DSP_SSS,	SSS_SHIFT },
	{ "VED",	VED_SS_PM0,	SSS_SHIFT },
	{ "ISP",	ISP_SS_PM0,	SSS_SHIFT },
	{ "MIO",	MIO_SS_PM,	SSS_SHIFT },
	{ NULL }
};
static const struct punit_device punit_device_byt[] = {
	{ "GFX RENDER",	PWRGT_STATUS,	RENDER_POS },
	{ "GFX MEDIA",	PWRGT_STATUS,	MEDIA_POS },
	{ "DISPLAY",	PWRGT_STATUS,	VLV_DISPLAY_POS },
	{ "VED",	VED_SS_PM0,	SSS_SHIFT },
	{ "ISP",	ISP_SS_PM0,	SSS_SHIFT },
	{ "MIO",	MIO_SS_PM,	SSS_SHIFT },
	{ NULL }
};
static const struct punit_device punit_device_cht[] = {
	{ "GFX RENDER",	PWRGT_STATUS,	RENDER_POS },
	{ "GFX MEDIA",	PWRGT_STATUS,	MEDIA_POS },
	{ "DISPLAY",	CHT_DSP_SSS,	CHT_DSP_SSS_POS },
	{ "VED",	VED_SS_PM0,	SSS_SHIFT },
	{ "ISP",	ISP_SS_PM0,	SSS_SHIFT },
	{ "MIO",	MIO_SS_PM,	SSS_SHIFT },
	{ NULL }
};
static const char * const dstates[] = {"D0", "D0i1", "D0i2", "D0i3"};
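/*
 * Worked example (illustrative): a PWRGT_STATUS readout of 0x000000c3
 * decodes to "D0i3" for GFX RENDER (bits 1:0 == 3) and "D0" for GFX
 * MEDIA (bits 3:2 == 0); each device is a 2-bit field sampled at its
 * sss_pos in the show routine below.
 */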
static int punit_dev_state_show(struct seq_file *seq_file, void *unused)
{
	u32 punit_pwr_status;
	struct punit_device *punit_devp = seq_file->private;
	int index;
	int status;
	seq_puts(seq_file, "\n\nPUNIT NORTH COMPLEX DEVICES:\n");
	while (punit_devp->name) {
		status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
				       punit_devp->reg, &punit_pwr_status);
		if (status) {
			seq_printf(seq_file, "%9s : Read Failed\n",
				   punit_devp->name);
		} else  {
			index = (punit_pwr_status >> punit_devp->sss_pos) & 3;
			seq_printf(seq_file, "%9s : %s\n", punit_devp->name,
				   dstates[index]);
		}
		punit_devp++;
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(punit_dev_state);
static struct dentry *punit_dbg_file;
static void punit_dbgfs_register(struct punit_device *punit_device)
{
	punit_dbg_file = debugfs_create_dir("punit_atom", NULL);
	debugfs_create_file("dev_power_state", 0444, punit_dbg_file,
			    punit_device, &punit_dev_state_fops);
}
static void punit_dbgfs_unregister(void)
{
	debugfs_remove_recursive(punit_dbg_file);
}
#define X86_MATCH(model, data)						 \
	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
					   X86_FEATURE_MWAIT, data)
static const struct x86_cpu_id intel_punit_cpu_ids[] = {
	X86_MATCH(ATOM_SILVERMONT,		&punit_device_byt),
	X86_MATCH(ATOM_SILVERMONT_MID,		&punit_device_tng),
	X86_MATCH(ATOM_AIRMONT,			&punit_device_cht),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_punit_cpu_ids);
static int __init punit_atom_debug_init(void)
{
	const struct x86_cpu_id *id;
	id = x86_match_cpu(intel_punit_cpu_ids);
	if (!id)
		return -ENODEV;
	punit_dbgfs_register((struct punit_device *)id->driver_data);
	return 0;
}
static void __exit punit_atom_debug_exit(void)
{
	punit_dbgfs_unregister();
}
module_init(punit_atom_debug_init);
module_exit(punit_atom_debug_exit);
MODULE_AUTHOR("Kumar P, Mahesh <[email protected]>");
MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>");
MODULE_DESCRIPTION("Driver for Punit devices states debugging");
MODULE_LICENSE("GPL v2");
 | 
	linux-master | 
	arch/x86/platform/atom/punit_atom_debug.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MID Power Management Unit (PWRMU) device driver
 *
 * Copyright (C) 2016, Intel Corporation
 *
 * Author: Andy Shevchenko <[email protected]>
 *
 * Intel MID Power Management Unit device driver handles the South Complex PCI
 * devices such as GPDMA, SPI, I2C, PWM, and so on. By default PCI core
 * modifies bits in PMCSR register in the PCI configuration space. This is not
 * enough on some SoCs like Intel Tangier. In such case PCI core sets a new
 * power state of the device in question through a PM hook registered in struct
 * pci_platform_pm_ops (see drivers/pci/pci-mid.c).
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <asm/intel-mid.h>
/* Registers */
#define PM_STS			0x00
#define PM_CMD			0x04
#define PM_ICS			0x08
#define PM_WKC(x)		(0x10 + (x) * 4)
#define PM_WKS(x)		(0x18 + (x) * 4)
#define PM_SSC(x)		(0x20 + (x) * 4)
#define PM_SSS(x)		(0x30 + (x) * 4)
/* Bits in PM_STS */
#define PM_STS_BUSY		(1 << 8)
/* Bits in PM_CMD */
#define PM_CMD_CMD(x)		((x) << 0)
#define PM_CMD_IOC		(1 << 8)
#define PM_CMD_CM_NOP		(0 << 9)
#define PM_CMD_CM_IMMEDIATE	(1 << 9)
#define PM_CMD_CM_DELAY		(2 << 9)
#define PM_CMD_CM_TRIGGER	(3 << 9)
/* System states */
#define PM_CMD_SYS_STATE_S5	(5 << 16)
/* Trigger variants */
#define PM_CMD_CFG_TRIGGER_NC	(3 << 19)
/* Message to wait for TRIGGER_NC case */
#define TRIGGER_NC_MSG_2	(2 << 22)
/* List of commands */
#define CMD_SET_CFG		0x01
/* Bits in PM_ICS */
#define PM_ICS_INT_STATUS(x)	((x) & 0xff)
#define PM_ICS_IE		(1 << 8)
#define PM_ICS_IP		(1 << 9)
#define PM_ICS_SW_INT_STS	(1 << 10)
/* List of interrupts */
#define INT_INVALID		0
#define INT_CMD_COMPLETE	1
#define INT_CMD_ERR		2
#define INT_WAKE_EVENT		3
#define INT_LSS_POWER_ERR	4
#define INT_S0iX_MSG_ERR	5
#define INT_NO_C6		6
#define INT_TRIGGER_ERR		7
#define INT_INACTIVITY		8
/* South Complex devices */
#define LSS_MAX_SHARED_DEVS	4
#define LSS_MAX_DEVS		64
#define LSS_WS_BITS		1	/* wake state width */
#define LSS_PWS_BITS		2	/* power state width */
/* Supported device IDs */
#define PCI_DEVICE_ID_PENWELL	0x0828
#define PCI_DEVICE_ID_TANGIER	0x11a1
struct mid_pwr_dev {
	struct pci_dev *pdev;
	pci_power_t state;
};
struct mid_pwr {
	struct device *dev;
	void __iomem *regs;
	int irq;
	bool available;
	struct mutex lock;
	struct mid_pwr_dev lss[LSS_MAX_DEVS][LSS_MAX_SHARED_DEVS];
};
static struct mid_pwr *midpwr;
static u32 mid_pwr_get_state(struct mid_pwr *pwr, int reg)
{
	return readl(pwr->regs + PM_SSS(reg));
}
static void mid_pwr_set_state(struct mid_pwr *pwr, int reg, u32 value)
{
	writel(value, pwr->regs + PM_SSC(reg));
}
static void mid_pwr_set_wake(struct mid_pwr *pwr, int reg, u32 value)
{
	writel(value, pwr->regs + PM_WKC(reg));
}
static void mid_pwr_interrupt_disable(struct mid_pwr *pwr)
{
	writel(~PM_ICS_IE, pwr->regs + PM_ICS);
}
static bool mid_pwr_is_busy(struct mid_pwr *pwr)
{
	return !!(readl(pwr->regs + PM_STS) & PM_STS_BUSY);
}
/* Wait up to 500 ms for the latest PWRMU command to finish */
static int mid_pwr_wait(struct mid_pwr *pwr)
{
	unsigned int count = 500000;
	bool busy;
	do {
		busy = mid_pwr_is_busy(pwr);
		if (!busy)
			return 0;
		udelay(1);
	} while (--count);
	return -EBUSY;
}
static int mid_pwr_wait_for_cmd(struct mid_pwr *pwr, u8 cmd)
{
	writel(PM_CMD_CMD(cmd) | PM_CMD_CM_IMMEDIATE, pwr->regs + PM_CMD);
	return mid_pwr_wait(pwr);
}
static int __update_power_state(struct mid_pwr *pwr, int reg, int bit, int new)
{
	int curstate;
	u32 power;
	int ret;
	/* Check if the device is already in desired state */
	power = mid_pwr_get_state(pwr, reg);
	curstate = (power >> bit) & 3;
	if (curstate == new)
		return 0;
	/* Update the power state */
	mid_pwr_set_state(pwr, reg, (power & ~(3 << bit)) | (new << bit));
	/* Send command to SCU */
	ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG);
	if (ret)
		return ret;
	/* Check if the device is already in desired state */
	power = mid_pwr_get_state(pwr, reg);
	curstate = (power >> bit) & 3;
	if (curstate != new)
		return -EAGAIN;
	return 0;
}
static pci_power_t __find_weakest_power_state(struct mid_pwr_dev *lss,
					      struct pci_dev *pdev,
					      pci_power_t state)
{
	pci_power_t weakest = PCI_D3hot;
	unsigned int j;
	/* Find device in cache or first free cell */
	for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
		if (lss[j].pdev == pdev || !lss[j].pdev)
			break;
	}
	/* Store the desired state in cache */
	if (j < LSS_MAX_SHARED_DEVS) {
		lss[j].pdev = pdev;
		lss[j].state = state;
	} else {
		dev_WARN(&pdev->dev, "No room for device in PWRMU LSS cache\n");
		weakest = state;
	}
	/* Find the power state we may use */
	for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
		if (lss[j].state < weakest)
			weakest = lss[j].state;
	}
	return weakest;
}
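/*
 * Example (illustrative): if two devices share an LSS and request D3hot
 * and D0 respectively, the loop above returns D0, the numerically lowest
 * ("weakest") state, so the shared power island stays up until every
 * sharer agrees to go deeper.
 */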
static int __set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev,
			     pci_power_t state, int id, int reg, int bit)
{
	const char *name;
	int ret;
	state = __find_weakest_power_state(pwr->lss[id], pdev, state);
	name = pci_power_name(state);
	ret = __update_power_state(pwr, reg, bit, (__force int)state);
	if (ret) {
		dev_warn(&pdev->dev, "Can't set power state %s: %d\n", name, ret);
		return ret;
	}
	dev_vdbg(&pdev->dev, "Set power state %s\n", name);
	return 0;
}
static int mid_pwr_set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev,
				   pci_power_t state)
{
	int id, reg, bit;
	int ret;
	id = intel_mid_pwr_get_lss_id(pdev);
	if (id < 0)
		return id;
	reg = (id * LSS_PWS_BITS) / 32;
	bit = (id * LSS_PWS_BITS) % 32;
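	/*
	 * Worked example (illustrative): LSS id 20 with LSS_PWS_BITS == 2
	 * gives reg = 40 / 32 = 1 and bit = 40 % 32 = 8, i.e. the 2-bit
	 * state field for that device lives at bits 9:8 of PM_SSS(1).
	 */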
	/* We support states between PCI_D0 and PCI_D3hot */
	if (state < PCI_D0)
		state = PCI_D0;
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	mutex_lock(&pwr->lock);
	ret = __set_power_state(pwr, pdev, state, id, reg, bit);
	mutex_unlock(&pwr->lock);
	return ret;
}
int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
{
	struct mid_pwr *pwr = midpwr;
	int ret = 0;
	might_sleep();
	if (pwr && pwr->available)
		ret = mid_pwr_set_power_state(pwr, pdev, state);
	dev_vdbg(&pdev->dev, "set_power_state() returns %d\n", ret);
	return 0;
}
pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev)
{
	struct mid_pwr *pwr = midpwr;
	int id, reg, bit;
	u32 power;
	if (!pwr || !pwr->available)
		return PCI_UNKNOWN;
	id = intel_mid_pwr_get_lss_id(pdev);
	if (id < 0)
		return PCI_UNKNOWN;
	reg = (id * LSS_PWS_BITS) / 32;
	bit = (id * LSS_PWS_BITS) % 32;
	power = mid_pwr_get_state(pwr, reg);
	return (__force pci_power_t)((power >> bit) & 3);
}
void intel_mid_pwr_power_off(void)
{
	struct mid_pwr *pwr = midpwr;
	u32 cmd = PM_CMD_SYS_STATE_S5 |
		  PM_CMD_CMD(CMD_SET_CFG) |
		  PM_CMD_CM_TRIGGER |
		  PM_CMD_CFG_TRIGGER_NC |
		  TRIGGER_NC_MSG_2;
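	/*
	 * For reference (computed from the field definitions above), cmd
	 * composes to 0x009d0601: CMD_SET_CFG in bits 7:0, trigger mode in
	 * bits 10:9, S5 in bits 18:16, the NC trigger variant in bits 20:19
	 * and message 2 in bits 23:22.
	 */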
	/* Send command to SCU */
	writel(cmd, pwr->regs + PM_CMD);
	mid_pwr_wait(pwr);
}
int intel_mid_pwr_get_lss_id(struct pci_dev *pdev)
{
	int vndr;
	u8 id;
	/*
	 * Mapping to PWRMU index is kept in the Logical SubSystem ID byte of
	 * Vendor capability.
	 */
	vndr = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
	if (!vndr)
		return -EINVAL;
	/* Read the Logical SubSystem ID byte */
	pci_read_config_byte(pdev, vndr + INTEL_MID_PWR_LSS_OFFSET, &id);
	if (!(id & INTEL_MID_PWR_LSS_TYPE))
		return -ENODEV;
	id &= ~INTEL_MID_PWR_LSS_TYPE;
	if (id >= LSS_MAX_DEVS)
		return -ERANGE;
	return id;
}
static irqreturn_t mid_pwr_irq_handler(int irq, void *dev_id)
{
	struct mid_pwr *pwr = dev_id;
	u32 ics;
	ics = readl(pwr->regs + PM_ICS);
	if (!(ics & PM_ICS_IP))
		return IRQ_NONE;
	writel(ics | PM_ICS_IP, pwr->regs + PM_ICS);
	dev_warn(pwr->dev, "Unexpected IRQ: %#x\n", PM_ICS_INT_STATUS(ics));
	return IRQ_HANDLED;
}
struct mid_pwr_device_info {
	int (*set_initial_state)(struct mid_pwr *pwr);
};
static int mid_pwr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mid_pwr_device_info *info = (void *)id->driver_data;
	struct device *dev = &pdev->dev;
	struct mid_pwr *pwr;
	int ret;
	ret = pcim_enable_device(pdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "error: could not enable device\n");
		return ret;
	}
	ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "I/O memory remapping failed\n");
		return ret;
	}
	pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
	if (!pwr)
		return -ENOMEM;
	pwr->dev = dev;
	pwr->regs = pcim_iomap_table(pdev)[0];
	pwr->irq = pdev->irq;
	mutex_init(&pwr->lock);
	/* Disable interrupts */
	mid_pwr_interrupt_disable(pwr);
	if (info && info->set_initial_state) {
		ret = info->set_initial_state(pwr);
		if (ret)
			dev_warn(dev, "Can't set initial state: %d\n", ret);
	}
	ret = devm_request_irq(dev, pdev->irq, mid_pwr_irq_handler,
			       IRQF_NO_SUSPEND, pci_name(pdev), pwr);
	if (ret)
		return ret;
	pwr->available = true;
	midpwr = pwr;
	pci_set_drvdata(pdev, pwr);
	return 0;
}
static int mid_set_initial_state(struct mid_pwr *pwr, const u32 *states)
{
	unsigned int i, j;
	int ret;
	/*
	 * Enable wake events.
	 *
	 * PWRMU supports up to 32 sources for waking up the system. Ungate them
	 * all here.
	 */
	mid_pwr_set_wake(pwr, 0, 0xffffffff);
	mid_pwr_set_wake(pwr, 1, 0xffffffff);
	/*
	 * Power off South Complex devices.
	 *
	 * There is a map (see the note below) of 64 devices, with 2 bits each
	 * in 32-bit HW registers. The following calls set all devices to one
	 * known initial state, i.e. PCI_D3hot. This is done in conjunction
	 * with PMCSR setting in arch/x86/pci/intel_mid_pci.c.
	 *
	 * NOTE: The actual device mapping is provided by a platform at run
	 * time using vendor capability of PCI configuration space.
	 */
	mid_pwr_set_state(pwr, 0, states[0]);
	mid_pwr_set_state(pwr, 1, states[1]);
	mid_pwr_set_state(pwr, 2, states[2]);
	mid_pwr_set_state(pwr, 3, states[3]);
	/* Send command to SCU */
	ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG);
	if (ret)
		return ret;
	for (i = 0; i < LSS_MAX_DEVS; i++) {
		for (j = 0; j < LSS_MAX_SHARED_DEVS; j++)
			pwr->lss[i][j].state = PCI_D3hot;
	}
	return 0;
}
static int pnw_set_initial_state(struct mid_pwr *pwr)
{
	/* On Penwell SRAM must stay powered on */
	static const u32 states[] = {
		0xf00fffff,		/* PM_SSC(0) */
		0xffffffff,		/* PM_SSC(1) */
		0xffffffff,		/* PM_SSC(2) */
		0xffffffff,		/* PM_SSC(3) */
	};
	return mid_set_initial_state(pwr, states);
}
static int tng_set_initial_state(struct mid_pwr *pwr)
{
	static const u32 states[] = {
		0xffffffff,		/* PM_SSC(0) */
		0xffffffff,		/* PM_SSC(1) */
		0xffffffff,		/* PM_SSC(2) */
		0xffffffff,		/* PM_SSC(3) */
	};
	return mid_set_initial_state(pwr, states);
}
static const struct mid_pwr_device_info pnw_info = {
	.set_initial_state = pnw_set_initial_state,
};
static const struct mid_pwr_device_info tng_info = {
	.set_initial_state = tng_set_initial_state,
};
/* This table should be in sync with the one in drivers/pci/pci-mid.c */
static const struct pci_device_id mid_pwr_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL), (kernel_ulong_t)&pnw_info },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER), (kernel_ulong_t)&tng_info },
	{}
};
static struct pci_driver mid_pwr_pci_driver = {
	.name		= "intel_mid_pwr",
	.probe		= mid_pwr_probe,
	.id_table	= mid_pwr_pci_ids,
};
builtin_pci_driver(mid_pwr_pci_driver);
 | 
	linux-master | 
	arch/x86/platform/intel-mid/pwr.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MID platform setup code
 *
 * (C) Copyright 2008, 2012, 2021 Intel Corporation
 * Author: Jacob Pan ([email protected])
 * Author: Sathyanarayanan Kuppuswamy <[email protected]>
 */
#define pr_fmt(fmt) "intel_mid: " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/regulator/machine.h>
#include <linux/scatterlist.h>
#include <linux/irq.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <asm/setup.h>
#include <asm/mpspec_def.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/intel-mid.h>
#include <asm/io.h>
#include <asm/i8259.h>
#include <asm/intel_scu_ipc.h>
#include <asm/reboot.h>
#define IPCMSG_COLD_OFF		0x80	/* Only for Tangier */
#define IPCMSG_COLD_RESET	0xF1
static void intel_mid_power_off(void)
{
	/* Shut down South Complex via PWRMU */
	intel_mid_pwr_power_off();
	/* Only for Tangier, the rest will ignore this command */
	intel_scu_ipc_dev_simple_command(NULL, IPCMSG_COLD_OFF, 1);
}
static void intel_mid_reboot(void)
{
	intel_scu_ipc_dev_simple_command(NULL, IPCMSG_COLD_RESET, 0);
}
static void __init intel_mid_time_init(void)
{
	/* Lapic only, no apbt */
	x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
	x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
}
static void intel_mid_arch_setup(void)
{
	switch (boot_cpu_data.x86_model) {
	case 0x3C:
	case 0x4A:
		x86_platform.legacy.rtc = 1;
		break;
	default:
		break;
	}
	/*
	 * Intel MID platforms are using explicitly defined regulators.
	 *
	 * Let the regulator core know that we do not have any additional
	 * regulators left. This lets it substitute unprovided regulators with
	 * dummy ones:
	 */
	regulator_has_full_constraints();
}
/*
 * Moorestown does not have an external NMI source nor port 0x61 to report
 * NMI status. The possible NMI sources are from the PMU as a result of the
 * NMI watchdog or lock debugging. Reading I/O port 0x61 returns 0xff, which
 * would mislead the NMI handler.
 */
static unsigned char intel_mid_get_nmi_reason(void)
{
	return 0;
}
/*
 * Moorestown specific x86_init function overrides and early setup
 * calls.
 */
void __init x86_intel_mid_early_setup(void)
{
	x86_init.resources.probe_roms = x86_init_noop;
	x86_init.resources.reserve_resources = x86_init_noop;
	x86_init.timers.timer_init = intel_mid_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_init.irqs.pre_vector_init = x86_init_noop;
	x86_init.oem.arch_setup = intel_mid_arch_setup;
	x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
	x86_init.pci.arch_init = intel_mid_pci_init;
	x86_init.pci.fixup_irqs = x86_init_noop;
	legacy_pic = &null_legacy_pic;
	/*
	 * Do nothing for now, as everything needed is done in
	 * x86_intel_mid_early_setup() below.
	 */
	x86_init.acpi.reduced_hw_early_init = x86_init_noop;
	pm_power_off = intel_mid_power_off;
	machine_ops.emergency_restart  = intel_mid_reboot;
	/* Avoid searching for BIOS MP tables */
	x86_init.mpparse.find_smp_config = x86_init_noop;
	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
	set_bit(MP_BUS_ISA, mp_bus_not_pci);
}
 | 
	linux-master | 
	arch/x86/platform/intel-mid/intel-mid.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
#include <xen/hvc-console.h>
#include <asm/io_apic.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
#include <asm/x86_init.h>
#include <asm/xen/interface.h>
#include <xen/xen.h>
#include <xen/interface/hvm/start_info.h>
/*
 * PVH variables.
 *
 * pvh_bootparams and pvh_start_info need to live in a data segment since
 * they are used after startup_{32|64}, which clear .bss, are invoked.
 */
struct boot_params __initdata pvh_bootparams;
struct hvm_start_info __initdata pvh_start_info;
const unsigned int __initconst pvh_start_info_sz = sizeof(pvh_start_info);
static u64 __init pvh_get_root_pointer(void)
{
	return pvh_start_info.rsdp_paddr;
}
/*
 * Xen guests are able to obtain the memory map from the hypervisor via the
 * HYPERVISOR_memory_op hypercall.
 * If we are trying to boot a Xen PVH guest, it is expected that the kernel
 * will have been configured to provide an override for this routine to do
 * just that.
 */
void __init __weak mem_map_via_hcall(struct boot_params *ptr __maybe_unused)
{
	xen_raw_printk("Error: Could not find memory map\n");
	BUG();
}
static void __init init_pvh_bootparams(bool xen_guest)
{
	if ((pvh_start_info.version > 0) && (pvh_start_info.memmap_entries)) {
		struct hvm_memmap_table_entry *ep;
		int i;
		ep = __va(pvh_start_info.memmap_paddr);
		pvh_bootparams.e820_entries = pvh_start_info.memmap_entries;
		for (i = 0; i < pvh_bootparams.e820_entries ; i++, ep++) {
			pvh_bootparams.e820_table[i].addr = ep->addr;
			pvh_bootparams.e820_table[i].size = ep->size;
			pvh_bootparams.e820_table[i].type = ep->type;
		}
	} else if (xen_guest) {
		mem_map_via_hcall(&pvh_bootparams);
	} else {
		/* Non-Xen guests are not supported by version 0 */
		BUG();
	}
	if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
		pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr =
			ISA_START_ADDRESS;
		pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size =
			ISA_END_ADDRESS - ISA_START_ADDRESS;
		pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type =
			E820_TYPE_RESERVED;
		pvh_bootparams.e820_entries++;
	} else
		xen_raw_printk("Warning: Can fit ISA range into e820\n");
	pvh_bootparams.hdr.cmd_line_ptr =
		pvh_start_info.cmdline_paddr;
	/* The first module is always ramdisk. */
	if (pvh_start_info.nr_modules) {
		struct hvm_modlist_entry *modaddr =
			__va(pvh_start_info.modlist_paddr);
		pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
		pvh_bootparams.hdr.ramdisk_size = modaddr->size;
	}
	/*
	 * See Documentation/arch/x86/boot.rst.
	 *
	 * Version 2.12 supports Xen entry point but we will use default x86/PC
	 * environment (i.e. hardware_subarch 0).
	 */
	pvh_bootparams.hdr.version = (2 << 8) | 12;
	pvh_bootparams.hdr.type_of_loader = ((xen_guest ? 0x9 : 0xb) << 4) | 0;
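	/*
	 * Loader IDs per the boot protocol (assumed from boot.rst): 0x9 is
	 * Xen and 0xb is Qemu; the low nibble is the loader version, left
	 * at 0 here.
	 */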
	x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
}
/*
 * If we are trying to boot a Xen PVH guest, it is expected that the kernel
 * will have been configured to provide the required override for this routine.
 */
void __init __weak xen_pvh_init(struct boot_params *boot_params)
{
	xen_raw_printk("Error: Missing xen PVH initialization\n");
	BUG();
}
static void __init hypervisor_specific_init(bool xen_guest)
{
	if (xen_guest)
		xen_pvh_init(&pvh_bootparams);
}
/*
 * This routine (and those that it might call) should not use
 * anything that lives in .bss since that segment will be cleared later.
 */
void __init xen_prepare_pvh(void)
{
	u32 msr = xen_cpuid_base();
	bool xen_guest = !!msr;
	if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
		xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
				pvh_start_info.magic);
		BUG();
	}
	memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
	hypervisor_specific_init(xen_guest);
	init_pvh_bootparams(xen_guest);
}
 | 
	linux-master | 
	arch/x86/platform/pvh/enlighten.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * BIOS run time interface routines.
 *
 * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) Russ Anderson <[email protected]>
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/efi.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv_hub.h>
unsigned long uv_systab_phys __ro_after_init = EFI_INVALID_TABLE_ADDR;
struct uv_systab *uv_systab;
static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
			u64 a4, u64 a5)
{
	struct uv_systab *tab = uv_systab;
	s64 ret;
	if (!tab || !tab->function)
		/*
		 * BIOS does not support UV systab
		 */
		return BIOS_STATUS_UNIMPLEMENTED;
	ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
	return ret;
}
static s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4,
		u64 a5)
{
	s64 ret;
	if (down_interruptible(&__efi_uv_runtime_lock))
		return BIOS_STATUS_ABORT;
	ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
	up(&__efi_uv_runtime_lock);
	return ret;
}
static s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
		u64 a4, u64 a5)
{
	unsigned long bios_flags;
	s64 ret;
	if (down_interruptible(&__efi_uv_runtime_lock))
		return BIOS_STATUS_ABORT;
	local_irq_save(bios_flags);
	ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
	local_irq_restore(bios_flags);
	up(&__efi_uv_runtime_lock);
	return ret;
}
long sn_partition_id;
EXPORT_SYMBOL_GPL(sn_partition_id);
long sn_coherency_id;
EXPORT_SYMBOL_GPL(sn_coherency_id);
long sn_region_size;
EXPORT_SYMBOL_GPL(sn_region_size);
long system_serial_number;
int uv_type;
s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
		long *region, long *ssn)
{
	s64 ret;
	u64 v0, v1;
	union partition_info_u part;
	ret = uv_bios_call_irqsave(UV_BIOS_GET_SN_INFO, fc,
				(u64)(&v0), (u64)(&v1), 0, 0);
	if (ret != BIOS_STATUS_SUCCESS)
		return ret;
	part.val = v0;
	if (uvtype)
		*uvtype = part.hub_version;
	if (partid)
		*partid = part.partition_id;
	if (coher)
		*coher = part.coherence_id;
	if (region)
		*region = part.region_size;
	if (ssn)
		*ssn = v1;
	return ret;
}
int
uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
			   unsigned long *intr_mmr_offset)
{
	u64 watchlist;
	s64 ret;
	/*
	 * The BIOS returns the watchlist number or a negative error number.
	 */
	ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
			mq_size, (u64)intr_mmr_offset,
			(u64)&watchlist, 0);
	if (ret < BIOS_STATUS_SUCCESS)
		return ret;
	return watchlist;
}
EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_alloc);
int
uv_bios_mq_watchlist_free(int blade, int watchlist_num)
{
	return (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_FREE,
				blade, watchlist_num, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_free);
s64
uv_bios_change_memprotect(u64 paddr, u64 len, enum uv_memprotect perms)
{
	return uv_bios_call_irqsave(UV_BIOS_MEMPROTECT, paddr, len,
					perms, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_change_memprotect);
s64
uv_bios_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
{
	return uv_bios_call_irqsave(UV_BIOS_GET_PARTITION_ADDR, (u64)cookie,
				    (u64)addr, buf, (u64)len, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_reserved_page_pa);
s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
{
	return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
			   (u64)ticks_per_second, 0, 0, 0);
}
/*
 * uv_bios_set_legacy_vga_target - Set Legacy VGA I/O Target
 * @decode: true to enable target, false to disable target
 * @domain: PCI domain number
 * @bus: PCI bus number
 *
 * Returns:
 *    0: Success
 *    -EINVAL: Invalid domain or bus number
 *    -ENOSYS: Capability not available
 *    -EBUSY: Legacy VGA I/O cannot be retargeted at this time
 */
int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus)
{
	return uv_bios_call(UV_BIOS_SET_LEGACY_VGA_TARGET,
				(u64)decode, (u64)domain, (u64)bus, 0, 0);
}
s64 uv_bios_get_master_nasid(u64 size, u64 *master_nasid)
{
	return uv_bios_call(UV_BIOS_EXTRA, 0, UV_BIOS_EXTRA_MASTER_NASID, 0,
				size, (u64)master_nasid);
}
EXPORT_SYMBOL_GPL(uv_bios_get_master_nasid);
s64 uv_bios_get_heapsize(u64 nasid, u64 size, u64 *heap_size)
{
	return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_GET_HEAPSIZE,
				0, size, (u64)heap_size);
}
EXPORT_SYMBOL_GPL(uv_bios_get_heapsize);
s64 uv_bios_install_heap(u64 nasid, u64 heap_size, u64 *bios_heap)
{
	return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_INSTALL_HEAP,
				0, heap_size, (u64)bios_heap);
}
EXPORT_SYMBOL_GPL(uv_bios_install_heap);
s64 uv_bios_obj_count(u64 nasid, u64 size, u64 *objcnt)
{
	return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_OBJECT_COUNT,
				0, size, (u64)objcnt);
}
EXPORT_SYMBOL_GPL(uv_bios_obj_count);
s64 uv_bios_enum_objs(u64 nasid, u64 size, u64 *objbuf)
{
	return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_ENUM_OBJECTS,
				0, size, (u64)objbuf);
}
EXPORT_SYMBOL_GPL(uv_bios_enum_objs);
s64 uv_bios_enum_ports(u64 nasid, u64 obj_id, u64 size, u64 *portbuf)
{
	return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_ENUM_PORTS,
				obj_id, size, (u64)portbuf);
}
EXPORT_SYMBOL_GPL(uv_bios_enum_ports);
s64 uv_bios_get_geoinfo(u64 nasid, u64 size, u64 *buf)
{
	return uv_bios_call(UV_BIOS_GET_GEOINFO, nasid, (u64)buf, size, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_get_geoinfo);
s64 uv_bios_get_pci_topology(u64 size, u64 *buf)
{
	return uv_bios_call(UV_BIOS_GET_PCI_TOPOLOGY, (u64)buf, size, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_get_pci_topology);
unsigned long get_uv_systab_phys(bool msg)
{
	if ((uv_systab_phys == EFI_INVALID_TABLE_ADDR) ||
	    !uv_systab_phys || efi_runtime_disabled()) {
		if (msg)
			pr_crit("UV: UVsystab: missing\n");
		return 0;
	}
	return uv_systab_phys;
}
int uv_bios_init(void)
{
	unsigned long uv_systab_phys_addr;
	uv_systab = NULL;
	uv_systab_phys_addr = get_uv_systab_phys(1);
	if (!uv_systab_phys_addr)
		return -EEXIST;
	uv_systab = ioremap(uv_systab_phys_addr, sizeof(struct uv_systab));
	if (!uv_systab || strncmp(uv_systab->signature, UV_SYSTAB_SIG, 4)) {
		pr_err("UV: UVsystab: bad signature!\n");
		iounmap(uv_systab);
		return -EINVAL;
	}
	/* Starting with UV4 the UV systab size is variable */
	if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) {
		int size = uv_systab->size;
		iounmap(uv_systab);
		uv_systab = ioremap(uv_systab_phys_addr, size);
		if (!uv_systab) {
			pr_err("UV: UVsystab: ioremap(%d) failed!\n", size);
			return -EFAULT;
		}
	}
	pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
	return 0;
}
 | 
	linux-master | 
	arch/x86/platform/uv/bios_uv.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SGI NMI support routines
 *
 * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) Mike Travis
 */
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/kdb.h>
#include <linux/kexec.h>
#include <linux/kgdb.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/clocksource.h>
#include <asm/apic.h>
#include <asm/current.h>
#include <asm/kdebug.h>
#include <asm/local64.h>
#include <asm/nmi.h>
#include <asm/reboot.h>
#include <asm/traps.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>
/*
 * UV handler for NMI
 *
 * Handle system-wide NMI events generated by the global 'power nmi' command.
 *
 * Basic operation is to field the NMI interrupt on each CPU and wait
 * until all CPUs have arrived in the NMI handler.  If some CPUs do not
 * make it into the handler, try to force them in with the IPI(NMI) signal.
 *
 * We also have to lessen UV Hub MMR accesses as much as possible as this
 * disrupts the UV Hub's primary mission of directing NumaLink traffic and
 * can cause system problems to occur.
 *
 * To do this we register our primary NMI notifier on the NMI_UNKNOWN
 * chain.  This reduces the number of false NMI calls when the perf
 * tools are running which generate an enormous number of NMIs per
 * second (~4M/s for 1024 CPU threads).  Our secondary NMI handler is
 * very short, as it only checks whether it has been "pinged" with the
 * IPI(NMI) signal mentioned above, and does not read the UV Hub's MMR.
 *
 */
static struct uv_hub_nmi_s **uv_hub_nmi_list;
DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
/* Newer SMM NMI handler, not present in all systems */
static unsigned long uvh_nmi_mmrx;		/* UVH_EVENT_OCCURRED0/1 */
static unsigned long uvh_nmi_mmrx_clear;	/* UVH_EVENT_OCCURRED0/1_ALIAS */
static int uvh_nmi_mmrx_shift;			/* UVH_EVENT_OCCURRED0/1_EXTIO_INT0_SHFT */
static char *uvh_nmi_mmrx_type;			/* "EXTIO_INT0" */
/* Non-zero indicates newer SMM NMI handler present */
static unsigned long uvh_nmi_mmrx_supported;	/* UVH_EXTIO_INT0_BROADCAST */
/* Indicates to BIOS that we want to use the newer SMM NMI handler */
static unsigned long uvh_nmi_mmrx_req;		/* UVH_BIOS_KERNEL_MMR_ALIAS_2 */
static int uvh_nmi_mmrx_req_shift;		/* 62 */
/* UV hubless values */
#define NMI_CONTROL_PORT	0x70
#define NMI_DUMMY_PORT		0x71
#define PAD_OWN_GPP_D_0		0x2c
#define GPI_NMI_STS_GPP_D_0	0x164
#define GPI_NMI_ENA_GPP_D_0	0x174
#define STS_GPP_D_0_MASK	0x1
#define PAD_CFG_DW0_GPP_D_0	0x4c0
#define GPIROUTNMI		(1ul << 17)
#define PCH_PCR_GPIO_1_BASE	0xfdae0000ul
#define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset))
static u64 *pch_base;
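/*
 * pch_base is expected to hold an ioremap() of PCH_PCR_GPIO_1_BASE,
 * established elsewhere in this driver before PCH_PCR_GPIO_ADDRESS()
 * is dereferenced.
 */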
static unsigned long nmi_mmr;
static unsigned long nmi_mmr_clear;
static unsigned long nmi_mmr_pending;
static atomic_t	uv_in_nmi;
static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
static atomic_t uv_nmi_slave_continue;
static cpumask_var_t uv_nmi_cpu_mask;
static atomic_t uv_nmi_kexec_failed;
/* Values for uv_nmi_slave_continue */
#define SLAVE_CLEAR	0
#define SLAVE_CONTINUE	1
#define SLAVE_EXIT	2
/*
 * By default, all stack dumps go to the console and the log buffer.
 * Lower the level to send them to the log buffer only.
 */
static int uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);
/*
 * The following values show statistics on how perf events are affecting
 * this system.
 */
static int param_get_local64(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
}
static int param_set_local64(const char *val, const struct kernel_param *kp)
{
	/* Clear on any write */
	local64_set((local64_t *)kp->arg, 0);
	return 0;
}
static const struct kernel_param_ops param_ops_local64 = {
	.get = param_get_local64,
	.set = param_set_local64,
};
#define param_check_local64(name, p) __param_check(name, p, local64_t)
static local64_t uv_nmi_count;
module_param_named(nmi_count, uv_nmi_count, local64, 0644);
static local64_t uv_nmi_misses;
module_param_named(nmi_misses, uv_nmi_misses, local64, 0644);
static local64_t uv_nmi_ping_count;
module_param_named(ping_count, uv_nmi_ping_count, local64, 0644);
static local64_t uv_nmi_ping_misses;
module_param_named(ping_misses, uv_nmi_ping_misses, local64, 0644);
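/*
 * Usage sketch (assumed sysfs layout for this built-in driver):
 *
 *	cat /sys/module/uv_nmi/parameters/nmi_count
 *	echo 0 > /sys/module/uv_nmi/parameters/nmi_count
 *
 * Any write clears a counter, per param_set_local64() above.
 */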
/*
 * Following values allow tuning for large systems under heavy loading
 */
static int uv_nmi_initial_delay = 100;
module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);
static int uv_nmi_slave_delay = 100;
module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644);
static int uv_nmi_loop_delay = 100;
module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644);
static int uv_nmi_trigger_delay = 10000;
module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644);
static int uv_nmi_wait_count = 100;
module_param_named(wait_count, uv_nmi_wait_count, int, 0644);
static int uv_nmi_retry_count = 500;
module_param_named(retry_count, uv_nmi_retry_count, int, 0644);
static bool uv_pch_intr_enable = true;
static bool uv_pch_intr_now_enabled;
module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644);
static bool uv_pch_init_enable = true;
module_param_named(pch_init_enable, uv_pch_init_enable, bool, 0644);
static int uv_nmi_debug;
module_param_named(debug, uv_nmi_debug, int, 0644);
#define nmi_debug(fmt, ...)				\
	do {						\
		if (uv_nmi_debug)			\
			pr_info(fmt, ##__VA_ARGS__);	\
	} while (0)
/* Valid NMI Actions */
#define	ACTION_LEN	16
static struct nmi_action {
	char	*action;
	char	*desc;
} valid_acts[] = {
	{	"kdump",	"do kernel crash dump"			},
	{	"dump",		"dump process stack for each cpu"	},
	{	"ips",		"dump Inst Ptr info for each cpu"	},
	{	"kdb",		"enter KDB (needs kgdboc= assignment)"	},
	{	"kgdb",		"enter KGDB (needs gdb target remote)"	},
	{	"health",	"check if CPUs respond to NMI"		},
};
typedef char action_t[ACTION_LEN];
static action_t uv_nmi_action = { "dump" };
static int param_get_action(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", uv_nmi_action);
}
static int param_set_action(const char *val, const struct kernel_param *kp)
{
	int i;
	int n = ARRAY_SIZE(valid_acts);
	char arg[ACTION_LEN];
	/* (remove possible '\n') */
	strscpy(arg, val, strnchrnul(val, sizeof(arg)-1, '\n') - val + 1);
	for (i = 0; i < n; i++)
		if (!strcmp(arg, valid_acts[i].action))
			break;
	if (i < n) {
		strscpy(uv_nmi_action, arg, sizeof(uv_nmi_action));
		pr_info("UV: New NMI action:%s\n", uv_nmi_action);
		return 0;
	}
	pr_err("UV: Invalid NMI action:%s, valid actions are:\n", arg);
	for (i = 0; i < n; i++)
		pr_err("UV: %-8s - %s\n",
			valid_acts[i].action, valid_acts[i].desc);
	return -EINVAL;
}
static const struct kernel_param_ops param_ops_action = {
	.get = param_get_action,
	.set = param_set_action,
};
#define param_check_action(name, p) __param_check(name, p, action_t)
module_param_named(action, uv_nmi_action, action, 0644);
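/*
 * Usage sketch (assumed): select what a system-wide 'power nmi' does at
 * run time, e.g.
 *
 *	echo kdb > /sys/module/uv_nmi/parameters/action
 *
 * Invalid strings are rejected and the valid actions are listed, per
 * param_set_action() above.
 */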
static inline bool uv_nmi_action_is(const char *action)
{
	return (strncmp(uv_nmi_action, action, strlen(action)) == 0);
}
/* Setup which NMI support is present in system */
static void uv_nmi_setup_mmrs(void)
{
	bool new_nmi_method_only = false;
	/* First determine arch specific MMRs to handshake with BIOS */
	if (UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK) {	/* UV2,3,4 setup */
		uvh_nmi_mmrx = UVH_EVENT_OCCURRED0;
		uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED0_ALIAS;
		uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT;
		uvh_nmi_mmrx_type = "OCRD0-EXTIO_INT0";
		uvh_nmi_mmrx_supported = UVH_EXTIO_INT0_BROADCAST;
		uvh_nmi_mmrx_req = UVH_BIOS_KERNEL_MMR_ALIAS_2;
		uvh_nmi_mmrx_req_shift = 62;
	} else if (UVH_EVENT_OCCURRED1_EXTIO_INT0_MASK) { /* UV5+ setup */
		uvh_nmi_mmrx = UVH_EVENT_OCCURRED1;
		uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED1_ALIAS;
		uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED1_EXTIO_INT0_SHFT;
		uvh_nmi_mmrx_type = "OCRD1-EXTIO_INT0";
		new_nmi_method_only = true;		/* Newer nmi always valid on UV5+ */
		uvh_nmi_mmrx_req = 0;			/* no request bit to clear */
	} else {
		pr_err("UV:%s:NMI support not available on this system\n", __func__);
		return;
	}
	/* Then find out if new NMI is supported */
	if (new_nmi_method_only || uv_read_local_mmr(uvh_nmi_mmrx_supported)) {
		if (uvh_nmi_mmrx_req)
			uv_write_local_mmr(uvh_nmi_mmrx_req,
						1UL << uvh_nmi_mmrx_req_shift);
		nmi_mmr = uvh_nmi_mmrx;
		nmi_mmr_clear = uvh_nmi_mmrx_clear;
		nmi_mmr_pending = 1UL << uvh_nmi_mmrx_shift;
		pr_info("UV: SMI NMI support: %s\n", uvh_nmi_mmrx_type);
	} else {
		nmi_mmr = UVH_NMI_MMR;
		nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
		pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE);
	}
}
/* Read NMI MMR and check if NMI flag was set by BMC. */
static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
{
	hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
	atomic_inc(&hub_nmi->read_mmr_count);
	return !!(hub_nmi->nmi_value & nmi_mmr_pending);
}
static inline void uv_local_mmr_clear_nmi(void)
{
	uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
}
/*
 * UV hubless NMI handler functions
 */
static inline void uv_reassert_nmi(void)
{
	/* (from arch/x86/include/asm/mach_traps.h) */
	outb(0x8f, NMI_CONTROL_PORT);
	inb(NMI_DUMMY_PORT);		/* dummy read */
	outb(0x0f, NMI_CONTROL_PORT);
	inb(NMI_DUMMY_PORT);		/* dummy read */
}
static void uv_init_hubless_pch_io(int offset, int mask, int data)
{
	int *addr = PCH_PCR_GPIO_ADDRESS(offset);
	int readd = readl(addr);
	if (mask) {			/* OR in new data */
		int writed = (readd & ~mask) | data;
		nmi_debug("UV:PCH: %p = %x & %x | %x (%x)\n",
			addr, readd, ~mask, data, writed);
		writel(writed, addr);
	} else if (readd & data) {	/* clear status bit */
		nmi_debug("UV:PCH: %p = %x\n", addr, data);
		writel(data, addr);
	}
	(void)readl(addr);		/* flush write data */
}
static void uv_nmi_setup_hubless_intr(void)
{
	uv_pch_intr_now_enabled = uv_pch_intr_enable;
	uv_init_hubless_pch_io(
		PAD_CFG_DW0_GPP_D_0, GPIROUTNMI,
		uv_pch_intr_now_enabled ? GPIROUTNMI : 0);
	nmi_debug("UV:NMI: GPP_D_0 interrupt %s\n",
		uv_pch_intr_now_enabled ? "enabled" : "disabled");
}
static struct init_nmi {
	unsigned int	offset;
	unsigned int	mask;
	unsigned int	data;
} init_nmi[] = {
	{	/* HOSTSW_OWN_GPP_D_0 */
	.offset = 0x84,
	.mask = 0x1,
	.data = 0x0,	/* ACPI Mode */
	},
/* Clear status: */
	{	/* GPI_INT_STS_GPP_D_0 */
	.offset = 0x104,
	.mask = 0x0,
	.data = 0x1,	/* Clear Status */
	},
	{	/* GPI_GPE_STS_GPP_D_0 */
	.offset = 0x124,
	.mask = 0x0,
	.data = 0x1,	/* Clear Status */
	},
	{	/* GPI_SMI_STS_GPP_D_0 */
	.offset = 0x144,
	.mask = 0x0,
	.data = 0x1,	/* Clear Status */
	},
	{	/* GPI_NMI_STS_GPP_D_0 */
	.offset = 0x164,
	.mask = 0x0,
	.data = 0x1,	/* Clear Status */
	},
/* Disable interrupts: */
	{	/* GPI_INT_EN_GPP_D_0 */
	.offset = 0x114,
	.mask = 0x1,
	.data = 0x0,	/* Disable interrupt generation */
	},
	{	/* GPI_GPE_EN_GPP_D_0 */
	.offset = 0x134,
	.mask = 0x1,
	.data = 0x0,	/* Disable interrupt generation */
	},
	{	/* GPI_SMI_EN_GPP_D_0 */
	.offset = 0x154,
	.mask = 0x1,
	.data = 0x0,	/* Disable interrupt generation */
	},
	{	/* GPI_NMI_EN_GPP_D_0 */
	.offset = 0x174,
	.mask = 0x1,
	.data = 0x0,	/* Disable interrupt generation */
	},
/* Setup GPP_D_0 Pad Config: */
	{	/* PAD_CFG_DW0_GPP_D_0 */
	.offset = 0x4c0,
	.mask = 0xffffffff,
	.data = 0x82020100,
/*
 *  31:30 Pad Reset Config (PADRSTCFG): = 2h  # PLTRST# (default)
 *
 *  29    RX Pad State Select (RXPADSTSEL): = 0 # Raw RX pad state directly
 *                                                from RX buffer (default)
 *
 *  28    RX Raw Override to '1' (RXRAW1): = 0 # No Override
 *
 *  26:25 RX Level/Edge Configuration (RXEVCFG):
 *      = 0h # Level
 *      = 1h # Edge
 *
 *  23    RX Invert (RXINV): = 0 # No Inversion (signal active high)
 *
 *  20    GPIO Input Route IOxAPIC (GPIROUTIOXAPIC):
 * = 0 # Routing does not cause peripheral IRQ...
 *     # (we want an NMI not an IRQ)
 *
 *  19    GPIO Input Route SCI (GPIROUTSCI): = 0 # Routing does not cause SCI.
 *  18    GPIO Input Route SMI (GPIROUTSMI): = 0 # Routing does not cause SMI.
 *  17    GPIO Input Route NMI (GPIROUTNMI): = 1 # Routing can cause NMI.
 *
 *  11:10 Pad Mode (PMODE1/0): = 0h = GPIO control the Pad.
 *   9    GPIO RX Disable (GPIORXDIS):
 * = 0 # Enable the input buffer (active low enable)
 *
 *   8    GPIO TX Disable (GPIOTXDIS):
 * = 1 # Disable the output buffer; i.e. Hi-Z
 *
 *   1 GPIO RX State (GPIORXSTATE): This is the current internal RX pad state..
 *   0 GPIO TX State (GPIOTXSTATE):
 * = 0 # (Leave at default)
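 *
 * (A worked check of this value follows the init_nmi[] table below.)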
 */
	},
/* Pad Config DW1 */
	{	/* PAD_CFG_DW1_GPP_D_0 */
	.offset = 0x4c4,
	.mask = 0x3c00,
	.data = 0,	/* Termination = none (default) */
	},
};
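/*
 * Worked check of the PAD_CFG_DW0_GPP_D_0 value above, derived from the
 * bitfield comment (the shift arithmetic is illustrative, not from the
 * PCH headers):
 *
 *	(2 << 30)  PADRSTCFG = 2h (PLTRST#)	= 0x80000000
 *	(1 << 25)  RXEVCFG = 1h (Edge)		= 0x02000000
 *	(1 << 17)  GPIROUTNMI = 1		= 0x00020000
 *	(1 << 8)   GPIOTXDIS = 1		= 0x00000100
 *					  total	= 0x82020100
 */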
static void uv_init_hubless_pch_d0(void)
{
	int i, read;
	read = *PCH_PCR_GPIO_ADDRESS(PAD_OWN_GPP_D_0);
	if (read != 0) {
		pr_info("UV: Hubless NMI already configured\n");
		return;
	}
	nmi_debug("UV: Initializing UV Hubless NMI on PCH\n");
	for (i = 0; i < ARRAY_SIZE(init_nmi); i++) {
		uv_init_hubless_pch_io(init_nmi[i].offset,
					init_nmi[i].mask,
					init_nmi[i].data);
	}
}
static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
{
	int *pstat = PCH_PCR_GPIO_ADDRESS(GPI_NMI_STS_GPP_D_0);
	int status = *pstat;
	hub_nmi->nmi_value = status;
	atomic_inc(&hub_nmi->read_mmr_count);
	if (!(status & STS_GPP_D_0_MASK))	/* Not a UV external NMI */
		return 0;
	*pstat = STS_GPP_D_0_MASK;	/* Is a UV NMI: clear GPP_D_0 status */
	(void)*pstat;			/* Flush write */
	return 1;
}
static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	if (hub_nmi->hub_present)
		return uv_nmi_test_mmr(hub_nmi);
	if (hub_nmi->pch_owner)		/* Only PCH owner can check status */
		return uv_nmi_test_hubless(hub_nmi);
	return -1;
}
/*
 * If this is the first CPU to enter on this hub, set the hub_nmi "in_nmi"
 * and "owner" values and return true.  If it is also the first CPU to enter
 * on the system, set the global "in_nmi" flag.
 */
static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{
	int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);
	if (first) {
		atomic_set(&hub_nmi->cpu_owner, cpu);
		if (atomic_add_unless(&uv_in_nmi, 1, 1))
			atomic_set(&uv_nmi_cpu, cpu);
		atomic_inc(&hub_nmi->nmi_count);
	}
	return first;
}
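/*
 * Added note: atomic_add_unless(&v, 1, 1) increments v only when it is
 * not already 1 and returns non-zero only for the caller that made the
 * 0 -> 1 transition, so it acts as a cheap "first CPU in wins" election
 * for both the per-hub and the system-wide flags above.
 */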
/* Check if this is a system NMI event */
static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	int cpu = smp_processor_id();
	int nmi = 0;
	int nmi_detected = 0;
	local64_inc(&uv_nmi_count);
	this_cpu_inc(uv_cpu_nmi.queries);
	do {
		nmi = atomic_read(&hub_nmi->in_nmi);
		if (nmi)
			break;
		if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
			nmi_detected = uv_test_nmi(hub_nmi);
			/* Check flag for UV external NMI */
			if (nmi_detected > 0) {
				uv_set_in_nmi(cpu, hub_nmi);
				nmi = 1;
				break;
			}
			/* A non-PCH node in a hubless system waits for NMI */
			else if (nmi_detected < 0)
				goto slave_wait;
			/* MMR/PCH NMI flag is clear */
			raw_spin_unlock(&hub_nmi->nmi_lock);
		} else {
			/* Wait a moment for the HUB NMI locker to set flag */
slave_wait:		cpu_relax();
			udelay(uv_nmi_slave_delay);
			/* Re-check hub in_nmi flag */
			nmi = atomic_read(&hub_nmi->in_nmi);
			if (nmi)
				break;
		}
		/*
		 * Check if the BMC missed setting the MMR NMI flag, or if this
		 * is a UV hubless system where only the PCH owner can check the flag
		 */
		if (!nmi) {
			nmi = atomic_read(&uv_in_nmi);
			if (nmi)
				uv_set_in_nmi(cpu, hub_nmi);
		}
		/* If we're holding the hub lock, release it now */
		if (nmi_detected < 0)
			raw_spin_unlock(&hub_nmi->nmi_lock);
	} while (0);
	if (!nmi)
		local64_inc(&uv_nmi_misses);
	return nmi;
}
/* Need to reset the NMI MMR register, but only once per hub. */
static inline void uv_clear_nmi(int cpu)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
		atomic_set(&hub_nmi->cpu_owner, -1);
		atomic_set(&hub_nmi->in_nmi, 0);
		if (hub_nmi->hub_present)
			uv_local_mmr_clear_nmi();
		else
			uv_reassert_nmi();
		raw_spin_unlock(&hub_nmi->nmi_lock);
	}
}
/* Ping non-responding CPUs, attempting to force them into the NMI handler */
static void uv_nmi_nr_cpus_ping(void)
{
	int cpu;
	for_each_cpu(cpu, uv_nmi_cpu_mask)
		uv_cpu_nmi_per(cpu).pinging = 1;
	__apic_send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
}
/* Clean up flags for CPUs that ignored both NMI and ping */
static void uv_nmi_cleanup_mask(void)
{
	int cpu;
	for_each_cpu(cpu, uv_nmi_cpu_mask) {
		uv_cpu_nmi_per(cpu).pinging =  0;
		uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
	}
}
/* Loop waiting as CPUs enter the NMI handler */
static int uv_nmi_wait_cpus(int first)
{
	int i, j, k, n = num_online_cpus();
	int last_k = 0, waiting = 0;
	int cpu = smp_processor_id();
	if (first) {
		cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
		k = 0;
	} else {
		k = n - cpumask_weight(uv_nmi_cpu_mask);
	}
	/* PCH NMI causes only one CPU to respond */
	if (first && uv_pch_intr_now_enabled) {
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
		return n - k - 1;
	}
	udelay(uv_nmi_initial_delay);
	for (i = 0; i < uv_nmi_retry_count; i++) {
		int loop_delay = uv_nmi_loop_delay;
		for_each_cpu(j, uv_nmi_cpu_mask) {
			if (uv_cpu_nmi_per(j).state) {
				cpumask_clear_cpu(j, uv_nmi_cpu_mask);
				if (++k >= n)
					break;
			}
		}
		if (k >= n) {		/* all in? */
			k = n;
			break;
		}
		if (last_k != k) {	/* abort if no new CPUs coming in */
			last_k = k;
			waiting = 0;
		} else if (++waiting > uv_nmi_wait_count)
			break;
		/* Extend delay if waiting only for CPU 0: */
		if (waiting && (n - k) == 1 &&
		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
			loop_delay *= 100;
		udelay(loop_delay);
	}
	atomic_set(&uv_nmi_cpus_in_nmi, k);
	return n - k;
}
/* Wait until all slave CPUs have entered the UV NMI handler */
static void uv_nmi_wait(int master)
{
	/* Indicate this CPU is in: */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
	/* If not the first CPU in (the master), then we are a slave CPU */
	if (!master)
		return;
	do {
		/* Wait for all other CPUs to gather here */
		if (!uv_nmi_wait_cpus(1))
			break;
		/* If not all made it in, send IPI NMI to them */
		pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));
		uv_nmi_nr_cpus_ping();
		/* If all CPUs are in, then done */
		if (!uv_nmi_wait_cpus(0))
			break;
		pr_alert("UV: %d CPUs not in NMI loop: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));
	} while (0);
	pr_alert("UV: %d of %d CPUs in NMI\n",
		atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
}
/* Dump Instruction Pointer header */
static void uv_nmi_dump_cpu_ip_hdr(void)
{
	pr_info("\nUV: %4s %6s %-32s %s   (Note: PID 0 not listed)\n",
		"CPU", "PID", "COMMAND", "IP");
}
/* Dump Instruction Pointer info */
static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
{
	pr_info("UV: %4d %6d %-32.32s %pS",
		cpu, current->pid, current->comm, (void *)regs->ip);
}
/*
 * Dump this CPU's state.  If action was set to "kdump" and the crash_kexec
 * failed, then we provide "dump" as an alternate action.  The "dump" action
 * now also includes the "ips" (instruction pointers) output, whereas the
 * "ips" action only displays instruction pointers for the non-idle CPUs.
 * This is an abbreviated form of the "ps" command.
 */
static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{
	const char *dots = " ................................. ";
	if (cpu == 0)
		uv_nmi_dump_cpu_ip_hdr();
	if (current->pid != 0 || !uv_nmi_action_is("ips"))
		uv_nmi_dump_cpu_ip(cpu, regs);
	if (uv_nmi_action_is("dump")) {
		pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
		show_regs(regs);
	}
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
}
/* Trigger a slave CPU to dump its state */
static void uv_nmi_trigger_dump(int cpu)
{
	int retry = uv_nmi_trigger_delay;
	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
		return;
	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
	do {
		cpu_relax();
		udelay(10);
		if (uv_cpu_nmi_per(cpu).state
				!= UV_NMI_STATE_DUMP)
			return;
	} while (--retry > 0);
	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
}
/* Wait until all CPUs are ready to exit */
static void uv_nmi_sync_exit(int master)
{
	atomic_dec(&uv_nmi_cpus_in_nmi);
	if (master) {
		while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)
			cpu_relax();
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	} else {
		while (atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
}
/* Current "health" check is to check which CPU's are responsive */
static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int in = atomic_read(&uv_nmi_cpus_in_nmi);
		int out = num_online_cpus() - in;
		pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out);
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
	uv_nmi_sync_exit(master);
}
/* Walk through CPU list and dump state of each */
static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int tcpu;
		int ignored = 0;
		int saved_console_loglevel = console_loglevel;
		pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
			uv_nmi_action_is("ips") ? "IPs" : "processes",
			atomic_read(&uv_nmi_cpus_in_nmi), cpu);
		console_loglevel = uv_nmi_loglevel;
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		for_each_online_cpu(tcpu) {
			if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask))
				ignored++;
			else if (tcpu == cpu)
				uv_nmi_dump_state_cpu(tcpu, regs);
			else
				uv_nmi_trigger_dump(tcpu);
		}
		if (ignored)
			pr_alert("UV: %d CPUs ignored NMI\n", ignored);
		console_loglevel = saved_console_loglevel;
		pr_alert("UV: process trace complete\n");
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
		while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
			cpu_relax();
		uv_nmi_dump_state_cpu(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}
static void uv_nmi_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
	touch_nmi_watchdog();
}
static void uv_nmi_kdump(int cpu, int main, struct pt_regs *regs)
{
	/* Check if a kdump kernel is loaded, for both main and secondary CPUs */
	if (!kexec_crash_image) {
		if (main)
			pr_err("UV: NMI error: kdump kernel not loaded\n");
		return;
	}
	/* Call crash to dump system state */
	if (main) {
		pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
		crash_kexec(regs);
		pr_emerg("UV: crash_kexec unexpectedly returned\n");
		atomic_set(&uv_nmi_kexec_failed, 1);
	} else { /* secondary */
		/* If kdump kernel fails, secondaries will exit this loop */
		while (atomic_read(&uv_nmi_kexec_failed) == 0) {
			/* Once the CPU shootdown starts, CPUs do not return */
			run_crash_ipi_callback(regs);
			mdelay(10);
		}
	}
}
#ifdef CONFIG_KGDB
#ifdef CONFIG_KGDB_KDB
static inline int uv_nmi_kdb_reason(void)
{
	return KDB_REASON_SYSTEM_NMI;
}
#else /* !CONFIG_KGDB_KDB */
static inline int uv_nmi_kdb_reason(void)
{
	/* Ensure user is expecting to attach gdb remote */
	if (uv_nmi_action_is("kgdb"))
		return 0;
	pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
	return -1;
}
#endif /* CONFIG_KGDB_KDB */
/*
 * Call KGDB/KDB from NMI handler
 *
 * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or
 * 'kdb' has no effect on which is used.  See the KGDB documentation for further
 * information.
 */
static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int reason = uv_nmi_kdb_reason();
		int ret;
		if (reason < 0)
			return;
		/* Call KGDB NMI handler as MASTER */
		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
				&uv_nmi_slave_continue);
		if (ret) {
			pr_alert("KGDB returned error, is kgdboc set?\n");
			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		}
	} else {
		/* Wait for KGDB signal that it's ready for slaves to enter */
		int sig;
		do {
			cpu_relax();
			sig = atomic_read(&uv_nmi_slave_continue);
		} while (!sig);
		/* Call KGDB as slave */
		if (sig == SLAVE_CONTINUE)
			kgdb_nmicallback(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}
#else /* !CONFIG_KGDB */
static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	pr_err("UV: NMI error: KGDB is not enabled in this kernel\n");
}
#endif /* !CONFIG_KGDB */
/*
 * UV NMI handler
 */
static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
	int cpu = smp_processor_id();
	int master = 0;
	unsigned long flags;
	local_irq_save(flags);
	/* If not a UV System NMI, ignore */
	if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
		local_irq_restore(flags);
		return NMI_DONE;
	}
	/* Indicate we are the first CPU into the NMI handler */
	master = (atomic_read(&uv_nmi_cpu) == cpu);
	/* If NMI action is "kdump", then attempt to do it */
	if (uv_nmi_action_is("kdump")) {
		uv_nmi_kdump(cpu, master, regs);
		/* Unexpected return, revert action to "dump" */
		if (master)
			strscpy(uv_nmi_action, "dump", sizeof(uv_nmi_action));
	}
	/* Pause as all CPUs enter the NMI handler */
	uv_nmi_wait(master);
	/* Process actions other than "kdump": */
	if (uv_nmi_action_is("health")) {
		uv_nmi_action_health(cpu, regs, master);
	} else if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) {
		uv_nmi_dump_state(cpu, regs, master);
	} else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb")) {
		uv_call_kgdb_kdb(cpu, regs, master);
	} else {
		if (master)
			pr_alert("UV: unknown NMI action: %s\n", uv_nmi_action);
		uv_nmi_sync_exit(master);
	}
	/* Clear per_cpu "in_nmi" flag */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
	/* Clear MMR NMI flag on each hub */
	uv_clear_nmi(cpu);
	/* Clear global flags */
	if (master) {
		if (!cpumask_empty(uv_nmi_cpu_mask))
			uv_nmi_cleanup_mask();
		atomic_set(&uv_nmi_cpus_in_nmi, -1);
		atomic_set(&uv_nmi_cpu, -1);
		atomic_set(&uv_in_nmi, 0);
		atomic_set(&uv_nmi_kexec_failed, 0);
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	}
	uv_nmi_touch_watchdogs();
	local_irq_restore(flags);
	return NMI_HANDLED;
}
/*
 * NMI handler for pulling in CPUs when perf events are grabbing our NMI
 */
static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
{
	int ret;
	this_cpu_inc(uv_cpu_nmi.queries);
	if (!this_cpu_read(uv_cpu_nmi.pinging)) {
		local64_inc(&uv_nmi_ping_misses);
		return NMI_DONE;
	}
	this_cpu_inc(uv_cpu_nmi.pings);
	local64_inc(&uv_nmi_ping_count);
	ret = uv_handle_nmi(reason, regs);
	this_cpu_write(uv_cpu_nmi.pinging, 0);
	return ret;
}
static void uv_register_nmi_notifier(void)
{
	if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
		pr_warn("UV: NMI handler failed to register\n");
	if (register_nmi_handler(NMI_LOCAL, uv_handle_nmi_ping, 0, "uvping"))
		pr_warn("UV: PING NMI handler failed to register\n");
}
void uv_nmi_init(void)
{
	unsigned int value;
	/*
	 * Unmask NMI on all CPUs
	 */
	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
	value &= ~APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}
/* Setup HUB NMI info */
static void __init uv_nmi_setup_common(bool hubbed)
{
	int size = sizeof(void *) * (1 << NODES_SHIFT);
	int cpu;
	uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
	nmi_debug("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
	BUG_ON(!uv_hub_nmi_list);
	size = sizeof(struct uv_hub_nmi_s);
	for_each_present_cpu(cpu) {
		int nid = cpu_to_node(cpu);
		if (uv_hub_nmi_list[nid] == NULL) {
			uv_hub_nmi_list[nid] = kzalloc_node(size,
							    GFP_KERNEL, nid);
			BUG_ON(!uv_hub_nmi_list[nid]);
			raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
			atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
			uv_hub_nmi_list[nid]->hub_present = hubbed;
			uv_hub_nmi_list[nid]->pch_owner = (nid == 0);
		}
		uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
	}
	BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
}
/* Setup for UV Hub systems */
void __init uv_nmi_setup(void)
{
	uv_nmi_setup_mmrs();
	uv_nmi_setup_common(true);
	uv_register_nmi_notifier();
	pr_info("UV: Hub NMI enabled\n");
}
/* Setup for UV Hubless systems */
void __init uv_nmi_setup_hubless(void)
{
	uv_nmi_setup_common(false);
	pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE);
	nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
		pch_base, PCH_PCR_GPIO_1_BASE);
	if (uv_pch_init_enable)
		uv_init_hubless_pch_d0();
	uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0,
				STS_GPP_D_0_MASK, STS_GPP_D_0_MASK);
	uv_nmi_setup_hubless_intr();
	/* Ensure NMI enabled in Processor Interface Reg: */
	uv_reassert_nmi();
	uv_register_nmi_notifier();
	pr_info("UV: PCH NMI enabled\n");
}
 | 
	linux-master | 
	arch/x86/platform/uv/uv_nmi.c | 
| 
	/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/export.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <asm/irqdomain.h>
#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>
/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	unsigned long		offset;
	int			pnode;
};
static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
		     sizeof(unsigned long));
	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->delivery_mode;
	entry->dest_mode	= apic->dest_mode_logical;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= cfg->dest_apicid;
	uv_write_global_mmr64(info->pnode, info->offset, mmr_value);
}
static void uv_noop(struct irq_data *data) { }
static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	struct irq_data *parent = data->parent_data;
	struct irq_cfg *cfg = irqd_cfg(data);
	int ret;
	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0) {
		uv_program_mmr(cfg, data->chip_data);
		vector_schedule_cleanup(cfg);
	}
	return ret;
}
static struct irq_chip uv_irq_chip = {
	.name			= "UV-CORE",
	.irq_mask		= uv_noop,
	.irq_unmask		= uv_noop,
	.irq_eoi		= apic_ack_irq,
	.irq_set_affinity	= uv_set_irq_affinity,
};
static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq,
			   unsigned int nr_irqs, void *arg)
{
	struct uv_irq_2_mmr_pnode *chip_data;
	struct irq_alloc_info *info = arg;
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
	int ret;
	if (nr_irqs > 1 || !info || info->type != X86_IRQ_ALLOC_TYPE_UV)
		return -EINVAL;
	chip_data = kmalloc_node(sizeof(*chip_data), GFP_KERNEL,
				 irq_data_get_node(irq_data));
	if (!chip_data)
		return -ENOMEM;
	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret >= 0) {
		if (info->uv.limit == UV_AFFINITY_CPU)
			irq_set_status_flags(virq, IRQ_NO_BALANCING);
		else
			irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
		chip_data->pnode = uv_blade_to_pnode(info->uv.blade);
		chip_data->offset = info->uv.offset;
		irq_domain_set_info(domain, virq, virq, &uv_irq_chip, chip_data,
				    handle_percpu_irq, NULL, info->uv.name);
	} else {
		kfree(chip_data);
	}
	return ret;
}
static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
			   unsigned int nr_irqs)
{
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
	BUG_ON(nr_irqs != 1);
	kfree(irq_data->chip_data);
	irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
	irq_clear_status_flags(virq, IRQ_NO_BALANCING);
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
/*
 * Re-target the irq to the specified CPU and enable the MMR located on the
 * specified blade to allow MSIs to be sent to that CPU.
 */
static int uv_domain_activate(struct irq_domain *domain,
			      struct irq_data *irq_data, bool reserve)
{
	uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
	return 0;
}
/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void uv_domain_deactivate(struct irq_domain *domain,
				 struct irq_data *irq_data)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;
	uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
}
static const struct irq_domain_ops uv_domain_ops = {
	.alloc		= uv_domain_alloc,
	.free		= uv_domain_free,
	.activate	= uv_domain_activate,
	.deactivate	= uv_domain_deactivate,
};
static struct irq_domain *uv_get_irq_domain(void)
{
	static struct irq_domain *uv_domain;
	static DEFINE_MUTEX(uv_lock);
	struct fwnode_handle *fn;
	mutex_lock(&uv_lock);
	if (uv_domain)
		goto out;
	fn = irq_domain_alloc_named_fwnode("UV-CORE");
	if (!fn)
		goto out;
	uv_domain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0, fn,
						&uv_domain_ops, NULL);
	if (!uv_domain)
		irq_domain_free_fwnode(fn);
out:
	mutex_unlock(&uv_lock);
	return uv_domain;
}
/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	struct irq_alloc_info info;
	struct irq_domain *domain = uv_get_irq_domain();
	if (!domain)
		return -ENOMEM;
	init_irq_alloc_info(&info, cpumask_of(cpu));
	info.type = X86_IRQ_ALLOC_TYPE_UV;
	info.uv.limit = limit;
	info.uv.blade = mmr_blade;
	info.uv.offset = mmr_offset;
	info.uv.name = irq_name;
	return irq_domain_alloc_irqs(domain, 1,
				     uv_blade_to_memory_nid(mmr_blade), &info);
}
EXPORT_SYMBOL_GPL(uv_setup_irq);
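/*
 * Hedged usage sketch (the device name and MMR offset are illustrative,
 * not from this file):
 *
 *	int irq = uv_setup_irq("my-uv-dev", cpu, mmr_blade, mmr_offset,
 *			       UV_AFFINITY_CPU);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	uv_teardown_irq(irq);
 */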
/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 */
void uv_teardown_irq(unsigned int irq)
{
	irq_domain_free_irqs(irq, 1);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
 | 
	linux-master | 
	arch/x86/platform/uv/uv_irq.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SGI RTC clock/timer routines.
 *
 *  (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 *  Copyright (c) 2009-2013 Silicon Graphics, Inc.  All Rights Reserved.
 *  Copyright (c) Dimitri Sivanich
 */
#include <linux/clockchips.h>
#include <linux/slab.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#define RTC_NAME		"sgi_rtc"
static u64 uv_read_rtc(struct clocksource *cs);
static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
static int uv_rtc_shutdown(struct clock_event_device *evt);
static struct clocksource clocksource_uv = {
	.name		= RTC_NAME,
	.rating		= 299,
	.read		= uv_read_rtc,
	.mask		= (u64)UVH_RTC_REAL_TIME_CLOCK_MASK,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clock_event_device clock_event_device_uv = {
	.name			= RTC_NAME,
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.shift			= 20,
	.rating			= 400,
	.irq			= -1,
	.set_next_event		= uv_rtc_next_event,
	.set_state_shutdown	= uv_rtc_shutdown,
	.event_handler		= NULL,
};
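/*
 * The clockevents core converts a oneshot delta as
 * ticks = (nsec * mult) >> shift, with mult filled in later via
 * div_sc(sn_rtc_cycles_per_second, NSEC_PER_SEC, shift).  A worked
 * example assuming a hypothetical 100 MHz RTC and the shift of 20 above:
 * mult = (100000000 << 20) / 1000000000 ~= 104857, so a 1000 ns delta
 * maps to (1000 * 104857) >> 20 ~= 100 RTC ticks.
 */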
static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
/* There is one of these allocated per node */
struct uv_rtc_timer_head {
	spinlock_t	lock;
	/* next cpu waiting for timer, local node relative: */
	int		next_cpu;
	/* number of cpus on this node: */
	int		ncpus;
	struct {
		int	lcpu;		/* systemwide logical cpu number */
		u64	expires;	/* next timer expiration for this cpu */
	} cpu[];
};
/*
 * Access to uv_rtc_timer_head via blade id.
 */
static struct uv_rtc_timer_head		**blade_info __read_mostly;
static int				uv_rtc_evt_enable;
/*
 * Hardware interface routines
 */
/* Send an IPI to the specified CPU, possibly on another node */
static void uv_rtc_send_IPI(int cpu)
{
	unsigned long apicid, val;
	int pnode;
	apicid = cpu_physical_id(cpu);
	pnode = uv_apicid_to_pnode(apicid);
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}
/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
	return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED2) &
		UVH_EVENT_OCCURRED2_RTC_1_MASK;
}
/* Set up the interrupt and return non-zero if early expiration occurred. */
static int uv_setup_intr(int cpu, u64 expires)
{
	u64 val;
	unsigned long apicid = cpu_physical_id(cpu);
	int pnode = uv_cpu_to_pnode(cpu);
	uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
		UVH_RTC1_INT_CONFIG_M_MASK);
	uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
	uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED2_ALIAS,
			      UVH_EVENT_OCCURRED2_RTC_1_MASK);
	val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
		((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
	/* Set configuration */
	uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
	/* Initialize comparator value */
	uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
	if (uv_read_rtc(NULL) <= expires)
		return 0;
	return !uv_intr_pending(pnode);
}
/*
 * Per-cpu timer tracking routines
 */
static __init void uv_rtc_deallocate_timers(void)
{
	int bid;
	for_each_possible_blade(bid) {
		kfree(blade_info[bid]);
	}
	kfree(blade_info);
}
/* Allocate per-node list of cpu timer expiration times. */
static __init int uv_rtc_allocate_timers(void)
{
	int cpu;
	blade_info = kcalloc(uv_possible_blades, sizeof(void *), GFP_KERNEL);
	if (!blade_info)
		return -ENOMEM;
	for_each_present_cpu(cpu) {
		int nid = cpu_to_node(cpu);
		int bid = uv_cpu_to_blade_id(cpu);
		int bcpu = uv_cpu_blade_processor_id(cpu);
		struct uv_rtc_timer_head *head = blade_info[bid];
		if (!head) {
			head = kmalloc_node(struct_size(head, cpu,
				uv_blade_nr_possible_cpus(bid)),
				GFP_KERNEL, nid);
			if (!head) {
				uv_rtc_deallocate_timers();
				return -ENOMEM;
			}
			spin_lock_init(&head->lock);
			head->ncpus = uv_blade_nr_possible_cpus(bid);
			head->next_cpu = -1;
			blade_info[bid] = head;
		}
		head->cpu[bcpu].lcpu = cpu;
		head->cpu[bcpu].expires = ULLONG_MAX;
	}
	return 0;
}
/* Find and set the next expiring timer.  */
static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
{
	u64 lowest = ULLONG_MAX;
	int c, bcpu = -1;
	head->next_cpu = -1;
	for (c = 0; c < head->ncpus; c++) {
		u64 exp = head->cpu[c].expires;
		if (exp < lowest) {
			bcpu = c;
			lowest = exp;
		}
	}
	if (bcpu >= 0) {
		head->next_cpu = bcpu;
		c = head->cpu[bcpu].lcpu;
		if (uv_setup_intr(c, lowest))
			/* If we didn't set it up in time, trigger */
			uv_rtc_send_IPI(c);
	} else {
		uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
			UVH_RTC1_INT_CONFIG_M_MASK);
	}
}
/*
 * Set expiration time for current cpu.
 *
 * Returns -ETIME if we missed the expiration time.
 */
static int uv_rtc_set_timer(int cpu, u64 expires)
{
	int pnode = uv_cpu_to_pnode(cpu);
	int bid = uv_cpu_to_blade_id(cpu);
	struct uv_rtc_timer_head *head = blade_info[bid];
	int bcpu = uv_cpu_blade_processor_id(cpu);
	u64 *t = &head->cpu[bcpu].expires;
	unsigned long flags;
	int next_cpu;
	spin_lock_irqsave(&head->lock, flags);
	next_cpu = head->next_cpu;
	*t = expires;
	/* Will this one be next to go off? */
	if (next_cpu < 0 || bcpu == next_cpu ||
			expires < head->cpu[next_cpu].expires) {
		head->next_cpu = bcpu;
		if (uv_setup_intr(cpu, expires)) {
			*t = ULLONG_MAX;
			uv_rtc_find_next_timer(head, pnode);
			spin_unlock_irqrestore(&head->lock, flags);
			return -ETIME;
		}
	}
	spin_unlock_irqrestore(&head->lock, flags);
	return 0;
}
/*
 * Unset expiration time for current cpu.
 *
 * Returns 1 if this timer was pending.
 */
static int uv_rtc_unset_timer(int cpu, int force)
{
	int pnode = uv_cpu_to_pnode(cpu);
	int bid = uv_cpu_to_blade_id(cpu);
	struct uv_rtc_timer_head *head = blade_info[bid];
	int bcpu = uv_cpu_blade_processor_id(cpu);
	u64 *t = &head->cpu[bcpu].expires;
	unsigned long flags;
	int rc = 0;
	spin_lock_irqsave(&head->lock, flags);
	if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
		rc = 1;
	if (rc) {
		*t = ULLONG_MAX;
		/* Was the hardware setup for this timer? */
		if (head->next_cpu == bcpu)
			uv_rtc_find_next_timer(head, pnode);
	}
	spin_unlock_irqrestore(&head->lock, flags);
	return rc;
}
/*
 * Kernel interface routines.
 */
/*
 * Read the RTC.
 *
 * Starting with HUB rev 2.0, the UV RTC register is replicated across all
 * cachelines of its own page.  This allows faster simultaneous reads
 * from a given socket.
 */
static u64 uv_read_rtc(struct clocksource *cs)
{
	unsigned long offset;
	if (uv_get_min_hub_revision_id() == 1)
		offset = 0;
	else
		offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
	return (u64)uv_read_local_mmr(UVH_RTC | offset);
}
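/*
 * Worked example of the offset above (illustrative): with 64-byte cache
 * lines, blade processor id 3 reads the replica at page offset
 * 3 * 64 = 0xc0, so concurrent readers on a socket each hit a distinct
 * cacheline of the replicated RTC page.
 */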
/*
 * Program the next event, relative to now
 */
static int uv_rtc_next_event(unsigned long delta,
			     struct clock_event_device *ced)
{
	int ced_cpu = cpumask_first(ced->cpumask);
	return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
}
/*
 * Shutdown the RTC timer
 */
static int uv_rtc_shutdown(struct clock_event_device *evt)
{
	int ced_cpu = cpumask_first(evt->cpumask);
	uv_rtc_unset_timer(ced_cpu, 1);
	return 0;
}
static void uv_rtc_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ced = &per_cpu(cpu_ced, cpu);
	if (!ced || !ced->event_handler)
		return;
	if (uv_rtc_unset_timer(cpu, 0) != 1)
		return;
	ced->event_handler(ced);
}
static int __init uv_enable_evt_rtc(char *str)
{
	uv_rtc_evt_enable = 1;
	return 1;
}
__setup("uvrtcevt", uv_enable_evt_rtc);
static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
{
	struct clock_event_device *ced = this_cpu_ptr(&cpu_ced);
	*ced = clock_event_device_uv;
	ced->cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(ced);
}
static __init int uv_rtc_setup_clock(void)
{
	int rc;
	if (!is_uv_system())
		return -ENODEV;
	rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second);
	if (rc)
		printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc);
	else
		printk(KERN_INFO "UV RTC clocksource registered freq %lu MHz\n",
			sn_rtc_cycles_per_second/(unsigned long)1E6);
	if (rc || !uv_rtc_evt_enable || x86_platform_ipi_callback)
		return rc;
	/* Set up and register clockevents */
	rc = uv_rtc_allocate_timers();
	if (rc)
		goto error;
	x86_platform_ipi_callback = uv_rtc_interrupt;
	clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
				NSEC_PER_SEC, clock_event_device_uv.shift);
	clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
						sn_rtc_cycles_per_second;
	clock_event_device_uv.min_delta_ticks = 1;
	clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
				(NSEC_PER_SEC / sn_rtc_cycles_per_second);
	clock_event_device_uv.max_delta_ticks = clocksource_uv.mask;
	rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
	if (rc) {
		x86_platform_ipi_callback = NULL;
		uv_rtc_deallocate_timers();
		goto error;
	}
	printk(KERN_INFO "UV RTC clockevents registered\n");
	return 0;
error:
	clocksource_unregister(&clocksource_uv);
	printk(KERN_INFO "UV RTC clockevents failed rc %d\n", rc);
	return rc;
}
arch_initcall(uv_rtc_setup_clock);
 | 
	linux-master | 
	arch/x86/platform/uv/uv_time.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (c) 2001,2002 Christer Weinigel <[email protected]>
 *
 *  National Semiconductor SCx200 support.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scx200.h>
#include <linux/scx200_gpio.h>
/* Verify that the configuration block really is there */
#define scx200_cb_probe(base) (inw((base) + SCx200_CBA) == (base))
MODULE_AUTHOR("Christer Weinigel <[email protected]>");
MODULE_DESCRIPTION("NatSemi SCx200 Driver");
MODULE_LICENSE("GPL");
unsigned scx200_gpio_base = 0;
unsigned long scx200_gpio_shadow[2];
unsigned scx200_cb_base = 0;
static struct pci_device_id scx200_tbl[] = {
	{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) },
	{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) },
	{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_XBUS)   },
	{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SC1100_XBUS)   },
	{ },
};
MODULE_DEVICE_TABLE(pci, scx200_tbl);
static int scx200_probe(struct pci_dev *, const struct pci_device_id *);
static struct pci_driver scx200_pci_driver = {
	.name = "scx200",
	.id_table = scx200_tbl,
	.probe = scx200_probe,
};
static DEFINE_MUTEX(scx200_gpio_config_lock);
static void scx200_init_shadow(void)
{
	int bank;
	/* read the current values driven on the GPIO signals */
	for (bank = 0; bank < 2; ++bank)
		scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank);
}
static int scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned base;
	if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE ||
	    pdev->device == PCI_DEVICE_ID_NS_SC1100_BRIDGE) {
		base = pci_resource_start(pdev, 0);
		pr_info("GPIO base 0x%x\n", base);
		if (!request_region(base, SCx200_GPIO_SIZE,
				    "NatSemi SCx200 GPIO")) {
			pr_err("can't allocate I/O for GPIOs\n");
			return -EBUSY;
		}
		scx200_gpio_base = base;
		scx200_init_shadow();
	} else {
		/* find the base of the Configuration Block */
		if (scx200_cb_probe(SCx200_CB_BASE_FIXED)) {
			scx200_cb_base = SCx200_CB_BASE_FIXED;
		} else {
			pci_read_config_dword(pdev, SCx200_CBA_SCRATCH, &base);
			if (scx200_cb_probe(base)) {
				scx200_cb_base = base;
			} else {
				pr_warn("Configuration Block not found\n");
				return -ENODEV;
			}
		}
		pr_info("Configuration Block base 0x%x\n", scx200_cb_base);
	}
	return 0;
}
u32 scx200_gpio_configure(unsigned index, u32 mask, u32 bits)
{
	u32 config, new_config;
	mutex_lock(&scx200_gpio_config_lock);
	outl(index, scx200_gpio_base + 0x20);
	config = inl(scx200_gpio_base + 0x24);
	new_config = (config & mask) | bits;
	outl(new_config, scx200_gpio_base + 0x24);
	mutex_unlock(&scx200_gpio_config_lock);
	return config;
}
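/*
 * Hedged usage sketch: new_config = (config & mask) | bits, i.e. the mask
 * selects which existing bits to keep.  To clear bit 1 and set bit 0 of
 * the pin at "index" (values illustrative):
 *
 *	u32 old = scx200_gpio_configure(index, ~0x2, 0x1);
 */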
static int __init scx200_init(void)
{
	pr_info("NatSemi SCx200 Driver\n");
	return pci_register_driver(&scx200_pci_driver);
}
static void __exit scx200_cleanup(void)
{
	pci_unregister_driver(&scx200_pci_driver);
	release_region(scx200_gpio_base, SCx200_GPIO_SIZE);
}
module_init(scx200_init);
module_exit(scx200_cleanup);
EXPORT_SYMBOL(scx200_gpio_base);
EXPORT_SYMBOL(scx200_gpio_shadow);
EXPORT_SYMBOL(scx200_gpio_configure);
EXPORT_SYMBOL(scx200_cb_base);
 | 
	linux-master | 
	arch/x86/platform/scx200/scx200_32.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * imr.c -- Intel Isolated Memory Region driver
 *
 * Copyright(c) 2013 Intel Corporation.
 * Copyright(c) 2015 Bryan O'Donoghue <[email protected]>
 *
 * IMR registers define an isolated region of memory that can
 * be masked to prohibit certain system agents from accessing memory.
 * When a device behind a masked port performs an access - snooped or
 * not, an IMR may optionally prevent that transaction from changing
 * the state of memory or from getting correct data in response to the
 * operation.
 *
 * Write data will be dropped and reads will return 0xFFFFFFFF; the
 * system will reset, and the system BIOS will print out an error message
 * to inform the user that an IMR has been violated.
 *
 * This code is based on the Linux MTRR code and reference code from
 * Intel's Quark BSP EFI, Linux and grub code.
 *
 * See quark-x1000-datasheet.pdf for register definitions.
 * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm-generic/sections.h>
#include <asm/cpu_device_id.h>
#include <asm/imr.h>
#include <asm/iosf_mbi.h>
#include <asm/io.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
struct imr_device {
	bool		init;
	struct mutex	lock;
	int		max_imr;
	int		reg_base;
};
static struct imr_device imr_dev;
/*
 * IMR read/write mask control registers.
 * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for
 * bit definitions.
 *
 * addr_lo
 * 31		Lock bit
 * 30:24	Reserved
 * 23:2		1 KiB aligned lo address
 * 1:0		Reserved
 *
 * addr_hi
 * 31:24	Reserved
 * 23:2		1 KiB aligned hi address
 * 1:0		Reserved
 */
#define IMR_LOCK	BIT(31)
struct imr_regs {
	u32 addr_lo;
	u32 addr_hi;
	u32 rmask;
	u32 wmask;
};
#define IMR_NUM_REGS	(sizeof(struct imr_regs)/sizeof(u32))
#define IMR_SHIFT	8
#define imr_to_phys(x)	((x) << IMR_SHIFT)
#define phys_to_imr(x)	((x) >> IMR_SHIFT)
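/*
 * Worked example (illustrative): with IMR_SHIFT = 8,
 * phys_to_imr(0x100000) == 0x1000 and imr_to_phys(0x1000) == 0x100000.
 * Register bits 1:0 are reserved, so the effective alignment is 1 KiB,
 * matching the IMR_MASK/IMR_ALIGN checks used elsewhere in this file.
 */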
/**
 * imr_is_enabled - true if an IMR is enabled, false otherwise.
 *
 * Determines if an IMR is enabled based on address range and read/write
 * mask. An IMR set with an address range set to zero and a read/write
 * access mask set to all is considered to be disabled. An IMR in any
 * other state - for example an address range of zero but without the
 * read/write access mask set to all - is considered to be enabled. This
 * definition of disabled is how firmware switches off an IMR and is
 * maintained in the kernel for
 * consistency.
 *
 * @imr:	pointer to IMR descriptor.
 * @return:	true if IMR enabled false if disabled.
 */
static inline int imr_is_enabled(struct imr_regs *imr)
{
	return !(imr->rmask == IMR_READ_ACCESS_ALL &&
		 imr->wmask == IMR_WRITE_ACCESS_ALL &&
		 imr_to_phys(imr->addr_lo) == 0 &&
		 imr_to_phys(imr->addr_hi) == 0);
}
/**
 * imr_read - read an IMR at a given index.
 *
 * Requires caller to hold imr mutex.
 *
 * @idev:	pointer to imr_device structure.
 * @imr_id:	IMR entry to read.
 * @imr:	IMR structure representing address and access masks.
 * @return:	0 on success or error code passed from iosf_mbi on failure.
 */
static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
{
	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
	int ret;
	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_lo);
	if (ret)
		return ret;
	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_hi);
	if (ret)
		return ret;
	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->rmask);
	if (ret)
		return ret;
	return iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->wmask);
}
/**
 * imr_write - write an IMR at a given index.
 *
 * Requires caller to hold imr mutex.
 * Note lock bits need to be written independently of address bits.
 *
 * @idev:	pointer to imr_device structure.
 * @imr_id:	IMR entry to write.
 * @imr:	IMR structure representing address and access masks.
 * @return:	0 on success or error code passed from iosf_mbi on failure.
 */
static int imr_write(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
{
	unsigned long flags;
	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
	int ret;
	local_irq_save(flags);
	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_lo);
	if (ret)
		goto failed;
	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_hi);
	if (ret)
		goto failed;
	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->rmask);
	if (ret)
		goto failed;
	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->wmask);
	if (ret)
		goto failed;
	local_irq_restore(flags);
	return 0;
failed:
	/*
	 * If writing to the IOSF failed then we're in an unknown state,
	 * likely a very bad state. An IMR in an invalid state will almost
	 * certainly lead to a memory access violation.
	 */
	local_irq_restore(flags);
	WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
	     imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK);
	return ret;
}
/**
 * imr_dbgfs_state_show - print state of IMR registers.
 *
 * @s:		pointer to seq_file for output.
 * @unused:	unused parameter.
 * @return:	0 on success or error code passed from iosf_mbi on failure.
 */
static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
{
	phys_addr_t base;
	phys_addr_t end;
	int i;
	struct imr_device *idev = s->private;
	struct imr_regs imr;
	size_t size;
	int ret = -ENODEV;
	mutex_lock(&idev->lock);
	for (i = 0; i < idev->max_imr; i++) {
		ret = imr_read(idev, i, &imr);
		if (ret)
			break;
		/*
		 * Add IMR_ALIGN bytes to the size to account for the
		 * inherent IMR_ALIGN-byte granule hidden in the masked-away
		 * lower ten address bits.
		 */
		if (imr_is_enabled(&imr)) {
			base = imr_to_phys(imr.addr_lo);
			end = imr_to_phys(imr.addr_hi) + IMR_MASK;
			size = end - base + 1;
		} else {
			base = 0;
			end = 0;
			size = 0;
		}
		seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
			   "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
			   &base, &end, size, imr.rmask, imr.wmask,
			   imr_is_enabled(&imr) ? "enabled " : "disabled",
			   imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
	}
	mutex_unlock(&idev->lock);
	return ret;
}
DEFINE_SHOW_ATTRIBUTE(imr_dbgfs_state);
/**
 * imr_debugfs_register - register debugfs hooks.
 *
 * @idev:	pointer to imr_device structure.
 */
static void imr_debugfs_register(struct imr_device *idev)
{
	debugfs_create_file("imr_state", 0444, NULL, idev,
			    &imr_dbgfs_state_fops);
}
/**
 * imr_check_params - check passed address range IMR alignment and non-zero size
 *
 * @base:	base address of intended IMR.
 * @size:	size of intended IMR.
 * @return:	zero on valid range, -EINVAL on unaligned base/size or zero size.
 */
static int imr_check_params(phys_addr_t base, size_t size)
{
	if ((base & IMR_MASK) || (size & IMR_MASK)) {
		pr_err("base %pa size 0x%08zx must align to 1KiB\n",
			&base, size);
		return -EINVAL;
	}
	if (size == 0)
		return -EINVAL;
	return 0;
}
/**
 * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends.
 *
 * IMR addr_hi has a built in offset of plus IMR_ALIGN (0x400) bytes from the
 * value in the register. We need to subtract IMR_ALIGN bytes from input sizes
 * as a result.
 *
 * @size:	input size bytes.
 * @return:	reduced size.
 */
static inline size_t imr_raw_size(size_t size)
{
	return size - IMR_ALIGN;
}
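/*
 * Worked example (illustrative): a caller requesting a 4 KiB (0x1000)
 * region stores raw_size = 0x1000 - 0x400 = 0xc00, so addr_hi encodes
 * base + 0xc00 and the hardware's implicit IMR_ALIGN bytes restore the
 * full 4 KiB extent.
 */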
/**
 * imr_address_overlap - detects an address overlap.
 *
 * @addr:	address to check against an existing IMR.
 * @imr:	imr being checked.
 * @return:	true for overlap false for no overlap.
 */
static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
{
	return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi);
}
/**
 * imr_add_range - add an Isolated Memory Region.
 *
 * @base:	physical base address of region aligned to 1KiB.
 * @size:	physical size of region in bytes must be aligned to 1KiB.
 * @rmask:	read access mask.
 * @wmask:	write access mask.
 * @return:	zero on success or negative value indicating error.
 */
int imr_add_range(phys_addr_t base, size_t size,
		  unsigned int rmask, unsigned int wmask)
{
	phys_addr_t end;
	unsigned int i;
	struct imr_device *idev = &imr_dev;
	struct imr_regs imr;
	size_t raw_size;
	int reg;
	int ret;
	if (WARN_ONCE(idev->init == false, "driver not initialized"))
		return -ENODEV;
	ret = imr_check_params(base, size);
	if (ret)
		return ret;
	/* Tweak the size value. */
	raw_size = imr_raw_size(size);
	end = base + raw_size;
	/*
	 * Check for reserved IMR value common to firmware, kernel and grub
	 * indicating a disabled IMR.
	 */
	imr.addr_lo = phys_to_imr(base);
	imr.addr_hi = phys_to_imr(end);
	imr.rmask = rmask;
	imr.wmask = wmask;
	if (!imr_is_enabled(&imr))
		return -ENOTSUPP;
	mutex_lock(&idev->lock);
	/*
	 * Find a free IMR while checking for an existing overlapping range.
	 * Note there's no restriction in silicon to prevent IMR overlaps.
	 * For the sake of simplicity and ease in defining/debugging an IMR
	 * memory map we exclude IMR overlaps.
	 */
	reg = -1;
	for (i = 0; i < idev->max_imr; i++) {
		ret = imr_read(idev, i, &imr);
		if (ret)
			goto failed;
		/* Find overlap @ base or end of requested range. */
		ret = -EINVAL;
		if (imr_is_enabled(&imr)) {
			if (imr_address_overlap(base, &imr))
				goto failed;
			if (imr_address_overlap(end, &imr))
				goto failed;
		} else {
			reg = i;
		}
	}
	/* Error out if we have no free IMR entries. */
	if (reg == -1) {
		ret = -ENOMEM;
		goto failed;
	}
	pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n",
		 reg, &base, &end, raw_size, rmask, wmask);
	/* Enable IMR at specified range and access mask. */
	imr.addr_lo = phys_to_imr(base);
	imr.addr_hi = phys_to_imr(end);
	imr.rmask = rmask;
	imr.wmask = wmask;
	ret = imr_write(idev, reg, &imr);
	if (ret < 0) {
		/*
		 * In the highly unlikely event that iosf_mbi_write failed,
		 * attempt to roll back the IMR setup, skipping the trapping
		 * of further IOSF write failures.
		 */
		imr.addr_lo = 0;
		imr.addr_hi = 0;
		imr.rmask = IMR_READ_ACCESS_ALL;
		imr.wmask = IMR_WRITE_ACCESS_ALL;
		imr_write(idev, reg, &imr);
	}
failed:
	mutex_unlock(&idev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(imr_add_range);
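/*
 * Hedged usage sketch (the buffer symbol is illustrative): restricting a
 * 1 KiB physically contiguous buffer to CPU-only access, then releasing
 * it:
 *
 *	ret = imr_add_range(buf_phys, IMR_ALIGN, IMR_CPU, IMR_CPU);
 *	...
 *	ret = imr_remove_range(buf_phys, IMR_ALIGN);
 */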
/**
 * __imr_remove_range - delete an Isolated Memory Region.
 *
 * This function allows you to delete an IMR by its index specified by reg or
 * by address range specified by base and size respectively. If you specify an
 * index on its own the base and size parameters are ignored.
 * imr_remove_range(0, base, size); delete IMR at index 0 base/size ignored.
 * imr_remove_range(-1, base, size); delete IMR from base to base+size.
 *
 * @reg:	imr index to remove.
 * @base:	physical base address of region aligned to 1 KiB.
 * @size:	physical size of region in bytes aligned to 1 KiB.
 * @return:	-EINVAL on invalid range or out of range id
 *		-ENODEV if reg is valid but no IMR exists or is locked
 *		0 on success.
 */
static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
{
	phys_addr_t end;
	bool found = false;
	unsigned int i;
	struct imr_device *idev = &imr_dev;
	struct imr_regs imr;
	size_t raw_size;
	int ret = 0;
	if (WARN_ONCE(idev->init == false, "driver not initialized"))
		return -ENODEV;
	/*
	 * Validate address range if deleting by address, else we are
	 * deleting by index where base and size will be ignored.
	 */
	if (reg == -1) {
		ret = imr_check_params(base, size);
		if (ret)
			return ret;
	}
	/* Tweak the size value. */
	raw_size = imr_raw_size(size);
	end = base + raw_size;
	mutex_lock(&idev->lock);
	if (reg >= 0) {
		/* If a specific IMR is given try to use it. */
		ret = imr_read(idev, reg, &imr);
		if (ret)
			goto failed;
		if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
			ret = -ENODEV;
			goto failed;
		}
		found = true;
	} else {
		/* Search for match based on address range. */
		for (i = 0; i < idev->max_imr; i++) {
			ret = imr_read(idev, i, &imr);
			if (ret)
				goto failed;
			if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
				continue;
			if ((imr_to_phys(imr.addr_lo) == base) &&
			    (imr_to_phys(imr.addr_hi) == end)) {
				found = true;
				reg = i;
				break;
			}
		}
	}
	if (!found) {
		ret = -ENODEV;
		goto failed;
	}
	pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);
	/* Tear down the IMR. */
	imr.addr_lo = 0;
	imr.addr_hi = 0;
	imr.rmask = IMR_READ_ACCESS_ALL;
	imr.wmask = IMR_WRITE_ACCESS_ALL;
	ret = imr_write(idev, reg, &imr);
failed:
	mutex_unlock(&idev->lock);
	return ret;
}
/**
 * imr_remove_range - delete an Isolated Memory Region by address
 *
 * This function allows you to delete an IMR by an address range specified
 * by base and size respectively.
 * imr_remove_range(base, size); delete IMR from base to base+size.
 *
 * @base:	physical base address of region aligned to 1 KiB.
 * @size:	physical size of region in bytes aligned to 1 KiB.
 * @return:	-EINVAL on invalid range or out of range id
 *		-ENODEV if reg is valid but no IMR exists or is locked
 *		0 on success.
 */
int imr_remove_range(phys_addr_t base, size_t size)
{
	return __imr_remove_range(-1, base, size);
}
EXPORT_SYMBOL_GPL(imr_remove_range);
/**
 * imr_clear - delete an Isolated Memory Region by index
 *
 * This function allows you to delete an IMR by its index. Useful for
 * initial sanitization of the IMR address map.
 * imr_clear(reg); delete the IMR at index reg.
 *
 * @reg:	imr index to remove.
 * @return:	-EINVAL on invalid range or out of range id
 *		-ENODEV if reg is valid but no IMR exists or is locked
 *		0 on success.
 */
static inline int imr_clear(int reg)
{
	return __imr_remove_range(reg, 0, 0);
}
/**
 * imr_fixup_memmap - Tear down IMRs used during bootup.
 *
 * BIOS and Grub both set up IMRs around the compressed kernel and initrd
 * memory that need to be removed before the kernel hands out one of the
 * IMR encased addresses to a downstream DMA agent such as the SD or
 * Ethernet controller. IMRs on Galileo are set up to immediately reset
 * the system on violation. As a result, if you're running a root
 * filesystem from SD you'll need
 * the boot-time IMRs torn down or you'll find seemingly random resets when
 * using your filesystem.
 *
 * @idev:	pointer to imr_device structure.
 */
static void __init imr_fixup_memmap(struct imr_device *idev)
{
	phys_addr_t base = virt_to_phys(&_text);
	size_t size = virt_to_phys(&__end_rodata) - base;
	unsigned long start, end;
	int i;
	int ret;
	/* Tear down all existing unlocked IMRs. */
	for (i = 0; i < idev->max_imr; i++)
		imr_clear(i);
	start = (unsigned long)_text;
	end = (unsigned long)__end_rodata - 1;
	/*
	 * Set up an unlocked IMR around the physical extent of the kernel
	 * from the beginning of the .text section to the end of the
	 * .rodata section as one physically contiguous block.
	 *
	 * We don't round up @size since it is already PAGE_SIZE aligned.
	 * See vmlinux.lds.S for details.
	 */
	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
	if (ret < 0) {
		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
			size / 1024, start, end);
	} else {
		pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n",
			size / 1024, start, end);
	}
}
static const struct x86_cpu_id imr_ids[] __initconst = {
	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL),
	{}
};
/**
 * imr_init - entry point for IMR driver.
 *
 * return: -ENODEV for no IMR support, 0 if good to go.
 */
static int __init imr_init(void)
{
	struct imr_device *idev = &imr_dev;
	if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
		return -ENODEV;
	idev->max_imr = QUARK_X1000_IMR_MAX;
	idev->reg_base = QUARK_X1000_IMR_REGBASE;
	idev->init = true;
	mutex_init(&idev->lock);
	imr_debugfs_register(idev);
	imr_fixup_memmap(idev);
	return 0;
}
device_initcall(imr_init);
 | 
	linux-master | 
	arch/x86/platform/intel-quark/imr.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * imr_selftest.c -- Intel Isolated Memory Region self-test driver
 *
 * Copyright(c) 2013 Intel Corporation.
 * Copyright(c) 2015 Bryan O'Donoghue <[email protected]>
 *
 * IMR self test. The purpose of this module is to run a set of tests on the
 * IMR API to validate its sanity. We check for overlapping, reserved
 * addresses and setup/teardown sanity.
 *
 */
#include <asm-generic/sections.h>
#include <asm/cpu_device_id.h>
#include <asm/imr.h>
#include <asm/io.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#define SELFTEST KBUILD_MODNAME ": "
/**
 * imr_self_test_result - Print result string for self test.
 *
 * @res:	result code - true if test passed, false otherwise.
 * @fmt:	format string.
 * ...		variadic argument list.
 */
static __printf(2, 3)
void __init imr_self_test_result(int res, const char *fmt, ...)
{
	va_list vlist;
	/* Print pass/fail. */
	if (res)
		pr_info(SELFTEST "pass ");
	else
		pr_info(SELFTEST "fail ");
	/* Print variable string. */
	va_start(vlist, fmt);
	vprintk(fmt, vlist);
	va_end(vlist);
	/* Optional warning. */
	WARN(res == 0, "test failed");
}
#undef SELFTEST
/**
 * imr_self_test
 *
 * Run some simple tests to verify overlap handling, zero sized
 * allocations and 1 KiB sized areas.
 *
 */
static void __init imr_self_test(void)
{
	phys_addr_t base  = virt_to_phys(&_text);
	size_t size = virt_to_phys(&__end_rodata) - base;
	const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
	int ret;
	/* Test zero base, zero size. */
	ret = imr_add_range(0, 0, 0, 0);
	imr_self_test_result(ret < 0, "zero sized IMR\n");
	/* Test exact overlap. */
	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
	/* Test overlap with base inside of existing. */
	base += size - IMR_ALIGN;
	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
	/* Test overlap with end inside of existing. */
	base -= size + IMR_ALIGN * 2;
	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
	/* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */
	ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL,
			    IMR_WRITE_ACCESS_ALL);
	imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n");
	/* Test that a 1 KiB IMR @ zero with CPU only will work. */
	ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU);
	imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n");
	if (ret >= 0) {
		ret = imr_remove_range(0, IMR_ALIGN);
		imr_self_test_result(ret == 0, "teardown - cpu-access\n");
	}
	/* Test 2 KiB works. */
	size = IMR_ALIGN * 2;
	ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL, IMR_WRITE_ACCESS_ALL);
	imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n");
	if (ret >= 0) {
		ret = imr_remove_range(0, size);
		imr_self_test_result(ret == 0, "teardown 2KiB\n");
	}
}
static const struct x86_cpu_id imr_ids[] __initconst = {
	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL),
	{}
};
/**
 * imr_self_test_init - entry point for the IMR self test.
 *
 * return: 0 on completion.
 */
static int __init imr_self_test_init(void)
{
	if (x86_match_cpu(imr_ids))
		imr_self_test();
	return 0;
}
/* Run the IMR self test at device initcall time. */
device_initcall(imr_self_test_init);
 | 
	linux-master | 
	arch/x86/platform/intel-quark/imr_selftest.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI memory map functions.
 */
#define pr_fmt(fmt) "efi: " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
#include <asm/efi.h>
#include <linux/memblock.h>
#include <linux/slab.h>
static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
{
	return memblock_phys_alloc(size, SMP_CACHE_BYTES);
}
static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *p = alloc_pages(GFP_KERNEL, order);
	if (!p)
		return 0;
	return PFN_PHYS(page_to_pfn(p));
}
void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
{
	if (flags & EFI_MEMMAP_MEMBLOCK) {
		if (slab_is_available())
			memblock_free_late(phys, size);
		else
			memblock_phys_free(phys, size);
	} else if (flags & EFI_MEMMAP_SLAB) {
		struct page *p = pfn_to_page(PHYS_PFN(phys));
		unsigned int order = get_order(size);
		free_pages((unsigned long) page_address(p), order);
	}
}
/**
 * efi_memmap_alloc - Allocate memory for the EFI memory map
 * @num_entries: Number of entries in the allocated map.
 * @data: efi memmap installation parameters
 *
 * Depending on whether mm_init() has already been invoked or not,
 * either memblock or "normal" page allocation is used.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_alloc(unsigned int num_entries,
		struct efi_memory_map_data *data)
{
	/* Expect the allocation parameters to be zero-initialized */
	WARN_ON(data->phys_map || data->size);
	data->size = num_entries * efi.memmap.desc_size;
	data->desc_version = efi.memmap.desc_version;
	data->desc_size = efi.memmap.desc_size;
	data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
	data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;
	if (slab_is_available()) {
		data->flags |= EFI_MEMMAP_SLAB;
		data->phys_map = __efi_memmap_alloc_late(data->size);
	} else {
		data->flags |= EFI_MEMMAP_MEMBLOCK;
		data->phys_map = __efi_memmap_alloc_early(data->size);
	}
	if (!data->phys_map)
		return -ENOMEM;
	return 0;
}
/**
 * efi_memmap_install - Install a new EFI memory map in efi.memmap
 * @data: efi memmap installation parameters
 *
 * Unlike efi_memmap_init_*(), this function does not allow the caller
 * to switch from early to late mappings. It simply uses the existing
 * mapping function and installs the new memmap.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_install(struct efi_memory_map_data *data)
{
	efi_memmap_unmap();
	if (efi_enabled(EFI_PARAVIRT))
		return 0;
	return __efi_memmap_init(data);
}
/**
 * efi_memmap_split_count - Count number of additional EFI memmap entries
 * @md: EFI memory descriptor to split
 * @range: Address range (start, end) to split around
 *
 * Returns the number of additional EFI memmap entries required to
 * accommodate @range.
 */
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
{
	u64 m_start, m_end;
	u64 start, end;
	int count = 0;
	start = md->phys_addr;
	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
	/* modifying range */
	m_start = range->start;
	m_end = range->end;
	if (m_start <= start) {
		/* split into 2 parts */
		if (start < m_end && m_end < end)
			count++;
	}
	if (start < m_start && m_start < end) {
		/* split into 3 parts */
		if (m_end < end)
			count += 2;
		/* split into 2 parts */
		if (end <= m_end)
			count++;
	}
	return count;
}
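/*
 * Worked example (illustrative numbers only): for a descriptor covering
 * [0x1000, 0x4fff], i.e. four EFI pages:
 *
 *   range [0x0000, 0x2fff] overlaps the start -> 1 extra entry (2 parts)
 *   range [0x2000, 0x2fff] lies fully inside  -> 2 extra entries (3 parts)
 *   range [0x3000, 0x5fff] overlaps the end   -> 1 extra entry (2 parts)
 */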
/**
 * efi_memmap_insert - Insert a memory region in an EFI memmap
 * @old_memmap: The existing EFI memory map structure
 * @buf: Address of buffer to store new map
 * @mem: Memory map entry to insert
 *
 * It is suggested that you call efi_memmap_split_count() first
 * to see how large @buf needs to be.
 */
void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
			      struct efi_mem_range *mem)
{
	u64 m_start, m_end, m_attr;
	efi_memory_desc_t *md;
	u64 start, end;
	void *old, *new;
	/* modifying range */
	m_start = mem->range.start;
	m_end = mem->range.end;
	m_attr = mem->attribute;
	/*
	 * The EFI memory map deals with regions in EFI_PAGE_SIZE
	 * units. Ensure that the region described by 'mem' is aligned
	 * correctly.
	 */
	if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
	    !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
		WARN_ON(1);
		return;
	}
	for (old = old_memmap->map, new = buf;
	     old < old_memmap->map_end;
	     old += old_memmap->desc_size, new += old_memmap->desc_size) {
		/* copy original EFI memory descriptor */
		memcpy(new, old, old_memmap->desc_size);
		md = new;
		start = md->phys_addr;
		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
		if (m_start <= start && end <= m_end)
			md->attribute |= m_attr;
		if (m_start <= start &&
		    (start < m_end && m_end < end)) {
			/* first part */
			md->attribute |= m_attr;
			md->num_pages = (m_end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
		}
		if ((start < m_start && m_start < end) && m_end < end) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* middle part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->attribute |= m_attr;
			md->phys_addr = m_start;
			md->num_pages = (m_end - m_start + 1) >>
				EFI_PAGE_SHIFT;
			/* last part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - m_end) >>
				EFI_PAGE_SHIFT;
		}
		if ((start < m_start && m_start < end) &&
		    (end <= m_end)) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_start;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			md->attribute |= m_attr;
		}
	}
}
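/*
 * A minimal usage sketch (hypothetical caller; efi_fake_range() in
 * fake_mem.c follows the same pattern): count the extra entries, allocate
 * a buffer large enough to hold the split map, insert, then install.
 *
 *	struct efi_memory_map_data data = { 0 };
 *	int nr_map = efi.memmap.nr_map;
 *	efi_memory_desc_t *md;
 *	void *buf;
 *
 *	for_each_efi_memory_desc(md)
 *		nr_map += efi_memmap_split_count(md, &mem->range);
 *	if (efi_memmap_alloc(nr_map, &data))
 *		return;
 *	buf = early_memremap(data.phys_map, data.size);
 *	if (buf) {
 *		efi_memmap_insert(&efi.memmap, buf, mem);
 *		early_memunmap(buf, data.size);
 *		efi_memmap_install(&data);
 *	}
 */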
 | 
	linux-master | 
	arch/x86/platform/efi/memmap.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * fake_mem.c
 *
 * Copyright (C) 2015 FUJITSU LIMITED
 * Author: Taku Izumi <[email protected]>
 *
 * This code introduces a new boot option named "efi_fake_mem".
 * By specifying this parameter, you can add an arbitrary attribute to a
 * specific memory range by updating the original (firmware provided) EFI
 * memmap.
 */
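/*
 * The documented syntax (see Documentation/admin-guide/kernel-parameters.txt)
 * is:
 *
 *	efi_fake_mem=nn[KMG]@ss[KMG]:aa[,nn[KMG]@ss[KMG]:aa,..]
 *
 * e.g. efi_fake_mem=2G@4G:0x10000 adds EFI_MEMORY_MORE_RELIABLE (0x10000)
 * to the 2G of RAM starting at 4G.
 */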
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/types.h>
#include <linux/sort.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
static struct efi_mem_range efi_fake_mems[EFI_MAX_FAKEMEM];
static int nr_fake_mem;
static int __init cmp_fake_mem(const void *x1, const void *x2)
{
	const struct efi_mem_range *m1 = x1;
	const struct efi_mem_range *m2 = x2;
	if (m1->range.start < m2->range.start)
		return -1;
	if (m1->range.start > m2->range.start)
		return 1;
	return 0;
}
static void __init efi_fake_range(struct efi_mem_range *efi_range)
{
	struct efi_memory_map_data data = { 0 };
	int new_nr_map = efi.memmap.nr_map;
	efi_memory_desc_t *md;
	void *new_memmap;
	/* count up the number of EFI memory descriptors */
	for_each_efi_memory_desc(md)
		new_nr_map += efi_memmap_split_count(md, &efi_range->range);
	/* allocate memory for new EFI memmap */
	if (efi_memmap_alloc(new_nr_map, &data) != 0)
		return;
	/* create new EFI memmap */
	new_memmap = early_memremap(data.phys_map, data.size);
	if (!new_memmap) {
		__efi_memmap_free(data.phys_map, data.size, data.flags);
		return;
	}
	efi_memmap_insert(&efi.memmap, new_memmap, efi_range);
	/* swap into new EFI memmap */
	early_memunmap(new_memmap, data.size);
	efi_memmap_install(&data);
}
void __init efi_fake_memmap(void)
{
	int i;
	if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
		return;
	for (i = 0; i < nr_fake_mem; i++)
		efi_fake_range(&efi_fake_mems[i]);
	/* print new EFI memmap */
	efi_print_memmap();
}
static int __init setup_fake_mem(char *p)
{
	u64 start = 0, mem_size = 0, attribute = 0;
	int i;
	if (!p)
		return -EINVAL;
	while (*p != '\0') {
		mem_size = memparse(p, &p);
		if (*p == '@')
			start = memparse(p+1, &p);
		else
			break;
		if (*p == ':')
			attribute = simple_strtoull(p+1, &p, 0);
		else
			break;
		if (nr_fake_mem >= EFI_MAX_FAKEMEM)
			break;
		efi_fake_mems[nr_fake_mem].range.start = start;
		efi_fake_mems[nr_fake_mem].range.end = start + mem_size - 1;
		efi_fake_mems[nr_fake_mem].attribute = attribute;
		nr_fake_mem++;
		if (*p == ',')
			p++;
	}
	sort(efi_fake_mems, nr_fake_mem, sizeof(struct efi_mem_range),
	     cmp_fake_mem, NULL);
	for (i = 0; i < nr_fake_mem; i++)
		pr_info("efi_fake_mem: add attr=0x%016llx to [mem 0x%016llx-0x%016llx]",
			efi_fake_mems[i].attribute, efi_fake_mems[i].range.start,
			efi_fake_mems[i].range.end);
	return *p == '\0' ? 0 : -EINVAL;
}
early_param("efi_fake_mem", setup_fake_mem);
void __init efi_fake_memmap_early(void)
{
	int i;
	/*
	 * The late efi_fake_mem() call can handle all requests if
	 * EFI_MEMORY_SP support is disabled.
	 */
	if (!efi_soft_reserve_enabled())
		return;
	if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
		return;
	/*
	 * Given that efi_fake_memmap() needs to perform memblock
	 * allocations it needs to run after e820__memblock_setup().
	 * However, if efi_fake_mem specifies EFI_MEMORY_SP for a given
	 * address range that potentially needs to mark the memory as
	 * reserved prior to e820__memblock_setup(). Update e820
	 * directly if EFI_MEMORY_SP is specified for an
	 * EFI_CONVENTIONAL_MEMORY descriptor.
	 */
	for (i = 0; i < nr_fake_mem; i++) {
		struct efi_mem_range *mem = &efi_fake_mems[i];
		efi_memory_desc_t *md;
		u64 m_start, m_end;
		if ((mem->attribute & EFI_MEMORY_SP) == 0)
			continue;
		m_start = mem->range.start;
		m_end = mem->range.end;
		for_each_efi_memory_desc(md) {
			u64 start, end, size;
			if (md->type != EFI_CONVENTIONAL_MEMORY)
				continue;
			start = md->phys_addr;
			end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
			/* Skip descriptors that the fake range does not overlap. */
			if (m_start > end || m_end < start)
				continue;
			/*
			 * Trim the boundary of the e820 update to the
			 * descriptor in case the fake range overlaps
			 * !EFI_CONVENTIONAL_MEMORY
			 */
			start = max(start, m_start);
			end = min(end, m_end);
			size = end - start + 1;
			if (end <= start)
				continue;
			/*
			 * Ensure each efi_fake_mem instance results in
			 * a unique e820 resource
			 */
			e820__range_remove(start, size, E820_TYPE_RAM, 1);
			e820__range_add(start, size, E820_TYPE_SOFT_RESERVED);
			e820__update_table(e820_table);
		}
	}
}
 | 
	linux-master | 
	arch/x86/platform/efi/fake_mem.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <[email protected]>
 *	Bibo Mao <[email protected]>
 *	Chandramouli Narayanan <[email protected]>
 *	Huang Ying <[email protected]>
 *
 * Code to convert EFI to E820 map has been implemented in elilo bootloader
 * based on a EFI patch by Edgar Hucek. Based on the E820 map, the page table
 * is setup appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */
#define pr_fmt(fmt) "efi: " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
#include <linux/cc_platform.h>
#include <linux/sched/task.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820/api.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/sev.h>
/*
 * We allocate runtime services regions top-down, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
 */
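/*
 * Regions are handed out below one another: with a pristine efi_va, the
 * first region of N bytes lands at EFI_VA_START - N, possibly rounded
 * down further by efi_map_region() to match the region's 2M offset.
 */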
static u64 efi_va = EFI_VA_START;
static struct mm_struct *efi_prev_mm;
/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 *
 * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
 * allocation.
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd, *efi_pgd;
	p4d_t *p4d;
	pud_t *pud;
	gfp_t gfp_mask;
	gfp_mask = GFP_KERNEL | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
	if (!efi_pgd)
		goto fail;
	pgd = efi_pgd + pgd_index(EFI_VA_END);
	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
	if (!p4d)
		goto free_pgd;
	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
	if (!pud)
		goto free_p4d;
	efi_mm.pgd = efi_pgd;
	mm_init_cpumask(&efi_mm);
	init_new_context(NULL, &efi_mm);
	return 0;
free_p4d:
	if (pgtable_l5_enabled())
		free_page((unsigned long)pgd_page_vaddr(*pgd));
free_pgd:
	free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
fail:
	return -ENOMEM;
}
/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	p4d_t *p4d_k, *p4d_efi;
	pud_t *pud_k, *pud_efi;
	pgd_t *efi_pgd = efi_mm.pgd;
	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);
	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pgd_k = pgd_offset_k(EFI_VA_END);
	p4d_efi = p4d_offset(pgd_efi, 0);
	p4d_k = p4d_offset(pgd_k, 0);
	num_entries = p4d_index(EFI_VA_END);
	memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);
	/*
	 * We share all the PUD entries apart from those that map the
	 * EFI regions. Copy around them.
	 */
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);
	p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
	p4d_k = p4d_offset(pgd_k, EFI_VA_END);
	pud_efi = pud_offset(p4d_efi, 0);
	pud_k = pud_offset(p4d_k, 0);
	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
	pud_efi = pud_offset(p4d_efi, EFI_VA_START);
	pud_k = pud_offset(p4d_k, EFI_VA_START);
	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}
/*
 * Wrapper for slow_virt_to_phys() that handles NULL addresses.
 */
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
	phys_addr_t pa;
	if (!va)
		return 0;
	if (virt_addr_valid(va))
		return virt_to_phys(va);
	pa = slow_virt_to_phys(va);
	/* check if the object crosses a page boundary */
	if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
		return 0;
	return pa;
}
#define virt_to_phys_or_null(addr)				\
	virt_to_phys_or_null_size((addr), sizeof(*(addr)))
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	extern const u8 __efi64_thunk_ret_tramp[];
	unsigned long pfn, text, pf, rodata, tramp;
	struct page *page;
	unsigned npages;
	pgd_t *pgd = efi_mm.pgd;
	/*
	 * It can happen that the physical address of new_memmap lands in memory
	 * which is not mapped in the EFI page table. Therefore we need to go
	 * and ident-map those pages containing the map before calling
	 * phys_efi_set_virtual_address_map().
	 */
	pfn = pa_memmap >> PAGE_SHIFT;
	pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}
	/*
	 * Certain firmware versions are way too sentimental and still believe
	 * they are exclusive and unquestionable owners of the first physical page,
	 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
	 * (but then write-access it later during SetVirtualAddressMap()).
	 *
	 * Create a 1:1 mapping for this page, to avoid triple faults during early
	 * boot with such firmware. We are free to hand this page to the BIOS,
	 * as trim_bios_range() will reserve the first page and isolate it away
	 * from memory allocators anyway.
	 */
	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
		pr_err("Failed to create 1:1 mapping for the first page!\n");
		return 1;
	}
	/*
	 * When SEV-ES is active, the GHCB as set by the kernel will be used
	 * by firmware. Create a 1:1 unencrypted mapping for each GHCB.
	 */
	if (sev_es_efi_map_ghcbs(pgd)) {
		pr_err("Failed to create 1:1 mapping for the GHCBs!\n");
		return 1;
	}
	/*
	 * When making calls to the firmware everything needs to be 1:1
	 * mapped and addressable with 32-bit pointers. Map the kernel
	 * text and allocate a new stack because we can't rely on the
	 * stack pointer being < 4GB.
	 */
	if (!efi_is_mixed())
		return 0;
	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page) {
		pr_err("Unable to allocate EFI runtime stack < 4GB\n");
		return 1;
	}
	efi_mixed_mode_stack_pa = page_to_phys(page + 1); /* stack grows down */
	npages = (_etext - _text) >> PAGE_SHIFT;
	text = __pa(_text);
	if (kernel_unmap_pages_in_pgd(pgd, text, npages)) {
		pr_err("Failed to unmap kernel text 1:1 mapping\n");
		return 1;
	}
	npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
	rodata = __pa(__start_rodata);
	pfn = rodata >> PAGE_SHIFT;
	pf = _PAGE_NX | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
		pr_err("Failed to map kernel rodata 1:1\n");
		return 1;
	}
	tramp = __pa(__efi64_thunk_ret_tramp);
	pfn = tramp >> PAGE_SHIFT;
	pf = _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, tramp, 1, pf)) {
		pr_err("Failed to map mixed mode return trampoline\n");
		return 1;
	}
	return 0;
}
static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	unsigned long flags = _PAGE_RW;
	unsigned long pfn;
	pgd_t *pgd = efi_mm.pgd;
	/*
	 * EFI_RUNTIME_SERVICES_CODE regions typically cover PE/COFF
	 * executable images in memory that consist of both R-X and
	 * RW- sections, so we cannot apply read-only or non-exec
	 * permissions just yet. However, modern EFI systems provide
	 * a memory attributes table that describes those sections
	 * with the appropriate restricted permissions, which are
	 * applied in efi_runtime_update_mappings() below. All other
	 * regions can be mapped non-executable at this point, with
	 * the exception of boot services code regions, but those will
	 * be unmapped again entirely in efi_free_boot_services().
	 */
	if (md->type != EFI_BOOT_SERVICES_CODE &&
	    md->type != EFI_RUNTIME_SERVICES_CODE)
		flags |= _PAGE_NX;
	if (!(md->attribute & EFI_MEMORY_WB))
		flags |= _PAGE_PCD;
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
	    md->type != EFI_MEMORY_MAPPED_IO)
		flags |= _PAGE_ENC;
	pfn = md->phys_addr >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			   md->phys_addr, va);
}
void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;
	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);
	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (efi_is_mixed()) {
		md->virt_addr = md->phys_addr;
		return;
	}
	efi_va -= size;
	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;
		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;
		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}
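	/*
	 * efi_va now shares the PA's offset within a 2M page, i.e.
	 * (efi_va & ~PMD_MASK) == (pa & ~PMD_MASK), so the mapping code
	 * can use large pages where the region size allows it.
	 */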
	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}
	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}
/*
 * A kexec kernel uses efi_map_region_fixed() to map the EFI runtime memory
 * ranges. md->virt_addr is the original virtual address that was mapped in
 * the first kernel of the kexec boot.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->phys_addr);
	__map_region(md, md->virt_addr);
}
void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	efi_setup = phys_addr + sizeof(struct setup_data);
}
static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
{
	unsigned long pfn;
	pgd_t *pgd = efi_mm.pgd;
	int err1, err2;
	/* Update the 1:1 mapping */
	pfn = md->phys_addr >> PAGE_SHIFT;
	err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
	if (err1) {
		pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
			   md->phys_addr, md->virt_addr);
	}
	err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
	if (err2) {
		pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
			   md->phys_addr, md->virt_addr);
	}
	return err1 || err2;
}
bool efi_disable_ibt_for_runtime __ro_after_init = true;
static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md,
				      bool has_ibt)
{
	unsigned long pf = 0;
	efi_disable_ibt_for_runtime |= !has_ibt;
	if (md->attribute & EFI_MEMORY_XP)
		pf |= _PAGE_NX;
	if (!(md->attribute & EFI_MEMORY_RO))
		pf |= _PAGE_RW;
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		pf |= _PAGE_ENC;
	return efi_update_mappings(md, pf);
}
void __init efi_runtime_update_mappings(void)
{
	efi_memory_desc_t *md;
	/*
	 * Use the EFI Memory Attribute Table for mapping permissions if it
	 * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
	 */
	if (efi_enabled(EFI_MEM_ATTR)) {
		efi_disable_ibt_for_runtime = false;
		efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
		return;
	}
	/*
	 * EFI_MEMORY_ATTRIBUTES_TABLE is intended to replace
	 * EFI_PROPERTIES_TABLE. So, use EFI_PROPERTIES_TABLE to update
	 * permissions only if EFI_MEMORY_ATTRIBUTES_TABLE is not
	 * published by the firmware. Even if we find a buggy implementation of
	 * EFI_MEMORY_ATTRIBUTES_TABLE, don't fall back to
	 * EFI_PROPERTIES_TABLE, because of the same reason.
	 */
	if (!efi_enabled(EFI_NX_PE_DATA))
		return;
	for_each_efi_memory_desc(md) {
		unsigned long pf = 0;
		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;
		if (!(md->attribute & EFI_MEMORY_WB))
			pf |= _PAGE_PCD;
		if ((md->attribute & EFI_MEMORY_XP) ||
			(md->type == EFI_RUNTIME_SERVICES_DATA))
			pf |= _PAGE_NX;
		if (!(md->attribute & EFI_MEMORY_RO) &&
			(md->type != EFI_RUNTIME_SERVICES_CODE))
			pf |= _PAGE_RW;
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			pf |= _PAGE_ENC;
		efi_update_mappings(md, pf);
	}
}
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	ptdump_walk_pgd_level(NULL, &efi_mm);
#endif
}
/*
 * Makes the calling thread switch to/from efi_mm context. Can be used
 * in a kernel thread and user context. Preemption needs to remain disabled
 * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm
 * can not change under us.
 * It should be ensured that there are no concurrent calls to this function.
 */
static void efi_enter_mm(void)
{
	efi_prev_mm = current->active_mm;
	current->active_mm = &efi_mm;
	switch_mm(efi_prev_mm, &efi_mm, NULL);
}
static void efi_leave_mm(void)
{
	current->active_mm = efi_prev_mm;
	switch_mm(&efi_mm, efi_prev_mm, NULL);
}
void arch_efi_call_virt_setup(void)
{
	efi_sync_low_kernel_mappings();
	efi_fpu_begin();
	firmware_restrict_branch_speculation_start();
	efi_enter_mm();
}
void arch_efi_call_virt_teardown(void)
{
	efi_leave_mm();
	firmware_restrict_branch_speculation_end();
	efi_fpu_end();
}
static DEFINE_SPINLOCK(efi_runtime_lock);
/*
 * DS and ES contain user values.  We need to save them.
 * The 32-bit EFI code needs a valid DS, ES, and SS.  There's no
 * need to save the old SS: __KERNEL_DS is always acceptable.
 */
#define __efi_thunk(func, ...)						\
({									\
	unsigned short __ds, __es;					\
	efi_status_t ____s;						\
									\
	savesegment(ds, __ds);						\
	savesegment(es, __es);						\
									\
	loadsegment(ss, __KERNEL_DS);					\
	loadsegment(ds, __KERNEL_DS);					\
	loadsegment(es, __KERNEL_DS);					\
									\
	____s = efi64_thunk(efi.runtime->mixed_mode.func, __VA_ARGS__);	\
									\
	loadsegment(ds, __ds);						\
	loadsegment(es, __es);						\
									\
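	/* Move the error flag from bit 31 (32-bit status) to bit 63 */\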
	____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32;	\
	____s;								\
})
/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables.
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 */
#define efi_thunk(func...)						\
({									\
	efi_status_t __s;						\
									\
	arch_efi_call_virt_setup();					\
									\
	__s = __efi_thunk(func);					\
									\
	arch_efi_call_virt_teardown();					\
									\
	__s;								\
})
static efi_status_t __init __no_sanitize_address
efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
				  unsigned long descriptor_size,
				  u32 descriptor_version,
				  efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;
	efi_sync_low_kernel_mappings();
	local_irq_save(flags);
	efi_enter_mm();
	status = __efi_thunk(set_virtual_address_map, memory_map_size,
			     descriptor_size, descriptor_version, virtual_map);
	efi_leave_mm();
	local_irq_restore(flags);
	return status;
}
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	return EFI_UNSUPPORTED;
}
static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}
static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}
static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}
static unsigned long efi_name_size(efi_char16_t *name)
{
	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}
static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
{
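	/*
	 * Copy the vendor GUID into a stack buffer aligned to its own size:
	 * a 16-byte-aligned 16-byte object can never straddle a page
	 * boundary, so passing its physical address to the firmware is safe.
	 */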
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name, phys_vendor, phys_attr;
	u32 phys_data_size, phys_data;
	unsigned long flags;
	spin_lock_irqsave(&efi_runtime_lock, flags);
	*vnd = *vendor;
	phys_data_size = virt_to_phys_or_null(data_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_attr = virt_to_phys_or_null(attr);
	phys_data = virt_to_phys_or_null_size(data, *data_size);
	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_variable, phys_name, phys_vendor,
				   phys_attr, phys_data_size, phys_data);
	spin_unlock_irqrestore(&efi_runtime_lock, flags);
	return status;
}
static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 attr, unsigned long data_size, void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;
	unsigned long flags;
	spin_lock_irqsave(&efi_runtime_lock, flags);
	*vnd = *vendor;
	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_data = virt_to_phys_or_null_size(data, data_size);
	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(set_variable, phys_name, phys_vendor,
				   attr, data_size, phys_data);
	spin_unlock_irqrestore(&efi_runtime_lock, flags);
	return status;
}
static efi_status_t
efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
				   u32 attr, unsigned long data_size,
				   void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;
	unsigned long flags;
	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;
	*vnd = *vendor;
	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_data = virt_to_phys_or_null_size(data, data_size);
	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(set_variable, phys_name, phys_vendor,
				   attr, data_size, phys_data);
	spin_unlock_irqrestore(&efi_runtime_lock, flags);
	return status;
}
static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name_size, phys_name, phys_vendor;
	unsigned long flags;
	spin_lock_irqsave(&efi_runtime_lock, flags);
	*vnd = *vendor;
	phys_name_size = virt_to_phys_or_null(name_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, *name_size);
	if (!phys_name)
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_next_variable, phys_name_size,
				   phys_name, phys_vendor);
	spin_unlock_irqrestore(&efi_runtime_lock, flags);
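	/* The firmware may update the vendor GUID; copy it back to the caller. */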
	*vendor = *vnd;
	return status;
}
static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
	return EFI_UNSUPPORTED;
}
static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
		       unsigned long data_size, efi_char16_t *data)
{
	u32 phys_data;
	unsigned long flags;
	spin_lock_irqsave(&efi_runtime_lock, flags);
	phys_data = virt_to_phys_or_null_size(data, data_size);
	efi_thunk(reset_system, reset_type, status, data_size, phys_data);
	spin_unlock_irqrestore(&efi_runtime_lock, flags);
}
static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
			      u64 *remaining_space,
			      u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;
	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;
	spin_lock_irqsave(&efi_runtime_lock, flags);
	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);
	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);
	spin_unlock_irqrestore(&efi_runtime_lock, flags);
	return status;
}
static efi_status_t
efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
					  u64 *remaining_space,
					  u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;
	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;
	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;
	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);
	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);
	spin_unlock_irqrestore(&efi_runtime_lock, flags);
	return status;
}
static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
void __init efi_thunk_runtime_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return;
	efi.get_time = efi_thunk_get_time;
	efi.set_time = efi_thunk_set_time;
	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
	efi.set_wakeup_time = efi_thunk_set_wakeup_time;
	efi.get_variable = efi_thunk_get_variable;
	efi.get_next_variable = efi_thunk_get_next_variable;
	efi.set_variable = efi_thunk_set_variable;
	efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
	efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
	efi.reset_system = efi_thunk_reset_system;
	efi.query_variable_info = efi_thunk_query_variable_info;
	efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
	efi.update_capsule = efi_thunk_update_capsule;
	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}
efi_status_t __init __no_sanitize_address
efi_set_virtual_address_map(unsigned long memory_map_size,
			    unsigned long descriptor_size,
			    u32 descriptor_version,
			    efi_memory_desc_t *virtual_map,
			    unsigned long systab_phys)
{
	const efi_system_table_t *systab = (efi_system_table_t *)systab_phys;
	efi_status_t status;
	unsigned long flags;
	if (efi_is_mixed())
		return efi_thunk_set_virtual_address_map(memory_map_size,
							 descriptor_size,
							 descriptor_version,
							 virtual_map);
	efi_enter_mm();
	efi_fpu_begin();
	/* Disable interrupts around EFI calls: */
	local_irq_save(flags);
	status = arch_efi_call_virt(efi.runtime, set_virtual_address_map,
				    memory_map_size, descriptor_size,
				    descriptor_version, virtual_map);
	local_irq_restore(flags);
	efi_fpu_end();
	/* grab the virtually remapped EFI runtime services table pointer */
	efi.runtime = READ_ONCE(systab->runtime);
	efi_leave_mm();
	return status;
}
 | 
	linux-master | 
	arch/x86/platform/efi/efi_64.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI (Extensible Firmware Interface) support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <[email protected]>
 * Copyright (C) 1999-2002 Hewlett-Packard Co.
 *	David Mosberger-Tang <[email protected]>
 *	Stephane Eranian <[email protected]>
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <[email protected]>
 *	Bibo Mao <[email protected]>
 *	Chandramouli Narayanan <[email protected]>
 *	Huang Ying <[email protected]>
 * Copyright (C) 2013 SuSE Labs
 *	Borislav Petkov <[email protected]> - runtime services VA mapping
 *
 * Copied from efi_32.c to eliminate the duplicated code between EFI
 * 32/64 support code. --ying 2007-10-26
 *
 * All EFI Runtime Services are not implemented yet as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <[email protected]>
 *	Skip non-WB memory and ignore empty memory ranges.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>
#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/e820/api.h>
#include <asm/time.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/uv/uv.h>
static unsigned long efi_systab_phys __initdata;
static unsigned long prop_phys = EFI_INVALID_TABLE_ADDR;
static unsigned long uga_phys = EFI_INVALID_TABLE_ADDR;
static unsigned long efi_runtime, efi_nr_tables;
unsigned long efi_fw_vendor, efi_config_table;
static const efi_config_table_type_t arch_tables[] __initconst = {
	{EFI_PROPERTIES_TABLE_GUID,	&prop_phys,		"PROP"		},
	{UGA_IO_PROTOCOL_GUID,		&uga_phys,		"UGA"		},
#ifdef CONFIG_X86_UV
	{UV_SYSTEM_TABLE_GUID,		&uv_systab_phys,	"UVsystab"	},
#endif
	{},
};
static const unsigned long * const efi_tables[] = {
	&efi.acpi,
	&efi.acpi20,
	&efi.smbios,
	&efi.smbios3,
	&uga_phys,
#ifdef CONFIG_X86_UV
	&uv_systab_phys,
#endif
	&efi_fw_vendor,
	&efi_runtime,
	&efi_config_table,
	&efi.esrt,
	&prop_phys,
	&efi_mem_attr_table,
#ifdef CONFIG_EFI_RCI2_TABLE
	&rci2_table_phys,
#endif
	&efi.tpm_log,
	&efi.tpm_final_log,
	&efi_rng_seed,
#ifdef CONFIG_LOAD_UEFI_KEYS
	&efi.mokvar_table,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	&efi.coco_secret,
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	&efi.unaccepted,
#endif
};
u64 efi_setup;		/* efi setup_data physical address */
static int add_efi_memmap __initdata;
static int __init setup_add_efi_memmap(char *arg)
{
	add_efi_memmap = 1;
	return 0;
}
early_param("add_efi_memmap", setup_add_efi_memmap);
/*
 * Tell the kernel about the EFI memory map.  This might include
 * more than the max 128 entries that can fit in the passed in e820
 * legacy (zeropage) memory map, but the kernel's e820 table can hold
 * E820_MAX_ENTRIES.
 */
static void __init do_add_efi_memmap(void)
{
	efi_memory_desc_t *md;
	if (!efi_enabled(EFI_MEMMAP))
		return;
	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
		int e820_type;
		switch (md->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
			if (efi_soft_reserve_enabled()
			    && (md->attribute & EFI_MEMORY_SP))
				e820_type = E820_TYPE_SOFT_RESERVED;
			else if (md->attribute & EFI_MEMORY_WB)
				e820_type = E820_TYPE_RAM;
			else
				e820_type = E820_TYPE_RESERVED;
			break;
		case EFI_ACPI_RECLAIM_MEMORY:
			e820_type = E820_TYPE_ACPI;
			break;
		case EFI_ACPI_MEMORY_NVS:
			e820_type = E820_TYPE_NVS;
			break;
		case EFI_UNUSABLE_MEMORY:
			e820_type = E820_TYPE_UNUSABLE;
			break;
		case EFI_PERSISTENT_MEMORY:
			e820_type = E820_TYPE_PMEM;
			break;
		default:
			/*
			 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
			 * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
			 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
			 */
			e820_type = E820_TYPE_RESERVED;
			break;
		}
		e820__range_add(start, size, e820_type);
	}
	e820__update_table(e820_table);
}
/*
 * Given add_efi_memmap defaults to 0 and there is no alternative
 * e820 mechanism for soft-reserved memory, import the full EFI memory
 * map if soft reservations are present and enabled. Otherwise, the
 * mechanism to disable the kernel's consideration of EFI_MEMORY_SP is
 * the efi=nosoftreserve option.
 */
static bool do_efi_soft_reserve(void)
{
	efi_memory_desc_t *md;
	if (!efi_enabled(EFI_MEMMAP))
		return false;
	if (!efi_soft_reserve_enabled())
		return false;
	for_each_efi_memory_desc(md)
		if (md->type == EFI_CONVENTIONAL_MEMORY &&
		    (md->attribute & EFI_MEMORY_SP))
			return true;
	return false;
}
int __init efi_memblock_x86_reserve_range(void)
{
	struct efi_info *e = &boot_params.efi_info;
	struct efi_memory_map_data data;
	phys_addr_t pmap;
	int rv;
	if (efi_enabled(EFI_PARAVIRT))
		return 0;
	/* Can't handle firmware tables above 4GB on i386 */
	if (IS_ENABLED(CONFIG_X86_32) && e->efi_memmap_hi > 0) {
		pr_err("Memory map is above 4GB, disabling EFI.\n");
		return -EINVAL;
	}
	pmap = (phys_addr_t)(e->efi_memmap | ((u64)e->efi_memmap_hi << 32));
	data.phys_map		= pmap;
	data.size 		= e->efi_memmap_size;
	data.desc_size		= e->efi_memdesc_size;
	data.desc_version	= e->efi_memdesc_version;
	if (!efi_enabled(EFI_PARAVIRT)) {
		rv = efi_memmap_init_early(&data);
		if (rv)
			return rv;
	}
	if (add_efi_memmap || do_efi_soft_reserve())
		do_add_efi_memmap();
	efi_fake_memmap_early();
	WARN(efi.memmap.desc_version != 1,
	     "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
	     efi.memmap.desc_version);
	memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);
	set_bit(EFI_PRESERVE_BS_REGIONS, &efi.flags);
	return 0;
}
#define OVERFLOW_ADDR_SHIFT	(64 - EFI_PAGE_SHIFT)
#define OVERFLOW_ADDR_MASK	(U64_MAX << OVERFLOW_ADDR_SHIFT)
#define U64_HIGH_BIT		(~(U64_MAX >> 1))
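/*
 * (num_pages << EFI_PAGE_SHIFT) overflows a u64 whenever num_pages has bits
 * set at or above OVERFLOW_ADDR_SHIFT. OVERFLOW_ADDR_MASK extracts those
 * bits so that efi_memmap_entry_valid() can still report the full (>64-bit)
 * end address of a bogus descriptor in two halves.
 */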
static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
{
	u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
	u64 end_hi = 0;
	char buf[64];
	if (md->num_pages == 0) {
		end = 0;
	} else if (md->num_pages > EFI_PAGES_MAX ||
		   EFI_PAGES_MAX - md->num_pages <
		   (md->phys_addr >> EFI_PAGE_SHIFT)) {
		end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
			>> OVERFLOW_ADDR_SHIFT;
		if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
			end_hi += 1;
	} else {
		return true;
	}
	pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
	if (end_hi) {
		pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
			i, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr, end_hi, end);
	} else {
		pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
			i, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr, end);
	}
	return false;
}
static void __init efi_clean_memmap(void)
{
	efi_memory_desc_t *out = efi.memmap.map;
	const efi_memory_desc_t *in = out;
	const efi_memory_desc_t *end = efi.memmap.map_end;
	int i, n_removal;
	for (i = n_removal = 0; in < end; i++) {
		if (efi_memmap_entry_valid(in, i)) {
			if (out != in)
				memcpy(out, in, efi.memmap.desc_size);
			out = (void *)out + efi.memmap.desc_size;
		} else {
			n_removal++;
		}
		in = (void *)in + efi.memmap.desc_size;
	}
	if (n_removal > 0) {
		struct efi_memory_map_data data = {
			.phys_map	= efi.memmap.phys_map,
			.desc_version	= efi.memmap.desc_version,
			.desc_size	= efi.memmap.desc_size,
			.size		= efi.memmap.desc_size * (efi.memmap.nr_map - n_removal),
			.flags		= 0,
		};
		pr_warn("Removing %d invalid memory map entries.\n", n_removal);
		efi_memmap_install(&data);
	}
}
/*
 * Firmware can use EfiMemoryMappedIO to request that MMIO regions be
 * mapped by the OS so they can be accessed by EFI runtime services, but
 * should have no other significance to the OS (UEFI r2.10, sec 7.2).
 * However, most bootloaders and EFI stubs convert EfiMemoryMappedIO
 * regions to E820_TYPE_RESERVED entries, which prevent Linux from
 * allocating space from them (see remove_e820_regions()).
 *
 * Some platforms use EfiMemoryMappedIO entries for PCI MMCONFIG space and
 * PCI host bridge windows, which means Linux can't allocate BAR space for
 * hot-added devices.
 *
 * Remove large EfiMemoryMappedIO regions from the E820 map to avoid this
 * problem.
 *
 * Retain small EfiMemoryMappedIO regions because on some platforms, these
 * describe non-window space that's included in host bridge _CRS.  If we
 * assign that space to PCI devices, they don't work.
 */
static void __init efi_remove_e820_mmio(void)
{
	efi_memory_desc_t *md;
	u64 size, start, end;
	int i = 0;
	for_each_efi_memory_desc(md) {
		if (md->type == EFI_MEMORY_MAPPED_IO) {
			size = md->num_pages << EFI_PAGE_SHIFT;
			start = md->phys_addr;
			end = start + size - 1;
			if (size >= 256*1024) {
				pr_info("Remove mem%02u: MMIO range=[0x%08llx-0x%08llx] (%lluMB) from e820 map\n",
					i, start, end, size >> 20);
				e820__range_remove(start, size,
						   E820_TYPE_RESERVED, 1);
			} else {
				pr_info("Not removing mem%02u: MMIO range=[0x%08llx-0x%08llx] (%lluKB) from e820 map\n",
					i, start, end, size >> 10);
			}
		}
		i++;
	}
}
void __init efi_print_memmap(void)
{
	efi_memory_desc_t *md;
	int i = 0;
	for_each_efi_memory_desc(md) {
		char buf[64];
		pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
			i++, efi_md_typeattr_format(buf, sizeof(buf), md),
			md->phys_addr,
			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
	}
}
static int __init efi_systab_init(unsigned long phys)
{
	int size = efi_enabled(EFI_64BIT) ? sizeof(efi_system_table_64_t)
					  : sizeof(efi_system_table_32_t);
	const efi_table_hdr_t *hdr;
	bool over4g = false;
	void *p;
	int ret;
	hdr = p = early_memremap_ro(phys, size);
	if (p == NULL) {
		pr_err("Couldn't map the system table!\n");
		return -ENOMEM;
	}
	ret = efi_systab_check_header(hdr);
	if (ret) {
		early_memunmap(p, size);
		return ret;
	}
	if (efi_enabled(EFI_64BIT)) {
		const efi_system_table_64_t *systab64 = p;
		efi_runtime	= systab64->runtime;
		over4g		= systab64->runtime > U32_MAX;
		if (efi_setup) {
			struct efi_setup_data *data;
			data = early_memremap_ro(efi_setup, sizeof(*data));
			if (!data) {
				early_memunmap(p, size);
				return -ENOMEM;
			}
			efi_fw_vendor		= (unsigned long)data->fw_vendor;
			efi_config_table	= (unsigned long)data->tables;
			over4g |= data->fw_vendor	> U32_MAX ||
				  data->tables		> U32_MAX;
			early_memunmap(data, sizeof(*data));
		} else {
			efi_fw_vendor		= systab64->fw_vendor;
			efi_config_table	= systab64->tables;
			over4g |= systab64->fw_vendor	> U32_MAX ||
				  systab64->tables	> U32_MAX;
		}
		efi_nr_tables = systab64->nr_tables;
	} else {
		const efi_system_table_32_t *systab32 = p;
		efi_fw_vendor		= systab32->fw_vendor;
		efi_runtime		= systab32->runtime;
		efi_config_table	= systab32->tables;
		efi_nr_tables		= systab32->nr_tables;
	}
	efi.runtime_version = hdr->revision;
	efi_systab_report_header(hdr, efi_fw_vendor);
	early_memunmap(p, size);
	if (IS_ENABLED(CONFIG_X86_32) && over4g) {
		pr_err("EFI data located above 4GB, disabling EFI.\n");
		return -EINVAL;
	}
	return 0;
}
static int __init efi_config_init(const efi_config_table_type_t *arch_tables)
{
	void *config_tables;
	int sz, ret;
	if (efi_nr_tables == 0)
		return 0;
	if (efi_enabled(EFI_64BIT))
		sz = sizeof(efi_config_table_64_t);
	else
		sz = sizeof(efi_config_table_32_t);
	/*
	 * Let's see what config tables the firmware passed to us.
	 */
	config_tables = early_memremap(efi_config_table, efi_nr_tables * sz);
	if (config_tables == NULL) {
		pr_err("Could not map Configuration table!\n");
		return -ENOMEM;
	}
	ret = efi_config_parse_tables(config_tables, efi_nr_tables,
				      arch_tables);
	early_memunmap(config_tables, efi_nr_tables * sz);
	return ret;
}
void __init efi_init(void)
{
	if (IS_ENABLED(CONFIG_X86_32) &&
	    (boot_params.efi_info.efi_systab_hi ||
	     boot_params.efi_info.efi_memmap_hi)) {
		pr_info("Table located above 4GB, disabling EFI.\n");
		return;
	}
	efi_systab_phys = boot_params.efi_info.efi_systab |
			  ((__u64)boot_params.efi_info.efi_systab_hi << 32);
	if (efi_systab_init(efi_systab_phys))
		return;
	if (efi_reuse_config(efi_config_table, efi_nr_tables))
		return;
	if (efi_config_init(arch_tables))
		return;
	/*
	 * Note: We currently don't support runtime services on an EFI
	 * that doesn't match the kernel 32/64-bit mode.
	 */
	if (!efi_runtime_supported())
		pr_err("No EFI runtime due to 32/64-bit mismatch with kernel\n");
	if (!efi_runtime_supported() || efi_runtime_disabled()) {
		efi_memmap_unmap();
		return;
	}
	/* Parse the EFI Properties table if it exists */
	if (prop_phys != EFI_INVALID_TABLE_ADDR) {
		efi_properties_table_t *tbl;
		tbl = early_memremap_ro(prop_phys, sizeof(*tbl));
		if (tbl == NULL) {
			pr_err("Could not map Properties table!\n");
		} else {
			if (tbl->memory_protection_attribute &
			    EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA)
				set_bit(EFI_NX_PE_DATA, &efi.flags);
			early_memunmap(tbl, sizeof(*tbl));
		}
	}
	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
	efi_clean_memmap();
	efi_remove_e820_mmio();
	if (efi_enabled(EFI_DBG))
		efi_print_memmap();
}
/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
	efi_memory_desc_t *md, *prev_md = NULL;
	for_each_efi_memory_desc(md) {
		u64 prev_size;
		if (!prev_md) {
			prev_md = md;
			continue;
		}
		if (prev_md->type != md->type ||
		    prev_md->attribute != md->attribute) {
			prev_md = md;
			continue;
		}
		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;
		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
			prev_md->num_pages += md->num_pages;
			md->type = EFI_RESERVED_TYPE;
			md->attribute = 0;
			continue;
		}
		prev_md = md;
	}
}
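/*
 * Double the allocation backing the new memory map (order old_shift + 1).
 * Note that on failure the old buffer is freed as well, so the caller only
 * ever owns the returned pointer, which may be NULL.
 */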
static void *realloc_pages(void *old_memmap, int old_shift)
{
	void *ret;
	ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
	if (!ret)
		goto out;
	/*
	 * A first-time allocation doesn't have anything to copy.
	 */
	if (!old_memmap)
		return ret;
	memcpy(ret, old_memmap, PAGE_SIZE << old_shift);
out:
	free_pages((unsigned long)old_memmap, old_shift);
	return ret;
}
/*
 * Iterate the EFI memory map in reverse order because the regions
 * will be mapped top-down. The end result is the same as if we had
 * mapped things forward, but doesn't require us to change the
 * existing implementation of efi_map_region().
 */
static inline void *efi_map_next_entry_reverse(void *entry)
{
	/* Initial call */
	if (!entry)
		return efi.memmap.map_end - efi.memmap.desc_size;
	entry -= efi.memmap.desc_size;
	if (entry < efi.memmap.map)
		return NULL;
	return entry;
}
/*
 * efi_map_next_entry - Return the next EFI memory map descriptor
 * @entry: Previous EFI memory map descriptor
 *
 * This is a helper function to iterate over the EFI memory map, which
 * we do in different orders depending on the current configuration.
 *
 * To begin traversing the memory map @entry must be %NULL.
 *
 * Returns %NULL when we reach the end of the memory map.
 */
static void *efi_map_next_entry(void *entry)
{
	if (efi_enabled(EFI_64BIT)) {
		/*
		 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
		 * config table feature requires us to map all entries
		 * in the same order as they appear in the EFI memory
		 * map. That is to say, entry N must have a lower
		 * virtual address than entry N+1. This is because the
		 * firmware toolchain leaves relative references in
		 * the code/data sections, which are split and become
		 * separate EFI memory regions. Mapping things
		 * out-of-order leads to the firmware accessing
		 * unmapped addresses.
		 *
		 * Since we need to map things this way whether or not
		 * the kernel actually makes use of
		 * EFI_PROPERTIES_TABLE, let's just switch to this
		 * scheme by default for 64-bit.
		 */
		return efi_map_next_entry_reverse(entry);
	}
	/* Initial call */
	if (!entry)
		return efi.memmap.map;
	entry += efi.memmap.desc_size;
	if (entry >= efi.memmap.map_end)
		return NULL;
	return entry;
}
static bool should_map_region(efi_memory_desc_t *md)
{
	/*
	 * Runtime regions always require runtime mappings (obviously).
	 */
	if (md->attribute & EFI_MEMORY_RUNTIME)
		return true;
	/*
	 * 32-bit EFI doesn't suffer from the bug that requires us to
	 * reserve boot services regions, and mixed mode support
	 * doesn't exist for 32-bit kernels.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		return false;
	/*
	 * EFI specific purpose memory may be reserved by default
	 * depending on kernel config and boot options.
	 */
	if (md->type == EFI_CONVENTIONAL_MEMORY &&
	    efi_soft_reserve_enabled() &&
	    (md->attribute & EFI_MEMORY_SP))
		return false;
	/*
	 * Map all of RAM so that we can access arguments in the 1:1
	 * mapping when making EFI runtime calls.
	 */
	if (efi_is_mixed()) {
		if (md->type == EFI_CONVENTIONAL_MEMORY ||
		    md->type == EFI_LOADER_DATA ||
		    md->type == EFI_LOADER_CODE)
			return true;
	}
	/*
	 * Map boot services regions as a workaround for buggy
	 * firmware that accesses them even when they shouldn't.
	 *
	 * See efi_{reserve,free}_boot_services().
	 */
	if (md->type == EFI_BOOT_SERVICES_CODE ||
	    md->type == EFI_BOOT_SERVICES_DATA)
		return true;
	return false;
}
/*
 * Map the efi memory ranges of the runtime services and update new_mmap with
 * virtual addresses.
 */
static void * __init efi_map_regions(int *count, int *pg_shift)
{
	void *p, *new_memmap = NULL;
	unsigned long left = 0;
	unsigned long desc_size;
	efi_memory_desc_t *md;
	desc_size = efi.memmap.desc_size;
	p = NULL;
	while ((p = efi_map_next_entry(p))) {
		md = p;
		if (!should_map_region(md))
			continue;
		efi_map_region(md);
		if (left < desc_size) {
			new_memmap = realloc_pages(new_memmap, *pg_shift);
			if (!new_memmap)
				return NULL;
			left += PAGE_SIZE << *pg_shift;
			(*pg_shift)++;
		}
		memcpy(new_memmap + (*count * desc_size), md, desc_size);
		left -= desc_size;
		(*count)++;
	}
	return new_memmap;
}
static void __init kexec_enter_virtual_mode(void)
{
#ifdef CONFIG_KEXEC_CORE
	efi_memory_desc_t *md;
	unsigned int num_pages;
	/*
	 * We don't do virtual mode, since we don't do runtime services, on
	 * non-native EFI.
	 */
	if (efi_is_mixed()) {
		efi_memmap_unmap();
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}
	if (efi_alloc_page_tables()) {
		pr_err("Failed to allocate EFI page tables\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}
	/*
	 * Map EFI regions which were passed via setup_data. The virt_addr is a
	 * fixed address which was used in the first kernel of a kexec boot.
	 */
	for_each_efi_memory_desc(md)
		efi_map_region_fixed(md); /* FIXME: add error handling */
	/*
	 * Unregister the early EFI memmap from efi_init() and install
	 * the new EFI memory map.
	 */
	efi_memmap_unmap();
	if (efi_memmap_init_late(efi.memmap.phys_map,
				 efi.memmap.desc_size * efi.memmap.nr_map)) {
		pr_err("Failed to remap late EFI memory map\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}
	num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
	num_pages >>= PAGE_SHIFT;
	if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return;
	}
	efi_sync_low_kernel_mappings();
	efi_native_runtime_setup();
#endif
}
/*
 * This function will switch the EFI runtime services to virtual mode.
 * Essentially, we look through the EFI memmap and map every region that
 * has the runtime attribute bit set in its memory descriptor into the
 * efi_pgd page table.
 *
 * The new method does a pagetable switch in a preemption-safe manner
 * so that we're in a different address space when calling a runtime
 * function. For function arguments passing we do copy the PUDs of the
 * kernel page table into efi_pgd prior to each call.
 *
 * For a kexec boot specifically, the EFI runtime maps of the previous kernel
 * should be passed in via setup_data. In that case the runtime ranges will be
 * mapped to the same virtual addresses as in the first kernel, see
 * kexec_enter_virtual_mode().
 */
static void __init __efi_enter_virtual_mode(void)
{
	int count = 0, pg_shift = 0;
	void *new_memmap = NULL;
	efi_status_t status;
	unsigned long pa;
	if (efi_alloc_page_tables()) {
		pr_err("Failed to allocate EFI page tables\n");
		goto err;
	}
	efi_merge_regions();
	new_memmap = efi_map_regions(&count, &pg_shift);
	if (!new_memmap) {
		pr_err("Error reallocating memory, EFI runtime non-functional!\n");
		goto err;
	}
	pa = __pa(new_memmap);
	/*
	 * Unregister the early EFI memmap from efi_init() and install
	 * the new EFI memory map that we are about to pass to the
	 * firmware via SetVirtualAddressMap().
	 */
	efi_memmap_unmap();
	if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
		pr_err("Failed to remap late EFI memory map\n");
		goto err;
	}
	if (efi_enabled(EFI_DBG)) {
		pr_info("EFI runtime memory map:\n");
		efi_print_memmap();
	}
	if (efi_setup_page_tables(pa, 1 << pg_shift))
		goto err;
	efi_sync_low_kernel_mappings();
	status = efi_set_virtual_address_map(efi.memmap.desc_size * count,
					     efi.memmap.desc_size,
					     efi.memmap.desc_version,
					     (efi_memory_desc_t *)pa,
					     efi_systab_phys);
	if (status != EFI_SUCCESS) {
		pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n",
		       status);
		goto err;
	}
	efi_check_for_embedded_firmwares();
	efi_free_boot_services();
	if (!efi_is_mixed())
		efi_native_runtime_setup();
	else
		efi_thunk_runtime_setup();
	/*
	 * Apply more restrictive page table mapping attributes now that
	 * SVAM() has been called and the firmware has performed all
	 * necessary relocation fixups for the new virtual addresses.
	 */
	efi_runtime_update_mappings();
	/* clean DUMMY object */
	efi_delete_dummy_variable();
	return;
err:
	clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
}
void __init efi_enter_virtual_mode(void)
{
	if (efi_enabled(EFI_PARAVIRT))
		return;
	efi.runtime = (efi_runtime_services_t *)efi_runtime;
	if (efi_setup)
		kexec_enter_virtual_mode();
	else
		__efi_enter_virtual_mode();
	efi_dump_pagetable();
}
bool efi_is_table_address(unsigned long phys_addr)
{
	unsigned int i;
	if (phys_addr == EFI_INVALID_TABLE_ADDR)
		return false;
	for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
		if (*(efi_tables[i]) == phys_addr)
			return true;
	return false;
}
char *efi_systab_show_arch(char *str)
{
	if (uga_phys != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "UGA=0x%lx\n", uga_phys);
	return str;
}
#define EFI_FIELD(var) efi_ ## var
#define EFI_ATTR_SHOW(name) \
static ssize_t name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
{ \
	return sprintf(buf, "0x%lx\n", EFI_FIELD(name)); \
}
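/*
 * For example, EFI_ATTR_SHOW(fw_vendor) below expands to a fw_vendor_show()
 * handler that prints the physical address stored in efi_fw_vendor.
 */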
EFI_ATTR_SHOW(fw_vendor);
EFI_ATTR_SHOW(runtime);
EFI_ATTR_SHOW(config_table);
struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor);
struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime);
struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table);
umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
	if (attr == &efi_attr_fw_vendor.attr) {
		if (efi_enabled(EFI_PARAVIRT) ||
				efi_fw_vendor == EFI_INVALID_TABLE_ADDR)
			return 0;
	} else if (attr == &efi_attr_runtime.attr) {
		if (efi_runtime == EFI_INVALID_TABLE_ADDR)
			return 0;
	} else if (attr == &efi_attr_config_table.attr) {
		if (efi_config_table == EFI_INVALID_TABLE_ADDR)
			return 0;
	}
	return attr->mode;
}
 | 
	linux-master | 
	arch/x86/platform/efi/efi.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "efi: " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/efi.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/uv/uv.h>
#include <asm/cpu_device_id.h>
#include <asm/realmode.h>
#include <asm/reboot.h>
#define EFI_MIN_RESERVE 5120
#define EFI_DUMMY_GUID \
	EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9)
#define QUARK_CSH_SIGNATURE		0x5f435348	/* _CSH */
#define QUARK_SECURITY_HEADER_SIZE	0x400
/*
 * Header prepended to the standard EFI capsule on Quark systems that are
 * based on the Intel firmware BSP.
 * @csh_signature:	Unique identifier to sanity check signed module
 * 			presence ("_CSH").
 * @version:		Current version of CSH used. Should be one for Quark A0.
 * @modulesize:		Size of the entire module including the module header
 * 			and payload.
 * @security_version_number_index: Index of SVN to use for validation of signed
 * 			module.
 * @security_version_number: Used to prevent rollback of modules.
 * @rsvd_module_id:	Currently unused for Clanton (Quark).
 * @rsvd_module_vendor:	Vendor Identifier. For Intel products value is
 * 			0x00008086.
 * @rsvd_date:		BCD representation of build date as yyyymmdd, where
 * 			yyyy=4 digit year, mm=1-12, dd=1-31.
 * @headersize:		Total length of the header including any padding
 * 			optionally added by the signing tool.
 * @hash_algo:		What Hash is used in the module signing.
 * @cryp_algo:		What Crypto is used in the module signing.
 * @keysize:		Total length of the key data including any padding
 * 			optionally added by the signing tool.
 * @signaturesize:	Total length of the signature including any padding
 * 			optionally added by the signing tool.
 * @rsvd_next_header:	32-bit pointer to the next Secure Boot Module in the
 * 			chain, if there is a next header.
 * @rsvd:		Reserved, padding structure to required size.
 *
 * See also QuartSecurityHeader_t in
 * Quark_EDKII_v1.2.1.1/QuarkPlatformPkg/Include/QuarkBootRom.h
 * from https://downloadcenter.intel.com/download/23197/Intel-Quark-SoC-X1000-Board-Support-Package-BSP
 */
struct quark_security_header {
	u32 csh_signature;
	u32 version;
	u32 modulesize;
	u32 security_version_number_index;
	u32 security_version_number;
	u32 rsvd_module_id;
	u32 rsvd_module_vendor;
	u32 rsvd_date;
	u32 headersize;
	u32 hash_algo;
	u32 cryp_algo;
	u32 keysize;
	u32 signaturesize;
	u32 rsvd_next_header;
	u32 rsvd[2];
};
static const efi_char16_t efi_dummy_name[] = L"DUMMY";
static bool efi_no_storage_paranoia;
/*
 * Some firmware implementations refuse to boot if there's insufficient
 * space in the variable store. The implementation of garbage collection
 * in some FW versions causes stale (deleted) variables to take up space
 * longer than intended and space is only freed once the store becomes
 * almost completely full.
 *
 * Enabling this option disables the space checks in
 * efi_query_variable_store() and forces garbage collection.
 *
 * Only enable this option if deleting EFI variables does not free up
 * space in your variable store, e.g. if despite deleting variables
 * you're unable to create new ones.
 */
static int __init setup_storage_paranoia(char *arg)
{
	efi_no_storage_paranoia = true;
	return 0;
}
early_param("efi_no_storage_paranoia", setup_storage_paranoia);
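/*
 * Example: booting with "efi_no_storage_paranoia" on the kernel command
 * line sets the flag above and disables the free-space checks in
 * efi_query_variable_store() below.
 */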
/*
 * Deleting the dummy variable kicks off garbage collection.
 */
void efi_delete_dummy_variable(void)
{
	efi.set_variable_nonblocking((efi_char16_t *)efi_dummy_name,
				     &EFI_DUMMY_GUID,
				     EFI_VARIABLE_NON_VOLATILE |
				     EFI_VARIABLE_BOOTSERVICE_ACCESS |
				     EFI_VARIABLE_RUNTIME_ACCESS, 0, NULL);
}
u64 efivar_reserved_space(void)
{
	if (efi_no_storage_paranoia)
		return 0;
	return EFI_MIN_RESERVE;
}
EXPORT_SYMBOL_GPL(efivar_reserved_space);
/*
 * In the nonblocking case we do not attempt to perform garbage
 * collection if we do not have enough free space. Rather, we do the
 * bare minimum check and give up immediately if the available space
 * is below EFI_MIN_RESERVE.
 *
 * This function is intended to be small and simple because it is
 * invoked from crash handler paths.
 */
static efi_status_t
query_variable_store_nonblocking(u32 attributes, unsigned long size)
{
	efi_status_t status;
	u64 storage_size, remaining_size, max_size;
	status = efi.query_variable_info_nonblocking(attributes, &storage_size,
						     &remaining_size,
						     &max_size);
	if (status != EFI_SUCCESS)
		return status;
	if (remaining_size - size < EFI_MIN_RESERVE)
		return EFI_OUT_OF_RESOURCES;
	return EFI_SUCCESS;
}
/*
 * Some firmware implementations refuse to boot if there's insufficient space
 * in the variable store. Ensure that we never use more than a safe limit.
 *
 * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
 * store.
 */
efi_status_t efi_query_variable_store(u32 attributes, unsigned long size,
				      bool nonblocking)
{
	efi_status_t status;
	u64 storage_size, remaining_size, max_size;
	if (!(attributes & EFI_VARIABLE_NON_VOLATILE))
		return 0;
	if (nonblocking)
		return query_variable_store_nonblocking(attributes, size);
	status = efi.query_variable_info(attributes, &storage_size,
					 &remaining_size, &max_size);
	if (status != EFI_SUCCESS)
		return status;
	/*
	 * We guard against that by refusing the write if permitting it would
	 * reduce the available space to under 5KB (EFI_MIN_RESERVE). This
	 * figure was provided by Samsung, so it should be safe.
	 */
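	/*
	 * Worked example: with remaining_size = 6000 bytes and a 2000-byte
	 * write, 6000 - 2000 = 4000 < EFI_MIN_RESERVE (5120), so the dummy
	 * variable write below is used to force garbage collection first.
	 */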
	if ((remaining_size - size < EFI_MIN_RESERVE) &&
		!efi_no_storage_paranoia) {
		/*
		 * Triggering garbage collection may require that the firmware
		 * generate a real EFI_OUT_OF_RESOURCES error. We can force
		 * that by attempting to use more space than is available.
		 */
		unsigned long dummy_size = remaining_size + 1024;
		void *dummy = kzalloc(dummy_size, GFP_KERNEL);
		if (!dummy)
			return EFI_OUT_OF_RESOURCES;
		status = efi.set_variable((efi_char16_t *)efi_dummy_name,
					  &EFI_DUMMY_GUID,
					  EFI_VARIABLE_NON_VOLATILE |
					  EFI_VARIABLE_BOOTSERVICE_ACCESS |
					  EFI_VARIABLE_RUNTIME_ACCESS,
					  dummy_size, dummy);
		if (status == EFI_SUCCESS) {
			/*
			 * This should have failed, so if it didn't make sure
			 * that we delete it...
			 */
			efi_delete_dummy_variable();
		}
		kfree(dummy);
		/*
		 * The runtime code may now have triggered a garbage collection
		 * run, so check the variable info again
		 */
		status = efi.query_variable_info(attributes, &storage_size,
						 &remaining_size, &max_size);
		if (status != EFI_SUCCESS)
			return status;
		/*
		 * There still isn't enough room, so return an error
		 */
		if (remaining_size - size < EFI_MIN_RESERVE)
			return EFI_OUT_OF_RESOURCES;
	}
	return EFI_SUCCESS;
}
EXPORT_SYMBOL_GPL(efi_query_variable_store);
/*
 * The UEFI specification makes it clear that the operating system is
 * free to do whatever it wants with boot services code after
 * ExitBootServices() has been called. Ignoring this recommendation, a
 * significant number of EFI implementations continue calling into boot
 * services code (SetVirtualAddressMap). In order to work around such
 * buggy implementations, we reserve the boot services regions during EFI
 * init and make sure they stay executable. Then, after
 * SetVirtualAddressMap(), they are discarded.
 *
 * However, some boot services regions contain data that is required
 * by drivers, so we need to track which memory ranges can never be
 * freed. This is done by tagging those regions with the
 * EFI_MEMORY_RUNTIME attribute.
 *
 * Any driver that wants to mark a region as reserved must use
 * efi_mem_reserve() which will insert a new EFI memory descriptor
 * into efi.memmap (splitting existing regions if necessary) and tag
 * it with EFI_MEMORY_RUNTIME.
 */
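/*
 * A minimal usage sketch (with hypothetical 'phys' and 'len' values): a
 * driver that needs a boot services data region preserved would call
 *
 *	efi_mem_reserve(phys, len);
 *
 * which reaches the architecture hook below.
 */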
void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
{
	struct efi_memory_map_data data = { 0 };
	struct efi_mem_range mr;
	efi_memory_desc_t md;
	int num_entries;
	void *new;
	if (efi_mem_desc_lookup(addr, &md) ||
	    md.type != EFI_BOOT_SERVICES_DATA) {
		pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr);
		return;
	}
	if (addr + size > md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT)) {
		pr_err("Region spans EFI memory descriptors, %pa\n", &addr);
		return;
	}
	size += addr % EFI_PAGE_SIZE;
	size = round_up(size, EFI_PAGE_SIZE);
	addr = round_down(addr, EFI_PAGE_SIZE);
	mr.range.start = addr;
	mr.range.end = addr + size - 1;
	mr.attribute = md.attribute | EFI_MEMORY_RUNTIME;
	num_entries = efi_memmap_split_count(&md, &mr.range);
	num_entries += efi.memmap.nr_map;
	if (efi_memmap_alloc(num_entries, &data) != 0) {
		pr_err("Could not allocate boot services memmap\n");
		return;
	}
	new = early_memremap_prot(data.phys_map, data.size,
				  pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL)));
	if (!new) {
		pr_err("Failed to map new boot services memmap\n");
		return;
	}
	efi_memmap_insert(&efi.memmap, new, &mr);
	early_memunmap(new, data.size);
	efi_memmap_install(&data);
	e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
	e820__update_table(e820_table);
}
/*
 * Helper function for efi_reserve_boot_services() to figure out if we
 * can free regions in efi_free_boot_services().
 *
 * Use this function to ensure we do not free regions owned by somebody
 * else. We must only reserve (and then free) regions:
 *
 * - Not within any part of the kernel
 * - Not the BIOS reserved area (E820_TYPE_RESERVED, E820_TYPE_NVS, etc)
 */
static __init bool can_free_region(u64 start, u64 size)
{
	if (start + size > __pa_symbol(_text) && start <= __pa_symbol(_end))
		return false;
	if (!e820__mapped_all(start, start+size, E820_TYPE_RAM))
		return false;
	return true;
}
void __init efi_reserve_boot_services(void)
{
	efi_memory_desc_t *md;
	if (!efi_enabled(EFI_MEMMAP))
		return;
	for_each_efi_memory_desc(md) {
		u64 start = md->phys_addr;
		u64 size = md->num_pages << EFI_PAGE_SHIFT;
		bool already_reserved;
		if (md->type != EFI_BOOT_SERVICES_CODE &&
		    md->type != EFI_BOOT_SERVICES_DATA)
			continue;
		already_reserved = memblock_is_region_reserved(start, size);
		/*
		 * Because the following memblock_reserve() is paired
		 * with memblock_free_late() for this region in
		 * efi_free_boot_services(), we must be extremely
		 * careful not to reserve, and subsequently free,
		 * critical regions of memory (like the kernel image) or
		 * those regions that somebody else has already
		 * reserved.
		 *
		 * A good example of a critical region that must not be
		 * freed is page zero (the first 4KB of memory), which may
		 * contain boot services code/data but is marked
		 * E820_TYPE_RESERVED by trim_bios_range().
		 */
		if (!already_reserved) {
			memblock_reserve(start, size);
			/*
			 * If we are the first to reserve the region, no
			 * one else cares about it. We own it and can
			 * free it later.
			 */
			if (can_free_region(start, size))
				continue;
		}
		/*
		 * We don't own the region. We must not free it.
		 *
		 * Setting this bit for a boot services region really
		 * doesn't make sense as far as the firmware is
		 * concerned, but it does provide us with a way to tag
		 * those regions that must not be paired with
		 * memblock_free_late().
		 */
		md->attribute |= EFI_MEMORY_RUNTIME;
	}
}
/*
 * Apart from having VA mappings for EFI boot services code/data regions,
 * (duplicate) 1:1 mappings were also created as a quirk for buggy firmware. So,
 * unmap both 1:1 and VA mappings.
 */
static void __init efi_unmap_pages(efi_memory_desc_t *md)
{
	pgd_t *pgd = efi_mm.pgd;
	u64 pa = md->phys_addr;
	u64 va = md->virt_addr;
	/*
	 * EFI mixed mode has all RAM mapped to access arguments while making
	 * EFI runtime calls, hence don't unmap EFI boot services code/data
	 * regions.
	 */
	if (efi_is_mixed())
		return;
	if (kernel_unmap_pages_in_pgd(pgd, pa, md->num_pages))
		pr_err("Failed to unmap 1:1 mapping for 0x%llx\n", pa);
	if (kernel_unmap_pages_in_pgd(pgd, va, md->num_pages))
		pr_err("Failed to unmap VA mapping for 0x%llx\n", va);
}
void __init efi_free_boot_services(void)
{
	struct efi_memory_map_data data = { 0 };
	efi_memory_desc_t *md;
	int num_entries = 0;
	void *new, *new_md;
	/* Keep all regions for /sys/kernel/debug/efi */
	if (efi_enabled(EFI_DBG))
		return;
	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
		size_t rm_size;
		if (md->type != EFI_BOOT_SERVICES_CODE &&
		    md->type != EFI_BOOT_SERVICES_DATA) {
			num_entries++;
			continue;
		}
		/* Do not free, someone else owns it: */
		if (md->attribute & EFI_MEMORY_RUNTIME) {
			num_entries++;
			continue;
		}
		/*
		 * Before calling set_virtual_address_map(), EFI boot services
		 * code/data regions were mapped as a quirk for buggy firmware.
		 * Unmap them from efi_pgd before freeing them up.
		 */
		efi_unmap_pages(md);
		/*
		 * Nasty quirk: if all sub-1MB memory is used for boot
		 * services, we can get here without having allocated the
		 * real mode trampoline.  It's too late to hand boot services
		 * memory back to the memblock allocator, so instead
		 * try to manually allocate the trampoline if needed.
		 *
		 * I've seen this on a Dell XPS 13 9350 with firmware
		 * 1.4.4 with SGX enabled booting Linux via Fedora 24's
		 * grub2-efi on a hard disk.  (And no, I don't know why
		 * this happened, but Linux should still try to boot rather
		 * than panicking early.)
		 */
		rm_size = real_mode_size_needed();
		if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
			set_real_mode_mem(start);
			start += rm_size;
			size -= rm_size;
		}
		/*
		 * Don't free memory under 1M for two reasons:
		 * - BIOS might clobber it
		 * - Crash kernel needs it to be reserved
		 */
		if (start + size < SZ_1M)
			continue;
		if (start < SZ_1M) {
			size -= (SZ_1M - start);
			start = SZ_1M;
		}
		memblock_free_late(start, size);
	}
	if (!num_entries)
		return;
	if (efi_memmap_alloc(num_entries, &data) != 0) {
		pr_err("Failed to allocate new EFI memmap\n");
		return;
	}
	new = memremap(data.phys_map, data.size, MEMREMAP_WB);
	if (!new) {
		pr_err("Failed to map new EFI memmap\n");
		return;
	}
	/*
	 * Build a new EFI memmap that excludes any boot services
	 * regions that are not tagged EFI_MEMORY_RUNTIME, since those
	 * regions have now been freed.
	 */
	new_md = new;
	for_each_efi_memory_desc(md) {
		if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
		    (md->type == EFI_BOOT_SERVICES_CODE ||
		     md->type == EFI_BOOT_SERVICES_DATA))
			continue;
		memcpy(new_md, md, efi.memmap.desc_size);
		new_md += efi.memmap.desc_size;
	}
	memunmap(new);
	if (efi_memmap_install(&data) != 0) {
		pr_err("Could not install new EFI memmap\n");
		return;
	}
}
/*
 * A number of config table entries get remapped to virtual addresses
 * after entering EFI virtual mode. However, the kexec kernel requires
 * their physical addresses therefore we pass them via setup_data and
 * correct those entries to their respective physical addresses here.
 *
 * Currently this only handles SMBIOS, which is necessary for some
 * firmware implementations.
 */
int __init efi_reuse_config(u64 tables, int nr_tables)
{
	int i, sz, ret = 0;
	void *p, *tablep;
	struct efi_setup_data *data;
	if (nr_tables == 0)
		return 0;
	if (!efi_setup)
		return 0;
	if (!efi_enabled(EFI_64BIT))
		return 0;
	data = early_memremap(efi_setup, sizeof(*data));
	if (!data) {
		ret = -ENOMEM;
		goto out;
	}
	if (!data->smbios)
		goto out_memremap;
	sz = sizeof(efi_config_table_64_t);
	p = tablep = early_memremap(tables, nr_tables * sz);
	if (!p) {
		pr_err("Could not map Configuration table!\n");
		ret = -ENOMEM;
		goto out_memremap;
	}
	for (i = 0; i < nr_tables; i++) {
		efi_guid_t guid;
		guid = ((efi_config_table_64_t *)p)->guid;
		if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID))
			((efi_config_table_64_t *)p)->table = data->smbios;
		p += sz;
	}
	early_memunmap(tablep, nr_tables * sz);
out_memremap:
	early_memunmap(data, sizeof(*data));
out:
	return ret;
}
void __init efi_apply_memmap_quirks(void)
{
	/*
	 * Now that setup is done, unmap the EFI memory map on mismatched
	 * firmware/kernel architectures, since there is no support for
	 * runtime services.
	 */
	if (!efi_runtime_supported()) {
		pr_info("Setup done, disabling due to 32/64-bit mismatch\n");
		efi_memmap_unmap();
	}
}
/*
 * For most modern platforms the preferred method of powering off is via
 * ACPI. However, there are some that are known to require the use of
 * EFI runtime services and for which ACPI does not work at all.
 *
 * Using EFI is a last resort, to be used only if no other option
 * exists.
 */
bool efi_reboot_required(void)
{
	if (!acpi_gbl_reduced_hardware)
		return false;
	efi_reboot_quirk_mode = EFI_RESET_WARM;
	return true;
}
bool efi_poweroff_required(void)
{
	return acpi_gbl_reduced_hardware || acpi_no_s5;
}
#ifdef CONFIG_EFI_CAPSULE_QUIRK_QUARK_CSH
static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
				  size_t hdr_bytes)
{
	struct quark_security_header *csh = *pkbuff;
	/* Only process data block that is larger than the security header */
	if (hdr_bytes < sizeof(struct quark_security_header))
		return 0;
	if (csh->csh_signature != QUARK_CSH_SIGNATURE ||
	    csh->headersize != QUARK_SECURITY_HEADER_SIZE)
		return 1;
	/* Only process data block if EFI header is included */
	if (hdr_bytes < QUARK_SECURITY_HEADER_SIZE +
			sizeof(efi_capsule_header_t))
		return 0;
	pr_debug("Quark security header detected\n");
	if (csh->rsvd_next_header != 0) {
		pr_err("multiple Quark security headers not supported\n");
		return -EINVAL;
	}
	*pkbuff += csh->headersize;
	cap_info->total_size = csh->headersize;
	/*
	 * Update the first page pointer to skip over the CSH header.
	 */
	cap_info->phys[0] += csh->headersize;
	/*
	 * cap_info->capsule should point at a virtual mapping of the entire
	 * capsule, starting at the capsule header. Our image has the Quark
	 * security header prepended, so we cannot rely on the default vmap()
	 * mapping created by the generic capsule code.
	 * Given that the Quark firmware does not appear to care about the
	 * virtual mapping, let's just point cap_info->capsule at our copy
	 * of the capsule header.
	 */
	cap_info->capsule = &cap_info->header;
	return 1;
}
static const struct x86_cpu_id efi_capsule_quirk_ids[] = {
	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000,
				   &qrk_capsule_setup_info),
	{ }
};
int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
			   size_t hdr_bytes)
{
	int (*quirk_handler)(struct capsule_info *, void **, size_t);
	const struct x86_cpu_id *id;
	int ret;
	if (hdr_bytes < sizeof(efi_capsule_header_t))
		return 0;
	cap_info->total_size = 0;
	id = x86_match_cpu(efi_capsule_quirk_ids);
	if (id) {
		/*
		 * The quirk handler is supposed to return
		 *  - a value > 0 if the setup should continue, after advancing
		 *    kbuff as needed
		 *  - 0 if not enough hdr_bytes are available yet
		 *  - a negative error code otherwise
		 */
		quirk_handler = (typeof(quirk_handler))id->driver_data;
		ret = quirk_handler(cap_info, &kbuff, hdr_bytes);
		if (ret <= 0)
			return ret;
	}
	memcpy(&cap_info->header, kbuff, sizeof(cap_info->header));
	cap_info->total_size += cap_info->header.imagesize;
	return __efi_capsule_setup_info(cap_info);
}
#endif
/*
 * If any access by any efi runtime service causes a page fault, then,
 * 1. If it's efi_reset_system(), reboot through BIOS.
 * 2. If any other efi runtime service, then
 *    a. Return error status to the efi caller process.
 *    b. Disable EFI Runtime Services forever and
 *    c. Freeze efi_rts_wq and schedule new process.
 *
 * @return: Returns only if the page fault is not handled; this function
 * never returns if the page fault is handled successfully.
 */
void efi_crash_gracefully_on_page_fault(unsigned long phys_addr)
{
	if (!IS_ENABLED(CONFIG_X86_64))
		return;
	/*
	 * If we get an interrupt/NMI while processing an EFI runtime service
	 * then this is a regular OOPS, not an EFI failure.
	 */
	if (in_interrupt())
		return;
	/*
	 * Make sure that an efi runtime service caused the page fault.
	 * READ_ONCE() because we might be OOPSing in a different thread,
	 * and we don't want to trip KTSAN while trying to OOPS.
	 */
	if (READ_ONCE(efi_rts_work.efi_rts_id) == EFI_NONE ||
	    current_work() != &efi_rts_work.work)
		return;
	/*
	 * Address range 0x0000 - 0x0fff is always mapped in the efi_pgd, so
	 * page faulting on these addresses isn't expected.
	 */
	if (phys_addr <= 0x0fff)
		return;
	/*
	 * Print stack trace as it might be useful to know which EFI Runtime
	 * Service is buggy.
	 */
	WARN(1, FW_BUG "Page fault caused by firmware at PA: 0x%lx\n",
	     phys_addr);
	/*
	 * Buggy efi_reset_system() is handled differently from other EFI
	 * Runtime Services as it doesn't use efi_rts_wq. Although
	 * native_machine_emergency_restart() says that machine_real_restart()
	 * could fail, it's better not to complicate this fault handler,
	 * because this case occurs *very* rarely and hence could be improved
	 * on an as-needed basis.
	 */
	if (efi_rts_work.efi_rts_id == EFI_RESET_SYSTEM) {
		pr_info("efi_reset_system() buggy! Reboot through BIOS\n");
		machine_real_restart(MRR_BIOS);
		return;
	}
	/*
	 * Before calling EFI Runtime Service, the kernel has switched the
	 * calling process to efi_mm. Hence, switch back to task_mm.
	 */
	arch_efi_call_virt_teardown();
	/* Signal error status to the efi caller process */
	efi_rts_work.status = EFI_ABORTED;
	complete(&efi_rts_work.efi_rts_comp);
	clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
	pr_info("Froze efi_rts_wq and disabled EFI Runtime Services\n");
	/*
	 * Call schedule() in an infinite loop, so that any spurious wake ups
	 * will never run efi_rts_wq again.
	 */
	for (;;) {
		set_current_state(TASK_IDLE);
		schedule();
	}
}
 | 
	linux-master | 
	arch/x86/platform/efi/quirks.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Red Hat, Inc., Dave Young <[email protected]>
 */
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/efi.h>
#include <linux/slab.h>
#include <asm/efi.h>
#include <asm/setup.h>
struct efi_runtime_map_entry {
	efi_memory_desc_t md;
	struct kobject kobj;   /* kobject for each entry */
};
static struct efi_runtime_map_entry **map_entries;
struct map_attribute {
	struct attribute attr;
	ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf);
};
static inline struct map_attribute *to_map_attr(struct attribute *attr)
{
	return container_of(attr, struct map_attribute, attr);
}
static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type);
}
#define EFI_RUNTIME_FIELD(var) entry->md.var
#define EFI_RUNTIME_U64_ATTR_SHOW(name) \
static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \
{ \
	return snprintf(buf, PAGE_SIZE, "0x%llx\n", EFI_RUNTIME_FIELD(name)); \
}
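/*
 * For example, EFI_RUNTIME_U64_ATTR_SHOW(phys_addr) below expands to a
 * phys_addr_show() handler that prints entry->md.phys_addr.
 */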
EFI_RUNTIME_U64_ATTR_SHOW(phys_addr);
EFI_RUNTIME_U64_ATTR_SHOW(virt_addr);
EFI_RUNTIME_U64_ATTR_SHOW(num_pages);
EFI_RUNTIME_U64_ATTR_SHOW(attribute);
static inline struct efi_runtime_map_entry *to_map_entry(struct kobject *kobj)
{
	return container_of(kobj, struct efi_runtime_map_entry, kobj);
}
static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	struct efi_runtime_map_entry *entry = to_map_entry(kobj);
	struct map_attribute *map_attr = to_map_attr(attr);
	return map_attr->show(entry, buf);
}
static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);
/*
 * These are default attributes that are added for every memmap entry.
 */
static struct attribute *def_attrs[] = {
	&map_type_attr.attr,
	&map_phys_addr_attr.attr,
	&map_virt_addr_attr.attr,
	&map_num_pages_attr.attr,
	&map_attribute_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(def);
static const struct sysfs_ops map_attr_ops = {
	.show = map_attr_show,
};
static void map_release(struct kobject *kobj)
{
	struct efi_runtime_map_entry *entry;
	entry = to_map_entry(kobj);
	kfree(entry);
}
static const struct kobj_type __refconst map_ktype = {
	.sysfs_ops	= &map_attr_ops,
	.default_groups	= def_groups,
	.release	= map_release,
};
static struct kset *map_kset;
static struct efi_runtime_map_entry *
add_sysfs_runtime_map_entry(struct kobject *kobj, int nr,
			    efi_memory_desc_t *md)
{
	int ret;
	struct efi_runtime_map_entry *entry;
	if (!map_kset) {
		map_kset = kset_create_and_add("runtime-map", NULL, kobj);
		if (!map_kset)
			return ERR_PTR(-ENOMEM);
	}
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		kset_unregister(map_kset);
		map_kset = NULL;
		return ERR_PTR(-ENOMEM);
	}
	memcpy(&entry->md, md, sizeof(efi_memory_desc_t));
	kobject_init(&entry->kobj, &map_ktype);
	entry->kobj.kset = map_kset;
	ret = kobject_add(&entry->kobj, NULL, "%d", nr);
	if (ret) {
		kobject_put(&entry->kobj);
		kset_unregister(map_kset);
		map_kset = NULL;
		return ERR_PTR(ret);
	}
	return entry;
}
int efi_get_runtime_map_size(void)
{
	return efi.memmap.nr_map * efi.memmap.desc_size;
}
int efi_get_runtime_map_desc_size(void)
{
	return efi.memmap.desc_size;
}
int efi_runtime_map_copy(void *buf, size_t bufsz)
{
	size_t sz = efi_get_runtime_map_size();
	if (sz > bufsz)
		sz = bufsz;
	memcpy(buf, efi.memmap.map, sz);
	return 0;
}
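/*
 * The three helpers above are used by the kexec path to size and copy the
 * EFI runtime map into the setup_data passed to the next kernel.
 */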
static int __init efi_runtime_map_init(void)
{
	int i, j, ret = 0;
	struct efi_runtime_map_entry *entry;
	efi_memory_desc_t *md;
	if (!efi_enabled(EFI_MEMMAP) || !efi_kobj)
		return 0;
	map_entries = kcalloc(efi.memmap.nr_map, sizeof(entry), GFP_KERNEL);
	if (!map_entries) {
		ret = -ENOMEM;
		goto out;
	}
	i = 0;
	for_each_efi_memory_desc(md) {
		entry = add_sysfs_runtime_map_entry(efi_kobj, i, md);
		if (IS_ERR(entry)) {
			ret = PTR_ERR(entry);
			goto out_add_entry;
		}
		*(map_entries + i++) = entry;
	}
	return 0;
out_add_entry:
	for (j = i - 1; j >= 0; j--) {
		entry = *(map_entries + j);
		kobject_put(&entry->kobj);
	}
out:
	return ret;
}
subsys_initcall_sync(efi_runtime_map_init);
 | 
	linux-master | 
	arch/x86/platform/efi/runtime-map.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <[email protected]>
 * Copyright (C) 1999-2002 Hewlett-Packard Co.
 *	David Mosberger-Tang <[email protected]>
 *	Stephane Eranian <[email protected]>
 *
 * All EFI Runtime Services are not implemented yet as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <[email protected]>
 *	Skip non-WB memory and ignore empty memory ranges.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/efi.h>
#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/page.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/efi.h>
void __init efi_map_region(efi_memory_desc_t *md)
{
	u64 start_pfn, end_pfn, end;
	unsigned long size;
	void *va;
	start_pfn	= PFN_DOWN(md->phys_addr);
	size		= md->num_pages << PAGE_SHIFT;
	end		= md->phys_addr + size;
	end_pfn 	= PFN_UP(end);
	if (pfn_range_is_mapped(start_pfn, end_pfn)) {
		va = __va(md->phys_addr);
		if (!(md->attribute & EFI_MEMORY_WB))
			set_memory_uc((unsigned long)va, md->num_pages);
	} else {
		va = ioremap_cache(md->phys_addr, size);
	}
	md->virt_addr = (unsigned long)va;
	if (!va)
		pr_err("ioremap of 0x%llX failed!\n", md->phys_addr);
}
/*
 * To make EFI calls into runtime services in physical addressing mode, we
 * need a prolog/epilog around the invocation to claim the EFI runtime
 * service handler exclusively and to duplicate a memory mapping in low
 * memory space, say 0 - 3G.
 */
int __init efi_alloc_page_tables(void)
{
	return 0;
}
void efi_sync_low_kernel_mappings(void) {}
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	ptdump_walk_pgd_level(NULL, &init_mm);
#endif
}
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	return 0;
}
void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
efi_status_t efi_call_svam(efi_runtime_services_t * const *,
			   u32, u32, u32, void *, u32);
efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size,
						unsigned long descriptor_size,
						u32 descriptor_version,
						efi_memory_desc_t *virtual_map,
						unsigned long systab_phys)
{
	const efi_system_table_t *systab = (efi_system_table_t *)systab_phys;
	struct desc_ptr gdt_descr;
	efi_status_t status;
	unsigned long flags;
	pgd_t *save_pgd;
	/* Current pgd is swapper_pg_dir, we'll restore it later: */
	save_pgd = swapper_pg_dir;
	load_cr3(initial_page_table);
	__flush_tlb_all();
	gdt_descr.address = get_cpu_gdt_paddr(0);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Disable interrupts around EFI calls: */
	local_irq_save(flags);
	status = efi_call_svam(&systab->runtime,
			       memory_map_size, descriptor_size,
			       descriptor_version, virtual_map,
			       __pa(&efi.runtime));
	local_irq_restore(flags);
	load_fixmap_gdt(0);
	load_cr3(save_pgd);
	__flush_tlb_all();
	return status;
}
void __init efi_runtime_update_mappings(void)
{
	if (__supported_pte_mask & _PAGE_NX) {
		efi_memory_desc_t *md;
		/* Make EFI runtime service code area executable */
		for_each_efi_memory_desc(md) {
			if (md->type != EFI_RUNTIME_SERVICES_CODE)
				continue;
			set_memory_x(md->virt_addr, md->num_pages);
		}
	}
}
void arch_efi_call_virt_setup(void)
{
	efi_fpu_begin();
	firmware_restrict_branch_speculation_start();
}
void arch_efi_call_virt_teardown(void)
{
	firmware_restrict_branch_speculation_end();
	efi_fpu_end();
}
 | 
	linux-master | 
	arch/x86/platform/efi/efi_32.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel CE4100  platform specific setup code
 *
 * (C) Copyright 2010 Intel Corporation
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/reboot.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
#include <asm/ce4100.h>
#include <asm/prom.h>
#include <asm/setup.h>
#include <asm/i8259.h>
#include <asm/io.h>
#include <asm/io_apic.h>
#include <asm/emergency-restart.h>
/*
 * The CE4100 platform has an internal 8051 Microcontroller which is
 * responsible for signaling to the external Power Management Unit the
 * intention to reset, reboot or power off the system. This 8051 device has
 * its command register mapped at I/O port 0xcf9 and the value 0x4 is used
 * to power off the system.
 */
static void ce4100_power_off(void)
{
	outb(0x4, 0xcf9);
}
#ifdef CONFIG_SERIAL_8250
static unsigned int mem_serial_in(struct uart_port *p, int offset)
{
	offset = offset << p->regshift;
	return readl(p->membase + offset);
}
/*
 * The UART Tx interrupts are not set under some conditions and therefore
 * serial transmission hangs. This is a silicon issue and has not been root
 * caused. The workaround checks the UART_LSR_THRE and UART_LSR_TEMT bits of
 * the LSR register in the interrupt handler; if at least one of these two
 * bits is set, the transmit request is processed. Without this workaround,
 * serial transmission may hang. This workaround is for errata number 9 in
 * Errata - B step.
 */
static unsigned int ce4100_mem_serial_in(struct uart_port *p, int offset)
{
	unsigned int ret, ier, lsr;
	if (offset == UART_IIR) {
		offset = offset << p->regshift;
		ret = readl(p->membase + offset);
		if (ret & UART_IIR_NO_INT) {
			/* see if the TX interrupt should really have been set */
			ier = mem_serial_in(p, UART_IER);
			/* see if the UART's XMIT interrupt is enabled */
			if (ier & UART_IER_THRI) {
				lsr = mem_serial_in(p, UART_LSR);
				/* now check to see if the UART should be
				   generating an interrupt (but isn't) */
				if (lsr & (UART_LSR_THRE | UART_LSR_TEMT))
					ret &= ~UART_IIR_NO_INT;
			}
		}
	} else
		ret =  mem_serial_in(p, offset);
	return ret;
}
static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value)
{
	offset = offset << p->regshift;
	writel(value, p->membase + offset);
}
static void ce4100_serial_fixup(int port, struct uart_port *up,
	u32 *capabilities)
{
#ifdef CONFIG_EARLY_PRINTK
	/*
	 * Override the legacy port configuration that comes from
	 * asm/serial.h. Using the ioport driver and then switching to the
	 * PCI memory-mapped driver hangs the IOAPIC.
	 */
	if (up->iotype !=  UPIO_MEM32) {
		up->uartclk  = 14745600;
		up->mapbase = 0xdffe0200;
		set_fixmap_nocache(FIX_EARLYCON_MEM_BASE,
				up->mapbase & PAGE_MASK);
		up->membase =
			(void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
		up->membase += up->mapbase & ~PAGE_MASK;
		up->mapbase += port * 0x100;
		up->membase += port * 0x100;
		up->iotype   = UPIO_MEM32;
		up->regshift = 2;
		up->irq = 4;
	}
#endif
	up->iobase = 0;
	up->serial_in = ce4100_mem_serial_in;
	up->serial_out = ce4100_mem_serial_out;
	*capabilities |= (1 << 12);
}
static __init void sdv_serial_fixup(void)
{
	serial8250_set_isa_configurator(ce4100_serial_fixup);
}
#else
static inline void sdv_serial_fixup(void) {};
#endif
static void __init sdv_arch_setup(void)
{
	sdv_serial_fixup();
}
static void sdv_pci_init(void)
{
	x86_of_pci_init();
}
/*
 * CE4100 specific x86_init function overrides and early setup
 * calls.
 */
void __init x86_ce4100_early_setup(void)
{
	x86_init.oem.arch_setup = sdv_arch_setup;
	x86_init.resources.probe_roms = x86_init_noop;
	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
	x86_init.mpparse.find_smp_config = x86_init_noop;
	x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck;
	x86_init.pci.init = ce4100_pci_init;
	x86_init.pci.init_irq = sdv_pci_init;
	/*
	 * By default, the reboot method is ACPI, which is supported by the
	 * CE4100 bootloader CEFDK using FADT.ResetReg Address and ResetValue;
	 * the bootloader will, however, issue a system power off instead of
	 * a reboot. By using BOOT_KBD we ensure a proper system reboot as
	 * expected.
	 */
	reboot_type = BOOT_KBD;
	pm_power_off = ce4100_power_off;
}
 | 
	linux-master | 
	arch/x86/platform/ce4100/ce4100.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <[email protected]>
 */
#include <linux/bitfield.h>
#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
#include <asm/io_apic.h>
#include <asm/realmode.h>
#include <asm/e820/api.h>
#include <asm/desc.h>
#include <uapi/asm/vmx.h>
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define GHCB_USAGE_HYPERV_CALL	1
union hv_ghcb {
	struct ghcb ghcb;
	struct {
		u64 hypercalldata[509];
		u64 outputgpa;
		union {
			union {
				struct {
					u32 callcode        : 16;
					u32 isfast          : 1;
					u32 reserved1       : 14;
					u32 isnested        : 1;
					u32 countofelements : 12;
					u32 reserved2       : 4;
					u32 repstartindex   : 12;
					u32 reserved3       : 4;
				};
				u64 asuint64;
			} hypercallinput;
			union {
				struct {
					u16 callstatus;
					u16 reserved1;
					u32 elementsprocessed : 12;
					u32 reserved2         : 20;
				};
				u64 asuint64;
			} hypercalloutput;
		};
		u64 reserved2;
	} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);
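/*
 * The union above must be exactly HV_HYP_PAGE_SIZE bytes; the
 * BUILD_BUG_ON() in hv_ghcb_msr_read() below enforces this.
 */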
/* Only used in an SNP VM with the paravisor */
static u16 hv_ghcb_version __ro_after_init;
/* Functions only used in an SNP VM with the paravisor go here. */
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	u64 status;
	if (!hv_ghcb_pg)
		return -EFAULT;
	WARN_ON(in_nmi());
	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return -EFAULT;
	}
	hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
	hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;
	hv_ghcb->hypercall.outputgpa = (u64)output;
	hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
	hv_ghcb->hypercall.hypercallinput.callcode = control;
	if (input_size)
		memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);
	VMGEXIT();
	hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
	memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
	       sizeof(hv_ghcb->ghcb.save.valid_bitmap));
	status = hv_ghcb->hypercall.hypercalloutput.callstatus;
	local_irq_restore(flags);
	return status;
}
static inline u64 rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}
static inline void wr_ghcb_msr(u64 val)
{
	native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
}
static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
				   u64 exit_info_1, u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = hv_ghcb_version;
	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;
	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
	VMGEXIT();
	if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
		return ES_VMM_ERROR;
	else
		return ES_OK;
}
void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;
	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);
	/* Request Guest Termination from Hypervisor */
	wr_ghcb_msr(val);
	VMGEXIT();
	while (true)
		asm volatile("hlt\n" : : : "memory");
}
bool hv_ghcb_negotiate_protocol(void)
{
	u64 ghcb_gpa;
	u64 val;
	/* Save ghcb page gpa. */
	ghcb_gpa = rd_ghcb_msr();
	/* Do the GHCB protocol version negotiation */
	wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = rd_ghcb_msr();
	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;
	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;
	hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
			     GHCB_PROTOCOL_MAX);
	/* Write ghcb page back after negotiating protocol. */
	wr_ghcb_msr(ghcb_gpa);
	VMGEXIT();
	return true;
}
static void hv_ghcb_msr_write(u64 msr, u64 value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	if (!hv_ghcb_pg)
		return;
	WARN_ON(in_nmi());
	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}
	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
	ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));
	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
		pr_warn("Fail to write msr via ghcb %llx.\n", msr);
	local_irq_restore(flags);
}
static void hv_ghcb_msr_read(u64 msr, u64 *value)
{
	union hv_ghcb *hv_ghcb;
	void **ghcb_base;
	unsigned long flags;
	/* Check size of union hv_ghcb here. */
	BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);
	if (!hv_ghcb_pg)
		return;
	WARN_ON(in_nmi());
	local_irq_save(flags);
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	hv_ghcb = (union hv_ghcb *)*ghcb_base;
	if (!hv_ghcb) {
		local_irq_restore(flags);
		return;
	}
	ghcb_set_rcx(&hv_ghcb->ghcb, msr);
	if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
		pr_warn("Fail to read msr via ghcb %llx.\n", msr);
	else
		*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
			| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
	local_irq_restore(flags);
}
/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);
/* Functions only used in an SNP VM without the paravisor go here. */
#define hv_populate_vmcb_seg(seg, gdtr_base)			\
do {								\
	if (seg.selector) {					\
		seg.base = 0;					\
		seg.limit = HV_AP_SEGMENT_LIMIT;		\
		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);	\
		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
	}							\
} while (0)
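/*
 * A note on the attrib packing above (based on the AMD APM's VMCB
 * segment-attribute layout; treat the exact bit positions as an
 * assumption): byte 5 of a GDT descriptor holds type/S/DPL/P and the low
 * nibble of byte 6 holds the flags, so reading a u16 at offset 5 and
 * folding in "(attrib >> 4) & 0xF00" moves the flags nibble into bits
 * 8-11 of the packed attrib value.
 */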
static int snp_set_vmsa(void *va, bool vmsa)
{
	u64 attrs;
	/*
	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
	 * using the RMPADJUST instruction. However, for the instruction to
	 * succeed it must target the permissions of a lesser privileged
	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
	 * instruction in the AMD64 APM Volume 3).
	 */
	attrs = 1;
	if (vmsa)
		attrs |= RMPADJUST_VMSA_PAGE_BIT;
	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}
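/*
 * RMPADJUST (and therefore snp_set_vmsa() above) returns 0 on success and
 * a non-zero failure code otherwise, so callers treat any non-zero return
 * as an error.
 */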
static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
	int err;
	err = snp_set_vmsa(vmsa, false);
	if (err)
		pr_err("clear VMSA page failed (%u), leaking page\n", err);
	else
		free_page((unsigned long)vmsa);
}
int hv_snp_boot_ap(int cpu, unsigned long start_ip)
{
	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
		__get_free_page(GFP_KERNEL | __GFP_ZERO);
	struct sev_es_save_area *cur_vmsa;
	struct desc_ptr gdtr;
	u64 ret, retry = 5;
	struct hv_enable_vp_vtl *start_vp_input;
	unsigned long flags;
	if (!vmsa)
		return -ENOMEM;
	native_store_gdt(&gdtr);
	vmsa->gdtr.base = gdtr.address;
	vmsa->gdtr.limit = gdtr.size;
	asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
	asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
	asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
	vmsa->efer = native_read_msr(MSR_EFER);
	asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
	asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
	asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
	vmsa->xcr0 = 1;
	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
	vmsa->rip = (u64)secondary_startup_64_no_verify;
	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
	/*
	 * Set the SNP-specific fields for this VMSA:
	 *   VMPL level
	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
	 */
	vmsa->vmpl = 0;
	vmsa->sev_features = sev_status >> 2;
	ret = snp_set_vmsa(vmsa, true);
	if (ret) {
		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
		free_page((u64)vmsa);
		return ret;
	}
	local_irq_save(flags);
	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
	memset(start_vp_input, 0, sizeof(*start_vp_input));
	start_vp_input->partition_id = -1;
	start_vp_input->vp_index = cpu;
	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
	do {
		ret = hv_do_hypercall(HVCALL_START_VP,
				      start_vp_input, NULL);
	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
	local_irq_restore(flags);
	if (!hv_result_success(ret)) {
		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
		snp_cleanup_vmsa(vmsa);
		vmsa = NULL;
	}
	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
	/* Free up any previous VMSA page */
	if (cur_vmsa)
		snp_cleanup_vmsa(cur_vmsa);
	/* Record the current VMSA page */
	per_cpu(hv_sev_vmsa, cpu) = vmsa;
	return ret;
}
#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_AMD_MEM_ENCRYPT */
#ifdef CONFIG_INTEL_TDX_GUEST
static void hv_tdx_msr_write(u64 msr, u64 val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = EXIT_REASON_MSR_WRITE,
		.r12 = msr,
		.r13 = val,
	};
	u64 ret = __tdx_hypercall(&args);
	WARN_ONCE(ret, "Failed to emulate MSR write: %lld\n", ret);
}
static void hv_tdx_msr_read(u64 msr, u64 *val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = EXIT_REASON_MSR_READ,
		.r12 = msr,
	};
	u64 ret = __tdx_hypercall_ret(&args);
	if (WARN_ONCE(ret, "Failed to emulate MSR read: %lld\n", ret))
		*val = 0;
	else
		*val = args.r11;
}
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
	struct tdx_hypercall_args args = { };
	args.r10 = control;
	args.rdx = param1;
	args.r8  = param2;
	(void)__tdx_hypercall_ret(&args);
	return args.r11;
}
#else
static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_INTEL_TDX_GUEST */
#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void hv_ivm_msr_write(u64 msr, u64 value)
{
	if (!ms_hyperv.paravisor_present)
		return;
	if (hv_isolation_type_tdx())
		hv_tdx_msr_write(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_write(msr, value);
}
void hv_ivm_msr_read(u64 msr, u64 *value)
{
	if (!ms_hyperv.paravisor_present)
		return;
	if (hv_isolation_type_tdx())
		hv_tdx_msr_read(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_read(msr, value);
}
/*
 * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, and the
 * guest needs to make memory visible to the host via hypercall before
 * sharing memory with the host.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
			   enum hv_mem_host_visibility visibility)
{
	struct hv_gpa_range_for_visibility *input;
	u16 pages_processed;
	u64 hv_status;
	unsigned long flags;
	/* no-op if partition isolation is not enabled */
	if (!hv_is_isolation_supported())
		return 0;
	if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
		pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
			HV_MAX_MODIFY_GPA_REP_COUNT);
		return -EINVAL;
	}
	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	if (unlikely(!input)) {
		local_irq_restore(flags);
		return -EINVAL;
	}
	input->partition_id = HV_PARTITION_ID_SELF;
	input->host_visibility = visibility;
	input->reserved0 = 0;
	input->reserved1 = 0;
	memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
	hv_status = hv_do_rep_hypercall(
			HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
			0, input, &pages_processed);
	local_irq_restore(flags);
	if (hv_result_success(hv_status))
		return 0;
	else
		return -EFAULT;
}
/*
 * hv_vtom_set_host_visibility - Set specified memory visible to host.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, and the
 * guest needs to make memory visible to the host via hypercall before
 * sharing memory with the host. This function is a wrapper around
 * hv_mark_gpa_visibility() that takes a memory base and size.
 */
static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
	enum hv_mem_host_visibility visibility = enc ?
			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
	u64 *pfn_array;
	int ret = 0;
	bool result = true;
	int i, pfn;
	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!pfn_array)
		return false;
	for (i = 0, pfn = 0; i < pagecount; i++) {
		pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
		pfn++;
		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
			ret = hv_mark_gpa_visibility(pfn, pfn_array,
						     visibility);
			if (ret) {
				result = false;
				goto err_free_pfn_array;
			}
			pfn = 0;
		}
	}
 err_free_pfn_array:
	kfree(pfn_array);
	return result;
}
static bool hv_vtom_tlb_flush_required(bool private)
{
	return true;
}
static bool hv_vtom_cache_flush_required(void)
{
	return false;
}
static bool hv_is_private_mmio(u64 addr)
{
	/*
	 * Hyper-V always provides a single IO-APIC in a guest VM.
	 * When a paravisor is used, it is emulated by the paravisor
	 * in the guest context and must be mapped private.
	 */
	if (addr >= HV_IOAPIC_BASE_ADDRESS &&
	    addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
		return true;
	/* Same with a vTPM */
	if (addr >= VTPM_BASE_ADDRESS &&
	    addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
		return true;
	return false;
}
void __init hv_vtom_init(void)
{
	enum hv_isolation_type type = hv_get_isolation_type();
	switch (type) {
	case HV_ISOLATION_TYPE_VBS:
		fallthrough;
	/*
	 * By design, a VM using vTOM doesn't see the SEV setting,
	 * so SEV initialization is bypassed and sev_status isn't set.
	 * Set it here to indicate a vTOM VM.
	 *
	 * Note: if CONFIG_AMD_MEM_ENCRYPT is not set, sev_status is
	 * defined as 0ULL, to which we can't assign a value.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	case HV_ISOLATION_TYPE_SNP:
		sev_status = MSR_AMD64_SNP_VTOM;
		cc_vendor = CC_VENDOR_AMD;
		break;
#endif
	case HV_ISOLATION_TYPE_TDX:
		cc_vendor = CC_VENDOR_INTEL;
		break;
	default:
		panic("hv_vtom_init: unsupported isolation type %d\n", type);
	}
	cc_set_mask(ms_hyperv.shared_gpa_boundary);
	physical_mask &= ms_hyperv.shared_gpa_boundary - 1;
	x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
	x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
	x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;
	/* Set WB as the default cache mode. */
	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
}
#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */
enum hv_isolation_type hv_get_isolation_type(void)
{
	if (!(ms_hyperv.priv_high & HV_ISOLATION))
		return HV_ISOLATION_TYPE_NONE;
	return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);
/*
 * hv_is_isolation_supported - Check system runs in the Hyper-V
 * isolation VM.
 */
bool hv_is_isolation_supported(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return false;
	if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
		return false;
	return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}
DEFINE_STATIC_KEY_FALSE(isolation_type_snp);
/*
 * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP based
 * isolation VM.
 */
bool hv_isolation_type_snp(void)
{
	return static_branch_unlikely(&isolation_type_snp);
}
DEFINE_STATIC_KEY_FALSE(isolation_type_tdx);
/*
 * hv_isolation_type_tdx - Check if the system runs in an Intel TDX based
 * isolated VM.
 */
bool hv_isolation_type_tdx(void)
{
	return static_branch_unlikely(&isolation_type_tdx);
}
 | 
	linux-master | 
	arch/x86/hyperv/ivm.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * X86 specific Hyper-V initialization code.
 *
 * Copyright (C) 2016, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <[email protected]>
 */
#include <linux/efi.h>
#include <linux/types.h>
#include <linux/bitfield.h>
#include <linux/io.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/sev.h>
#include <asm/ibt.h>
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/idtentry.h>
#include <asm/set_memory.h>
#include <linux/kexec.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/cpuhotplug.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include <linux/highmem.h>
int hyperv_init_cpuhp;
u64 hv_current_partition_id = ~0ull;
EXPORT_SYMBOL_GPL(hv_current_partition_id);
void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);
union hv_ghcb * __percpu *hv_ghcb_pg;
/* Storage to save the hypercall page temporarily for hibernation */
static void *hv_hypercall_pg_saved;
struct hv_vp_assist_page **hv_vp_assist_page;
EXPORT_SYMBOL_GPL(hv_vp_assist_page);
static int hyperv_init_ghcb(void)
{
	u64 ghcb_gpa;
	void *ghcb_va;
	void **ghcb_base;
	if (!ms_hyperv.paravisor_present || !hv_isolation_type_snp())
		return 0;
	if (!hv_ghcb_pg)
		return -EINVAL;
	/*
	 * The GHCB page is allocated by the paravisor. The address
	 * returned by MSR_AMD64_SEV_ES_GHCB is above the shared
	 * memory boundary, so map it here.
	 */
	rdmsrl(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);
	/* Mask out vTOM bit. ioremap_cache() maps decrypted */
	ghcb_gpa &= ~ms_hyperv.shared_gpa_boundary;
	ghcb_va = (void *)ioremap_cache(ghcb_gpa, HV_HYP_PAGE_SIZE);
	if (!ghcb_va)
		return -ENOMEM;
	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	*ghcb_base = ghcb_va;
	return 0;
}
static int hv_cpu_init(unsigned int cpu)
{
	union hv_vp_assist_msr_contents msr = { 0 };
	struct hv_vp_assist_page **hvp;
	int ret;
	ret = hv_common_cpu_init(cpu);
	if (ret)
		return ret;
	if (!hv_vp_assist_page)
		return 0;
	hvp = &hv_vp_assist_page[cpu];
	if (hv_root_partition) {
		/*
		 * For root partition we get the hypervisor provided VP assist
		 * page, instead of allocating a new page.
		 */
		rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
		*hvp = memremap(msr.pfn << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT,
				PAGE_SIZE, MEMREMAP_WB);
	} else {
		/*
		 * The VP assist page is an "overlay" page (see Hyper-V TLFS's
		 * Section 5.2.1 "GPA Overlay Pages"). Here it must be zeroed
		 * out to make sure we always write the EOI MSR in
		 * hv_apic_eoi_write() *after* the EOI optimization is disabled
		 * in hv_cpu_die(), otherwise a CPU may not be stopped in the
		 * case of CPU offlining and the VM will hang.
		 */
		if (!*hvp) {
			*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
			/*
			 * Hyper-V should never specify a VM that is a Confidential
			 * VM and also running in the root partition; the root
			 * partition is blocked from running in a Confidential VM. So
			 * only decrypt the assist page in the non-root partition here.
			 */
			if (*hvp && !ms_hyperv.paravisor_present && hv_isolation_type_snp()) {
				WARN_ON_ONCE(set_memory_decrypted((unsigned long)(*hvp), 1));
				memset(*hvp, 0, PAGE_SIZE);
			}
		}
		if (*hvp)
			msr.pfn = vmalloc_to_pfn(*hvp);
	}
	if (!WARN_ON(!(*hvp))) {
		msr.enable = 1;
		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
	}
	return hyperv_init_ghcb();
}
static void (*hv_reenlightenment_cb)(void);
static void hv_reenlightenment_notify(struct work_struct *dummy)
{
	struct hv_tsc_emulation_status emu_status;
	rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
	/* Don't issue the callback if TSC accesses are not emulated */
	if (hv_reenlightenment_cb && emu_status.inprogress)
		hv_reenlightenment_cb();
}
static DECLARE_DELAYED_WORK(hv_reenlightenment_work, hv_reenlightenment_notify);
void hyperv_stop_tsc_emulation(void)
{
	u64 freq;
	struct hv_tsc_emulation_status emu_status;
	rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
	emu_status.inprogress = 0;
	wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
	rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
	tsc_khz = div64_u64(freq, 1000);
}
EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);
static inline bool hv_reenlightenment_available(void)
{
	/*
	 * Check for required features and privileges to make TSC frequency
	 * change notifications work.
	 */
	return ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
		ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE &&
		ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT;
}
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment)
{
	apic_eoi();
	inc_irq_stat(irq_hv_reenlightenment_count);
	schedule_delayed_work(&hv_reenlightenment_work, HZ/10);
}
void set_hv_tscchange_cb(void (*cb)(void))
{
	struct hv_reenlightenment_control re_ctrl = {
		.vector = HYPERV_REENLIGHTENMENT_VECTOR,
		.enabled = 1,
	};
	struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1};
	if (!hv_reenlightenment_available()) {
		pr_warn("Hyper-V: reenlightenment support is unavailable\n");
		return;
	}
	if (!hv_vp_index)
		return;
	hv_reenlightenment_cb = cb;
	/* Make sure callback is registered before we write to MSRs */
	wmb();
	re_ctrl.target_vp = hv_vp_index[get_cpu()];
	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));
	put_cpu();
}
EXPORT_SYMBOL_GPL(set_hv_tscchange_cb);
void clear_hv_tscchange_cb(void)
{
	struct hv_reenlightenment_control re_ctrl;
	if (!hv_reenlightenment_available())
		return;
	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
	re_ctrl.enabled = 0;
	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
	hv_reenlightenment_cb = NULL;
}
EXPORT_SYMBOL_GPL(clear_hv_tscchange_cb);
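/*
 * CPU hotplug "offline" callback: undo hv_cpu_init() and, when this CPU is
 * the reenlightenment target, move the notifications to another online CPU
 * (or disable them if no other CPU is left, as on hibernation).
 */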
static int hv_cpu_die(unsigned int cpu)
{
	struct hv_reenlightenment_control re_ctrl;
	unsigned int new_cpu;
	void **ghcb_va;
	if (hv_ghcb_pg) {
		ghcb_va = (void **)this_cpu_ptr(hv_ghcb_pg);
		if (*ghcb_va)
			iounmap(*ghcb_va);
		*ghcb_va = NULL;
	}
	hv_common_cpu_die(cpu);
	if (hv_vp_assist_page && hv_vp_assist_page[cpu]) {
		union hv_vp_assist_msr_contents msr = { 0 };
		if (hv_root_partition) {
			/*
			 * For the root partition the VP assist page is mapped
			 * to a hypervisor-provided page: unmap and nullify it
			 * here so that hv_cpu_init() maps the correct page
			 * address next time.
			 */
			memunmap(hv_vp_assist_page[cpu]);
			hv_vp_assist_page[cpu] = NULL;
			rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
			msr.enable = 0;
		}
		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
	}
	if (hv_reenlightenment_cb == NULL)
		return 0;
	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	if (re_ctrl.target_vp == hv_vp_index[cpu]) {
		/*
		 * Reassign reenlightenment notifications to some other online
		 * CPU or just disable the feature if there are no online CPUs
		 * left (happens on hibernation).
		 */
		new_cpu = cpumask_any_but(cpu_online_mask, cpu);
		if (new_cpu < nr_cpu_ids)
			re_ctrl.target_vp = hv_vp_index[new_cpu];
		else
			re_ctrl.enabled = 0;
		wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	}
	return 0;
}
static int __init hv_pci_init(void)
{
	int gen2vm = efi_enabled(EFI_BOOT);
	/*
	 * For a Generation-2 VM, we exit from pci_arch_init() by returning 0.
	 * The purpose is to suppress the harmless warning:
	 * "PCI: Fatal: No config space access function found"
	 */
	if (gen2vm)
		return 0;
	/* For a Generation-1 VM, we'll proceed in pci_arch_init().  */
	return 1;
}
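/*
 * Syscore suspend callback: stash and disable the hypercall page and tear
 * down CPU0's Hyper-V state before hibernation; hv_resume() reverses this.
 * Not supported in the root partition.
 */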
static int hv_suspend(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int ret;
	if (hv_root_partition)
		return -EPERM;
	/*
	 * Reset the hypercall page as it is going to be invalidated
	 * across hibernation. Setting hv_hypercall_pg to NULL ensures
	 * that any subsequent hypercall operation fails safely instead of
	 * crashing due to an access of an invalid page. The hypercall page
	 * pointer is restored on resume.
	 */
	hv_hypercall_pg_saved = hv_hypercall_pg;
	hv_hypercall_pg = NULL;
	/* Disable the hypercall page in the hypervisor */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 0;
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	ret = hv_cpu_die(0);
	return ret;
}
static void hv_resume(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int ret;
	ret = hv_cpu_init(0);
	WARN_ON(ret);
	/* Re-enable the hypercall page */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;
	hypercall_msr.guest_physical_address =
		vmalloc_to_pfn(hv_hypercall_pg_saved);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hv_hypercall_pg = hv_hypercall_pg_saved;
	hv_hypercall_pg_saved = NULL;
	/*
	 * Reenlightenment notifications are disabled by hv_cpu_die(0),
	 * reenable them here if hv_reenlightenment_cb was previously set.
	 */
	if (hv_reenlightenment_cb)
		set_hv_tscchange_cb(hv_reenlightenment_cb);
}
/* Note: when the ops are called, only CPU0 is online and IRQs are disabled. */
static struct syscore_ops hv_syscore_ops = {
	.suspend	= hv_suspend,
	.resume		= hv_resume,
};
static void (* __initdata old_setup_percpu_clockev)(void);
static void __init hv_stimer_setup_percpu_clockev(void)
{
	/*
	 * Ignore any errors in setting up stimer clockevents
	 * as we can run with the LAPIC timer as a fallback.
	 */
	(void)hv_stimer_alloc(false);
	/*
	 * Still register the LAPIC timer, because the direct-mode STIMER is
	 * not supported by old versions of Hyper-V. This also allows users
	 * to switch to LAPIC timer via /sys, if they want to.
	 */
	if (old_setup_percpu_clockev)
		old_setup_percpu_clockev();
}
static void __init hv_get_partition_id(void)
{
	struct hv_get_partition_id *output_page;
	u64 status;
	unsigned long flags;
	local_irq_save(flags);
	output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
	status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output_page);
	if (!hv_result_success(status)) {
		/* No point in proceeding if this failed */
		pr_err("Failed to get partition ID: %lld\n", status);
		BUG();
	}
	hv_current_partition_id = output_page->partition_id;
	local_irq_restore(flags);
}
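/*
 * Read HV_X64_REGISTER_VSM_VP_STATUS via HVCALL_GET_VP_REGISTERS to find
 * the Virtual Trust Level (VTL) this guest runs at, defaulting to VTL0 if
 * the hypercall fails.
 */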
static u8 __init get_vtl(void)
{
	u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
	struct hv_get_vp_registers_input *input;
	struct hv_get_vp_registers_output *output;
	unsigned long flags;
	u64 ret;
	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	output = (struct hv_get_vp_registers_output *)input;
	memset(input, 0, struct_size(input, element, 1));
	input->header.partitionid = HV_PARTITION_ID_SELF;
	input->header.vpindex = HV_VP_INDEX_SELF;
	input->header.inputvtl = 0;
	input->element[0].name0 = HV_X64_REGISTER_VSM_VP_STATUS;
	ret = hv_do_hypercall(control, input, output);
	if (hv_result_success(ret)) {
		ret = output->as64.low & HV_X64_VTL_MASK;
	} else {
		pr_err("Failed to get VTL(%lld) and set VTL to zero by default.\n", ret);
		ret = 0;
	}
	local_irq_restore(flags);
	return ret;
}
/*
 * This function is to be invoked early in the boot sequence after the
 * hypervisor has been detected.
 *
 * 1. Setup the hypercall page.
 * 2. Register Hyper-V specific clocksource.
 * 3. Setup Hyper-V specific APIC entry points.
 */
void __init hyperv_init(void)
{
	u64 guest_id;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int cpuhp;
	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return;
	if (hv_common_init())
		return;
	/*
	 * The VP assist page is useless to a TDX guest: the only use we
	 * would have for it is lazy EOI, which cannot be used with TDX.
	 */
	if (hv_isolation_type_tdx())
		hv_vp_assist_page = NULL;
	else
		hv_vp_assist_page = kcalloc(num_possible_cpus(),
					    sizeof(*hv_vp_assist_page),
					    GFP_KERNEL);
	if (!hv_vp_assist_page) {
		ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
		if (!hv_isolation_type_tdx())
			goto common_free;
	}
	if (ms_hyperv.paravisor_present && hv_isolation_type_snp()) {
		/* Negotiate GHCB Version. */
		if (!hv_ghcb_negotiate_protocol())
			hv_ghcb_terminate(SEV_TERM_SET_GEN,
					  GHCB_SEV_ES_PROT_UNSUPPORTED);
		hv_ghcb_pg = alloc_percpu(union hv_ghcb *);
		if (!hv_ghcb_pg)
			goto free_vp_assist_page;
	}
	cpuhp = cpuhp_setup_state(CPUHP_AP_HYPERV_ONLINE, "x86/hyperv_init:online",
				  hv_cpu_init, hv_cpu_die);
	if (cpuhp < 0)
		goto free_ghcb_page;
	/*
	 * Setup the hypercall page and enable hypercalls.
	 * 1. Register the guest ID
	 * 2. Enable the hypercall and register the hypercall page
	 *
	 * A TDX VM with no paravisor only uses TDX GHCI rather than hv_hypercall_pg:
	 * when the hypercall input is a page, such a VM must pass a decrypted
	 * page to Hyper-V, e.g. hv_post_message() uses the per-CPU page
	 * hyperv_pcpu_input_arg, which is decrypted if no paravisor is present.
	 *
	 * A TDX VM with the paravisor uses hv_hypercall_pg for most hypercalls,
	 * which are handled by the paravisor and the VM must use an encrypted
	 * input page: in such a VM, the hyperv_pcpu_input_arg is encrypted and
	 * used in the hypercalls, e.g. see hv_mark_gpa_visibility() and
	 * hv_arch_irq_unmask(). Such a VM uses TDX GHCI for two hypercalls:
	 * 1. HVCALL_SIGNAL_EVENT: see vmbus_set_event() and _hv_do_fast_hypercall8().
	 * 2. HVCALL_POST_MESSAGE: the input page must be a decrypted page, i.e.
	 * hv_post_message() in such a VM can't use the encrypted hyperv_pcpu_input_arg;
	 * instead, hv_post_message() uses the post_msg_page, which is decrypted
	 * in such a VM and is only used in such a VM.
	 */
	guest_id = hv_generate_guest_id(LINUX_VERSION_CODE);
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
	/* With the paravisor, the VM must also write the ID via GHCB/GHCI */
	hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
	/* A TDX VM with no paravisor only uses TDX GHCI rather than hv_hypercall_pg */
	if (hv_isolation_type_tdx() && !ms_hyperv.paravisor_present)
		goto skip_hypercall_pg_init;
	hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
			VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
			VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
			__builtin_return_address(0));
	if (hv_hypercall_pg == NULL)
		goto clean_guest_os_id;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;
	if (hv_root_partition) {
		struct page *pg;
		void *src;
		/*
		 * For the root partition, the hypervisor will set up its
		 * hypercall page. The hypervisor guarantees it will not show
		 * up in the root's address space. The root can't change the
		 * location of the hypercall page.
		 *
		 * Order is important here. We must enable the hypercall page
		 * so it is populated with code, then copy the code to an
		 * executable page.
		 */
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		pg = vmalloc_to_page(hv_hypercall_pg);
		src = memremap(hypercall_msr.guest_physical_address << PAGE_SHIFT, PAGE_SIZE,
				MEMREMAP_WB);
		BUG_ON(!src);
		memcpy_to_page(pg, 0, src, HV_HYP_PAGE_SIZE);
		memunmap(src);
		hv_remap_tsc_clocksource();
	} else {
		hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	}
skip_hypercall_pg_init:
	/*
	 * Some versions of Hyper-V that provide IBT in guest VMs have a bug
	 * in that there's no ENDBR64 instruction at the entry to the
	 * hypercall page. Because hypercalls are invoked via an indirect call
	 * to the hypercall page, all hypercall attempts fail when IBT is
	 * enabled, and Linux panics. For such buggy versions, disable IBT.
	 *
	 * Fixed versions of Hyper-V always provide ENDBR64 on the hypercall
	 * page, so if future Linux kernel versions enable IBT for 32-bit
	 * builds, additional hypercall page hackery will be required here
	 * to provide an ENDBR32.
	 */
#ifdef CONFIG_X86_KERNEL_IBT
	if (cpu_feature_enabled(X86_FEATURE_IBT) &&
	    *(u32 *)hv_hypercall_pg != gen_endbr()) {
		setup_clear_cpu_cap(X86_FEATURE_IBT);
		pr_warn("Hyper-V: Disabling IBT because of Hyper-V bug\n");
	}
#endif
	/*
	 * hyperv_init() is called before LAPIC is initialized: see
	 * apic_intr_mode_init() -> x86_platform.apic_post_init() and
	 * apic_bsp_setup() -> setup_local_APIC(). The direct-mode STIMER
	 * depends on LAPIC, so hv_stimer_alloc() should be called from
	 * x86_init.timers.setup_percpu_clockev.
	 */
	old_setup_percpu_clockev = x86_init.timers.setup_percpu_clockev;
	x86_init.timers.setup_percpu_clockev = hv_stimer_setup_percpu_clockev;
	hv_apic_init();
	x86_init.pci.arch_init = hv_pci_init;
	register_syscore_ops(&hv_syscore_ops);
	hyperv_init_cpuhp = cpuhp;
	if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_ACCESS_PARTITION_ID)
		hv_get_partition_id();
	BUG_ON(hv_root_partition && hv_current_partition_id == ~0ull);
#ifdef CONFIG_PCI_MSI
	/*
	 * If we're running as root, we want to create our own PCI MSI domain.
	 * We can't set this in hv_pci_init because that would be too late.
	 */
	if (hv_root_partition)
		x86_init.irqs.create_pci_msi_domain = hv_create_pci_msi_domain;
#endif
	/* Query the VMs extended capability once, so that it can be cached. */
	hv_query_ext_cap(0);
	/* Find the VTL */
	if (!ms_hyperv.paravisor_present && hv_isolation_type_snp())
		ms_hyperv.vtl = get_vtl();
	return;
clean_guest_os_id:
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
	hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
	cpuhp_remove_state(cpuhp);
free_ghcb_page:
	free_percpu(hv_ghcb_pg);
free_vp_assist_page:
	kfree(hv_vp_assist_page);
	hv_vp_assist_page = NULL;
common_free:
	hv_common_free();
}
/*
 * This routine is called before kexec/kdump, it does the required cleanup.
 */
void hyperv_cleanup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	union hv_reference_tsc_msr tsc_msr;
	/* Reset our OS id */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
	hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
	/*
	 * Reset the hypercall page reference before resetting the page
	 * itself, so that hypercall operations fail safely rather than
	 * panicking the kernel through use of an invalid hypercall page.
	 */
	hv_hypercall_pg = NULL;
	/* Reset the hypercall page */
	hypercall_msr.as_uint64 = hv_get_register(HV_X64_MSR_HYPERCALL);
	hypercall_msr.enable = 0;
	hv_set_register(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	/* Reset the TSC page */
	tsc_msr.as_uint64 = hv_get_register(HV_X64_MSR_REFERENCE_TSC);
	tsc_msr.enable = 0;
	hv_set_register(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
{
	static bool panic_reported;
	u64 guest_id;
	if (in_die && !panic_on_oops)
		return;
	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;
	rdmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
	wrmsrl(HV_X64_MSR_CRASH_P0, err);
	wrmsrl(HV_X64_MSR_CRASH_P1, guest_id);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->sp);
	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);
bool hv_is_hyperv_initialized(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	/*
	 * Ensure that we're really on Hyper-V, and not a KVM or Xen
	 * emulation of Hyper-V
	 */
	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return false;
	/* A TDX VM with no paravisor uses TDX GHCI call rather than hv_hypercall_pg */
	if (hv_isolation_type_tdx() && !ms_hyperv.paravisor_present)
		return true;
	/*
	 * Verify that earlier initialization succeeded by checking
	 * that the hypercall page is set up.
	 */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	return hypercall_msr.enable;
}
EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);
 | 
	linux-master | 
	arch/x86/hyperv/hv_init.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * Irqdomain for Linux to run as the root partition on Microsoft Hypervisor.
 *
 * Authors:
 *  Sunil Muthuswamy <[email protected]>
 *  Wei Liu <[email protected]>
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/mshyperv.h>
static int hv_map_interrupt(union hv_device_id device_id, bool level,
		int cpu, int vector, struct hv_interrupt_entry *entry)
{
	struct hv_input_map_device_interrupt *input;
	struct hv_output_map_device_interrupt *output;
	struct hv_device_interrupt_descriptor *intr_desc;
	unsigned long flags;
	u64 status;
	int nr_bank, var_size;
	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	output = *this_cpu_ptr(hyperv_pcpu_output_arg);
	intr_desc = &input->interrupt_descriptor;
	memset(input, 0, sizeof(*input));
	input->partition_id = hv_current_partition_id;
	input->device_id = device_id.as_uint64;
	intr_desc->interrupt_type = HV_X64_INTERRUPT_TYPE_FIXED;
	intr_desc->vector_count = 1;
	intr_desc->target.vector = vector;
	if (level)
		intr_desc->trigger_mode = HV_INTERRUPT_TRIGGER_MODE_LEVEL;
	else
		intr_desc->trigger_mode = HV_INTERRUPT_TRIGGER_MODE_EDGE;
	intr_desc->target.vp_set.valid_bank_mask = 0;
	intr_desc->target.vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	nr_bank = cpumask_to_vpset(&(intr_desc->target.vp_set), cpumask_of(cpu));
	if (nr_bank < 0) {
		local_irq_restore(flags);
		pr_err("%s: unable to generate VP set\n", __func__);
		return EINVAL;
	}
	intr_desc->target.flags = HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
	/*
	 * This is a variable-sized hypercall: the variable portion starts
	 * after vp_set.format (so vp_set.format does not count toward
	 * var_size, but vp_set.valid_bank_mask does).
	 */
	var_size = nr_bank + 1;
	status = hv_do_rep_hypercall(HVCALL_MAP_DEVICE_INTERRUPT, 0, var_size,
			input, output);
	*entry = output->interrupt_entry;
	local_irq_restore(flags);
	if (!hv_result_success(status))
		pr_err("%s: hypercall failed, status %lld\n", __func__, status);
	return hv_result(status);
}
static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry)
{
	unsigned long flags;
	struct hv_input_unmap_device_interrupt *input;
	struct hv_interrupt_entry *intr_entry;
	u64 status;
	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(input, 0, sizeof(*input));
	intr_entry = &input->interrupt_entry;
	input->partition_id = hv_current_partition_id;
	input->device_id = id;
	*intr_entry = *old_entry;
	status = hv_do_hypercall(HVCALL_UNMAP_DEVICE_INTERRUPT, input, NULL);
	local_irq_restore(flags);
	return hv_result(status);
}
#ifdef CONFIG_PCI_MSI
struct rid_data {
	struct pci_dev *bridge;
	u32 rid;
};
static int get_rid_cb(struct pci_dev *pdev, u16 alias, void *data)
{
	struct rid_data *rd = data;
	u8 bus = PCI_BUS_NUM(rd->rid);
	if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus) {
		rd->bridge = pdev;
		rd->rid = alias;
	}
	return 0;
}
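/*
 * Build the Hyper-V device ID for a PCI device. DMA aliases are walked via
 * get_rid_cb() so that a device behind a bridge is reported with the
 * requester ID (and, for PCI-X bridges, the bus range) the hypervisor
 * expects.
 */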
static union hv_device_id hv_build_pci_dev_id(struct pci_dev *dev)
{
	union hv_device_id dev_id;
	struct rid_data data = {
		.bridge = NULL,
		.rid = PCI_DEVID(dev->bus->number, dev->devfn)
	};
	pci_for_each_dma_alias(dev, get_rid_cb, &data);
	dev_id.as_uint64 = 0;
	dev_id.device_type = HV_DEVICE_TYPE_PCI;
	dev_id.pci.segment = pci_domain_nr(dev->bus);
	dev_id.pci.bdf.bus = PCI_BUS_NUM(data.rid);
	dev_id.pci.bdf.device = PCI_SLOT(data.rid);
	dev_id.pci.bdf.function = PCI_FUNC(data.rid);
	dev_id.pci.source_shadow = HV_SOURCE_SHADOW_NONE;
	if (data.bridge) {
		int pos;
		/*
		 * Microsoft Hypervisor requires a bus range when the bridge is
		 * running in PCI-X mode.
		 *
		 * To distinguish conventional vs PCI-X bridge, we can check
		 * the bridge's PCI-X Secondary Status Register, Secondary Bus
		 * Mode and Frequency bits. See PCI Express to PCI/PCI-X Bridge
		 * Specification Revision 1.0 5.2.2.1.3.
		 *
		 * Value zero means it is in conventional mode, otherwise it is
		 * in PCI-X mode.
		 */
		pos = pci_find_capability(data.bridge, PCI_CAP_ID_PCIX);
		if (pos) {
			u16 status;
			pci_read_config_word(data.bridge, pos +
					PCI_X_BRIDGE_SSTATUS, &status);
			if (status & PCI_X_SSTATUS_FREQ) {
				/* Non-zero, PCI-X mode */
				u8 sec_bus, sub_bus;
				dev_id.pci.source_shadow = HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE;
				pci_read_config_byte(data.bridge, PCI_SECONDARY_BUS, &sec_bus);
				dev_id.pci.shadow_bus_range.secondary_bus = sec_bus;
				pci_read_config_byte(data.bridge, PCI_SUBORDINATE_BUS, &sub_bus);
				dev_id.pci.shadow_bus_range.subordinate_bus = sub_bus;
			}
		}
	}
	return dev_id;
}
static int hv_map_msi_interrupt(struct pci_dev *dev, int cpu, int vector,
				struct hv_interrupt_entry *entry)
{
	union hv_device_id device_id = hv_build_pci_dev_id(dev);
	return hv_map_interrupt(device_id, false, cpu, vector, entry);
}
static inline void entry_to_msi_msg(struct hv_interrupt_entry *entry, struct msi_msg *msg)
{
	/* High address is always 0 */
	msg->address_hi = 0;
	msg->address_lo = entry->msi_entry.address.as_uint32;
	msg->data = entry->msi_entry.data.as_uint32;
}
static int hv_unmap_msi_interrupt(struct pci_dev *dev, struct hv_interrupt_entry *old_entry);
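/*
 * Compose the MSI message by asking the hypervisor to map the interrupt.
 * An already-mapped interrupt is unmapped first, because the hypervisor
 * does not let the root partition retarget an existing mapping.
 */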
static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct msi_desc *msidesc;
	struct pci_dev *dev;
	struct hv_interrupt_entry out_entry, *stored_entry;
	struct irq_cfg *cfg = irqd_cfg(data);
	const cpumask_t *affinity;
	int cpu;
	u64 status;
	msidesc = irq_data_get_msi_desc(data);
	dev = msi_desc_to_pci_dev(msidesc);
	if (!cfg) {
		pr_debug("%s: cfg is NULL", __func__);
		return;
	}
	affinity = irq_data_get_effective_affinity_mask(data);
	cpu = cpumask_first_and(affinity, cpu_online_mask);
	if (data->chip_data) {
		/*
		 * This interrupt is already mapped. Let's unmap it first.
		 *
		 * We don't use the retarget-interrupt hypercall here because
		 * Microsoft Hypervisor doesn't allow root to change the vector
		 * or specify VPs outside of the set that is initially used
		 * during mapping.
		 */
		stored_entry = data->chip_data;
		data->chip_data = NULL;
		status = hv_unmap_msi_interrupt(dev, stored_entry);
		kfree(stored_entry);
		if (status != HV_STATUS_SUCCESS) {
			pr_debug("%s: failed to unmap, status %lld", __func__, status);
			return;
		}
	}
	stored_entry = kzalloc(sizeof(*stored_entry), GFP_ATOMIC);
	if (!stored_entry) {
		pr_debug("%s: failed to allocate chip data\n", __func__);
		return;
	}
	status = hv_map_msi_interrupt(dev, cpu, cfg->vector, &out_entry);
	if (status != HV_STATUS_SUCCESS) {
		kfree(stored_entry);
		return;
	}
	*stored_entry = out_entry;
	data->chip_data = stored_entry;
	entry_to_msi_msg(&out_entry, msg);
	return;
}
static int hv_unmap_msi_interrupt(struct pci_dev *dev, struct hv_interrupt_entry *old_entry)
{
	return hv_unmap_interrupt(hv_build_pci_dev_id(dev).as_uint64, old_entry);
}
static void hv_teardown_msi_irq(struct pci_dev *dev, struct irq_data *irqd)
{
	struct hv_interrupt_entry old_entry;
	struct msi_msg msg;
	u64 status;
	if (!irqd->chip_data) {
		pr_debug("%s: no chip data\n!", __func__);
		return;
	}
	old_entry = *(struct hv_interrupt_entry *)irqd->chip_data;
	entry_to_msi_msg(&old_entry, &msg);
	kfree(irqd->chip_data);
	irqd->chip_data = NULL;
	status = hv_unmap_msi_interrupt(dev, &old_entry);
	if (status != HV_STATUS_SUCCESS)
		pr_err("%s: hypercall failed, status %lld\n", __func__, status);
}
static void hv_msi_free_irq(struct irq_domain *domain,
			    struct msi_domain_info *info, unsigned int virq)
{
	struct irq_data *irqd = irq_get_irq_data(virq);
	struct msi_desc *desc;
	if (!irqd)
		return;
	desc = irq_data_get_msi_desc(irqd);
	if (!desc || !desc->irq || WARN_ON_ONCE(!dev_is_pci(desc->dev)))
		return;
	hv_teardown_msi_irq(to_pci_dev(desc->dev), irqd);
}
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip hv_pci_msi_controller = {
	.name			= "HV-PCI-MSI",
	.irq_unmask		= pci_msi_unmask_irq,
	.irq_mask		= pci_msi_mask_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_compose_msi_msg	= hv_irq_compose_msi_msg,
	.irq_set_affinity	= msi_domain_set_affinity,
	.flags			= IRQCHIP_SKIP_SET_WAKE,
};
static struct msi_domain_ops pci_msi_domain_ops = {
	.msi_free		= hv_msi_free_irq,
	.msi_prepare		= pci_msi_prepare,
};
static struct msi_domain_info hv_pci_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &pci_msi_domain_ops,
	.chip		= &hv_pci_msi_controller,
	.handler	= handle_edge_irq,
	.handler_name	= "edge",
};
struct irq_domain * __init hv_create_pci_msi_domain(void)
{
	struct irq_domain *d = NULL;
	struct fwnode_handle *fn;
	fn = irq_domain_alloc_named_fwnode("HV-PCI-MSI");
	if (fn)
		d = pci_msi_create_irq_domain(fn, &hv_pci_msi_domain_info, x86_vector_domain);
	/* No point in going further if we can't get an irq domain */
	BUG_ON(!d);
	return d;
}
#endif /* CONFIG_PCI_MSI */
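/* Tear down an interrupt mapping previously created for an IOAPIC. */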
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry)
{
	union hv_device_id device_id;
	device_id.as_uint64 = 0;
	device_id.device_type = HV_DEVICE_TYPE_IOAPIC;
	device_id.ioapic.ioapic_id = (u8)ioapic_id;
	return hv_unmap_interrupt(device_id.as_uint64, entry);
}
EXPORT_SYMBOL_GPL(hv_unmap_ioapic_interrupt);
int hv_map_ioapic_interrupt(int ioapic_id, bool level, int cpu, int vector,
		struct hv_interrupt_entry *entry)
{
	union hv_device_id device_id;
	device_id.as_uint64 = 0;
	device_id.device_type = HV_DEVICE_TYPE_IOAPIC;
	device_id.ioapic.ioapic_id = (u8)ioapic_id;
	return hv_map_interrupt(device_id, level, cpu, vector, entry);
}
EXPORT_SYMBOL_GPL(hv_map_ioapic_interrupt);
 | 
	linux-master | 
	arch/x86/hyperv/irqdomain.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V specific spinlock code.
 *
 * Copyright (C) 2018, Intel, Inc.
 *
 * Author : Yi Sun <[email protected]>
 */
#define pr_fmt(fmt) "Hyper-V: " fmt
#include <linux/spinlock.h>
#include <asm/mshyperv.h>
#include <asm/paravirt.h>
#include <asm/apic.h>
static bool __initdata hv_pvspin = true;
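/* Wake up a vCPU that is parked in hv_qlock_wait(). */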
static void hv_qlock_kick(int cpu)
{
	__apic_send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
}
static void hv_qlock_wait(u8 *byte, u8 val)
{
	unsigned long flags;
	if (in_nmi())
		return;
	/*
	 * Reading HV_X64_MSR_GUEST_IDLE MSR tells the hypervisor that the
	 * vCPU can be put into 'idle' state. This 'idle' state is
	 * terminated by an IPI, usually from hv_qlock_kick(), even if
	 * interrupts are disabled on the vCPU.
	 *
	 * To prevent a race against the unlock path it is required to
	 * disable interrupts before accessing the HV_X64_MSR_GUEST_IDLE
	 * MSR. Otherwise, if the IPI from hv_qlock_kick() arrives between
	 * the lock value check and the rdmsrl() then the vCPU might be put
	 * into 'idle' state by the hypervisor and kept in that state for
	 * an unspecified amount of time.
	 */
	local_irq_save(flags);
	/*
	 * Only issue the rdmsrl() when the lock state has not changed.
	 */
	if (READ_ONCE(*byte) == val) {
		unsigned long msr_val;
		rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);
		(void)msr_val;
	}
	local_irq_restore(flags);
}
/*
 * Hyper-V does not provide vCPU preemption information so far, so always
 * report "not preempted".
 */
__visible bool hv_vcpu_is_preempted(int vcpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);
void __init hv_init_spinlocks(void)
{
	if (!hv_pvspin || !apic ||
	    !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
	    !(ms_hyperv.features & HV_MSR_GUEST_IDLE_AVAILABLE)) {
		pr_info("PV spinlocks disabled\n");
		return;
	}
	pr_info("PV spinlocks enabled\n");
	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = hv_qlock_wait;
	pv_ops.lock.kick = hv_qlock_kick;
	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
}
static __init int hv_parse_nopvspin(char *arg)
{
	hv_pvspin = false;
	return 0;
}
early_param("hv_nopvspin", hv_parse_nopvspin);
 | 
	linux-master | 
	arch/x86/hyperv/hv_spinlock.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V nested virtualization code.
 *
 * Copyright (C) 2018, Microsoft, Inc.
 *
 * Author : Lan Tianyu <[email protected]>
 */
#define pr_fmt(fmt)  "Hyper-V: " fmt
#include <linux/types.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>
#include <asm/trace/hyperv.h>
int hyperv_flush_guest_mapping(u64 as)
{
	struct hv_guest_mapping_flush *flush;
	u64 status;
	unsigned long flags;
	int ret = -ENOTSUPP;
	if (!hv_hypercall_pg)
		goto fault;
	local_irq_save(flags);
	flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto fault;
	}
	flush->address_space = as;
	flush->flags = 0;
	status = hv_do_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE,
				 flush, NULL);
	local_irq_restore(flags);
	if (hv_result_success(status))
		ret = 0;
fault:
	trace_hyperv_nested_flush_guest_mapping(as, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping);
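/*
 * Encode [start_gfn, start_gfn + pages) into the hypercall's GPA list,
 * with each entry covering up to HV_MAX_FLUSH_PAGES pages. Returns the
 * number of entries used, or -ENOSPC if the range does not fit.
 */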
int hyperv_fill_flush_guest_mapping_list(
		struct hv_guest_mapping_flush_list *flush,
		u64 start_gfn, u64 pages)
{
	u64 cur = start_gfn;
	u64 additional_pages;
	int gpa_n = 0;
	do {
		/*
		 * If the flush requests exceed the maximum flush count,
		 * fall back to flushing the TLB without a range.
		 */
		if (gpa_n >= HV_MAX_FLUSH_REP_COUNT)
			return -ENOSPC;
		additional_pages = min_t(u64, pages, HV_MAX_FLUSH_PAGES) - 1;
		flush->gpa_list[gpa_n].page.additional_pages = additional_pages;
		flush->gpa_list[gpa_n].page.largepage = false;
		flush->gpa_list[gpa_n].page.basepfn = cur;
		pages -= additional_pages + 1;
		cur += additional_pages + 1;
		gpa_n++;
	} while (pages > 0);
	return gpa_n;
}
EXPORT_SYMBOL_GPL(hyperv_fill_flush_guest_mapping_list);
int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_flush_list_func, void *data)
{
	struct hv_guest_mapping_flush_list *flush;
	u64 status;
	unsigned long flags;
	int ret = -ENOTSUPP;
	int gpa_n = 0;
	if (!hv_hypercall_pg || !fill_flush_list_func)
		goto fault;
	local_irq_save(flags);
	flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto fault;
	}
	flush->address_space = as;
	flush->flags = 0;
	gpa_n = fill_flush_list_func(flush, data);
	if (gpa_n < 0) {
		local_irq_restore(flags);
		goto fault;
	}
	status = hv_do_rep_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST,
				     gpa_n, 0, flush, NULL);
	local_irq_restore(flags);
	if (hv_result_success(status))
		ret = 0;
	else
		ret = hv_result(status);
fault:
	trace_hyperv_nested_flush_guest_mapping_range(as, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping_range);
 | 
	linux-master | 
	arch/x86/hyperv/nested.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V specific APIC code.
 *
 * Copyright (C) 2018, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <asm/apic.h>
#include <asm/trace/hyperv.h>
static struct apic orig_apic;
static u64 hv_apic_icr_read(void)
{
	u64 reg_val;
	rdmsrl(HV_X64_MSR_ICR, reg_val);
	return reg_val;
}
static void hv_apic_icr_write(u32 low, u32 id)
{
	u64 reg_val;
	reg_val = SET_XAPIC_DEST_FIELD(id);
	reg_val = reg_val << 32;
	reg_val |= low;
	wrmsrl(HV_X64_MSR_ICR, reg_val);
}
static u32 hv_apic_read(u32 reg)
{
	u32 reg_val, hi;
	switch (reg) {
	case APIC_EOI:
		rdmsr(HV_X64_MSR_EOI, reg_val, hi);
		(void)hi;
		return reg_val;
	case APIC_TASKPRI:
		rdmsr(HV_X64_MSR_TPR, reg_val, hi);
		(void)hi;
		return reg_val;
	default:
		return native_apic_mem_read(reg);
	}
}
static void hv_apic_write(u32 reg, u32 val)
{
	switch (reg) {
	case APIC_EOI:
		wrmsr(HV_X64_MSR_EOI, val, 0);
		break;
	case APIC_TASKPRI:
		wrmsr(HV_X64_MSR_TPR, val, 0);
		break;
	default:
		native_apic_mem_write(reg, val);
	}
}
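/*
 * EOI with the lazy-EOI optimization: when bit 0 of the VP assist page's
 * apic_assist field is set, the hypervisor needs no explicit EOI MSR
 * write, so it can be skipped.
 */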
static void hv_apic_eoi_write(void)
{
	struct hv_vp_assist_page *hvp = hv_vp_assist_page[smp_processor_id()];
	if (hvp && (xchg(&hvp->apic_assist, 0) & 0x1))
		return;
	wrmsr(HV_X64_MSR_EOI, APIC_EOI_ACK, 0);
}
static bool cpu_is_self(int cpu)
{
	return cpu == smp_processor_id();
}
/*
 * IPI implementation on Hyper-V.
 */
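/*
 * Send an IPI using HVCALL_SEND_IPI_EX, which takes a sparse VP_SET and
 * can therefore target VP numbers >= 64, unlike the plain HVCALL_SEND_IPI.
 */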
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
		bool exclude_self)
{
	struct hv_send_ipi_ex *ipi_arg;
	unsigned long flags;
	int nr_bank = 0;
	u64 status = HV_STATUS_INVALID_PARAMETER;
	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
		return false;
	local_irq_save(flags);
	ipi_arg = *this_cpu_ptr(hyperv_pcpu_input_arg);
	if (unlikely(!ipi_arg))
		goto ipi_mask_ex_done;
	ipi_arg->vector = vector;
	ipi_arg->reserved = 0;
	ipi_arg->vp_set.valid_bank_mask = 0;
	/*
	 * Use HV_GENERIC_SET_ALL and avoid converting cpumask to VP_SET
	 * when the IPI is sent to all currently present CPUs.
	 */
	if (!cpumask_equal(mask, cpu_present_mask) || exclude_self) {
		ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		nr_bank = cpumask_to_vpset_skip(&(ipi_arg->vp_set), mask,
				exclude_self ? cpu_is_self : NULL);
		/*
		 * 'nr_bank <= 0' means some CPUs in cpumask can't be
		 * represented in VP_SET. Return an error and fall back to
		 * native (architectural) method of sending IPIs.
		 */
		if (nr_bank <= 0)
			goto ipi_mask_ex_done;
	} else {
		ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
	}
	status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
			      ipi_arg, NULL);
ipi_mask_ex_done:
	local_irq_restore(flags);
	return hv_result_success(status);
}
static bool __send_ipi_mask(const struct cpumask *mask, int vector,
		bool exclude_self)
{
	int cur_cpu, vcpu, this_cpu = smp_processor_id();
	struct hv_send_ipi ipi_arg;
	u64 status;
	unsigned int weight;
	trace_hyperv_send_ipi_mask(mask, vector);
	weight = cpumask_weight(mask);
	/*
	 * Do nothing if
	 *   1. the mask is empty
	 *   2. the mask only contains self when exclude_self is true
	 */
	if (weight == 0 ||
	    (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
		return true;
	/* A fully enlightened TDX VM uses GHCI rather than hv_hypercall_pg. */
	if (!hv_hypercall_pg) {
		if (ms_hyperv.paravisor_present || !hv_isolation_type_tdx())
			return false;
	}
	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
		return false;
	/*
	 * From the supplied CPU set we need to figure out if we can get away
	 * with cheaper HVCALL_SEND_IPI hypercall. This is possible when the
	 * highest VP number in the set is < 64. As VP numbers are usually in
	 * ascending order and match Linux CPU ids, here is an optimization:
	 * we check the VP number for the highest bit in the supplied set first
	 * so we can quickly find out if using HVCALL_SEND_IPI_EX hypercall is
	 * a must. We will also check all VP numbers when walking the supplied
	 * CPU set to remain correct in all cases.
	 */
	if (hv_cpu_number_to_vp_number(cpumask_last(mask)) >= 64)
		goto do_ex_hypercall;
	ipi_arg.vector = vector;
	ipi_arg.cpu_mask = 0;
	for_each_cpu(cur_cpu, mask) {
		if (exclude_self && cur_cpu == this_cpu)
			continue;
		vcpu = hv_cpu_number_to_vp_number(cur_cpu);
		if (vcpu == VP_INVAL)
			return false;
		/*
		 * This particular version of the IPI hypercall can
		 * only target up to 64 CPUs.
		 */
		if (vcpu >= 64)
			goto do_ex_hypercall;
		__set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
	}
	status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
				     ipi_arg.cpu_mask);
	return hv_result_success(status);
do_ex_hypercall:
	return __send_ipi_mask_ex(mask, vector, exclude_self);
}
static bool __send_ipi_one(int cpu, int vector)
{
	int vp = hv_cpu_number_to_vp_number(cpu);
	u64 status;
	trace_hyperv_send_ipi_one(cpu, vector);
	if (vp == VP_INVAL)
		return false;
	/* A fully enlightened TDX VM uses GHCI rather than hv_hypercall_pg. */
	if (!hv_hypercall_pg) {
		if (ms_hyperv.paravisor_present || !hv_isolation_type_tdx())
			return false;
	}
	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
		return false;
	if (vp >= 64)
		return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);
	status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
	return hv_result_success(status);
}
static void hv_send_ipi(int cpu, int vector)
{
	if (!__send_ipi_one(cpu, vector))
		orig_apic.send_IPI(cpu, vector);
}
static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
{
	if (!__send_ipi_mask(mask, vector, false))
		orig_apic.send_IPI_mask(mask, vector);
}
static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	if (!__send_ipi_mask(mask, vector, true))
		orig_apic.send_IPI_mask_allbutself(mask, vector);
}
static void hv_send_ipi_allbutself(int vector)
{
	hv_send_ipi_mask_allbutself(cpu_online_mask, vector);
}
static void hv_send_ipi_all(int vector)
{
	if (!__send_ipi_mask(cpu_online_mask, vector, false))
		orig_apic.send_IPI_all(vector);
}
static void hv_send_ipi_self(int vector)
{
	if (!__send_ipi_one(smp_processor_id(), vector))
		orig_apic.send_IPI_self(vector);
}
void __init hv_apic_init(void)
{
	if (ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) {
		pr_info("Hyper-V: Using IPI hypercalls\n");
		/*
		 * Set the IPI entry points.
		 */
		orig_apic = *apic;
		apic_update_callback(send_IPI, hv_send_ipi);
		apic_update_callback(send_IPI_mask, hv_send_ipi_mask);
		apic_update_callback(send_IPI_mask_allbutself, hv_send_ipi_mask_allbutself);
		apic_update_callback(send_IPI_allbutself, hv_send_ipi_allbutself);
		apic_update_callback(send_IPI_all, hv_send_ipi_all);
		apic_update_callback(send_IPI_self, hv_send_ipi_self);
	}
	if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) {
		pr_info("Hyper-V: Using enlightened APIC (%s mode)",
			x2apic_enabled() ? "x2apic" : "xapic");
		/*
		 * When in x2apic mode, don't use the Hyper-V specific APIC
		 * accessors since the field layout in the ICR register is
		 * different in x2apic mode. Furthermore, the architectural
		 * x2apic MSRs function just as well as the Hyper-V
		 * synthetic APIC MSRs, so there's no benefit in having
		 * separate Hyper-V accessors for x2apic mode. The only
		 * exception is hv_apic_eoi_write, because it benefits from
		 * lazy EOI when available, but the same accessor works for
		 * both xapic and x2apic because the field layout is the same.
		 */
		apic_update_callback(eoi, hv_apic_eoi_write);
		if (!x2apic_enabled()) {
			apic_update_callback(read, hv_apic_read);
			apic_update_callback(write, hv_apic_write);
			apic_update_callback(icr_write, hv_apic_icr_write);
			apic_update_callback(icr_read, hv_apic_icr_read);
		}
	}
}
 | 
	linux-master | 
	arch/x86/hyperv/hv_apic.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023, Microsoft Corporation.
 *
 * Author:
 *   Saurabh Sengar <[email protected]>
 */
#include <asm/apic.h>
#include <asm/boot.h>
#include <asm/desc.h>
#include <asm/i8259.h>
#include <asm/mshyperv.h>
#include <asm/realmode.h>
extern struct boot_params boot_params;
static struct real_mode_header hv_vtl_real_mode_header;
void __init hv_vtl_init_platform(void)
{
	pr_info("Linux runs in Hyper-V Virtual Trust Level\n");
	x86_platform.realmode_reserve = x86_init_noop;
	x86_platform.realmode_init = x86_init_noop;
	x86_init.irqs.pre_vector_init = x86_init_noop;
	x86_init.timers.timer_init = x86_init_noop;
	/* Avoid searching for BIOS MP tables */
	x86_init.mpparse.find_smp_config = x86_init_noop;
	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
	x86_platform.get_wallclock = get_rtc_noop;
	x86_platform.set_wallclock = set_rtc_noop;
	x86_platform.get_nmi_reason = hv_get_nmi_reason;
	x86_platform.legacy.i8042 = X86_LEGACY_I8042_PLATFORM_ABSENT;
	x86_platform.legacy.rtc = 0;
	x86_platform.legacy.warm_reset = 0;
	x86_platform.legacy.reserve_bios_regions = 0;
	x86_platform.legacy.devices.pnpbios = 0;
}
static inline u64 hv_vtl_system_desc_base(struct ldttss_desc *desc)
{
	return ((u64)desc->base3 << 32) | ((u64)desc->base2 << 24) |
		(desc->base1 << 16) | desc->base0;
}
static inline u32 hv_vtl_system_desc_limit(struct ldttss_desc *desc)
{
	return ((u32)desc->limit1 << 16) | (u32)desc->limit0;
}
typedef void (*secondary_startup_64_fn)(void*, void*);
static void hv_vtl_ap_entry(void)
{
	((secondary_startup_64_fn)secondary_startup_64)(&boot_params, &boot_params);
}
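/*
 * Bring up a secondary VP: hand the hypervisor a complete 64-bit register
 * context via HVCALL_ENABLE_VP_VTL, then kick the VP with HVCALL_START_VP
 * so it enters the kernel directly in long mode.
 */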
static int hv_vtl_bringup_vcpu(u32 target_vp_index, u64 eip_ignored)
{
	u64 status;
	int ret = 0;
	struct hv_enable_vp_vtl *input;
	unsigned long irq_flags;
	struct desc_ptr gdt_ptr;
	struct desc_ptr idt_ptr;
	struct ldttss_desc *tss;
	struct ldttss_desc *ldt;
	struct desc_struct *gdt;
	u64 rsp = current->thread.sp;
	u64 rip = (u64)&hv_vtl_ap_entry;
	native_store_gdt(&gdt_ptr);
	store_idt(&idt_ptr);
	gdt = (struct desc_struct *)((void *)(gdt_ptr.address));
	tss = (struct ldttss_desc *)(gdt + GDT_ENTRY_TSS);
	ldt = (struct ldttss_desc *)(gdt + GDT_ENTRY_LDT);
	local_irq_save(irq_flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(input, 0, sizeof(*input));
	input->partition_id = HV_PARTITION_ID_SELF;
	input->vp_index = target_vp_index;
	input->target_vtl.target_vtl = HV_VTL_MGMT;
	/*
	 * The x86_64 Linux kernel follows the 16-bit -> 32-bit -> 64-bit
	 * mode transition sequence after waking up an AP with SIPI whose
	 * vector points to the 16-bit AP startup trampoline code. Here in
	 * VTL2, we can't perform that sequence as the AP has to start in
	 * the 64-bit mode.
	 *
	 * To make this happen, we tell the hypervisor to load a valid 64-bit
	 * context (most of which is just magic numbers from the CPU manual)
	 * so that the AP jumps right to the 64-bit entry of the kernel, and the
	 * control registers are loaded with values that let the AP fetch the
	 * code and data and carry on with work it gets assigned.
	 */
	input->vp_context.rip = rip;
	input->vp_context.rsp = rsp;
	input->vp_context.rflags = 0x0000000000000002;
	input->vp_context.efer = __rdmsr(MSR_EFER);
	input->vp_context.cr0 = native_read_cr0();
	input->vp_context.cr3 = __native_read_cr3();
	input->vp_context.cr4 = native_read_cr4();
	input->vp_context.msr_cr_pat = __rdmsr(MSR_IA32_CR_PAT);
	input->vp_context.idtr.limit = idt_ptr.size;
	input->vp_context.idtr.base = idt_ptr.address;
	input->vp_context.gdtr.limit = gdt_ptr.size;
	input->vp_context.gdtr.base = gdt_ptr.address;
	/* Non-system desc (64bit), long, code, present */
	input->vp_context.cs.selector = __KERNEL_CS;
	input->vp_context.cs.base = 0;
	input->vp_context.cs.limit = 0xffffffff;
	input->vp_context.cs.attributes = 0xa09b;
	/* Non-system desc (64bit), data, present, granularity, default */
	input->vp_context.ss.selector = __KERNEL_DS;
	input->vp_context.ss.base = 0;
	input->vp_context.ss.limit = 0xffffffff;
	input->vp_context.ss.attributes = 0xc093;
	/* System desc (128bit), present, LDT */
	input->vp_context.ldtr.selector = GDT_ENTRY_LDT * 8;
	input->vp_context.ldtr.base = hv_vtl_system_desc_base(ldt);
	input->vp_context.ldtr.limit = hv_vtl_system_desc_limit(ldt);
	input->vp_context.ldtr.attributes = 0x82;
	/* System desc (128bit), present, TSS, 0x8b - busy, 0x89 -- default */
	input->vp_context.tr.selector = GDT_ENTRY_TSS * 8;
	input->vp_context.tr.base = hv_vtl_system_desc_base(tss);
	input->vp_context.tr.limit = hv_vtl_system_desc_limit(tss);
	input->vp_context.tr.attributes = 0x8b;
	status = hv_do_hypercall(HVCALL_ENABLE_VP_VTL, input, NULL);
	if (!hv_result_success(status) &&
	    hv_result(status) != HV_STATUS_VTL_ALREADY_ENABLED) {
		pr_err("HVCALL_ENABLE_VP_VTL failed for VP : %d ! [Err: %#llx\n]",
		       target_vp_index, status);
		ret = -EINVAL;
		goto free_lock;
	}
	status = hv_do_hypercall(HVCALL_START_VP, input, NULL);
	if (!hv_result_success(status)) {
		pr_err("HVCALL_START_VP failed for VP : %d ! [Err: %#llx]\n",
		       target_vp_index, status);
		ret = -EINVAL;
	}
free_lock:
	local_irq_restore(irq_flags);
	return ret;
}
static int hv_vtl_apicid_to_vp_id(u32 apic_id)
{
	u64 control;
	u64 status;
	unsigned long irq_flags;
	struct hv_get_vp_from_apic_id_in *input;
	u32 *output, ret;
	local_irq_save(irq_flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(input, 0, sizeof(*input));
	input->partition_id = HV_PARTITION_ID_SELF;
	input->apic_ids[0] = apic_id;
	output = (u32 *)input;
	control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_ID_FROM_APIC_ID;
	status = hv_do_hypercall(control, input, output);
	ret = output[0];
	local_irq_restore(irq_flags);
	if (!hv_result_success(status)) {
		pr_err("failed to get vp id from apic id %d, status %#llx\n",
		       apic_id, status);
		return -EINVAL;
	}
	return ret;
}
static int hv_vtl_wakeup_secondary_cpu(int apicid, unsigned long start_eip)
{
	int vp_id;
	pr_debug("Bringing up CPU with APIC ID %d in VTL2...\n", apicid);
	vp_id = hv_vtl_apicid_to_vp_id(apicid);
	if (vp_id < 0) {
		pr_err("Couldn't find CPU with APIC ID %d\n", apicid);
		return -EINVAL;
	}
	if (vp_id > ms_hyperv.max_vp_index) {
		pr_err("Invalid CPU id %d for APIC ID %d\n", vp_id, apicid);
		return -EINVAL;
	}
	return hv_vtl_bringup_vcpu(vp_id, start_eip);
}
static int __init hv_vtl_early_init(void)
{
	/*
	 * cpu_feature_enabled() returns the runtime feature support,
	 * and this is the earliest point at which it can be used.
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVE))
		panic("XSAVE has to be disabled as it is not supported by this module.\n"
			  "Please add 'noxsave' to the kernel command line.\n");
	real_mode_header = &hv_vtl_real_mode_header;
	apic_update_callback(wakeup_secondary_cpu_64, hv_vtl_wakeup_secondary_cpu);
	return 0;
}
early_initcall(hv_vtl_early_init);
 | 
	linux-master | 
	arch/x86/hyperv/hv_vtl.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
#include <linux/acpi.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
#include <linux/minmax.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <asm/apic.h>
#include <asm/trace/hyperv.h>
/*
 * See struct hv_deposit_memory. The first u64 is partition ID, the rest
 * are GPAs.
 */
#define HV_DEPOSIT_MAX (HV_HYP_PAGE_SIZE / sizeof(u64) - 1)
/* Deposits exact number of pages. Must be called with interrupts enabled.  */
int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
{
	struct page **pages, *page;
	int *counts;
	int num_allocations;
	int i, j, page_count;
	int order;
	u64 status;
	int ret;
	u64 base_pfn;
	struct hv_deposit_memory *input_page;
	unsigned long flags;
	if (num_pages > HV_DEPOSIT_MAX)
		return -E2BIG;
	if (!num_pages)
		return 0;
	/* One buffer for page pointers and counts */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	pages = page_address(page);
	counts = kcalloc(HV_DEPOSIT_MAX, sizeof(int), GFP_KERNEL);
	if (!counts) {
		free_page((unsigned long)pages);
		return -ENOMEM;
	}
	/* Allocate all the pages before disabling interrupts */
	i = 0;
	while (num_pages) {
		/* Find highest order we can actually allocate */
		order = 31 - __builtin_clz(num_pages);
		while (1) {
			pages[i] = alloc_pages_node(node, GFP_KERNEL, order);
			if (pages[i])
				break;
			if (!order) {
				ret = -ENOMEM;
				num_allocations = i;
				goto err_free_allocations;
			}
			--order;
		}
		split_page(pages[i], order);
		counts[i] = 1 << order;
		num_pages -= counts[i];
		i++;
	}
	num_allocations = i;
	local_irq_save(flags);
	input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
	input_page->partition_id = partition_id;
	/* Populate gpa_page_list - these will fit on the input page */
	for (i = 0, page_count = 0; i < num_allocations; ++i) {
		base_pfn = page_to_pfn(pages[i]);
		for (j = 0; j < counts[i]; ++j, ++page_count)
			input_page->gpa_page_list[page_count] = base_pfn + j;
	}
	status = hv_do_rep_hypercall(HVCALL_DEPOSIT_MEMORY,
				     page_count, 0, input_page, NULL);
	local_irq_restore(flags);
	if (!hv_result_success(status)) {
		pr_err("Failed to deposit pages: %lld\n", status);
		ret = hv_result(status);
		goto err_free_allocations;
	}
	ret = 0;
	goto free_buf;
err_free_allocations:
	for (i = 0; i < num_allocations; ++i) {
		base_pfn = page_to_pfn(pages[i]);
		for (j = 0; j < counts[i]; ++j)
			__free_page(pfn_to_page(base_pfn + j));
	}
free_buf:
	free_page((unsigned long)pages);
	kfree(counts);
	return ret;
}
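/*
 * Ask the hypervisor to add a logical processor. On
 * HV_STATUS_INSUFFICIENT_MEMORY, deposit one more page and retry.
 */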
int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
{
	struct hv_add_logical_processor_in *input;
	struct hv_add_logical_processor_out *output;
	u64 status;
	unsigned long flags;
	int ret = HV_STATUS_SUCCESS;
	int pxm = node_to_pxm(node);
	/*
	 * When adding a logical processor, the hypervisor may return
	 * HV_STATUS_INSUFFICIENT_MEMORY. When that happens, we deposit more
	 * pages and retry.
	 */
	do {
		local_irq_save(flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		/* We don't do anything with the output right now */
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);
		input->lp_index = lp_index;
		input->apic_id = apic_id;
		input->flags = 0;
		input->proximity_domain_info.domain_id = pxm;
		input->proximity_domain_info.flags.reserved = 0;
		input->proximity_domain_info.flags.proximity_info_valid = 1;
		input->proximity_domain_info.flags.proximity_preferred = 1;
		status = hv_do_hypercall(HVCALL_ADD_LOGICAL_PROCESSOR,
					 input, output);
		local_irq_restore(flags);
		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (!hv_result_success(status)) {
				pr_err("%s: cpu %u apic ID %u, %lld\n", __func__,
				       lp_index, apic_id, status);
				ret = hv_result(status);
			}
			break;
		}
		ret = hv_call_deposit_pages(node, hv_current_partition_id, 1);
	} while (!ret);
	return ret;
}
int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
{
	struct hv_create_vp *input;
	u64 status;
	unsigned long irq_flags;
	int ret = HV_STATUS_SUCCESS;
	int pxm = node_to_pxm(node);
	/* Root VPs don't seem to need pages deposited */
	if (partition_id != hv_current_partition_id) {
		/* The value 90 is empirically determined. It may change. */
		ret = hv_call_deposit_pages(node, partition_id, 90);
		if (ret)
			return ret;
	}
	do {
		local_irq_save(irq_flags);
		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->flags = flags;
		input->subnode_type = HvSubnodeAny;
		if (node != NUMA_NO_NODE) {
			input->proximity_domain_info.domain_id = pxm;
			input->proximity_domain_info.flags.reserved = 0;
			input->proximity_domain_info.flags.proximity_info_valid = 1;
			input->proximity_domain_info.flags.proximity_preferred = 1;
		} else {
			input->proximity_domain_info.as_uint64 = 0;
		}
		status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
		local_irq_restore(irq_flags);
		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (!hv_result_success(status)) {
				pr_err("%s: vcpu %u, lp %u, %lld\n", __func__,
				       vp_index, flags, status);
				ret = hv_result(status);
			}
			break;
		}
		ret = hv_call_deposit_pages(node, partition_id, 1);
	} while (!ret);
	return ret;
}
 | 
	linux-master | 
	arch/x86/hyperv/hv_proc.c | 
| 
	#define pr_fmt(fmt)  "Hyper-V: " fmt
#include <linux/hyperv.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/fpu/api.h>
#include <asm/mshyperv.h>
#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#define CREATE_TRACE_POINTS
#include <asm/trace/hyperv.h>
/* Each gva in gva_list encodes up to 4096 pages to flush */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
				      const struct flush_tlb_info *info);
/*
 * Fills in gva_list starting from offset. Returns the number of items added.
 */
static inline int fill_gva_list(u64 gva_list[], int offset,
				unsigned long start, unsigned long end)
{
	int gva_n = offset;
	unsigned long cur = start, diff;
	do {
		diff = end > cur ? end - cur : 0;
		gva_list[gva_n] = cur & PAGE_MASK;
		/*
		 * Lower 12 bits encode the number of additional
		 * pages to flush (in addition to the 'cur' page).
		 */
		if (diff >= HV_TLB_FLUSH_UNIT) {
			gva_list[gva_n] |= ~PAGE_MASK;
			cur += HV_TLB_FLUSH_UNIT;
		}  else if (diff) {
			gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
			cur = end;
		}
		gva_n++;
	} while (cur < end);
	return gva_n - offset;
}
static bool cpu_is_lazy(int cpu)
{
	return per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
}
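/*
 * Paravirt replacement for flush_tlb_multi(): translate the cpumask and
 * address range into HVCALL_FLUSH_VIRTUAL_ADDRESS_{SPACE,LIST}(_EX)
 * hypercalls, falling back to the native IPI-based flush whenever a
 * hypercall cannot be used.
 */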
static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
				   const struct flush_tlb_info *info)
{
	int cpu, vcpu, gva_n, max_gvas;
	struct hv_tlb_flush *flush;
	u64 status;
	unsigned long flags;
	bool do_lazy = !info->freed_tables;
	trace_hyperv_mmu_flush_tlb_multi(cpus, info);
	if (!hv_hypercall_pg)
		goto do_native;
	local_irq_save(flags);
	flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto do_native;
	}
	if (info->mm) {
		/*
		 * AddressSpace argument must match the CR3 with PCID bits
		 * stripped out.
		 */
		flush->address_space = virt_to_phys(info->mm->pgd);
		flush->address_space &= CR3_ADDR_MASK;
		flush->flags = 0;
	} else {
		flush->address_space = 0;
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
	}
	flush->processor_mask = 0;
	if (cpumask_equal(cpus, cpu_present_mask)) {
		flush->flags |= HV_FLUSH_ALL_PROCESSORS;
	} else {
		/*
		 * From the supplied CPU set we need to figure out if we can get
		 * away with cheaper HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}
		 * hypercalls. This is possible when the highest VP number in
		 * the set is < 64. As VP numbers are usually in ascending order
		 * and match Linux CPU ids, here is an optimization: we check
		 * the VP number for the highest bit in the supplied set first
		 * so we can quickly find out if using *_EX hypercalls is a
		 * must. We will also check all VP numbers when walking the
		 * supplied CPU set to remain correct in all cases.
		 */
		cpu = cpumask_last(cpus);
		if (cpu < nr_cpumask_bits && hv_cpu_number_to_vp_number(cpu) >= 64)
			goto do_ex_hypercall;
		for_each_cpu(cpu, cpus) {
			if (do_lazy && cpu_is_lazy(cpu))
				continue;
			vcpu = hv_cpu_number_to_vp_number(cpu);
			if (vcpu == VP_INVAL) {
				local_irq_restore(flags);
				goto do_native;
			}
			if (vcpu >= 64)
				goto do_ex_hypercall;
			__set_bit(vcpu, (unsigned long *)
				  &flush->processor_mask);
		}
		/* nothing to flush if 'processor_mask' ends up being empty */
		if (!flush->processor_mask) {
			local_irq_restore(flags);
			return;
		}
	}
	/*
	 * We can flush no more than max_gvas entries with one hypercall.
	 * Flush the whole address space if we were asked to do more.
	 */
	max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
	if (info->end == TLB_FLUSH_ALL) {
		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
					 flush, NULL);
	} else if (info->end &&
		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
					 flush, NULL);
	} else {
		gva_n = fill_gva_list(flush->gva_list, 0,
				      info->start, info->end);
		status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
					     gva_n, 0, flush, NULL);
	}
	goto check_status;
do_ex_hypercall:
	status = hyperv_flush_tlb_others_ex(cpus, info);
check_status:
	local_irq_restore(flags);
	if (hv_result_success(status))
		return;
do_native:
	native_flush_tlb_multi(cpus, info);
}
static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
				      const struct flush_tlb_info *info)
{
	int nr_bank = 0, max_gvas, gva_n;
	struct hv_tlb_flush_ex *flush;
	u64 status;
	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
		return HV_STATUS_INVALID_PARAMETER;
	flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
	if (info->mm) {
		/*
		 * AddressSpace argument must match the CR3 with PCID bits
		 * stripped out.
		 */
		flush->address_space = virt_to_phys(info->mm->pgd);
		flush->address_space &= CR3_ADDR_MASK;
		flush->flags = 0;
	} else {
		flush->address_space = 0;
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
	}
	flush->hv_vp_set.valid_bank_mask = 0;
	flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	nr_bank = cpumask_to_vpset_skip(&flush->hv_vp_set, cpus,
			info->freed_tables ? NULL : cpu_is_lazy);
	if (nr_bank < 0)
		return HV_STATUS_INVALID_PARAMETER;
	/*
	 * We can flush no more than max_gvas entries with one hypercall.
	 * Flush the whole address space if we were asked to do more.
	 */
	max_gvas =
		(PAGE_SIZE - sizeof(*flush) - nr_bank *
		 sizeof(flush->hv_vp_set.bank_contents[0])) /
		sizeof(flush->gva_list[0]);
	if (info->end == TLB_FLUSH_ALL) {
		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
		status = hv_do_rep_hypercall(
			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
			0, nr_bank, flush, NULL);
	} else if (info->end &&
		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
		status = hv_do_rep_hypercall(
			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
			0, nr_bank, flush, NULL);
	} else {
		gva_n = fill_gva_list(flush->gva_list, nr_bank,
				      info->start, info->end);
		status = hv_do_rep_hypercall(
			HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
			gva_n, nr_bank, flush, NULL);
	}
	return status;
}
void hyperv_setup_mmu_ops(void)
{
	if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
		return;
	pr_info("Using hypercall for remote TLB flush\n");
	pv_ops.mmu.flush_tlb_multi = hyperv_flush_tlb_multi;
	pv_ops.mmu.tlb_remove_table = tlb_remove_table;
}
 | 
	linux-master | 
	arch/x86/hyperv/mmu.c | 
| 
	/*
 * Copyright 2003 PathScale, Inc.
 *
 * Licensed under the GPL
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/utsname.h>
#include <asm/current.h>
#include <asm/ptrace.h>
#include <asm/sysrq.h>
void show_regs(struct pt_regs *regs)
{
	printk("\n");
	print_modules();
	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current),
		current->comm, print_tainted(), init_utsname()->release);
	printk(KERN_INFO "RIP: %04lx:%pS\n", PT_REGS_CS(regs) & 0xffff,
	       (void *)PT_REGS_IP(regs));
	printk(KERN_INFO "RSP: %016lx  EFLAGS: %08lx\n", PT_REGS_SP(regs),
	       PT_REGS_EFLAGS(regs));
	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       PT_REGS_AX(regs), PT_REGS_BX(regs), PT_REGS_CX(regs));
	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       PT_REGS_DX(regs), PT_REGS_SI(regs), PT_REGS_DI(regs));
	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
	       PT_REGS_BP(regs), PT_REGS_R8(regs), PT_REGS_R9(regs));
	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
	       PT_REGS_R10(regs), PT_REGS_R11(regs), PT_REGS_R12(regs));
	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
	       PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs));
}
 | 
	linux-master | 
	arch/x86/um/sysrq_64.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
#include <linux/syscalls.h>
#include <os.h>
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return -EINVAL;
}
 | 
	linux-master | 
	arch/x86/um/syscalls_32.c | 
| 
	/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <[email protected]>
 * Licensed under the GPL
 */
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/ptrace-abi.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>
/*
 * If needed we can detect when it's uninitialized.
 *
 * These are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;
int do_set_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;
	cpu = get_cpu();
	ret = os_set_thread_area(info, userspace_pid[cpu]);
	put_cpu();
	if (ret)
		printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
		       "index = %d\n", ret, info->entry_number);
	return ret;
}
int do_get_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;
	cpu = get_cpu();
	ret = os_get_thread_area(info, userspace_pid[cpu]);
	put_cpu();
	if (ret)
		printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, "
		       "index = %d\n", ret, info->entry_number);
	return ret;
}
/*
 * sys_get_thread_area: get a yet unused TLS descriptor index.
 * XXX: Consider leaving one free slot for glibc usage in the first place. This
 * must
 * be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct* task)
{
	struct thread_struct *t = &task->thread;
	int idx;
	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}
static inline void clear_user_desc(struct user_desc* info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));
	/*
	 * Check the LDT_empty or the i386 sys_get_thread_area code - we obtain
	 * indeed an empty user_desc.
	 */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}
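/*
 * Illustrative sketch of the postcondition (not part of the original file):
 *
 *	struct user_desc info;
 *	clear_user_desc(&info);
 *	WARN_ON(!LDT_empty(&info));
 *
 * LDT_empty() holds because base/limit are zeroed by the memset and the
 * two bits set above are exactly the ones an empty descriptor carries.
 */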
#define O_FORCE 1
static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;
	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct* curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];
		/*
		 * Actually, now if it wasn't flushed it gets cleared and
		 * flushed to the host, which will clear it.
		 */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}
		if (!(flags & O_FORCE) && curr->flushed)
			continue;
		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;
		curr->flushed = 1;
	}
out:
	return ret;
}
/*
 * Verify if we need to do a flush for the new process, i.e. if there are any
 * present desc's, only if they haven't been flushed.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;
	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct* curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
		/*
		 * Can't test curr->present, we may need to clear a descriptor
		 * which had a value.
		 */
		if (curr->flushed)
			continue;
		ret = 1;
		break;
	}
	return ret;
}
/*
 * On a newly forked process, the TLS descriptors haven't yet been flushed. So
 * we mark them as such and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;
	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct* curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
		/*
		 * Still correct to do this, if it wasn't present on the host it
		 * will remain as flushed as it was.
		 */
		if (!curr->present)
			continue;
		curr->flushed = 0;
	}
}
/*
 * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
 * common host process. So this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was discussed
 * for SMP support) this won't be needed.
 *
 * And this will not need to be used when (and if) we add support for the host
 * SKAS patch.
 */
int arch_switch_tls(struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;
	/*
	 * We have no need whatsoever to switch TLS for kernel threads; beyond
	 * that, that would also result in us calling os_set_thread_area with
	 * userspace_pid[cpu] == 0, which gives an error.
	 */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);
	return 0;
}
static int set_tls_entry(struct task_struct* task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;
	return 0;
}
int arch_set_tls(struct task_struct *new, unsigned long tls)
{
	struct user_desc info;
	int idx, ret = -EFAULT;
	if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
		goto out;
	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;
	idx = info.entry_number;
	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}
/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;
	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;
	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;
out:
	/*
	 * Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
				"without flushed TLS.", current->pid);
	}
	return 0;
clear:
	/*
	 * When the TLS entry has not been set, the values read to user in the
	 * tls_array are 0 (because it's cleared at boot, see
	 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}
SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;
	if (!host_supports_tls)
		return -ENOSYS;
	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;
	idx = info.entry_number;
	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose for him.*/
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}
	ret = do_set_thread_area(&info);
	if (ret)
		return ret;
	return set_tls_entry(current, &info, idx, 1);
}
/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load, and this differs from
 * i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;
	if (!host_supports_tls)
		return -EIO;
	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;
	return set_tls_entry(child, &info, idx, 0);
}
SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;
	if (!host_supports_tls)
		return -ENOSYS;
	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;
	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;
	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;
out:
	return ret;
}
/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
		struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;
	if (!host_supports_tls)
		return -EIO;
	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;
	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;
out:
	return ret;
}
/*
 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
 * if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk(KERN_CONT "i386");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk(KERN_CONT "x86_64");
			break;
		}
		printk(KERN_CONT " (GDT indexes %d to %d)\n",
		       host_gdt_entry_tls_min,
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
	} else
		printk(KERN_ERR "  Host TLS support NOT detected! "
				"TLS support inside UML will not work\n");
	return 0;
}
__initcall(__setup_host_supports_tls);
 | 
	linux-master | 
	arch/x86/um/tls_32.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stddef.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/user.h>
#define __FRAME_OFFSETS
#include <linux/ptrace.h>
#include <asm/types.h>
#include <linux/kbuild.h>
#define DEFINE_LONGS(sym, val)	\
	COMMENT(#val " / sizeof(unsigned long)");	\
	DEFINE(sym, val / sizeof(unsigned long))
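/*
 * DEFINE() comes from <linux/kbuild.h>: it emits an asm marker line that
 * the asm-offsets machinery turns into a #define in the generated header.
 * As a sketch of the expansion (generated names assumed from kbuild):
 *
 *	DEFINE_LONGS(HOST_IP, RIP);
 *
 * ends up as roughly
 *
 *	#define HOST_IP (RIP / sizeof(unsigned long))
 *
 * i.e. on x86_64, where RIP is byte offset 128 in the ptrace frame,
 * HOST_IP becomes the word index 16.
 */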
void foo(void)
{
#ifdef __i386__
	DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_fpregs_struct));
	DEFINE_LONGS(HOST_FPX_SIZE, sizeof(struct user_fpxregs_struct));
	DEFINE(HOST_IP, EIP);
	DEFINE(HOST_SP, UESP);
	DEFINE(HOST_EFLAGS, EFL);
	DEFINE(HOST_AX, EAX);
	DEFINE(HOST_BX, EBX);
	DEFINE(HOST_CX, ECX);
	DEFINE(HOST_DX, EDX);
	DEFINE(HOST_SI, ESI);
	DEFINE(HOST_DI, EDI);
	DEFINE(HOST_BP, EBP);
	DEFINE(HOST_CS, CS);
	DEFINE(HOST_SS, SS);
	DEFINE(HOST_DS, DS);
	DEFINE(HOST_FS, FS);
	DEFINE(HOST_ES, ES);
	DEFINE(HOST_GS, GS);
	DEFINE(HOST_ORIG_AX, ORIG_EAX);
#else
#ifdef FP_XSTATE_MAGIC1
	DEFINE_LONGS(HOST_FP_SIZE, 2696);
#else
	DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
#endif
	DEFINE_LONGS(HOST_BX, RBX);
	DEFINE_LONGS(HOST_CX, RCX);
	DEFINE_LONGS(HOST_DI, RDI);
	DEFINE_LONGS(HOST_SI, RSI);
	DEFINE_LONGS(HOST_DX, RDX);
	DEFINE_LONGS(HOST_BP, RBP);
	DEFINE_LONGS(HOST_AX, RAX);
	DEFINE_LONGS(HOST_R8, R8);
	DEFINE_LONGS(HOST_R9, R9);
	DEFINE_LONGS(HOST_R10, R10);
	DEFINE_LONGS(HOST_R11, R11);
	DEFINE_LONGS(HOST_R12, R12);
	DEFINE_LONGS(HOST_R13, R13);
	DEFINE_LONGS(HOST_R14, R14);
	DEFINE_LONGS(HOST_R15, R15);
	DEFINE_LONGS(HOST_ORIG_AX, ORIG_RAX);
	DEFINE_LONGS(HOST_CS, CS);
	DEFINE_LONGS(HOST_SS, SS);
	DEFINE_LONGS(HOST_EFLAGS, EFLAGS);
#if 0
	DEFINE_LONGS(HOST_FS, FS);
	DEFINE_LONGS(HOST_GS, GS);
	DEFINE_LONGS(HOST_DS, DS);
	DEFINE_LONGS(HOST_ES, ES);
#endif
	DEFINE_LONGS(HOST_IP, RIP);
	DEFINE_LONGS(HOST_SP, RSP);
#endif
	DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct));
	DEFINE(UM_POLLIN, POLLIN);
	DEFINE(UM_POLLPRI, POLLPRI);
	DEFINE(UM_POLLOUT, POLLOUT);
	DEFINE(UM_PROT_READ, PROT_READ);
	DEFINE(UM_PROT_WRITE, PROT_WRITE);
	DEFINE(UM_PROT_EXEC, PROT_EXEC);
}
 | 
	linux-master | 
	arch/x86/um/user-offsets.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
#include <linux/elf.h>
#include <linux/coredump.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/elf.h>
Elf32_Half elf_core_extra_phdrs(struct coredump_params *cprm)
{
	return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
}
int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
{
	if ( vsyscall_ehdr ) {
		const struct elfhdr *const ehdrp =
			(struct elfhdr *) vsyscall_ehdr;
		const struct elf_phdr *const phdrp =
			(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
		int i;
		Elf32_Off ofs = 0;
		for (i = 0; i < ehdrp->e_phnum; ++i) {
			struct elf_phdr phdr = phdrp[i];
			if (phdr.p_type == PT_LOAD) {
				ofs = phdr.p_offset = offset;
				offset += phdr.p_filesz;
			} else {
				phdr.p_offset += ofs;
			}
			phdr.p_paddr = 0; /* match other core phdrs */
			if (!dump_emit(cprm, &phdr, sizeof(phdr)))
				return 0;
		}
	}
	return 1;
}
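/*
 * Offset fixup above, as a sketch: when a PT_LOAD phdr is reached, its
 * p_offset is rewritten to the current core-file cursor ('offset', where
 * elf_core_write_extra_data() will later emit the segment bytes) and that
 * new base is remembered in 'ofs'; subsequent non-PT_LOAD phdrs get 'ofs'
 * added to their p_offset so they stay positioned relative to the load
 * segment.
 */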
int elf_core_write_extra_data(struct coredump_params *cprm)
{
	if ( vsyscall_ehdr ) {
		const struct elfhdr *const ehdrp =
			(struct elfhdr *) vsyscall_ehdr;
		const struct elf_phdr *const phdrp =
			(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
		int i;
		for (i = 0; i < ehdrp->e_phnum; ++i) {
			if (phdrp[i].p_type == PT_LOAD) {
				void *addr = (void *) phdrp[i].p_vaddr;
				size_t filesz = phdrp[i].p_filesz;
				if (!dump_emit(cprm, addr, filesz))
					return 0;
			}
		}
	}
	return 1;
}
size_t elf_core_extra_data_size(struct coredump_params *cprm)
{
	if ( vsyscall_ehdr ) {
		const struct elfhdr *const ehdrp =
			(struct elfhdr *)vsyscall_ehdr;
		const struct elf_phdr *const phdrp =
			(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
		int i;
		for (i = 0; i < ehdrp->e_phnum; ++i)
			if (phdrp[i].p_type == PT_LOAD)
				return (size_t) phdrp[i].p_filesz;
	}
	return 0;
}
 | 
	linux-master | 
	arch/x86/um/elfcore.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * System call table for UML/x86-64, copied from arch/x86/kernel/syscall_*.c
 * with some changes for UML.
 */
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <asm/syscall.h>
/*
 * Below you can see, in terms of #define's, the differences between the x86-64
 * and the UML syscall table.
 */
/* Not going to be implemented by UML, since we have no hardware. */
#define sys_iopl sys_ni_syscall
#define sys_ioperm sys_ni_syscall
#define __SYSCALL(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#include <asm/syscalls_64.h>
#undef __SYSCALL
#define __SYSCALL(nr, sym) sym,
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
#include <asm/syscalls_64.h>
};
int syscall_table_size = sizeof(sys_call_table);
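/*
 * Sketch of the two-pass expansion above (using read(2), syscall 0 on
 * x86-64, as the example): the first #include of <asm/syscalls_64.h> sees
 *
 *	__SYSCALL(0, sys_read)
 *
 * and expands it to a prototype,
 *
 *	extern asmlinkage long sys_read(unsigned long, ..., unsigned long);
 *
 * while the second pass redefines __SYSCALL to emit "sys_read,", so the
 * very same line becomes the initializer for slot 0 of sys_call_table.
 */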
 | 
	linux-master | 
	arch/x86/um/sys_call_table_64.c | 
| 
	/*
 * Copyright 2003 PathScale, Inc.
 *
 * Licensed under the GPL
 */
#include <sysdep/ptrace.h>
void arch_check_bugs(void)
{
}
void arch_examine_signal(int sig, struct uml_pt_regs *regs)
{
}
 | 
	linux-master | 
	arch/x86/um/bugs_64.c | 
| 
	/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/ptrace-abi.h>
#include <registers.h>
#include <skas.h>
extern int arch_switch_tls(struct task_struct *to);
void arch_switch_to(struct task_struct *to)
{
	int err = arch_switch_tls(to);
	if (!err)
		return;
	if (err != -EINVAL)
		printk(KERN_WARNING "arch_switch_tls failed, errno %d, "
		       "not EINVAL\n", -err);
	else
		printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n");
}
int is_syscall(unsigned long addr)
{
	unsigned short instr;
	int n;
	n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
	if (n) {
		/* access_process_vm() grants access to vsyscall and stub,
		 * while copy_from_user doesn't. Maybe access_process_vm is
		 * slow, but that doesn't matter, since it will be called only
		 * in case of singlestepping, if copy_from_user failed.
		 */
		n = access_process_vm(current, addr, &instr, sizeof(instr),
				FOLL_FORCE);
		if (n != sizeof(instr)) {
			printk(KERN_ERR "is_syscall : failed to read "
			       "instruction from 0x%lx\n", addr);
			return 1;
		}
	}
	/* int 0x80 or sysenter */
	return (instr == 0x80cd) || (instr == 0x340f);
}
/* determines which flags the user has access to. */
/* 1 = access 0 = no access */
#define FLAG_MASK 0x00044dd5
static const int reg_offsets[] = {
	[EBX] = HOST_BX,
	[ECX] = HOST_CX,
	[EDX] = HOST_DX,
	[ESI] = HOST_SI,
	[EDI] = HOST_DI,
	[EBP] = HOST_BP,
	[EAX] = HOST_AX,
	[DS] = HOST_DS,
	[ES] = HOST_ES,
	[FS] = HOST_FS,
	[GS] = HOST_GS,
	[EIP] = HOST_IP,
	[CS] = HOST_CS,
	[EFL] = HOST_EFLAGS,
	[UESP] = HOST_SP,
	[SS] = HOST_SS,
	[ORIG_EAX] = HOST_ORIG_AX,
};
int putreg(struct task_struct *child, int regno, unsigned long value)
{
	regno >>= 2;
	switch (regno) {
	case EBX:
	case ECX:
	case EDX:
	case ESI:
	case EDI:
	case EBP:
	case EAX:
	case EIP:
	case UESP:
		break;
	case ORIG_EAX:
		/* Update the syscall number. */
		UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
		break;
	case FS:
		if (value && (value & 3) != 3)
			return -EIO;
		break;
	case GS:
		if (value && (value & 3) != 3)
			return -EIO;
		break;
	case DS:
	case ES:
		if (value && (value & 3) != 3)
			return -EIO;
		value &= 0xffff;
		break;
	case SS:
	case CS:
		if ((value & 3) != 3)
			return -EIO;
		value &= 0xffff;
		break;
	case EFL:
		value &= FLAG_MASK;
		child->thread.regs.regs.gp[HOST_EFLAGS] |= value;
		return 0;
	default :
		panic("Bad register in putreg() : %d\n", regno);
	}
	child->thread.regs.regs.gp[reg_offsets[regno]] = value;
	return 0;
}
int poke_user(struct task_struct *child, long addr, long data)
{
	if ((addr & 3) || addr < 0)
		return -EIO;
	if (addr < MAX_REG_OFFSET)
		return putreg(child, addr, data);
	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
		 (addr <= offsetof(struct user, u_debugreg[7]))) {
		addr -= offsetof(struct user, u_debugreg[0]);
		addr = addr >> 2;
		if ((addr == 4) || (addr == 5))
			return -EIO;
		child->thread.arch.debugregs[addr] = data;
		return 0;
	}
	return -EIO;
}
unsigned long getreg(struct task_struct *child, int regno)
{
	unsigned long mask = ~0UL;
	regno >>= 2;
	switch (regno) {
	case FS:
	case GS:
	case DS:
	case ES:
	case SS:
	case CS:
		mask = 0xffff;
		break;
	case EIP:
	case UESP:
	case EAX:
	case EBX:
	case ECX:
	case EDX:
	case ESI:
	case EDI:
	case EBP:
	case EFL:
	case ORIG_EAX:
		break;
	default:
		panic("Bad register in getreg() : %d\n", regno);
	}
	return mask & child->thread.regs.regs.gp[reg_offsets[regno]];
}
/* read the word at location addr in the USER area. */
int peek_user(struct task_struct *child, long addr, long data)
{
	unsigned long tmp;
	if ((addr & 3) || addr < 0)
		return -EIO;
	tmp = 0;  /* Default return condition */
	if (addr < MAX_REG_OFFSET) {
		tmp = getreg(child, addr);
	}
	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
		 (addr <= offsetof(struct user, u_debugreg[7]))) {
		addr -= offsetof(struct user, u_debugreg[0]);
		addr = addr >> 2;
		tmp = child->thread.arch.debugregs[addr];
	}
	return put_user(tmp, (unsigned long __user *) data);
}
static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
	int err, n, cpu = task_cpu(child);
	struct user_i387_struct fpregs;
	err = save_i387_registers(userspace_pid[cpu],
				  (unsigned long *) &fpregs);
	if (err)
		return err;
	n = copy_to_user(buf, &fpregs, sizeof(fpregs));
	if(n > 0)
		return -EFAULT;
	return n;
}
static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
	int n, cpu = task_cpu(child);
	struct user_i387_struct fpregs;
	n = copy_from_user(&fpregs, buf, sizeof(fpregs));
	if (n > 0)
		return -EFAULT;
	return restore_i387_registers(userspace_pid[cpu],
				    (unsigned long *) &fpregs);
}
static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
{
	int err, n, cpu = task_cpu(child);
	struct user_fxsr_struct fpregs;
	err = save_fpx_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
	if (err)
		return err;
	n = copy_to_user(buf, &fpregs, sizeof(fpregs));
	if(n > 0)
		return -EFAULT;
	return n;
}
static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
{
	int n, cpu = task_cpu(child);
	struct user_fxsr_struct fpregs;
	n = copy_from_user(&fpregs, buf, sizeof(fpregs));
	if (n > 0)
		return -EFAULT;
	return restore_fpx_registers(userspace_pid[cpu],
				     (unsigned long *) &fpregs);
}
long subarch_ptrace(struct task_struct *child, long request,
		    unsigned long addr, unsigned long data)
{
	int ret = -EIO;
	void __user *datap = (void __user *) data;
	switch (request) {
	case PTRACE_GETFPREGS: /* Get the child FPU state. */
		ret = get_fpregs(datap, child);
		break;
	case PTRACE_SETFPREGS: /* Set the child FPU state. */
		ret = set_fpregs(datap, child);
		break;
	case PTRACE_GETFPXREGS: /* Get the child FPU state. */
		ret = get_fpxregs(datap, child);
		break;
	case PTRACE_SETFPXREGS: /* Set the child FPU state. */
		ret = set_fpxregs(datap, child);
		break;
	default:
		ret = -EIO;
	}
	return ret;
}
 | 
	linux-master | 
	arch/x86/um/ptrace_32.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Richard Weinberger <[email protected]>
 */
#include <linux/mm.h>
#include <asm/elf.h>
static struct vm_area_struct gate_vma;
static int __init gate_vma_init(void)
{
	if (!FIXADDR_USER_START)
		return 0;
	vma_init(&gate_vma, NULL);
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	vm_flags_init(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC);
	gate_vma.vm_page_prot = PAGE_READONLY;
	return 0;
}
__initcall(gate_vma_init);
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return FIXADDR_USER_START ? &gate_vma : NULL;
}
int in_gate_area_no_mm(unsigned long addr)
{
	if (!FIXADDR_USER_START)
		return 0;
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
	return 0;
}
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(mm);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}
 | 
	linux-master | 
	arch/x86/um/mem_32.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Richard Weinberger <[email protected]>
 * Mostly copied from arch/x86/lib/delay.c
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/param.h>
void __delay(unsigned long loops)
{
	asm volatile(
		"test %0,%0\n"
		"jz 3f\n"
		"jmp 1f\n"
		".align 16\n"
		"1: jmp 2f\n"
		".align 16\n"
		"2: dec %0\n"
		" jnz 2b\n"
		"3: dec %0\n"
		: /* we don't need output */
		: "a" (loops)
	);
}
EXPORT_SYMBOL(__delay);
inline void __const_udelay(unsigned long xloops)
{
	int d0;
	xloops *= 4;
	asm("mull %%edx"
		: "=d" (xloops), "=&a" (d0)
		: "1" (xloops), "0"
		(loops_per_jiffy * (HZ/4)));
	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);
void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
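/*
 * The magic constants above are 32.32 fixed-point scale factors:
 * 0x10c7 == 4295 == ceil(2^32 / 10^6) and 0x5 == ceil(2^32 / 10^9).
 * Following __const_udelay() for 'usecs' microseconds:
 *
 *	xloops = usecs * (2^32 / 10^6)
 *	xloops *= 4
 *	loops  = (xloops * loops_per_jiffy * HZ/4) >> 32	(the mull)
 *	       = usecs * loops_per_jiffy * HZ / 10^6
 *
 * which is exactly the number of __delay() iterations that take 'usecs'
 * microseconds at the calibrated loop rate.
 */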
 | 
	linux-master | 
	arch/x86/um/delay.c | 
| 
	/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <errno.h>
#include <ptrace_user.h>
int ptrace_getregs(long pid, unsigned long *regs_out)
{
	if (ptrace(PTRACE_GETREGS, pid, 0, regs_out) < 0)
		return -errno;
	return 0;
}
int ptrace_setregs(long pid, unsigned long *regs)
{
	if (ptrace(PTRACE_SETREGS, pid, 0, regs) < 0)
		return -errno;
	return 0;
}
 | 
	linux-master | 
	arch/x86/um/ptrace_user.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * System call table for UML/i386, copied from arch/x86/kernel/syscall_*.c
 * with some changes for UML.
 */
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <asm/syscall.h>
/*
 * Below you can see, in terms of #define's, the differences between the x86-64
 * and the UML syscall table.
 */
/* Not going to be implemented by UML, since we have no hardware. */
#define sys_iopl sys_ni_syscall
#define sys_ioperm sys_ni_syscall
#define sys_vm86old sys_ni_syscall
#define sys_vm86 sys_ni_syscall
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
#define __SYSCALL(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#include <asm/syscalls_32.h>
#undef __SYSCALL
#define __SYSCALL(nr, sym) sym,
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
#include <asm/syscalls_32.h>
};
int syscall_table_size = sizeof(sys_call_table);
 | 
	linux-master | 
	arch/x86/um/sys_call_table_32.c | 
| 
	/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <signal.h>
#include <kern_util.h>
#include <longjmp.h>
#include <sysdep/ptrace.h>
#include <generated/asm-offsets.h>
/* Set during early boot */
static int host_has_cmov = 1;
static jmp_buf cmov_test_return;
static void cmov_sigill_test_handler(int sig)
{
	host_has_cmov = 0;
	longjmp(cmov_test_return, 1);
}
void arch_check_bugs(void)
{
	struct sigaction old, new;
	printk(UM_KERN_INFO "Checking for host processor cmov support...");
	new.sa_handler = cmov_sigill_test_handler;
	/* Make sure that SIGILL is enabled after the handler longjmps back */
	new.sa_flags = SA_NODEFER;
	sigemptyset(&new.sa_mask);
	sigaction(SIGILL, &new, &old);
	if (setjmp(cmov_test_return) == 0) {
		unsigned long foo = 0;
		__asm__ __volatile__("cmovz %0, %1" : "=r" (foo) : "0" (foo));
		printk(UM_KERN_CONT "Yes\n");
	} else
		printk(UM_KERN_CONT "No\n");
	sigaction(SIGILL, &old, &new);
}
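/*
 * The probe above is the usual setjmp/SIGILL idiom for feature detection:
 * install a handler that longjmps past the instruction under test, so an
 * unsupported opcode is reported instead of killing the process.  A
 * minimal user-space sketch of the same pattern (illustrative only, not
 * this file's code):
 *
 *	static sigjmp_buf env;
 *	static void on_sigill(int sig) { siglongjmp(env, 1); }
 *
 *	signal(SIGILL, on_sigill);
 *	if (sigsetjmp(env, 1) == 0) {
 *		unsigned long x = 0;
 *		asm volatile("cmovz %0, %0" : "+r" (x));
 *		puts("cmov supported");
 *	} else {
 *		puts("cmov not supported");
 *	}
 */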
void arch_examine_signal(int sig, struct uml_pt_regs *regs)
{
	unsigned char tmp[2];
	/*
	 * This is testing for a cmov (0x0f 0x4x) instruction causing a
	 * SIGILL in init.
	 */
	if ((sig != SIGILL) || (get_current_pid() != 1))
		return;
	if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) {
		printk(UM_KERN_ERR "SIGILL in init, could not read "
		       "instructions!\n");
		return;
	}
	if ((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40))
		return;
	if (host_has_cmov == 0)
		printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
		       "processor doesn't implement.  Boot a filesystem "
		       "compiled for older processors");
	else if (host_has_cmov == 1)
		printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
		       "processor claims to implement");
	else
		printk(UM_KERN_ERR "Bad value for host_has_cmov (%d)",
			host_has_cmov);
}
 | 
	linux-master | 
	arch/x86/um/bugs_32.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
#include <linux/sched.h>
#include <asm/ptrace-abi.h>
void clear_flushed_tls(struct task_struct *task)
{
}
int arch_set_tls(struct task_struct *t, unsigned long tls)
{
	/*
	 * If CLONE_SETTLS is set, we need to save the thread id
	 * so it can be set during context switches.
	 */
	t->thread.arch.fs = tls;
	return 0;
}
 | 
	linux-master | 
	arch/x86/um/tls_64.c | 
| 
	/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>
static inline int modify_ldt (int func, void *ptr, unsigned long bytecount)
{
	return syscall(__NR_modify_ldt, func, ptr, bytecount);
}
static long write_ldt_entry(struct mm_id *mm_idp, int func,
		     struct user_desc *desc, void **addr, int done)
{
	long res;
	void *stub_addr;
	BUILD_BUG_ON(sizeof(*desc) % sizeof(long));
	res = syscall_stub_data(mm_idp, (unsigned long *)desc,
				sizeof(*desc) / sizeof(long),
				addr, &stub_addr);
	if (!res) {
		unsigned long args[] = { func,
					 (unsigned long)stub_addr,
					 sizeof(*desc),
					 0, 0, 0 };
		res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
				       0, addr, done);
	}
	return res;
}
/*
 * In skas mode, we hold our own ldt data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * for arch/i386/kernel/ldt.c
 * The routines copied and modified in part are:
 * - read_ldt
 * - read_default_ldt
 * - write_ldt
 * - sys_modify_ldt_skas
 */
static int read_ldt(void __user * ptr, unsigned long bytecount)
{
	int i, err = 0;
	unsigned long size;
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
	if (!ldt->entry_count)
		goto out;
	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
	err = bytecount;
	mutex_lock(&ldt->lock);
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
		size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
		if (size > bytecount)
			size = bytecount;
		if (copy_to_user(ptr, ldt->u.entries, size))
			err = -EFAULT;
		bytecount -= size;
		ptr += size;
	}
	else {
		for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
		     i++) {
			size = PAGE_SIZE;
			if (size > bytecount)
				size = bytecount;
			if (copy_to_user(ptr, ldt->u.pages[i], size)) {
				err = -EFAULT;
				break;
			}
			bytecount -= size;
			ptr += size;
		}
	}
	mutex_unlock(&ldt->lock);
	if (bytecount == 0 || err == -EFAULT)
		goto out;
	if (clear_user(ptr, bytecount))
		err = -EFAULT;
out:
	return err;
}
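/*
 * The two-tier layout read back above keeps small LDTs inline
 * (u.entries, up to LDT_DIRECT_ENTRIES descriptors) and spills larger
 * ones to whole pages.  As a sketch, entry n of a spilled LDT lives at
 *
 *	ldt->u.pages[n / LDT_ENTRIES_PER_PAGE][n % LDT_ENTRIES_PER_PAGE]
 *
 * which is the indexing write_ldt() below uses when it updates a single
 * descriptor.
 */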
static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
	int err;
	if (bytecount > 5*LDT_ENTRY_SIZE)
		bytecount = 5*LDT_ENTRY_SIZE;
	err = bytecount;
	/*
	 * UML doesn't support lcall7 and lcall27.
	 * So, we don't really have a default ldt, but emulate
	 * an empty ldt of common host default ldt size.
	 */
	if (clear_user(ptr, bytecount))
		err = -EFAULT;
	return err;
}
static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
{
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
	struct mm_id * mm_idp = &current->mm->context.id;
	int i, err;
	struct user_desc ldt_info;
	struct ldt_entry entry0, *ldt_p;
	void *addr = NULL;
	err = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	err = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;
	err = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (func == 1)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}
	mutex_lock(&ldt->lock);
	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
	if (err)
		goto out_unlock;
	if (ldt_info.entry_number >= ldt->entry_count &&
	    ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
		for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
		     i++) {
			if (i == 0)
				memcpy(&entry0, ldt->u.entries,
				       sizeof(entry0));
			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!ldt->u.pages[i]) {
				err = -ENOMEM;
				/* Undo the change in host */
				memset(&ldt_info, 0, sizeof(ldt_info));
				write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
				goto out_unlock;
			}
			if (i == 0) {
				memcpy(ldt->u.pages[0], &entry0,
				       sizeof(entry0));
				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
			}
			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
		}
	}
	if (ldt->entry_count <= ldt_info.entry_number)
		ldt->entry_count = ldt_info.entry_number + 1;
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
		ldt_p = ldt->u.entries + ldt_info.entry_number;
	else
		ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
			ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
	if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
	   (func == 1 || LDT_empty(&ldt_info))) {
		ldt_p->a = 0;
		ldt_p->b = 0;
	}
	else{
		if (func == 1)
			ldt_info.useable = 0;
		ldt_p->a = LDT_entry_a(&ldt_info);
		ldt_p->b = LDT_entry_b(&ldt_info);
	}
	err = 0;
out_unlock:
	mutex_unlock(&ldt->lock);
out:
	return err;
}
static long do_modify_ldt_skas(int func, void __user *ptr,
			       unsigned long bytecount)
{
	int ret = -ENOSYS;
	switch (func) {
		case 0:
			ret = read_ldt(ptr, bytecount);
			break;
		case 1:
		case 0x11:
			ret = write_ldt(ptr, bytecount, func);
			break;
		case 2:
			ret = read_default_ldt(ptr, bytecount);
			break;
	}
	return ret;
}
static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = {0, -1};
static short * host_ldt_entries = NULL;
static void ldt_get_host_info(void)
{
	long ret;
	struct ldt_entry * ldt;
	short *tmp;
	int i, size, k, order;
	spin_lock(&host_ldt_lock);
	if (host_ldt_entries != NULL) {
		spin_unlock(&host_ldt_lock);
		return;
	}
	host_ldt_entries = dummy_list+1;
	spin_unlock(&host_ldt_lock);
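	/* Smallest 'order' such that (1 << order) pages cover LDT_PAGES_MAX */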
	for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
		;
	ldt = (struct ldt_entry *)
	      __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (ldt == NULL) {
		printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
		       "for host ldt\n");
		return;
	}
	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
	if (ret < 0) {
		printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
		goto out_free;
	}
	if (ret == 0) {
		/* default_ldt is active, simply write an empty entry 0 */
		host_ldt_entries = dummy_list;
		goto out_free;
	}
	for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			size++;
	}
	if (size < ARRAY_SIZE(dummy_list))
		host_ldt_entries = dummy_list;
	else {
		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);
		if (tmp == NULL) {
			printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
			       "host ldt list\n");
			goto out_free;
		}
		host_ldt_entries = tmp;
	}
	for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			host_ldt_entries[k++] = i;
	}
	host_ldt_entries[k] = -1;
out_free:
	free_pages((unsigned long)ldt, order);
}
long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
{
	struct user_desc desc;
	short * num_p;
	int i;
	long page, err=0;
	void *addr = NULL;
	mutex_init(&new_mm->arch.ldt.lock);
	if (!from_mm) {
		memset(&desc, 0, sizeof(desc));
		/*
		 * Now we try to retrieve info about the ldt, we
		 * inherited from the host. All ldt-entries found
		 * will be reset in the following loop
		 */
		ldt_get_host_info();
		for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
			desc.entry_number = *num_p;
			err = write_ldt_entry(&new_mm->id, 1, &desc,
					      &addr, *(num_p + 1) == -1);
			if (err)
				break;
		}
		new_mm->arch.ldt.entry_count = 0;
		goto out;
	}
	/*
	 * Our local LDT is used to supply the data for
	 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
	 * i.e., we have to use the stub for modify_ldt, which
	 * can't handle the big read buffer of up to 64kB.
	 */
	mutex_lock(&from_mm->arch.ldt.lock);
	if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
		memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
		       sizeof(new_mm->arch.ldt.u.entries));
	else {
		i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while (i-->0) {
			page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!page) {
				err = -ENOMEM;
				break;
			}
			new_mm->arch.ldt.u.pages[i] =
				(struct ldt_entry *) page;
			memcpy(new_mm->arch.ldt.u.pages[i],
			       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
		}
	}
	new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
	mutex_unlock(&from_mm->arch.ldt.lock);
    out:
	return err;
}
void free_ldt(struct mm_context *mm)
{
	int i;
	if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
		i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while (i-- > 0)
			free_page((long) mm->arch.ldt.u.pages[i]);
	}
	mm->arch.ldt.entry_count = 0;
}
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	/* See non-um modify_ldt() for why we do this cast */
	return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
}
 | 
	linux-master | 
	arch/x86/um/ldt.c | 
| 
	/*
 * Copyright (C) 2001 - 2003 Jeff Dike ([email protected])
 * Licensed under the GPL
 */
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kallsyms.h>
#include <asm/ptrace.h>
#include <asm/sysrq.h>
/* This is declared by <linux/sched.h> */
void show_regs(struct pt_regs *regs)
{
        printk("\n");
        printk("EIP: %04lx:[<%08lx>] CPU: %d %s", 
	       0xffff & PT_REGS_CS(regs), PT_REGS_IP(regs),
	       smp_processor_id(), print_tainted());
        if (PT_REGS_CS(regs) & 3)
                printk(" ESP: %04lx:%08lx", 0xffff & PT_REGS_SS(regs),
		       PT_REGS_SP(regs));
        printk(" EFLAGS: %08lx\n    %s\n", PT_REGS_EFLAGS(regs),
	       print_tainted());
        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
               PT_REGS_AX(regs), PT_REGS_BX(regs), 
	       PT_REGS_CX(regs), PT_REGS_DX(regs));
        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
	       PT_REGS_SI(regs), PT_REGS_DI(regs), PT_REGS_BP(regs));
        printk(" DS: %04lx ES: %04lx\n",
	       0xffff & PT_REGS_DS(regs), 
	       0xffff & PT_REGS_ES(regs));
}
 | 
	linux-master | 
	arch/x86/um/sysrq_32.c | 
| 
	/*
 * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 *
 * Licensed under the GPL
 */
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/prctl.h> /* XXX This should get the constants from libc */
#include <registers.h>
#include <os.h>
long arch_prctl(struct task_struct *task, int option,
		unsigned long __user *arg2)
{
	unsigned long *ptr = arg2, tmp;
	long ret;
	int pid = task->mm->context.id.u.pid;
	/*
	 * With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to
	 * be safe), we need to call arch_prctl on the host because
	 * setting %fs may result in something else happening (like a
	 * GDT or thread.fs being set instead).  So, we let the host
	 * fiddle the registers and thread struct and restore the
	 * registers afterwards.
	 *
	 * So, the saved registers are stored to the process (this is
	 * needed because a stub may have been the last thing to run),
	 * arch_prctl is run on the host, then the registers are read
	 * back.
	 */
	switch (option) {
	case ARCH_SET_FS:
	case ARCH_SET_GS:
		ret = restore_pid_registers(pid, &current->thread.regs.regs);
		if (ret)
			return ret;
		break;
	case ARCH_GET_FS:
	case ARCH_GET_GS:
		/*
		 * With these two, we read to a local pointer and
		 * put_user it to the userspace pointer that we were
		 * given.  If addr isn't valid (because it hasn't been
		 * faulted in or is just bogus), we want put_user to
		 * fault it in (or return -EFAULT) instead of having
		 * the host return -EFAULT.
		 */
		ptr = &tmp;
	}
	ret = os_arch_prctl(pid, option, ptr);
	if (ret)
		return ret;
	switch (option) {
	case ARCH_SET_FS:
		current->thread.arch.fs = (unsigned long) ptr;
		ret = save_registers(pid, &current->thread.regs.regs);
		break;
	case ARCH_SET_GS:
		ret = save_registers(pid, &current->thread.regs.regs);
		break;
	case ARCH_GET_FS:
		ret = put_user(tmp, arg2);
		break;
	case ARCH_GET_GS:
		ret = put_user(tmp, arg2);
		break;
	}
	return ret;
}
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return arch_prctl(current, option, (unsigned long __user *) arg2);
}
void arch_switch_to(struct task_struct *to)
{
	if ((to->thread.arch.fs == 0) || (to->mm == NULL))
		return;
	arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs);
}
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
 | 
	linux-master | 
	arch/x86/um/syscalls_64.c | 
| 
	/*
 * Copyright (C) 2003 PathScale, Inc.
 * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/kernel.h>
#include <asm/unistd.h>
#include <linux/uaccess.h>
#include <asm/ucontext.h>
#include <frame_kern.h>
#include <registers.h>
#include <skas.h>
#ifdef CONFIG_X86_32
/*
 * FPU tag word conversions.
 */
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */
	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
	return tmp;
}
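/*
 * Worked example for the conversion above: i387 tag pairs are 00 valid,
 * 01 zero, 10 special, 11 empty, while fxsr keeps one bit per register
 * (1 == in use).  So
 *
 *	twd_i387_to_fxsr(0xffff) == 0x00	(all empty)
 *	twd_i387_to_fxsr(0x0000) == 0xff	(all valid)
 *	twd_i387_to_fxsr(0xff03) == 0x0e	(st1-st3 in use)
 */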
static inline unsigned long twd_fxsr_to_i387(struct user_fxsr_struct *fxsave)
{
	struct _fpxreg *st = NULL;
	unsigned long twd = (unsigned long) fxsave->twd;
	unsigned long tag;
	unsigned long ret = 0xffff0000;
	int i;
#define FPREG_ADDR(f, n)	((char *)&(f)->st_space + (n) * 16)
	for (i = 0; i < 8; i++) {
		if (twd & 0x1) {
			st = (struct _fpxreg *) FPREG_ADDR(fxsave, i);
			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = 2;		/* Special */
				break;
			case 0x0000:
				if ( !st->significand[0] &&
				     !st->significand[1] &&
				     !st->significand[2] &&
				     !st->significand[3] ) {
					tag = 1;	/* Zero */
				} else {
					tag = 2;	/* Special */
				}
				break;
			default:
				if (st->significand[3] & 0x8000) {
					tag = 0;	/* Valid */
				} else {
					tag = 2;	/* Special */
				}
				break;
			}
		} else {
			tag = 3;			/* Empty */
		}
		ret |= (tag << (2 * i));
		twd = twd >> 1;
	}
	return ret;
}
static int convert_fxsr_to_user(struct _fpstate __user *buf,
				struct user_fxsr_struct *fxsave)
{
	unsigned long env[7];
	struct _fpreg __user *to;
	struct _fpxreg *from;
	int i;
	env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
	env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
	env[2] = twd_fxsr_to_i387(fxsave);
	env[3] = fxsave->fip;
	env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
	env[5] = fxsave->foo;
	env[6] = fxsave->fos;
	if (__copy_to_user(buf, env, 7 * sizeof(unsigned long)))
		return 1;
	to = &buf->_st[0];
	from = (struct _fpxreg *) &fxsave->st_space[0];
	for (i = 0; i < 8; i++, to++, from++) {
		unsigned long __user *t = (unsigned long __user *)to;
		unsigned long *f = (unsigned long *)from;
		if (__put_user(*f, t) ||
				__put_user(*(f + 1), t + 1) ||
				__put_user(from->exponent, &to->exponent))
			return 1;
	}
	return 0;
}
static int convert_fxsr_from_user(struct user_fxsr_struct *fxsave,
				  struct _fpstate __user *buf)
{
	unsigned long env[7];
	struct _fpxreg *to;
	struct _fpreg __user *from;
	int i;
	if (copy_from_user( env, buf, 7 * sizeof(long)))
		return 1;
	fxsave->cwd = (unsigned short)(env[0] & 0xffff);
	fxsave->swd = (unsigned short)(env[1] & 0xffff);
	fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
	fxsave->fip = env[3];
	fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
	fxsave->fcs = (env[4] & 0xffff);
	fxsave->foo = env[5];
	fxsave->fos = env[6];
	to = (struct _fpxreg *) &fxsave->st_space[0];
	from = &buf->_st[0];
	for (i = 0; i < 8; i++, to++, from++) {
		unsigned long *t = (unsigned long *)to;
		unsigned long __user *f = (unsigned long __user *)from;
		if (__get_user(*t, f) ||
		    __get_user(*(t + 1), f + 1) ||
		    __get_user(to->exponent, &from->exponent))
			return 1;
	}
	return 0;
}
extern int have_fpx_regs;
#endif
static int copy_sc_from_user(struct pt_regs *regs,
			     struct sigcontext __user *from)
{
	struct sigcontext sc;
	int err, pid;
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;
	err = copy_from_user(&sc, from, sizeof(sc));
	if (err)
		return err;
#define GETREG(regno, regname) regs->regs.gp[HOST_##regno] = sc.regname
#ifdef CONFIG_X86_32
	GETREG(GS, gs);
	GETREG(FS, fs);
	GETREG(ES, es);
	GETREG(DS, ds);
#endif
	GETREG(DI, di);
	GETREG(SI, si);
	GETREG(BP, bp);
	GETREG(SP, sp);
	GETREG(BX, bx);
	GETREG(DX, dx);
	GETREG(CX, cx);
	GETREG(AX, ax);
	GETREG(IP, ip);
#ifdef CONFIG_X86_64
	GETREG(R8, r8);
	GETREG(R9, r9);
	GETREG(R10, r10);
	GETREG(R11, r11);
	GETREG(R12, r12);
	GETREG(R13, r13);
	GETREG(R14, r14);
	GETREG(R15, r15);
#endif
	GETREG(CS, cs);
	GETREG(EFLAGS, flags);
#ifdef CONFIG_X86_32
	GETREG(SS, ss);
#endif
#undef GETREG
	pid = userspace_pid[current_thread_info()->cpu];
#ifdef CONFIG_X86_32
	if (have_fpx_regs) {
		struct user_fxsr_struct fpx;
		err = copy_from_user(&fpx,
			&((struct _fpstate __user *)sc.fpstate)->_fxsr_env[0],
				     sizeof(struct user_fxsr_struct));
		if (err)
			return 1;
		err = convert_fxsr_from_user(&fpx, (void *)sc.fpstate);
		if (err)
			return 1;
		err = restore_fpx_registers(pid, (unsigned long *) &fpx);
		if (err < 0) {
			printk(KERN_ERR "copy_sc_from_user - "
			       "restore_fpx_registers failed, errno = %d\n",
			       -err);
			return 1;
		}
	} else
#endif
	{
		err = copy_from_user(regs->regs.fp, (void *)sc.fpstate,
				     sizeof(struct _xstate));
		if (err)
			return 1;
	}
	return 0;
}
static int copy_sc_to_user(struct sigcontext __user *to,
			   struct _xstate __user *to_fp, struct pt_regs *regs,
			   unsigned long mask)
{
	struct sigcontext sc;
	struct faultinfo * fi = &current->thread.arch.faultinfo;
	int err, pid;
	memset(&sc, 0, sizeof(struct sigcontext));
#define PUTREG(regno, regname) sc.regname = regs->regs.gp[HOST_##regno]
#ifdef CONFIG_X86_32
	PUTREG(GS, gs);
	PUTREG(FS, fs);
	PUTREG(ES, es);
	PUTREG(DS, ds);
#endif
	PUTREG(DI, di);
	PUTREG(SI, si);
	PUTREG(BP, bp);
	PUTREG(SP, sp);
	PUTREG(BX, bx);
	PUTREG(DX, dx);
	PUTREG(CX, cx);
	PUTREG(AX, ax);
#ifdef CONFIG_X86_64
	PUTREG(R8, r8);
	PUTREG(R9, r9);
	PUTREG(R10, r10);
	PUTREG(R11, r11);
	PUTREG(R12, r12);
	PUTREG(R13, r13);
	PUTREG(R14, r14);
	PUTREG(R15, r15);
#endif
	sc.cr2 = fi->cr2;
	sc.err = fi->error_code;
	sc.trapno = fi->trap_no;
	PUTREG(IP, ip);
	PUTREG(CS, cs);
	PUTREG(EFLAGS, flags);
#ifdef CONFIG_X86_32
	PUTREG(SP, sp_at_signal);
	PUTREG(SS, ss);
#endif
#undef PUTREG
	sc.oldmask = mask;
	sc.fpstate = (unsigned long)to_fp;
	err = copy_to_user(to, &sc, sizeof(struct sigcontext));
	if (err)
		return 1;
	pid = userspace_pid[current_thread_info()->cpu];
#ifdef CONFIG_X86_32
	if (have_fpx_regs) {
		struct user_fxsr_struct fpx;
		err = save_fpx_registers(pid, (unsigned long *) &fpx);
		if (err < 0){
			printk(KERN_ERR "copy_sc_to_user - save_fpx_registers "
			       "failed, errno = %d\n", err);
			return 1;
		}
		err = convert_fxsr_to_user(&to_fp->fpstate, &fpx);
		if (err)
			return 1;
		err |= __put_user(fpx.swd, &to_fp->fpstate.status);
		err |= __put_user(X86_FXSR_MAGIC, &to_fp->fpstate.magic);
		if (err)
			return 1;
		if (copy_to_user(&to_fp->fpstate._fxsr_env[0], &fpx,
				 sizeof(struct user_fxsr_struct)))
			return 1;
	} else
#endif
	{
		if (copy_to_user(to_fp, regs->regs.fp, sizeof(struct _xstate)))
			return 1;
	}
	return 0;
}
#ifdef CONFIG_X86_32
static int copy_ucontext_to_user(struct ucontext __user *uc,
				 struct _xstate __user *fp, sigset_t *set,
				 unsigned long sp)
{
	int err = 0;
	err |= __save_altstack(&uc->uc_stack, sp);
	err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs, 0);
	err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set));
	return err;
}
struct sigframe
{
	char __user *pretcode;
	int sig;
	struct sigcontext sc;
	struct _xstate fpstate;
	unsigned long extramask[_NSIG_WORDS-1];
	char retcode[8];
};
struct rt_sigframe
{
	char __user *pretcode;
	int sig;
	struct siginfo __user *pinfo;
	void __user *puc;
	struct siginfo info;
	struct ucontext uc;
	struct _xstate fpstate;
	char retcode[8];
};
int setup_signal_stack_sc(unsigned long stack_top, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *mask)
{
	struct sigframe __user *frame;
	void __user *restorer;
	int err = 0, sig = ksig->sig;
	/* This is the same calculation as i386 - ((sp + 4) & 15) == 0 */
	stack_top = ((stack_top + 4) & -16UL) - 4;
	frame = (struct sigframe __user *) stack_top - 1;
	if (!access_ok(frame, sizeof(*frame)))
		return 1;
	restorer = frame->retcode;
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;
	err |= __put_user(restorer, &frame->pretcode);
	err |= __put_user(sig, &frame->sig);
	err |= copy_sc_to_user(&frame->sc, &frame->fpstate, regs, mask->sig[0]);
	if (_NSIG_WORDS > 1)
		err |= __copy_to_user(&frame->extramask, &mask->sig[1],
				      sizeof(frame->extramask));
	/*
	 * This is popl %eax ; movl $,%eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
	err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
	err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
	if (err)
		return err;
	PT_REGS_SP(regs) = (unsigned long) frame;
	PT_REGS_IP(regs) = (unsigned long) ksig->ka.sa.sa_handler;
	PT_REGS_AX(regs) = (unsigned long) sig;
	PT_REGS_DX(regs) = (unsigned long) 0;
	PT_REGS_CX(regs) = (unsigned long) 0;
	return 0;
}
int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *mask)
{
	struct rt_sigframe __user *frame;
	void __user *restorer;
	int err = 0, sig = ksig->sig;
	stack_top &= -8UL;
	frame = (struct rt_sigframe __user *) stack_top - 1;
	if (!access_ok(frame, sizeof(*frame)))
		return 1;
	restorer = frame->retcode;
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;
	err |= __put_user(restorer, &frame->pretcode);
	err |= __put_user(sig, &frame->sig);
	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
	err |= copy_ucontext_to_user(&frame->uc, &frame->fpstate, mask,
					PT_REGS_SP(regs));
	/*
	 * This is movl $,%eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
	err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
	err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
	if (err)
		return err;
	PT_REGS_SP(regs) = (unsigned long) frame;
	PT_REGS_IP(regs) = (unsigned long) ksig->ka.sa.sa_handler;
	PT_REGS_AX(regs) = (unsigned long) sig;
	PT_REGS_DX(regs) = (unsigned long) &frame->info;
	PT_REGS_CX(regs) = (unsigned long) &frame->uc;
	return 0;
}
long sys_sigreturn(void)
{
	unsigned long sp = PT_REGS_SP(&current->thread.regs);
	struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
	sigset_t set;
	struct sigcontext __user *sc = &frame->sc;
	int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
	if (copy_from_user(&set.sig[0], &sc->oldmask, sizeof(set.sig[0])) ||
	    copy_from_user(&set.sig[1], frame->extramask, sig_size))
		goto segfault;
	set_current_blocked(&set);
	if (copy_sc_from_user(&current->thread.regs, sc))
		goto segfault;
	/* Avoid ERESTART handling */
	PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
	return PT_REGS_SYSCALL_RET(&current->thread.regs);
 segfault:
	force_sig(SIGSEGV);
	return 0;
}
#else
struct rt_sigframe
{
	char __user *pretcode;
	struct ucontext uc;
	struct siginfo info;
	struct _xstate fpstate;
};
int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	struct rt_sigframe __user *frame;
	int err = 0, sig = ksig->sig;
	unsigned long fp_to;
	frame = (struct rt_sigframe __user *)
		round_down(stack_top - sizeof(struct rt_sigframe), 16);
	/* Subtract 128 for a red zone and 8 for proper alignment */
	frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8);
	if (!access_ok(frame, sizeof(*frame)))
		goto out;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		err |= copy_siginfo_to_user(&frame->info, &ksig->info);
		if (err)
			goto out;
	}
	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
	err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
			       set->sig[0]);
	fp_to = (unsigned long)&frame->fpstate;
	err |= __put_user(fp_to, &frame->uc.uc_mcontext.fpstate);
	if (sizeof(*set) == 16) {
		err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
		err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
	}
	else
		err |= __copy_to_user(&frame->uc.uc_sigmask, set,
				      sizeof(*set));
	/*
	 * Set up to return from userspace.  If provided, use a stub
	 * already in userspace.
	 */
	/* x86-64 should always use SA_RESTORER. */
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		err |= __put_user((void *)ksig->ka.sa.sa_restorer,
				  &frame->pretcode);
	else
		/* could use a vstub here */
		return err;
	if (err)
		return err;
	PT_REGS_SP(regs) = (unsigned long) frame;
	PT_REGS_DI(regs) = sig;
	/* In case the signal handler was declared without prototypes */
	PT_REGS_AX(regs) = 0;
	/*
	 * This also works for non SA_SIGINFO handlers because they expect the
	 * next argument after the signal number on the stack.
	 */
	PT_REGS_SI(regs) = (unsigned long) &frame->info;
	PT_REGS_DX(regs) = (unsigned long) &frame->uc;
	PT_REGS_IP(regs) = (unsigned long) ksig->ka.sa.sa_handler;
 out:
	return err;
}
#endif
long sys_rt_sigreturn(void)
{
	unsigned long sp = PT_REGS_SP(&current->thread.regs);
	struct rt_sigframe __user *frame =
		(struct rt_sigframe __user *)(sp - sizeof(long));
	struct ucontext __user *uc = &frame->uc;
	sigset_t set;
	if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
		goto segfault;
	set_current_blocked(&set);
	if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
		goto segfault;
	/* Avoid ERESTART handling */
	PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
	return PT_REGS_SYSCALL_RET(&current->thread.regs);
 segfault:
	force_sig(SIGSEGV);
	return 0;
}
 | 
	linux-master | 
	arch/x86/um/signal.c | 
| 
	/* 
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <sysdep/ptrace.h>
/* These two are from asm-um/uaccess.h and linux/module.h, check them. */
struct exception_table_entry
{
	unsigned long insn;
	unsigned long fixup;
};
const struct exception_table_entry *search_exception_tables(unsigned long add);
/* Compare this to arch/i386/mm/extable.c:fixup_exception() */
int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	fixup = search_exception_tables(address);
	if (fixup) {
		UPT_IP(regs) = fixup->fixup;
		return 1;
	}
	return 0;
}
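/*
 * The exception table consulted above is built at the fault sites
 * themselves.  A classic i386-style sketch (illustrative, not this file's
 * code): a user access records its faulting and fixup addresses in
 * __ex_table,
 *
 *	1:	movl (%ebx), %eax
 *		...
 *	3:	movl $-14, %eax
 *		.section __ex_table, "a"
 *		.long 1b, 3b
 *		.previous
 *
 * so a fault at 1b makes search_exception_tables() return an entry whose
 * ->fixup is 3b, and arch_fixup() resumes execution there.
 */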
 | 
	linux-master | 
	arch/x86/um/fault.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <asm/elf.h>
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == um_vdso_addr)
		return "[vdso]";
	return NULL;
}
 | 
	linux-master | 
	arch/x86/um/mem_64.c | 
| 
	/*
 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <sysdep/stub.h>
#include <sysdep/faultinfo.h>
#include <sysdep/mcontext.h>
#include <sys/ucontext.h>
void __attribute__ ((__section__ (".__syscall_stub")))
stub_segv_handler(int sig, siginfo_t *info, void *p)
{
	struct faultinfo *f = get_stub_data();
	ucontext_t *uc = p;
	GET_FAULTINFO_FROM_MC(*f, &uc->uc_mcontext);
	trap_myself();
}
 | 
	linux-master | 
	arch/x86/um/stub_segv.c | 
| 
	/*
 * Copyright 2003 PathScale, Inc.
 * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 *
 * Licensed under the GPL
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/errno.h>
#define __FRAME_OFFSETS
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <registers.h>
#include <asm/ptrace-abi.h>
/*
 * determines which flags the user has access to.
 * 1 = access 0 = no access
 */
#define FLAG_MASK 0x44dd5UL
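/*
 * Added annotation: 0x44dd5 covers CF(0), PF(2), AF(4), ZF(6), SF(7),
 * TF(8), DF(10), OF(11), NT(14) and AC(18) -- the arithmetic/status
 * flags plus the few control bits a debugger may legitimately toggle;
 * IF, IOPL and the VM/VIF/VIP bits stay kernel-controlled.
 */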
static const int reg_offsets[] =
{
	[R8 >> 3] = HOST_R8,
	[R9 >> 3] = HOST_R9,
	[R10 >> 3] = HOST_R10,
	[R11 >> 3] = HOST_R11,
	[R12 >> 3] = HOST_R12,
	[R13 >> 3] = HOST_R13,
	[R14 >> 3] = HOST_R14,
	[R15 >> 3] = HOST_R15,
	[RIP >> 3] = HOST_IP,
	[RSP >> 3] = HOST_SP,
	[RAX >> 3] = HOST_AX,
	[RBX >> 3] = HOST_BX,
	[RCX >> 3] = HOST_CX,
	[RDX >> 3] = HOST_DX,
	[RSI >> 3] = HOST_SI,
	[RDI >> 3] = HOST_DI,
	[RBP >> 3] = HOST_BP,
	[CS >> 3] = HOST_CS,
	[SS >> 3] = HOST_SS,
	[FS_BASE >> 3] = HOST_FS_BASE,
	[GS_BASE >> 3] = HOST_GS_BASE,
	[DS >> 3] = HOST_DS,
	[ES >> 3] = HOST_ES,
	[FS >> 3] = HOST_FS,
	[GS >> 3] = HOST_GS,
	[EFLAGS >> 3] = HOST_EFLAGS,
	[ORIG_RAX >> 3] = HOST_ORIG_AX,
};
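/*
 * Added annotation: the PTRACE_PEEKUSER/POKEUSER register offsets
 * (R8, RIP, EFLAGS, ...) are byte offsets into the 64-bit register
 * frame, so ">> 3" below turns them into indices for this table.
 */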
int putreg(struct task_struct *child, int regno, unsigned long value)
{
	switch (regno) {
	case R8:
	case R9:
	case R10:
	case R11:
	case R12:
	case R13:
	case R14:
	case R15:
	case RIP:
	case RSP:
	case RAX:
	case RBX:
	case RCX:
	case RDX:
	case RSI:
	case RDI:
	case RBP:
		break;
	case ORIG_RAX:
		/* Update the syscall number. */
		UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
		break;
	case FS:
	case GS:
	case DS:
	case ES:
	case SS:
	case CS:
		if (value && (value & 3) != 3)
			return -EIO;
		value &= 0xffff;
		break;
	case FS_BASE:
	case GS_BASE:
		if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
			return -EIO;
		break;
	case EFLAGS:
		value &= FLAG_MASK;
		child->thread.regs.regs.gp[HOST_EFLAGS] |= value;
		return 0;
	default:
		panic("Bad register in putreg(): %d\n", regno);
	}
	child->thread.regs.regs.gp[reg_offsets[regno >> 3]] = value;
	return 0;
}
int poke_user(struct task_struct *child, long addr, long data)
{
	if ((addr & 3) || addr < 0)
		return -EIO;
	if (addr < MAX_REG_OFFSET)
		return putreg(child, addr, data);
	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
		(addr <= offsetof(struct user, u_debugreg[7]))) {
		addr -= offsetof(struct user, u_debugreg[0]);
		addr = addr >> 3;
		if ((addr == 4) || (addr == 5))
			return -EIO;
		child->thread.arch.debugregs[addr] = data;
		return 0;
	}
	return -EIO;
}
unsigned long getreg(struct task_struct *child, int regno)
{
	unsigned long mask = ~0UL;
	switch (regno) {
	case R8:
	case R9:
	case R10:
	case R11:
	case R12:
	case R13:
	case R14:
	case R15:
	case RIP:
	case RSP:
	case RAX:
	case RBX:
	case RCX:
	case RDX:
	case RSI:
	case RDI:
	case RBP:
	case ORIG_RAX:
	case EFLAGS:
	case FS_BASE:
	case GS_BASE:
		break;
	case FS:
	case GS:
	case DS:
	case ES:
	case SS:
	case CS:
		mask = 0xffff;
		break;
	default:
		panic("Bad register in getreg: %d\n", regno);
	}
	return mask & child->thread.regs.regs.gp[reg_offsets[regno >> 3]];
}
int peek_user(struct task_struct *child, long addr, long data)
{
	/* read the word at location addr in the USER area. */
	unsigned long tmp;
	if ((addr & 3) || addr < 0)
		return -EIO;
	tmp = 0;  /* Default return condition */
	if (addr < MAX_REG_OFFSET)
		tmp = getreg(child, addr);
	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
		(addr <= offsetof(struct user, u_debugreg[7]))) {
		addr -= offsetof(struct user, u_debugreg[0]);
		/* u_debugreg entries are 8 bytes on x86_64; match poke_user */
		addr = addr >> 3;
		tmp = child->thread.arch.debugregs[addr];
	}
	return put_user(tmp, (unsigned long *) data);
}
/* XXX Mostly copied from sys-i386 */
int is_syscall(unsigned long addr)
{
	unsigned short instr;
	int n;
	n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
	if (n) {
		/*
		 * access_process_vm() grants access to vsyscall and stub,
		 * while copy_from_user doesn't. Maybe access_process_vm is
		 * slow, but that doesn't matter, since it will be called only
		 * in case of singlestepping, if copy_from_user failed.
		 */
		n = access_process_vm(current, addr, &instr, sizeof(instr),
				FOLL_FORCE);
		if (n != sizeof(instr)) {
			printk("is_syscall : failed to read instruction from "
			       "0x%lx\n", addr);
			return 1;
		}
	}
	/* "syscall": opcode bytes 0x0f 0x05, read as little-endian 0x050f */
	return instr == 0x050f;
}
static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
	int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
	struct user_i387_struct fpregs;
	err = save_i387_registers(userspace_pid[cpu],
				  (unsigned long *) &fpregs);
	if (err)
		return err;
	n = copy_to_user(buf, &fpregs, sizeof(fpregs));
	if (n > 0)
		return -EFAULT;
	return n;
}
static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
	int n, cpu = ((struct thread_info *) child->stack)->cpu;
	struct user_i387_struct fpregs;
	n = copy_from_user(&fpregs, buf, sizeof(fpregs));
	if (n > 0)
		return -EFAULT;
	return restore_i387_registers(userspace_pid[cpu],
				      (unsigned long *) &fpregs);
}
long subarch_ptrace(struct task_struct *child, long request,
		    unsigned long addr, unsigned long data)
{
	int ret = -EIO;
	void __user *datap = (void __user *) data;
	switch (request) {
	case PTRACE_GETFPREGS: /* Get the child FPU state. */
		ret = get_fpregs(datap, child);
		break;
	case PTRACE_SETFPREGS: /* Set the child FPU state. */
		ret = set_fpregs(datap, child);
		break;
	case PTRACE_ARCH_PRCTL:
		/* XXX Calls ptrace on the host - needs some SMP thinking */
		ret = arch_prctl(child, data, (void __user *) addr);
		break;
	}
	return ret;
}
 | 
	linux-master | 
	arch/x86/um/ptrace_64.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Richard Weinberger <[email protected]>
 *
 * This vDSO turns all calls into a syscall so that UML can trap them.
 */
/* Disable profiling for userspace code */
#define DISABLE_BRANCH_PROFILING
#include <linux/time.h>
#include <linux/getcpu.h>
#include <asm/unistd.h>
int __vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts)
{
	long ret;
	asm("syscall"
		: "=a" (ret)
		: "0" (__NR_clock_gettime), "D" (clock), "S" (ts)
		: "rcx", "r11", "memory");
	return ret;
}
int clock_gettime(clockid_t, struct __kernel_old_timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	long ret;
	asm("syscall"
		: "=a" (ret)
		: "0" (__NR_gettimeofday), "D" (tv), "S" (tz)
		: "rcx", "r11", "memory");
	return ret;
}
int gettimeofday(struct __kernel_old_timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
__kernel_old_time_t __vdso_time(__kernel_old_time_t *t)
{
	long secs;
	asm volatile("syscall"
		: "=a" (secs)
		: "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
	return secs;
}
__kernel_old_time_t time(__kernel_old_time_t *t) __attribute__((weak, alias("__vdso_time")));
long
__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
{
	/*
	 * UML does not support SMP, we can cheat here. :)
	 */
	if (cpu)
		*cpu = 0;
	if (node)
		*node = 0;
	return 0;
}
long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
	__attribute__((weak, alias("__vdso_getcpu")));
 | 
	linux-master | 
	arch/x86/um/vdso/um_vdso.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Richard Weinberger <[email protected]>
 */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/elf.h>
#include <linux/init.h>
static unsigned int __read_mostly vdso_enabled = 1;
unsigned long um_vdso_addr;
extern unsigned long task_size;
extern char vdso_start[], vdso_end[];
static struct page **vdsop;
static int __init init_vdso(void)
{
	struct page *um_vdso;
	BUG_ON(vdso_end - vdso_start > PAGE_SIZE);
	um_vdso_addr = task_size - PAGE_SIZE;
	vdsop = kmalloc(sizeof(struct page *), GFP_KERNEL);
	if (!vdsop)
		goto oom;
	um_vdso = alloc_page(GFP_KERNEL);
	if (!um_vdso) {
		kfree(vdsop);
		goto oom;
	}
	copy_page(page_address(um_vdso), vdso_start);
	*vdsop = um_vdso;
	return 0;
oom:
	printk(KERN_ERR "Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
subsys_initcall(init_vdso);
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int err;
	struct mm_struct *mm = current->mm;
	if (!vdso_enabled)
		return 0;
	if (mmap_write_lock_killable(mm))
		return -EINTR;
	err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
		VM_READ|VM_EXEC|
		VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
		vdsop);
	mmap_write_unlock(mm);
	return err;
}
 | 
	linux-master | 
	arch/x86/um/vdso/vma.c | 
| 
	/*
 * Copyright (C) 2007 Jeff Dike (jdike@{addtoit.com,linux.intel.com})
 * Licensed under the GPL
 */
#include <sys/ptrace.h>
#include <asm/ptrace.h>
int os_arch_prctl(int pid, int option, unsigned long *arg2)
{
	return ptrace(PTRACE_ARCH_PRCTL, pid, (unsigned long) arg2, option);
}
 | 
	linux-master | 
	arch/x86/um/os-Linux/prctl.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
#include <sys/ucontext.h>
#define __FRAME_OFFSETS
#include <asm/ptrace.h>
#include <sysdep/ptrace.h>
void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
{
#ifdef __i386__
#define COPY2(X,Y) regs->gp[X] = mc->gregs[REG_##Y]
#define COPY(X) regs->gp[X] = mc->gregs[REG_##X]
#define COPY_SEG(X) regs->gp[X] = mc->gregs[REG_##X] & 0xffff;
#define COPY_SEG_CPL3(X) regs->gp[X] = (mc->gregs[REG_##X] & 0xffff) | 3;
	COPY_SEG(GS); COPY_SEG(FS); COPY_SEG(ES); COPY_SEG(DS);
	COPY(EDI); COPY(ESI); COPY(EBP);
	COPY2(UESP, ESP); /* sic */
	COPY(EBX); COPY(EDX); COPY(ECX); COPY(EAX);
	COPY(EIP); COPY_SEG_CPL3(CS); COPY(EFL); COPY_SEG_CPL3(SS);
#else
#define COPY2(X,Y) regs->gp[X/sizeof(unsigned long)] = mc->gregs[REG_##Y]
#define COPY(X) regs->gp[X/sizeof(unsigned long)] = mc->gregs[REG_##X]
	COPY(R8); COPY(R9); COPY(R10); COPY(R11);
	COPY(R12); COPY(R13); COPY(R14); COPY(R15);
	COPY(RDI); COPY(RSI); COPY(RBP); COPY(RBX);
	COPY(RDX); COPY(RAX); COPY(RCX); COPY(RSP);
	COPY(RIP);
	COPY2(EFLAGS, EFL);
	COPY2(CS, CSGSFS);
	regs->gp[CS / sizeof(unsigned long)] &= 0xffff;
	regs->gp[CS / sizeof(unsigned long)] |= 3;
#endif
}
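/*
 * Added annotation: on x86_64 the register "offsets" (RBX, RIP, ...)
 * are byte offsets, so COPY(RBX) expands to
 * regs->gp[RBX / sizeof(unsigned long)] = mc->gregs[REG_RBX], mapping
 * each glibc mcontext slot onto UML's flat gp[] array.
 */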
 | 
	linux-master | 
	arch/x86/um/os-Linux/mcontext.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/unistd.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <sysdep/tls.h>
#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif
#ifndef PTRACE_SET_THREAD_AREA
#define PTRACE_SET_THREAD_AREA 26
#endif
/* Checks whether host supports TLS, and sets *tls_min according to the value
 * valid on the host.
 * i386 hosts have it == 6; x86_64 hosts have it == 12, for i386 emulation. */
void check_host_supports_tls(int *supports_tls, int *tls_min)
{
	/* Values for x86 and x86_64.*/
	int val[] = {GDT_ENTRY_TLS_MIN_I386, GDT_ENTRY_TLS_MIN_X86_64};
	int i;
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		user_desc_t info;
		info.entry_number = val[i];
		if (syscall(__NR_get_thread_area, &info) == 0) {
			*tls_min = val[i];
			*supports_tls = 1;
			return;
		} else {
			if (errno == EINVAL)
				continue;
			else if (errno == ENOSYS)
				*supports_tls = 0;
			return;
		}
	}
	*supports_tls = 0;
}
int os_set_thread_area(user_desc_t *info, int pid)
{
	int ret;
	ret = ptrace(PTRACE_SET_THREAD_AREA, pid, info->entry_number,
		     (unsigned long) info);
	if (ret < 0)
		ret = -errno;
	return ret;
}
int os_get_thread_area(user_desc_t *info, int pid)
{
	int ret;
	ret = ptrace(PTRACE_GET_THREAD_AREA, pid, info->entry_number,
		     (unsigned long) info);
	if (ret < 0)
		ret = -errno;
	return ret;
}
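/*
 * Illustrative sketch only (added): how a caller might populate a
 * user_desc before handing it to os_set_thread_area().  The field
 * names follow the host's <asm/ldt.h> struct user_desc; the concrete
 * values are assumptions for the example.
 */
static int demo_install_tls(int pid, int entry, unsigned int base)
{
	user_desc_t info;

	info.entry_number = entry;	/* e.g. the host's GDT_ENTRY_TLS_MIN */
	info.base_addr = base;
	info.limit = 0xfffff;		/* 4GB limit with limit_in_pages */
	info.seg_32bit = 1;
	info.contents = 0;		/* data segment, grows up */
	info.read_exec_only = 0;
	info.limit_in_pages = 1;
	info.seg_not_present = 0;
	info.useable = 1;

	return os_set_thread_area(&info, pid);
}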
 | 
	linux-master | 
	arch/x86/um/os-Linux/tls.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/mman.h>
#include <longjmp.h>
#ifdef __i386__
static jmp_buf buf;
static void segfault(int sig)
{
	longjmp(buf, 1);
}
static int page_ok(unsigned long page)
{
	unsigned long *address = (unsigned long *) (page << UM_KERN_PAGE_SHIFT);
	unsigned long n = ~0UL;
	void *mapped = NULL;
	int ok = 0;
	/*
	 * First see if the page is readable.  If it is, it may still
	 * be a VDSO, so we go on to see if it's writable.  If not
	 * then try mapping memory there.  If that fails, then we're
	 * still in the kernel area.  As a sanity check, we'll fail if
	 * the mmap succeeds, but gives us an address different from
	 * what we wanted.
	 */
	if (setjmp(buf) == 0)
		n = *address;
	else {
		mapped = mmap(address, UM_KERN_PAGE_SIZE,
			      PROT_READ | PROT_WRITE,
			      MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (mapped == MAP_FAILED)
			return 0;
		if (mapped != address)
			goto out;
	}
	/*
	 * Now, is it writeable?  If so, then we're in user address
	 * space.  If not, then try mprotecting it and try the write
	 * again.
	 */
	if (setjmp(buf) == 0) {
		*address = n;
		ok = 1;
		goto out;
	} else if (mprotect(address, UM_KERN_PAGE_SIZE,
			    PROT_READ | PROT_WRITE) != 0)
		goto out;
	if (setjmp(buf) == 0) {
		*address = n;
		ok = 1;
	}
 out:
	if (mapped != NULL)
		munmap(mapped, UM_KERN_PAGE_SIZE);
	return ok;
}
unsigned long os_get_top_address(void)
{
	struct sigaction sa, old;
	unsigned long bottom = 0;
	/*
	 * A 32-bit UML on a 64-bit host gets confused about the VDSO at
	 * 0xffffe000.  It is mapped, is readable, can be reprotected writeable
	 * and written.  However, exec discovers later that it can't be
	 * unmapped.  So, just set the highest address to be checked to just
	 * below it.  This might waste some address space on 4G/4G 32-bit
	 * hosts, but shouldn't hurt otherwise.
	 */
	unsigned long top = 0xffffd000 >> UM_KERN_PAGE_SHIFT;
	unsigned long test, original;
	printf("Locating the bottom of the address space ... ");
	fflush(stdout);
	/*
	 * We're going to be longjmping out of the signal handler, so
	 * SA_NODEFER needs to be set.
	 */
	sa.sa_handler = segfault;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_NODEFER;
	if (sigaction(SIGSEGV, &sa, &old)) {
		perror("os_get_top_address");
		exit(1);
	}
	/* Manually scan the address space, bottom-up, until we find
	 * the first valid page (or run out of them).
	 */
	for (bottom = 0; bottom < top; bottom++) {
		if (page_ok(bottom))
			break;
	}
	/* If the scan never found a valid page, we ran out of them. */
	if (bottom == top) {
		fprintf(stderr, "Unable to determine bottom of address "
			"space.\n");
		exit(1);
	}
	printf("0x%lx\n", bottom << UM_KERN_PAGE_SHIFT);
	printf("Locating the top of the address space ... ");
	fflush(stdout);
	original = bottom;
	/* This could happen with a 4G/4G split */
	if (page_ok(top))
		goto out;
	do {
		test = bottom + (top - bottom) / 2;
		if (page_ok(test))
			bottom = test;
		else
			top = test;
	} while (top - bottom > 1);
out:
	/* Restore the old SIGSEGV handling */
	if (sigaction(SIGSEGV, &old, NULL)) {
		perror("os_get_top_address");
		exit(1);
	}
	top <<= UM_KERN_PAGE_SHIFT;
	printf("0x%lx\n", top);
	return top;
}
#else
unsigned long os_get_top_address(void)
{
	/* The old value of CONFIG_TOP_ADDR */
	return 0x7fc0002000;
}
#endif
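/*
 * Illustrative sketch only (added): the generic shape of the bisection
 * os_get_top_address() runs above.  "usable" stands in for page_ok();
 * the invariant is that "bottom" always tests usable and "top" always
 * tests unusable, so the loop converges on the boundary.
 */
static unsigned long demo_bisect_top(unsigned long bottom, unsigned long top,
				     int (*usable)(unsigned long page))
{
	while (top - bottom > 1) {
		unsigned long mid = bottom + (top - bottom) / 2;

		if (usable(mid))
			bottom = mid;
		else
			top = mid;
	}
	return top;	/* first unusable page; the last usable one is top - 1 */
}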
 | 
	linux-master | 
	arch/x86/um/os-Linux/task_size.c | 
| 
	/*
 * Copyright (C) 2004 PathScale, Inc
 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <errno.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#ifdef __i386__
#include <sys/user.h>
#endif
#include <longjmp.h>
#include <sysdep/ptrace_user.h>
#include <sys/uio.h>
#include <asm/sigcontext.h>
#include <linux/elf.h>
#include <registers.h>
int have_xstate_support;
int save_i387_registers(int pid, unsigned long *fp_regs)
{
	if (ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs) < 0)
		return -errno;
	return 0;
}
int save_fp_registers(int pid, unsigned long *fp_regs)
{
#ifdef PTRACE_GETREGSET
	struct iovec iov;
	if (have_xstate_support) {
		iov.iov_base = fp_regs;
		iov.iov_len = FP_SIZE * sizeof(unsigned long);
		if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
			return -errno;
		return 0;
	} else
#endif
		return save_i387_registers(pid, fp_regs);
}
int restore_i387_registers(int pid, unsigned long *fp_regs)
{
	if (ptrace(PTRACE_SETFPREGS, pid, 0, fp_regs) < 0)
		return -errno;
	return 0;
}
int restore_fp_registers(int pid, unsigned long *fp_regs)
{
#ifdef PTRACE_SETREGSET
	struct iovec iov;
	if (have_xstate_support) {
		iov.iov_base = fp_regs;
		iov.iov_len = FP_SIZE * sizeof(unsigned long);
		if (ptrace(PTRACE_SETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
			return -errno;
		return 0;
	} else
#endif
		return restore_i387_registers(pid, fp_regs);
}
#ifdef __i386__
int have_fpx_regs = 1;
int save_fpx_registers(int pid, unsigned long *fp_regs)
{
	if (ptrace(PTRACE_GETFPXREGS, pid, 0, fp_regs) < 0)
		return -errno;
	return 0;
}
int restore_fpx_registers(int pid, unsigned long *fp_regs)
{
	if (ptrace(PTRACE_SETFPXREGS, pid, 0, fp_regs) < 0)
		return -errno;
	return 0;
}
int get_fp_registers(int pid, unsigned long *regs)
{
	if (have_fpx_regs)
		return save_fpx_registers(pid, regs);
	else
		return save_fp_registers(pid, regs);
}
int put_fp_registers(int pid, unsigned long *regs)
{
	if (have_fpx_regs)
		return restore_fpx_registers(pid, regs);
	else
		return restore_fp_registers(pid, regs);
}
void arch_init_registers(int pid)
{
	struct user_fpxregs_struct fpx_regs;
	int err;
	err = ptrace(PTRACE_GETFPXREGS, pid, 0, &fpx_regs);
	if (!err)
		return;
	if (errno != EIO)
		panic("check_ptrace : PTRACE_GETFPXREGS failed, errno = %d",
		      errno);
	have_fpx_regs = 0;
}
#else
int get_fp_registers(int pid, unsigned long *regs)
{
	return save_fp_registers(pid, regs);
}
int put_fp_registers(int pid, unsigned long *regs)
{
	return restore_fp_registers(pid, regs);
}
void arch_init_registers(int pid)
{
#ifdef PTRACE_GETREGSET
	void *fp_regs;
	struct iovec iov;
	fp_regs = malloc(FP_SIZE * sizeof(unsigned long));
	if (fp_regs == NULL)
		return;
	iov.iov_base = fp_regs;
	iov.iov_len = FP_SIZE * sizeof(unsigned long);
	if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) == 0)
		have_xstate_support = 1;
	free(fp_regs);
#endif
}
#endif
unsigned long get_thread_reg(int reg, jmp_buf *buf)
{
	switch (reg) {
#ifdef __i386__
	case HOST_IP:
		return buf[0]->__eip;
	case HOST_SP:
		return buf[0]->__esp;
	case HOST_BP:
		return buf[0]->__ebp;
#else
	case HOST_IP:
		return buf[0]->__rip;
	case HOST_SP:
		return buf[0]->__rsp;
	case HOST_BP:
		return buf[0]->__rbp;
#endif
	default:
		printk(UM_KERN_ERR "get_thread_regs - unknown register %d\n",
		       reg);
		return 0;
	}
}
 | 
	linux-master | 
	arch/x86/um/os-Linux/registers.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * Confidential Computing Platform Capability checks
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <[email protected]>
 */
#include <linux/export.h>
#include <linux/cc_platform.h>
#include <asm/coco.h>
#include <asm/processor.h>
enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
static u64 cc_mask __ro_after_init;
static bool noinstr intel_cc_platform_has(enum cc_attr attr)
{
	switch (attr) {
	case CC_ATTR_GUEST_UNROLL_STRING_IO:
	case CC_ATTR_HOTPLUG_DISABLED:
	case CC_ATTR_GUEST_MEM_ENCRYPT:
	case CC_ATTR_MEM_ENCRYPT:
		return true;
	default:
		return false;
	}
}
/*
 * Handle the SEV-SNP vTOM case where sme_me_mask is zero, and
 * the other levels of SME/SEV functionality, including C-bit
 * based SEV-SNP, are not enabled.
 */
static __maybe_unused __always_inline bool amd_cc_platform_vtom(enum cc_attr attr)
{
	switch (attr) {
	case CC_ATTR_GUEST_MEM_ENCRYPT:
	case CC_ATTR_MEM_ENCRYPT:
		return true;
	default:
		return false;
	}
}
/*
 * SME and SEV are very similar but they are not the same, so there are
 * times that the kernel will need to distinguish between SME and SEV. The
 * cc_platform_has() function is used for this.  When a distinction isn't
 * needed, the CC_ATTR_MEM_ENCRYPT attribute can be used.
 *
 * The trampoline code is a good example for this requirement.  Before
 * paging is activated, SME will access all memory as decrypted, but SEV
 * will access all memory as encrypted.  So, when APs are being brought
 * up under SME the trampoline area cannot be encrypted, whereas under SEV
 * the trampoline area must be encrypted.
 */
static bool noinstr amd_cc_platform_has(enum cc_attr attr)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
	if (sev_status & MSR_AMD64_SNP_VTOM)
		return amd_cc_platform_vtom(attr);
	switch (attr) {
	case CC_ATTR_MEM_ENCRYPT:
		return sme_me_mask;
	case CC_ATTR_HOST_MEM_ENCRYPT:
		return sme_me_mask && !(sev_status & MSR_AMD64_SEV_ENABLED);
	case CC_ATTR_GUEST_MEM_ENCRYPT:
		return sev_status & MSR_AMD64_SEV_ENABLED;
	case CC_ATTR_GUEST_STATE_ENCRYPT:
		return sev_status & MSR_AMD64_SEV_ES_ENABLED;
	/*
	 * With SEV, the rep string I/O instructions need to be unrolled
	 * but SEV-ES supports them through the #VC handler.
	 */
	case CC_ATTR_GUEST_UNROLL_STRING_IO:
		return (sev_status & MSR_AMD64_SEV_ENABLED) &&
			!(sev_status & MSR_AMD64_SEV_ES_ENABLED);
	case CC_ATTR_GUEST_SEV_SNP:
		return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
	default:
		return false;
	}
#else
	return false;
#endif
}
bool noinstr cc_platform_has(enum cc_attr attr)
{
	switch (cc_vendor) {
	case CC_VENDOR_AMD:
		return amd_cc_platform_has(attr);
	case CC_VENDOR_INTEL:
		return intel_cc_platform_has(attr);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(cc_platform_has);
u64 cc_mkenc(u64 val)
{
	/*
	 * Both AMD and Intel use a bit in the page table to indicate
	 * encryption status of the page.
	 *
	 * - for AMD, bit *set* means the page is encrypted
	 * - for AMD with vTOM and for Intel, *clear* means encrypted
	 */
	switch (cc_vendor) {
	case CC_VENDOR_AMD:
		if (sev_status & MSR_AMD64_SNP_VTOM)
			return val & ~cc_mask;
		else
			return val | cc_mask;
	case CC_VENDOR_INTEL:
		return val & ~cc_mask;
	default:
		return val;
	}
}
u64 cc_mkdec(u64 val)
{
	/* See comment in cc_mkenc() */
	switch (cc_vendor) {
	case CC_VENDOR_AMD:
		if (sev_status & MSR_AMD64_SNP_VTOM)
			return val | cc_mask;
		else
			return val & ~cc_mask;
	case CC_VENDOR_INTEL:
		return val | cc_mask;
	default:
		return val;
	}
}
EXPORT_SYMBOL_GPL(cc_mkdec);
__init void cc_set_mask(u64 mask)
{
	cc_mask = mask;
}
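/*
 * Illustrative worked example (added, not part of the file): with an
 * AMD C-bit in bit 51, i.e. cc_set_mask(BIT_ULL(51)), and no vTOM:
 *
 *   cc_mkenc(0x1000) == 0x0008000000001000   (C-bit set     => encrypted)
 *   cc_mkdec(0x0008000000001000) == 0x1000   (C-bit cleared => decrypted)
 *
 * For CC_VENDOR_INTEL the sense is inverted: cc_mkenc() clears the
 * shared bit and cc_mkdec() sets it.
 */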
 | 
	linux-master | 
	arch/x86/coco/core.c | 
| 
	#include <asm/tdx.h>
#include <asm/pgtable.h>
static unsigned long try_accept_one(phys_addr_t start, unsigned long len,
				    enum pg_level pg_level)
{
	unsigned long accept_size = page_level_size(pg_level);
	u64 tdcall_rcx;
	u8 page_size;
	if (!IS_ALIGNED(start, accept_size))
		return 0;
	if (len < accept_size)
		return 0;
	/*
	 * Pass the page physical address to the TDX module to accept the
	 * pending, private page.
	 *
	 * Bits 2:0 of RCX encode page size: 0 - 4K, 1 - 2M, 2 - 1G.
	 */
	switch (pg_level) {
	case PG_LEVEL_4K:
		page_size = 0;
		break;
	case PG_LEVEL_2M:
		page_size = 1;
		break;
	case PG_LEVEL_1G:
		page_size = 2;
		break;
	default:
		return 0;
	}
	tdcall_rcx = start | page_size;
	if (__tdx_module_call(TDX_ACCEPT_PAGE, tdcall_rcx, 0, 0, 0, NULL))
		return 0;
	return accept_size;
}
bool tdx_accept_memory(phys_addr_t start, phys_addr_t end)
{
	/*
	 * For shared->private conversion, accept the page using
	 * TDX_ACCEPT_PAGE TDX module call.
	 */
	while (start < end) {
		unsigned long len = end - start;
		unsigned long accept_size;
		/*
		 * Try larger accepts first. It gives the VMM a chance to keep
		 * 1G/2M Secure EPT entries where possible and speeds up the
		 * process by cutting the number of hypercalls (if successful).
		 */
		accept_size = try_accept_one(start, len, PG_LEVEL_1G);
		if (!accept_size)
			accept_size = try_accept_one(start, len, PG_LEVEL_2M);
		if (!accept_size)
			accept_size = try_accept_one(start, len, PG_LEVEL_4K);
		if (!accept_size)
			return false;
		start += accept_size;
	}
	return true;
}
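/*
 * Illustrative walk-through (added annotation): accepting the range
 * [0x1ff000, 0x601000) proceeds as one 4K page up to the 2M boundary
 * at 0x200000, then two 2M chunks while at least 2M remains, then one
 * 4K page for the unaligned tail; the 1G attempt never succeeds here
 * because the start is never 1G-aligned with at least 1G left.
 */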
 | 
	linux-master | 
	arch/x86/coco/tdx/tdx-shared.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */
#undef pr_fmt
#define pr_fmt(fmt)     "tdx: " fmt
#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>
/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1
/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1
/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))
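/*
 * Worked example (added annotation): exit_qual == 0x03f80008 decodes
 * as VE_GET_PORT_NUM() == 0x3f8 (the legacy COM1 base),
 * VE_GET_IO_SIZE() == 1 byte, VE_IS_IO_IN() true and VE_IS_IO_STRING()
 * false -- i.e. a single "inb" from the serial port.
 */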
#define ATTR_DEBUG		BIT(0)
#define ATTR_SEPT_VE_DISABLE	BIT(28)
/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100
#define TDREPORT_SUBTYPE_0	0
/* Called from __tdx_hypercall() for unrecoverable failure */
noinstr void __tdx_hypercall_failed(void)
{
	instrumentation_begin();
	panic("TDVMCALL failed. TDX module bug?");
}
#ifdef CONFIG_KVM_GUEST
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_hypercall_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};
	return __tdx_hypercall(&args);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif
/*
 * Used for TDX guests to make calls directly to the TD module.  This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel can not survive the call failing.
 */
static inline void tdx_module_call(u64 fn, u64 rcx, u64 rdx, u64 r8, u64 r9,
				   struct tdx_module_output *out)
{
	if (__tdx_module_call(fn, rcx, rdx, r8, r9, out))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}
/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
 * Return 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	u64 ret;
	ret = __tdx_module_call(TDX_GET_REPORT, virt_to_phys(tdreport),
				virt_to_phys(reportdata), TDREPORT_SUBTYPE_0,
				0, NULL);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);
static void __noreturn tdx_panic(const char *msg)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
		.r12 = 0, /* Error code: 0 is Panic */
	};
	union {
		/* Define register order according to the GHCI */
		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };
		char str[64];
	} message;
	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
	strncpy(message.str, msg, 64);
	args.r8  = message.r8;
	args.r9  = message.r9;
	args.r14 = message.r14;
	args.r15 = message.r15;
	args.rdi = message.rdi;
	args.rsi = message.rsi;
	args.rbx = message.rbx;
	args.rdx = message.rdx;
	/*
	 * This hypercall should never return and it is not safe
	 * to keep the guest running. Call it forever if it
	 * happens to return.
	 */
	while (1)
		__tdx_hypercall(&args);
}
static void tdx_parse_tdinfo(u64 *cc_mask)
{
	struct tdx_module_output out;
	unsigned int gpa_width;
	u64 td_attr;
	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdx_module_call(TDX_GET_INFO, 0, 0, 0, 0, &out);
	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * can not meaningfully run without it.
	 */
	gpa_width = out.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);
	/*
	 * The kernel can not handle #VE's when accessing normal kernel
	 * memory.  Ensure that no #VE will be delivered for accesses to
	 * TD-private memory.  Only VMM-shared memory (MMIO) will #VE.
	 */
	td_attr = out.rdx;
	if (!(td_attr & ATTR_SEPT_VE_DISABLE)) {
		const char *msg = "TD misconfiguration: SEPT_VE_DISABLE attribute must be set.";
		/* Relax SEPT_VE_DISABLE check for debug TD. */
		if (td_attr & ATTR_DEBUG)
			pr_warn("%s\n", msg);
		else
			tdx_panic(msg);
	}
}
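/*
 * Worked example (added annotation): a TD reporting gpa_width == 52
 * yields cc_mask == BIT_ULL(51), so guest physical address
 * 0x8000000001000 has the shared bit set while 0x1000 is private.
 */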
/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes cases when instruction
 * length field can be used in section "Information for VM Exits Due to
 * Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VEs due to instructions */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->insn_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}
static u64 __cpuidle __halt(const bool irq_disabled)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};
	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args);
}
static int handle_halt(struct ve_info *ve)
{
	const bool irq_disabled = irqs_disabled();
	if (__halt(irq_disabled))
		return -EIO;
	return ve_instr_len(ve);
}
void __cpuidle tdx_safe_halt(void)
{
	const bool irq_disabled = false;
	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}
static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};
	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall_ret(&args))
		return -EIO;
	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}
static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};
	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;
	return ve_instr_len(ve);
}
static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};
	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. This matches CPU
	 * behaviour for an unsupported leaf.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}
	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall_ret(&args))
		return -EIO;
	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;
	return ve_instr_len(ve);
}
static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
		.r15 = *val,
	};
	if (__tdx_hypercall_ret(&args))
		return false;
	*val = args.r11;
	return true;
}
static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}
static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	int size, extend_size;
	u8 extend_val = 0;
	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;
	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;
	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;
	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;
	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}
	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;
	/* Handle writes first */
	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}
	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;
	switch (mmio) {
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered by the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}
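/*
 * Worked example (added annotation): for a sign-extending one-byte
 * MMIO read into a 32-bit register that returns 0x80, handle_mmio()
 * sees size == 1, extend_size == insn.opnd_bytes == 4 and extend_val
 * == 0xFF, so the low 32 bits of the destination end up as 0xFFFFFF80.
 */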
static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);
	bool success;
	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall_ret(&args);
	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;
	return success;
}
static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);
	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}
/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return True on success or False on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;
	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;
	in   = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);
	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;
	return ve_instr_len(ve);
}
/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. Returns false on failure.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;
	tdx_get_ve_info(&ve);
	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;
	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;
	regs->ip += insn_len;
	return true;
}
void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_output out;
	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling.  A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdx_module_call(TDX_GET_VEINFO, 0, 0, 0, 0, &out);
	/* Transfer the output parameters */
	ve->exit_reason = out.rcx;
	ve->exit_qual   = out.rdx;
	ve->gla         = out.r8;
	ve->gpa         = out.r9;
	ve->instr_len   = lower_32_bits(out.r10);
	ve->instr_info  = upper_32_bits(out.r10);
}
/*
 * Handle the user initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}
static inline bool is_private_gpa(u64 gpa)
{
	return gpa == cc_mkenc(gpa);
}
/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		if (is_private_gpa(ve->gpa))
			panic("Unexpected EPT-violation on private memory.");
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}
bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;
	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;
	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;
	return true;
}
static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID.  Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}
static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}
/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest.  The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end   = __pa(vaddr + numpages * PAGE_SIZE);
	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end   |= cc_mkdec(0);
	}
	/*
	 * Notify the VMM about page mapping conversion. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface (GHCI),
	 * section "TDG.VP.VMCALL<MapGPA>"
	 */
	if (_tdx_hypercall(TDVMCALL_MAP_GPA, start, end - start, 0, 0))
		return false;
	/* shared->private conversion requires memory to be accepted before use */
	if (enc)
		return tdx_accept_memory(start, end);
	return true;
}
static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
					  bool enc)
{
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}
static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (!enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);
	return true;
}
void __init tdx_early_init(void)
{
	u64 cc_mask;
	u32 eax, sig[3];
	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2],  &sig[1]);
	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;
	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);
	cc_vendor = CC_VENDOR_INTEL;
	tdx_parse_tdinfo(&cc_mask);
	cc_set_mask(cc_mask);
	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
	tdx_module_call(TDX_WR, 0, TDCS_NOTIFY_ENABLES, 0, -1ULL, NULL);
	/*
	 * All bits above the GPA width are reserved, and the kernel treats the
	 * shared bit as a flag, not as part of the physical address.
	 *
	 * Adjust physical mask to only cover valid GPA bits.
	 */
	physical_mask &= cc_mask - 1;
	/*
	 * The kernel mapping should match the TDX metadata for the page.
	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
	 * owned by the caller and can catch even _momentary_ mismatches.  Bad
	 * things happen on mismatch:
	 *
	 *   - Private mapping => Shared Page  == Guest shutdown
	 *   - Shared mapping  => Private Page == Recoverable #VE
	 *
	 * guest.enc_status_change_prepare() converts the page from
	 * shared=>private before the mapping becomes private.
	 *
	 * guest.enc_status_change_finish() converts the page from
	 * private=>shared after the mapping becomes shared.
	 *
	 * In both cases there is a temporary shared mapping to a private page,
	 * which can result in a #VE.  But, there is never a private mapping to
	 * a shared page.
	 */
	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish  = tdx_enc_status_change_finish;
	x86_platform.guest.enc_cache_flush_required  = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required    = tdx_tlb_flush_required;
	/*
	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
	 * bringup low level code. That raises #VE which cannot be handled
	 * there.
	 *
	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
	 * implemented separately in the low-level startup ASM code.
	 * Until that is in place, disable parallel bringup for TDX.
	 */
	x86_cpuinit.parallel_bringup = false;
	pr_info("Guest detected\n");
}
 | 
	linux-master | 
	arch/x86/coco/tdx/tdx.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
#include <linux/audit_arch.h>
#include <asm/unistd_32.h>
#include <asm/audit.h>
unsigned ia32_dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
unsigned ia32_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
unsigned ia32_write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
unsigned ia32_read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
unsigned ia32_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int ia32_classify_syscall(unsigned syscall)
{
	switch (syscall) {
	case __NR_open:
		return AUDITSC_OPEN;
	case __NR_openat:
		return AUDITSC_OPENAT;
	case __NR_socketcall:
		return AUDITSC_SOCKETCALL;
	case __NR_execve:
	case __NR_execveat:
		return AUDITSC_EXECVE;
	case __NR_openat2:
		return AUDITSC_OPENAT2;
	default:
		return AUDITSC_COMPAT;
	}
}
 | 
	linux-master | 
	arch/x86/ia32/audit.c | 
| 
	/*
 *  Copyright (C) 2001  MandrakeSoft S.A.
 *  Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *    MandrakeSoft S.A.
 *    43, rue d'Aboukir
 *    75002 Paris - France
 *    http://www.linux-mandrake.com/
 *    http://www.mandrakesoft.com/
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *  Yunhong Jiang <[email protected]>
 *  Yaozu (Eddie) Dong <[email protected]>
 *  Based on Xen 3.1 code.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>
#include "ioapic.h"
#include "lapic.h"
#include "irq.h"
static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
		bool line_status);
static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
				      struct kvm_ioapic *ioapic,
				      int trigger_mode,
				      int pin);
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic)
{
	unsigned long result = 0;
	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;
	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;
	default:
		{
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content = ~0ULL;
			if (redir_index < IOAPIC_NUM_PINS) {
				u32 index = array_index_nospec(
					redir_index, IOAPIC_NUM_PINS);
				redir_content = ioapic->redirtbl[index].bits;
			}
			result = (ioapic->ioregsel & 0x1) ?
			    (redir_content >> 32) & 0xffffffff :
			    redir_content & 0xffffffff;
			break;
		}
	}
	return result;
}
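/*
 * Worked example (added annotation): ioregsel == 0x12 selects
 * redirection entry (0x12 - 0x10) >> 1 == 1 and, being even, returns
 * the low 32 bits of that entry; ioregsel == 0x13 returns the high
 * 32 bits of the same entry.
 */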
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
	ioapic->rtc_status.pending_eoi = 0;
	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_IDS);
}
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
		kvm_rtc_eoi_tracking_restore_all(ioapic);
}
static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	bool new_val, old_val;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;
	e = &ioapic->redirtbl[RTC_GSI];
	if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
				 e->fields.dest_id,
				 kvm_lapic_irq_dest_mode(!!e->fields.dest_mode)))
		return;
	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
	old_val = test_bit(vcpu->vcpu_id, dest_map->map);
	if (new_val == old_val)
		return;
	if (new_val) {
		__set_bit(vcpu->vcpu_id, dest_map->map);
		dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
		ioapic->rtc_status.pending_eoi++;
	} else {
		__clear_bit(vcpu->vcpu_id, dest_map->map);
		ioapic->rtc_status.pending_eoi--;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	spin_lock(&ioapic->lock);
	__rtc_irq_eoi_tracking_restore_one(vcpu);
	spin_unlock(&ioapic->lock);
}
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;
	if (RTC_GSI >= IOAPIC_NUM_PINS)
		return;
	rtc_irq_eoi_tracking_reset(ioapic);
	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
	    __rtc_irq_eoi_tracking_restore_one(vcpu);
}
static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu,
			int vector)
{
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	/* RTC special handling */
	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
	    (vector == dest_map->vectors[vcpu->vcpu_id]) &&
	    (test_and_clear_bit(vcpu->vcpu_id,
				ioapic->rtc_status.dest_map.map))) {
		--ioapic->rtc_status.pending_eoi;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}
static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
	if (ioapic->rtc_status.pending_eoi > 0)
		return true; /* coalesced */
	return false;
}
static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	kvm_for_each_vcpu(i, vcpu, ioapic->kvm) {
		if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
					 entry->fields.dest_id,
					 entry->fields.dest_mode) ||
		    kvm_apic_pending_eoi(vcpu, entry->fields.vector))
			continue;
		/*
		 * If no longer has pending EOI in LAPICs, update
		 * EOI for this vector.
		 */
		rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
		break;
	}
}
static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
		int irq_level, bool line_status)
{
	union kvm_ioapic_redirect_entry entry;
	u32 mask = 1 << irq;
	u32 old_irr;
	int edge, ret;
	entry = ioapic->redirtbl[irq];
	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
	if (!irq_level) {
		ioapic->irr &= ~mask;
		ret = 1;
		goto out;
	}
	/*
	 * AMD SVM AVIC accelerates the EOI write iff the interrupt is edge
	 * triggered, in which case the in-kernel IOAPIC will not be able
	 * to receive the EOI.  In this case, we do a lazy update of the
	 * pending EOI when trying to set IOAPIC irq.
	 */
	if (edge && kvm_apicv_activated(ioapic->kvm))
		ioapic_lazy_update_eoi(ioapic, irq);
	/*
	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
	 * this only happens if a previous edge has not been delivered due
	 * to masking.  For level interrupts, the remote_irr field tells
	 * us if the interrupt is waiting for an EOI.
	 *
	 * RTC is special: it is edge-triggered, but userspace likes to know
	 * if it has been already ack-ed via EOI because coalesced RTC
	 * interrupts lead to time drift in Windows guests.  So we track
	 * EOI manually for the RTC interrupt.
	 */
	if (irq == RTC_GSI && line_status &&
		rtc_irq_check_coalesced(ioapic)) {
		ret = 0;
		goto out;
	}
	old_irr = ioapic->irr;
	ioapic->irr |= mask;
	if (edge) {
		ioapic->irr_delivered &= ~mask;
		if (old_irr == ioapic->irr) {
			ret = 0;
			goto out;
		}
	}
	ret = ioapic_service(ioapic, irq, line_status);
out:
	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	return ret;
}
static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
	u32 idx;
	rtc_irq_eoi_tracking_reset(ioapic);
	for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
		ioapic_set_irq(ioapic, idx, 1, true);
	kvm_rtc_eoi_tracking_restore_all(ioapic);
}
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;
	int index;
	spin_lock(&ioapic->lock);
	/* Make sure we see any missing RTC EOI */
	if (test_bit(vcpu->vcpu_id, dest_map->map))
		__set_bit(dest_map->vectors[vcpu->vcpu_id],
			  ioapic_handled_vectors);
	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
		e = &ioapic->redirtbl[index];
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
		    index == RTC_GSI) {
			u16 dm = kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
			if (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
						e->fields.dest_id, dm) ||
			    kvm_apic_pending_eoi(vcpu, e->fields.vector))
				__set_bit(e->fields.vector,
					  ioapic_handled_vectors);
		}
	}
	spin_unlock(&ioapic->lock);
}
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
	if (!ioapic_in_kernel(kvm))
		return;
	kvm_make_scan_ioapic_request(kvm);
}
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;
	int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;
	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;
	case IOAPIC_REG_ARB_ID:
		break;
	default:
		index = (ioapic->ioregsel - 0x10) >> 1;
		if (index >= IOAPIC_NUM_PINS)
			return;
		index = array_index_nospec(index, IOAPIC_NUM_PINS);
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		/* Preserve read-only fields */
		old_remote_irr = e->fields.remote_irr;
		old_delivery_status = e->fields.delivery_status;
		old_dest_id = e->fields.dest_id;
		old_dest_mode = e->fields.dest_mode;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
		}
		e->fields.remote_irr = old_remote_irr;
		e->fields.delivery_status = old_delivery_status;
		/*
		 * Some OSes (Linux, Xen) assume that Remote IRR bit will
		 * be cleared by IOAPIC hardware when the entry is configured
		 * as edge-triggered. This behavior is used to simulate an
		 * explicit EOI on IOAPICs that don't have the EOI register.
		 */
		if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
			e->fields.remote_irr = 0;
		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
		    ioapic->irr & (1 << index) && !e->fields.mask && !e->fields.remote_irr) {
			/*
			 * Pending status in irr may be outdated: the IRQ line may have
			 * already been deasserted by a device while the IRQ was masked.
			 * This occurs, for instance, if the interrupt is handled in a
			 * Linux guest as a oneshot interrupt (IRQF_ONESHOT). In this
			 * case the guest acknowledges the interrupt to the device in
			 * its threaded irq handler, i.e. after the EOI but before
			 * unmasking, so at the time of unmasking the IRQ line is
			 * already down but our pending irr bit is still set. In such
			 * cases, injecting this pending interrupt to the guest is
			 * buggy: the guest will receive an extra unwanted interrupt.
			 *
			 * So we need to check here if the IRQ is actually still pending.
			 * As we are generally not able to probe the IRQ line status
			 * directly, we do it through irqfd resampler. Namely, we clear
			 * the pending status and notify the resampler that this interrupt
			 * is done, without actually injecting it into the guest. If the
			 * IRQ line is actually already deasserted, we are done. If it is
			 * still asserted, a new interrupt will be shortly triggered
			 * through irqfd and injected into the guest.
			 *
			 * If, however, it's not possible to resample (no irqfd resampler
			 * registered for this irq), then unconditionally inject this
			 * pending interrupt into the guest, so the guest will not miss
			 * an interrupt, although may get an extra unwanted interrupt.
			 */
			if (kvm_notify_irqfd_resampler(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index))
				ioapic->irr &= ~(1 << index);
			else
				ioapic_service(ioapic, index, false);
		}
		if (e->fields.delivery_mode == APIC_DM_FIXED) {
			struct kvm_lapic_irq irq;
			irq.vector = e->fields.vector;
			irq.delivery_mode = e->fields.delivery_mode << 8;
			irq.dest_mode =
			    kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
			irq.level = false;
			irq.trig_mode = e->fields.trig_mode;
			irq.shorthand = APIC_DEST_NOSHORT;
			irq.dest_id = e->fields.dest_id;
			irq.msi_redir_hint = false;
			bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
			kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
						 vcpu_bitmap);
			if (old_dest_mode != e->fields.dest_mode ||
			    old_dest_id != e->fields.dest_id) {
				/*
				 * Update vcpu_bitmap with vcpus specified in
				 * the previous request as well. This is done to
				 * keep ioapic_handled_vectors synchronized.
				 */
				irq.dest_id = old_dest_id;
				irq.dest_mode =
				    kvm_lapic_irq_dest_mode(
					!!old_dest_mode);
				kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
							 vcpu_bitmap);
			}
			kvm_make_scan_ioapic_request_mask(ioapic->kvm,
							  vcpu_bitmap);
		} else {
			kvm_make_scan_ioapic_request(ioapic->kvm);
		}
		break;
	}
}
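/*
 * Deliver the interrupt programmed in redirection entry @irq to the
 * destination APIC(s).  Returns -1 if the entry is masked or, for a
 * level-triggered entry, if Remote IRR is still set; otherwise returns
 * the result of kvm_irq_delivery_to_apic().
 */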
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;
	if (entry->fields.mask ||
	    (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
	    entry->fields.remote_irr))
		return -1;
	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = kvm_lapic_irq_dest_mode(!!entry->fields.dest_mode);
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = APIC_DEST_NOSHORT;
	irqe.msi_redir_hint = false;
	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr_delivered |= 1 << irq;
	if (irq == RTC_GSI && line_status) {
		/*
		 * pending_eoi cannot ever become negative (see
		 * rtc_status_pending_eoi_check_valid) and the caller
		 * ensures that it is only called if it is >= zero, namely
	 * if rtc_irq_check_coalesced returns false.
		 */
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
					       &ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;
	return ret;
}
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
		       int level, bool line_status)
{
	int ret, irq_level;
	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);
	spin_lock(&ioapic->lock);
	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
					 irq_source_id, level);
	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);
	spin_unlock(&ioapic->lock);
	return ret;
}
void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
{
	int i;
	spin_lock(&ioapic->lock);
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
		__clear_bit(irq_source_id, &ioapic->irq_states[i]);
	spin_unlock(&ioapic->lock);
}
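/*
 * Delayed work used to throttle interrupt storms: re-deliver any
 * level-triggered interrupt that is still pending in irr once its
 * Remote IRR has been cleared by the EOI.
 */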
static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
	int i;
	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
						 eoi_inject.work);
	spin_lock(&ioapic->lock);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
			continue;
		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
			ioapic_service(ioapic, i, false);
	}
	spin_unlock(&ioapic->lock);
}
#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
				      struct kvm_ioapic *ioapic,
				      int trigger_mode,
				      int pin)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[pin];
	/*
	 * We drop the lock while calling the ack notifiers because ack
	 * notifier callbacks for assigned devices call back into the
	 * IOAPIC recursively. Since remote_irr is cleared only after the
	 * notifiers return, if the same vector is delivered while the
	 * lock is dropped it will be latched in irr and delivered once
	 * the ack notifier returns.
	 */
	spin_unlock(&ioapic->lock);
	kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
	spin_lock(&ioapic->lock);
	if (trigger_mode != IOAPIC_LEVEL_TRIG ||
	    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
		return;
	ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
	ent->fields.remote_irr = 0;
	if (!ent->fields.mask && (ioapic->irr & (1 << pin))) {
		++ioapic->irq_eoi[pin];
		if (ioapic->irq_eoi[pin] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
			/*
			 * Real hardware does not deliver the interrupt
			 * immediately during eoi broadcast, and this
			 * lets a buggy guest make slow progress
			 * even if it does not correctly handle a
			 * level-triggered interrupt.  Emulate this
			 * behavior if we detect an interrupt storm.
			 */
			schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
			ioapic->irq_eoi[pin] = 0;
			trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
		} else {
			ioapic_service(ioapic, pin, false);
		}
	} else {
		ioapic->irq_eoi[pin] = 0;
	}
}
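/*
 * Handle an EOI broadcast from the local APIC: update RTC EOI tracking
 * and process every redirection entry programmed with @vector.
 */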
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
	int i;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	spin_lock(&ioapic->lock);
	rtc_irq_eoi(ioapic, vcpu, vector);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
		if (ent->fields.vector != vector)
			continue;
		kvm_ioapic_update_eoi_one(vcpu, ioapic, trigger_mode, i);
	}
	spin_unlock(&ioapic->lock);
}
static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}
static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	return ((addr >= ioapic->base_address &&
		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}
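/*
 * MMIO accessors.  The IOAPIC is programmed indirectly: the guest writes
 * a register index to IOREGSEL (offset 0x00) and then accesses the
 * selected register through the IOWIN window (offset 0x10), e.g. reading
 * redirection entry 0 takes two IOWIN reads with IOREGSEL set to 0x10
 * and 0x11 for the low and high dwords.
 */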
static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
				gpa_t addr, int len, void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;
	ASSERT(!(addr & 0xf));	/* check alignment */
	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;
	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic);
		break;
	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);
	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}
static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
				 gpa_t addr, int len, const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;
	ASSERT(!(addr & 0xf));	/* check alignment */
	switch (len) {
	case 8:
	case 4:
		data = *(u32 *) val;
		break;
	case 2:
		data = *(u16 *) val;
		break;
	case 1:
		data = *(u8  *) val;
		break;
	default:
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}
	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data & 0xFF; /* 8-bit register */
		break;
	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;
	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}
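/* Mask every pin and restore the power-on register defaults. */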
static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;
	cancel_delayed_work_sync(&ioapic->eoi_inject);
	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	ioapic->id = 0;
	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
	rtc_irq_eoi_tracking_reset(ioapic);
}
static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read     = ioapic_mmio_read,
	.write    = ioapic_mmio_write,
};
int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;
	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL_ACCOUNT);
	if (!ioapic)
		return -ENOMEM;
	spin_lock_init(&ioapic->lock);
	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
				      IOAPIC_MEM_LENGTH, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}
	return ret;
}
void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	if (!ioapic)
		return;
	cancel_delayed_work_sync(&ioapic->eoi_inject);
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	kvm->arch.vioapic = NULL;
	kfree(ioapic);
}
void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	state->irr &= ~ioapic->irr_delivered;
	spin_unlock(&ioapic->lock);
}
void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	kvm_make_scan_ioapic_request(kvm);
	kvm_ioapic_inject_all(ioapic, state->irr);
	spin_unlock(&ioapic->lock);
}
 | 
	linux-master | 
	arch/x86/kvm/ioapic.c | 
| 
	/* SPDX-License-Identifier: GPL-2.0 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kvm_host.h>
#include "x86.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "smm.h"
#include "cpuid.h"
#include "trace.h"
#define CHECK_SMRAM32_OFFSET(field, offset) \
	ASSERT_STRUCT_OFFSET(struct kvm_smram_state_32, field, offset - 0xFE00)
#define CHECK_SMRAM64_OFFSET(field, offset) \
	ASSERT_STRUCT_OFFSET(struct kvm_smram_state_64, field, offset - 0xFE00)
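/*
 * Compile-time checks that the KVM-internal SMRAM structures match the
 * architectural state-save layout, which occupies the final 512 bytes
 * of the SMRAM image starting at offset 0xFE00 from the SMM base.
 */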
static void check_smram_offsets(void)
{
	/* 32 bit SMRAM image */
	CHECK_SMRAM32_OFFSET(reserved1,			0xFE00);
	CHECK_SMRAM32_OFFSET(smbase,			0xFEF8);
	CHECK_SMRAM32_OFFSET(smm_revision,		0xFEFC);
	CHECK_SMRAM32_OFFSET(io_inst_restart,		0xFF00);
	CHECK_SMRAM32_OFFSET(auto_hlt_restart,		0xFF02);
	CHECK_SMRAM32_OFFSET(io_restart_rdi,		0xFF04);
	CHECK_SMRAM32_OFFSET(io_restart_rcx,		0xFF08);
	CHECK_SMRAM32_OFFSET(io_restart_rsi,		0xFF0C);
	CHECK_SMRAM32_OFFSET(io_restart_rip,		0xFF10);
	CHECK_SMRAM32_OFFSET(cr4,			0xFF14);
	CHECK_SMRAM32_OFFSET(reserved2,			0xFF18);
	CHECK_SMRAM32_OFFSET(int_shadow,		0xFF1A);
	CHECK_SMRAM32_OFFSET(reserved3,			0xFF1B);
	CHECK_SMRAM32_OFFSET(ds,			0xFF2C);
	CHECK_SMRAM32_OFFSET(fs,			0xFF38);
	CHECK_SMRAM32_OFFSET(gs,			0xFF44);
	CHECK_SMRAM32_OFFSET(idtr,			0xFF50);
	CHECK_SMRAM32_OFFSET(tr,			0xFF5C);
	CHECK_SMRAM32_OFFSET(gdtr,			0xFF6C);
	CHECK_SMRAM32_OFFSET(ldtr,			0xFF78);
	CHECK_SMRAM32_OFFSET(es,			0xFF84);
	CHECK_SMRAM32_OFFSET(cs,			0xFF90);
	CHECK_SMRAM32_OFFSET(ss,			0xFF9C);
	CHECK_SMRAM32_OFFSET(es_sel,			0xFFA8);
	CHECK_SMRAM32_OFFSET(cs_sel,			0xFFAC);
	CHECK_SMRAM32_OFFSET(ss_sel,			0xFFB0);
	CHECK_SMRAM32_OFFSET(ds_sel,			0xFFB4);
	CHECK_SMRAM32_OFFSET(fs_sel,			0xFFB8);
	CHECK_SMRAM32_OFFSET(gs_sel,			0xFFBC);
	CHECK_SMRAM32_OFFSET(ldtr_sel,			0xFFC0);
	CHECK_SMRAM32_OFFSET(tr_sel,			0xFFC4);
	CHECK_SMRAM32_OFFSET(dr7,			0xFFC8);
	CHECK_SMRAM32_OFFSET(dr6,			0xFFCC);
	CHECK_SMRAM32_OFFSET(gprs,			0xFFD0);
	CHECK_SMRAM32_OFFSET(eip,			0xFFF0);
	CHECK_SMRAM32_OFFSET(eflags,			0xFFF4);
	CHECK_SMRAM32_OFFSET(cr3,			0xFFF8);
	CHECK_SMRAM32_OFFSET(cr0,			0xFFFC);
	/* 64 bit SMRAM image */
	CHECK_SMRAM64_OFFSET(es,			0xFE00);
	CHECK_SMRAM64_OFFSET(cs,			0xFE10);
	CHECK_SMRAM64_OFFSET(ss,			0xFE20);
	CHECK_SMRAM64_OFFSET(ds,			0xFE30);
	CHECK_SMRAM64_OFFSET(fs,			0xFE40);
	CHECK_SMRAM64_OFFSET(gs,			0xFE50);
	CHECK_SMRAM64_OFFSET(gdtr,			0xFE60);
	CHECK_SMRAM64_OFFSET(ldtr,			0xFE70);
	CHECK_SMRAM64_OFFSET(idtr,			0xFE80);
	CHECK_SMRAM64_OFFSET(tr,			0xFE90);
	CHECK_SMRAM64_OFFSET(io_restart_rip,		0xFEA0);
	CHECK_SMRAM64_OFFSET(io_restart_rcx,		0xFEA8);
	CHECK_SMRAM64_OFFSET(io_restart_rsi,		0xFEB0);
	CHECK_SMRAM64_OFFSET(io_restart_rdi,		0xFEB8);
	CHECK_SMRAM64_OFFSET(io_restart_dword,		0xFEC0);
	CHECK_SMRAM64_OFFSET(reserved1,			0xFEC4);
	CHECK_SMRAM64_OFFSET(io_inst_restart,		0xFEC8);
	CHECK_SMRAM64_OFFSET(auto_hlt_restart,		0xFEC9);
	CHECK_SMRAM64_OFFSET(amd_nmi_mask,		0xFECA);
	CHECK_SMRAM64_OFFSET(int_shadow,		0xFECB);
	CHECK_SMRAM64_OFFSET(reserved2,			0xFECC);
	CHECK_SMRAM64_OFFSET(efer,			0xFED0);
	CHECK_SMRAM64_OFFSET(svm_guest_flag,		0xFED8);
	CHECK_SMRAM64_OFFSET(svm_guest_vmcb_gpa,	0xFEE0);
	CHECK_SMRAM64_OFFSET(svm_guest_virtual_int,	0xFEE8);
	CHECK_SMRAM64_OFFSET(reserved3,			0xFEF0);
	CHECK_SMRAM64_OFFSET(smm_revison,		0xFEFC);
	CHECK_SMRAM64_OFFSET(smbase,			0xFF00);
	CHECK_SMRAM64_OFFSET(reserved4,			0xFF04);
	CHECK_SMRAM64_OFFSET(ssp,			0xFF18);
	CHECK_SMRAM64_OFFSET(svm_guest_pat,		0xFF20);
	CHECK_SMRAM64_OFFSET(svm_host_efer,		0xFF28);
	CHECK_SMRAM64_OFFSET(svm_host_cr4,		0xFF30);
	CHECK_SMRAM64_OFFSET(svm_host_cr3,		0xFF38);
	CHECK_SMRAM64_OFFSET(svm_host_cr0,		0xFF40);
	CHECK_SMRAM64_OFFSET(cr4,			0xFF48);
	CHECK_SMRAM64_OFFSET(cr3,			0xFF50);
	CHECK_SMRAM64_OFFSET(cr0,			0xFF58);
	CHECK_SMRAM64_OFFSET(dr7,			0xFF60);
	CHECK_SMRAM64_OFFSET(dr6,			0xFF68);
	CHECK_SMRAM64_OFFSET(rflags,			0xFF70);
	CHECK_SMRAM64_OFFSET(rip,			0xFF78);
	CHECK_SMRAM64_OFFSET(gprs,			0xFF80);
	BUILD_BUG_ON(sizeof(union kvm_smram) != 512);
}
#undef CHECK_SMRAM64_OFFSET
#undef CHECK_SMRAM32_OFFSET
void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
{
	trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
	if (entering_smm) {
		vcpu->arch.hflags |= HF_SMM_MASK;
	} else {
		vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
		/* Process a latched INIT or SMI, if any.  */
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		/*
		 * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
		 * on SMM exit we still need to reload them from
		 * guest memory.
		 */
		vcpu->arch.pdptrs_from_userspace = false;
	}
	kvm_mmu_reset_context(vcpu);
}
void process_smi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.smi_pending = true;
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
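/*
 * Pack segment attributes into the 32-bit flags format used by the
 * SMRAM segment-state fields: type in bits 8-11 and S, DPL, P, AVL, L,
 * D/B and G in their usual descriptor positions.
 */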
static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
{
	u32 flags = 0;
	flags |= seg->g       << 23;
	flags |= seg->db      << 22;
	flags |= seg->l       << 21;
	flags |= seg->avl     << 20;
	flags |= seg->present << 15;
	flags |= seg->dpl     << 13;
	flags |= seg->s       << 12;
	flags |= seg->type    << 8;
	return flags;
}
static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu,
				  struct kvm_smm_seg_state_32 *state,
				  u32 *selector, int n)
{
	struct kvm_segment seg;
	kvm_get_segment(vcpu, &seg, n);
	*selector = seg.selector;
	state->base = seg.base;
	state->limit = seg.limit;
	state->flags = enter_smm_get_segment_flags(&seg);
}
#ifdef CONFIG_X86_64
static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu,
				  struct kvm_smm_seg_state_64 *state,
				  int n)
{
	struct kvm_segment seg;
	kvm_get_segment(vcpu, &seg, n);
	state->selector = seg.selector;
	state->attributes = enter_smm_get_segment_flags(&seg) >> 8;
	state->limit = seg.limit;
	state->base = seg.base;
}
#endif
static void enter_smm_save_state_32(struct kvm_vcpu *vcpu,
				    struct kvm_smram_state_32 *smram)
{
	struct desc_ptr dt;
	unsigned long val;
	int i;
	smram->cr0     = kvm_read_cr0(vcpu);
	smram->cr3     = kvm_read_cr3(vcpu);
	smram->eflags  = kvm_get_rflags(vcpu);
	smram->eip     = kvm_rip_read(vcpu);
	for (i = 0; i < 8; i++)
		smram->gprs[i] = kvm_register_read_raw(vcpu, i);
	kvm_get_dr(vcpu, 6, &val);
	smram->dr6     = (u32)val;
	kvm_get_dr(vcpu, 7, &val);
	smram->dr7     = (u32)val;
	enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR);
	enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR);
	static_call(kvm_x86_get_gdt)(vcpu, &dt);
	smram->gdtr.base = dt.address;
	smram->gdtr.limit = dt.size;
	static_call(kvm_x86_get_idt)(vcpu, &dt);
	smram->idtr.base = dt.address;
	smram->idtr.limit = dt.size;
	enter_smm_save_seg_32(vcpu, &smram->es, &smram->es_sel, VCPU_SREG_ES);
	enter_smm_save_seg_32(vcpu, &smram->cs, &smram->cs_sel, VCPU_SREG_CS);
	enter_smm_save_seg_32(vcpu, &smram->ss, &smram->ss_sel, VCPU_SREG_SS);
	enter_smm_save_seg_32(vcpu, &smram->ds, &smram->ds_sel, VCPU_SREG_DS);
	enter_smm_save_seg_32(vcpu, &smram->fs, &smram->fs_sel, VCPU_SREG_FS);
	enter_smm_save_seg_32(vcpu, &smram->gs, &smram->gs_sel, VCPU_SREG_GS);
	smram->cr4 = kvm_read_cr4(vcpu);
	smram->smm_revision = 0x00020000;
	smram->smbase = vcpu->arch.smbase;
	smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
}
#ifdef CONFIG_X86_64
static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,
				    struct kvm_smram_state_64 *smram)
{
	struct desc_ptr dt;
	unsigned long val;
	int i;
	for (i = 0; i < 16; i++)
		smram->gprs[15 - i] = kvm_register_read_raw(vcpu, i);
	smram->rip    = kvm_rip_read(vcpu);
	smram->rflags = kvm_get_rflags(vcpu);
	kvm_get_dr(vcpu, 6, &val);
	smram->dr6 = val;
	kvm_get_dr(vcpu, 7, &val);
	smram->dr7 = val;
	smram->cr0 = kvm_read_cr0(vcpu);
	smram->cr3 = kvm_read_cr3(vcpu);
	smram->cr4 = kvm_read_cr4(vcpu);
	smram->smbase = vcpu->arch.smbase;
	smram->smm_revison = 0x00020064;
	smram->efer = vcpu->arch.efer;
	enter_smm_save_seg_64(vcpu, &smram->tr, VCPU_SREG_TR);
	static_call(kvm_x86_get_idt)(vcpu, &dt);
	smram->idtr.limit = dt.size;
	smram->idtr.base = dt.address;
	enter_smm_save_seg_64(vcpu, &smram->ldtr, VCPU_SREG_LDTR);
	static_call(kvm_x86_get_gdt)(vcpu, &dt);
	smram->gdtr.limit = dt.size;
	smram->gdtr.base = dt.address;
	enter_smm_save_seg_64(vcpu, &smram->es, VCPU_SREG_ES);
	enter_smm_save_seg_64(vcpu, &smram->cs, VCPU_SREG_CS);
	enter_smm_save_seg_64(vcpu, &smram->ss, VCPU_SREG_SS);
	enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS);
	enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
	enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);
	smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
}
#endif
void enter_smm(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs, ds;
	struct desc_ptr dt;
	unsigned long cr0;
	union kvm_smram smram;
	check_smram_offsets();
	memset(smram.bytes, 0, sizeof(smram.bytes));
#ifdef CONFIG_X86_64
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
		enter_smm_save_state_64(vcpu, &smram.smram64);
	else
#endif
		enter_smm_save_state_32(vcpu, &smram.smram32);
	/*
	 * Give enter_smm() a chance to make ISA-specific changes to the vCPU
	 * state (e.g. leave guest mode) after we've saved the state into the
	 * SMM state-save area.
	 *
	 * Kill the VM in the unlikely case of failure, because the VM
	 * can be left in an undefined state in this case.
	 */
	if (static_call(kvm_x86_enter_smm)(vcpu, &smram))
		goto error;
	kvm_smm_changed(vcpu, true);
	if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram)))
		goto error;
	if (static_call(kvm_x86_get_nmi_mask)(vcpu))
		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
	else
		static_call(kvm_x86_set_nmi_mask)(vcpu, true);
	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
	kvm_rip_write(vcpu, 0x8000);
	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
	static_call(kvm_x86_set_cr0)(vcpu, cr0);
	vcpu->arch.cr0 = cr0;
	static_call(kvm_x86_set_cr4)(vcpu, 0);
	/* Undocumented: IDT limit is set to zero on entry to SMM.  */
	dt.address = dt.size = 0;
	static_call(kvm_x86_set_idt)(vcpu, &dt);
	if (WARN_ON_ONCE(kvm_set_dr(vcpu, 7, DR7_FIXED_1)))
		goto error;
	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
	cs.base = vcpu->arch.smbase;
	ds.selector = 0;
	ds.base = 0;
	cs.limit    = ds.limit = 0xffffffff;
	cs.type     = ds.type = 0x3;
	cs.dpl      = ds.dpl = 0;
	cs.db       = ds.db = 0;
	cs.s        = ds.s = 1;
	cs.l        = ds.l = 0;
	cs.g        = ds.g = 1;
	cs.avl      = ds.avl = 0;
	cs.present  = ds.present = 1;
	cs.unusable = ds.unusable = 0;
	cs.padding  = ds.padding = 0;
	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
#ifdef CONFIG_X86_64
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
		if (static_call(kvm_x86_set_efer)(vcpu, 0))
			goto error;
#endif
	kvm_update_cpuid_runtime(vcpu);
	kvm_mmu_reset_context(vcpu);
	return;
error:
	kvm_vm_dead(vcpu->kvm);
}
static void rsm_set_desc_flags(struct kvm_segment *desc, u32 flags)
{
	desc->g    = (flags >> 23) & 1;
	desc->db   = (flags >> 22) & 1;
	desc->l    = (flags >> 21) & 1;
	desc->avl  = (flags >> 20) & 1;
	desc->present = (flags >> 15) & 1;
	desc->dpl  = (flags >> 13) & 3;
	desc->s    = (flags >> 12) & 1;
	desc->type = (flags >>  8) & 15;
	desc->unusable = !desc->present;
	desc->padding = 0;
}
static int rsm_load_seg_32(struct kvm_vcpu *vcpu,
			   const struct kvm_smm_seg_state_32 *state,
			   u16 selector, int n)
{
	struct kvm_segment desc;
	desc.selector =           selector;
	desc.base =               state->base;
	desc.limit =              state->limit;
	rsm_set_desc_flags(&desc, state->flags);
	kvm_set_segment(vcpu, &desc, n);
	return X86EMUL_CONTINUE;
}
#ifdef CONFIG_X86_64
static int rsm_load_seg_64(struct kvm_vcpu *vcpu,
			   const struct kvm_smm_seg_state_64 *state,
			   int n)
{
	struct kvm_segment desc;
	desc.selector =           state->selector;
	rsm_set_desc_flags(&desc, state->attributes << 8);
	desc.limit =              state->limit;
	desc.base =               state->base;
	kvm_set_segment(vcpu, &desc, n);
	return X86EMUL_CONTINUE;
}
#endif
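/*
 * Reload CR0/CR3/CR4 in an order that never trips the architectural
 * consistency checks.  Returns X86EMUL_UNHANDLEABLE if any of the
 * control register writes is rejected.
 */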
static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu,
				    u64 cr0, u64 cr3, u64 cr4)
{
	int bad;
	u64 pcid;
	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
	pcid = 0;
	if (cr4 & X86_CR4_PCIDE) {
		pcid = cr3 & 0xfff;
		cr3 &= ~0xfff;
	}
	bad = kvm_set_cr3(vcpu, cr3);
	if (bad)
		return X86EMUL_UNHANDLEABLE;
	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;
	bad = kvm_set_cr0(vcpu, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;
	if (cr4 & X86_CR4_PCIDE) {
		bad = kvm_set_cr4(vcpu, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
		if (pcid) {
			bad = kvm_set_cr3(vcpu, cr3 | pcid);
			if (bad)
				return X86EMUL_UNHANDLEABLE;
		}
	}
	return X86EMUL_CONTINUE;
}
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
			     const struct kvm_smram_state_32 *smstate)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	struct desc_ptr dt;
	int i, r;
	ctxt->eflags =  smstate->eflags | X86_EFLAGS_FIXED;
	ctxt->_eip =  smstate->eip;
	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = smstate->gprs[i];
	if (kvm_set_dr(vcpu, 6, smstate->dr6))
		return X86EMUL_UNHANDLEABLE;
	if (kvm_set_dr(vcpu, 7, smstate->dr7))
		return X86EMUL_UNHANDLEABLE;
	rsm_load_seg_32(vcpu, &smstate->tr, smstate->tr_sel, VCPU_SREG_TR);
	rsm_load_seg_32(vcpu, &smstate->ldtr, smstate->ldtr_sel, VCPU_SREG_LDTR);
	dt.address =               smstate->gdtr.base;
	dt.size =                  smstate->gdtr.limit;
	static_call(kvm_x86_set_gdt)(vcpu, &dt);
	dt.address =               smstate->idtr.base;
	dt.size =                  smstate->idtr.limit;
	static_call(kvm_x86_set_idt)(vcpu, &dt);
	rsm_load_seg_32(vcpu, &smstate->es, smstate->es_sel, VCPU_SREG_ES);
	rsm_load_seg_32(vcpu, &smstate->cs, smstate->cs_sel, VCPU_SREG_CS);
	rsm_load_seg_32(vcpu, &smstate->ss, smstate->ss_sel, VCPU_SREG_SS);
	rsm_load_seg_32(vcpu, &smstate->ds, smstate->ds_sel, VCPU_SREG_DS);
	rsm_load_seg_32(vcpu, &smstate->fs, smstate->fs_sel, VCPU_SREG_FS);
	rsm_load_seg_32(vcpu, &smstate->gs, smstate->gs_sel, VCPU_SREG_GS);
	vcpu->arch.smbase = smstate->smbase;
	r = rsm_enter_protected_mode(vcpu, smstate->cr0,
					smstate->cr3, smstate->cr4);
	if (r != X86EMUL_CONTINUE)
		return r;
	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
	ctxt->interruptibility = (u8)smstate->int_shadow;
	return r;
}
#ifdef CONFIG_X86_64
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
			     const struct kvm_smram_state_64 *smstate)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	struct desc_ptr dt;
	int i, r;
	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = smstate->gprs[15 - i];
	ctxt->_eip   = smstate->rip;
	ctxt->eflags = smstate->rflags | X86_EFLAGS_FIXED;
	if (kvm_set_dr(vcpu, 6, smstate->dr6))
		return X86EMUL_UNHANDLEABLE;
	if (kvm_set_dr(vcpu, 7, smstate->dr7))
		return X86EMUL_UNHANDLEABLE;
	vcpu->arch.smbase =         smstate->smbase;
	if (kvm_set_msr(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))
		return X86EMUL_UNHANDLEABLE;
	rsm_load_seg_64(vcpu, &smstate->tr, VCPU_SREG_TR);
	dt.size =                   smstate->idtr.limit;
	dt.address =                smstate->idtr.base;
	static_call(kvm_x86_set_idt)(vcpu, &dt);
	rsm_load_seg_64(vcpu, &smstate->ldtr, VCPU_SREG_LDTR);
	dt.size =                   smstate->gdtr.limit;
	dt.address =                smstate->gdtr.base;
	static_call(kvm_x86_set_gdt)(vcpu, &dt);
	r = rsm_enter_protected_mode(vcpu, smstate->cr0, smstate->cr3, smstate->cr4);
	if (r != X86EMUL_CONTINUE)
		return r;
	rsm_load_seg_64(vcpu, &smstate->es, VCPU_SREG_ES);
	rsm_load_seg_64(vcpu, &smstate->cs, VCPU_SREG_CS);
	rsm_load_seg_64(vcpu, &smstate->ss, VCPU_SREG_SS);
	rsm_load_seg_64(vcpu, &smstate->ds, VCPU_SREG_DS);
	rsm_load_seg_64(vcpu, &smstate->fs, VCPU_SREG_FS);
	rsm_load_seg_64(vcpu, &smstate->gs, VCPU_SREG_GS);
	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
	ctxt->interruptibility = (u8)smstate->int_shadow;
	return X86EMUL_CONTINUE;
}
#endif
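/*
 * RSM emulation: read the state-save area back from SMRAM, drop the
 * vCPU into a safe real-mode-like state, and then reload the saved
 * state using the 32-bit or 64-bit format as appropriate.
 */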
int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	unsigned long cr0;
	union kvm_smram smram;
	u64 smbase;
	int ret;
	smbase = vcpu->arch.smbase;
	ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, smram.bytes, sizeof(smram));
	if (ret < 0)
		return X86EMUL_UNHANDLEABLE;
	if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0)
		static_call(kvm_x86_set_nmi_mask)(vcpu, false);
	kvm_smm_changed(vcpu, false);
	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
#ifdef CONFIG_X86_64
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
		struct kvm_segment cs_desc;
		unsigned long cr4;
		/* Zero CR4.PCIDE before CR0.PG.  */
		cr4 = kvm_read_cr4(vcpu);
		if (cr4 & X86_CR4_PCIDE)
			kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);
		/* A 32-bit code segment is required to clear EFER.LMA.  */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.present = 1;
		kvm_set_segment(vcpu, &cs_desc, VCPU_SREG_CS);
	}
#endif
	/* For the 64-bit case, this will clear EFER.LMA.  */
	cr0 = kvm_read_cr0(vcpu);
	if (cr0 & X86_CR0_PE)
		kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
#ifdef CONFIG_X86_64
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
		unsigned long cr4, efer;
		/* Clear CR4.PAE before clearing EFER.LME. */
		cr4 = kvm_read_cr4(vcpu);
		if (cr4 & X86_CR4_PAE)
			kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PAE);
		/* And finally go back to 32-bit mode.  */
		efer = 0;
		kvm_set_msr(vcpu, MSR_EFER, efer);
	}
#endif
	/*
	 * Give leave_smm() a chance to make ISA-specific changes to the vCPU
	 * state (e.g. enter guest mode) before loading state from the SMM
	 * state-save area.
	 */
	if (static_call(kvm_x86_leave_smm)(vcpu, &smram))
		return X86EMUL_UNHANDLEABLE;
#ifdef CONFIG_X86_64
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return rsm_load_state_64(ctxt, &smram.smram64);
	else
#endif
		return rsm_load_state_32(ctxt, &smram.smram32);
}
 | 
	linux-master | 
	arch/x86/kvm/smm.c | 
| 
	/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 * Authors:
 *   Yaozu (Eddie) Dong <[email protected]>
 *   Port from Qemu.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include "irq.h"
#include <linux/kvm_host.h>
#include "trace.h"
#define pr_pic_unimpl(fmt, ...)	\
	pr_err_ratelimited("pic: " fmt, ## __VA_ARGS__)
static void pic_irq_request(struct kvm *kvm, int level);
static void pic_lock(struct kvm_pic *s)
	__acquires(&s->lock)
{
	spin_lock(&s->lock);
}
static void pic_unlock(struct kvm_pic *s)
	__releases(&s->lock)
{
	bool wakeup = s->wakeup_needed;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	s->wakeup_needed = false;
	spin_unlock(&s->lock);
	if (wakeup) {
		kvm_for_each_vcpu(i, vcpu, s->kvm) {
			if (kvm_apic_accept_pic_intr(vcpu)) {
				kvm_make_request(KVM_REQ_EVENT, vcpu);
				kvm_vcpu_kick(vcpu);
				return;
			}
		}
	}
}
static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
	s->isr &= ~(1 << irq);
	if (s != &s->pics_state->pics[0])
		irq += 8;
	/*
	 * We are dropping lock while calling ack notifiers since ack
	 * notifier callbacks for assigned devices call into PIC recursively.
	 * Another interrupt may be delivered to the PIC while the lock is
	 * dropped, but that is safe since the PIC state has already been
	 * updated at this stage.
	 */
	pic_unlock(s->pics_state);
	kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
	pic_lock(s->pics_state);
}
/*
 * Set the IRQ level.  If an edge is detected, the IRR bit is set to 1.
 * Returns -1 if the pin is masked, 0 if the interrupt was coalesced with
 * one already pending, and 1 otherwise.
 */
static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
	int mask, ret = 1;
	mask = 1 << irq;
	if (s->elcr & mask)	/* level triggered */
		if (level) {
			ret = !(s->irr & mask);
			s->irr |= mask;
			s->last_irr |= mask;
		} else {
			s->irr &= ~mask;
			s->last_irr &= ~mask;
		}
	else	/* edge triggered */
		if (level) {
			if ((s->last_irr & mask) == 0) {
				ret = !(s->irr & mask);
				s->irr |= mask;
			}
			s->last_irr |= mask;
		} else
			s->last_irr &= ~mask;
	return (s->imr & mask) ? -1 : ret;
}
/*
 * return the highest priority found in mask (highest = smallest
 * number). Returns 8 if no IRQ is pending.
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
	int priority;
	if (mask == 0)
		return 8;
	priority = 0;
	while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
		priority++;
	return priority;
}
/*
 * Return the interrupt the PIC wants to deliver, or -1 if none.
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
	int mask, cur_priority, priority;
	mask = s->irr & ~s->imr;
	priority = get_priority(s, mask);
	if (priority == 8)
		return -1;
	/*
	 * compute current priority. If special fully nested mode on the
	 * master, the IRQ coming from the slave is not taken into account
	 * for the priority computation.
	 */
	mask = s->isr;
	if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
		mask &= ~(1 << 2);
	cur_priority = get_priority(s, mask);
	if (priority < cur_priority)
		/*
		 * higher priority found: an irq should be generated
		 */
		return (priority + s->priority_add) & 7;
	else
		return -1;
}
/*
 * Raise an IRQ to the CPU if necessary.  Must be called every time the
 * active IRQ may change.
 */
static void pic_update_irq(struct kvm_pic *s)
{
	int irq2, irq;
	irq2 = pic_get_irq(&s->pics[1]);
	if (irq2 >= 0) {
		/*
		 * if irq request by slave pic, signal master PIC
		 */
		pic_set_irq1(&s->pics[0], 2, 1);
		pic_set_irq1(&s->pics[0], 2, 0);
	}
	irq = pic_get_irq(&s->pics[0]);
	pic_irq_request(s->kvm, irq >= 0);
}
void kvm_pic_update_irq(struct kvm_pic *s)
{
	pic_lock(s);
	pic_update_irq(s);
	pic_unlock(s);
}
int kvm_pic_set_irq(struct kvm_pic *s, int irq, int irq_source_id, int level)
{
	int ret, irq_level;
	BUG_ON(irq < 0 || irq >= PIC_NUM_PINS);
	pic_lock(s);
	irq_level = __kvm_irq_line_state(&s->irq_states[irq],
					 irq_source_id, level);
	ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, irq_level);
	pic_update_irq(s);
	trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
			      s->pics[irq >> 3].imr, ret == 0);
	pic_unlock(s);
	return ret;
}
void kvm_pic_clear_all(struct kvm_pic *s, int irq_source_id)
{
	int i;
	pic_lock(s);
	for (i = 0; i < PIC_NUM_PINS; i++)
		__clear_bit(irq_source_id, &s->irq_states[i]);
	pic_unlock(s);
}
/*
 * acknowledge interrupt 'irq'
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
	s->isr |= 1 << irq;
	/*
	 * We don't clear a level sensitive interrupt here
	 */
	if (!(s->elcr & (1 << irq)))
		s->irr &= ~(1 << irq);
	if (s->auto_eoi) {
		if (s->rotate_on_auto_eoi)
			s->priority_add = (irq + 1) & 7;
		pic_clear_isr(s, irq);
	}
}
int kvm_pic_read_irq(struct kvm *kvm)
{
	int irq, irq2, intno;
	struct kvm_pic *s = kvm->arch.vpic;
	s->output = 0;
	pic_lock(s);
	irq = pic_get_irq(&s->pics[0]);
	if (irq >= 0) {
		pic_intack(&s->pics[0], irq);
		if (irq == 2) {
			irq2 = pic_get_irq(&s->pics[1]);
			if (irq2 >= 0)
				pic_intack(&s->pics[1], irq2);
			else
				/*
				 * spurious IRQ on slave controller
				 */
				irq2 = 7;
			intno = s->pics[1].irq_base + irq2;
		} else
			intno = s->pics[0].irq_base + irq;
	} else {
		/*
		 * spurious IRQ on master controller
		 */
		irq = 7;
		intno = s->pics[0].irq_base + irq;
	}
	pic_update_irq(s);
	pic_unlock(s);
	return intno;
}
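/*
 * Reset one PIC to its post-ICW1 init state.  Edge-triggered interrupts
 * that were latched are ack-notified so that ack notifier state stays
 * in sync.
 */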
static void kvm_pic_reset(struct kvm_kpic_state *s)
{
	int irq;
	unsigned long i;
	struct kvm_vcpu *vcpu;
	u8 edge_irr = s->irr & ~s->elcr;
	bool found = false;
	s->last_irr = 0;
	s->irr &= s->elcr;
	s->imr = 0;
	s->priority_add = 0;
	s->special_mask = 0;
	s->read_reg_select = 0;
	if (!s->init4) {
		s->special_fully_nested_mode = 0;
		s->auto_eoi = 0;
	}
	s->init_state = 1;
	kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
		if (kvm_apic_accept_pic_intr(vcpu)) {
			found = true;
			break;
		}
	if (!found)
		return;
	for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
		if (edge_irr & (1 << irq))
			pic_clear_isr(s, irq);
}
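/*
 * Decode a write to the command port (0x20/0xa0) or data port
 * (0x21/0xa1): ICW1, OCW2 and OCW3 arrive on the command port, while
 * the data port carries ICW2-ICW4 during initialization and the IMR
 * (OCW1) in normal mode.
 */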
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	int priority, cmd, irq;
	addr &= 1;
	if (addr == 0) {
		if (val & 0x10) {
			s->init4 = val & 1;
			if (val & 0x02)
				pr_pic_unimpl("single mode not supported");
			if (val & 0x08)
				pr_pic_unimpl(
						"level sensitive irq not supported");
			kvm_pic_reset(s);
		} else if (val & 0x08) {
			if (val & 0x04)
				s->poll = 1;
			if (val & 0x02)
				s->read_reg_select = val & 1;
			if (val & 0x40)
				s->special_mask = (val >> 5) & 1;
		} else {
			cmd = val >> 5;
			switch (cmd) {
			case 0:
			case 4:
				s->rotate_on_auto_eoi = cmd >> 2;
				break;
			case 1:	/* end of interrupt */
			case 5:
				priority = get_priority(s, s->isr);
				if (priority != 8) {
					irq = (priority + s->priority_add) & 7;
					if (cmd == 5)
						s->priority_add = (irq + 1) & 7;
					pic_clear_isr(s, irq);
					pic_update_irq(s->pics_state);
				}
				break;
			case 3:
				irq = val & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			case 6:
				s->priority_add = (val + 1) & 7;
				pic_update_irq(s->pics_state);
				break;
			case 7:
				irq = val & 7;
				s->priority_add = (irq + 1) & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			default:
				break;	/* no operation */
			}
		}
	} else
		switch (s->init_state) {
		case 0: { /* normal mode */
			u8 imr_diff = s->imr ^ val,
				off = (s == &s->pics_state->pics[0]) ? 0 : 8;
			s->imr = val;
			for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
				if (imr_diff & (1 << irq))
					kvm_fire_mask_notifiers(
						s->pics_state->kvm,
						SELECT_PIC(irq + off),
						irq + off,
						!!(s->imr & (1 << irq)));
			pic_update_irq(s->pics_state);
			break;
		}
		case 1:
			s->irq_base = val & 0xf8;
			s->init_state = 2;
			break;
		case 2:
			if (s->init4)
				s->init_state = 3;
			else
				s->init_state = 0;
			break;
		case 3:
			s->special_fully_nested_mode = (val >> 4) & 1;
			s->auto_eoi = (val >> 1) & 1;
			s->init_state = 0;
			break;
		}
}
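/*
 * Poll-mode read: acknowledge and return the highest-priority pending
 * IRQ with bit 7 set, or 0x07 with bit 7 clear if nothing is pending.
 */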
static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
	int ret;
	ret = pic_get_irq(s);
	if (ret >= 0) {
		if (addr1 >> 7) {
			s->pics_state->pics[0].isr &= ~(1 << 2);
			s->pics_state->pics[0].irr &= ~(1 << 2);
		}
		s->irr &= ~(1 << ret);
		pic_clear_isr(s, ret);
		if (addr1 >> 7 || ret != 2)
			pic_update_irq(s->pics_state);
		/* Bit 7 set: an interrupt is pending */
		ret |= 0x80;
	} else {
		/* Bit 7 clear: no interrupt is pending */
		ret = 0x07;
		pic_update_irq(s->pics_state);
	}
	return ret;
}
static u32 pic_ioport_read(void *opaque, u32 addr)
{
	struct kvm_kpic_state *s = opaque;
	int ret;
	if (s->poll) {
		ret = pic_poll_read(s, addr);
		s->poll = 0;
	} else
		if ((addr & 1) == 0)
			if (s->read_reg_select)
				ret = s->isr;
			else
				ret = s->irr;
		else
			ret = s->imr;
	return ret;
}
static void elcr_ioport_write(void *opaque, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	s->elcr = val & s->elcr_mask;
}
static u32 elcr_ioport_read(void *opaque)
{
	struct kvm_kpic_state *s = opaque;
	return s->elcr;
}
static int picdev_write(struct kvm_pic *s,
			 gpa_t addr, int len, const void *val)
{
	unsigned char data = *(unsigned char *)val;
	if (len != 1) {
		pr_pic_unimpl("non byte write\n");
		return 0;
	}
	switch (addr) {
	case 0x20:
	case 0x21:
		pic_lock(s);
		pic_ioport_write(&s->pics[0], addr, data);
		pic_unlock(s);
		break;
	case 0xa0:
	case 0xa1:
		pic_lock(s);
		pic_ioport_write(&s->pics[1], addr, data);
		pic_unlock(s);
		break;
	case 0x4d0:
	case 0x4d1:
		pic_lock(s);
		elcr_ioport_write(&s->pics[addr & 1], data);
		pic_unlock(s);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static int picdev_read(struct kvm_pic *s,
		       gpa_t addr, int len, void *val)
{
	unsigned char *data = (unsigned char *)val;
	if (len != 1) {
		memset(val, 0, len);
		pr_pic_unimpl("non byte read\n");
		return 0;
	}
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		pic_lock(s);
		*data = pic_ioport_read(&s->pics[addr >> 7], addr);
		pic_unlock(s);
		break;
	case 0x4d0:
	case 0x4d1:
		pic_lock(s);
		*data = elcr_ioport_read(&s->pics[addr & 1]);
		pic_unlock(s);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_master),
			    addr, len, val);
}
static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_master),
			    addr, len, val);
}
static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_slave),
			    addr, len, val);
}
static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_slave),
			    addr, len, val);
}
static int picdev_elcr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_elcr),
			    addr, len, val);
}
static int picdev_elcr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			    gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_elcr),
			    addr, len, val);
}
/*
 * callback when PIC0 irq status changed
 */
static void pic_irq_request(struct kvm *kvm, int level)
{
	struct kvm_pic *s = kvm->arch.vpic;
	if (!s->output)
		s->wakeup_needed = true;
	s->output = level;
}
static const struct kvm_io_device_ops picdev_master_ops = {
	.read     = picdev_master_read,
	.write    = picdev_master_write,
};
static const struct kvm_io_device_ops picdev_slave_ops = {
	.read     = picdev_slave_read,
	.write    = picdev_slave_write,
};
static const struct kvm_io_device_ops picdev_elcr_ops = {
	.read     = picdev_elcr_read,
	.write    = picdev_elcr_write,
};
int kvm_pic_init(struct kvm *kvm)
{
	struct kvm_pic *s;
	int ret;
	s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL_ACCOUNT);
	if (!s)
		return -ENOMEM;
	spin_lock_init(&s->lock);
	s->kvm = kvm;
	s->pics[0].elcr_mask = 0xf8;
	s->pics[1].elcr_mask = 0xde;
	s->pics[0].pics_state = s;
	s->pics[1].pics_state = s;
	/*
	 * Initialize PIO device
	 */
	kvm_iodevice_init(&s->dev_master, &picdev_master_ops);
	kvm_iodevice_init(&s->dev_slave, &picdev_slave_ops);
	kvm_iodevice_init(&s->dev_elcr, &picdev_elcr_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x20, 2,
				      &s->dev_master);
	if (ret < 0)
		goto fail_unlock;
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0xa0, 2, &s->dev_slave);
	if (ret < 0)
		goto fail_unreg_2;
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_elcr);
	if (ret < 0)
		goto fail_unreg_1;
	mutex_unlock(&kvm->slots_lock);
	kvm->arch.vpic = s;
	return 0;
fail_unreg_1:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_slave);
fail_unreg_2:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_master);
fail_unlock:
	mutex_unlock(&kvm->slots_lock);
	kfree(s);
	return ret;
}
void kvm_pic_destroy(struct kvm *kvm)
{
	struct kvm_pic *vpic = kvm->arch.vpic;
	if (!vpic)
		return;
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_elcr);
	mutex_unlock(&kvm->slots_lock);
	kvm->arch.vpic = NULL;
	kfree(vpic);
}
 | 
	linux-master | 
	arch/x86/kvm/i8259.c | 
| 
	// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "x86.h"
#include "xen.h"
#include "hyperv.h"
#include "lapic.h"
#include <linux/eventfd.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>
#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/version.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/sched.h>
#include <asm/xen/cpuid.h>
#include "cpuid.h"
#include "trace.h"
static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm);
static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);
DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);
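/*
 * Activate (or deactivate, for KVM_XEN_INVALID_GFN) the shared_info
 * cache and publish the wall-clock time into it using the Xen version
 * protocol: the version is made odd before the update and even again
 * afterwards, with write barriers in between.
 */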
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct pvclock_wall_clock *wc;
	gpa_t gpa = gfn_to_gpa(gfn);
	u32 *wc_sec_hi;
	u32 wc_version;
	u64 wall_nsec;
	int ret = 0;
	int idx = srcu_read_lock(&kvm->srcu);
	if (gfn == KVM_XEN_INVALID_GFN) {
		kvm_gpc_deactivate(gpc);
		goto out;
	}
	do {
		ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
		if (ret)
			goto out;
		/*
		 * This code mirrors kvm_write_wall_clock() except that it writes
		 * directly through the pfn cache and doesn't mark the page dirty.
		 */
		wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);
		/* It could be invalid again already, so we need to check */
		read_lock_irq(&gpc->lock);
		if (gpc->valid)
			break;
		read_unlock_irq(&gpc->lock);
	} while (1);
	/* Paranoia checks on the 32-bit struct layout */
	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
#ifdef CONFIG_X86_64
	/* Paranoia checks on the 64-bit struct layout */
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		wc_sec_hi = &shinfo->wc_sec_hi;
		wc = &shinfo->wc;
	} else
#endif
	{
		struct compat_shared_info *shinfo = gpc->khva;
		wc_sec_hi = &shinfo->arch.wc_sec_hi;
		wc = &shinfo->wc;
	}
	/* Increment and ensure an odd value */
	wc_version = wc->version = (wc->version + 1) | 1;
	smp_wmb();
	wc->nsec = do_div(wall_nsec,  1000000000);
	wc->sec = (u32)wall_nsec;
	*wc_sec_hi = wall_nsec >> 32;
	smp_wmb();
	wc->version = wc_version + 1;
	read_unlock_irq(&gpc->lock);
	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);
out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}
void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
	if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) {
		struct kvm_xen_evtchn e;
		e.vcpu_id = vcpu->vcpu_id;
		e.vcpu_idx = vcpu->vcpu_idx;
		e.port = vcpu->arch.xen.timer_virq;
		e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
		kvm_xen_set_evtchn(&e, vcpu->kvm);
		vcpu->arch.xen.timer_expires = 0;
		atomic_set(&vcpu->arch.xen.timer_pending, 0);
	}
}
static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
					     arch.xen.timer);
	if (atomic_read(&vcpu->arch.xen.timer_pending))
		return HRTIMER_NORESTART;
	atomic_inc(&vcpu->arch.xen.timer_pending);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);
	return HRTIMER_NORESTART;
}
static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
{
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
	vcpu->arch.xen.timer_expires = guest_abs;
	if (delta_ns <= 0) {
		xen_timer_callback(&vcpu->arch.xen.timer);
	} else {
		ktime_t ktime_now = ktime_get();
		hrtimer_start(&vcpu->arch.xen.timer,
			      ktime_add_ns(ktime_now, delta_ns),
			      HRTIMER_MODE_ABS_HARD);
	}
}
static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.xen.timer);
	vcpu->arch.xen.timer_expires = 0;
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
}
static void kvm_xen_init_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	vcpu->arch.xen.timer.function = xen_timer_callback;
}
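/*
 * Copy the vCPU's runstate accounting into the guest-supplied
 * vcpu_runstate_info, which may use either the 64-bit or the compat
 * layout and may straddle two pages.
 */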
static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	struct gfn_to_pfn_cache *gpc1 = &vx->runstate_cache;
	struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache;
	size_t user_len, user_len1, user_len2;
	struct vcpu_runstate_info rs;
	unsigned long flags;
	size_t times_ofs;
	uint8_t *update_bit = NULL;
	uint64_t entry_time;
	uint64_t *rs_times;
	int *rs_state;
	/*
	 * The only difference between 32-bit and 64-bit versions of the
	 * runstate struct is the alignment of uint64_t in 32-bit, which
	 * means that the 64-bit version has an additional 4 bytes of
	 * padding after the first field 'state'. Let's be really really
	 * paranoid about that, and match it against the internal data
	 * structures that we memcpy into it...
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
#ifdef CONFIG_X86_64
	/*
	 * The 64-bit structure has 4 bytes of padding before 'state_entry_time'
	 * so each subsequent field is shifted by 4, and it's 4 bytes longer.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) + 4);
	BUILD_BUG_ON(sizeof(struct vcpu_runstate_info) != 0x2c + 4);
#endif
	/*
	 * The state field is in the same place at the start of both structs,
	 * and is the same size (int) as vx->current_runstate.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
		     offsetof(struct compat_vcpu_runstate_info, state));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
	/*
	 * The state_entry_time field is 64 bits in both versions, and the
	 * XEN_RUNSTATE_UPDATE flag is in the top bit, which given that x86
	 * is little-endian means that it's in the last *byte* of the word.
	 * That detail is important later.
	 */
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
		     sizeof(uint64_t));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
		     sizeof(uint64_t));
	BUILD_BUG_ON((XEN_RUNSTATE_UPDATE >> 56) != 0x80);
	/*
	 * The time array is four 64-bit quantities in both versions, matching
	 * the vx->runstate_times and immediately following state_entry_time.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct vcpu_runstate_info, time) - sizeof(uint64_t));
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(uint64_t));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof_field(struct compat_vcpu_runstate_info, time));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof(vx->runstate_times));
	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		user_len = sizeof(struct vcpu_runstate_info);
		times_ofs = offsetof(struct vcpu_runstate_info,
				     state_entry_time);
	} else {
		user_len = sizeof(struct compat_vcpu_runstate_info);
		times_ofs = offsetof(struct compat_vcpu_runstate_info,
				     state_entry_time);
	}
	/*
	 * There are basically no alignment constraints. The guest can set it
	 * up so it crosses from one page to the next, and at arbitrary byte
	 * alignment (and the 32-bit ABI doesn't align the 64-bit integers
	 * anyway, even if the overall struct had been 64-bit aligned).
	 */
	if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
		user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK);
		user_len2 = user_len - user_len1;
	} else {
		user_len1 = user_len;
		user_len2 = 0;
	}
	BUG_ON(user_len1 + user_len2 != user_len);
 retry:
	/*
	 * Attempt to obtain the GPC lock on *both* (if there are two)
	 * gfn_to_pfn caches that cover the region.
	 */
	if (atomic) {
		local_irq_save(flags);
		if (!read_trylock(&gpc1->lock)) {
			local_irq_restore(flags);
			return;
		}
	} else {
		read_lock_irqsave(&gpc1->lock, flags);
	}
	while (!kvm_gpc_check(gpc1, user_len1)) {
		read_unlock_irqrestore(&gpc1->lock, flags);
		/* When invoked from kvm_sched_out() we cannot sleep */
		if (atomic)
			return;
		if (kvm_gpc_refresh(gpc1, user_len1))
			return;
		read_lock_irqsave(&gpc1->lock, flags);
	}
	if (likely(!user_len2)) {
		/*
		 * Set up three pointers directly to the runstate_info
		 * struct in the guest (via the GPC).
		 *
		 *  • @rs_state   → state field
		 *  • @rs_times   → state_entry_time field.
		 *  • @update_bit → last byte of state_entry_time, which
		 *                  contains the XEN_RUNSTATE_UPDATE bit.
		 */
		rs_state = gpc1->khva;
		rs_times = gpc1->khva + times_ofs;
		if (v->kvm->arch.xen.runstate_update_flag)
			update_bit = ((void *)(&rs_times[1])) - 1;
	} else {
		/*
		 * The guest's runstate_info is split across two pages and we
		 * need to hold and validate both GPCs simultaneously. We can
		 * declare a lock ordering GPC1 > GPC2 because nothing else
		 * takes them more than one at a time. Set a subclass on the
		 * gpc1 lock to make lockdep shut up about it.
		 */
		lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_);
		if (atomic) {
			if (!read_trylock(&gpc2->lock)) {
				read_unlock_irqrestore(&gpc1->lock, flags);
				return;
			}
		} else {
			read_lock(&gpc2->lock);
		}
		if (!kvm_gpc_check(gpc2, user_len2)) {
			read_unlock(&gpc2->lock);
			read_unlock_irqrestore(&gpc1->lock, flags);
			/* When invoked from kvm_sched_out() we cannot sleep */
			if (atomic)
				return;
			/*
			 * Use kvm_gpc_activate() here because if the runstate
			 * area was configured in 32-bit mode and only extends
			 * to the second page now because the guest changed to
			 * 64-bit mode, the second GPC won't have been set up.
			 */
			if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1,
					     user_len2))
				return;
			/*
			 * We dropped the lock on GPC1 so we have to go all the
			 * way back and revalidate that too.
			 */
			goto retry;
		}
		/*
		 * In this case, the runstate_info struct will be assembled on
		 * the kernel stack (compat or not as appropriate) and will
		 * be copied to GPC1/GPC2 with a dual memcpy. Set up the three
		 * rs pointers accordingly.
		 */
		rs_times = &rs.state_entry_time;
		/*
		 * The rs_state pointer points to the start of what we'll
		 * copy to the guest, which in the case of a compat guest
		 * is the 32-bit field that the compiler thinks is padding.
		 */
		rs_state = ((void *)rs_times) - times_ofs;
		/*
		 * The update_bit is still directly in the guest memory,
		 * via one GPC or the other.
		 */
		if (v->kvm->arch.xen.runstate_update_flag) {
			if (user_len1 >= times_ofs + sizeof(uint64_t))
				update_bit = gpc1->khva + times_ofs +
					sizeof(uint64_t) - 1;
			else
				update_bit = gpc2->khva + times_ofs +
					sizeof(uint64_t) - 1 - user_len1;
		}
#ifdef CONFIG_X86_64
		/*
		 * Don't leak kernel memory through the padding in the 64-bit
		 * version of the struct.
		 */
		memset(&rs, 0, offsetof(struct vcpu_runstate_info, state_entry_time));
#endif
	}
	/*
	 * First, set the XEN_RUNSTATE_UPDATE bit (the top bit of the
	 * state_entry_time field), directly in the guest. We need to set
	 * that (and write-barrier) before writing to the rest of the
	 * structure, and clear it last. Just as Xen does, we address the
	 * single *byte* in which it resides because it might be in a
	 * different cache line from the rest of the 64-bit word, due to
	 * the (lack of) alignment constraints.
	 */
	entry_time = vx->runstate_entry_time;
	if (update_bit) {
		entry_time |= XEN_RUNSTATE_UPDATE;
		*update_bit = (vx->runstate_entry_time | XEN_RUNSTATE_UPDATE) >> 56;
		smp_wmb();
	}
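	/*
	 * Illustrative arithmetic: with XEN_RUNSTATE_UPDATE being bit 63,
	 * shifting the 64-bit word right by 56 leaves only the top byte:
	 *
	 *	entry_time | XEN_RUNSTATE_UPDATE = 0x8000_1234_5678_9abc
	 *	(... >> 56)                      = 0x80  -> *update_bit
	 */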
	/*
	 * Now assemble the actual structure, either on our kernel stack
	 * or directly in the guest according to how the rs_state and
	 * rs_times pointers were set up above.
	 */
	*rs_state = vx->current_runstate;
	rs_times[0] = entry_time;
	memcpy(rs_times + 1, vx->runstate_times, sizeof(vx->runstate_times));
	/* For the split case, we have to then copy it to the guest. */
	if (user_len2) {
		memcpy(gpc1->khva, rs_state, user_len1);
		memcpy(gpc2->khva, ((void *)rs_state) + user_len1, user_len2);
	}
	smp_wmb();
	/* Finally, clear the XEN_RUNSTATE_UPDATE bit. */
	if (update_bit) {
		entry_time &= ~XEN_RUNSTATE_UPDATE;
		*update_bit = entry_time >> 56;
		smp_wmb();
	}
	if (user_len2)
		read_unlock(&gpc2->lock);
	read_unlock_irqrestore(&gpc1->lock, flags);
	mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
	if (user_len2)
		mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
}
void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	u64 now = get_kvmclock_ns(v->kvm);
	u64 delta_ns = now - vx->runstate_entry_time;
	u64 run_delay = current->sched_info.run_delay;
	if (unlikely(!vx->runstate_entry_time))
		vx->current_runstate = RUNSTATE_offline;
	/*
	 * Time waiting for the scheduler isn't "stolen" if the
	 * vCPU wasn't running anyway.
	 */
	if (vx->current_runstate == RUNSTATE_running) {
		u64 steal_ns = run_delay - vx->last_steal;
		delta_ns -= steal_ns;
		vx->runstate_times[RUNSTATE_runnable] += steal_ns;
	}
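	/*
	 * Worked example of the above: if 10ms of kvmclock time elapsed
	 * since the last transition, of which 2ms was spent waiting in the
	 * host scheduler's runqueue (run_delay grew by 2ms), then 2ms is
	 * credited to RUNSTATE_runnable and only the remaining 8ms to the
	 * state the vCPU was nominally in (RUNSTATE_running).
	 */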
	vx->last_steal = run_delay;
	vx->runstate_times[vx->current_runstate] += delta_ns;
	vx->current_runstate = state;
	vx->runstate_entry_time = now;
	if (vx->runstate_cache.active)
		kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable);
}
static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
{
	struct kvm_lapic_irq irq = { };
	int r;
	irq.dest_id = v->vcpu_id;
	irq.vector = v->arch.xen.upcall_vector;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.shorthand = APIC_DEST_NOSHORT;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.level = 1;
	/* The fast version will always work for physical unicast */
	WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
}
/*
 * On event channel delivery, the vcpu_info may not have been accessible.
 * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
 * need to be marked into the vcpu_info (and evtchn_upcall_pending set).
 * Do so now that we can sleep in the context of the vCPU to bring the
 * page in, and refresh the pfn cache for it.
 */
void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
{
	unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;
	if (!evtchn_pending_sel)
		return;
	/*
	 * Yes, this is an open-coded loop. But that's just what put_user()
	 * does anyway. Page it in and retry the instruction. We're just a
	 * little more honest about it.
	 */
	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);
		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
			return;
		read_lock_irqsave(&gpc->lock, flags);
	}
	/* Now gpc->khva is a valid kernel address for the vcpu_info */
	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		struct vcpu_info *vi = gpc->khva;
		asm volatile(LOCK_PREFIX "orq %0, %1\n"
			     "notq %0\n"
			     LOCK_PREFIX "andq %0, %2\n"
			     : "=r" (evtchn_pending_sel),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	} else {
		u32 evtchn_pending_sel32 = evtchn_pending_sel;
		struct compat_vcpu_info *vi = gpc->khva;
		asm volatile(LOCK_PREFIX "orl %0, %1\n"
			     "notl %0\n"
			     LOCK_PREFIX "andl %0, %2\n"
			     : "=r" (evtchn_pending_sel32),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel32));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	}
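	/*
	 * Modulo atomicity, both asm blocks above are equivalent to:
	 *
	 *	vi->evtchn_pending_sel |= evtchn_pending_sel;
	 *	v->arch.xen.evtchn_pending_sel &= ~evtchn_pending_sel;
	 *
	 * i.e. every bit transferred into the guest's vcpu_info is also
	 * cleared from the in-kernel shadow, without clobbering any new
	 * bits another CPU may set in the shadow concurrently.
	 */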
	read_unlock_irqrestore(&gpc->lock, flags);
	/* For the per-vCPU lapic vector, deliver it as MSI. */
	if (v->arch.xen.upcall_vector)
		kvm_xen_inject_vcpu_vector(v);
	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;
	u8 rc = 0;
	/*
	 * If the global upcall vector (HVMIRQ_callback_vector) is set and
	 * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
	 */
	/* No need for compat handling here */
	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);
		/*
		 * This function gets called from kvm_vcpu_block() after setting the
		 * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
		 * from a HLT. So we really mustn't sleep. If the page ended up absent
		 * at that point, just return 1 in order to trigger an immediate wake,
		 * and we'll end up getting called again from a context where we *can*
		 * fault in the page and wait for it.
		 */
		if (in_atomic() || !task_is_running(current))
			return 1;
		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) {
			/*
			 * If this failed, userspace has screwed up the
			 * vcpu_info mapping. No interrupts for you.
			 */
			return 0;
		}
		read_lock_irqsave(&gpc->lock, flags);
	}
	rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
	read_unlock_irqrestore(&gpc->lock, flags);
	return rc;
}
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;
	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
			r = -EINVAL;
		} else {
			mutex_lock(&kvm->arch.xen.xen_lock);
			kvm->arch.xen.long_mode = !!data->u.long_mode;
			mutex_unlock(&kvm->arch.xen.xen_lock);
			r = 0;
		}
		break;
	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		mutex_lock(&kvm->arch.xen.xen_lock);
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		mutex_unlock(&kvm->arch.xen.xen_lock);
		break;
	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10) {
			r = -EINVAL;
		} else {
			mutex_lock(&kvm->arch.xen.xen_lock);
			kvm->arch.xen.upcall_vector = data->u.vector;
			mutex_unlock(&kvm->arch.xen.xen_lock);
			r = 0;
		}
		break;
	case KVM_XEN_ATTR_TYPE_EVTCHN:
		r = kvm_xen_setattr_evtchn(kvm, data);
		break;
	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		mutex_lock(&kvm->arch.xen.xen_lock);
		kvm->arch.xen.xen_version = data->u.xen_version;
		mutex_unlock(&kvm->arch.xen.xen_lock);
		r = 0;
		break;
	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		mutex_lock(&kvm->arch.xen.xen_lock);
		kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag;
		mutex_unlock(&kvm->arch.xen.xen_lock);
		r = 0;
		break;
	default:
		break;
	}
	return r;
}
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;
	mutex_lock(&kvm->arch.xen.xen_lock);
	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;
	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (kvm->arch.xen.shinfo_cache.active)
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
		else
			data->u.shared_info.gfn = KVM_XEN_INVALID_GFN;
		r = 0;
		break;
	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = kvm->arch.xen.upcall_vector;
		r = 0;
		break;
	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		data->u.xen_version = kvm->arch.xen.xen_version;
		r = 0;
		break;
	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate_update_flag = kvm->arch.xen.runstate_update_flag;
		r = 0;
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->arch.xen.xen_lock);
	return r;
}
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int idx, r = -ENOENT;
	mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		/* No compat necessary here. */
		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
			     sizeof(struct compat_vcpu_info));
		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
			     offsetof(struct compat_vcpu_info, time));
		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
			r = 0;
			break;
		}
		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
				     data->u.gpa, sizeof(struct vcpu_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
			r = 0;
			break;
		}
		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache,
				     data->u.gpa,
				     sizeof(struct pvclock_vcpu_time_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR: {
		size_t sz, sz1, sz2;
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.gpa == KVM_XEN_INVALID_GPA) {
			r = 0;
		deactivate_out:
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
			break;
		}
		/*
		 * If the guest switches to 64-bit mode after setting the runstate
		 * address, that's actually OK. kvm_xen_update_runstate_guest()
		 * will cope.
		 */
		if (IS_ENABLED(CONFIG_64BIT) && vcpu->kvm->arch.xen.long_mode)
			sz = sizeof(struct vcpu_runstate_info);
		else
			sz = sizeof(struct compat_vcpu_runstate_info);
		/* How much fits in the (first) page? */
		sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK);
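		/*
		 * e.g. a runstate area starting 0x10 bytes before a page
		 * boundary gets sz1 = 0x10 here; any remainder (sz2) then
		 * starts exactly on the next page boundary, which the
		 * BUG_ON in the split case below asserts.
		 */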
		r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache,
				     data->u.gpa, sz1);
		if (r)
			goto deactivate_out;
		/* Either map the second page, or deactivate the second GPC */
		if (sz1 >= sz) {
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
		} else {
			sz2 = sz - sz1;
			BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK);
			r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache,
					     data->u.gpa + sz1, sz2);
			if (r)
				goto deactivate_out;
		}
		kvm_xen_update_runstate_guest(vcpu, false);
		break;
	}
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}
		kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}
		if (get_kvmclock_ns(vcpu->kvm) <
		    data->u.runstate.state_entry_time) {
			r = -EINVAL;
			break;
		}
		vcpu->arch.xen.current_runstate = data->u.runstate.state;
		vcpu->arch.xen.runstate_entry_time =
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] =
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
			data->u.runstate.time_offline;
		vcpu->arch.xen.last_steal = current->sched_info.run_delay;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline &&
		    data->u.runstate.state != (u64)-1) {
			r = -EINVAL;
			break;
		}
		/* The adjustment must add up */
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}
		if (get_kvmclock_ns(vcpu->kvm) <
		    (vcpu->arch.xen.runstate_entry_time +
		     data->u.runstate.state_entry_time)) {
			r = -EINVAL;
			break;
		}
		vcpu->arch.xen.runstate_entry_time +=
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
			data->u.runstate.time_offline;
		if (data->u.runstate.state <= RUNSTATE_offline)
			kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		else if (vcpu->arch.xen.runstate_cache.active)
			kvm_xen_update_runstate_guest(vcpu, false);
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		if (data->u.vcpu_id >= KVM_MAX_VCPUS) {
			r = -EINVAL;
		} else {
			vcpu->arch.xen.vcpu_id = data->u.vcpu_id;
			r = 0;
		}
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		if (data->u.timer.port &&
		    data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
			r = -EINVAL;
			break;
		}
		if (!vcpu->arch.xen.timer.function)
			kvm_xen_init_timer(vcpu);
		/* Stop the timer (if it's running) before changing the vector */
		kvm_xen_stop_timer(vcpu);
		vcpu->arch.xen.timer_virq = data->u.timer.port;
		/* Start the timer if the new value has a valid vector+expiry. */
		if (data->u.timer.port && data->u.timer.expires_ns)
			kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
					    data->u.timer.expires_ns -
					    get_kvmclock_ns(vcpu->kvm));
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10) {
			r = -EINVAL;
		} else {
			vcpu->arch.xen.upcall_vector = data->u.vector;
			r = 0;
		}
		break;
	default:
		break;
	}
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
	return r;
}
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int r = -ENOENT;
	mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		if (vcpu->arch.xen.vcpu_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
		else
			data->u.gpa = KVM_XEN_INVALID_GPA;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (vcpu->arch.xen.vcpu_time_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
		else
			data->u.gpa = KVM_XEN_INVALID_GPA;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (vcpu->arch.xen.runstate_cache.active) {
			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
			r = 0;
		}
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		data->u.runstate.state_entry_time =
			vcpu->arch.xen.runstate_entry_time;
		data->u.runstate.time_running =
			vcpu->arch.xen.runstate_times[RUNSTATE_running];
		data->u.runstate.time_runnable =
			vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
		data->u.runstate.time_blocked =
			vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
		data->u.runstate.time_offline =
			vcpu->arch.xen.runstate_times[RUNSTATE_offline];
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		r = -EINVAL;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		data->u.vcpu_id = vcpu->arch.xen.vcpu_id;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		data->u.timer.port = vcpu->arch.xen.timer_virq;
		data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
		data->u.timer.expires_ns = vcpu->arch.xen.timer_expires;
		r = 0;
		break;
	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = vcpu->arch.xen.upcall_vector;
		r = 0;
		break;
	default:
		break;
	}
	mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
	return r;
}
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);
	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;
	/*
	 * If Xen hypercall intercept is enabled, fill the hypercall
	 * page with VMCALL/VMMCALL instructions since that's what
	 * we catch. Else the VMM has provided the hypercall pages
	 * with instructions of its own choosing, so use those.
	 */
	if (kvm_xen_hypercall_enabled(kvm)) {
		u8 instructions[32];
		int i;
		if (page_num)
			return 1;
		/* mov imm32, %eax */
		instructions[0] = 0xb8;
		/* vmcall / vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + 5);
		/* ret */
		instructions[8] = 0xc3;
		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);
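		/*
		 * The page thus becomes an array of 32-byte stubs, one per
		 * hypercall number, each of the rough form:
		 *
		 *	b8 NN 00 00 00		mov	$NN, %eax
		 *	0f 01 c1|d9		vmcall / vmmcall
		 *	c3			ret
		 *	cc cc cc ...		int3 padding
		 */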
		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
	} else {
		/*
		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
		 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
		 */
		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				     : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;
		int ret;
		if (page_num >= blob_size)
			return 1;
		blob_addr += page_num * PAGE_SIZE;
		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);
		ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE);
		kfree(page);
		if (ret)
			return 1;
	}
	return 0;
}
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
	/* Only some feature flags need to be *enabled* by userspace */
	u32 permitted_flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
		KVM_XEN_HVM_CONFIG_EVTCHN_SEND;
	if (xhc->flags & ~permitted_flags)
		return -EINVAL;
	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;
	mutex_lock(&kvm->arch.xen.xen_lock);
	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));
	mutex_unlock(&kvm->arch.xen.xen_lock);
	return 0;
}
static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_rax_write(vcpu, result);
	return kvm_skip_emulated_instruction(vcpu);
}
static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
		return 1;
	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}
static inline int max_evtchn_port(struct kvm *kvm)
{
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
		return EVTCHN_2L_NR_CHANNELS;
	else
		return COMPAT_EVTCHN_2L_NR_CHANNELS;
}
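/*
 * The 2-level event channel ABI supports (bits per word)^2 ports, since
 * one selector word indexes the pending bitmap: 64 * 64 = 4096 channels
 * for a 64-bit guest, 32 * 32 = 1024 for a 32-bit one.
 */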
static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
			       evtchn_port_t *ports)
{
	struct kvm *kvm = vcpu->kvm;
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	unsigned long *pending_bits;
	unsigned long flags;
	bool ret = true;
	int idx, i;
	idx = srcu_read_lock(&kvm->srcu);
	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;
	ret = false;
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	}
	for (i = 0; i < nr_ports; i++) {
		if (test_bit(ports[i], pending_bits)) {
			ret = true;
			break;
		}
	}
 out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}
static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
				 u64 param, u64 *r)
{
	struct sched_poll sched_poll;
	evtchn_port_t port, *ports;
	struct x86_exception e;
	int i;
	if (!lapic_in_kernel(vcpu) ||
	    !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
		return false;
	if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
		struct compat_sched_poll sp32;
		/* Sanity check that the compat struct definition is correct */
		BUILD_BUG_ON(sizeof(sp32) != 16);
		if (kvm_read_guest_virt(vcpu, param, &sp32, sizeof(sp32), &e)) {
			*r = -EFAULT;
			return true;
		}
		/*
		 * This is a 32-bit pointer to an array of evtchn_port_t which
		 * are uint32_t, so once it's converted no further compat
		 * handling is needed.
		 */
		sched_poll.ports = (void *)(unsigned long)(sp32.ports);
		sched_poll.nr_ports = sp32.nr_ports;
		sched_poll.timeout = sp32.timeout;
	} else {
		if (kvm_read_guest_virt(vcpu, param, &sched_poll,
					sizeof(sched_poll), &e)) {
			*r = -EFAULT;
			return true;
		}
	}
	if (unlikely(sched_poll.nr_ports > 1)) {
		/* Xen (unofficially) limits number of pollers to 128 */
		if (sched_poll.nr_ports > 128) {
			*r = -EINVAL;
			return true;
		}
		ports = kmalloc_array(sched_poll.nr_ports,
				      sizeof(*ports), GFP_KERNEL);
		if (!ports) {
			*r = -ENOMEM;
			return true;
		}
	} else {
		ports = &port;
	}
	if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
				sched_poll.nr_ports * sizeof(*ports), &e)) {
		*r = -EFAULT;
		return true;
	}
	for (i = 0; i < sched_poll.nr_ports; i++) {
		if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
			*r = -EINVAL;
			goto out;
		}
	}
	if (sched_poll.nr_ports == 1)
		vcpu->arch.xen.poll_evtchn = port;
	else
		vcpu->arch.xen.poll_evtchn = -1;
	set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);
	if (!wait_pending_event(vcpu, sched_poll.nr_ports, ports)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		if (sched_poll.timeout)
			mod_timer(&vcpu->arch.xen.poll_timer,
				  jiffies + nsecs_to_jiffies(sched_poll.timeout));
		kvm_vcpu_halt(vcpu);
		if (sched_poll.timeout)
			del_timer(&vcpu->arch.xen.poll_timer);
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}
	vcpu->arch.xen.poll_evtchn = 0;
	*r = 0;
out:
	/* Really, this is only needed in case of timeout */
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);
	if (unlikely(sched_poll.nr_ports > 1))
		kfree(ports);
	return true;
}
static void cancel_evtchn_poll(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.xen.poll_timer);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);
}
static bool kvm_xen_hcall_sched_op(struct kvm_vcpu *vcpu, bool longmode,
				   int cmd, u64 param, u64 *r)
{
	switch (cmd) {
	case SCHEDOP_poll:
		if (kvm_xen_schedop_poll(vcpu, longmode, param, r))
			return true;
		fallthrough;
	case SCHEDOP_yield:
		kvm_vcpu_on_spin(vcpu, true);
		*r = 0;
		return true;
	default:
		break;
	}
	return false;
}
struct compat_vcpu_set_singleshot_timer {
	uint64_t timeout_abs_ns;
	uint32_t flags;
} __attribute__((packed));
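/*
 * Thanks to the packing, the compat struct above is 12 bytes, while the
 * native vcpu_set_singleshot_timer is padded out to 16; the BUILD_BUG_ON()s
 * below check that the fields which matter sit at identical offsets.
 */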
static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
				  int vcpu_id, u64 param, u64 *r)
{
	struct vcpu_set_singleshot_timer oneshot;
	struct x86_exception e;
	s64 delta;
	if (!kvm_xen_timer_enabled(vcpu))
		return false;
	switch (cmd) {
	case VCPUOP_set_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}
		/*
		 * The only difference for 32-bit compat is the 4 bytes of
		 * padding after the interesting part of the structure. So
		 * for a faithful emulation of Xen we have to *try* to copy
		 * the padding and return -EFAULT if we can't. Otherwise we
		 * might as well just have copied the 12-byte 32-bit struct.
		 */
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     offsetof(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, flags) !=
			     offsetof(struct vcpu_set_singleshot_timer, flags));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, flags));
		if (kvm_read_guest_virt(vcpu, param, &oneshot, longmode ? sizeof(oneshot) :
					sizeof(struct compat_vcpu_set_singleshot_timer), &e)) {
			*r = -EFAULT;
			return true;
		}
		delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm);
		if ((oneshot.flags & VCPU_SSHOTTMR_future) && delta < 0) {
			*r = -ETIME;
			return true;
		}
		kvm_xen_start_timer(vcpu, oneshot.timeout_abs_ns, delta);
		*r = 0;
		return true;
	case VCPUOP_stop_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}
		kvm_xen_stop_timer(vcpu);
		*r = 0;
		return true;
	}
	return false;
}
static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout,
				       u64 *r)
{
	if (!kvm_xen_timer_enabled(vcpu))
		return false;
	if (timeout) {
		uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
		int64_t delta = timeout - guest_now;
		/*
		 * Xen has a 'Linux workaround' in do_set_timer_op() which
		 * checks for negative absolute timeout values (caused by
		 * integer overflow), and for values about 13 days in the
		 * future (2^50ns) which would be caused by jiffies
		 * overflow. For those cases, it sets the timeout 100ms in
		 * the future (not *too* soon, since if a guest really did
		 * set a long timeout on purpose we don't want to keep
		 * churning CPU time by waking it up).
		 */
		if (unlikely((int64_t)timeout < 0 ||
			     (delta > 0 && (uint32_t) (delta >> 50) != 0))) {
			delta = 100 * NSEC_PER_MSEC;
			timeout = guest_now + delta;
		}
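		/*
		 * For scale: 2^50 ns is roughly 13 days
		 * (2^50 / 10^9 / 86400 ~= 13.03), so the check above fires
		 * for absolute timeouts more than ~13 days out as well as
		 * for ones which wrapped negative.
		 */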
		kvm_xen_start_timer(vcpu, timeout, delta);
	} else {
		kvm_xen_stop_timer(vcpu);
	}
	*r = 0;
	return true;
}
int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
	bool longmode;
	u64 input, params[6], r = -ENOSYS;
	bool handled = false;
	u8 cpl;
	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);
	longmode = is_64_bit_hypercall(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
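	/*
	 * The register choices above mirror the Xen hypercall ABI: 32-bit
	 * guests pass arguments in %ebx, %ecx, %edx, %esi, %edi, %ebp,
	 * while 64-bit guests use %rdi, %rsi, %rdx, %r10, %r8, %r9.
	 */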
	cpl = static_call(kvm_x86_get_cpl)(vcpu);
	trace_kvm_xen_hypercall(cpl, input, params[0], params[1], params[2],
				params[3], params[4], params[5]);
	/*
	 * Only allow hypercall acceleration for CPL0. The rare hypercalls that
	 * are permitted in guest userspace can be handled by the VMM.
	 */
	if (unlikely(cpl > 0))
		goto handle_in_userspace;
	switch (input) {
	case __HYPERVISOR_xen_version:
		if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) {
			r = vcpu->kvm->arch.xen.xen_version;
			handled = true;
		}
		break;
	case __HYPERVISOR_event_channel_op:
		if (params[0] == EVTCHNOP_send)
			handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r);
		break;
	case __HYPERVISOR_sched_op:
		handled = kvm_xen_hcall_sched_op(vcpu, longmode, params[0],
						 params[1], &r);
		break;
	case __HYPERVISOR_vcpu_op:
		handled = kvm_xen_hcall_vcpu_op(vcpu, longmode, params[0], params[1],
						params[2], &r);
		break;
	case __HYPERVISOR_set_timer_op: {
		u64 timeout = params[0];
		/* In 32-bit mode, the 64-bit timeout is in two 32-bit params. */
		if (!longmode)
			timeout |= params[1] << 32;
		handled = kvm_xen_hcall_set_timer_op(vcpu, timeout, &r);
		break;
	}
	default:
		break;
	}
	if (handled)
		return kvm_xen_hypercall_set_result(vcpu, r);
handle_in_userspace:
	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = cpl;
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;
	return 0;
}
static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
{
	int poll_evtchn = vcpu->arch.xen.poll_evtchn;
	if ((poll_evtchn == port || poll_evtchn == -1) &&
	    test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}
/*
 * The return value from this function is propagated to kvm_set_irq() API,
 * so it returns:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 *
 * It is also called directly from kvm_arch_set_irq_inatomic(), where the
 * only check on its return value is a comparison with -EWOULDBLOCK.
 */
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct kvm_vcpu *vcpu;
	unsigned long *pending_bits, *mask_bits;
	unsigned long flags;
	int port_word_bit;
	bool kick_vcpu = false;
	int vcpu_idx, idx, rc;
	vcpu_idx = READ_ONCE(xe->vcpu_idx);
	if (vcpu_idx >= 0) {
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	} else {
		vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
		if (!vcpu)
			return -EINVAL;
		WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
	}
	if (!vcpu->arch.xen.vcpu_info_cache.active)
		return -EINVAL;
	if (xe->port >= max_evtchn_port(kvm))
		return -EINVAL;
	rc = -EWOULDBLOCK;
	idx = srcu_read_lock(&kvm->srcu);
	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 64;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 32;
	}
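	/*
	 * port_word_bit indexes evtchn_pending_sel: one selector bit per
	 * word of the pending bitmap. e.g. port 67 is bit 67 % 64 = 3 of
	 * word 67 / 64 = 1 for a 64-bit guest, but bit 3 of word 2 for a
	 * 32-bit guest.
	 */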
	/*
	 * If this port wasn't already set, and if it isn't masked, then
	 * we try to set the corresponding bit in the in-kernel shadow of
	 * evtchn_pending_sel for the target vCPU. And if *that* wasn't
	 * already set, then we kick the vCPU in question to write to the
	 * *real* evtchn_pending_sel in its own guest vcpu_info struct.
	 */
	if (test_and_set_bit(xe->port, pending_bits)) {
		rc = 0; /* It was already raised */
	} else if (test_bit(xe->port, mask_bits)) {
		rc = -ENOTCONN; /* Masked */
		kvm_xen_check_poller(vcpu, xe->port);
	} else {
		rc = 1; /* Delivered to the bitmap in shared_info. */
		/* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
		read_unlock_irqrestore(&gpc->lock, flags);
		gpc = &vcpu->arch.xen.vcpu_info_cache;
		read_lock_irqsave(&gpc->lock, flags);
		if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
			/*
			 * Could not access the vcpu_info. Set the bit in-kernel
			 * and prod the vCPU to deliver it for itself.
			 */
			if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
				kick_vcpu = true;
			goto out_rcu;
		}
		if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
			struct vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		} else {
			struct compat_vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit,
					      (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		}
		/* For the per-vCPU lapic vector, deliver it as MSI. */
		if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
			kvm_xen_inject_vcpu_vector(vcpu);
			kick_vcpu = false;
		}
	}
 out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);
	if (kick_vcpu) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}
	return rc;
}
static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	bool mm_borrowed = false;
	int rc;
	rc = kvm_xen_set_evtchn_fast(xe, kvm);
	if (rc != -EWOULDBLOCK)
		return rc;
	if (current->mm != kvm->mm) {
		/*
		 * If not on a thread which already belongs to this KVM,
		 * we'd better be in the irqfd workqueue.
		 */
		if (WARN_ON_ONCE(current->mm))
			return -EINVAL;
		kthread_use_mm(kvm->mm);
		mm_borrowed = true;
	}
	mutex_lock(&kvm->arch.xen.xen_lock);
	/*
	 * It is theoretically possible for the page to be unmapped
	 * and the MMU notifier to invalidate the shared_info before
	 * we even get to use it. In that case, this looks like an
	 * infinite loop. It was tempting to do it via the userspace
	 * HVA instead... but that just *hides* the fact that it's
	 * an infinite loop, because if a fault occurs and it waits
	 * for the page to come back, it can *still* immediately
	 * fault and have to wait again, repeatedly.
	 *
	 * Conversely, the page could also have been reinstated by
	 * another thread before we even obtain the mutex above, so
	 * check again *first* before remapping it.
	 */
	do {
		struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
		int idx;
		rc = kvm_xen_set_evtchn_fast(xe, kvm);
		if (rc != -EWOULDBLOCK)
			break;
		idx = srcu_read_lock(&kvm->srcu);
		rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
		srcu_read_unlock(&kvm->srcu, idx);
	} while (!rc);
	mutex_unlock(&kvm->arch.xen.xen_lock);
	if (mm_borrowed)
		kthread_unuse_mm(kvm->mm);
	return rc;
}
/* This is the version called from kvm_set_irq() as the .set function */
static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
			 int irq_source_id, int level, bool line_status)
{
	if (!level)
		return -EINVAL;
	return kvm_xen_set_evtchn(&e->xen_evtchn, kvm);
}
/*
 * Set up an event channel interrupt from the KVM IRQ routing table.
 * Used for e.g. PIRQ from passed through physical devices.
 */
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue)
{
	struct kvm_vcpu *vcpu;
	if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
		return -EINVAL;
	/* We only support 2 level event channels for now */
	if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;
	/*
	 * Xen gives us interesting mappings from vCPU index to APIC ID,
	 * which means kvm_get_vcpu_by_id() has to iterate over all vCPUs
	 * to find it. Do that once at setup time, instead of every time.
	 * But beware that on live update / live migration, the routing
	 * table might be reinstated before the vCPU threads have finished
	 * recreating their vCPUs.
	 */
	vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
	if (vcpu)
		e->xen_evtchn.vcpu_idx = vcpu->vcpu_idx;
	else
		e->xen_evtchn.vcpu_idx = -1;
	e->xen_evtchn.port = ue->u.xen_evtchn.port;
	e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu;
	e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
	e->set = evtchn_set_fn;
	return 0;
}
/*
 * Explicit event sending from userspace with KVM_XEN_HVM_EVTCHN_SEND ioctl.
 */
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
{
	struct kvm_xen_evtchn e;
	int ret;
	if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
		return -EINVAL;
	/* We only support 2 level event channels for now */
	if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;
	e.port = uxe->port;
	e.vcpu_id = uxe->vcpu;
	e.vcpu_idx = -1;
	e.priority = uxe->priority;
	ret = kvm_xen_set_evtchn(&e, kvm);
	/*
	 * None of that 'return 1 if it actually got delivered' nonsense.
	 * We don't care if it was masked (-ENOTCONN) either.
	 */
	if (ret > 0 || ret == -ENOTCONN)
		ret = 0;
	return ret;
}
/*
 * Support for *outbound* event channel events via the EVTCHNOP_send hypercall.
 */
struct evtchnfd {
	u32 send_port;
	u32 type;
	union {
		struct kvm_xen_evtchn port;
		struct {
			u32 port; /* zero */
			struct eventfd_ctx *ctx;
		} eventfd;
	} deliver;
};
/*
 * Update target vCPU or priority for a registered sending channel.
 */
static int kvm_xen_eventfd_update(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct evtchnfd *evtchnfd;
	int ret;
	/* Protect writes to evtchnfd as well as the idr lookup.  */
	mutex_lock(&kvm->arch.xen.xen_lock);
	evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
	ret = -ENOENT;
	if (!evtchnfd)
		goto out_unlock;
	/* For an UPDATE, nothing may change except the priority/vcpu */
	ret = -EINVAL;
	if (evtchnfd->type != data->u.evtchn.type)
		goto out_unlock;
	/*
	 * Port cannot change, and if it's zero that was an eventfd
	 * which can't be changed either.
	 */
	if (!evtchnfd->deliver.port.port ||
	    evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
		goto out_unlock;
	/* We only support 2 level event channels for now */
	if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		goto out_unlock;
	evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
	}
	ret = 0;
out_unlock:
	mutex_unlock(&kvm->arch.xen.xen_lock);
	return ret;
}
/*
 * Configure the target (eventfd or local port delivery) for sending on
 * a given event channel.
 */
static int kvm_xen_eventfd_assign(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct eventfd_ctx *eventfd = NULL;
	struct evtchnfd *evtchnfd;
	int ret = -EINVAL;
	evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
	if (!evtchnfd)
		return -ENOMEM;
	switch (data->u.evtchn.type) {
	case EVTCHNSTAT_ipi:
		/* IPI must map back to the same port# */
		if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
			goto out_noeventfd; /* -EINVAL */
		break;
	case EVTCHNSTAT_interdomain:
		if (data->u.evtchn.deliver.port.port) {
			if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
				goto out_noeventfd; /* -EINVAL */
		} else {
			eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
			if (IS_ERR(eventfd)) {
				ret = PTR_ERR(eventfd);
				goto out_noeventfd;
			}
		}
		break;
	case EVTCHNSTAT_virq:
	case EVTCHNSTAT_closed:
	case EVTCHNSTAT_unbound:
	case EVTCHNSTAT_pirq:
	default: /* Unknown event channel type */
		goto out; /* -EINVAL */
	}
	evtchnfd->send_port = data->u.evtchn.send_port;
	evtchnfd->type = data->u.evtchn.type;
	if (eventfd) {
		evtchnfd->deliver.eventfd.ctx = eventfd;
	} else {
		/* We only support 2 level event channels for now */
		if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
			goto out; /* -EINVAL */
		evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port;
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
		evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	}
	mutex_lock(&kvm->arch.xen.xen_lock);
	ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
			GFP_KERNEL);
	mutex_unlock(&kvm->arch.xen.xen_lock);
	if (ret >= 0)
		return 0;
	if (ret == -ENOSPC)
		ret = -EEXIST;
out:
	if (eventfd)
		eventfd_ctx_put(eventfd);
out_noeventfd:
	kfree(evtchnfd);
	return ret;
}
static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
{
	struct evtchnfd *evtchnfd;
	mutex_lock(&kvm->arch.xen.xen_lock);
	evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
	mutex_unlock(&kvm->arch.xen.xen_lock);
	if (!evtchnfd)
		return -ENOENT;
	synchronize_srcu(&kvm->srcu);
	if (!evtchnfd->deliver.port.port)
		eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
	kfree(evtchnfd);
	return 0;
}
static int kvm_xen_eventfd_reset(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd, **all_evtchnfds;
	int i;
	int n = 0;
	mutex_lock(&kvm->arch.xen.xen_lock);
	/*
	 * Because synchronize_srcu() cannot be called inside the
	 * critical section, first collect all the evtchnfd objects
	 * in an array as they are removed from evtchn_ports.
	 */
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i)
		n++;
	all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
	if (!all_evtchnfds) {
		mutex_unlock(&kvm->arch.xen.xen_lock);
		return -ENOMEM;
	}
	n = 0;
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		all_evtchnfds[n++] = evtchnfd;
		idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
	}
	mutex_unlock(&kvm->arch.xen.xen_lock);
	synchronize_srcu(&kvm->srcu);
	while (n--) {
		evtchnfd = all_evtchnfds[n];
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	kfree(all_evtchnfds);
	return 0;
}
static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET)
		return kvm_xen_eventfd_reset(kvm);
	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;
	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN)
		return kvm_xen_eventfd_deassign(kvm, port);
	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE)
		return kvm_xen_eventfd_update(kvm, data);
	if (data->u.evtchn.flags)
		return -EINVAL;
	return kvm_xen_eventfd_assign(kvm, data);
}
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
{
	struct evtchnfd *evtchnfd;
	struct evtchn_send send;
	struct x86_exception e;
	/* Sanity check: this structure is the same for 32-bit and 64-bit */
	BUILD_BUG_ON(sizeof(send) != 4);
	if (kvm_read_guest_virt(vcpu, param, &send, sizeof(send), &e)) {
		*r = -EFAULT;
		return true;
	}
	/*
	 * evtchnfd is protected by kvm->srcu; the idr lookup instead
	 * is protected by RCU.
	 */
	rcu_read_lock();
	evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
	rcu_read_unlock();
	if (!evtchnfd)
		return false;
	if (evtchnfd->deliver.port.port) {
		int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
		if (ret < 0 && ret != -ENOTCONN)
			return false;
	} else {
		eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
	}
	*r = 0;
	return true;
}
void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx;
	vcpu->arch.xen.poll_evtchn = 0;
	timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);
	kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
}
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
	if (kvm_xen_timer_enabled(vcpu))
		kvm_xen_stop_timer(vcpu);
	kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
	del_timer_sync(&vcpu->arch.xen.poll_timer);
}
void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *entry;
	u32 function;
	if (!vcpu->arch.xen.cpuid.base)
		return;
	function = vcpu->arch.xen.cpuid.base | XEN_CPUID_LEAF(3);
	if (function > vcpu->arch.xen.cpuid.limit)
		return;
	entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
	if (entry) {
		entry->ecx = vcpu->arch.hv_clock.tsc_to_system_mul;
		entry->edx = vcpu->arch.hv_clock.tsc_shift;
	}
	entry = kvm_find_cpuid_entry_index(vcpu, function, 2);
	if (entry)
		entry->eax = vcpu->arch.hw_tsc_khz;
}
void kvm_xen_init_vm(struct kvm *kvm)
{
	mutex_init(&kvm->arch.xen.xen_lock);
	idr_init(&kvm->arch.xen.evtchn_ports);
	kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
}
void kvm_xen_destroy_vm(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd;
	int i;
	kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	idr_destroy(&kvm->arch.xen.evtchn_ports);
	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
}
 | 
	linux-master | 
	arch/x86/kvm/xen.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <[email protected]>
 *   Gleb Natapov <[email protected]>
 *   Wei Huang    <[email protected]>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include <asm/cpu_device_id.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_GPL(kvm_pmu_cap);
/* Precise Distribution of Instructions Retired (PDIR) */
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
	/* Instruction-Accurate PDIR (PDIR++) */
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
	{}
};
/* Precise Distribution (PDist) */
static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
	{}
};
/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *     1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *        has MSR_K7_PERFCTRn and, for families 15H and later,
 *        MSR_F15H_PERF_CTRn, where MSR_F15H_PERF_CTR[0-3] are
 *        aliased to MSR_K7_PERFCTRn.
 *     2. MSR Index (named idx): This is normally used by the RDPMC
 *        instruction. For instance AMD's RDPMC instruction uses
 *        0000_0003h in ECX to access C001_0007h (MSR_K7_PERFCTR3).
 *        Intel has a similar mechanism, except that it also supports
 *        fixed counters. idx can be used as an index into the gp and
 *        fixed counters.
 *     3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *        code. Each pmc, stored in kvm_pmc.idx field, is unique across
 *        all perf counters (both gp and fixed). The mapping relationship
 *        between pmc and perf counters is as the following:
 *        * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *                 [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *        * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
 *          and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
 */
static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;
#define KVM_X86_PMU_OP(func)					     \
	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,			     \
				*(((struct kvm_pmu_ops *)0)->func));
#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>
void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
{
	memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));
#define __KVM_X86_PMU_OP(func) \
	static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
#define KVM_X86_PMU_OP(func) \
	WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
#define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>
#undef __KVM_X86_PMU_OP
}
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
	kvm_pmu_deliver_pmi(vcpu);
}
static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	bool skip_pmi = false;
	if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
		if (!in_pmi) {
			/*
			 * TODO: KVM is currently _choosing_ not to generate records
			 * for emulated instructions, avoiding BUFFER_OVF PMI when
			 * there are no records. Strictly speaking, such records
			 * should also be generated in the right context, to
			 * improve sampling accuracy.
			 */
			skip_pmi = true;
		} else {
			/* Indicate PEBS overflow PMI to guest. */
			skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
						      (unsigned long *)&pmu->global_status);
		}
	} else {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
	}
	if (!pmc->intr || skip_pmi)
		return;
	/*
	 * Inject PMI. If the vCPU was in guest mode when the NMI arrived,
	 * the PMI can be injected on the next guest-mode entry. Otherwise
	 * we can't be sure the vCPU wasn't executing a HLT instruction at
	 * the time of the vmexit, and won't re-enter guest mode until it
	 * is woken up. So we should wake it, but this is impossible from
	 * NMI context. Do it from irq work instead.
	 */
	if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
		irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
	else
		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	/*
	 * Ignore overflow events for counters that are scheduled to be
	 * reprogrammed, e.g. if a PMI for the previous event races with KVM's
	 * handling of a related guest WRMSR.
	 */
	if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
		return;
	__kvm_perf_overflow(pmc, true);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}
static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
{
	/*
	 * For some model specific pebs counters with special capabilities
	 * (PDIR, PDIR++, PDIST), KVM needs to raise the event precise
	 * level to the maximum value (currently 3, backwards compatible)
	 * so that the perf subsystem would assign specific hardware counter
	 * with that capability for vPMC.
	 */
	if ((pmc->idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) ||
	    (pmc->idx == 32 && x86_match_cpu(vmx_pebs_pdir_cpu)))
		return 3;
	/*
	 * The non-zero precision level of guest event makes the ordinary
	 * guest event becomes a guest PEBS event and triggers the host
	 * PEBS PMI handler to determine whether the PEBS overflow PMI
	 * comes from the host counters or the guest.
	 */
	return 1;
}
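/*
 * For reference, perf's precise_ip levels run 0..3: a non-zero level is
 * what makes the event a PEBS event at all, and 3 (the current maximum)
 * requests zero skid, which only the PDIR/PDIST-capable counters matched
 * above can provide.
 */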
static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
				 bool exclude_user, bool exclude_kernel,
				 bool intr)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};
	bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);
	attr.sample_period = get_sample_period(pmc, pmc->counter);
	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
	    guest_cpuid_is_intel(pmc->vcpu)) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
	}
	if (pebs) {
		/*
		 * For most PEBS hardware events, the difference in the software
		 * precision levels of guest and host PEBS events will not affect
		 * the accuracy of the PEBS profiling result, because the "event IP"
		 * in the PEBS record is calibrated on the guest side.
		 */
		attr.precise_ip = pmc_get_pebs_precise_level(pmc);
	}
	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
			    PTR_ERR(event), pmc->idx);
		return PTR_ERR(event);
	}
	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	pmc->is_paused = false;
	pmc->intr = intr || pebs;
	return 0;
}
static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;
	if (!pmc->perf_event || pmc->is_paused)
		return;
	/* update counter, reset event value to avoid redundant accumulation */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
	pmc->is_paused = true;
}
static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;
	/* recalibrate sample period and check if it's accepted by perf core */
	if (is_sampling_event(pmc->perf_event) &&
	    perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;
	if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
	    (!!pmc->perf_event->attr.precise_ip))
		return false;
	/* reuse perf_event to serve as pmc_reprogram_counter() does */
	perf_event_enable(pmc->perf_event);
	pmc->is_paused = false;
	return true;
}
static int filter_cmp(const void *pa, const void *pb, u64 mask)
{
	u64 a = *(u64 *)pa & mask;
	u64 b = *(u64 *)pb & mask;
	return (a > b) - (a < b);
}
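/*
 * The (a > b) - (a < b) idiom yields -1, 0 or 1 directly; returning
 * a - b instead could overflow or truncate when the u64 difference
 * doesn't fit in an int.
 */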
static int filter_sort_cmp(const void *pa, const void *pb)
{
	return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT |
				   KVM_PMU_MASKED_ENTRY_EXCLUDE));
}
/*
 * For the event filter, searching is done on the 'includes' list and
 * 'excludes' list separately rather than on the 'events' list (which
 * has both).  As a result the exclude bit can be ignored.
 */
static int filter_event_cmp(const void *pa, const void *pb)
{
	return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT));
}
static int find_filter_index(u64 *events, u64 nevents, u64 key)
{
	u64 *fe = bsearch(&key, events, nevents, sizeof(events[0]),
			  filter_event_cmp);
	if (!fe)
		return -1;
	return fe - events;
}
static bool is_filter_entry_match(u64 filter_event, u64 umask)
{
	u64 mask = filter_event >> (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8);
	u64 match = filter_event & KVM_PMU_MASKED_ENTRY_UMASK_MATCH;
	BUILD_BUG_ON((KVM_PMU_ENCODE_MASKED_ENTRY(0, 0xff, 0, false) >>
		     (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8)) !=
		     ARCH_PERFMON_EVENTSEL_UMASK);
	return (umask & mask) == match;
}
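/*
 * Both operands end up aligned with the raw unit mask in bits 15:8 of
 * the eventsel (the shift moves the mask down from its bits-63:56
 * encoding, as the BUILD_BUG_ON checks). e.g. a filter entry with umask
 * mask 0xff and umask match 0xc0 matches only umask 0xc0, while mask
 * 0xf0 with match 0xc0 matches any umask in 0xc0-0xcf.
 */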
static bool filter_contains_match(u64 *events, u64 nevents, u64 eventsel)
{
	u64 event_select = eventsel & kvm_pmu_ops.EVENTSEL_EVENT;
	u64 umask = eventsel & ARCH_PERFMON_EVENTSEL_UMASK;
	int i, index;
	index = find_filter_index(events, nevents, event_select);
	if (index < 0)
		return false;
	/*
	 * Entries are sorted by the event select.  Walk the list in both
	 * directions to process all entries with the targeted event select.
	 */
	for (i = index; i < nevents; i++) {
		if (filter_event_cmp(&events[i], &event_select))
			break;
		if (is_filter_entry_match(events[i], umask))
			return true;
	}
	for (i = index - 1; i >= 0; i--) {
		if (filter_event_cmp(&events[i], &event_select))
			break;
		if (is_filter_entry_match(events[i], umask))
			return true;
	}
	return false;
}
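/*
 * For illustration: with a sorted list {ES 0x3c/UM 0x00, ES 0x3c/UM 0x01,
 * ES 0xc0/UM 0x00}, bsearch() for event select 0x3c may land on either
 * 0x3c entry, hence the scan in both directions from the hit until the
 * event select changes, testing each entry's unit mask along the way.
 */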
static bool is_gp_event_allowed(struct kvm_x86_pmu_event_filter *f,
				u64 eventsel)
{
	if (filter_contains_match(f->includes, f->nr_includes, eventsel) &&
	    !filter_contains_match(f->excludes, f->nr_excludes, eventsel))
		return f->action == KVM_PMU_EVENT_ALLOW;
	return f->action == KVM_PMU_EVENT_DENY;
}
static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
				   int idx)
{
	int fixed_idx = idx - INTEL_PMC_IDX_FIXED;
	if (filter->action == KVM_PMU_EVENT_DENY &&
	    test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
		return false;
	if (filter->action == KVM_PMU_EVENT_ALLOW &&
	    !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
		return false;
	return true;
}
static bool check_pmu_event_filter(struct kvm_pmc *pmc)
{
	struct kvm_x86_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;
	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (!filter)
		return true;
	if (pmc_is_gp(pmc))
		return is_gp_event_allowed(filter, pmc->eventsel);
	return is_fixed_event_allowed(filter, pmc->idx);
}
static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
{
	return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
	       static_call(kvm_x86_pmu_hw_event_available)(pmc) &&
	       check_pmu_event_filter(pmc);
}
static void reprogram_counter(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u64 eventsel = pmc->eventsel;
	u64 new_config = eventsel;
	u8 fixed_ctr_ctrl;
	pmc_pause_counter(pmc);
	if (!pmc_event_is_allowed(pmc))
		goto reprogram_complete;
	if (pmc->counter < pmc->prev_counter)
		__kvm_perf_overflow(pmc, false);
	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");
	if (pmc_is_fixed(pmc)) {
		fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
						  pmc->idx - INTEL_PMC_IDX_FIXED);
		if (fixed_ctr_ctrl & 0x1)
			eventsel |= ARCH_PERFMON_EVENTSEL_OS;
		if (fixed_ctr_ctrl & 0x2)
			eventsel |= ARCH_PERFMON_EVENTSEL_USR;
		if (fixed_ctr_ctrl & 0x8)
			eventsel |= ARCH_PERFMON_EVENTSEL_INT;
		new_config = (u64)fixed_ctr_ctrl;
	}
	if (pmc->current_config == new_config && pmc_resume_counter(pmc))
		goto reprogram_complete;
	pmc_release_perf_event(pmc);
	pmc->current_config = new_config;
	/*
	 * If reprogramming fails, e.g. due to contention, leave the counter's
	 * reprogram bit set, i.e. opportunistically try again on the next PMU
	 * refresh.  Don't make a new request as doing so can stall the guest
	 * if reprogramming repeatedly fails.
	 */
	if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
				  (eventsel & pmu->raw_event_mask),
				  !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
				  !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
				  eventsel & ARCH_PERFMON_EVENTSEL_INT))
		return;
reprogram_complete:
	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
	pmc->prev_counter = 0;
}
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;
	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);
		if (unlikely(!pmc)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}
		reprogram_counter(pmc);
	}
	/*
	 * Unused perf_events are only released if the corresponding MSRs
	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
	 * triggers KVM_REQ_PMU if cleanup is needed.
	 */
	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}
/* Check if idx is a valid index for accessing the PMU. */
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return static_call(kvm_x86_pmu_is_valid_rdpmc_ecx)(vcpu, idx);
}
bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}
static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;
	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}
	*data = ctr_val;
	return 0;
}
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;
	if (!pmu->version)
		return 1;
	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
	if (!pmc)
		return 1;
	if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
	    kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
		return 1;
	*data = pmc_read_counter(pmc) & mask;
	return 0;
}
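/*
 * For illustration: setting bit 31 of ECX selects RDPMC "fast" mode, so
 * a counter value of 0x123456789abc read in fast mode is truncated by
 * the ~0u mask to its low 32 bits, 0x56789abc.
 */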
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu)) {
		static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
	}
}
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
	default:
		break;
	}
	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
}
static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);
	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u32 msr = msr_info->index;
	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		break;
	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		break;
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = 0;
		break;
	default:
		return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
	}
	return 0;
}
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
	u64 diff;
	/*
	 * Note, AMD ignores writes to reserved bits and read-only PMU MSRs,
	 * whereas Intel generates #GP on attempts to write reserved/RO MSRs.
	 */
	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (!msr_info->host_initiated)
			return 1; /* RO MSR */
		fallthrough;
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
		/* Per PPR, Read-only MSR. Writes are ignored. */
		if (!msr_info->host_initiated)
			break;
		if (data & pmu->global_status_mask)
			return 1;
		pmu->global_status = data;
		break;
	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
		data &= ~pmu->global_ctrl_mask;
		fallthrough;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (!kvm_valid_perf_global_ctrl(pmu, data))
			return 1;
		if (pmu->global_ctrl != data) {
			diff = pmu->global_ctrl ^ data;
			pmu->global_ctrl = data;
			reprogram_counters(pmu, diff);
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		/*
		 * GLOBAL_OVF_CTRL, a.k.a. GLOBAL_STATUS_RESET, clears bits in
		 * GLOBAL_STATUS, and so the set of reserved bits is the same.
		 */
		if (data & pmu->global_status_mask)
			return 1;
		fallthrough;
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
		if (!msr_info->host_initiated)
			pmu->global_status &= ~data;
		break;
	default:
		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
	}
	return 0;
}
/*
 * Refresh the PMU configuration.  This function is generally called when
 * the underlying settings change (such as a guest VM modifying the PMU
 * CPUID), which should happen rarely.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
		return;
	bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
	static_call(kvm_x86_pmu_refresh)(vcpu);
}
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	irq_work_sync(&pmu->irq_work);
	static_call(kvm_x86_pmu_reset)(vcpu);
}
void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	memset(pmu, 0, sizeof(*pmu));
	static_call(kvm_x86_pmu_init)(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	pmu->event_count = 0;
	pmu->need_cleanup = false;
	kvm_pmu_refresh(vcpu);
}
/* Release perf_events for vPMCs that have been unused for a full time slice.  */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;
	pmu->need_cleanup = false;
	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);
	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}
	static_call_cond(kvm_x86_pmu_cleanup)(vcpu);
	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}
void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}
static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
	pmc->prev_counter = pmc->counter;
	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
	kvm_pmu_request_counter_reprogram(pmc);
}
static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
	unsigned int perf_hw_id)
{
	return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) &
		AMD64_RAW_EVENT_MASK_NB);
}
static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
	bool select_os, select_user;
	u64 config;
	if (pmc_is_gp(pmc)) {
		config = pmc->eventsel;
		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
	} else {
		config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
					  pmc->idx - INTEL_PMC_IDX_FIXED);
		select_os = config & 0x1;
		select_user = config & 0x2;
	}
	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
}
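/*
 * For illustration: a GP counter with only ARCH_PERFMON_EVENTSEL_USR set
 * matches this check only while the guest runs at CPL > 0; with only _OS
 * set, only at CPL 0; with both set it matches at any CPL.
 */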
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int i;
	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
		if (!pmc || !pmc_event_is_allowed(pmc))
			continue;
		/* Ignore checks for edge detect, pin control, invert and CMASK bits */
		if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
			kvm_pmu_incr_counter(pmc);
	}
}
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
{
	u64 mask = kvm_pmu_ops.EVENTSEL_EVENT |
		   KVM_PMU_MASKED_ENTRY_UMASK_MASK |
		   KVM_PMU_MASKED_ENTRY_UMASK_MATCH |
		   KVM_PMU_MASKED_ENTRY_EXCLUDE;
	int i;
	for (i = 0; i < filter->nevents; i++) {
		if (filter->events[i] & ~mask)
			return false;
	}
	return true;
}
static void convert_to_masked_filter(struct kvm_x86_pmu_event_filter *filter)
{
	int i, j;
	for (i = 0, j = 0; i < filter->nevents; i++) {
		/*
		 * Skip events that are impossible to match against a guest
		 * event.  When filtering, only the event select + unit mask
		 * of the guest event is used.  To maintain backwards
		 * compatibility, impossible filters can't be rejected :-(
		 */
		if (filter->events[i] & ~(kvm_pmu_ops.EVENTSEL_EVENT |
					  ARCH_PERFMON_EVENTSEL_UMASK))
			continue;
		/*
		 * Convert userspace events to a common in-kernel event so
		 * only one code path is needed to support both events.  For
		 * the in-kernel events use masked events because they are
		 * flexible enough to handle both cases.  To convert to masked
		 * events all that's needed is to add an "all ones" umask_mask
		 * (unmasked filter events don't support EXCLUDE).
		 */
		filter->events[j++] = filter->events[i] |
				      (0xFFULL << KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT);
	}
	filter->nevents = j;
}
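/*
 * For illustration: an unmasked entry for event select 0x3c with unit
 * mask 0x01 becomes a masked entry with umask_mask == 0xff and
 * umask_match == 0x01, i.e. an exact unit-mask match, preserving the
 * legacy semantics.
 */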
static int prepare_filter_lists(struct kvm_x86_pmu_event_filter *filter)
{
	int i;
	if (!(filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS))
		convert_to_masked_filter(filter);
	else if (!is_masked_filter_valid(filter))
		return -EINVAL;
	/*
	 * Sort entries by event select and includes vs. excludes so that all
	 * entries for a given event select can be processed efficiently during
	 * filtering.  The EXCLUDE flag uses a more significant bit than the
	 * event select, and so the sorted list is also effectively split into
	 * includes and excludes sub-lists.
	 */
	sort(&filter->events, filter->nevents, sizeof(filter->events[0]),
	     filter_sort_cmp, NULL);
	i = filter->nevents;
	/* Find the first EXCLUDE event (only supported for masked events). */
	if (filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS) {
		for (i = 0; i < filter->nevents; i++) {
			if (filter->events[i] & KVM_PMU_MASKED_ENTRY_EXCLUDE)
				break;
		}
	}
	filter->nr_includes = i;
	filter->nr_excludes = filter->nevents - filter->nr_includes;
	filter->includes = filter->events;
	filter->excludes = filter->events + filter->nr_includes;
	return 0;
}
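/*
 * For illustration: after sorting, a masked filter with two includes and
 * one EXCLUDE entry is laid out with both includes first (each sub-list
 * sorted by event select), nr_includes == 2, and 'excludes' pointing at
 * the EXCLUDE entry.
 */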
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter __user *user_filter = argp;
	struct kvm_x86_pmu_event_filter *filter;
	struct kvm_pmu_event_filter tmp;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	size_t size;
	int r;
	if (copy_from_user(&tmp, user_filter, sizeof(tmp)))
		return -EFAULT;
	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;
	if (tmp.flags & ~KVM_PMU_EVENT_FLAGS_VALID_MASK)
		return -EINVAL;
	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;
	size = struct_size(filter, events, tmp.nevents);
	filter = kzalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;
	filter->action = tmp.action;
	filter->nevents = tmp.nevents;
	filter->fixed_counter_bitmap = tmp.fixed_counter_bitmap;
	filter->flags = tmp.flags;
	r = -EFAULT;
	if (copy_from_user(filter->events, user_filter->events,
			   sizeof(filter->events[0]) * filter->nevents))
		goto cleanup;
	r = prepare_filter_lists(filter);
	if (r)
		goto cleanup;
	mutex_lock(&kvm->lock);
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);
	synchronize_srcu_expedited(&kvm->srcu);
	BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) >
		     sizeof(((struct kvm_pmu *)0)->__reprogram_pmi));
	kvm_for_each_vcpu(i, vcpu, kvm)
		atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);
	kvm_make_all_cpus_request(kvm, KVM_REQ_PMU);
	r = 0;
cleanup:
	kfree(filter);
	return r;
}
 | 
	linux-master | 
	arch/x86/kvm/pmu.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * irq.c: API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <[email protected]>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/export.h>
#include <linux/kvm_host.h>
#include "irq.h"
#include "i8254.h"
#include "x86.h"
#include "xen.h"
/*
 * check if there are pending timer events
 * to be processed.
 */
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	int r = 0;
	if (lapic_in_kernel(vcpu))
		r = apic_has_pending_timer(vcpu);
	if (kvm_xen_timer_enabled(vcpu))
		r += kvm_xen_has_pending_timer(vcpu);
	return r;
}
/*
 * check if there is a pending userspace external interrupt
 */
static int pending_userspace_extint(struct kvm_vcpu *v)
{
	return v->arch.pending_external_vector != -1;
}
/*
 * check if there is a pending interrupt from a
 * non-APIC source, without acknowledging it (intack).
 */
int kvm_cpu_has_extint(struct kvm_vcpu *v)
{
	/*
	 * FIXME: interrupt.injected represents an interrupt whose
	 * side-effects have already been applied (e.g. bit from IRR
	 * already moved to ISR). Therefore, it is incorrect to rely
	 * on interrupt.injected to know if there is a pending
	 * interrupt in the user-mode LAPIC.
	 * This leads to nVMX/nSVM not being able to distinguish
	 * whether it should exit from L2 to L1 on EXTERNAL_INTERRUPT due to a
	 * pending interrupt, or whether it should re-inject an already
	 * injected interrupt.
	 */
	if (!lapic_in_kernel(v))
		return v->arch.interrupt.injected;
	if (kvm_xen_has_interrupt(v))
		return 1;
	if (!kvm_apic_accept_pic_intr(v))
		return 0;
	if (irqchip_split(v->kvm))
		return pending_userspace_extint(v);
	else
		return v->kvm->arch.vpic->output;
}
/*
 * Check if there is an injectable interrupt:
 * when virtual interrupt delivery is enabled,
 * interrupts from the APIC are handled by hardware,
 * so we don't need to check for them here.
 */
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
{
	if (kvm_cpu_has_extint(v))
		return 1;
	if (!is_guest_mode(v) && kvm_vcpu_apicv_active(v))
		return 0;
	return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
}
EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
/*
 * Check if there is a pending interrupt, without
 * acknowledging it (intack).
 */
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
	if (kvm_cpu_has_extint(v))
		return 1;
	return kvm_apic_has_interrupt(v) != -1;	/* LAPIC */
}
EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
/*
 * Read the pending interrupt vector (from a non-APIC
 * source) and acknowledge it (intack).
 */
static int kvm_cpu_get_extint(struct kvm_vcpu *v)
{
	if (!kvm_cpu_has_extint(v)) {
		WARN_ON(!lapic_in_kernel(v));
		return -1;
	}
	if (!lapic_in_kernel(v))
		return v->arch.interrupt.nr;
	if (kvm_xen_has_interrupt(v))
		return v->kvm->arch.xen.upcall_vector;
	if (irqchip_split(v->kvm)) {
		int vector = v->arch.pending_external_vector;
		v->arch.pending_external_vector = -1;
		return vector;
	} else
		return kvm_pic_read_irq(v->kvm); /* PIC */
}
/*
 * Read pending interrupt vector and intack.
 */
int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
{
	int vector = kvm_cpu_get_extint(v);
	if (vector != -1)
		return vector;			/* PIC */
	return kvm_get_apic_interrupt(v);	/* APIC */
}
EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		kvm_inject_apic_timer_irqs(vcpu);
	if (kvm_xen_timer_enabled(vcpu))
		kvm_xen_inject_timer_irqs(vcpu);
}
void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	__kvm_migrate_apic_timer(vcpu);
	__kvm_migrate_pit_timer(vcpu);
	static_call_cond(kvm_x86_migrate_timers)(vcpu);
}
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
	bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE;
	return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm);
}
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}
 | 
	linux-master | 
	arch/x86/kvm/irq.c | 
| 
	// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <[email protected]>
 *
 * Authors:
 *   Avi Kivity   <[email protected]>
 *   Yaniv Kamay  <[email protected]>
 *   Amit Shah    <[email protected]>
 *   Ben-Ami Yassour <[email protected]>
 *   Andrey Smetanin <[email protected]>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "xen.h"
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <asm/apicdef.h>
#include <asm/mshyperv.h>
#include <trace/events/kvm.h>
#include "trace.h"
#include "irq.h"
#include "fpu.h"
#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, HV_VCPUS_PER_SPARSE_BANK)
/*
 * As per Hyper-V TLFS, extended hypercalls start from 0x8001
 * (HvExtCallQueryCapabilities). The response to this hypercall is a 64-bit
 * value where each bit indicates which extended hypercall is available
 * besides HvExtCallQueryCapabilities.
 *
 * 0x8001 - First extended hypercall, HvExtCallQueryCapabilities, no bit
 * assigned.
 *
 * 0x8002 - Bit 0
 * 0x8003 - Bit 1
 * ..
 * 0x8041 - Bit 63
 *
 * Therefore, HV_EXT_CALL_MAX = 0x8001 + 64
 */
#define HV_EXT_CALL_MAX (HV_EXT_CALL_QUERY_CAPABILITIES + 64)
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick);
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}
static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}
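/*
 * For illustration: a SINT value with HV_SYNIC_SINT_MASKED set decodes
 * to -1 (no deliverable vector); otherwise the low byte is the vector,
 * e.g. 0x35 decodes to vector 0x35.
 */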
static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}
static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				     int vector)
{
	int i;
	u64 sint_value;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}
static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
				int vector)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	bool auto_eoi_old, auto_eoi_new;
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
		return;
	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);
	auto_eoi_old = !bitmap_empty(synic->auto_eoi_bitmap, 256);
	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);
	auto_eoi_new = !bitmap_empty(synic->auto_eoi_bitmap, 256);
	if (auto_eoi_old == auto_eoi_new)
		return;
	if (!enable_apicv)
		return;
	down_write(&vcpu->kvm->arch.apicv_update_lock);
	if (auto_eoi_new)
		hv->synic_auto_eoi_used++;
	else
		hv->synic_auto_eoi_used--;
	/*
	 * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on
	 * the hypervisor to manually inject IRQs.
	 */
	__kvm_set_or_clear_apicv_inhibit(vcpu->kvm,
					 APICV_INHIBIT_REASON_HYPERV,
					 !!hv->synic_auto_eoi_used);
	up_write(&vcpu->kvm->arch.apicv_update_lock);
}
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector, old_vector;
	bool masked;
	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	masked = data & HV_SYNIC_SINT_MASKED;
	/*
	 * Valid vectors are 16-255; however, nested Hyper-V attempts to write
	 * the default '0x10000' value on boot, and this should not #GP. We also
	 * need to allow zero-initializing the register from the host.
	 */
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
		return 1;
	/*
	 * The guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by the SynIC, and a
	 * bitmap of vectors with auto-EOI behavior.  The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */
	old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;
	atomic64_set(&synic->sint[sint], data);
	synic_update_vector(synic, old_vector);
	synic_update_vector(synic, vector);
	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
	return 0;
}
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu = NULL;
	unsigned long i;
	if (vpidx >= KVM_MAX_VCPUS)
		return NULL;
	vcpu = kvm_get_vcpu(kvm, vpidx);
	if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_hv_get_vpindex(vcpu) == vpidx)
			return vcpu;
	return NULL;
}
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;
	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
	if (!vcpu || !to_hv_vcpu(vcpu))
		return NULL;
	synic = to_hv_synic(vcpu);
	return (synic->active) ? synic : NULL;
}
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx;
	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
	/* Try to deliver pending Hyper-V SynIC timer messages */
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending && stimer->config.enable &&
		    !stimer->config.direct_mode &&
		    stimer->config.sintx == sint)
			stimer_mark_pending(stimer, false);
	}
	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	int ret;
	if (!synic->active && (!host || data))
		return 1;
	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;
		if (!synic->active)
			break;
		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}
static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	return hv_vcpu->cpuid_cache.syndbg_cap_eax &
		HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
}
static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
		hv->hv_syndbg.control.status =
			vcpu->run->hyperv.u.syndbg.status;
	return 1;
}
static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
	hv_vcpu->exit.u.syndbg.msr = msr;
	hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
	hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
	hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
	hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
	vcpu->arch.complete_userspace_io =
			kvm_hv_syndbg_complete_userspace;
	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;
	trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
				    to_hv_vcpu(vcpu)->vp_index, msr, data);
	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		syndbg->control.control = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		syndbg->control.status = data;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		syndbg->control.send_page = data;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		syndbg->control.recv_page = data;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		syndbg->control.pending_page = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		syndbg->options = data;
		break;
	default:
		break;
	}
	return 0;
}
static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;
	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		*pdata = syndbg->control.control;
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		*pdata = syndbg->control.status;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		*pdata = syndbg->control.send_page;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		*pdata = syndbg->control.recv_page;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		*pdata = syndbg->control.pending_page;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		*pdata = syndbg->options;
		break;
	default:
		break;
	}
	trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);
	return 0;
}
static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
			 bool host)
{
	int ret;
	if (!synic->active && !host)
		return 1;
	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}
static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;
	if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
		return -EINVAL;
	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;
	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;
	memset(&irq, 0, sizeof(irq));
	irq.shorthand = APIC_DEST_SELF;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;
	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
	struct kvm_vcpu_hv_synic *synic;
	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;
	return synic_set_irq(synic, sint);
}
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	int i;
	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}
static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;
	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;
	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;
	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}
void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;
	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));
	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}
static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;
	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}
static u64 get_time_ref_counter(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct kvm_vcpu *vcpu;
	u64 tsc;
	/*
	 * Fall back to get_kvmclock_ns() when the TSC page hasn't been set
	 * up, or is broken, disabled, or being updated.
	 */
	if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
		return div_u64(get_kvmclock_ns(kvm), 100);
	vcpu = kvm_get_vcpu(kvm, 0);
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
		+ hv->tsc_ref.tsc_offset;
}
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	set_bit(stimer->index,
		to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
	if (vcpu_kick)
		kvm_vcpu_kick(vcpu);
}
static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
				    stimer->index);
	hrtimer_cancel(&stimer->timer);
	clear_bit(stimer->index,
		  to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	stimer->msg_pending = false;
	stimer->exp_time = 0;
}
static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu_hv_stimer *stimer;
	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
	trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
				     stimer->index);
	stimer_mark_pending(stimer, true);
	return HRTIMER_NORESTART;
}
/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;
	time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();
	if (stimer->config.periodic) {
		if (stimer->exp_time) {
			if (time_now >= stimer->exp_time) {
				u64 remainder;
				div64_u64_rem(time_now - stimer->exp_time,
					      stimer->count, &remainder);
				stimer->exp_time =
					time_now + (stimer->count - remainder);
			}
		} else
			stimer->exp_time = time_now + stimer->count;
		trace_kvm_hv_stimer_start_periodic(
					hv_stimer_to_vcpu(stimer)->vcpu_id,
					stimer->index,
					time_now, stimer->exp_time);
		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now,
					   100 * (stimer->exp_time - time_now)),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire timer according to Hypervisor Top-Level Functional
		 * specification v4 (15.3.1):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_pending(stimer, false);
		return 0;
	}
	trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
					   stimer->index,
					   time_now, stimer->count);
	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}
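/*
 * Worked example of the periodic rearm above: with count == 100 (10us in
 * 100ns units), exp_time == 1000 and time_now == 1050, the remainder of
 * (1050 - 1000) % 100 is 50, so the new exp_time is 1050 + (100 - 50) ==
 * 1100, the next period boundary after "now".
 */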
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{
	union hv_stimer_config new_config = {.as_uint64 = config},
		old_config = {.as_uint64 = stimer->config.as_uint64};
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	if (!synic->active && (!host || config))
		return 1;
	if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
		     !(hv_vcpu->cpuid_cache.features_edx &
		       HV_STIMER_DIRECT_MODE_AVAILABLE)))
		return 1;
	trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, config, host);
	stimer_cleanup(stimer);
	if (old_config.enable &&
	    !new_config.direct_mode && new_config.sintx == 0)
		new_config.enable = 0;
	stimer->config.as_uint64 = new_config.as_uint64;
	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);
	return 0;
}
static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	if (!synic->active && (!host || count))
		return 1;
	trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
				      stimer->index, count, host);
	stimer_cleanup(stimer);
	stimer->count = count;
	if (stimer->count == 0)
		stimer->config.enable = 0;
	else if (stimer->config.auto_enable)
		stimer->config.enable = 1;
	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);
	return 0;
}
static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config.as_uint64;
	return 0;
}
static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg, bool no_retry)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
	gfn_t msg_page_gfn;
	struct hv_message_header hv_hdr;
	int r;
	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;
	msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
	/*
	 * Strictly following the spec-mandated ordering would mean setting
	 * .msg_pending before checking .message_type.  However, this function
	 * is only called in vcpu context so the entire update is atomic from
	 * guest POV and thus the exact order here doesn't matter.
	 */
	r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
				     msg_off + offsetof(struct hv_message,
							header.message_type),
				     sizeof(hv_hdr.message_type));
	if (r < 0)
		return r;
	if (hv_hdr.message_type != HVMSG_NONE) {
		if (no_retry)
			return 0;
		hv_hdr.message_flags.msg_pending = 1;
		r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
					      &hv_hdr.message_flags,
					      msg_off +
					      offsetof(struct hv_message,
						       header.message_flags),
					      sizeof(hv_hdr.message_flags));
		if (r < 0)
			return r;
		return -EAGAIN;
	}
	r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
				      sizeof(src_msg->header) +
				      src_msg->header.payload_size);
	if (r < 0)
		return r;
	r = synic_set_irq(synic, sint);
	if (r < 0)
		return r;
	if (r == 0)
		return -EFAULT;
	return 0;
}
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;
	/*
	 * To avoid piling up periodic ticks, don't retry message
	 * delivery for them (within "lazy" lost ticks policy).
	 */
	bool no_retry = stimer->config.periodic;
	payload->expiration_time = stimer->exp_time;
	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
	return synic_deliver_msg(to_hv_synic(vcpu),
				 stimer->config.sintx, msg,
				 no_retry);
}
static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = stimer->config.apic_vector
	};
	if (lapic_in_kernel(vcpu))
		return !kvm_apic_set_irq(vcpu, &irq, NULL);
	return 0;
}
static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
	int r, direct = stimer->config.direct_mode;
	stimer->msg_pending = true;
	if (!direct)
		r = stimer_send_msg(stimer);
	else
		r = stimer_notify_direct(stimer);
	trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, direct, r);
	if (!r) {
		stimer->msg_pending = false;
		if (!(stimer->config.periodic))
			stimer->config.enable = 0;
	}
}
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	u64 time_now, exp_time;
	int i;
	if (!hv_vcpu)
		return;
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
			stimer = &hv_vcpu->stimer[i];
			if (stimer->config.enable) {
				exp_time = stimer->exp_time;
				if (exp_time) {
					time_now =
						get_time_ref_counter(vcpu->kvm);
					if (time_now >= exp_time)
						stimer_expiration(stimer);
				}
				if ((stimer->config.enable) &&
				    stimer->count) {
					if (!stimer->msg_pending)
						stimer_start(stimer);
				} else
					stimer_cleanup(stimer);
			}
		}
}
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i;
	if (!hv_vcpu)
		return;
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_cleanup(&hv_vcpu->stimer[i]);
	kfree(hv_vcpu);
	vcpu->arch.hyperv = NULL;
}
bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	if (!hv_vcpu)
		return false;
	if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
		return false;
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	if (!hv_vcpu || !kvm_hv_assist_page_enabled(vcpu))
		return -EFAULT;
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
				     &hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page));
}
EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;
	memset(&msg->header, 0, sizeof(msg->header));
	msg->header.message_type = HVMSG_TIMER_EXPIRED;
	msg->header.payload_size = sizeof(*payload);
	payload->timer_index = stimer->index;
	payload->expiration_time = 0;
	payload->delivery_time = 0;
}
static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
	memset(stimer, 0, sizeof(*stimer));
	stimer->index = timer_index;
	hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	stimer->timer.function = stimer_timer_callback;
	stimer_prepare_msg(stimer);
}
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i;
	if (hv_vcpu)
		return 0;
	hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
	if (!hv_vcpu)
		return -ENOMEM;
	vcpu->arch.hyperv = hv_vcpu;
	hv_vcpu->vcpu = vcpu;
	synic_init(&hv_vcpu->synic);
	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);
	hv_vcpu->vp_index = vcpu->vcpu_idx;
	for (i = 0; i < HV_NR_TLB_FLUSH_FIFOS; i++) {
		INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries);
		spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock);
	}
	return 0;
}
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
	struct kvm_vcpu_hv_synic *synic;
	int r;
	r = kvm_hv_vcpu_init(vcpu);
	if (r)
		return r;
	synic = to_hv_synic(vcpu);
	synic->active = true;
	synic->dont_zero_synic_pages = dont_zero_synic_pages;
	synic->control = HV_SYNIC_CONTROL_ENABLE;
	return 0;
}
static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;
	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		r = true;
		break;
	}
	return r;
}
static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	size_t size = ARRAY_SIZE(hv->hv_crash_param);
	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;
	*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
	return 0;
}
static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	*pdata = hv->hv_crash_ctl;
	return 0;
}
static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
	return 0;
}
static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	size_t size = ARRAY_SIZE(hv->hv_crash_param);
	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;
	hv->hv_crash_param[array_index_nospec(index, size)] = data;
	return 0;
}
/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale / 2^64 =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale        =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
					struct ms_hyperv_tsc_page *tsc_ref)
{
	u64 max_mul;
	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
		return false;
	/*
	 * Check if scale would overflow; if so, fall back to the time ref counter:
	 *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
	 *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
	 *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
	 */
	max_mul = 100ull << (32 - hv_clock->tsc_shift);
	if (hv_clock->tsc_to_system_mul >= max_mul)
		return false;
	/*
	 * Otherwise compute the scale and offset according to the formulas
	 * derived above.
	 */
	tsc_ref->tsc_scale =
		mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
				hv_clock->tsc_to_system_mul,
				100);
	tsc_ref->tsc_offset = hv_clock->system_time;
	do_div(tsc_ref->tsc_offset, 100);
	tsc_ref->tsc_offset -=
		mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
	return true;
}
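/*
 * Worked example: one valid kvmclock encoding of a 1 GHz TSC is
 * tsc_to_system_mul == 2^31 with tsc_shift == 1 (2^31 * 2^(1-32) == 1 ns
 * per tick).  The overflow check passes (2^31 < 100 * 2^31) and tsc_scale
 * becomes 2^31 * 2^33 / 100 == 2^64 / 100, so the Hyper-V formula
 * ticks * scale / 2^64 reduces to ticks / 100: 100 ticks of a 1 GHz TSC
 * yield one 100ns unit, as expected.
 */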
/*
 * Don't touch TSC page values if the guest has opted for TSC emulation after
 * migration. KVM doesn't fully support reenlightenment notifications and TSC
 * access emulation and Hyper-V is known to expect the values in TSC page to
 * stay constant before TSC access emulation is disabled from guest side
 * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC
 * frequency and guest visible TSC value across migration (and prevent it when
 * TSC scaling is unsupported).
 */
static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
{
	return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
		hv->hv_tsc_emulation_control;
}
void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	u32 tsc_seq;
	u64 gfn;
	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
	BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
	mutex_lock(&hv->hv_lock);
	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
	    hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
		goto out_unlock;
	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		goto out_unlock;
	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	/*
	 * Because the TSC parameters only vary when there is a
	 * change in the master clock, do not bother with caching.
	 */
	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
				    &tsc_seq, sizeof(tsc_seq))))
		goto out_err;
	if (tsc_seq && tsc_page_update_unsafe(hv)) {
		if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
			goto out_err;
		hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
		goto out_unlock;
	}
	/*
	 * While we're computing and writing the parameters, force the
	 * guest to use the time reference count MSR.
	 */
	hv->tsc_ref.tsc_sequence = 0;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_err;
	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
		goto out_err;
	/* Ensure sequence is zero before writing the rest of the struct.  */
	smp_wmb();
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
		goto out_err;
	/*
	 * Now switch to the TSC page mechanism by writing the sequence.
	 */
	tsc_seq++;
	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
		tsc_seq = 1;
	/* Write the struct entirely before the non-zero sequence.  */
	smp_wmb();
	hv->tsc_ref.tsc_sequence = tsc_seq;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_err;
	hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
	goto out_unlock;
out_err:
	hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
out_unlock:
	mutex_unlock(&hv->hv_lock);
}
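/*
 * The sequence field acts like a seqcount.  A sketch of the guest-side
 * read protocol per the TLFS: read tsc_sequence, then tsc_scale and
 * tsc_offset, then tsc_sequence again; retry on mismatch and fall back
 * to the reference counter MSR when the sequence is the invalid value
 * (0; KVM also skips 0xFFFFFFFF, which older definitions treated as
 * invalid).
 */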
void kvm_hv_request_tsc_page_update(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	mutex_lock(&hv->hv_lock);
	if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
	    !tsc_page_update_unsafe(hv))
		hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
	mutex_unlock(&hv->hv_lock);
}
static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
{
	if (!hv_vcpu->enforce_cpuid)
		return true;
	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_HYPERCALL_AVAILABLE;
	case HV_X64_MSR_VP_RUNTIME:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_VP_RUNTIME_AVAILABLE;
	case HV_X64_MSR_TIME_REF_COUNT:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_TIME_REF_COUNT_AVAILABLE;
	case HV_X64_MSR_VP_INDEX:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_VP_INDEX_AVAILABLE;
	case HV_X64_MSR_RESET:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_RESET_AVAILABLE;
	case HV_X64_MSR_REFERENCE_TSC:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_REFERENCE_TSC_AVAILABLE;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_SYNIC_AVAILABLE;
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG:
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_SYNTIMER_AVAILABLE;
	case HV_X64_MSR_EOI:
	case HV_X64_MSR_ICR:
	case HV_X64_MSR_TPR:
	case HV_X64_MSR_VP_ASSIST_PAGE:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_APIC_ACCESS_AVAILABLE;
	case HV_X64_MSR_TSC_FREQUENCY:
	case HV_X64_MSR_APIC_FREQUENCY:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_FREQUENCY_MSRS;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_REENLIGHTENMENT;
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_TSC_INVARIANT;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
		return hv_vcpu->cpuid_cache.features_edx &
			HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return hv_vcpu->cpuid_cache.features_edx &
			HV_FEATURE_DEBUG_MSRS_AVAILABLE;
	default:
		break;
	}
	return false;
}
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = to_kvm_hv(kvm);
	if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
		return 1;
	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u8 instructions[9];
		int i = 0;
		u64 addr;
		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}
		/*
		 * If Xen and Hyper-V hypercalls are both enabled, disambiguate
		 * the same way Xen itself does, by setting bit 31 of EAX, which
		 * is RsvdZ in the 32-bit Hyper-V hypercall ABI and is simply
		 * going to be clobbered on 64-bit.
		 */
		if (kvm_xen_hypercall_enabled(kvm)) {
			/* orl $0x80000000, %eax */
			instructions[i++] = 0x0d;
			instructions[i++] = 0x00;
			instructions[i++] = 0x00;
			instructions[i++] = 0x00;
			instructions[i++] = 0x80;
		}
		/* vmcall/vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
		i += 3;
		/* ret */
		((unsigned char *)instructions)[i++] = 0xc3;
		addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
		if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
			return 1;
		hv->hv_hypercall = data;
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		hv->hv_tsc_page = data;
		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
			if (!host)
				hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
			else
				hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
		} else {
			hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
		}
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(kvm,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		if (host)
			return kvm_hv_msr_set_crash_ctl(kvm, data);
		if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
			vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
				   hv->hv_crash_param[0],
				   hv->hv_crash_param[1],
				   hv->hv_crash_param[2],
				   hv->hv_crash_param[3],
				   hv->hv_crash_param[4]);
			/* Send notification about crash to user space */
			kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
		}
		break;
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		hv->hv_reenlightenment_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		hv->hv_tsc_emulation_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		if (data && !host)
			return 1;
		hv->hv_tsc_emulation_status = data;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
		/* Only bit 0 is supported */
		if (data & ~HV_EXPOSE_INVARIANT_TSC)
			return 1;
		/* The feature can't be disabled from the guest */
		if (!host && hv->hv_invtsc_control && !data)
			return 1;
		hv->hv_invtsc_control = data;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return syndbg_set_msr(vcpu, msr, data, host);
	default:
		kvm_pr_unimpl_wrmsr(vcpu, msr, data);
		return 1;
	}
	return 0;
}
/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	u64 utime, stime;
	task_cputime_adjusted(current, &utime, &stime);
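	/* utime/stime are reported in nanoseconds, hence /100 for 100ns units */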
	return div_u64(utime + stime, 100);
}
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
		return 1;
	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
		u32 new_vp_index = (u32)data;
		if (!host || new_vp_index >= KVM_MAX_VCPUS)
			return 1;
		if (new_vp_index == hv_vcpu->vp_index)
			return 0;
		/*
		 * The VP index is initialized to vcpu_index by
		 * kvm_hv_vcpu_postcreate so they initially match.  Now the
		 * VP index is changing, adjust num_mismatched_vp_indexes if
		 * it now matches or no longer matches vcpu_idx.
		 */
		if (hv_vcpu->vp_index == vcpu->vcpu_idx)
			atomic_inc(&hv->num_mismatched_vp_indexes);
		else if (new_vp_index == vcpu->vcpu_idx)
			atomic_dec(&hv->num_mismatched_vp_indexes);
		hv_vcpu->vp_index = new_vp_index;
		break;
	}
	case HV_X64_MSR_VP_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;
		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
			hv_vcpu->hv_vapic = data;
			if (kvm_lapic_set_pv_eoi(vcpu, 0, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		/*
		 * Clear the apic_assist portion of struct hv_vp_assist_page
		 * only; there can be valuable data in the rest which needs
		 * to be preserved, e.g. on migration.
		 */
		if (__put_user(0, (u32 __user *)addr))
			return 1;
		hv_vcpu->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_set_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
					    sizeof(struct hv_vp_assist_page)))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
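		/*
		 * The STIMERn_CONFIG and STIMERn_COUNT MSRs interleave
		 * (CONFIG0, COUNT0, CONFIG1, COUNT1, ...), so halving the
		 * offset from STIMER0_CONFIG (or STIMER0_COUNT below) yields
		 * the timer index.
		 */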
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
		return stimer_set_config(to_hv_stimer(vcpu, timer_index),
					 data, host);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
		return stimer_set_count(to_hv_stimer(vcpu, timer_index),
					data, host);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
	case HV_X64_MSR_APIC_FREQUENCY:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	default:
		kvm_pr_unimpl_wrmsr(vcpu, msr, data);
		return 1;
	}
	return 0;
}
static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			     bool host)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = to_kvm_hv(kvm);
	if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
		return 1;
	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		data = get_time_ref_counter(kvm);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(kvm,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(kvm, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		data = hv->hv_reenlightenment_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		data = hv->hv_tsc_emulation_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		data = hv->hv_tsc_emulation_status;
		break;
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
		data = hv->hv_invtsc_control;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return syndbg_get_msr(vcpu, msr, pdata, host);
	default:
		kvm_pr_unimpl_rdmsr(vcpu, msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			  bool host)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
		return 1;
	switch (msr) {
	case HV_X64_MSR_VP_INDEX:
		data = hv_vcpu->vp_index;
		break;
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_VP_ASSIST_PAGE:
		data = hv_vcpu->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
		return stimer_get_config(to_hv_stimer(vcpu, timer_index),
					 pdata);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
		return stimer_get_count(to_hv_stimer(vcpu, timer_index),
					pdata);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
		data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
		break;
	case HV_X64_MSR_APIC_FREQUENCY:
		data = APIC_BUS_FREQUENCY;
		break;
	default:
		kvm_pr_unimpl_rdmsr(vcpu, msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	if (!host && !vcpu->arch.hyperv_enabled)
		return 1;
	if (kvm_hv_vcpu_init(vcpu))
		return 1;
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;
		mutex_lock(&hv->hv_lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&hv->hv_lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data, host);
}
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	if (!host && !vcpu->arch.hyperv_enabled)
		return 1;
	if (kvm_hv_vcpu_init(vcpu))
		return 1;
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;
		mutex_lock(&hv->hv_lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
		mutex_unlock(&hv->hv_lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata, host);
}
static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
				    u64 valid_bank_mask, unsigned long *vcpu_mask)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes);
	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
	struct kvm_vcpu *vcpu;
	int bank, sbank = 0;
	unsigned long i;
	u64 *bitmap;
	BUILD_BUG_ON(sizeof(vp_bitmap) >
		     sizeof(*vcpu_mask) * BITS_TO_LONGS(KVM_MAX_VCPUS));
	/*
	 * If vp_index == vcpu_idx for all vCPUs, fill vcpu_mask directly, else
	 * fill a temporary buffer and manually test each vCPU's VP index.
	 */
	if (likely(!has_mismatch))
		bitmap = (u64 *)vcpu_mask;
	else
		bitmap = vp_bitmap;
	/*
	 * Each set of 64 VPs is packed into sparse_banks, with valid_bank_mask
	 * having a '1' for each bank that exists in sparse_banks.  Sets must
	 * be in ascending order, i.e. bank0..bankN.
	 */
	memset(bitmap, 0, sizeof(vp_bitmap));
	for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
			 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
		bitmap[bank] = sparse_banks[sbank++];
	if (likely(!has_mismatch))
		return;
	bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
			__set_bit(i, vcpu_mask);
	}
}
static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_banks[])
{
	int valid_bit_nr = vp_id / HV_VCPUS_PER_SPARSE_BANK;
	unsigned long sbank;
	if (!test_bit(valid_bit_nr, (unsigned long *)&valid_bank_mask))
		return false;
	/*
	 * The index into the sparse bank is the number of preceding bits in
	 * the valid mask.  Optimize for VMs with <64 vCPUs by skipping the
	 * fancy math if there can't possibly be preceding bits.
	 */
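	/*
	 * Worked example (hypothetical values): vp_id = 130 and
	 * valid_bank_mask = 0b101 give valid_bit_nr = 2; one earlier bit is
	 * set (bank 0), so sbank = 1 and bit 130 % 64 = 2 of sparse_banks[1]
	 * is tested.
	 */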
	if (valid_bit_nr)
		sbank = hweight64(valid_bank_mask & GENMASK_ULL(valid_bit_nr - 1, 0));
	else
		sbank = 0;
	return test_bit(vp_id % HV_VCPUS_PER_SPARSE_BANK,
			(unsigned long *)&sparse_banks[sbank]);
}
struct kvm_hv_hcall {
	/* Hypercall input data */
	u64 param;
	u64 ingpa;
	u64 outgpa;
	u16 code;
	u16 var_cnt;
	u16 rep_cnt;
	u16 rep_idx;
	bool fast;
	bool rep;
	sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
	/*
	 * Current read offset when KVM reads hypercall input data gradually:
	 * either the offset in bytes from 'ingpa' for regular hypercalls or
	 * the number of already consumed 'XMM halves' for 'fast' hypercalls.
	 */
	union {
		gpa_t data_offset;
		int consumed_xmm_halves;
	};
};
static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
			      u16 orig_cnt, u16 cnt_cap, u64 *data)
{
	/*
	 * Preserve the original count when ignoring entries via a "cap";
	 * KVM still needs to validate the guest input (though the non-XMM
	 * path punts on the checks).
	 */
	u16 cnt = min(orig_cnt, cnt_cap);
	int i, j;
	if (hc->fast) {
		/*
		 * Each XMM holds two sparse banks, but do not count halves that
		 * have already been consumed for hypercall parameters.
		 */
		if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves)
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
		for (i = 0; i < cnt; i++) {
			j = i + hc->consumed_xmm_halves;
			if (j % 2)
				data[i] = sse128_hi(hc->xmm[j / 2]);
			else
				data[i] = sse128_lo(hc->xmm[j / 2]);
		}
		return 0;
	}
	return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data,
			      cnt * sizeof(*data));
}
static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
				 u64 *sparse_banks)
{
	if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS)
		return -EINVAL;
	/* Cap var_cnt to ignore banks that cannot contain a legal VP index. */
	return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS,
				  sparse_banks);
}
static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[])
{
	return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries);
}
static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu,
				 struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo,
				 u64 *entries, int count)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 flush_all_entry = KVM_HV_TLB_FLUSHALL_ENTRY;
	if (!hv_vcpu)
		return;
	spin_lock(&tlb_flush_fifo->write_lock);
	/*
	 * All entries should fit in the fifo while leaving one slot free for
	 * the 'flush all' entry in case another request comes in.  If there
	 * isn't enough space, just put the 'flush all' entry there instead.
	 */
	if (count && entries && count < kfifo_avail(&tlb_flush_fifo->entries)) {
		WARN_ON(kfifo_in(&tlb_flush_fifo->entries, entries, count) != count);
		goto out_unlock;
	}
	/*
	 * Note: a full fifo always contains the 'flush all' entry, so there
	 * is no need to check the return value.
	 */
	kfifo_in(&tlb_flush_fifo->entries, &flush_all_entry, 1);
out_unlock:
	spin_unlock(&tlb_flush_fifo->write_lock);
}
int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 entries[KVM_HV_TLB_FLUSH_FIFO_SIZE];
	int i, j, count;
	gva_t gva;
	if (!tdp_enabled || !hv_vcpu)
		return -EINVAL;
	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));
	count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE);
	for (i = 0; i < count; i++) {
		if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
			goto out_flush_all;
		/*
		 * Lower 12 bits of 'address' encode the number of additional
		 * pages to flush.
		 */
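		/* e.g. an entry of (gva | 3) flushes gva and the next three pages */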
		gva = entries[i] & PAGE_MASK;
		for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++)
			static_call(kvm_x86_flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);
		++vcpu->stat.tlb_flush;
	}
	return 0;
out_flush_all:
	kfifo_reset_out(&tlb_flush_fifo->entries);
	/* Fall back to full flush. */
	return -ENOSPC;
}
static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 *sparse_banks = hv_vcpu->sparse_banks;
	struct kvm *kvm = vcpu->kvm;
	struct hv_tlb_flush_ex flush_ex;
	struct hv_tlb_flush flush;
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
	/*
	 * Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE'
	 * entries on the TLB flush fifo.  The last entry, however, always
	 * needs to be left free for the 'flush all' entry, which gets placed
	 * when there is not enough space for all the requested entries.
	 */
	u64 __tlb_flush_entries[KVM_HV_TLB_FLUSH_FIFO_SIZE - 1];
	u64 *tlb_flush_entries;
	u64 valid_bank_mask;
	struct kvm_vcpu *v;
	unsigned long i;
	bool all_cpus;
	/*
	 * The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS
	 * sparse banks. Fail the build if KVM's max allowed number of
	 * vCPUs (>4096) exceeds this limit.
	 */
	BUILD_BUG_ON(KVM_HV_MAX_SPARSE_VCPU_SET_BITS > HV_MAX_SPARSE_VCPU_BANKS);
	/*
	 * 'Slow' hypercall's first parameter is the address in guest's memory
	 * where hypercall parameters are placed. This is either a GPA or a
	 * nested GPA when KVM is handling the call from L2 ('direct' TLB
	 * flush).  Translate the address here so the memory can be uniformly
	 * read with kvm_read_guest().
	 */
	if (!hc->fast && is_guest_mode(vcpu)) {
		hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0, NULL);
		if (unlikely(hc->ingpa == INVALID_GPA))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}
	if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
	    hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) {
		if (hc->fast) {
			flush.address_space = hc->ingpa;
			flush.flags = hc->outgpa;
			flush.processor_mask = sse128_lo(hc->xmm[0]);
			hc->consumed_xmm_halves = 1;
		} else {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa,
						    &flush, sizeof(flush))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			hc->data_offset = sizeof(flush);
		}
		trace_kvm_hv_flush_tlb(flush.processor_mask,
				       flush.address_space, flush.flags,
				       is_guest_mode(vcpu));
		valid_bank_mask = BIT_ULL(0);
		sparse_banks[0] = flush.processor_mask;
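		/*
		 * A non-EX flush carries a plain 64-bit processor mask, which
		 * maps onto the sparse set representation as bank 0 alone.
		 */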
		/*
		 * Work around possible WS2012 bug: it sends hypercalls
		 * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
		 * while also expecting us to flush something and crashing if
		 * we don't. Let's treat processor_mask == 0 the same as
		 * HV_FLUSH_ALL_PROCESSORS.
		 */
		all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
			flush.processor_mask == 0;
	} else {
		if (hc->fast) {
			flush_ex.address_space = hc->ingpa;
			flush_ex.flags = hc->outgpa;
			memcpy(&flush_ex.hv_vp_set,
			       &hc->xmm[0], sizeof(hc->xmm[0]));
			hc->consumed_xmm_halves = 2;
		} else {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
						    sizeof(flush_ex))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			hc->data_offset = sizeof(flush_ex);
		}
		trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
					  flush_ex.hv_vp_set.format,
					  flush_ex.address_space,
					  flush_ex.flags, is_guest_mode(vcpu));
		valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
		all_cpus = flush_ex.hv_vp_set.format !=
			HV_GENERIC_SET_SPARSE_4K;
		if (hc->var_cnt != hweight64(valid_bank_mask))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
		if (!all_cpus) {
			if (!hc->var_cnt)
				goto ret_success;
			if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
		}
		/*
		 * Hyper-V TLFS doesn't explicitly forbid non-empty sparse vCPU
		 * banks (and, thus, non-zero 'var_cnt') for the 'all vCPUs'
		 * case (HV_GENERIC_SET_ALL).  Always adjust data_offset and
		 * consumed_xmm_halves to make sure TLB flush entries are read
		 * from the correct offset.
		 */
		if (hc->fast)
			hc->consumed_xmm_halves += hc->var_cnt;
		else
			hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
	}
	if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
	    hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
	    hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) {
		tlb_flush_entries = NULL;
	} else {
		if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
		tlb_flush_entries = __tlb_flush_entries;
	}
	/*
	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs, so we can't
	 * analyze it here; flush the TLB regardless of the specified address
	 * space.
	 */
	if (all_cpus && !is_guest_mode(vcpu)) {
		kvm_for_each_vcpu(i, v, kvm) {
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
		}
		kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH);
	} else if (!is_guest_mode(vcpu)) {
		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
		for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) {
			v = kvm_get_vcpu(kvm, i);
			if (!v)
				continue;
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
		}
		kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
	} else {
		struct kvm_vcpu_hv *hv_v;
		bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
		kvm_for_each_vcpu(i, v, kvm) {
			hv_v = to_hv_vcpu(v);
			/*
			 * The following check races with nested vCPUs entering/exiting
			 * and/or migrating between L1's vCPUs, however the only case when
			 * KVM *must* flush the TLB is when the target L2 vCPU keeps
			 * running on the same L1 vCPU from the moment of the request until
			 * kvm_hv_flush_tlb() returns. TLB is fully flushed in all other
			 * cases, e.g. when the target L2 vCPU migrates to a different L1
			 * vCPU or when the corresponding L1 vCPU temporarily switches to a
			 * different L2 vCPU while the request is being processed.
			 */
			if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id)
				continue;
			if (!all_cpus &&
			    !hv_is_vp_in_sparse_set(hv_v->nested.vp_id, valid_bank_mask,
						    sparse_banks))
				continue;
			__set_bit(i, vcpu_mask);
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, true);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
		}
		kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
	}
ret_success:
	/* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */
	return (u64)HV_STATUS_SUCCESS |
		((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}
static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector,
				    u64 *sparse_banks, u64 valid_bank_mask)
{
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = vector
	};
	struct kvm_vcpu *vcpu;
	unsigned long i;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (sparse_banks &&
		    !hv_is_vp_in_sparse_set(kvm_hv_get_vpindex(vcpu),
					    valid_bank_mask, sparse_banks))
			continue;
		/* We fail only when APIC is disabled */
		kvm_apic_set_irq(vcpu, &irq, NULL);
	}
}
static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 *sparse_banks = hv_vcpu->sparse_banks;
	struct kvm *kvm = vcpu->kvm;
	struct hv_send_ipi_ex send_ipi_ex;
	struct hv_send_ipi send_ipi;
	u64 valid_bank_mask;
	u32 vector;
	bool all_cpus;
	if (hc->code == HVCALL_SEND_IPI) {
		if (!hc->fast) {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
						    sizeof(send_ipi))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = send_ipi.cpu_mask;
			vector = send_ipi.vector;
		} else {
			/* 'reserved' part of hv_send_ipi should be 0 */
			if (unlikely(hc->ingpa >> 32 != 0))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = hc->outgpa;
			vector = (u32)hc->ingpa;
		}
		all_cpus = false;
		valid_bank_mask = BIT_ULL(0);
		trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
	} else {
		if (!hc->fast) {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
						    sizeof(send_ipi_ex))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
		} else {
			send_ipi_ex.vector = (u32)hc->ingpa;
			send_ipi_ex.vp_set.format = hc->outgpa;
			send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]);
		}
		trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
					 send_ipi_ex.vp_set.format,
					 send_ipi_ex.vp_set.valid_bank_mask);
		vector = send_ipi_ex.vector;
		valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
		all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
		if (hc->var_cnt != hweight64(valid_bank_mask))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
		if (all_cpus)
			goto check_and_send_ipi;
		if (!hc->var_cnt)
			goto ret_success;
		if (!hc->fast)
			hc->data_offset = offsetof(struct hv_send_ipi_ex,
						   vp_set.bank_contents);
		else
			hc->consumed_xmm_halves = 1;
		if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}
check_and_send_ipi:
	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
		return HV_STATUS_INVALID_HYPERCALL_INPUT;
	if (all_cpus)
		kvm_hv_send_ipi_to_many(kvm, vector, NULL, 0);
	else
		kvm_hv_send_ipi_to_many(kvm, vector, sparse_banks, valid_bank_mask);
ret_success:
	return HV_STATUS_SUCCESS;
}
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_cpuid_entry2 *entry;
	vcpu->arch.hyperv_enabled = hyperv_enabled;
	if (!hv_vcpu) {
		/*
		 * KVM should have already allocated kvm_vcpu_hv if Hyper-V is
		 * enabled in CPUID.
		 */
		WARN_ON_ONCE(vcpu->arch.hyperv_enabled);
		return;
	}
	memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache));
	if (!vcpu->arch.hyperv_enabled)
		return;
	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
	if (entry) {
		hv_vcpu->cpuid_cache.features_eax = entry->eax;
		hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
		hv_vcpu->cpuid_cache.features_edx = entry->edx;
	}
	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
	if (entry) {
		hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
		hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
	}
	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
	if (entry)
		hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_NESTED_FEATURES);
	if (entry) {
		hv_vcpu->cpuid_cache.nested_eax = entry->eax;
		hv_vcpu->cpuid_cache.nested_ebx = entry->ebx;
	}
}
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
{
	struct kvm_vcpu_hv *hv_vcpu;
	int ret = 0;
	if (!to_hv_vcpu(vcpu)) {
		if (enforce) {
			ret = kvm_hv_vcpu_init(vcpu);
			if (ret)
				return ret;
		} else {
			return 0;
		}
	}
	hv_vcpu = to_hv_vcpu(vcpu);
	hv_vcpu->enforce_cpuid = enforce;
	return ret;
}
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	bool longmode;
	longmode = is_64_bit_hypercall(vcpu);
	if (longmode)
		kvm_rax_write(vcpu, result);
	else {
		kvm_rdx_write(vcpu, result >> 32);
		kvm_rax_write(vcpu, result & 0xffffffff);
	}
}
static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
	u32 tlb_lock_count = 0;
	int ret;
	if (hv_result_success(result) && is_guest_mode(vcpu) &&
	    kvm_hv_is_tlb_flush_hcall(vcpu) &&
	    kvm_read_guest(vcpu->kvm, to_hv_vcpu(vcpu)->nested.pa_page_gpa,
			   &tlb_lock_count, sizeof(tlb_lock_count)))
		result = HV_STATUS_INVALID_HYPERCALL_INPUT;
	trace_kvm_hv_hypercall_done(result);
	kvm_hv_hypercall_set_result(vcpu, result);
	++vcpu->stat.hypercalls;
	ret = kvm_skip_emulated_instruction(vcpu);
	if (tlb_lock_count)
		kvm_x86_ops.nested_ops->hv_inject_synthetic_vmexit_post_tlb_flush(vcpu);
	return ret;
}
static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}
static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	struct eventfd_ctx *eventfd;
	if (unlikely(!hc->fast)) {
		int ret;
		gpa_t gpa = hc->ingpa;
		if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
		    offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
			return HV_STATUS_INVALID_ALIGNMENT;
		ret = kvm_vcpu_read_guest(vcpu, gpa,
					  &hc->ingpa, sizeof(hc->ingpa));
		if (ret < 0)
			return HV_STATUS_INVALID_ALIGNMENT;
	}
	/*
	 * Per spec, bits 32-47 contain the extra "flag number".  However, we
	 * have no use for it, and in all known use cases it is zero, so just
	 * report lookup failure if it isn't.
	 */
	if (hc->ingpa & 0xffff00000000ULL)
		return HV_STATUS_INVALID_PORT_ID;
	/* remaining bits are reserved-zero */
	if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
		return HV_STATUS_INVALID_HYPERCALL_INPUT;
	/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
	rcu_read_lock();
	eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
	rcu_read_unlock();
	if (!eventfd)
		return HV_STATUS_INVALID_PORT_ID;
	eventfd_signal(eventfd, 1);
	return HV_STATUS_SUCCESS;
}
static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
{
	switch (hc->code) {
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
	case HVCALL_SEND_IPI_EX:
		return true;
	}
	return false;
}
static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
{
	int reg;
	kvm_fpu_get();
	for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
		_kvm_read_sse_reg(reg, &hc->xmm[reg]);
	kvm_fpu_put();
}
static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
{
	if (!hv_vcpu->enforce_cpuid)
		return true;
	switch (code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		return hv_vcpu->cpuid_cache.enlightenments_ebx &&
			hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
	case HVCALL_POST_MESSAGE:
		return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
	case HVCALL_SIGNAL_EVENT:
		return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
	case HVCALL_POST_DEBUG_DATA:
	case HVCALL_RETRIEVE_DEBUG_DATA:
	case HVCALL_RESET_DEBUG_SESSION:
		/*
		 * Return 'true' when SynDBG is disabled so the resulting code
		 * will be HV_STATUS_INVALID_HYPERCALL_CODE.
		 */
		return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
			hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
		      HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
			return false;
		fallthrough;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		return hv_vcpu->cpuid_cache.enlightenments_eax &
			HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
	case HVCALL_SEND_IPI_EX:
		if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
		      HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
			return false;
		fallthrough;
	case HVCALL_SEND_IPI:
		return hv_vcpu->cpuid_cache.enlightenments_eax &
			HV_X64_CLUSTER_IPI_RECOMMENDED;
	case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX:
		return hv_vcpu->cpuid_cache.features_ebx &
			HV_ENABLE_EXTENDED_HYPERCALLS;
	default:
		break;
	}
	return true;
}
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_hv_hcall hc;
	u64 ret = HV_STATUS_SUCCESS;
	/*
	 * A hypercall generates a #UD from non-zero CPL or real mode,
	 * per the Hyper-V spec.
	 */
	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
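	/*
	 * Hyper-V hypercall ABI: 64-bit guests pass the input value in RCX
	 * and the input/output GPAs in RDX/R8; 32-bit guests use the
	 * EDX:EAX, EBX:ECX and EDI:ESI register pairs, respectively.
	 */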
#ifdef CONFIG_X86_64
	if (is_64_bit_hypercall(vcpu)) {
		hc.param = kvm_rcx_read(vcpu);
		hc.ingpa = kvm_rdx_read(vcpu);
		hc.outgpa = kvm_r8_read(vcpu);
	} else
#endif
	{
		hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
			    (kvm_rax_read(vcpu) & 0xffffffff);
		hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
			    (kvm_rcx_read(vcpu) & 0xffffffff);
		hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
			     (kvm_rsi_read(vcpu) & 0xffffffff);
	}
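	/*
	 * Decode the hypercall input value (per the TLFS): bits 0-15 are the
	 * call code, bit 16 the 'fast' flag, bits 17-26 the variable header
	 * size, bits 32-43 the rep count and bits 48-59 the rep start index.
	 */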
	hc.code = hc.param & 0xffff;
	hc.var_cnt = (hc.param & HV_HYPERCALL_VARHEAD_MASK) >> HV_HYPERCALL_VARHEAD_OFFSET;
	hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
	hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
	hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
	hc.rep = !!(hc.rep_cnt || hc.rep_idx);
	trace_kvm_hv_hypercall(hc.code, hc.fast, hc.var_cnt, hc.rep_cnt,
			       hc.rep_idx, hc.ingpa, hc.outgpa);
	if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
		ret = HV_STATUS_ACCESS_DENIED;
		goto hypercall_complete;
	}
	if (unlikely(hc.param & HV_HYPERCALL_RSVD_MASK)) {
		ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
		goto hypercall_complete;
	}
	if (hc.fast && is_xmm_fast_hypercall(&hc)) {
		if (unlikely(hv_vcpu->enforce_cpuid &&
			     !(hv_vcpu->cpuid_cache.features_edx &
			       HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}
		kvm_hv_hypercall_read_xmm(&hc);
	}
	switch (hc.code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		if (unlikely(hc.rep || hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		kvm_vcpu_on_spin(vcpu, true);
		break;
	case HVCALL_SIGNAL_EVENT:
		if (unlikely(hc.rep || hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hvcall_signal_event(vcpu, &hc);
		if (ret != HV_STATUS_INVALID_PORT_ID)
			break;
		fallthrough;	/* maybe userspace knows this conn_id */
	case HVCALL_POST_MESSAGE:
		/* don't bother userspace if it has no way to handle it */
		if (unlikely(hc.rep || hc.var_cnt || !to_hv_synic(vcpu)->active)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		goto hypercall_userspace_exit;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
		if (unlikely(hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		fallthrough;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
		if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		if (unlikely(hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		fallthrough;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc);
		break;
	case HVCALL_SEND_IPI:
		if (unlikely(hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		fallthrough;
	case HVCALL_SEND_IPI_EX:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, &hc);
		break;
	case HVCALL_POST_DEBUG_DATA:
	case HVCALL_RETRIEVE_DEBUG_DATA:
		if (unlikely(hc.fast)) {
			ret = HV_STATUS_INVALID_PARAMETER;
			break;
		}
		fallthrough;
	case HVCALL_RESET_DEBUG_SESSION: {
		struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
		if (!kvm_hv_is_syndbg_enabled(vcpu)) {
			ret = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		}
		if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
			ret = HV_STATUS_OPERATION_DENIED;
			break;
		}
		goto hypercall_userspace_exit;
	}
	case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX:
		if (unlikely(hc.fast)) {
			ret = HV_STATUS_INVALID_PARAMETER;
			break;
		}
		goto hypercall_userspace_exit;
	default:
		ret = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}
hypercall_complete:
	return kvm_hv_hypercall_complete(vcpu, ret);
hypercall_userspace_exit:
	vcpu->run->exit_reason = KVM_EXIT_HYPERV;
	vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
	vcpu->run->hyperv.u.hcall.input = hc.param;
	vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
	vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
	vcpu->arch.complete_userspace_io = kvm_hv_hypercall_complete_userspace;
	return 0;
}
void kvm_hv_init_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	mutex_init(&hv->hv_lock);
	idr_init(&hv->conn_to_evt);
}
void kvm_hv_destroy_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int i;
	idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
		eventfd_ctx_put(eventfd);
	idr_destroy(&hv->conn_to_evt);
}
static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int ret;
	eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);
	mutex_lock(&hv->hv_lock);
	ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
			GFP_KERNEL_ACCOUNT);
	mutex_unlock(&hv->hv_lock);
	if (ret >= 0)
		return 0;
	if (ret == -ENOSPC)
		ret = -EEXIST;
	eventfd_ctx_put(eventfd);
	return ret;
}
static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	mutex_lock(&hv->hv_lock);
	eventfd = idr_remove(&hv->conn_to_evt, conn_id);
	mutex_unlock(&hv->hv_lock);
	if (!eventfd)
		return -ENOENT;
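	/*
	 * Wait for any in-flight kvm_hvcall_signal_event(), which looks up
	 * the eventfd under (S)RCU, to finish before dropping the reference.
	 */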
	synchronize_srcu(&kvm->srcu);
	eventfd_ctx_put(eventfd);
	return 0;
}
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{
	if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
	    (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
		return -EINVAL;
	if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}
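/*
 * Fill the caller's buffer with the Hyper-V CPUID leaves KVM supports; this
 * backs the KVM_GET_SUPPORTED_HV_CPUID ioctl.
 */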
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries)
{
	uint16_t evmcs_ver = 0;
	struct kvm_cpuid_entry2 cpuid_entries[] = {
		{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_INTERFACE },
		{ .function = HYPERV_CPUID_VERSION },
		{ .function = HYPERV_CPUID_FEATURES },
		{ .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
		{ .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
		{ .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_SYNDBG_INTERFACE },
		{ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES	},
		{ .function = HYPERV_CPUID_NESTED_FEATURES },
	};
	int i, nent = ARRAY_SIZE(cpuid_entries);
	if (kvm_x86_ops.nested_ops->get_evmcs_version)
		evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);
	if (cpuid->nent < nent)
		return -E2BIG;
	if (cpuid->nent > nent)
		cpuid->nent = nent;
	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
		u32 signature[3];
		switch (ent->function) {
		case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);
			ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;
		case HYPERV_CPUID_INTERFACE:
			ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
			break;
		case HYPERV_CPUID_VERSION:
			/*
			 * We implement some Hyper-V 2016 functions, so let's use
			 * this version.
			 */
			ent->eax = 0x00003839;
			ent->ebx = 0x000A0000;
			break;
		case HYPERV_CPUID_FEATURES:
			ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
			ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
			ent->eax |= HV_MSR_SYNIC_AVAILABLE;
			ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
			ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
			ent->eax |= HV_MSR_HYPERCALL_AVAILABLE;
			ent->eax |= HV_MSR_VP_INDEX_AVAILABLE;
			ent->eax |= HV_MSR_RESET_AVAILABLE;
			ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			ent->eax |= HV_ACCESS_FREQUENCY_MSRS;
			ent->eax |= HV_ACCESS_REENLIGHTENMENT;
			ent->eax |= HV_ACCESS_TSC_INVARIANT;
			ent->ebx |= HV_POST_MESSAGES;
			ent->ebx |= HV_SIGNAL_EVENTS;
			ent->ebx |= HV_ENABLE_EXTENDED_HYPERCALLS;
			ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
			ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
			ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
			ent->ebx |= HV_DEBUGGING;
			ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
			ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
			ent->edx |= HV_FEATURE_EXT_GVA_RANGES_FLUSH;
			/*
			 * Direct synthetic timers only make sense with an
			 * in-kernel LAPIC.
			 */
			if (!vcpu || lapic_in_kernel(vcpu))
				ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
			break;
		case HYPERV_CPUID_ENLIGHTMENT_INFO:
			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			if (evmcs_ver)
				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
			if (!cpu_smt_possible())
				ent->eax |= HV_X64_NO_NONARCH_CORESHARING;
			ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED;
			/*
			 * Default number of spinlock retry attempts, matches
			 * HyperV 2016.
			 */
			ent->ebx = 0x00000FFF;
			break;
		case HYPERV_CPUID_IMPLEMENT_LIMITS:
			/* Maximum number of virtual processors */
			ent->eax = KVM_MAX_VCPUS;
			/*
			 * Maximum number of logical processors, matches
			 * HyperV 2016.
			 */
			ent->ebx = 64;
			break;
		case HYPERV_CPUID_NESTED_FEATURES:
			ent->eax = evmcs_ver;
			ent->eax |= HV_X64_NESTED_DIRECT_FLUSH;
			ent->eax |= HV_X64_NESTED_MSR_BITMAP;
			ent->ebx |= HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL;
			break;
		case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);
			ent->eax = 0;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;
		case HYPERV_CPUID_SYNDBG_INTERFACE:
			memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
			ent->eax = signature[0];
			break;
		case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
			ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			break;
		default:
			break;
		}
	}
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;
	return 0;
}
 | 
	linux-master | 
	arch/x86/kvm/hyperv.c | 