Dataset columns: python_code (string, lengths 0 to 1.8M), repo_name (string, 7 classes), file_path (string, lengths 5 to 99).
// SPDX-License-Identifier: GPL-2.0-or-later /* ----------------------------------------------------------------------- * * * Copyright 2000-2008 H. Peter Anvin - All Rights Reserved * * ----------------------------------------------------------------------- */ /* * x86 CPUID access device * * This device is accessed by lseek() to the appropriate CPUID level * and then read in chunks of 16 bytes. A larger size means multiple * reads of consecutive levels. * * The lower 32 bits of the file position is used as the incoming %eax, * and the upper 32 bits of the file position as the incoming %ecx, * the latter intended for "counting" eax levels like eax=4. * * This driver uses /dev/cpu/%d/cpuid where %d is the minor number, and on * an SMP box will direct the access to CPU %d. */ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/smp.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/device.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/uaccess.h> #include <linux/gfp.h> #include <linux/completion.h> #include <asm/processor.h> #include <asm/msr.h> static enum cpuhp_state cpuhp_cpuid_state; struct cpuid_regs_done { struct cpuid_regs regs; struct completion done; }; static void cpuid_smp_cpuid(void *cmd_block) { struct cpuid_regs_done *cmd = cmd_block; cpuid_count(cmd->regs.eax, cmd->regs.ecx, &cmd->regs.eax, &cmd->regs.ebx, &cmd->regs.ecx, &cmd->regs.edx); complete(&cmd->done); } static ssize_t cpuid_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { char __user *tmp = buf; struct cpuid_regs_done cmd; int cpu = iminor(file_inode(file)); u64 pos = *ppos; ssize_t bytes = 0; int err = 0; if (count % 16) return -EINVAL; /* Invalid chunk size */ init_completion(&cmd.done); for (; count; count -= 16) { call_single_data_t csd; INIT_CSD(&csd, cpuid_smp_cpuid, &cmd); cmd.regs.eax = pos; cmd.regs.ecx = pos >> 32; err = smp_call_function_single_async(cpu, &csd); if (err) break; wait_for_completion(&cmd.done); if (copy_to_user(tmp, &cmd.regs, 16)) { err = -EFAULT; break; } tmp += 16; bytes += 16; *ppos = ++pos; reinit_completion(&cmd.done); } return bytes ? 
bytes : err; } static int cpuid_open(struct inode *inode, struct file *file) { unsigned int cpu; struct cpuinfo_x86 *c; cpu = iminor(file_inode(file)); if (cpu >= nr_cpu_ids || !cpu_online(cpu)) return -ENXIO; /* No such CPU */ c = &cpu_data(cpu); if (c->cpuid_level < 0) return -EIO; /* CPUID not supported */ return 0; } /* * File operations we support */ static const struct file_operations cpuid_fops = { .owner = THIS_MODULE, .llseek = no_seek_end_llseek, .read = cpuid_read, .open = cpuid_open, }; static char *cpuid_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt)); } static const struct class cpuid_class = { .name = "cpuid", .devnode = cpuid_devnode, }; static int cpuid_device_create(unsigned int cpu) { struct device *dev; dev = device_create(&cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), NULL, "cpu%d", cpu); return PTR_ERR_OR_ZERO(dev); } static int cpuid_device_destroy(unsigned int cpu) { device_destroy(&cpuid_class, MKDEV(CPUID_MAJOR, cpu)); return 0; } static int __init cpuid_init(void) { int err; if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid", &cpuid_fops)) { printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n", CPUID_MAJOR); return -EBUSY; } err = class_register(&cpuid_class); if (err) goto out_chrdev; err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/cpuid:online", cpuid_device_create, cpuid_device_destroy); if (err < 0) goto out_class; cpuhp_cpuid_state = err; return 0; out_class: class_unregister(&cpuid_class); out_chrdev: __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); return err; } module_init(cpuid_init); static void __exit cpuid_exit(void) { cpuhp_remove_state(cpuhp_cpuid_state); class_unregister(&cpuid_class); __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); } module_exit(cpuid_exit); MODULE_AUTHOR("H. Peter Anvin <[email protected]>"); MODULE_DESCRIPTION("x86 generic CPUID driver"); MODULE_LICENSE("GPL");
linux-master
arch/x86/kernel/cpuid.c
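The header comment in cpuid.c above spells out the device protocol: the low 32 bits of the file offset become the incoming %eax leaf, the high 32 bits the incoming %ecx subleaf, and every 16 bytes read return one EAX/EBX/ECX/EDX quadruple. A minimal userspace sketch of that protocol, assuming the driver is present and /dev/cpu/0/cpuid (named per cpuid_devnode() above) is readable:

/* Sketch only: query one CPUID leaf/subleaf via the cpuid character device.
 * Assumes /dev/cpu/0/cpuid exists (CONFIG_X86_CPUID) and is readable. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	uint32_t leaf = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;
	uint32_t subleaf = argc > 2 ? strtoul(argv[2], NULL, 0) : 0;
	uint32_t regs[4];	/* EAX, EBX, ECX, EDX: 16 bytes per level */
	int fd = open("/dev/cpu/0/cpuid", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/cpuid");
		return 1;
	}

	/* The file position encodes the request: low 32 bits -> %eax,
	 * high 32 bits -> %ecx, exactly as cpuid_read() unpacks it. */
	if (pread(fd, regs, sizeof(regs),
		  (off_t)(((uint64_t)subleaf << 32) | leaf)) != sizeof(regs)) {
		perror("pread");
		close(fd);
		return 1;
	}

	printf("leaf 0x%x.%u: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
	       leaf, subleaf, regs[0], regs[1], regs[2], regs[3]);
	close(fd);
	return 0;
}

Reading a larger multiple of 16 bytes in one call returns consecutive leaves, which is the loop cpuid_read() implements; a count that is not a multiple of 16 is rejected with -EINVAL.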
// SPDX-License-Identifier: GPL-2.0-only /* * Interrupt descriptor table related code */ #include <linux/interrupt.h> #include <asm/cpu_entry_area.h> #include <asm/set_memory.h> #include <asm/traps.h> #include <asm/proto.h> #include <asm/desc.h> #include <asm/hw_irq.h> #include <asm/idtentry.h> #define DPL0 0x0 #define DPL3 0x3 #define DEFAULT_STACK 0 #define G(_vector, _addr, _ist, _type, _dpl, _segment) \ { \ .vector = _vector, \ .bits.ist = _ist, \ .bits.type = _type, \ .bits.dpl = _dpl, \ .bits.p = 1, \ .addr = _addr, \ .segment = _segment, \ } /* Interrupt gate */ #define INTG(_vector, _addr) \ G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL0, __KERNEL_CS) /* System interrupt gate */ #define SYSG(_vector, _addr) \ G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL3, __KERNEL_CS) #ifdef CONFIG_X86_64 /* * Interrupt gate with interrupt stack. The _ist index is the index in * the tss.ist[] array, but for the descriptor it needs to start at 1. */ #define ISTG(_vector, _addr, _ist) \ G(_vector, _addr, _ist + 1, GATE_INTERRUPT, DPL0, __KERNEL_CS) #else #define ISTG(_vector, _addr, _ist) INTG(_vector, _addr) #endif /* Task gate */ #define TSKG(_vector, _gdt) \ G(_vector, NULL, DEFAULT_STACK, GATE_TASK, DPL0, _gdt << 3) #define IDT_TABLE_SIZE (IDT_ENTRIES * sizeof(gate_desc)) static bool idt_setup_done __initdata; /* * Early traps running on the DEFAULT_STACK because the other interrupt * stacks work only after cpu_init(). */ static const __initconst struct idt_data early_idts[] = { INTG(X86_TRAP_DB, asm_exc_debug), SYSG(X86_TRAP_BP, asm_exc_int3), #ifdef CONFIG_X86_32 /* * Not possible on 64-bit. See idt_setup_early_pf() for details. */ INTG(X86_TRAP_PF, asm_exc_page_fault), #endif #ifdef CONFIG_INTEL_TDX_GUEST INTG(X86_TRAP_VE, asm_exc_virtualization_exception), #endif }; /* * The default IDT entries which are set up in trap_init() before * cpu_init() is invoked. Interrupt stacks cannot be used at that point and * the traps which use them are reinitialized with IST after cpu_init() has * set up TSS. 
*/ static const __initconst struct idt_data def_idts[] = { INTG(X86_TRAP_DE, asm_exc_divide_error), ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI), INTG(X86_TRAP_BR, asm_exc_bounds), INTG(X86_TRAP_UD, asm_exc_invalid_op), INTG(X86_TRAP_NM, asm_exc_device_not_available), INTG(X86_TRAP_OLD_MF, asm_exc_coproc_segment_overrun), INTG(X86_TRAP_TS, asm_exc_invalid_tss), INTG(X86_TRAP_NP, asm_exc_segment_not_present), INTG(X86_TRAP_SS, asm_exc_stack_segment), INTG(X86_TRAP_GP, asm_exc_general_protection), INTG(X86_TRAP_SPURIOUS, asm_exc_spurious_interrupt_bug), INTG(X86_TRAP_MF, asm_exc_coprocessor_error), INTG(X86_TRAP_AC, asm_exc_alignment_check), INTG(X86_TRAP_XF, asm_exc_simd_coprocessor_error), #ifdef CONFIG_X86_32 TSKG(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS), #else ISTG(X86_TRAP_DF, asm_exc_double_fault, IST_INDEX_DF), #endif ISTG(X86_TRAP_DB, asm_exc_debug, IST_INDEX_DB), #ifdef CONFIG_X86_MCE ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE), #endif #ifdef CONFIG_X86_CET INTG(X86_TRAP_CP, asm_exc_control_protection), #endif #ifdef CONFIG_AMD_MEM_ENCRYPT ISTG(X86_TRAP_VC, asm_exc_vmm_communication, IST_INDEX_VC), #endif SYSG(X86_TRAP_OF, asm_exc_overflow), #if defined(CONFIG_IA32_EMULATION) SYSG(IA32_SYSCALL_VECTOR, entry_INT80_compat), #elif defined(CONFIG_X86_32) SYSG(IA32_SYSCALL_VECTOR, entry_INT80_32), #endif }; /* * The APIC and SMP idt entries */ static const __initconst struct idt_data apic_idts[] = { #ifdef CONFIG_SMP INTG(RESCHEDULE_VECTOR, asm_sysvec_reschedule_ipi), INTG(CALL_FUNCTION_VECTOR, asm_sysvec_call_function), INTG(CALL_FUNCTION_SINGLE_VECTOR, asm_sysvec_call_function_single), INTG(REBOOT_VECTOR, asm_sysvec_reboot), #endif #ifdef CONFIG_X86_THERMAL_VECTOR INTG(THERMAL_APIC_VECTOR, asm_sysvec_thermal), #endif #ifdef CONFIG_X86_MCE_THRESHOLD INTG(THRESHOLD_APIC_VECTOR, asm_sysvec_threshold), #endif #ifdef CONFIG_X86_MCE_AMD INTG(DEFERRED_ERROR_VECTOR, asm_sysvec_deferred_error), #endif #ifdef CONFIG_X86_LOCAL_APIC INTG(LOCAL_TIMER_VECTOR, asm_sysvec_apic_timer_interrupt), INTG(X86_PLATFORM_IPI_VECTOR, asm_sysvec_x86_platform_ipi), # ifdef CONFIG_HAVE_KVM INTG(POSTED_INTR_VECTOR, asm_sysvec_kvm_posted_intr_ipi), INTG(POSTED_INTR_WAKEUP_VECTOR, asm_sysvec_kvm_posted_intr_wakeup_ipi), INTG(POSTED_INTR_NESTED_VECTOR, asm_sysvec_kvm_posted_intr_nested_ipi), # endif # ifdef CONFIG_IRQ_WORK INTG(IRQ_WORK_VECTOR, asm_sysvec_irq_work), # endif INTG(SPURIOUS_APIC_VECTOR, asm_sysvec_spurious_apic_interrupt), INTG(ERROR_APIC_VECTOR, asm_sysvec_error_interrupt), #endif }; /* Must be page-aligned because the real IDT is used in the cpu entry area */ static gate_desc idt_table[IDT_ENTRIES] __page_aligned_bss; static struct desc_ptr idt_descr __ro_after_init = { .size = IDT_TABLE_SIZE - 1, .address = (unsigned long) idt_table, }; void load_current_idt(void) { lockdep_assert_irqs_disabled(); load_idt(&idt_descr); } #ifdef CONFIG_X86_F00F_BUG bool idt_is_f00f_address(unsigned long address) { return ((address - idt_descr.address) >> 3) == 6; } #endif static __init void idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sys) { gate_desc desc; for (; size > 0; t++, size--) { idt_init_desc(&desc, t); write_idt_entry(idt, t->vector, &desc); if (sys) set_bit(t->vector, system_vectors); } } static __init void set_intr_gate(unsigned int n, const void *addr) { struct idt_data data; init_idt_data(&data, n, addr); idt_setup_from_table(idt_table, &data, 1, false); } /** * idt_setup_early_traps - Initialize the idt table with early traps * * On X8664 these traps do not 
use interrupt stacks as they can't work * before cpu_init() is invoked and sets up TSS. The IST variants are * installed after that. */ void __init idt_setup_early_traps(void) { idt_setup_from_table(idt_table, early_idts, ARRAY_SIZE(early_idts), true); load_idt(&idt_descr); } /** * idt_setup_traps - Initialize the idt table with default traps */ void __init idt_setup_traps(void) { idt_setup_from_table(idt_table, def_idts, ARRAY_SIZE(def_idts), true); } #ifdef CONFIG_X86_64 /* * Early traps running on the DEFAULT_STACK because the other interrupt * stacks work only after cpu_init(). */ static const __initconst struct idt_data early_pf_idts[] = { INTG(X86_TRAP_PF, asm_exc_page_fault), }; /** * idt_setup_early_pf - Initialize the idt table with early pagefault handler * * On X8664 this does not use interrupt stacks as they can't work before * cpu_init() is invoked and sets up TSS. The IST variant is installed * after that. * * Note, that X86_64 cannot install the real #PF handler in * idt_setup_early_traps() because the memory initialization needs the #PF * handler from the early_idt_handler_array to initialize the early page * tables. */ void __init idt_setup_early_pf(void) { idt_setup_from_table(idt_table, early_pf_idts, ARRAY_SIZE(early_pf_idts), true); } #endif static void __init idt_map_in_cea(void) { /* * Set the IDT descriptor to a fixed read-only location in the cpu * entry area, so that the "sidt" instruction will not leak the * location of the kernel, and to defend the IDT against arbitrary * memory write vulnerabilities. */ cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table), PAGE_KERNEL_RO); idt_descr.address = CPU_ENTRY_AREA_RO_IDT; } /** * idt_setup_apic_and_irq_gates - Setup APIC/SMP and normal interrupt gates */ void __init idt_setup_apic_and_irq_gates(void) { int i = FIRST_EXTERNAL_VECTOR; void *entry; idt_setup_from_table(idt_table, apic_idts, ARRAY_SIZE(apic_idts), true); for_each_clear_bit_from(i, system_vectors, FIRST_SYSTEM_VECTOR) { entry = irq_entries_start + IDT_ALIGN * (i - FIRST_EXTERNAL_VECTOR); set_intr_gate(i, entry); } #ifdef CONFIG_X86_LOCAL_APIC for_each_clear_bit_from(i, system_vectors, NR_VECTORS) { /* * Don't set the non assigned system vectors in the * system_vectors bitmap. Otherwise they show up in * /proc/interrupts. */ entry = spurious_entries_start + IDT_ALIGN * (i - FIRST_SYSTEM_VECTOR); set_intr_gate(i, entry); } #endif /* Map IDT into CPU entry area and reload it. */ idt_map_in_cea(); load_idt(&idt_descr); /* Make the IDT table read only */ set_memory_ro((unsigned long)&idt_table, 1); idt_setup_done = true; } /** * idt_setup_early_handler - Initializes the idt table with early handlers */ void __init idt_setup_early_handler(void) { int i; for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) set_intr_gate(i, early_idt_handler_array[i]); #ifdef CONFIG_X86_32 for ( ; i < NR_VECTORS; i++) set_intr_gate(i, early_ignore_irq); #endif load_idt(&idt_descr); } /** * idt_invalidate - Invalidate interrupt descriptor table */ void idt_invalidate(void) { static const struct desc_ptr idt = { .address = 0, .size = 0 }; load_idt(&idt); } void __init alloc_intr_gate(unsigned int n, const void *addr) { if (WARN_ON(n < FIRST_SYSTEM_VECTOR)) return; if (WARN_ON(idt_setup_done)) return; if (!WARN_ON(test_and_set_bit(n, system_vectors))) set_intr_gate(n, addr); }
linux-master
arch/x86/kernel/idt.c
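The gate tables in idt.c above are built entirely from the G()/INTG()/ISTG() designated-initializer macros. The toy program below is a userspace model with a simplified stand-in for struct idt_data (not the kernel structure); the selector value and handler are placeholders, and it only shows how one INTG() entry expands:

/* Illustrative model of the G()/INTG() expansion used by idt.c. */
#include <stdio.h>

struct toy_idt_bits {
	unsigned ist:3, zero:5, type:5, dpl:2, p:1;
};

struct toy_idt_data {
	unsigned int vector;
	unsigned int segment;
	struct toy_idt_bits bits;
	const void *addr;
};

#define GATE_INTERRUPT	0xE
#define DPL0		0x0
#define DEFAULT_STACK	0
#define TOY_KERNEL_CS	0x10	/* placeholder selector for the demo */

#define G(_vector, _addr, _ist, _type, _dpl, _segment)	\
	{						\
		.vector		= _vector,		\
		.bits.ist	= _ist,			\
		.bits.type	= _type,		\
		.bits.dpl	= _dpl,			\
		.bits.p		= 1,			\
		.addr		= _addr,		\
		.segment	= _segment,		\
	}

#define INTG(_vector, _addr) \
	G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL0, TOY_KERNEL_CS)

static void toy_handler(void)
{
}

static const struct toy_idt_data table[] = {
	INTG(14, (const void *)toy_handler),	/* a #PF-style interrupt gate */
};

int main(void)
{
	printf("vector=%u type=0x%x dpl=%u p=%u ist=%u segment=0x%x\n",
	       table[0].vector, table[0].bits.type, table[0].bits.dpl,
	       table[0].bits.p, table[0].bits.ist, table[0].segment);
	return 0;
}

In the real table, idt_setup_from_table() turns each such entry into a hardware gate descriptor via idt_init_desc() and, for system vectors, marks the vector in the system_vectors bitmap.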
// SPDX-License-Identifier: GPL-2.0 #include <linux/linkage.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/timex.h> #include <linux/random.h> #include <linux/kprobes.h> #include <linux/init.h> #include <linux/kernel_stat.h> #include <linux/device.h> #include <linux/bitops.h> #include <linux/acpi.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/pgtable.h> #include <linux/atomic.h> #include <asm/timer.h> #include <asm/hw_irq.h> #include <asm/desc.h> #include <asm/io_apic.h> #include <asm/acpi.h> #include <asm/apic.h> #include <asm/setup.h> #include <asm/i8259.h> #include <asm/traps.h> #include <asm/prom.h> /* * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: * (these are usually mapped to vectors 0x30-0x3f) */ /* * The IO-APIC gives us many more interrupt sources. Most of these * are unused but an SMP system is supposed to have enough memory ... * sometimes (mostly wrt. hw bugs) we get corrupted vectors all * across the spectrum, so we really want to be prepared to get all * of these. Plus, more powerful systems might have more than 64 * IO-APIC registers. * * (these are usually mapped into the 0x30-0xff vector range) */ DEFINE_PER_CPU(vector_irq_t, vector_irq) = { [0 ... NR_VECTORS - 1] = VECTOR_UNUSED, }; void __init init_ISA_irqs(void) { struct irq_chip *chip = legacy_pic->chip; int i; /* * Try to set up the through-local-APIC virtual wire mode earlier. * * On some 32-bit UP machines, whose APIC has been disabled by BIOS * and then got re-enabled by "lapic", it hangs at boot time without this. */ init_bsp_APIC(); legacy_pic->init(0); for (i = 0; i < nr_legacy_irqs(); i++) { irq_set_chip_and_handler(i, chip, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } } void __init init_IRQ(void) { int i; /* * On cpu 0, Assign ISA_IRQ_VECTOR(irq) to IRQ 0..15. * If these IRQ's are handled by legacy interrupt-controllers like PIC, * then this configuration will likely be static after the boot. If * these IRQs are handled by more modern controllers like IO-APIC, * then this vector space can be freed and re-used dynamically as the * irq's migrate etc. */ for (i = 0; i < nr_legacy_irqs(); i++) per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i); BUG_ON(irq_init_percpu_irqstack(smp_processor_id())); x86_init.irqs.intr_init(); } void __init native_init_IRQ(void) { /* Execute any quirks before the call gates are initialised: */ x86_init.irqs.pre_vector_init(); idt_setup_apic_and_irq_gates(); lapic_assign_system_vectors(); if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs()) { /* IRQ2 is cascade interrupt to second interrupt controller */ if (request_irq(2, no_action, IRQF_NO_THREAD, "cascade", NULL)) pr_err("%s: request_irq() failed\n", "cascade"); } }
linux-master
arch/x86/kernel/irqinit.c
/* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs */ #include <linux/kallsyms.h> #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/utsname.h> #include <linux/hardirq.h> #include <linux/kdebug.h> #include <linux/module.h> #include <linux/ptrace.h> #include <linux/sched/debug.h> #include <linux/sched/task_stack.h> #include <linux/ftrace.h> #include <linux/kexec.h> #include <linux/bug.h> #include <linux/nmi.h> #include <linux/sysfs.h> #include <linux/kasan.h> #include <asm/cpu_entry_area.h> #include <asm/stacktrace.h> #include <asm/unwind.h> int panic_on_unrecovered_nmi; int panic_on_io_nmi; static int die_counter; static struct pt_regs exec_summary_regs; bool noinstr in_task_stack(unsigned long *stack, struct task_struct *task, struct stack_info *info) { unsigned long *begin = task_stack_page(task); unsigned long *end = task_stack_page(task) + THREAD_SIZE; if (stack < begin || stack >= end) return false; info->type = STACK_TYPE_TASK; info->begin = begin; info->end = end; info->next_sp = NULL; return true; } /* Called from get_stack_info_noinstr - so must be noinstr too */ bool noinstr in_entry_stack(unsigned long *stack, struct stack_info *info) { struct entry_stack *ss = cpu_entry_stack(smp_processor_id()); void *begin = ss; void *end = ss + 1; if ((void *)stack < begin || (void *)stack >= end) return false; info->type = STACK_TYPE_ENTRY; info->begin = begin; info->end = end; info->next_sp = NULL; return true; } static void printk_stack_address(unsigned long address, int reliable, const char *log_lvl) { touch_nmi_watchdog(); printk("%s %s%pBb\n", log_lvl, reliable ? "" : "? ", (void *)address); } static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src, unsigned int nbytes) { if (!user_mode(regs)) return copy_from_kernel_nofault(buf, (u8 *)src, nbytes); /* The user space code from other tasks cannot be accessed. */ if (regs != task_pt_regs(current)) return -EPERM; /* * Even if named copy_from_user_nmi() this can be invoked from * other contexts and will not try to resolve a pagefault, which is * the correct thing to do here as this code can be called from any * context. */ return copy_from_user_nmi(buf, (void __user *)src, nbytes); } /* * There are a couple of reasons for the 2/3rd prologue, courtesy of Linus: * * In case where we don't have the exact kernel image (which, if we did, we can * simply disassemble and navigate to the RIP), the purpose of the bigger * prologue is to have more context and to be able to correlate the code from * the different toolchains better. * * In addition, it helps in recreating the register allocation of the failing * kernel and thus make sense of the register dump. * * What is more, the additional complication of a variable length insn arch like * x86 warrants having longer byte sequence before rIP so that the disassembler * can "sync" up properly and find instruction boundaries when decoding the * opcode bytes. * * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random * guesstimate in attempt to achieve all of the above. 
*/ void show_opcodes(struct pt_regs *regs, const char *loglvl) { #define PROLOGUE_SIZE 42 #define EPILOGUE_SIZE 21 #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE) u8 opcodes[OPCODE_BUFSIZE]; unsigned long prologue = regs->ip - PROLOGUE_SIZE; switch (copy_code(regs, opcodes, prologue, sizeof(opcodes))) { case 0: printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes, opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1); break; case -EPERM: /* No access to the user space stack of other tasks. Ignore. */ break; default: printk("%sCode: Unable to access opcode bytes at 0x%lx.\n", loglvl, prologue); break; } } void show_ip(struct pt_regs *regs, const char *loglvl) { #ifdef CONFIG_X86_32 printk("%sEIP: %pS\n", loglvl, (void *)regs->ip); #else printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip); #endif show_opcodes(regs, loglvl); } void show_iret_regs(struct pt_regs *regs, const char *log_lvl) { show_ip(regs, log_lvl); printk("%sRSP: %04x:%016lx EFLAGS: %08lx", log_lvl, (int)regs->ss, regs->sp, regs->flags); } static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs, bool partial, const char *log_lvl) { /* * These on_stack() checks aren't strictly necessary: the unwind code * has already validated the 'regs' pointer. The checks are done for * ordering reasons: if the registers are on the next stack, we don't * want to print them out yet. Otherwise they'll be shown as part of * the wrong stack. Later, when show_trace_log_lvl() switches to the * next stack, this function will be called again with the same regs so * they can be printed in the right context. */ if (!partial && on_stack(info, regs, sizeof(*regs))) { __show_regs(regs, SHOW_REGS_SHORT, log_lvl); } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET, IRET_FRAME_SIZE)) { /* * When an interrupt or exception occurs in entry code, the * full pt_regs might not have been saved yet. In that case * just print the iret frame. */ show_iret_regs(regs, log_lvl); } } /* * This function reads pointers from the stack and dereferences them. The * pointers may not have their KMSAN shadow set up properly, which may result * in false positive reports. Disable instrumentation to avoid those. */ __no_kmsan_checks static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, const char *log_lvl) { struct unwind_state state; struct stack_info stack_info = {0}; unsigned long visit_mask = 0; int graph_idx = 0; bool partial = false; printk("%sCall Trace:\n", log_lvl); unwind_start(&state, task, regs, stack); regs = unwind_get_entry_regs(&state, &partial); /* * Iterate through the stacks, starting with the current stack pointer. * Each stack has a pointer to the next one. * * x86-64 can have several stacks: * - task stack * - interrupt stack * - HW exception stacks (double fault, nmi, debug, mce) * - entry stack * * x86-32 can have up to four stacks: * - task stack * - softirq stack * - hardirq stack * - entry stack */ for (stack = stack ?: get_stack_pointer(task, regs); stack; stack = stack_info.next_sp) { const char *stack_name; stack = PTR_ALIGN(stack, sizeof(long)); if (get_stack_info(stack, task, &stack_info, &visit_mask)) { /* * We weren't on a valid stack. It's possible that * we overflowed a valid stack into a guard page. * See if the next page up is valid so that we can * generate some kind of backtrace if this happens. 
*/ stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack); if (get_stack_info(stack, task, &stack_info, &visit_mask)) break; } stack_name = stack_type_name(stack_info.type); if (stack_name) printk("%s <%s>\n", log_lvl, stack_name); if (regs) show_regs_if_on_stack(&stack_info, regs, partial, log_lvl); /* * Scan the stack, printing any text addresses we find. At the * same time, follow proper stack frames with the unwinder. * * Addresses found during the scan which are not reported by * the unwinder are considered to be additional clues which are * sometimes useful for debugging and are prefixed with '?'. * This also serves as a failsafe option in case the unwinder * goes off in the weeds. */ for (; stack < stack_info.end; stack++) { unsigned long real_addr; int reliable = 0; unsigned long addr = READ_ONCE_NOCHECK(*stack); unsigned long *ret_addr_p = unwind_get_return_address_ptr(&state); if (!__kernel_text_address(addr)) continue; /* * Don't print regs->ip again if it was already printed * by show_regs_if_on_stack(). */ if (regs && stack == &regs->ip) goto next; if (stack == ret_addr_p) reliable = 1; /* * When function graph tracing is enabled for a * function, its return address on the stack is * replaced with the address of an ftrace handler * (return_to_handler). In that case, before printing * the "real" address, we want to print the handler * address as an "unreliable" hint that function graph * tracing was involved. */ real_addr = ftrace_graph_ret_addr(task, &graph_idx, addr, stack); if (real_addr != addr) printk_stack_address(addr, 0, log_lvl); printk_stack_address(real_addr, reliable, log_lvl); if (!reliable) continue; next: /* * Get the next frame from the unwinder. No need to * check for an error: if anything goes wrong, the rest * of the addresses will just be printed as unreliable. */ unwind_next_frame(&state); /* if the frame has entry regs, print them */ regs = unwind_get_entry_regs(&state, &partial); if (regs) show_regs_if_on_stack(&stack_info, regs, partial, log_lvl); } if (stack_name) printk("%s </%s>\n", log_lvl, stack_name); } } void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { task = task ? : current; /* * Stack frames below this one aren't interesting. Don't show them * if we're printing for %current. */ if (!sp && task == current) sp = get_stack_pointer(current, NULL); show_trace_log_lvl(task, NULL, sp, loglvl); } void show_stack_regs(struct pt_regs *regs) { show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT); } static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; static int die_owner = -1; static unsigned int die_nest_count; unsigned long oops_begin(void) { int cpu; unsigned long flags; oops_enter(); /* racy, but better than risking deadlock. */ raw_local_irq_save(flags); cpu = smp_processor_id(); if (!arch_spin_trylock(&die_lock)) { if (cpu == die_owner) /* nested oops. should stop eventually */; else arch_spin_lock(&die_lock); } die_nest_count++; die_owner = cpu; console_verbose(); bust_spinlocks(1); return flags; } NOKPROBE_SYMBOL(oops_begin); void __noreturn rewind_stack_and_make_dead(int signr); void oops_end(unsigned long flags, struct pt_regs *regs, int signr) { if (regs && kexec_should_crash(current)) crash_kexec(regs); bust_spinlocks(0); die_owner = -1; add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); die_nest_count--; if (!die_nest_count) /* Nest count reaches zero, release the lock. 
*/ arch_spin_unlock(&die_lock); raw_local_irq_restore(flags); oops_exit(); /* Executive summary in case the oops scrolled away */ __show_regs(&exec_summary_regs, SHOW_REGS_ALL, KERN_DEFAULT); if (!signr) return; if (in_interrupt()) panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); /* * We're not going to return, but we might be on an IST stack or * have very little stack space left. Rewind the stack and kill * the task. * Before we rewind the stack, we have to tell KASAN that we're going to * reuse the task stack and that existing poisons are invalid. */ kasan_unpoison_task_stack(current); rewind_stack_and_make_dead(signr); } NOKPROBE_SYMBOL(oops_end); static void __die_header(const char *str, struct pt_regs *regs, long err) { const char *pr = ""; /* Save the regs of the first oops for the executive summary later. */ if (!die_counter) exec_summary_regs = *regs; if (IS_ENABLED(CONFIG_PREEMPTION)) pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT"; printk(KERN_DEFAULT "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter, pr, IS_ENABLED(CONFIG_SMP) ? " SMP" : "", debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "", IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ? (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : ""); } NOKPROBE_SYMBOL(__die_header); static int __die_body(const char *str, struct pt_regs *regs, long err) { show_regs(regs); print_modules(); if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP) return 1; return 0; } NOKPROBE_SYMBOL(__die_body); int __die(const char *str, struct pt_regs *regs, long err) { __die_header(str, regs, err); return __die_body(str, regs, err); } NOKPROBE_SYMBOL(__die); /* * This is gone through when something in the kernel has done something bad * and is about to be terminated: */ void die(const char *str, struct pt_regs *regs, long err) { unsigned long flags = oops_begin(); int sig = SIGSEGV; if (__die(str, regs, err)) sig = 0; oops_end(flags, regs, sig); } void die_addr(const char *str, struct pt_regs *regs, long err, long gp_addr) { unsigned long flags = oops_begin(); int sig = SIGSEGV; __die_header(str, regs, err); if (gp_addr) kasan_non_canonical_hook(gp_addr); if (__die_body(str, regs, err)) sig = 0; oops_end(flags, regs, sig); } void show_regs(struct pt_regs *regs) { enum show_regs_mode print_kernel_regs; show_regs_print_info(KERN_DEFAULT); print_kernel_regs = user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL; __show_regs(regs, print_kernel_regs, KERN_DEFAULT); /* * When in-kernel, we also print out the stack at the time of the fault.. */ if (!user_mode(regs)) show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT); }
linux-master
arch/x86/kernel/dumpstack.c
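The long comment before show_opcodes() above explains the 2/3rds prologue: 42 opcode bytes before the faulting RIP, the byte at RIP in angle brackets, then 21 bytes after it. A small sketch of just that output layout, formatting a dummy byte buffer instead of real kernel opcodes:

/* Sketch: mimic the layout of the "Code:" line printed by show_opcodes(). */
#include <stdio.h>

#define PROLOGUE_SIZE	42
#define EPILOGUE_SIZE	21
#define OPCODE_BUFSIZE	(PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)

static void print_code_line(const unsigned char *rip)
{
	const unsigned char *p = rip - PROLOGUE_SIZE;
	int i;

	printf("Code: ");
	for (i = 0; i < PROLOGUE_SIZE; i++)
		printf("%02x ", p[i]);
	printf("<%02x> ", rip[0]);		/* byte the CPU was executing */
	for (i = 1; i <= EPILOGUE_SIZE; i++)
		printf("%02x ", rip[i]);
	printf("\n");
}

int main(void)
{
	unsigned char buf[OPCODE_BUFSIZE];
	int i;

	for (i = 0; i < OPCODE_BUFSIZE; i++)
		buf[i] = (unsigned char)i;	/* stand-in for copied opcodes */

	/* Pretend the instruction pointer sits PROLOGUE_SIZE bytes in. */
	print_code_line(buf + PROLOGUE_SIZE);
	return 0;
}

The in-kernel version fills the buffer with copy_code(), which falls back to copy_from_user_nmi() for user-mode RIPs and refuses to read other tasks' user memory.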
// SPDX-License-Identifier: GPL-2.0-only /* * Architecture specific sysfs attributes in /sys/kernel * * Copyright (C) 2007, Intel Corp. * Huang Ying <[email protected]> * Copyright (C) 2013, 2013 Red Hat, Inc. * Dave Young <[email protected]> */ #include <linux/kobject.h> #include <linux/string.h> #include <linux/sysfs.h> #include <linux/init.h> #include <linux/stat.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/io.h> #include <asm/setup.h> static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "0x%04x\n", boot_params.hdr.version); } static struct kobj_attribute boot_params_version_attr = __ATTR_RO(version); static ssize_t boot_params_data_read(struct file *fp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { memcpy(buf, (void *)&boot_params + off, count); return count; } static struct bin_attribute boot_params_data_attr = { .attr = { .name = "data", .mode = S_IRUGO, }, .read = boot_params_data_read, .size = sizeof(boot_params), }; static struct attribute *boot_params_version_attrs[] = { &boot_params_version_attr.attr, NULL, }; static struct bin_attribute *boot_params_data_attrs[] = { &boot_params_data_attr, NULL, }; static const struct attribute_group boot_params_attr_group = { .attrs = boot_params_version_attrs, .bin_attrs = boot_params_data_attrs, }; static int kobj_to_setup_data_nr(struct kobject *kobj, int *nr) { const char *name; name = kobject_name(kobj); return kstrtoint(name, 10, nr); } static int get_setup_data_paddr(int nr, u64 *paddr) { int i = 0; struct setup_data *data; u64 pa_data = boot_params.hdr.setup_data; while (pa_data) { if (nr == i) { *paddr = pa_data; return 0; } data = memremap(pa_data, sizeof(*data), MEMREMAP_WB); if (!data) return -ENOMEM; pa_data = data->next; memunmap(data); i++; } return -EINVAL; } static int __init get_setup_data_size(int nr, size_t *size) { u64 pa_data = boot_params.hdr.setup_data, pa_next; struct setup_indirect *indirect; struct setup_data *data; int i = 0; u32 len; while (pa_data) { data = memremap(pa_data, sizeof(*data), MEMREMAP_WB); if (!data) return -ENOMEM; pa_next = data->next; if (nr == i) { if (data->type == SETUP_INDIRECT) { len = sizeof(*data) + data->len; memunmap(data); data = memremap(pa_data, len, MEMREMAP_WB); if (!data) return -ENOMEM; indirect = (struct setup_indirect *)data->data; if (indirect->type != SETUP_INDIRECT) *size = indirect->len; else *size = data->len; } else { *size = data->len; } memunmap(data); return 0; } pa_data = pa_next; memunmap(data); i++; } return -EINVAL; } static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct setup_indirect *indirect; struct setup_data *data; int nr, ret; u64 paddr; u32 len; ret = kobj_to_setup_data_nr(kobj, &nr); if (ret) return ret; ret = get_setup_data_paddr(nr, &paddr); if (ret) return ret; data = memremap(paddr, sizeof(*data), MEMREMAP_WB); if (!data) return -ENOMEM; if (data->type == SETUP_INDIRECT) { len = sizeof(*data) + data->len; memunmap(data); data = memremap(paddr, len, MEMREMAP_WB); if (!data) return -ENOMEM; indirect = (struct setup_indirect *)data->data; ret = sprintf(buf, "0x%x\n", indirect->type); } else { ret = sprintf(buf, "0x%x\n", data->type); } memunmap(data); return ret; } static ssize_t setup_data_data_read(struct file *fp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct setup_indirect *indirect; struct setup_data *data; int nr, ret = 0; u64 paddr, 
len; void *p; ret = kobj_to_setup_data_nr(kobj, &nr); if (ret) return ret; ret = get_setup_data_paddr(nr, &paddr); if (ret) return ret; data = memremap(paddr, sizeof(*data), MEMREMAP_WB); if (!data) return -ENOMEM; if (data->type == SETUP_INDIRECT) { len = sizeof(*data) + data->len; memunmap(data); data = memremap(paddr, len, MEMREMAP_WB); if (!data) return -ENOMEM; indirect = (struct setup_indirect *)data->data; if (indirect->type != SETUP_INDIRECT) { paddr = indirect->addr; len = indirect->len; } else { /* * Even though this is technically undefined, return * the data as though it is a normal setup_data struct. * This will at least allow it to be inspected. */ paddr += sizeof(*data); len = data->len; } } else { paddr += sizeof(*data); len = data->len; } if (off > len) { ret = -EINVAL; goto out; } if (count > len - off) count = len - off; if (!count) goto out; ret = count; p = memremap(paddr, len, MEMREMAP_WB); if (!p) { ret = -ENOMEM; goto out; } memcpy(buf, p + off, count); memunmap(p); out: memunmap(data); return ret; } static struct kobj_attribute type_attr = __ATTR_RO(type); static struct bin_attribute data_attr __ro_after_init = { .attr = { .name = "data", .mode = S_IRUGO, }, .read = setup_data_data_read, }; static struct attribute *setup_data_type_attrs[] = { &type_attr.attr, NULL, }; static struct bin_attribute *setup_data_data_attrs[] = { &data_attr, NULL, }; static const struct attribute_group setup_data_attr_group = { .attrs = setup_data_type_attrs, .bin_attrs = setup_data_data_attrs, }; static int __init create_setup_data_node(struct kobject *parent, struct kobject **kobjp, int nr) { int ret = 0; size_t size; struct kobject *kobj; char name[16]; /* should be enough for setup_data nodes numbers */ snprintf(name, 16, "%d", nr); kobj = kobject_create_and_add(name, parent); if (!kobj) return -ENOMEM; ret = get_setup_data_size(nr, &size); if (ret) goto out_kobj; data_attr.size = size; ret = sysfs_create_group(kobj, &setup_data_attr_group); if (ret) goto out_kobj; *kobjp = kobj; return 0; out_kobj: kobject_put(kobj); return ret; } static void __init cleanup_setup_data_node(struct kobject *kobj) { sysfs_remove_group(kobj, &setup_data_attr_group); kobject_put(kobj); } static int __init get_setup_data_total_num(u64 pa_data, int *nr) { int ret = 0; struct setup_data *data; *nr = 0; while (pa_data) { *nr += 1; data = memremap(pa_data, sizeof(*data), MEMREMAP_WB); if (!data) { ret = -ENOMEM; goto out; } pa_data = data->next; memunmap(data); } out: return ret; } static int __init create_setup_data_nodes(struct kobject *parent) { struct kobject *setup_data_kobj, **kobjp; u64 pa_data; int i, j, nr, ret = 0; pa_data = boot_params.hdr.setup_data; if (!pa_data) return 0; setup_data_kobj = kobject_create_and_add("setup_data", parent); if (!setup_data_kobj) { ret = -ENOMEM; goto out; } ret = get_setup_data_total_num(pa_data, &nr); if (ret) goto out_setup_data_kobj; kobjp = kmalloc_array(nr, sizeof(*kobjp), GFP_KERNEL); if (!kobjp) { ret = -ENOMEM; goto out_setup_data_kobj; } for (i = 0; i < nr; i++) { ret = create_setup_data_node(setup_data_kobj, kobjp + i, i); if (ret) goto out_clean_nodes; } kfree(kobjp); return 0; out_clean_nodes: for (j = i - 1; j >= 0; j--) cleanup_setup_data_node(*(kobjp + j)); kfree(kobjp); out_setup_data_kobj: kobject_put(setup_data_kobj); out: return ret; } static int __init boot_params_ksysfs_init(void) { int ret; struct kobject *boot_params_kobj; boot_params_kobj = kobject_create_and_add("boot_params", kernel_kobj); if (!boot_params_kobj) { ret = -ENOMEM; goto out; } 
ret = sysfs_create_group(boot_params_kobj, &boot_params_attr_group); if (ret) goto out_boot_params_kobj; ret = create_setup_data_nodes(boot_params_kobj); if (ret) goto out_create_group; return 0; out_create_group: sysfs_remove_group(boot_params_kobj, &boot_params_attr_group); out_boot_params_kobj: kobject_put(boot_params_kobj); out: return ret; } arch_initcall(boot_params_ksysfs_init);
linux-master
arch/x86/kernel/ksysfs.c
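ksysfs.c above publishes the boot parameters under /sys/kernel/boot_params: a version attribute, a raw data blob, and one numbered directory per setup_data entry with its own type and data files. A small sketch that walks those nodes from userspace, assuming an x86 kernel that built this code:

/* Sketch: read the sysfs nodes created by boot_params_ksysfs_init(). */
#include <stdio.h>

int main(void)
{
	char path[64], buf[32];
	FILE *f;
	int nr;

	/* Boot protocol version, as formatted by version_show(). */
	f = fopen("/sys/kernel/boot_params/version", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("boot protocol version: %s", buf);
		fclose(f);
	}

	/* Each setup_data entry is a numbered directory with a "type" file. */
	for (nr = 0; ; nr++) {
		snprintf(path, sizeof(path),
			 "/sys/kernel/boot_params/setup_data/%d/type", nr);
		f = fopen(path, "r");
		if (!f)
			break;		/* no more entries */
		if (fgets(buf, sizeof(buf), f))
			printf("setup_data %d type: %s", nr, buf);
		fclose(f);
	}
	return 0;
}

The data file next to each type attribute exposes the payload itself; for SETUP_INDIRECT entries, setup_data_data_read() above resolves the indirection before handing the bytes back.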
/* * Stack trace management functions * * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <[email protected]> */ #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <linux/export.h> #include <linux/uaccess.h> #include <asm/stacktrace.h> #include <asm/unwind.h> void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs) { struct unwind_state state; unsigned long addr; if (regs && !consume_entry(cookie, regs->ip)) return; for (unwind_start(&state, task, regs, NULL); !unwind_done(&state); unwind_next_frame(&state)) { addr = unwind_get_return_address(&state); if (!addr || !consume_entry(cookie, addr)) break; } } int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task) { struct unwind_state state; struct pt_regs *regs; unsigned long addr; for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) { regs = unwind_get_entry_regs(&state, NULL); if (regs) { /* Success path for user tasks */ if (user_mode(regs)) return 0; /* * Kernel mode registers on the stack indicate an * in-kernel interrupt or exception (e.g., preemption * or a page fault), which can make frame pointers * unreliable. */ if (IS_ENABLED(CONFIG_FRAME_POINTER)) return -EINVAL; } addr = unwind_get_return_address(&state); /* * A NULL or invalid return address probably means there's some * generated code which __kernel_text_address() doesn't know * about. */ if (!addr) return -EINVAL; if (!consume_entry(cookie, addr)) return -EINVAL; } /* Check for stack corruption */ if (unwind_error(&state)) return -EINVAL; return 0; } /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */ struct stack_frame_user { const void __user *next_fp; unsigned long ret_addr; }; static int copy_stack_frame(const struct stack_frame_user __user *fp, struct stack_frame_user *frame) { int ret; if (!__access_ok(fp, sizeof(*frame))) return 0; ret = 1; pagefault_disable(); if (__get_user(frame->next_fp, &fp->next_fp) || __get_user(frame->ret_addr, &fp->ret_addr)) ret = 0; pagefault_enable(); return ret; } void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, const struct pt_regs *regs) { const void __user *fp = (const void __user *)regs->bp; if (!consume_entry(cookie, regs->ip)) return; while (1) { struct stack_frame_user frame; frame.next_fp = NULL; frame.ret_addr = 0; if (!copy_stack_frame(fp, &frame)) break; if ((unsigned long)fp < regs->sp) break; if (!frame.ret_addr) break; if (!consume_entry(cookie, frame.ret_addr)) break; fp = frame.next_fp; } }
linux-master
arch/x86/kernel/stacktrace.c
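arch_stack_walk_user() above walks the user stack purely by chasing saved frame pointers: each frame is a {next_fp, ret_addr} pair at %rbp. The sketch below does the same walk on its own stack; it assumes the demo is compiled with -fno-omit-frame-pointer so the chain actually exists, and it adds crude sanity checks because libc frames may not maintain %rbp.

/* Sketch: walk our own frame-pointer chain the way arch_stack_walk_user()
 * walks a user task's, using the same {next_fp, ret_addr} frame layout.
 * Build with -fno-omit-frame-pointer. */
#include <stdint.h>
#include <stdio.h>

struct stack_frame_user {
	const void *next_fp;		/* saved caller %rbp */
	unsigned long ret_addr;		/* return address pushed by call */
};

static void __attribute__((noinline)) walk(void)
{
	const struct stack_frame_user *fp = __builtin_frame_address(0);
	int depth = 0;

	while (fp && fp->ret_addr && depth++ < 16) {
		printf("ret_addr = %#lx\n", fp->ret_addr);
		/* Like the regs->sp check in arch_stack_walk_user(), only
		 * follow frames that move up the stack; also bail out on
		 * implausible jumps (libc may omit frame pointers). */
		if ((uintptr_t)fp->next_fp <= (uintptr_t)fp ||
		    (uintptr_t)fp->next_fp - (uintptr_t)fp > 0x100000)
			break;
		fp = fp->next_fp;
	}
}

static void __attribute__((noinline)) leaf(void)  { walk(); }
static void __attribute__((noinline)) outer(void) { leaf(); }

int main(void)
{
	outer();
	return 0;
}

The kernel version reads each frame with copy_stack_frame() under pagefault_disable(), since a user frame pointer may point at unmapped memory.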
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs */ #include <linux/sched/debug.h> #include <linux/kallsyms.h> #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/hardirq.h> #include <linux/kdebug.h> #include <linux/export.h> #include <linux/ptrace.h> #include <linux/kexec.h> #include <linux/sysfs.h> #include <linux/bug.h> #include <linux/nmi.h> #include <asm/cpu_entry_area.h> #include <asm/stacktrace.h> static const char * const exception_stack_names[] = { [ ESTACK_DF ] = "#DF", [ ESTACK_NMI ] = "NMI", [ ESTACK_DB ] = "#DB", [ ESTACK_MCE ] = "#MC", [ ESTACK_VC ] = "#VC", [ ESTACK_VC2 ] = "#VC2", }; const char *stack_type_name(enum stack_type type) { BUILD_BUG_ON(N_EXCEPTION_STACKS != 6); if (type == STACK_TYPE_TASK) return "TASK"; if (type == STACK_TYPE_IRQ) return "IRQ"; if (type == STACK_TYPE_SOFTIRQ) return "SOFTIRQ"; if (type == STACK_TYPE_ENTRY) { /* * On 64-bit, we have a generic entry stack that we * use for all the kernel entry points, including * SYSENTER. */ return "ENTRY_TRAMPOLINE"; } if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST) return exception_stack_names[type - STACK_TYPE_EXCEPTION]; return NULL; } /** * struct estack_pages - Page descriptor for exception stacks * @offs: Offset from the start of the exception stack area * @size: Size of the exception stack * @type: Type to store in the stack_info struct */ struct estack_pages { u32 offs; u16 size; u16 type; }; #define EPAGERANGE(st) \ [PFN_DOWN(CEA_ESTACK_OFFS(st)) ... \ PFN_DOWN(CEA_ESTACK_OFFS(st) + CEA_ESTACK_SIZE(st) - 1)] = { \ .offs = CEA_ESTACK_OFFS(st), \ .size = CEA_ESTACK_SIZE(st), \ .type = STACK_TYPE_EXCEPTION + ESTACK_ ##st, } /* * Array of exception stack page descriptors. If the stack is larger than * PAGE_SIZE, all pages covering a particular stack will have the same * info. The guard pages including the not mapped DB2 stack are zeroed * out. */ static const struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = { EPAGERANGE(DF), EPAGERANGE(NMI), EPAGERANGE(DB), EPAGERANGE(MCE), EPAGERANGE(VC), EPAGERANGE(VC2), }; static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info) { unsigned long begin, end, stk = (unsigned long)stack; const struct estack_pages *ep; struct pt_regs *regs; unsigned int k; BUILD_BUG_ON(N_EXCEPTION_STACKS != 6); begin = (unsigned long)__this_cpu_read(cea_exception_stacks); /* * Handle the case where stack trace is collected _before_ * cea_exception_stacks had been initialized. */ if (!begin) return false; end = begin + sizeof(struct cea_exception_stacks); /* Bail if @stack is outside the exception stack area. */ if (stk < begin || stk >= end) return false; /* Calc page offset from start of exception stacks */ k = (stk - begin) >> PAGE_SHIFT; /* Lookup the page descriptor */ ep = &estack_pages[k]; /* Guard page? */ if (!ep->size) return false; begin += (unsigned long)ep->offs; end = begin + (unsigned long)ep->size; regs = (struct pt_regs *)end - 1; info->type = ep->type; info->begin = (unsigned long *)begin; info->end = (unsigned long *)end; info->next_sp = (unsigned long *)regs->sp; return true; } static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info) { unsigned long *end = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr); unsigned long *begin; /* * @end points directly to the top most stack entry to avoid a -8 * adjustment in the stack switch hotpath. 
Adjust it back before * calculating @begin. */ end++; begin = end - (IRQ_STACK_SIZE / sizeof(long)); /* * Due to the switching logic RSP can never be == @end because the * final operation is 'popq %rsp' which means after that RSP points * to the original stack and not to @end. */ if (stack < begin || stack >= end) return false; info->type = STACK_TYPE_IRQ; info->begin = begin; info->end = end; /* * The next stack pointer is stored at the top of the irq stack * before switching to the irq stack. Actual stack entries are all * below that. */ info->next_sp = (unsigned long *)*(end - 1); return true; } bool noinstr get_stack_info_noinstr(unsigned long *stack, struct task_struct *task, struct stack_info *info) { if (in_task_stack(stack, task, info)) return true; if (task != current) return false; if (in_exception_stack(stack, info)) return true; if (in_irq_stack(stack, info)) return true; if (in_entry_stack(stack, info)) return true; return false; } int get_stack_info(unsigned long *stack, struct task_struct *task, struct stack_info *info, unsigned long *visit_mask) { task = task ? : current; if (!stack) goto unknown; if (!get_stack_info_noinstr(stack, task, info)) goto unknown; /* * Make sure we don't iterate through any given stack more than once. * If it comes up a second time then there's something wrong going on: * just break out and report an unknown stack type. */ if (visit_mask) { if (*visit_mask & (1UL << info->type)) { if (task == current) printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type); goto unknown; } *visit_mask |= 1UL << info->type; } return 0; unknown: info->type = STACK_TYPE_UNKNOWN; return -EINVAL; }
linux-master
arch/x86/kernel/dumpstack_64.c
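in_exception_stack() above classifies an address with a single shift and table lookup: one descriptor per page of the exception-stack area, with guard pages left zeroed. The toy model below reproduces that lookup with made-up sizes and page positions; the real descriptors also carry the stack's offset so the caller can compute its bounds and next_sp.

/* Toy model of the estack_pages[] page-descriptor lookup. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define AREA_PAGES	8

enum toy_stack_type { TOY_GUARD = 0, TOY_DF = 1, TOY_NMI = 2 };

struct toy_estack_page {
	unsigned short size;		/* 0 means guard page */
	unsigned short type;
};

/* Pages 1-2: #DF stack, pages 4-5: NMI stack, everything else guard. */
static const struct toy_estack_page pages[AREA_PAGES] = {
	[1] = { .size = 2 << PAGE_SHIFT, .type = TOY_DF  },
	[2] = { .size = 2 << PAGE_SHIFT, .type = TOY_DF  },
	[4] = { .size = 2 << PAGE_SHIFT, .type = TOY_NMI },
	[5] = { .size = 2 << PAGE_SHIFT, .type = TOY_NMI },
};

static enum toy_stack_type classify(unsigned long begin, unsigned long addr)
{
	unsigned int k;

	/* Bail if the address is outside the whole area, as the real code does. */
	if (addr < begin ||
	    addr >= begin + ((unsigned long)AREA_PAGES << PAGE_SHIFT))
		return TOY_GUARD;

	k = (addr - begin) >> PAGE_SHIFT;	/* page index into the area */
	return pages[k].size ? (enum toy_stack_type)pages[k].type : TOY_GUARD;
}

int main(void)
{
	unsigned long begin = 0x100000UL;

	printf("0x101800 -> %d (expect %d, #DF)\n",
	       classify(begin, 0x101800UL), TOY_DF);
	printf("0x103000 -> %d (expect %d, guard)\n",
	       classify(begin, 0x103000UL), TOY_GUARD);
	return 0;
}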
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/errno.h> #include <linux/unistd.h> #include <linux/uaccess.h> #include <linux/syscalls.h> #include <asm/ucontext.h> #include <asm/fpu/signal.h> #include <asm/sighandling.h> #include <asm/syscall.h> #include <asm/sigframe.h> #include <asm/signal.h> /* * If regs->ss will cause an IRET fault, change it. Otherwise leave it * alone. Using this generally makes no sense unless * user_64bit_mode(regs) would return true. */ static void force_valid_ss(struct pt_regs *regs) { u32 ar; asm volatile ("lar %[old_ss], %[ar]\n\t" "jz 1f\n\t" /* If invalid: */ "xorl %[ar], %[ar]\n\t" /* set ar = 0 */ "1:" : [ar] "=r" (ar) : [old_ss] "rm" ((u16)regs->ss)); /* * For a valid 64-bit user context, we need DPL 3, type * read-write data or read-write exp-down data, and S and P * set. We can't use VERW because VERW doesn't check the * P bit. */ ar &= AR_DPL_MASK | AR_S | AR_P | AR_TYPE_MASK; if (ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA) && ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA_EXPDOWN)) regs->ss = __USER_DS; } static bool restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, unsigned long uc_flags) { struct sigcontext sc; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; if (copy_from_user(&sc, usc, offsetof(struct sigcontext, reserved1))) return false; regs->bx = sc.bx; regs->cx = sc.cx; regs->dx = sc.dx; regs->si = sc.si; regs->di = sc.di; regs->bp = sc.bp; regs->ax = sc.ax; regs->sp = sc.sp; regs->ip = sc.ip; regs->r8 = sc.r8; regs->r9 = sc.r9; regs->r10 = sc.r10; regs->r11 = sc.r11; regs->r12 = sc.r12; regs->r13 = sc.r13; regs->r14 = sc.r14; regs->r15 = sc.r15; /* Get CS/SS and force CPL3 */ regs->cs = sc.cs | 0x03; regs->ss = sc.ss | 0x03; regs->flags = (regs->flags & ~FIX_EFLAGS) | (sc.flags & FIX_EFLAGS); /* disable syscall checks */ regs->orig_ax = -1; /* * Fix up SS if needed for the benefit of old DOSEMU and * CRIU. 
*/ if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs))) force_valid_ss(regs); return fpu__restore_sig((void __user *)sc.fpstate, 0); } static __always_inline int __unsafe_setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, struct pt_regs *regs, unsigned long mask) { unsafe_put_user(regs->di, &sc->di, Efault); unsafe_put_user(regs->si, &sc->si, Efault); unsafe_put_user(regs->bp, &sc->bp, Efault); unsafe_put_user(regs->sp, &sc->sp, Efault); unsafe_put_user(regs->bx, &sc->bx, Efault); unsafe_put_user(regs->dx, &sc->dx, Efault); unsafe_put_user(regs->cx, &sc->cx, Efault); unsafe_put_user(regs->ax, &sc->ax, Efault); unsafe_put_user(regs->r8, &sc->r8, Efault); unsafe_put_user(regs->r9, &sc->r9, Efault); unsafe_put_user(regs->r10, &sc->r10, Efault); unsafe_put_user(regs->r11, &sc->r11, Efault); unsafe_put_user(regs->r12, &sc->r12, Efault); unsafe_put_user(regs->r13, &sc->r13, Efault); unsafe_put_user(regs->r14, &sc->r14, Efault); unsafe_put_user(regs->r15, &sc->r15, Efault); unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault); unsafe_put_user(current->thread.error_code, &sc->err, Efault); unsafe_put_user(regs->ip, &sc->ip, Efault); unsafe_put_user(regs->flags, &sc->flags, Efault); unsafe_put_user(regs->cs, &sc->cs, Efault); unsafe_put_user(0, &sc->gs, Efault); unsafe_put_user(0, &sc->fs, Efault); unsafe_put_user(regs->ss, &sc->ss, Efault); unsafe_put_user(fpstate, (unsigned long __user *)&sc->fpstate, Efault); /* non-iBCS2 extensions.. */ unsafe_put_user(mask, &sc->oldmask, Efault); unsafe_put_user(current->thread.cr2, &sc->cr2, Efault); return 0; Efault: return -EFAULT; } #define unsafe_put_sigcontext(sc, fp, regs, set, label) \ do { \ if (__unsafe_setup_sigcontext(sc, fp, regs, set->sig[0])) \ goto label; \ } while(0); #define unsafe_put_sigmask(set, frame, label) \ unsafe_put_user(*(__u64 *)(set), \ (__u64 __user *)&(frame)->uc.uc_sigmask, \ label) static unsigned long frame_uc_flags(struct pt_regs *regs) { unsigned long flags; if (boot_cpu_has(X86_FEATURE_XSAVE)) flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS; else flags = UC_SIGCONTEXT_SS; if (likely(user_64bit_mode(regs))) flags |= UC_STRICT_RESTORE_SS; return flags; } int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) { sigset_t *set = sigmask_to_save(); struct rt_sigframe __user *frame; void __user *fp = NULL; unsigned long uc_flags; /* x86-64 should always use SA_RESTORER. */ if (!(ksig->ka.sa.sa_flags & SA_RESTORER)) return -EFAULT; frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe), &fp); uc_flags = frame_uc_flags(regs); if (setup_signal_shadow_stack(ksig)) return -EFAULT; if (!user_access_begin(frame, sizeof(*frame))) return -EFAULT; /* Create the ucontext. */ unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault); unsafe_put_user(0, &frame->uc.uc_link, Efault); unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault); /* Set up to return from userspace. If provided, use a stub already in userspace. */ unsafe_put_user(ksig->ka.sa.sa_restorer, &frame->pretcode, Efault); unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault); unsafe_put_sigmask(set, frame, Efault); user_access_end(); if (ksig->ka.sa.sa_flags & SA_SIGINFO) { if (copy_siginfo_to_user(&frame->info, &ksig->info)) return -EFAULT; } /* Set up registers for signal handler */ regs->di = ksig->sig; /* In case the signal handler was declared without prototypes */ regs->ax = 0; /* This also works for non SA_SIGINFO handlers because they expect the next argument after the signal number on the stack. 
*/ regs->si = (unsigned long)&frame->info; regs->dx = (unsigned long)&frame->uc; regs->ip = (unsigned long) ksig->ka.sa.sa_handler; regs->sp = (unsigned long)frame; /* * Set up the CS and SS registers to run signal handlers in * 64-bit mode, even if the handler happens to be interrupting * 32-bit or 16-bit code. * * SS is subtle. In 64-bit mode, we don't need any particular * SS descriptor, but we do need SS to be valid. It's possible * that the old SS is entirely bogus -- this can happen if the * signal we're trying to deliver is #GP or #SS caused by a bad * SS value. We also have a compatibility issue here: DOSEMU * relies on the contents of the SS register indicating the * SS value at the time of the signal, even though that code in * DOSEMU predates sigreturn's ability to restore SS. (DOSEMU * avoids relying on sigreturn to restore SS; instead it uses * a trampoline.) So we do our best: if the old SS was valid, * we keep it. Otherwise we replace it. */ regs->cs = __USER_CS; if (unlikely(regs->ss != __USER_DS)) force_valid_ss(regs); return 0; Efault: user_access_end(); return -EFAULT; } /* * Do a signal return; undo the signal stack. */ SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe __user *frame; sigset_t set; unsigned long uc_flags; frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__get_user(*(__u64 *)&set, (__u64 __user *)&frame->uc.uc_sigmask)) goto badframe; if (__get_user(uc_flags, &frame->uc.uc_flags)) goto badframe; set_current_blocked(&set); if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags)) goto badframe; if (restore_signal_shadow_stack()) goto badframe; if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return regs->ax; badframe: signal_fault(regs, frame, "rt_sigreturn"); return 0; } #ifdef CONFIG_X86_X32_ABI static int x32_copy_siginfo_to_user(struct compat_siginfo __user *to, const struct kernel_siginfo *from) { struct compat_siginfo new; copy_siginfo_to_external32(&new, from); if (from->si_signo == SIGCHLD) { new._sifields._sigchld_x32._utime = from->si_utime; new._sifields._sigchld_x32._stime = from->si_stime; } if (copy_to_user(to, &new, sizeof(struct compat_siginfo))) return -EFAULT; return 0; } int copy_siginfo_to_user32(struct compat_siginfo __user *to, const struct kernel_siginfo *from) { if (in_x32_syscall()) return x32_copy_siginfo_to_user(to, from); return __copy_siginfo_to_user32(to, from); } int x32_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) { compat_sigset_t *set = (compat_sigset_t *) sigmask_to_save(); struct rt_sigframe_x32 __user *frame; unsigned long uc_flags; void __user *restorer; void __user *fp = NULL; if (!(ksig->ka.sa.sa_flags & SA_RESTORER)) return -EFAULT; frame = get_sigframe(ksig, regs, sizeof(*frame), &fp); uc_flags = frame_uc_flags(regs); if (!user_access_begin(frame, sizeof(*frame))) return -EFAULT; /* Create the ucontext. 
*/ unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault); unsafe_put_user(0, &frame->uc.uc_link, Efault); unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->sp, Efault); unsafe_put_user(0, &frame->uc.uc__pad0, Efault); restorer = ksig->ka.sa.sa_restorer; unsafe_put_user(restorer, (unsigned long __user *)&frame->pretcode, Efault); unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault); unsafe_put_sigmask(set, frame, Efault); user_access_end(); if (ksig->ka.sa.sa_flags & SA_SIGINFO) { if (x32_copy_siginfo_to_user(&frame->info, &ksig->info)) return -EFAULT; } /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ksig->ka.sa.sa_handler; /* We use the x32 calling convention here... */ regs->di = ksig->sig; regs->si = (unsigned long) &frame->info; regs->dx = (unsigned long) &frame->uc; loadsegment(ds, __USER_DS); loadsegment(es, __USER_DS); regs->cs = __USER_CS; regs->ss = __USER_DS; return 0; Efault: user_access_end(); return -EFAULT; } COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe_x32 __user *frame; sigset_t set; unsigned long uc_flags; frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8); if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], (__u64 __user *)&frame->uc.uc_sigmask)) goto badframe; if (__get_user(uc_flags, &frame->uc.uc_flags)) goto badframe; set_current_blocked(&set); if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags)) goto badframe; if (compat_restore_altstack(&frame->uc.uc_stack)) goto badframe; return regs->ax; badframe: signal_fault(regs, frame, "x32 rt_sigreturn"); return 0; } #endif /* CONFIG_X86_X32_ABI */ #ifdef CONFIG_COMPAT void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact) { if (!act) return; if (in_ia32_syscall()) act->sa.sa_flags |= SA_IA32_ABI; if (in_x32_syscall()) act->sa.sa_flags |= SA_X32_ABI; } #endif /* CONFIG_COMPAT */ /* * If adding a new si_code, there is probably new data in * the siginfo. Make sure folks bumping the si_code * limits also have to look at this code. Make sure any * new fields are handled in copy_siginfo_to_user32()! */ static_assert(NSIGILL == 11); static_assert(NSIGFPE == 15); static_assert(NSIGSEGV == 10); static_assert(NSIGBUS == 5); static_assert(NSIGTRAP == 6); static_assert(NSIGCHLD == 6); static_assert(NSIGSYS == 2); /* This is part of the ABI and can never change in size: */ static_assert(sizeof(siginfo_t) == 128); /* This is a part of the ABI and can never change in alignment */ static_assert(__alignof__(siginfo_t) == 8); /* * The offsets of all the (unioned) si_fields are fixed * in the ABI, of course. Make sure none of them ever * move and are always at the beginning: */ static_assert(offsetof(siginfo_t, si_signo) == 0); static_assert(offsetof(siginfo_t, si_errno) == 4); static_assert(offsetof(siginfo_t, si_code) == 8); /* * Ensure that the size of each si_field never changes. * If it does, it is a sign that the * copy_siginfo_to_user32() code below needs to updated * along with the size in the CHECK_SI_SIZE(). * * We repeat this check for both the generic and compat * siginfos. * * Note: it is OK for these to grow as long as the whole * structure stays within the padding size (checked * above). 
*/ #define CHECK_SI_OFFSET(name) \ static_assert(offsetof(siginfo_t, _sifields) == \ offsetof(siginfo_t, _sifields.name)) #define CHECK_SI_SIZE(name, size) \ static_assert(sizeof_field(siginfo_t, _sifields.name) == size) CHECK_SI_OFFSET(_kill); CHECK_SI_SIZE (_kill, 2*sizeof(int)); static_assert(offsetof(siginfo_t, si_pid) == 0x10); static_assert(offsetof(siginfo_t, si_uid) == 0x14); CHECK_SI_OFFSET(_timer); CHECK_SI_SIZE (_timer, 6*sizeof(int)); static_assert(offsetof(siginfo_t, si_tid) == 0x10); static_assert(offsetof(siginfo_t, si_overrun) == 0x14); static_assert(offsetof(siginfo_t, si_value) == 0x18); CHECK_SI_OFFSET(_rt); CHECK_SI_SIZE (_rt, 4*sizeof(int)); static_assert(offsetof(siginfo_t, si_pid) == 0x10); static_assert(offsetof(siginfo_t, si_uid) == 0x14); static_assert(offsetof(siginfo_t, si_value) == 0x18); CHECK_SI_OFFSET(_sigchld); CHECK_SI_SIZE (_sigchld, 8*sizeof(int)); static_assert(offsetof(siginfo_t, si_pid) == 0x10); static_assert(offsetof(siginfo_t, si_uid) == 0x14); static_assert(offsetof(siginfo_t, si_status) == 0x18); static_assert(offsetof(siginfo_t, si_utime) == 0x20); static_assert(offsetof(siginfo_t, si_stime) == 0x28); #ifdef CONFIG_X86_X32_ABI /* no _sigchld_x32 in the generic siginfo_t */ static_assert(sizeof_field(compat_siginfo_t, _sifields._sigchld_x32) == 7*sizeof(int)); static_assert(offsetof(compat_siginfo_t, _sifields) == offsetof(compat_siginfo_t, _sifields._sigchld_x32)); static_assert(offsetof(compat_siginfo_t, _sifields._sigchld_x32._utime) == 0x18); static_assert(offsetof(compat_siginfo_t, _sifields._sigchld_x32._stime) == 0x20); #endif CHECK_SI_OFFSET(_sigfault); CHECK_SI_SIZE (_sigfault, 8*sizeof(int)); static_assert(offsetof(siginfo_t, si_addr) == 0x10); static_assert(offsetof(siginfo_t, si_trapno) == 0x18); static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18); static_assert(offsetof(siginfo_t, si_lower) == 0x20); static_assert(offsetof(siginfo_t, si_upper) == 0x28); static_assert(offsetof(siginfo_t, si_pkey) == 0x20); static_assert(offsetof(siginfo_t, si_perf_data) == 0x18); static_assert(offsetof(siginfo_t, si_perf_type) == 0x20); static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24); CHECK_SI_OFFSET(_sigpoll); CHECK_SI_SIZE (_sigpoll, 4*sizeof(int)); static_assert(offsetof(siginfo_t, si_band) == 0x10); static_assert(offsetof(siginfo_t, si_fd) == 0x18); CHECK_SI_OFFSET(_sigsys); CHECK_SI_SIZE (_sigsys, 4*sizeof(int)); static_assert(offsetof(siginfo_t, si_call_addr) == 0x10); static_assert(offsetof(siginfo_t, si_syscall) == 0x18); static_assert(offsetof(siginfo_t, si_arch) == 0x1C); /* any new si_fields should be added here */
linux-master
arch/x86/kernel/signal_64.c
// SPDX-License-Identifier: GPL-2.0 /* * This file contains work-arounds for x86 and x86_64 platform bugs. */ #include <linux/dmi.h> #include <linux/pci.h> #include <linux/irq.h> #include <asm/hpet.h> #include <asm/setup.h> #include <asm/mce.h> #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI) static void quirk_intel_irqbalance(struct pci_dev *dev) { u8 config; u16 word; /* BIOS may enable hardware IRQ balancing for * E7520/E7320/E7525(revision ID 0x9 and below) * based platforms. * Disable SW irqbalance/affinity on those platforms. */ if (dev->revision > 0x9) return; /* enable access to config space*/ pci_read_config_byte(dev, 0xf4, &config); pci_write_config_byte(dev, 0xf4, config|0x2); /* * read xTPR register. We may not have a pci_dev for device 8 * because it might be hidden until the above write. */ pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word); if (!(word & (1 << 13))) { dev_info(&dev->dev, "Intel E7520/7320/7525 detected; " "disabling irq balancing and affinity\n"); noirqdebug_setup(""); #ifdef CONFIG_PROC_FS no_irq_affinity = 1; #endif } /* put back the original value for config space*/ if (!(config & 0x2)) pci_write_config_byte(dev, 0xf4, config); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance); #endif #if defined(CONFIG_HPET_TIMER) unsigned long force_hpet_address; static enum { NONE_FORCE_HPET_RESUME, OLD_ICH_FORCE_HPET_RESUME, ICH_FORCE_HPET_RESUME, VT8237_FORCE_HPET_RESUME, NVIDIA_FORCE_HPET_RESUME, ATI_FORCE_HPET_RESUME, } force_hpet_resume_type; static void __iomem *rcba_base; static void ich_force_hpet_resume(void) { u32 val; if (!force_hpet_address) return; BUG_ON(rcba_base == NULL); /* read the Function Disable register, dword mode only */ val = readl(rcba_base + 0x3404); if (!(val & 0x80)) { /* HPET disabled in HPTC. Trying to enable */ writel(val | 0x80, rcba_base + 0x3404); } val = readl(rcba_base + 0x3404); if (!(val & 0x80)) BUG(); else printk(KERN_DEBUG "Force enabled HPET at resume\n"); } static void ich_force_enable_hpet(struct pci_dev *dev) { u32 val; u32 rcba; int err = 0; if (hpet_address || force_hpet_address) return; pci_read_config_dword(dev, 0xF0, &rcba); rcba &= 0xFFFFC000; if (rcba == 0) { dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; " "cannot force enable HPET\n"); return; } /* use bits 31:14, 16 kB aligned */ rcba_base = ioremap(rcba, 0x4000); if (rcba_base == NULL) { dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; " "cannot force enable HPET\n"); return; } /* read the Function Disable register, dword mode only */ val = readl(rcba_base + 0x3404); if (val & 0x80) { /* HPET is enabled in HPTC. Just not reported by BIOS */ val = val & 0x3; force_hpet_address = 0xFED00000 | (val << 12); dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " "0x%lx\n", force_hpet_address); iounmap(rcba_base); return; } /* HPET disabled in HPTC. 
Trying to enable */ writel(val | 0x80, rcba_base + 0x3404); val = readl(rcba_base + 0x3404); if (!(val & 0x80)) { err = 1; } else { val = val & 0x3; force_hpet_address = 0xFED00000 | (val << 12); } if (err) { force_hpet_address = 0; iounmap(rcba_base); dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n"); } else { force_hpet_resume_type = ICH_FORCE_HPET_RESUME; dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " "0x%lx\n", force_hpet_address); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16, /* ICH10 */ ich_force_enable_hpet); static struct pci_dev *cached_dev; static void hpet_print_force_info(void) { printk(KERN_INFO "HPET not enabled in BIOS. " "You might try hpet=force boot option\n"); } static void old_ich_force_hpet_resume(void) { u32 val; u32 gen_cntl; if (!force_hpet_address || !cached_dev) return; pci_read_config_dword(cached_dev, 0xD0, &gen_cntl); gen_cntl &= (~(0x7 << 15)); gen_cntl |= (0x4 << 15); pci_write_config_dword(cached_dev, 0xD0, gen_cntl); pci_read_config_dword(cached_dev, 0xD0, &gen_cntl); val = gen_cntl >> 15; val &= 0x7; if (val == 0x4) printk(KERN_DEBUG "Force enabled HPET at resume\n"); else BUG(); } static void old_ich_force_enable_hpet(struct pci_dev *dev) { u32 val; u32 gen_cntl; if (hpet_address || force_hpet_address) return; pci_read_config_dword(dev, 0xD0, &gen_cntl); /* * Bit 17 is HPET enable bit. * Bit 16:15 control the HPET base address. */ val = gen_cntl >> 15; val &= 0x7; if (val & 0x4) { val &= 0x3; force_hpet_address = 0xFED00000 | (val << 12); dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n", force_hpet_address); return; } /* * HPET is disabled. Trying enabling at FED00000 and check * whether it sticks */ gen_cntl &= (~(0x7 << 15)); gen_cntl |= (0x4 << 15); pci_write_config_dword(dev, 0xD0, gen_cntl); pci_read_config_dword(dev, 0xD0, &gen_cntl); val = gen_cntl >> 15; val &= 0x7; if (val & 0x4) { /* HPET is enabled in HPTC. Just not reported by BIOS */ val &= 0x3; force_hpet_address = 0xFED00000 | (val << 12); dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " "0x%lx\n", force_hpet_address); cached_dev = dev; force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME; return; } dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n"); } /* * Undocumented chipset features. Make sure that the user enforced * this. 
*/ static void old_ich_force_enable_hpet_user(struct pci_dev *dev) { if (hpet_force_user) old_ich_force_enable_hpet(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, old_ich_force_enable_hpet_user); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, old_ich_force_enable_hpet_user); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, old_ich_force_enable_hpet_user); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, old_ich_force_enable_hpet_user); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, old_ich_force_enable_hpet_user); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, old_ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12, old_ich_force_enable_hpet); static void vt8237_force_hpet_resume(void) { u32 val; if (!force_hpet_address || !cached_dev) return; val = 0xfed00000 | 0x80; pci_write_config_dword(cached_dev, 0x68, val); pci_read_config_dword(cached_dev, 0x68, &val); if (val & 0x80) printk(KERN_DEBUG "Force enabled HPET at resume\n"); else BUG(); } static void vt8237_force_enable_hpet(struct pci_dev *dev) { u32 val; if (hpet_address || force_hpet_address) return; if (!hpet_force_user) { hpet_print_force_info(); return; } pci_read_config_dword(dev, 0x68, &val); /* * Bit 7 is HPET enable bit. * Bit 31:10 is HPET base address (contrary to what datasheet claims) */ if (val & 0x80) { force_hpet_address = (val & ~0x3ff); dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n", force_hpet_address); return; } /* * HPET is disabled. Trying enabling at FED00000 and check * whether it sticks */ val = 0xfed00000 | 0x80; pci_write_config_dword(dev, 0x68, val); pci_read_config_dword(dev, 0x68, &val); if (val & 0x80) { force_hpet_address = (val & ~0x3ff); dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " "0x%lx\n", force_hpet_address); cached_dev = dev; force_hpet_resume_type = VT8237_FORCE_HPET_RESUME; return; } dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, vt8237_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, vt8237_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700, vt8237_force_enable_hpet); static void ati_force_hpet_resume(void) { pci_write_config_dword(cached_dev, 0x14, 0xfed00000); printk(KERN_DEBUG "Force enabled HPET at resume\n"); } static u32 ati_ixp4x0_rev(struct pci_dev *dev) { int err = 0; u32 d = 0; u8 b = 0; err = pci_read_config_byte(dev, 0xac, &b); b &= ~(1<<5); err |= pci_write_config_byte(dev, 0xac, b); err |= pci_read_config_dword(dev, 0x70, &d); d |= 1<<8; err |= pci_write_config_dword(dev, 0x70, d); err |= pci_read_config_dword(dev, 0x8, &d); d &= 0xff; dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d); WARN_ON_ONCE(err); return d; } static void ati_force_enable_hpet(struct pci_dev *dev) { u32 d, val; u8 b; if (hpet_address || force_hpet_address) return; if (!hpet_force_user) { hpet_print_force_info(); return; } d = ati_ixp4x0_rev(dev); if (d < 0x82) return; /* base address */ pci_write_config_dword(dev, 0x14, 0xfed00000); pci_read_config_dword(dev, 0x14, &val); /* enable interrupt */ outb(0x72, 0xcd6); b = inb(0xcd7); b |= 0x1; outb(0x72, 0xcd6); outb(b, 0xcd7); outb(0x72, 0xcd6); b = inb(0xcd7); if (!(b & 0x1)) return; pci_read_config_dword(dev, 0x64, &d); d |= (1<<10); 
pci_write_config_dword(dev, 0x64, d); pci_read_config_dword(dev, 0x64, &d); if (!(d & (1<<10))) return; force_hpet_address = val; force_hpet_resume_type = ATI_FORCE_HPET_RESUME; dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n", force_hpet_address); cached_dev = dev; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS, ati_force_enable_hpet); /* * Undocumented chipset feature taken from LinuxBIOS. */ static void nvidia_force_hpet_resume(void) { pci_write_config_dword(cached_dev, 0x44, 0xfed00001); printk(KERN_DEBUG "Force enabled HPET at resume\n"); } static void nvidia_force_enable_hpet(struct pci_dev *dev) { u32 val; if (hpet_address || force_hpet_address) return; if (!hpet_force_user) { hpet_print_force_info(); return; } pci_write_config_dword(dev, 0x44, 0xfed00001); pci_read_config_dword(dev, 0x44, &val); force_hpet_address = val & 0xfffffffe; force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME; dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n", force_hpet_address); cached_dev = dev; } /* ISA Bridges */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051, nvidia_force_enable_hpet); /* LPC bridges */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367, nvidia_force_enable_hpet); void force_hpet_resume(void) { switch (force_hpet_resume_type) { case ICH_FORCE_HPET_RESUME: ich_force_hpet_resume(); return; case OLD_ICH_FORCE_HPET_RESUME: old_ich_force_hpet_resume(); return; case VT8237_FORCE_HPET_RESUME: vt8237_force_hpet_resume(); return; case NVIDIA_FORCE_HPET_RESUME: nvidia_force_hpet_resume(); return; case ATI_FORCE_HPET_RESUME: ati_force_hpet_resume(); return; default: break; } } /* * According to the datasheet e6xx systems have the HPET hardwired to * 0xfed00000 */ static void e6xx_force_enable_hpet(struct pci_dev *dev) { if (hpet_address || force_hpet_address) return; force_hpet_address = 0xFED00000; force_hpet_resume_type = NONE_FORCE_HPET_RESUME; dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " "0x%lx\n", force_hpet_address); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU, e6xx_force_enable_hpet); /* * HPET MSI on some boards (ATI SB700/SB800) has side effect on * floppy DMA. Disable HPET MSI on such platforms. * See erratum #27 (Misinterpreted MSI Requests May Result in * Corrupted LPC DMA Data) in AMD Publication #46837, * "SB700 Family Product Errata", Rev. 1.0, March 2010. 
*/ static void force_disable_hpet_msi(struct pci_dev *unused) { hpet_msi_disable = true; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, force_disable_hpet_msi); #endif #if defined(CONFIG_PCI) && defined(CONFIG_NUMA) /* Set correct numa_node information for AMD NB functions */ static void quirk_amd_nb_node(struct pci_dev *dev) { struct pci_dev *nb_ht; unsigned int devfn; u32 node; u32 val; devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); nb_ht = pci_get_slot(dev->bus, devfn); if (!nb_ht) return; pci_read_config_dword(nb_ht, 0x60, &val); node = pcibus_to_node(dev->bus) | (val & 7); /* * Some hardware may return an invalid node ID, * so check it first: */ if (node_online(node)) set_dev_node(&dev->dev, node); pci_dev_put(nb_ht); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5, quirk_amd_nb_node); #endif #ifdef CONFIG_PCI /* * Processor does not ensure DRAM scrub read/write sequence * is atomic wrt accesses to CC6 save state area. Therefore * if a concurrent scrub read/write access is to same address * the entry may appear as if it is not written. This quirk * applies to Fam16h models 00h-0Fh * * See "Revision Guide" for AMD F16h models 00h-0fh, * document 51810 rev. 
 * 3.04, Nov 2013
 */
static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
{
	u32 val;

	/*
	 * Suggested workaround:
	 * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
	 */
	pci_read_config_dword(dev, 0x58, &val);
	if (val & 0x1F) {
		val &= ~(0x1F);
		pci_write_config_dword(dev, 0x58, val);
	}

	pci_read_config_dword(dev, 0x5C, &val);
	if (val & BIT(0)) {
		val &= ~BIT(0);
		pci_write_config_dword(dev, 0x5c, val);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
			amd_disable_seq_and_redirect_scrub);

/* Ivy Bridge, Haswell, Broadwell */
static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
{
	u32 capid0;

	pci_read_config_dword(pdev, 0x84, &capid0);

	if (capid0 & 0x10)
		enable_copy_mc_fragile();
}

/* Skylake */
static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
{
	u32 capid0, capid5;

	pci_read_config_dword(pdev, 0x84, &capid0);
	pci_read_config_dword(pdev, 0x98, &capid5);

	/*
	 * CAPID0{7:6} indicate whether this is an advanced RAS SKU
	 * CAPID5{8:5} indicate that various NVDIMM usage modes are
	 * enabled, so memory machine check recovery is also enabled.
	 */
	if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
		enable_copy_mc_fragile();
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
#endif

bool x86_apple_machine;
EXPORT_SYMBOL(x86_apple_machine);

void __init early_platform_quirks(void)
{
	x86_apple_machine = dmi_match(DMI_SYS_VENDOR, "Apple Inc.") ||
			    dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.");
}
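/*
 * Illustrative sketch, not part of this file: the minimal shape shared by
 * the quirks above. A fixup routine receives the struct pci_dev being
 * probed, adjusts its config space, and is bound to a vendor/device pair
 * through one of the DECLARE_PCI_FIXUP_* stages already used here. The
 * 0x1234 IDs and the 0x40 config offset below are placeholders for
 * illustration only, not real hardware values.
 */
static void example_quirk(struct pci_dev *dev)
{
	u32 val;

	/* Read a (hypothetical) control dword and clear bit 0 if it is set. */
	pci_read_config_dword(dev, 0x40, &val);
	if (val & BIT(0))
		pci_write_config_dword(dev, 0x40, val & ~BIT(0));
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x1234, example_quirk);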
linux-master
arch/x86/kernel/quirks.c
// SPDX-License-Identifier: GPL-2.0-or-later /* -*- linux-c -*- * APM BIOS driver for Linux * Copyright 1994-2001 Stephen Rothwell ([email protected]) * * Initial development of this driver was funded by NEC Australia P/L * and NEC Corporation * * October 1995, Rik Faith ([email protected]): * Minor enhancements and updates (to the patch set) for 1.3.x * Documentation * January 1996, Rik Faith ([email protected]): * Make /proc/apm easy to format (bump driver version) * March 1996, Rik Faith ([email protected]): * Prohibit APM BIOS calls unless apm_enabled. * (Thanks to Ulrich Windl <[email protected]>) * April 1996, Stephen Rothwell ([email protected]) * Version 1.0 and 1.1 * May 1996, Version 1.2 * Feb 1998, Version 1.3 * Feb 1998, Version 1.4 * Aug 1998, Version 1.5 * Sep 1998, Version 1.6 * Nov 1998, Version 1.7 * Jan 1999, Version 1.8 * Jan 1999, Version 1.9 * Oct 1999, Version 1.10 * Nov 1999, Version 1.11 * Jan 2000, Version 1.12 * Feb 2000, Version 1.13 * Nov 2000, Version 1.14 * Oct 2001, Version 1.15 * Jan 2002, Version 1.16 * Oct 2002, Version 1.16ac * * History: * 0.6b: first version in official kernel, Linux 1.3.46 * 0.7: changed /proc/apm format, Linux 1.3.58 * 0.8: fixed gcc 2.7.[12] compilation problems, Linux 1.3.59 * 0.9: only call bios if bios is present, Linux 1.3.72 * 1.0: use fixed device number, consolidate /proc/apm into this file, * Linux 1.3.85 * 1.1: support user-space standby and suspend, power off after system * halted, Linux 1.3.98 * 1.2: When resetting RTC after resume, take care so that the time * is only incorrect by 30-60mS (vs. 1S previously) (Gabor J. Toth * <[email protected]>); improve interaction between * screen-blanking and gpm (Stephen Rothwell); Linux 1.99.4 * 1.2a:Simple change to stop mysterious bug reports with SMP also added * levels to the printk calls. APM is not defined for SMP machines. * The new replacement for it is, but Linux doesn't yet support this. * Alan Cox Linux 2.1.55 * 1.3: Set up a valid data descriptor 0x40 for buggy BIOS's * 1.4: Upgraded to support APM 1.2. Integrated ThinkPad suspend patch by * Dean Gaudet <[email protected]>. * C. Scott Ananian <[email protected]> Linux 2.1.87 * 1.5: Fix segment register reloading (in case of bad segments saved * across BIOS call). * Stephen Rothwell * 1.6: Cope with compiler/assembler differences. * Only try to turn off the first display device. * Fix OOPS at power off with no APM BIOS by Jan Echternach * <[email protected]> * Stephen Rothwell * 1.7: Modify driver's cached copy of the disabled/disengaged flags * to reflect current state of APM BIOS. * Chris Rankin <[email protected]> * Reset interrupt 0 timer to 100Hz after suspend * Chad Miller <[email protected]> * Add CONFIG_APM_IGNORE_SUSPEND_BOUNCE * Richard Gooch <[email protected]> * Allow boot time disabling of APM * Make boot messages far less verbose by default * Make asm safer * Stephen Rothwell * 1.8: Add CONFIG_APM_RTC_IS_GMT * Richard Gooch <[email protected]> * change APM_NOINTS to CONFIG_APM_ALLOW_INTS * remove dependency on CONFIG_PROC_FS * Stephen Rothwell * 1.9: Fix small typo. <[email protected]> * Try to cope with BIOS's that need to have all display * devices blanked and not just the first one. * Ross Paterson <[email protected]> * Fix segment limit setting it has always been wrong as * the segments needed to have byte granularity. * Mark a few things __init. * Add hack to allow power off of SMP systems by popular request. * Use CONFIG_SMP instead of __SMP__ * Ignore BOUNCES for three seconds. 
* Stephen Rothwell * 1.10: Fix for Thinkpad return code. * Merge 2.2 and 2.3 drivers. * Remove APM dependencies in arch/i386/kernel/process.c * Remove APM dependencies in drivers/char/sysrq.c * Reset time across standby. * Allow more initialisation on SMP. * Remove CONFIG_APM_POWER_OFF and make it boot time * configurable (default on). * Make debug only a boot time parameter (remove APM_DEBUG). * Try to blank all devices on any error. * 1.11: Remove APM dependencies in drivers/char/console.c * Check nr_running to detect if we are idle (from * Borislav Deianov <[email protected]>) * Fix for bioses that don't zero the top part of the * entrypoint offset (Mario Sitta <[email protected]>) * (reported by Panos Katsaloulis <[email protected]>). * Real mode power off patch (Walter Hofmann * <[email protected]>). * 1.12: Remove CONFIG_SMP as the compiler will optimize * the code away anyway (smp_num_cpus == 1 in UP) * noted by Artur Skawina <[email protected]>. * Make power off under SMP work again. * Fix thinko with initial engaging of BIOS. * Make sure power off only happens on CPU 0 * (Paul "Rusty" Russell <[email protected]>). * Do error notification to user mode if BIOS calls fail. * Move entrypoint offset fix to ...boot/setup.S * where it belongs (Cosmos <[email protected]>). * Remove smp-power-off. SMP users must now specify * "apm=power-off" on the kernel command line. Suggested * by Jim Avera <[email protected]>, modified by Alan Cox * <[email protected]>. * Register the /proc/apm entry even on SMP so that * scripts that check for it before doing power off * work (Jim Avera <[email protected]>). * 1.13: Changes for new pm_ interfaces (Andy Henroid * <[email protected]>). * Modularize the code. * Fix the Thinkpad (again) :-( (CONFIG_APM_IGNORE_MULTIPLE_SUSPENDS * is now the way life works). * Fix thinko in suspend() (wrong return). * Notify drivers on critical suspend. * Make kapmd absorb more idle time (Pavel Machek <[email protected]> * modified by sfr). * Disable interrupts while we are suspended (Andy Henroid * <[email protected]> fixed by sfr). * Make power off work on SMP again (Tony Hoyle * <[email protected]> and <[email protected]>) modified by sfr. * Remove CONFIG_APM_SUSPEND_BOUNCE. The bounce ignore * interval is now configurable. * 1.14: Make connection version persist across module unload/load. * Enable and engage power management earlier. * Disengage power management on module unload. * Changed to use the sysrq-register hack for registering the * power off function called by magic sysrq based upon discussions * in irc://irc.openprojects.net/#kernelnewbies * (Crutcher Dunnavant <[email protected]>). * Make CONFIG_APM_REAL_MODE_POWER_OFF run time configurable. * (Arjan van de Ven <[email protected]>) modified by sfr. * Work around byte swap bug in one of the Vaio's BIOS's * (Marc Boucher <[email protected]>). * Exposed the disable flag to dmi so that we can handle known * broken APM (Alan Cox <[email protected]>). * 1.14ac: If the BIOS says "I slowed the CPU down" then don't spin * calling it - instead idle. (Alan Cox <[email protected]>) * If an APM idle fails log it and idle sensibly * 1.15: Don't queue events to clients who open the device O_WRONLY. * Don't expect replies from clients who open the device O_RDONLY. * (Idea from Thomas Hood) * Minor waitqueue cleanups. (John Fremlin <[email protected]>) * 1.16: Fix idle calling. (Andreas Steinmetz <[email protected]> et al.) * Notify listeners of standby or suspend events before notifying * drivers. 
Return EBUSY to ioctl() if suspend is rejected. * (Russell King <[email protected]> and Thomas Hood) * Ignore first resume after we generate our own resume event * after a suspend (Thomas Hood) * Daemonize now gets rid of our controlling terminal (sfr). * CONFIG_APM_CPU_IDLE now just affects the default value of * idle_threshold (sfr). * Change name of kernel apm daemon (as it no longer idles) (sfr). * 1.16ac: Fix up SMP support somewhat. You can now force SMP on and we * make _all_ APM calls on the CPU#0. Fix unsafe sign bug. * TODO: determine if its "boot CPU" or "CPU0" we want to lock to. * * APM 1.1 Reference: * * Intel Corporation, Microsoft Corporation. Advanced Power Management * (APM) BIOS Interface Specification, Revision 1.1, September 1993. * Intel Order Number 241704-001. Microsoft Part Number 781-110-X01. * * [This document is available free from Intel by calling 800.628.8686 (fax * 916.356.6100) or 800.548.4725; or from * http://www.microsoft.com/whdc/archive/amp_12.mspx It is also * available from Microsoft by calling 206.882.8080.] * * APM 1.2 Reference: * Intel Corporation, Microsoft Corporation. Advanced Power Management * (APM) BIOS Interface Specification, Revision 1.2, February 1996. * * [This document is available from Microsoft at: * http://www.microsoft.com/whdc/archive/amp_12.mspx] */ #define pr_fmt(fmt) "apm: " fmt #include <linux/module.h> #include <linux/poll.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/timer.h> #include <linux/fcntl.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/miscdevice.h> #include <linux/apm_bios.h> #include <linux/init.h> #include <linux/time.h> #include <linux/sched/signal.h> #include <linux/sched/cputime.h> #include <linux/pm.h> #include <linux/capability.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/freezer.h> #include <linux/smp.h> #include <linux/dmi.h> #include <linux/suspend.h> #include <linux/kthread.h> #include <linux/jiffies.h> #include <linux/acpi.h> #include <linux/syscore_ops.h> #include <linux/i8253.h> #include <linux/cpuidle.h> #include <linux/uaccess.h> #include <asm/desc.h> #include <asm/olpc.h> #include <asm/paravirt.h> #include <asm/reboot.h> #include <asm/nospec-branch.h> #include <asm/ibt.h> #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) extern int (*console_blank_hook)(int); #endif /* * Various options can be changed at boot time as follows: * (We allow underscores for compatibility with the modules code) * apm=on/off enable/disable APM * [no-]allow[-_]ints allow interrupts during BIOS calls * [no-]broken[-_]psr BIOS has a broken GetPowerStatus call * [no-]realmode[-_]power[-_]off switch to real mode before * powering off * [no-]debug log some debugging messages * [no-]power[-_]off power off on shutdown * [no-]smp Use apm even on an SMP box * bounce[-_]interval=<n> number of ticks to ignore suspend * bounces * idle[-_]threshold=<n> System idle percentage above which to * make APM BIOS idle calls. Set it to * 100 to disable. * idle[-_]period=<n> Period (in 1/100s of a second) over * which the idle percentage is * calculated. */ /* KNOWN PROBLEM MACHINES: * * U: TI 4000M TravelMate: BIOS is *NOT* APM compliant * [Confirmed by TI representative] * ?: ACER 486DX4/75: uses dseg 0040, in violation of APM specification * [Confirmed by BIOS disassembly] * [This may work now ...] 
* P: Toshiba 1950S: battery life information only gets updated after resume * P: Midwest Micro Soundbook Elite DX2/66 monochrome: screen blanking * broken in BIOS [Reported by Garst R. Reese <[email protected]>] * ?: AcerNote-950: oops on reading /proc/apm - workaround is a WIP * Neale Banks <[email protected]> December 2000 * * Legend: U = unusable with APM patches * P = partially usable with APM patches */ /* * Define as 1 to make the driver always call the APM BIOS busy * routine even if the clock was not reported as slowed by the * idle routine. Otherwise, define as 0. */ #define ALWAYS_CALL_BUSY 1 /* * Define to make the APM BIOS calls zero all data segment registers (so * that an incorrect BIOS implementation will cause a kernel panic if it * tries to write to arbitrary memory). */ #define APM_ZERO_SEGS #include <asm/apm.h> /* * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend. * This patched by Chad Miller <[email protected]>, original code by * David Chen <[email protected]> */ #undef INIT_TIMER_AFTER_SUSPEND #ifdef INIT_TIMER_AFTER_SUSPEND #include <linux/timex.h> #include <asm/io.h> #include <linux/delay.h> #endif /* * Need to poll the APM BIOS every second */ #define APM_CHECK_TIMEOUT (HZ) /* * Ignore suspend events for this amount of time after a resume */ #define DEFAULT_BOUNCE_INTERVAL (3 * HZ) /* * Maximum number of events stored */ #define APM_MAX_EVENTS 20 /* * The per-file APM data */ struct apm_user { int magic; struct apm_user *next; unsigned int suser: 1; unsigned int writer: 1; unsigned int reader: 1; unsigned int suspend_wait: 1; int suspend_result; int suspends_pending; int standbys_pending; int suspends_read; int standbys_read; int event_head; int event_tail; apm_event_t events[APM_MAX_EVENTS]; }; /* * The magic number in apm_user */ #define APM_BIOS_MAGIC 0x4101 /* * idle percentage above which bios idle calls are done */ #ifdef CONFIG_APM_CPU_IDLE #define DEFAULT_IDLE_THRESHOLD 95 #else #define DEFAULT_IDLE_THRESHOLD 100 #endif #define DEFAULT_IDLE_PERIOD (100 / 3) static int apm_cpu_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); static struct cpuidle_driver apm_idle_driver = { .name = "apm_idle", .owner = THIS_MODULE, .states = { { /* entry 0 is for polling */ }, { /* entry 1 is for APM idle */ .name = "APM", .desc = "APM idle", .exit_latency = 250, /* WAG */ .target_residency = 500, /* WAG */ .enter = &apm_cpu_idle }, }, .state_count = 2, }; static struct cpuidle_device apm_cpuidle_device; /* * Local variables */ __visible struct { unsigned long offset; unsigned short segment; } apm_bios_entry; static int clock_slowed; static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD; static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD; static int suspends_pending; static int standbys_pending; static int ignore_sys_suspend; static int ignore_normal_resume; static int bounce_interval __read_mostly = DEFAULT_BOUNCE_INTERVAL; static bool debug __read_mostly; static bool smp __read_mostly; static int apm_disabled = -1; #ifdef CONFIG_SMP static bool power_off; #else static bool power_off = 1; #endif static bool realmode_power_off; #ifdef CONFIG_APM_ALLOW_INTS static bool allow_ints = 1; #else static bool allow_ints; #endif static bool broken_psr; static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue); static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); static struct apm_user *user_list; static DEFINE_SPINLOCK(user_list_lock); static DEFINE_MUTEX(apm_mutex); /* * Set up a segment that references the real mode segment 
0x40 * that extends up to the end of page zero (that we have reserved). * This is for buggy BIOS's that refer to (real mode) segment 0x40 * even though they are called in protected mode. */ static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); static const char driver_version[] = "1.16ac"; /* no spaces */ static struct task_struct *kapmd_task; /* * APM event names taken from the APM 1.2 specification. These are * the message codes that the BIOS uses to tell us about events */ static const char * const apm_event_name[] = { "system standby", "system suspend", "normal resume", "critical resume", "low battery", "power status change", "update time", "critical suspend", "user standby", "user suspend", "system standby resume", "capabilities change" }; #define NR_APM_EVENT_NAME ARRAY_SIZE(apm_event_name) typedef struct lookup_t { int key; char *msg; } lookup_t; /* * The BIOS returns a set of standard error codes in AX when the * carry flag is set. */ static const lookup_t error_table[] = { /* N/A { APM_SUCCESS, "Operation succeeded" }, */ { APM_DISABLED, "Power management disabled" }, { APM_CONNECTED, "Real mode interface already connected" }, { APM_NOT_CONNECTED, "Interface not connected" }, { APM_16_CONNECTED, "16 bit interface already connected" }, /* N/A { APM_16_UNSUPPORTED, "16 bit interface not supported" }, */ { APM_32_CONNECTED, "32 bit interface already connected" }, { APM_32_UNSUPPORTED, "32 bit interface not supported" }, { APM_BAD_DEVICE, "Unrecognized device ID" }, { APM_BAD_PARAM, "Parameter out of range" }, { APM_NOT_ENGAGED, "Interface not engaged" }, { APM_BAD_FUNCTION, "Function not supported" }, { APM_RESUME_DISABLED, "Resume timer disabled" }, { APM_BAD_STATE, "Unable to enter requested state" }, /* N/A { APM_NO_EVENTS, "No events pending" }, */ { APM_NO_ERROR, "BIOS did not set a return code" }, { APM_NOT_PRESENT, "No APM present" } }; #define ERROR_COUNT ARRAY_SIZE(error_table) /** * apm_error - display an APM error * @str: information string * @err: APM BIOS return code * * Write a meaningful log entry to the kernel log in the event of * an APM error. Note that this also handles (negative) kernel errors. */ static void apm_error(char *str, int err) { int i; for (i = 0; i < ERROR_COUNT; i++) if (error_table[i].key == err) break; if (i < ERROR_COUNT) pr_notice("%s: %s\n", str, error_table[i].msg); else if (err < 0) pr_notice("%s: linux error code %i\n", str, err); else pr_notice("%s: unknown error code %#2.2x\n", str, err); } /* * These are the actual BIOS calls. Depending on APM_ZERO_SEGS and * apm_info.allow_ints, we are being really paranoid here! Not only * are interrupts disabled, but all the segment registers (except SS) * are saved and zeroed this means that if the BIOS tries to reference * any data without explicitly loading the segment registers, the kernel * will fault immediately rather than have some unforeseen circumstances * for the rest of the kernel. And it will be very obvious! :-) Doing * this depends on CS referring to the same physical memory as DS so that * DS can be zeroed before the call. Unfortunately, we can't do anything * about the stack segment/pointer. Also, we tell the compiler that * everything could change. * * Also, we KNOW that for the non error case of apm_bios_call, there * is no useful data returned in the low order 8 bits of eax. 
*/ static inline unsigned long __apm_irq_save(void) { unsigned long flags; local_save_flags(flags); if (apm_info.allow_ints) { if (irqs_disabled_flags(flags)) local_irq_enable(); } else local_irq_disable(); return flags; } #define apm_irq_save(flags) \ do { flags = __apm_irq_save(); } while (0) static inline void apm_irq_restore(unsigned long flags) { if (irqs_disabled_flags(flags)) local_irq_disable(); else if (irqs_disabled()) local_irq_enable(); } #ifdef APM_ZERO_SEGS # define APM_DECL_SEGS \ unsigned int saved_fs; unsigned int saved_gs; # define APM_DO_SAVE_SEGS \ savesegment(fs, saved_fs); savesegment(gs, saved_gs) # define APM_DO_RESTORE_SEGS \ loadsegment(fs, saved_fs); loadsegment(gs, saved_gs) #else # define APM_DECL_SEGS # define APM_DO_SAVE_SEGS # define APM_DO_RESTORE_SEGS #endif struct apm_bios_call { u32 func; /* In and out */ u32 ebx; u32 ecx; /* Out only */ u32 eax; u32 edx; u32 esi; /* Error: -ENOMEM, or bits 8-15 of eax */ int err; }; /** * __apm_bios_call - Make an APM BIOS 32bit call * @_call: pointer to struct apm_bios_call. * * Make an APM call using the 32bit protected mode interface. The * caller is responsible for knowing if APM BIOS is configured and * enabled. This call can disable interrupts for a long period of * time on some laptops. The return value is in AH and the carry * flag is loaded into AL. If there is an error, then the error * code is returned in AH (bits 8-15 of eax) and this function * returns non-zero. * * Note: this makes the call on the current CPU. */ static long __apm_bios_call(void *_call) { APM_DECL_SEGS unsigned long flags; int cpu; struct desc_struct save_desc_40; struct desc_struct *gdt; struct apm_bios_call *call = _call; u64 ibt; cpu = get_cpu(); BUG_ON(cpu != 0); gdt = get_cpu_gdt_rw(cpu); save_desc_40 = gdt[0x40 / 8]; gdt[0x40 / 8] = bad_bios_desc; apm_irq_save(flags); firmware_restrict_branch_speculation_start(); ibt = ibt_save(true); APM_DO_SAVE_SEGS; apm_bios_call_asm(call->func, call->ebx, call->ecx, &call->eax, &call->ebx, &call->ecx, &call->edx, &call->esi); APM_DO_RESTORE_SEGS; ibt_restore(ibt); firmware_restrict_branch_speculation_end(); apm_irq_restore(flags); gdt[0x40 / 8] = save_desc_40; put_cpu(); return call->eax & 0xff; } /* Run __apm_bios_call or __apm_bios_call_simple on CPU 0 */ static int on_cpu0(long (*fn)(void *), struct apm_bios_call *call) { int ret; /* Don't bother with work_on_cpu in the common case, so we don't * have to worry about OOM or overhead. */ if (get_cpu() == 0) { ret = fn(call); put_cpu(); } else { put_cpu(); ret = work_on_cpu(0, fn, call); } /* work_on_cpu can fail with -ENOMEM */ if (ret < 0) call->err = ret; else call->err = (call->eax >> 8) & 0xff; return ret; } /** * apm_bios_call - Make an APM BIOS 32bit call (on CPU 0) * @call: the apm_bios_call registers. * * If there is an error, it is returned in @call.err. */ static int apm_bios_call(struct apm_bios_call *call) { return on_cpu0(__apm_bios_call, call); } /** * __apm_bios_call_simple - Make an APM BIOS 32bit call (on CPU 0) * @_call: pointer to struct apm_bios_call. * * Make a BIOS call that returns one value only, or just status. * If there is an error, then the error code is returned in AH * (bits 8-15 of eax) and this function returns non-zero (it can * also return -ENOMEM). This is used for simpler BIOS operations. * This call may hold interrupts off for a long time on some laptops. * * Note: this makes the call on the current CPU. 
*/ static long __apm_bios_call_simple(void *_call) { u8 error; APM_DECL_SEGS unsigned long flags; int cpu; struct desc_struct save_desc_40; struct desc_struct *gdt; struct apm_bios_call *call = _call; u64 ibt; cpu = get_cpu(); BUG_ON(cpu != 0); gdt = get_cpu_gdt_rw(cpu); save_desc_40 = gdt[0x40 / 8]; gdt[0x40 / 8] = bad_bios_desc; apm_irq_save(flags); firmware_restrict_branch_speculation_start(); ibt = ibt_save(true); APM_DO_SAVE_SEGS; error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx, &call->eax); APM_DO_RESTORE_SEGS; ibt_restore(ibt); firmware_restrict_branch_speculation_end(); apm_irq_restore(flags); gdt[0x40 / 8] = save_desc_40; put_cpu(); return error; } /** * apm_bios_call_simple - make a simple APM BIOS 32bit call * @func: APM function to invoke * @ebx_in: EBX register value for BIOS call * @ecx_in: ECX register value for BIOS call * @eax: EAX register on return from the BIOS call * @err: bits * * Make a BIOS call that returns one value only, or just status. * If there is an error, then the error code is returned in @err * and this function returns non-zero. This is used for simpler * BIOS operations. This call may hold interrupts off for a long * time on some laptops. */ static int apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax, int *err) { struct apm_bios_call call; int ret; call.func = func; call.ebx = ebx_in; call.ecx = ecx_in; ret = on_cpu0(__apm_bios_call_simple, &call); *eax = call.eax; *err = call.err; return ret; } /** * apm_driver_version - APM driver version * @val: loaded with the APM version on return * * Retrieve the APM version supported by the BIOS. This is only * supported for APM 1.1 or higher. An error indicates APM 1.0 is * probably present. * * On entry val should point to a value indicating the APM driver * version with the high byte being the major and the low byte the * minor number both in BCD * * On return it will hold the BIOS revision supported in the * same format. */ static int apm_driver_version(u_short *val) { u32 eax; int err; if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax, &err)) return err; *val = eax; return APM_SUCCESS; } /** * apm_get_event - get an APM event from the BIOS * @event: pointer to the event * @info: point to the event information * * The APM BIOS provides a polled information for event * reporting. The BIOS expects to be polled at least every second * when events are pending. When a message is found the caller should * poll until no more messages are present. However, this causes * problems on some laptops where a suspend event notification is * not cleared until it is acknowledged. * * Additional information is returned in the info pointer, providing * that APM 1.2 is in use. If no messages are pending the value 0x80 * is returned (No power management events pending). */ static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info) { struct apm_bios_call call; call.func = APM_FUNC_GET_EVENT; call.ebx = call.ecx = 0; if (apm_bios_call(&call)) return call.err; *event = call.ebx; if (apm_info.connection_version < 0x0102) *info = ~0; /* indicate info not valid */ else *info = call.ecx; return APM_SUCCESS; } /** * set_power_state - set the power management state * @what: which items to transition * @state: state to transition to * * Request an APM change of state for one or more system devices. The * processor state must be transitioned last of all. what holds the * class of device in the upper byte and the device number (0xFF for * all) for the object to be transitioned. 
* * The state holds the state to transition to, which may in fact * be an acceptance of a BIOS requested state change. */ static int set_power_state(u_short what, u_short state) { u32 eax; int err; if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax, &err)) return err; return APM_SUCCESS; } /** * set_system_power_state - set system wide power state * @state: which state to enter * * Transition the entire system into a new APM power state. */ static int set_system_power_state(u_short state) { return set_power_state(APM_DEVICE_ALL, state); } /** * apm_do_idle - perform power saving * * This function notifies the BIOS that the processor is (in the view * of the OS) idle. It returns -1 in the event that the BIOS refuses * to handle the idle request. On a success the function returns 1 * if the BIOS did clock slowing or 0 otherwise. */ static int apm_do_idle(void) { u32 eax; u8 ret = 0; int idled = 0; int err = 0; if (!need_resched()) { idled = 1; ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax, &err); } if (!idled) return 0; if (ret) { static unsigned long t; /* This always fails on some SMP boards running UP kernels. * Only report the failure the first 5 times. */ if (++t < 5) { printk(KERN_DEBUG "apm_do_idle failed (%d)\n", err); t = jiffies; } return -1; } clock_slowed = (apm_info.bios.flags & APM_IDLE_SLOWS_CLOCK) != 0; return clock_slowed; } /** * apm_do_busy - inform the BIOS the CPU is busy * * Request that the BIOS brings the CPU back to full performance. */ static void apm_do_busy(void) { u32 dummy; int err; if (clock_slowed || ALWAYS_CALL_BUSY) { (void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy, &err); clock_slowed = 0; } } /* * If no process has really been interested in * the CPU for some time, we want to call BIOS * power management - we probably want * to conserve power. */ #define IDLE_CALC_LIMIT (HZ * 100) #define IDLE_LEAKY_MAX 16 /** * apm_cpu_idle - cpu idling for APM capable Linux * * This is the idling function the kernel executes when APM is available. It * tries to do BIOS powermanagement based on the average system idle time. * Furthermore it calls the system default idle routine. 
*/ static int apm_cpu_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { static int use_apm_idle; /* = 0 */ static unsigned int last_jiffies; /* = 0 */ static u64 last_stime; /* = 0 */ u64 stime, utime; int apm_idle_done = 0; unsigned int jiffies_since_last_check = jiffies - last_jiffies; unsigned int bucket; recalc: task_cputime(current, &utime, &stime); if (jiffies_since_last_check > IDLE_CALC_LIMIT) { use_apm_idle = 0; } else if (jiffies_since_last_check > idle_period) { unsigned int idle_percentage; idle_percentage = nsecs_to_jiffies(stime - last_stime); idle_percentage *= 100; idle_percentage /= jiffies_since_last_check; use_apm_idle = (idle_percentage > idle_threshold); if (apm_info.forbid_idle) use_apm_idle = 0; } last_jiffies = jiffies; last_stime = stime; bucket = IDLE_LEAKY_MAX; while (!need_resched()) { if (use_apm_idle) { unsigned int t; t = jiffies; switch (apm_do_idle()) { case 0: apm_idle_done = 1; if (t != jiffies) { if (bucket) { bucket = IDLE_LEAKY_MAX; continue; } } else if (bucket) { bucket--; continue; } break; case 1: apm_idle_done = 1; break; default: /* BIOS refused */ break; } } default_idle(); local_irq_disable(); jiffies_since_last_check = jiffies - last_jiffies; if (jiffies_since_last_check > idle_period) goto recalc; } if (apm_idle_done) apm_do_busy(); return index; } /** * apm_power_off - ask the BIOS to power off * * Handle the power off sequence. This is the one piece of code we * will execute even on SMP machines. In order to deal with BIOS * bugs we support real mode APM BIOS power off calls. We also make * the SMP call on CPU0 as some systems will only honour this call * on their first cpu. */ static void apm_power_off(void) { /* Some bioses don't like being called from CPU != 0 */ if (apm_info.realmode_power_off) { set_cpus_allowed_ptr(current, cpumask_of(0)); machine_real_restart(MRR_APM); } else { (void)set_system_power_state(APM_STATE_OFF); } } #ifdef CONFIG_APM_DO_ENABLE /** * apm_enable_power_management - enable BIOS APM power management * @enable: enable yes/no * * Enable or disable the APM BIOS power services. */ static int apm_enable_power_management(int enable) { u32 eax; int err; if ((enable == 0) && (apm_info.bios.flags & APM_BIOS_DISENGAGED)) return APM_NOT_ENGAGED; if (apm_bios_call_simple(APM_FUNC_ENABLE_PM, APM_DEVICE_BALL, enable, &eax, &err)) return err; if (enable) apm_info.bios.flags &= ~APM_BIOS_DISABLED; else apm_info.bios.flags |= APM_BIOS_DISABLED; return APM_SUCCESS; } #endif /** * apm_get_power_status - get current power state * @status: returned status * @bat: battery info * @life: estimated life * * Obtain the current power status from the APM BIOS. We return a * status which gives the rough battery status, and current power * source. The bat value returned give an estimate as a percentage * of life and a status value for the battery. The estimated life * if reported is a lifetime in seconds/minutes at current power * consumption. 
*/ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life) { struct apm_bios_call call; call.func = APM_FUNC_GET_STATUS; call.ebx = APM_DEVICE_ALL; call.ecx = 0; if (apm_info.get_power_status_broken) return APM_32_UNSUPPORTED; if (apm_bios_call(&call)) { if (!call.err) return APM_NO_ERROR; return call.err; } *status = call.ebx; *bat = call.ecx; if (apm_info.get_power_status_swabinminutes) { *life = swab16((u16)call.edx); *life |= 0x8000; } else *life = call.edx; return APM_SUCCESS; } #if 0 static int apm_get_battery_status(u_short which, u_short *status, u_short *bat, u_short *life, u_short *nbat) { u32 eax; u32 ebx; u32 ecx; u32 edx; u32 esi; if (apm_info.connection_version < 0x0102) { /* pretend we only have one battery. */ if (which != 1) return APM_BAD_DEVICE; *nbat = 1; return apm_get_power_status(status, bat, life); } if (apm_bios_call(APM_FUNC_GET_STATUS, (0x8000 | (which)), 0, &eax, &ebx, &ecx, &edx, &esi)) return (eax >> 8) & 0xff; *status = ebx; *bat = ecx; *life = edx; *nbat = esi; return APM_SUCCESS; } #endif /** * apm_engage_power_management - enable PM on a device * @device: identity of device * @enable: on/off * * Activate or deactivate power management on either a specific device * or the entire system (%APM_DEVICE_ALL). */ static int apm_engage_power_management(u_short device, int enable) { u32 eax; int err; if ((enable == 0) && (device == APM_DEVICE_ALL) && (apm_info.bios.flags & APM_BIOS_DISABLED)) return APM_DISABLED; if (apm_bios_call_simple(APM_FUNC_ENGAGE_PM, device, enable, &eax, &err)) return err; if (device == APM_DEVICE_ALL) { if (enable) apm_info.bios.flags &= ~APM_BIOS_DISENGAGED; else apm_info.bios.flags |= APM_BIOS_DISENGAGED; } return APM_SUCCESS; } #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) /** * apm_console_blank - blank the display * @blank: on/off * * Attempt to blank the console, firstly by blanking just video device * zero, and if that fails (some BIOSes don't support it) then it blanks * all video devices. Typically the BIOS will do laptop backlight and * monitor powerdown for us. */ static int apm_console_blank(int blank) { int error = APM_NOT_ENGAGED; /* silence gcc */ int i; u_short state; static const u_short dev[3] = { 0x100, 0x1FF, 0x101 }; state = blank ? 
APM_STATE_STANDBY : APM_STATE_READY; for (i = 0; i < ARRAY_SIZE(dev); i++) { error = set_power_state(dev[i], state); if ((error == APM_SUCCESS) || (error == APM_NO_ERROR)) return 1; if (error == APM_NOT_ENGAGED) break; } if (error == APM_NOT_ENGAGED) { static int tried; int eng_error; if (tried++ == 0) { eng_error = apm_engage_power_management(APM_DEVICE_ALL, 1); if (eng_error) { apm_error("set display", error); apm_error("engage interface", eng_error); return 0; } else return apm_console_blank(blank); } } apm_error("set display", error); return 0; } #endif static int queue_empty(struct apm_user *as) { return as->event_head == as->event_tail; } static apm_event_t get_queued_event(struct apm_user *as) { if (++as->event_tail >= APM_MAX_EVENTS) as->event_tail = 0; return as->events[as->event_tail]; } static void queue_event(apm_event_t event, struct apm_user *sender) { struct apm_user *as; spin_lock(&user_list_lock); if (user_list == NULL) goto out; for (as = user_list; as != NULL; as = as->next) { if ((as == sender) || (!as->reader)) continue; if (++as->event_head >= APM_MAX_EVENTS) as->event_head = 0; if (as->event_head == as->event_tail) { static int notified; if (notified++ == 0) pr_err("an event queue overflowed\n"); if (++as->event_tail >= APM_MAX_EVENTS) as->event_tail = 0; } as->events[as->event_head] = event; if (!as->suser || !as->writer) continue; switch (event) { case APM_SYS_SUSPEND: case APM_USER_SUSPEND: as->suspends_pending++; suspends_pending++; break; case APM_SYS_STANDBY: case APM_USER_STANDBY: as->standbys_pending++; standbys_pending++; break; } } wake_up_interruptible(&apm_waitqueue); out: spin_unlock(&user_list_lock); } static void reinit_timer(void) { #ifdef INIT_TIMER_AFTER_SUSPEND unsigned long flags; raw_spin_lock_irqsave(&i8253_lock, flags); /* set the clock to HZ */ outb_p(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */ udelay(10); outb_p(LATCH & 0xff, PIT_CH0); /* LSB */ udelay(10); outb_p(LATCH >> 8, PIT_CH0); /* MSB */ udelay(10); raw_spin_unlock_irqrestore(&i8253_lock, flags); #endif } static int suspend(int vetoable) { int err; struct apm_user *as; dpm_suspend_start(PMSG_SUSPEND); dpm_suspend_end(PMSG_SUSPEND); local_irq_disable(); syscore_suspend(); local_irq_enable(); save_processor_state(); err = set_system_power_state(APM_STATE_SUSPEND); ignore_normal_resume = 1; restore_processor_state(); local_irq_disable(); reinit_timer(); if (err == APM_NO_ERROR) err = APM_SUCCESS; if (err != APM_SUCCESS) apm_error("suspend", err); err = (err == APM_SUCCESS) ? 
0 : -EIO; syscore_resume(); local_irq_enable(); dpm_resume_start(PMSG_RESUME); dpm_resume_end(PMSG_RESUME); queue_event(APM_NORMAL_RESUME, NULL); spin_lock(&user_list_lock); for (as = user_list; as != NULL; as = as->next) { as->suspend_wait = 0; as->suspend_result = err; } spin_unlock(&user_list_lock); wake_up_interruptible(&apm_suspend_waitqueue); return err; } static void standby(void) { int err; dpm_suspend_end(PMSG_SUSPEND); local_irq_disable(); syscore_suspend(); local_irq_enable(); err = set_system_power_state(APM_STATE_STANDBY); if ((err != APM_SUCCESS) && (err != APM_NO_ERROR)) apm_error("standby", err); local_irq_disable(); syscore_resume(); local_irq_enable(); dpm_resume_start(PMSG_RESUME); } static apm_event_t get_event(void) { int error; apm_event_t event = APM_NO_EVENTS; /* silence gcc */ apm_eventinfo_t info; static int notified; /* we don't use the eventinfo */ error = apm_get_event(&event, &info); if (error == APM_SUCCESS) return event; if ((error != APM_NO_EVENTS) && (notified++ == 0)) apm_error("get_event", error); return 0; } static void check_events(void) { apm_event_t event; static unsigned long last_resume; static int ignore_bounce; while ((event = get_event()) != 0) { if (debug) { if (event <= NR_APM_EVENT_NAME) printk(KERN_DEBUG "apm: received %s notify\n", apm_event_name[event - 1]); else printk(KERN_DEBUG "apm: received unknown " "event 0x%02x\n", event); } if (ignore_bounce && (time_after(jiffies, last_resume + bounce_interval))) ignore_bounce = 0; switch (event) { case APM_SYS_STANDBY: case APM_USER_STANDBY: queue_event(event, NULL); if (standbys_pending <= 0) standby(); break; case APM_USER_SUSPEND: #ifdef CONFIG_APM_IGNORE_USER_SUSPEND if (apm_info.connection_version > 0x100) set_system_power_state(APM_STATE_REJECT); break; #endif case APM_SYS_SUSPEND: if (ignore_bounce) { if (apm_info.connection_version > 0x100) set_system_power_state(APM_STATE_REJECT); break; } /* * If we are already processing a SUSPEND, * then further SUSPEND events from the BIOS * will be ignored. We also return here to * cope with the fact that the Thinkpads keep * sending a SUSPEND event until something else * happens! */ if (ignore_sys_suspend) return; ignore_sys_suspend = 1; queue_event(event, NULL); if (suspends_pending <= 0) (void) suspend(1); break; case APM_NORMAL_RESUME: case APM_CRITICAL_RESUME: case APM_STANDBY_RESUME: ignore_sys_suspend = 0; last_resume = jiffies; ignore_bounce = 1; if ((event != APM_NORMAL_RESUME) || (ignore_normal_resume == 0)) { dpm_resume_end(PMSG_RESUME); queue_event(event, NULL); } ignore_normal_resume = 0; break; case APM_CAPABILITY_CHANGE: case APM_LOW_BATTERY: case APM_POWER_STATUS_CHANGE: queue_event(event, NULL); /* If needed, notify drivers here */ break; case APM_UPDATE_TIME: break; case APM_CRITICAL_SUSPEND: /* * We are not allowed to reject a critical suspend. */ (void)suspend(0); break; } } } static void apm_event_handler(void) { static int pending_count = 4; int err; if ((standbys_pending > 0) || (suspends_pending > 0)) { if ((apm_info.connection_version > 0x100) && (pending_count-- <= 0)) { pending_count = 4; if (debug) printk(KERN_DEBUG "apm: setting state busy\n"); err = set_system_power_state(APM_STATE_BUSY); if (err) apm_error("busy", err); } } else pending_count = 4; check_events(); } /* * This is the APM thread main loop. 
*/ static void apm_mainloop(void) { DECLARE_WAITQUEUE(wait, current); add_wait_queue(&apm_waitqueue, &wait); set_current_state(TASK_INTERRUPTIBLE); for (;;) { schedule_timeout(APM_CHECK_TIMEOUT); if (kthread_should_stop()) break; /* * Ok, check all events, check for idle (and mark us sleeping * so as not to count towards the load average).. */ set_current_state(TASK_INTERRUPTIBLE); apm_event_handler(); } remove_wait_queue(&apm_waitqueue, &wait); } static int check_apm_user(struct apm_user *as, const char *func) { if (as == NULL || as->magic != APM_BIOS_MAGIC) { pr_err("%s passed bad filp\n", func); return 1; } return 0; } static ssize_t do_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos) { struct apm_user *as; int i; apm_event_t event; as = fp->private_data; if (check_apm_user(as, "read")) return -EIO; if ((int)count < sizeof(apm_event_t)) return -EINVAL; if ((queue_empty(as)) && (fp->f_flags & O_NONBLOCK)) return -EAGAIN; wait_event_interruptible(apm_waitqueue, !queue_empty(as)); i = count; while ((i >= sizeof(event)) && !queue_empty(as)) { event = get_queued_event(as); if (copy_to_user(buf, &event, sizeof(event))) { if (i < count) break; return -EFAULT; } switch (event) { case APM_SYS_SUSPEND: case APM_USER_SUSPEND: as->suspends_read++; break; case APM_SYS_STANDBY: case APM_USER_STANDBY: as->standbys_read++; break; } buf += sizeof(event); i -= sizeof(event); } if (i < count) return count - i; if (signal_pending(current)) return -ERESTARTSYS; return 0; } static __poll_t do_poll(struct file *fp, poll_table *wait) { struct apm_user *as; as = fp->private_data; if (check_apm_user(as, "poll")) return 0; poll_wait(fp, &apm_waitqueue, wait); if (!queue_empty(as)) return EPOLLIN | EPOLLRDNORM; return 0; } static long do_ioctl(struct file *filp, u_int cmd, u_long arg) { struct apm_user *as; int ret; as = filp->private_data; if (check_apm_user(as, "ioctl")) return -EIO; if (!as->suser || !as->writer) return -EPERM; switch (cmd) { case APM_IOC_STANDBY: mutex_lock(&apm_mutex); if (as->standbys_read > 0) { as->standbys_read--; as->standbys_pending--; standbys_pending--; } else queue_event(APM_USER_STANDBY, as); if (standbys_pending <= 0) standby(); mutex_unlock(&apm_mutex); break; case APM_IOC_SUSPEND: mutex_lock(&apm_mutex); if (as->suspends_read > 0) { as->suspends_read--; as->suspends_pending--; suspends_pending--; } else queue_event(APM_USER_SUSPEND, as); if (suspends_pending <= 0) { ret = suspend(1); mutex_unlock(&apm_mutex); } else { as->suspend_wait = 1; mutex_unlock(&apm_mutex); wait_event_interruptible(apm_suspend_waitqueue, as->suspend_wait == 0); ret = as->suspend_result; } return ret; default: return -ENOTTY; } return 0; } static int do_release(struct inode *inode, struct file *filp) { struct apm_user *as; as = filp->private_data; if (check_apm_user(as, "release")) return 0; filp->private_data = NULL; if (as->standbys_pending > 0) { standbys_pending -= as->standbys_pending; if (standbys_pending <= 0) standby(); } if (as->suspends_pending > 0) { suspends_pending -= as->suspends_pending; if (suspends_pending <= 0) (void) suspend(1); } spin_lock(&user_list_lock); if (user_list == as) user_list = as->next; else { struct apm_user *as1; for (as1 = user_list; (as1 != NULL) && (as1->next != as); as1 = as1->next) ; if (as1 == NULL) pr_err("filp not in user list\n"); else as1->next = as->next; } spin_unlock(&user_list_lock); kfree(as); return 0; } static int do_open(struct inode *inode, struct file *filp) { struct apm_user *as; as = kmalloc(sizeof(*as), GFP_KERNEL); if (as == 
NULL) return -ENOMEM; as->magic = APM_BIOS_MAGIC; as->event_tail = as->event_head = 0; as->suspends_pending = as->standbys_pending = 0; as->suspends_read = as->standbys_read = 0; /* * XXX - this is a tiny bit broken, when we consider BSD * process accounting. If the device is opened by root, we * instantly flag that we used superuser privs. Who knows, * we might close the device immediately without doing a * privileged operation -- cevans */ as->suser = capable(CAP_SYS_ADMIN); as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE; as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ; spin_lock(&user_list_lock); as->next = user_list; user_list = as; spin_unlock(&user_list_lock); filp->private_data = as; return 0; } #ifdef CONFIG_PROC_FS static int proc_apm_show(struct seq_file *m, void *v) { unsigned short bx; unsigned short cx; unsigned short dx; int error; unsigned short ac_line_status = 0xff; unsigned short battery_status = 0xff; unsigned short battery_flag = 0xff; int percentage = -1; int time_units = -1; char *units = "?"; if ((num_online_cpus() == 1) && !(error = apm_get_power_status(&bx, &cx, &dx))) { ac_line_status = (bx >> 8) & 0xff; battery_status = bx & 0xff; if ((cx & 0xff) != 0xff) percentage = cx & 0xff; if (apm_info.connection_version > 0x100) { battery_flag = (cx >> 8) & 0xff; if (dx != 0xffff) { units = (dx & 0x8000) ? "min" : "sec"; time_units = dx & 0x7fff; } } } /* Arguments, with symbols from linux/apm_bios.h. Information is from the Get Power Status (0x0a) call unless otherwise noted. 0) Linux driver version (this will change if format changes) 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2. 2) APM flags from APM Installation Check (0x00): bit 0: APM_16_BIT_SUPPORT bit 1: APM_32_BIT_SUPPORT bit 2: APM_IDLE_SLOWS_CLOCK bit 3: APM_BIOS_DISABLED bit 4: APM_BIOS_DISENGAGED 3) AC line status 0x00: Off-line 0x01: On-line 0x02: On backup power (BIOS >= 1.1 only) 0xff: Unknown 4) Battery status 0x00: High 0x01: Low 0x02: Critical 0x03: Charging 0x04: Selected battery not present (BIOS >= 1.2 only) 0xff: Unknown 5) Battery flag bit 0: High bit 1: Low bit 2: Critical bit 3: Charging bit 7: No system battery 0xff: Unknown 6) Remaining battery life (percentage of charge): 0-100: valid -1: Unknown 7) Remaining battery life (time units): Number of remaining minutes or seconds -1: Unknown 8) min = minutes; sec = seconds */ seq_printf(m, "%s %d.%d 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n", driver_version, (apm_info.bios.version >> 8) & 0xff, apm_info.bios.version & 0xff, apm_info.bios.flags, ac_line_status, battery_status, battery_flag, percentage, time_units, units); return 0; } #endif static int apm(void *unused) { unsigned short bx; unsigned short cx; unsigned short dx; int error; char *power_stat; char *bat_stat; /* 2002/08/01 - WT * This is to avoid random crashes at boot time during initialization * on SMP systems in case of "apm=power-off" mode. Seen on ASUS A7M266D. * Some bioses don't like being called from CPU != 0. * Method suggested by Ingo Molnar. */ set_cpus_allowed_ptr(current, cpumask_of(0)); BUG_ON(smp_processor_id() != 0); if (apm_info.connection_version == 0) { apm_info.connection_version = apm_info.bios.version; if (apm_info.connection_version > 0x100) { /* * We only support BIOSs up to version 1.2 */ if (apm_info.connection_version > 0x0102) apm_info.connection_version = 0x0102; error = apm_driver_version(&apm_info.connection_version); if (error != APM_SUCCESS) { apm_error("driver version", error); /* Fall back to an APM 1.0 connection. 
*/ apm_info.connection_version = 0x100; } } } if (debug) printk(KERN_INFO "apm: Connection version %d.%d\n", (apm_info.connection_version >> 8) & 0xff, apm_info.connection_version & 0xff); #ifdef CONFIG_APM_DO_ENABLE if (apm_info.bios.flags & APM_BIOS_DISABLED) { /* * This call causes my NEC UltraLite Versa 33/C to hang if it * is booted with PM disabled but not in the docking station. * Unfortunate ... */ error = apm_enable_power_management(1); if (error) { apm_error("enable power management", error); return -1; } } #endif if ((apm_info.bios.flags & APM_BIOS_DISENGAGED) && (apm_info.connection_version > 0x0100)) { error = apm_engage_power_management(APM_DEVICE_ALL, 1); if (error) { apm_error("engage power management", error); return -1; } } if (debug && (num_online_cpus() == 1 || smp)) { error = apm_get_power_status(&bx, &cx, &dx); if (error) printk(KERN_INFO "apm: power status not available\n"); else { switch ((bx >> 8) & 0xff) { case 0: power_stat = "off line"; break; case 1: power_stat = "on line"; break; case 2: power_stat = "on backup power"; break; default: power_stat = "unknown"; break; } switch (bx & 0xff) { case 0: bat_stat = "high"; break; case 1: bat_stat = "low"; break; case 2: bat_stat = "critical"; break; case 3: bat_stat = "charging"; break; default: bat_stat = "unknown"; break; } printk(KERN_INFO "apm: AC %s, battery status %s, battery life ", power_stat, bat_stat); if ((cx & 0xff) == 0xff) printk("unknown\n"); else printk("%d%%\n", cx & 0xff); if (apm_info.connection_version > 0x100) { printk(KERN_INFO "apm: battery flag 0x%02x, battery life ", (cx >> 8) & 0xff); if (dx == 0xffff) printk("unknown\n"); else printk("%d %s\n", dx & 0x7fff, (dx & 0x8000) ? "minutes" : "seconds"); } } } /* Install our power off handler.. */ if (power_off) pm_power_off = apm_power_off; if (num_online_cpus() == 1 || smp) { #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) console_blank_hook = apm_console_blank; #endif apm_mainloop(); #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) console_blank_hook = NULL; #endif } return 0; } #ifndef MODULE static int __init apm_setup(char *str) { int invert; while ((str != NULL) && (*str != '\0')) { if (strncmp(str, "off", 3) == 0) apm_disabled = 1; if (strncmp(str, "on", 2) == 0) apm_disabled = 0; if ((strncmp(str, "bounce-interval=", 16) == 0) || (strncmp(str, "bounce_interval=", 16) == 0)) bounce_interval = simple_strtol(str + 16, NULL, 0); if ((strncmp(str, "idle-threshold=", 15) == 0) || (strncmp(str, "idle_threshold=", 15) == 0)) idle_threshold = simple_strtol(str + 15, NULL, 0); if ((strncmp(str, "idle-period=", 12) == 0) || (strncmp(str, "idle_period=", 12) == 0)) idle_period = simple_strtol(str + 12, NULL, 0); invert = (strncmp(str, "no-", 3) == 0) || (strncmp(str, "no_", 3) == 0); if (invert) str += 3; if (strncmp(str, "debug", 5) == 0) debug = !invert; if ((strncmp(str, "power-off", 9) == 0) || (strncmp(str, "power_off", 9) == 0)) power_off = !invert; if (strncmp(str, "smp", 3) == 0) { smp = !invert; idle_threshold = 100; } if ((strncmp(str, "allow-ints", 10) == 0) || (strncmp(str, "allow_ints", 10) == 0)) apm_info.allow_ints = !invert; if ((strncmp(str, "broken-psr", 10) == 0) || (strncmp(str, "broken_psr", 10) == 0)) apm_info.get_power_status_broken = !invert; if ((strncmp(str, "realmode-power-off", 18) == 0) || (strncmp(str, "realmode_power_off", 18) == 0)) apm_info.realmode_power_off = !invert; str = strchr(str, ','); if (str != NULL) str += strspn(str, ", \t"); } return 1; } __setup("apm=", apm_setup); #endif static const 
struct file_operations apm_bios_fops = { .owner = THIS_MODULE, .read = do_read, .poll = do_poll, .unlocked_ioctl = do_ioctl, .open = do_open, .release = do_release, .llseek = noop_llseek, }; static struct miscdevice apm_device = { APM_MINOR_DEV, "apm_bios", &apm_bios_fops }; /* Simple "print if true" callback */ static int __init print_if_true(const struct dmi_system_id *d) { printk("%s\n", d->ident); return 0; } /* * Some Bioses enable the PS/2 mouse (touchpad) at resume, even if it was * disabled before the suspend. Linux used to get terribly confused by that. */ static int __init broken_ps2_resume(const struct dmi_system_id *d) { printk(KERN_INFO "%s machine detected. Mousepad Resume Bug " "workaround hopefully not needed.\n", d->ident); return 0; } /* Some bioses have a broken protected mode poweroff and need to use realmode */ static int __init set_realmode_power_off(const struct dmi_system_id *d) { if (apm_info.realmode_power_off == 0) { apm_info.realmode_power_off = 1; printk(KERN_INFO "%s bios detected. " "Using realmode poweroff only.\n", d->ident); } return 0; } /* Some laptops require interrupts to be enabled during APM calls */ static int __init set_apm_ints(const struct dmi_system_id *d) { if (apm_info.allow_ints == 0) { apm_info.allow_ints = 1; printk(KERN_INFO "%s machine detected. " "Enabling interrupts during APM calls.\n", d->ident); } return 0; } /* Some APM bioses corrupt memory or just plain do not work */ static int __init apm_is_horked(const struct dmi_system_id *d) { if (apm_info.disabled == 0) { apm_info.disabled = 1; printk(KERN_INFO "%s machine detected. " "Disabling APM.\n", d->ident); } return 0; } static int __init apm_is_horked_d850md(const struct dmi_system_id *d) { if (apm_info.disabled == 0) { apm_info.disabled = 1; printk(KERN_INFO "%s machine detected. " "Disabling APM.\n", d->ident); printk(KERN_INFO "This bug is fixed in bios P15 which is available for\n"); printk(KERN_INFO "download from support.intel.com\n"); } return 0; } /* Some APM bioses hang on APM idle calls */ static int __init apm_likes_to_melt(const struct dmi_system_id *d) { if (apm_info.forbid_idle == 0) { apm_info.forbid_idle = 1; printk(KERN_INFO "%s machine detected. " "Disabling APM idle calls.\n", d->ident); } return 0; } /* * Check for clue free BIOS implementations who use * the following QA technique * * [ Write BIOS Code ]<------ * | ^ * < Does it Compile >----N-- * |Y ^ * < Does it Boot Win98 >-N-- * |Y * [Ship It] * * Phoenix A04 08/24/2000 is known bad (Dell Inspiron 5000e) * Phoenix A07 09/29/2000 is known good (Dell Inspiron 5000) */ static int __init broken_apm_power(const struct dmi_system_id *d) { apm_info.get_power_status_broken = 1; printk(KERN_WARNING "BIOS strings suggest APM bugs, " "disabling power status reporting.\n"); return 0; } /* * This bios swaps the APM minute reporting bytes over (Many sony laptops * have this problem). 
*/ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d) { apm_info.get_power_status_swabinminutes = 1; printk(KERN_WARNING "BIOS strings suggest APM reports battery life " "in minutes and wrong byte order.\n"); return 0; } static const struct dmi_system_id apm_dmi_table[] __initconst = { { print_if_true, KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.", { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), DMI_MATCH(DMI_BIOS_VERSION, "1AET38WW (1.01b)"), }, }, { /* Handle problems with APM on the C600 */ broken_ps2_resume, "Dell Latitude C600", { DMI_MATCH(DMI_SYS_VENDOR, "Dell"), DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C600"), }, }, { /* Allow interrupts during suspend on Dell Latitude laptops*/ set_apm_ints, "Dell Latitude", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C510"), } }, { /* APM crashes */ apm_is_horked, "Dell Inspiron 2500", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"), DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A11"), }, }, { /* Allow interrupts during suspend on Dell Inspiron laptops*/ set_apm_ints, "Dell Inspiron", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 4000"), }, }, { /* Handle problems with APM on Inspiron 5000e */ broken_apm_power, "Dell Inspiron 5000e", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A04"), DMI_MATCH(DMI_BIOS_DATE, "08/24/2000"), }, }, { /* Handle problems with APM on Inspiron 2500 */ broken_apm_power, "Dell Inspiron 2500", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A12"), DMI_MATCH(DMI_BIOS_DATE, "02/04/2002"), }, }, { /* APM crashes */ apm_is_horked, "Dell Dimension 4100", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"), DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_BIOS_VERSION, "A11"), }, }, { /* Allow interrupts during suspend on Compaq Laptops*/ set_apm_ints, "Compaq 12XL125", { DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), DMI_MATCH(DMI_PRODUCT_NAME, "Compaq PC"), DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "4.06"), }, }, { /* Allow interrupts during APM or the clock goes slow */ set_apm_ints, "ASUSTeK", { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "L8400K series Notebook PC"), }, }, { /* APM blows on shutdown */ apm_is_horked, "ABIT KX7-333[R]", { DMI_MATCH(DMI_BOARD_VENDOR, "ABIT"), DMI_MATCH(DMI_BOARD_NAME, "VT8367-8233A (KX7-333[R])"), }, }, { /* APM crashes */ apm_is_horked, "Trigem Delhi3", { DMI_MATCH(DMI_SYS_VENDOR, "TriGem Computer, Inc"), DMI_MATCH(DMI_PRODUCT_NAME, "Delhi3"), }, }, { /* APM crashes */ apm_is_horked, "Fujitsu-Siemens", { DMI_MATCH(DMI_BIOS_VENDOR, "hoenix/FUJITSU SIEMENS"), DMI_MATCH(DMI_BIOS_VERSION, "Version1.01"), }, }, { /* APM crashes */ apm_is_horked_d850md, "Intel D850MD", { DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_BIOS_VERSION, "MV85010A.86A.0016.P07.0201251536"), }, }, { /* APM crashes */ apm_is_horked, "Intel D810EMO", { DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_BIOS_VERSION, "MO81010A.86A.0008.P04.0004170800"), }, }, { /* APM crashes */ apm_is_horked, "Dell XPS-Z", { DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_BIOS_VERSION, "A11"), DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"), }, }, { /* APM crashes */ 
apm_is_horked, "Sharp PC-PJ/AX", { DMI_MATCH(DMI_SYS_VENDOR, "SHARP"), DMI_MATCH(DMI_PRODUCT_NAME, "PC-PJ/AX"), DMI_MATCH(DMI_BIOS_VENDOR, "SystemSoft"), DMI_MATCH(DMI_BIOS_VERSION, "Version R2.08"), }, }, { /* APM crashes */ apm_is_horked, "Dell Inspiron 2500", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"), DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A11"), }, }, { /* APM idle hangs */ apm_likes_to_melt, "Jabil AMD", { DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), DMI_MATCH(DMI_BIOS_VERSION, "0AASNP06"), }, }, { /* APM idle hangs */ apm_likes_to_melt, "AMI Bios", { DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), DMI_MATCH(DMI_BIOS_VERSION, "0AASNP05"), }, }, { /* Handle problems with APM on Sony Vaio PCG-N505X(DE) */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0206H"), DMI_MATCH(DMI_BIOS_DATE, "08/23/99"), }, }, { /* Handle problems with APM on Sony Vaio PCG-N505VX */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "W2K06H0"), DMI_MATCH(DMI_BIOS_DATE, "02/03/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-XG29 */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0117A0"), DMI_MATCH(DMI_BIOS_DATE, "04/25/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z600NE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0121Z1"), DMI_MATCH(DMI_BIOS_DATE, "05/11/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z600NE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "WME01Z1"), DMI_MATCH(DMI_BIOS_DATE, "08/11/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z600LEK(DE) */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0206Z3"), DMI_MATCH(DMI_BIOS_DATE, "12/25/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z505LS */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0203D0"), DMI_MATCH(DMI_BIOS_DATE, "05/12/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z505LS */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0203Z3"), DMI_MATCH(DMI_BIOS_DATE, "08/25/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z505LS (with updated BIOS) */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0209Z3"), DMI_MATCH(DMI_BIOS_DATE, "05/12/01"), }, }, { /* Handle problems with APM on Sony Vaio PCG-F104K */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0204K2"), DMI_MATCH(DMI_BIOS_DATE, "08/28/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-C1VN/C1VE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0208P1"), DMI_MATCH(DMI_BIOS_DATE, "11/09/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-C1VE */ swab_apm_power_in_minutes, "Sony VAIO", { 
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0204P1"), DMI_MATCH(DMI_BIOS_DATE, "09/12/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-C1VE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "WXPO1Z3"), DMI_MATCH(DMI_BIOS_DATE, "10/26/01"), }, }, { /* broken PM poweroff bios */ set_realmode_power_off, "Award Software v4.60 PGMA", { DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."), DMI_MATCH(DMI_BIOS_VERSION, "4.60 PGMA"), DMI_MATCH(DMI_BIOS_DATE, "134526184"), }, }, /* Generic per vendor APM settings */ { /* Allow interrupts during suspend on IBM laptops */ set_apm_ints, "IBM", { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, }, { } }; /* * Just start the APM thread. We do NOT want to do APM BIOS * calls from anything but the APM thread, if for no other reason * than the fact that we don't trust the APM BIOS. This way, * most common APM BIOS problems that lead to protection errors * etc will have at least some level of being contained... * * In short, if something bad happens, at least we have a choice * of just killing the apm thread.. */ static int __init apm_init(void) { struct desc_struct *gdt; int err; dmi_check_system(apm_dmi_table); if (apm_info.bios.version == 0 || machine_is_olpc()) { printk(KERN_INFO "apm: BIOS not found.\n"); return -ENODEV; } printk(KERN_INFO "apm: BIOS version %d.%d Flags 0x%02x (Driver version %s)\n", ((apm_info.bios.version >> 8) & 0xff), (apm_info.bios.version & 0xff), apm_info.bios.flags, driver_version); if ((apm_info.bios.flags & APM_32_BIT_SUPPORT) == 0) { printk(KERN_INFO "apm: no 32 bit BIOS support\n"); return -ENODEV; } if (allow_ints) apm_info.allow_ints = 1; if (broken_psr) apm_info.get_power_status_broken = 1; if (realmode_power_off) apm_info.realmode_power_off = 1; /* User can override, but default is to trust DMI */ if (apm_disabled != -1) apm_info.disabled = apm_disabled; /* * Fix for the Compaq Contura 3/25c which reports BIOS version 0.1 * but is reportedly a 1.0 BIOS. */ if (apm_info.bios.version == 0x001) apm_info.bios.version = 0x100; /* BIOS < 1.2 doesn't set cseg_16_len */ if (apm_info.bios.version < 0x102) apm_info.bios.cseg_16_len = 0; /* 64k */ if (debug) { printk(KERN_INFO "apm: entry %x:%x cseg16 %x dseg %x", apm_info.bios.cseg, apm_info.bios.offset, apm_info.bios.cseg_16, apm_info.bios.dseg); if (apm_info.bios.version > 0x100) printk(" cseg len %x, dseg len %x", apm_info.bios.cseg_len, apm_info.bios.dseg_len); if (apm_info.bios.version > 0x101) printk(" cseg16 len %x", apm_info.bios.cseg_16_len); printk("\n"); } if (apm_info.disabled) { pr_notice("disabled on user request.\n"); return -ENODEV; } if ((num_online_cpus() > 1) && !power_off && !smp) { pr_notice("disabled - APM is not SMP safe.\n"); apm_info.disabled = 1; return -ENODEV; } if (!acpi_disabled) { pr_notice("overridden by ACPI.\n"); apm_info.disabled = 1; return -ENODEV; } /* * Set up the long jump entry point to the APM BIOS, which is called * from inline assembly. */ apm_bios_entry.offset = apm_info.bios.offset; apm_bios_entry.segment = APM_CS; /* * The APM 1.1 BIOS is supposed to provide limit information that it * recognizes. Many machines do this correctly, but many others do * not restrict themselves to their claimed limit. When this happens, * they will cause a segmentation violation in the kernel at boot time. * Most BIOS's, however, will respect a 64k limit, so we use that. 
* * Note we only set APM segments on CPU zero, since we pin the APM * code to that CPU. */ gdt = get_cpu_gdt_rw(0); set_desc_base(&gdt[APM_CS >> 3], (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); set_desc_base(&gdt[APM_CS_16 >> 3], (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); set_desc_base(&gdt[APM_DS >> 3], (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); proc_create_single("apm", 0, NULL, proc_apm_show); kapmd_task = kthread_create(apm, NULL, "kapmd"); if (IS_ERR(kapmd_task)) { pr_err("disabled - Unable to start kernel thread\n"); err = PTR_ERR(kapmd_task); kapmd_task = NULL; remove_proc_entry("apm", NULL); return err; } wake_up_process(kapmd_task); if (num_online_cpus() > 1 && !smp) { printk(KERN_NOTICE "apm: disabled - APM is not SMP safe (power off active).\n"); return 0; } /* * Note we don't actually care if the misc_device cannot be registered. * this driver can do its job without it, even if userspace can't * control it. just log the error */ if (misc_register(&apm_device)) printk(KERN_WARNING "apm: Could not register misc device.\n"); if (HZ != 100) idle_period = (idle_period * HZ) / 100; if (idle_threshold < 100) { cpuidle_poll_state_init(&apm_idle_driver); if (!cpuidle_register_driver(&apm_idle_driver)) if (cpuidle_register_device(&apm_cpuidle_device)) cpuidle_unregister_driver(&apm_idle_driver); } return 0; } static void __exit apm_exit(void) { int error; cpuidle_unregister_device(&apm_cpuidle_device); cpuidle_unregister_driver(&apm_idle_driver); if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0) && (apm_info.connection_version > 0x0100)) { error = apm_engage_power_management(APM_DEVICE_ALL, 0); if (error) apm_error("disengage power management", error); } misc_deregister(&apm_device); remove_proc_entry("apm", NULL); if (power_off) pm_power_off = NULL; if (kapmd_task) { kthread_stop(kapmd_task); kapmd_task = NULL; } } module_init(apm_init); module_exit(apm_exit); MODULE_AUTHOR("Stephen Rothwell"); MODULE_DESCRIPTION("Advanced Power Management"); MODULE_LICENSE("GPL"); module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Enable debug mode"); module_param(power_off, bool, 0444); MODULE_PARM_DESC(power_off, "Enable power off"); module_param(bounce_interval, int, 0444); MODULE_PARM_DESC(bounce_interval, "Set the number of ticks to ignore suspend bounces"); module_param(allow_ints, bool, 0444); MODULE_PARM_DESC(allow_ints, "Allow interrupts during BIOS calls"); module_param(broken_psr, bool, 0444); MODULE_PARM_DESC(broken_psr, "BIOS has a broken GetPowerStatus call"); module_param(realmode_power_off, bool, 0444); MODULE_PARM_DESC(realmode_power_off, "Switch to real mode before powering off"); module_param(idle_threshold, int, 0444); MODULE_PARM_DESC(idle_threshold, "System idle percentage above which to make APM BIOS idle calls"); module_param(idle_period, int, 0444); MODULE_PARM_DESC(idle_period, "Period (in sec/100) over which to calculate the idle percentage"); module_param(smp, bool, 0444); MODULE_PARM_DESC(smp, "Set this to enable APM use on an SMP platform. Use with caution on older systems"); MODULE_ALIAS_MISCDEV(APM_MINOR_DEV);
linux-master
arch/x86/kernel/apm_32.c
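The /proc/apm layout documented inside proc_apm_show() above can be consumed field by field in the documented order. Below is a minimal userspace sketch, not part of the kernel tree, showing one way to read it; the buffer sizes, variable names, and error handling are illustrative assumptions rather than a supported interface.

/*
 * Hypothetical userspace reader for /proc/apm, following the field list
 * in the proc_apm_show() comment above: driver version, BIOS version,
 * flags, AC line status, battery status, battery flag, percentage,
 * remaining time and its units.
 */
#include <stdio.h>

int main(void)
{
	char driver_version[16], units[8];
	int apm_major, apm_minor, percentage, time_units;
	unsigned int flags, ac_line, bat_status, bat_flag;
	FILE *f = fopen("/proc/apm", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%15s %d.%d 0x%x 0x%x 0x%x 0x%x %d%% %d %7s",
		   driver_version, &apm_major, &apm_minor, &flags,
		   &ac_line, &bat_status, &bat_flag,
		   &percentage, &time_units, units) == 10)
		printf("BIOS %d.%d, AC %s, battery %d%%\n",
		       apm_major, apm_minor,
		       ac_line == 0x01 ? "on-line" : "off-line",
		       percentage);
	fclose(f);
	return 0;
}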
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/crash_core.h>
#include <linux/pgtable.h>

#include <asm/setup.h>

void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
#ifdef CONFIG_X86_PAE
	VMCOREINFO_CONFIG(X86_PAE);
#endif
}
linux-master
arch/x86/kernel/crash_core_32.c
// SPDX-License-Identifier: GPL-2.0 /* * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Based on * sys_sparc32 * * Copyright (C) 2000 VA Linux Co * Copyright (C) 2000 Don Dugger <[email protected]> * Copyright (C) 1999 Arun Sharma <[email protected]> * Copyright (C) 1997,1998 Jakub Jelinek ([email protected]) * Copyright (C) 1997 David S. Miller ([email protected]) * Copyright (C) 2000 Hewlett-Packard Co. * Copyright (C) 2000 David Mosberger-Tang <[email protected]> * Copyright (C) 2000,2001,2002 Andi Kleen, SuSE Labs (x86-64 port) * * These routines maintain argument size conversion between 32bit and 64bit * environment. In 2.5 most of this should be moved to a generic directory. * * This file assumes that there is a hole at the end of user address space. * * Some of the functions are LE specific currently. These are * hopefully all marked. This should be fixed. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/signal.h> #include <linux/syscalls.h> #include <linux/times.h> #include <linux/utsname.h> #include <linux/mm.h> #include <linux/uio.h> #include <linux/poll.h> #include <linux/personality.h> #include <linux/stat.h> #include <linux/rwsem.h> #include <linux/compat.h> #include <linux/vfs.h> #include <linux/ptrace.h> #include <linux/highuid.h> #include <linux/sysctl.h> #include <linux/slab.h> #include <linux/sched/task.h> #include <asm/mman.h> #include <asm/types.h> #include <linux/uaccess.h> #include <linux/atomic.h> #include <asm/vgtod.h> #include <asm/ia32.h> #define AA(__x) ((unsigned long)(__x)) SYSCALL_DEFINE3(ia32_truncate64, const char __user *, filename, unsigned long, offset_low, unsigned long, offset_high) { return ksys_truncate(filename, ((loff_t) offset_high << 32) | offset_low); } SYSCALL_DEFINE3(ia32_ftruncate64, unsigned int, fd, unsigned long, offset_low, unsigned long, offset_high) { return ksys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low); } /* warning: next two assume little endian */ SYSCALL_DEFINE5(ia32_pread64, unsigned int, fd, char __user *, ubuf, u32, count, u32, poslo, u32, poshi) { return ksys_pread64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo)); } SYSCALL_DEFINE5(ia32_pwrite64, unsigned int, fd, const char __user *, ubuf, u32, count, u32, poslo, u32, poshi) { return ksys_pwrite64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo)); } /* * Some system calls that need sign extended arguments. This could be * done by a generic wrapper. 
*/ SYSCALL_DEFINE6(ia32_fadvise64_64, int, fd, __u32, offset_low, __u32, offset_high, __u32, len_low, __u32, len_high, int, advice) { return ksys_fadvise64_64(fd, (((u64)offset_high)<<32) | offset_low, (((u64)len_high)<<32) | len_low, advice); } SYSCALL_DEFINE4(ia32_readahead, int, fd, unsigned int, off_lo, unsigned int, off_hi, size_t, count) { return ksys_readahead(fd, ((u64)off_hi << 32) | off_lo, count); } SYSCALL_DEFINE6(ia32_sync_file_range, int, fd, unsigned int, off_low, unsigned int, off_hi, unsigned int, n_low, unsigned int, n_hi, int, flags) { return ksys_sync_file_range(fd, ((u64)off_hi << 32) | off_low, ((u64)n_hi << 32) | n_low, flags); } SYSCALL_DEFINE5(ia32_fadvise64, int, fd, unsigned int, offset_lo, unsigned int, offset_hi, size_t, len, int, advice) { return ksys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo, len, advice); } SYSCALL_DEFINE6(ia32_fallocate, int, fd, int, mode, unsigned int, offset_lo, unsigned int, offset_hi, unsigned int, len_lo, unsigned int, len_hi) { return ksys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo, ((u64)len_hi << 32) | len_lo); } #ifdef CONFIG_IA32_EMULATION /* * Another set for IA32/LFS -- x86_64 struct stat is different due to * support for 64bit inode numbers. */ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat) { typeof(ubuf->st_uid) uid = 0; typeof(ubuf->st_gid) gid = 0; SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid)); SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid)); if (!user_write_access_begin(ubuf, sizeof(struct stat64))) return -EFAULT; unsafe_put_user(huge_encode_dev(stat->dev), &ubuf->st_dev, Efault); unsafe_put_user(stat->ino, &ubuf->__st_ino, Efault); unsafe_put_user(stat->ino, &ubuf->st_ino, Efault); unsafe_put_user(stat->mode, &ubuf->st_mode, Efault); unsafe_put_user(stat->nlink, &ubuf->st_nlink, Efault); unsafe_put_user(uid, &ubuf->st_uid, Efault); unsafe_put_user(gid, &ubuf->st_gid, Efault); unsafe_put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev, Efault); unsafe_put_user(stat->size, &ubuf->st_size, Efault); unsafe_put_user(stat->atime.tv_sec, &ubuf->st_atime, Efault); unsafe_put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec, Efault); unsafe_put_user(stat->mtime.tv_sec, &ubuf->st_mtime, Efault); unsafe_put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec, Efault); unsafe_put_user(stat->ctime.tv_sec, &ubuf->st_ctime, Efault); unsafe_put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec, Efault); unsafe_put_user(stat->blksize, &ubuf->st_blksize, Efault); unsafe_put_user(stat->blocks, &ubuf->st_blocks, Efault); user_access_end(); return 0; Efault: user_write_access_end(); return -EFAULT; } COMPAT_SYSCALL_DEFINE2(ia32_stat64, const char __user *, filename, struct stat64 __user *, statbuf) { struct kstat stat; int ret = vfs_stat(filename, &stat); if (!ret) ret = cp_stat64(statbuf, &stat); return ret; } COMPAT_SYSCALL_DEFINE2(ia32_lstat64, const char __user *, filename, struct stat64 __user *, statbuf) { struct kstat stat; int ret = vfs_lstat(filename, &stat); if (!ret) ret = cp_stat64(statbuf, &stat); return ret; } COMPAT_SYSCALL_DEFINE2(ia32_fstat64, unsigned int, fd, struct stat64 __user *, statbuf) { struct kstat stat; int ret = vfs_fstat(fd, &stat); if (!ret) ret = cp_stat64(statbuf, &stat); return ret; } COMPAT_SYSCALL_DEFINE4(ia32_fstatat64, unsigned int, dfd, const char __user *, filename, struct stat64 __user *, statbuf, int, flag) { struct kstat stat; int error; error = vfs_fstatat(dfd, filename, &stat, flag); if (error) return error; return cp_stat64(statbuf, 
&stat); } /* * Linux/i386 didn't use to be able to handle more than * 4 system call parameters, so these system calls used a memory * block for parameter passing.. */ struct mmap_arg_struct32 { unsigned int addr; unsigned int len; unsigned int prot; unsigned int flags; unsigned int fd; unsigned int offset; }; COMPAT_SYSCALL_DEFINE1(ia32_mmap, struct mmap_arg_struct32 __user *, arg) { struct mmap_arg_struct32 a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; if (a.offset & ~PAGE_MASK) return -EINVAL; return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset>>PAGE_SHIFT); } /* * The 32-bit clone ABI is CONFIG_CLONE_BACKWARDS */ COMPAT_SYSCALL_DEFINE5(ia32_clone, unsigned long, clone_flags, unsigned long, newsp, int __user *, parent_tidptr, unsigned long, tls_val, int __user *, child_tidptr) { struct kernel_clone_args args = { .flags = (clone_flags & ~CSIGNAL), .pidfd = parent_tidptr, .child_tid = child_tidptr, .parent_tid = parent_tidptr, .exit_signal = (clone_flags & CSIGNAL), .stack = newsp, .tls = tls_val, }; return kernel_clone(&args); } #endif /* CONFIG_IA32_EMULATION */
linux-master
arch/x86/kernel/sys_ia32.c
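The compat wrappers above all follow the same convention: a 64-bit offset or length arrives from 32-bit userspace as two 32-bit halves and is recombined as ((u64)hi << 32) | lo. The short standalone sketch below just demonstrates that arithmetic; the helper names are made up for the example.

/*
 * Illustration of the hi/lo split used by ia32_pread64() and friends.
 * combine_offset() mirrors the ((loff_t)hi << 32) | lo expression above.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t combine_offset(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

static void split_offset(uint64_t off, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)off;		/* low 32 bits */
	*hi = (uint32_t)(off >> 32);	/* high 32 bits */
}

int main(void)
{
	uint32_t lo, hi;

	split_offset(0x123456789abcdef0ULL, &lo, &hi);
	assert(combine_offset(lo, hi) == 0x123456789abcdef0ULL);
	return 0;
}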
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/user.h> #include <linux/regset.h> #include <linux/syscalls.h> #include <linux/nospec.h> #include <linux/uaccess.h> #include <asm/desc.h> #include <asm/ldt.h> #include <asm/processor.h> #include <asm/proto.h> #include <asm/gsseg.h> #include "tls.h" /* * sys_alloc_thread_area: get a yet unused TLS descriptor index. */ static int get_free_idx(void) { struct thread_struct *t = &current->thread; int idx; for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) if (desc_empty(&t->tls_array[idx])) return idx + GDT_ENTRY_TLS_MIN; return -ESRCH; } static bool tls_desc_okay(const struct user_desc *info) { /* * For historical reasons (i.e. no one ever documented how any * of the segmentation APIs work), user programs can and do * assume that a struct user_desc that's all zeros except for * entry_number means "no segment at all". This never actually * worked. In fact, up to Linux 3.19, a struct user_desc like * this would create a 16-bit read-write segment with base and * limit both equal to zero. * * That was close enough to "no segment at all" until we * hardened this function to disallow 16-bit TLS segments. Fix * it up by interpreting these zeroed segments the way that they * were almost certainly intended to be interpreted. * * The correct way to ask for "no segment at all" is to specify * a user_desc that satisfies LDT_empty. To keep everything * working, we accept both. * * Note that there's a similar kludge in modify_ldt -- look at * the distinction between modes 1 and 0x11. */ if (LDT_empty(info) || LDT_zero(info)) return true; /* * espfix is required for 16-bit data segments, but espfix * only works for LDT segments. */ if (!info->seg_32bit) return false; /* Only allow data segments in the TLS array. */ if (info->contents > 1) return false; /* * Non-present segments with DPL 3 present an interesting attack * surface. The kernel should handle such segments correctly, * but TLS is very difficult to protect in a sandbox, so prevent * such segments from being created. * * If userspace needs to remove a TLS entry, it can still delete * it outright. */ if (info->seg_not_present) return false; return true; } static void set_tls_desc(struct task_struct *p, int idx, const struct user_desc *info, int n) { struct thread_struct *t = &p->thread; struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN]; int cpu; /* * We must not get preempted while modifying the TLS. */ cpu = get_cpu(); while (n-- > 0) { if (LDT_empty(info) || LDT_zero(info)) memset(desc, 0, sizeof(*desc)); else fill_ldt(desc, info); ++info; ++desc; } if (t == &current->thread) load_TLS(t, cpu); put_cpu(); } /* * Set a given TLS descriptor: */ int do_set_thread_area(struct task_struct *p, int idx, struct user_desc __user *u_info, int can_allocate) { struct user_desc info; unsigned short __maybe_unused sel, modified_sel; if (copy_from_user(&info, u_info, sizeof(info))) return -EFAULT; if (!tls_desc_okay(&info)) return -EINVAL; if (idx == -1) idx = info.entry_number; /* * index -1 means the kernel should try to find and * allocate an empty descriptor: */ if (idx == -1 && can_allocate) { idx = get_free_idx(); if (idx < 0) return idx; if (put_user(idx, &u_info->entry_number)) return -EFAULT; } if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; set_tls_desc(p, idx, &info, 1); /* * If DS, ES, FS, or GS points to the modified segment, forcibly * refresh it. 
Only needed on x86_64 because x86_32 reloads them * on return to user mode. */ modified_sel = (idx << 3) | 3; if (p == current) { #ifdef CONFIG_X86_64 savesegment(ds, sel); if (sel == modified_sel) loadsegment(ds, sel); savesegment(es, sel); if (sel == modified_sel) loadsegment(es, sel); savesegment(fs, sel); if (sel == modified_sel) loadsegment(fs, sel); #endif savesegment(gs, sel); if (sel == modified_sel) load_gs_index(sel); } else { #ifdef CONFIG_X86_64 if (p->thread.fsindex == modified_sel) p->thread.fsbase = info.base_addr; if (p->thread.gsindex == modified_sel) p->thread.gsbase = info.base_addr; #endif } return 0; } SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, u_info) { return do_set_thread_area(current, -1, u_info, 1); } /* * Get the current Thread-Local Storage area: */ static void fill_user_desc(struct user_desc *info, int idx, const struct desc_struct *desc) { memset(info, 0, sizeof(*info)); info->entry_number = idx; info->base_addr = get_desc_base(desc); info->limit = get_desc_limit(desc); info->seg_32bit = desc->d; info->contents = desc->type >> 2; info->read_exec_only = !(desc->type & 2); info->limit_in_pages = desc->g; info->seg_not_present = !desc->p; info->useable = desc->avl; #ifdef CONFIG_X86_64 info->lm = desc->l; #endif } int do_get_thread_area(struct task_struct *p, int idx, struct user_desc __user *u_info) { struct user_desc info; int index; if (idx == -1 && get_user(idx, &u_info->entry_number)) return -EFAULT; if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; index = idx - GDT_ENTRY_TLS_MIN; index = array_index_nospec(index, GDT_ENTRY_TLS_MAX - GDT_ENTRY_TLS_MIN + 1); fill_user_desc(&info, idx, &p->thread.tls_array[index]); if (copy_to_user(u_info, &info, sizeof(info))) return -EFAULT; return 0; } SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, u_info) { return do_get_thread_area(current, -1, u_info); } int regset_tls_active(struct task_struct *target, const struct user_regset *regset) { struct thread_struct *t = &target->thread; int n = GDT_ENTRY_TLS_ENTRIES; while (n > 0 && desc_empty(&t->tls_array[n - 1])) --n; return n; } int regset_tls_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { const struct desc_struct *tls; struct user_desc v; int pos; for (pos = 0, tls = target->thread.tls_array; to.left; pos++, tls++) { fill_user_desc(&v, GDT_ENTRY_TLS_MIN + pos, tls); membuf_write(&to, &v, sizeof(v)); } return 0; } int regset_tls_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; const struct user_desc *info; int i; if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || (pos % sizeof(struct user_desc)) != 0 || (count % sizeof(struct user_desc)) != 0) return -EINVAL; if (kbuf) info = kbuf; else if (__copy_from_user(infobuf, ubuf, count)) return -EFAULT; else info = infobuf; for (i = 0; i < count / sizeof(struct user_desc); i++) if (!tls_desc_okay(info + i)) return -EINVAL; set_tls_desc(target, GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)), info, count / sizeof(struct user_desc)); return 0; }
linux-master
arch/x86/kernel/tls.c
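do_set_thread_area() above documents that an entry_number of -1 asks the kernel to pick a free TLS slot and write the chosen index back to userspace. The sketch below shows that convention from a 32-bit x86 program; it assumes SYS_set_thread_area is available on the build and keeps error handling minimal.

/*
 * Hypothetical userspace sketch: install a small TLS data segment and
 * let the kernel choose the GDT entry (entry_number == -1). The
 * descriptor is a plain 32-bit data segment, which satisfies the
 * checks in tls_desc_okay() above.
 */
#include <asm/ldt.h>		/* struct user_desc */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	static char tls_block[256];
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = -1;			/* kernel allocates a slot */
	desc.base_addr = (unsigned int)(unsigned long)tls_block;
	desc.limit = sizeof(tls_block) - 1;
	desc.seg_32bit = 1;			/* 16-bit TLS segments are refused */

	if (syscall(SYS_set_thread_area, &desc) == 0)
		printf("TLS descriptor installed in entry %u\n",
		       desc.entry_number);
	else
		perror("set_thread_area");
	return 0;
}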
/* * Copyright (C) 1995 Linus Torvalds * * Pentium III FXSR, SSE support * Gareth Hughes <[email protected]>, May 2000 */ /* * This file handles the architecture-dependent parts of process handling.. */ #include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/elfcore.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/user.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/reboot.h> #include <linux/mc146818rtc.h> #include <linux/export.h> #include <linux/kallsyms.h> #include <linux/ptrace.h> #include <linux/personality.h> #include <linux/percpu.h> #include <linux/prctl.h> #include <linux/ftrace.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/kdebug.h> #include <linux/syscalls.h> #include <asm/ldt.h> #include <asm/processor.h> #include <asm/fpu/sched.h> #include <asm/desc.h> #include <linux/err.h> #include <asm/tlbflush.h> #include <asm/cpu.h> #include <asm/debugreg.h> #include <asm/switch_to.h> #include <asm/vm86.h> #include <asm/resctrl.h> #include <asm/proto.h> #include "process.h" void __show_regs(struct pt_regs *regs, enum show_regs_mode mode, const char *log_lvl) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; unsigned long d0, d1, d2, d3, d6, d7; unsigned short gs; savesegment(gs, gs); show_ip(regs, log_lvl); printk("%sEAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", log_lvl, regs->ax, regs->bx, regs->cx, regs->dx); printk("%sESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", log_lvl, regs->si, regs->di, regs->bp, regs->sp); printk("%sDS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n", log_lvl, (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, regs->ss, regs->flags); if (mode != SHOW_REGS_ALL) return; cr0 = read_cr0(); cr2 = read_cr2(); cr3 = __read_cr3(); cr4 = __read_cr4(); printk("%sCR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", log_lvl, cr0, cr2, cr3, cr4); get_debugreg(d0, 0); get_debugreg(d1, 1); get_debugreg(d2, 2); get_debugreg(d3, 3); get_debugreg(d6, 6); get_debugreg(d7, 7); /* Only print out debug registers if they are in their non-default state. */ if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) && (d6 == DR6_RESERVED) && (d7 == 0x400)) return; printk("%sDR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n", log_lvl, d0, d1, d2, d3); printk("%sDR6: %08lx DR7: %08lx\n", log_lvl, d6, d7); } void release_thread(struct task_struct *dead_task) { BUG_ON(dead_task->mm); release_vm86_irqs(dead_task); } void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { loadsegment(gs, 0); regs->fs = 0; regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; regs->cs = __USER_CS; regs->ip = new_ip; regs->sp = new_sp; regs->flags = X86_EFLAGS_IF; } EXPORT_SYMBOL_GPL(start_thread); /* * switch_to(x,y) should switch tasks from x to y. * * We fsave/fwait so that an exception goes off at the right time * (as a call from the fsave or fwait in effect) rather than to * the wrong process. Lazy FP saving no longer makes any sense * with modern CPU's, and this simplifies a lot of things (SMP * and UP become the same). * * NOTE! We used to use the x86 hardware context switching. The * reason for not using it any more becomes apparent when you * try to recover gracefully from saved state that is no longer * valid (stale segment register values in particular). 
With the * hardware task-switch, there is no way to fix up bad state in * a reasonable manner. * * The fact that Intel documents the hardware task-switching to * be slow is a fairly red herring - this code is not noticeably * faster. However, there _is_ some room for improvement here, * so the performance issues may eventually be a valid point. * More important, however, is the fact that this allows us much * more flexibility. * * The return value (in %ax) will be the "prev" task after * the task-switch, and shows up in ret_from_fork in entry.S, * for example. */ __visible __notrace_funcgraph struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; struct fpu *prev_fpu = &prev->fpu; int cpu = smp_processor_id(); /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ if (!test_thread_flag(TIF_NEED_FPU_LOAD)) switch_fpu_prepare(prev_fpu, cpu); /* * Save away %gs. No need to save %fs, as it was saved on the * stack on entry. No need to save %es and %ds, as those are * always kernel segments while inside the kernel. Doing this * before setting the new TLS descriptors avoids the situation * where we temporarily have non-reloadable segments in %fs * and %gs. This could be an issue if the NMI handler ever * used %fs or %gs (it does not today), or if the kernel is * running inside of a hypervisor layer. */ savesegment(gs, prev->gs); /* * Load the per-thread Thread-Local Storage descriptor. */ load_TLS(next, cpu); switch_to_extra(prev_p, next_p); /* * Leave lazy mode, flushing any hypercalls made here. * This must be done before restoring TLS segments so * the GDT and LDT are properly updated. */ arch_end_context_switch(next_p); /* * Reload esp0 and pcpu_hot.top_of_stack. This changes * current_thread_info(). Refresh the SYSENTER configuration in * case prev or next is vm86. */ update_task_stack(next_p); refresh_sysenter_cs(next); this_cpu_write(pcpu_hot.top_of_stack, (unsigned long)task_stack_page(next_p) + THREAD_SIZE); /* * Restore %gs if needed (which is common) */ if (prev->gs | next->gs) loadsegment(gs, next->gs); raw_cpu_write(pcpu_hot.current_task, next_p); switch_fpu_finish(); /* Load the Intel cache allocation PQR MSR. */ resctrl_sched_in(next_p); return prev_p; } SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2) { return do_arch_prctl_common(option, arg2); }
linux-master
arch/x86/kernel/process_32.c
// SPDX-License-Identifier: GPL-2.0 /* * This is a good place to put board specific reboot fixups. * * List of supported fixups: * geode-gx1/cs5530a - Jaya Kumar <[email protected]> * geode-gx/lx/cs5536 - Andres Salomon <[email protected]> * */ #include <asm/delay.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <asm/reboot_fixups.h> #include <asm/msr.h> #include <linux/cs5535.h> static void cs5530a_warm_reset(struct pci_dev *dev) { /* writing 1 to the reset control register, 0x44 causes the cs5530a to perform a system warm reset */ pci_write_config_byte(dev, 0x44, 0x1); udelay(50); /* shouldn't get here but be safe and spin-a-while */ return; } static void cs5536_warm_reset(struct pci_dev *dev) { /* writing 1 to the LSB of this MSR causes a hard reset */ wrmsrl(MSR_DIVIL_SOFT_RESET, 1ULL); udelay(50); /* shouldn't get here but be safe and spin a while */ } static void rdc321x_reset(struct pci_dev *dev) { unsigned i; /* Voluntary reset the watchdog timer */ outl(0x80003840, 0xCF8); /* Generate a CPU reset on next tick */ i = inl(0xCFC); /* Use the minimum timer resolution */ i |= 0x1600; outl(i, 0xCFC); outb(1, 0x92); } static void ce4100_reset(struct pci_dev *dev) { int i; for (i = 0; i < 10; i++) { outb(0x2, 0xcf9); udelay(50); } } struct device_fixup { unsigned int vendor; unsigned int device; void (*reboot_fixup)(struct pci_dev *); }; /* * PCI ids solely used for fixups_table go here */ #define PCI_DEVICE_ID_INTEL_CE4100 0x0708 static const struct device_fixup fixups_table[] = { { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset }, { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, cs5536_warm_reset }, { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE, cs5530a_warm_reset }, { PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030, rdc321x_reset }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CE4100, ce4100_reset }, }; /* * we see if any fixup is available for our current hardware. if there * is a fixup, we call it and we expect to never return from it. if we * do return, we keep looking and then eventually fall back to the * standard mach_reboot on return. */ void mach_reboot_fixups(void) { const struct device_fixup *cur; struct pci_dev *dev; int i; /* we can be called from sysrq-B code. In such a case it is * prohibited to dig PCI */ if (in_interrupt()) return; for (i=0; i < ARRAY_SIZE(fixups_table); i++) { cur = &(fixups_table[i]); dev = pci_get_device(cur->vendor, cur->device, NULL); if (!dev) continue; cur->reboot_fixup(dev); pci_dev_put(dev); } }
linux-master
arch/x86/kernel/reboot_fixups_32.c
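mach_reboot_fixups() above is a small table-driven dispatcher: each entry pairs a PCI vendor/device ID with a board-specific callback, and the loop keeps scanning even if a callback returns. The fragment below restates that pattern outside the kernel with made-up IDs and a placeholder handler, purely as an illustration of the shape of the table.

/*
 * Sketch of the fixup-table pattern: iterate the table and run the
 * handler for every matching ID pair. IDs and handler are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

struct fixup_entry {
	unsigned int vendor;
	unsigned int device;
	void (*handler)(void);
};

static void example_warm_reset(void)
{
	printf("would issue a device-specific warm reset here\n");
}

static const struct fixup_entry table[] = {
	{ 0x1078, 0x0100, example_warm_reset },	/* hypothetical IDs */
};

static void run_fixups(unsigned int vendor, unsigned int device)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].vendor == vendor && table[i].device == device)
			table[i].handler();	/* keep looking if it returns */
	}
}

int main(void)
{
	run_fixups(0x1078, 0x0100);
	return 0;
}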
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs * * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes * 2000-2002 x86-64 support by Andi Kleen */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/kstrtox.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/personality.h> #include <linux/uaccess.h> #include <linux/user-return-notifier.h> #include <linux/uprobes.h> #include <linux/context_tracking.h> #include <linux/entry-common.h> #include <linux/syscalls.h> #include <asm/processor.h> #include <asm/ucontext.h> #include <asm/fpu/signal.h> #include <asm/fpu/xstate.h> #include <asm/vdso.h> #include <asm/mce.h> #include <asm/sighandling.h> #include <asm/vm86.h> #include <asm/syscall.h> #include <asm/sigframe.h> #include <asm/signal.h> #include <asm/shstk.h> static inline int is_ia32_compat_frame(struct ksignal *ksig) { return IS_ENABLED(CONFIG_IA32_EMULATION) && ksig->ka.sa.sa_flags & SA_IA32_ABI; } static inline int is_ia32_frame(struct ksignal *ksig) { return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig); } static inline int is_x32_frame(struct ksignal *ksig) { return IS_ENABLED(CONFIG_X86_X32_ABI) && ksig->ka.sa.sa_flags & SA_X32_ABI; } /* * Set up a signal frame. */ /* x86 ABI requires 16-byte alignment */ #define FRAME_ALIGNMENT 16UL #define MAX_FRAME_PADDING (FRAME_ALIGNMENT - 1) /* * Determine which stack to use.. */ void __user * get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size, void __user **fpstate) { struct k_sigaction *ka = &ksig->ka; int ia32_frame = is_ia32_frame(ksig); /* Default to using normal stack */ bool nested_altstack = on_sig_stack(regs->sp); bool entering_altstack = false; unsigned long math_size = 0; unsigned long sp = regs->sp; unsigned long buf_fx = 0; /* redzone */ if (!ia32_frame) sp -= 128; /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { /* * This checks nested_altstack via sas_ss_flags(). Sensible * programs use SS_AUTODISARM, which disables that check, and * programs that don't use SS_AUTODISARM get compatible. */ if (sas_ss_flags(sp) == 0) { sp = current->sas_ss_sp + current->sas_ss_size; entering_altstack = true; } } else if (ia32_frame && !nested_altstack && regs->ss != __USER_DS && !(ka->sa.sa_flags & SA_RESTORER) && ka->sa.sa_restorer) { /* This is the legacy signal stack switching. */ sp = (unsigned long) ka->sa.sa_restorer; entering_altstack = true; } sp = fpu__alloc_mathframe(sp, ia32_frame, &buf_fx, &math_size); *fpstate = (void __user *)sp; sp -= frame_size; if (ia32_frame) /* * Align the stack pointer according to the i386 ABI, * i.e. so that on function entry ((sp + 4) & 15) == 0. */ sp = ((sp + 4) & -FRAME_ALIGNMENT) - 4; else sp = round_down(sp, FRAME_ALIGNMENT) - 8; /* * If we are on the alternate signal stack and would overflow it, don't. * Return an always-bogus address instead so we will die with SIGSEGV. 
*/ if (unlikely((nested_altstack || entering_altstack) && !__on_sig_stack(sp))) { if (show_unhandled_signals && printk_ratelimit()) pr_info("%s[%d] overflowed sigaltstack\n", current->comm, task_pid_nr(current)); return (void __user *)-1L; } /* save i387 and extended state */ if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size)) return (void __user *)-1L; return (void __user *)sp; } /* * There are four different struct types for signal frame: sigframe_ia32, * rt_sigframe_ia32, rt_sigframe_x32, and rt_sigframe. Use the worst case * -- the largest size. It means the size for 64-bit apps is a bit more * than needed, but this keeps the code simple. */ #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) # define MAX_FRAME_SIGINFO_UCTXT_SIZE sizeof(struct sigframe_ia32) #else # define MAX_FRAME_SIGINFO_UCTXT_SIZE sizeof(struct rt_sigframe) #endif /* * The FP state frame contains an XSAVE buffer which must be 64-byte aligned. * If a signal frame starts at an unaligned address, extra space is required. * This is the max alignment padding, conservatively. */ #define MAX_XSAVE_PADDING 63UL /* * The frame data is composed of the following areas and laid out as: * * ------------------------- * | alignment padding | * ------------------------- * | (f)xsave frame | * ------------------------- * | fsave header | * ------------------------- * | alignment padding | * ------------------------- * | siginfo + ucontext | * ------------------------- */ /* max_frame_size tells userspace the worst case signal stack size. */ static unsigned long __ro_after_init max_frame_size; static unsigned int __ro_after_init fpu_default_state_size; static int __init init_sigframe_size(void) { fpu_default_state_size = fpu__get_fpstate_size(); max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING; max_frame_size += fpu_default_state_size + MAX_XSAVE_PADDING; /* Userspace expects an aligned size. */ max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT); pr_info("max sigframe size: %lu\n", max_frame_size); return 0; } early_initcall(init_sigframe_size); unsigned long get_sigframe_size(void) { return max_frame_size; } static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) { /* Perform fixup for the pre-signal frame. */ rseq_signal_deliver(ksig, regs); /* Set up the stack frame */ if (is_ia32_frame(ksig)) { if (ksig->ka.sa.sa_flags & SA_SIGINFO) return ia32_setup_rt_frame(ksig, regs); else return ia32_setup_frame(ksig, regs); } else if (is_x32_frame(ksig)) { return x32_setup_rt_frame(ksig, regs); } else { return x64_setup_rt_frame(ksig, regs); } } static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { bool stepping, failed; struct fpu *fpu = &current->thread.fpu; if (v8086_mode(regs)) save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL); /* Are we from a system call? */ if (syscall_get_nr(current, regs) != -1) { /* If so, check system call restarting.. */ switch (syscall_get_error(current, regs)) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->ax = -EINTR; break; case -ERESTARTSYS: if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { regs->ax = -EINTR; break; } fallthrough; case -ERESTARTNOINTR: regs->ax = regs->orig_ax; regs->ip -= 2; break; } } /* * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now * so that register information in the sigcontext is correct and * then notify the tracer before entering the signal handler. 
*/ stepping = test_thread_flag(TIF_SINGLESTEP); if (stepping) user_disable_single_step(current); failed = (setup_rt_frame(ksig, regs) < 0); if (!failed) { /* * Clear the direction flag as per the ABI for function entry. * * Clear RF when entering the signal handler, because * it might disable possible debug exception from the * signal handler. * * Clear TF for the case when it wasn't set by debugger to * avoid the recursive send_sigtrap() in SIGTRAP handler. */ regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF); /* * Ensure the signal handler starts with the new fpu state. */ fpu__clear_user_states(fpu); } signal_setup_done(failed, ksig, stepping); } static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) { #ifdef CONFIG_IA32_EMULATION if (current->restart_block.arch_data & TS_COMPAT) return __NR_ia32_restart_syscall; #endif #ifdef CONFIG_X86_X32_ABI return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT); #else return __NR_restart_syscall; #endif } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ void arch_do_signal_or_restart(struct pt_regs *regs) { struct ksignal ksig; if (get_signal(&ksig)) { /* Whee! Actually deliver the signal. */ handle_signal(&ksig, regs); return; } /* Did we come from a system call? */ if (syscall_get_nr(current, regs) != -1) { /* Restart the system call - no handlers present */ switch (syscall_get_error(current, regs)) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: regs->ax = regs->orig_ax; regs->ip -= 2; break; case -ERESTART_RESTARTBLOCK: regs->ax = get_nr_restart_syscall(regs); regs->ip -= 2; break; } } /* * If there's no signal to deliver, we just put the saved sigmask * back. */ restore_saved_sigmask(); } void signal_fault(struct pt_regs *regs, void __user *frame, char *where) { struct task_struct *me = current; if (show_unhandled_signals && printk_ratelimit()) { printk("%s" "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, me->comm, me->pid, where, frame, regs->ip, regs->sp, regs->orig_ax); print_vma_addr(KERN_CONT " in ", regs->ip); pr_cont("\n"); } force_sig(SIGSEGV); } #ifdef CONFIG_DYNAMIC_SIGFRAME #ifdef CONFIG_STRICT_SIGALTSTACK_SIZE static bool strict_sigaltstack_size __ro_after_init = true; #else static bool strict_sigaltstack_size __ro_after_init = false; #endif static int __init strict_sas_size(char *arg) { return kstrtobool(arg, &strict_sigaltstack_size) == 0; } __setup("strict_sas_size", strict_sas_size); /* * MINSIGSTKSZ is 2048 and can't be changed despite the fact that AVX512 * exceeds that size already. As such programs might never use the * sigaltstack they just continued to work. While always checking against * the real size would be correct, this might be considered a regression. * * Therefore avoid the sanity check, unless enforced by kernel * configuration or command line option. * * When dynamic FPU features are supported, the check is also enforced when * the task has permissions to use dynamic features. Tasks which have no * permission are checked against the size of the non-dynamic feature set * if strict checking is enabled. This avoids forcing all tasks on the * system to allocate large sigaltstacks even if they are never going * to use a dynamic feature. 
As this is serialized via sighand::siglock * any permission request for a dynamic feature either happened already * or will see the newly install sigaltstack size in the permission checks. */ bool sigaltstack_size_valid(size_t ss_size) { unsigned long fsize = max_frame_size - fpu_default_state_size; u64 mask; lockdep_assert_held(&current->sighand->siglock); if (!fpu_state_size_dynamic() && !strict_sigaltstack_size) return true; fsize += current->group_leader->thread.fpu.perm.__user_state_size; if (likely(ss_size > fsize)) return true; if (strict_sigaltstack_size) return ss_size > fsize; mask = current->group_leader->thread.fpu.perm.__state_perm; if (mask & XFEATURE_MASK_USER_DYNAMIC) return ss_size > fsize; return true; } #endif /* CONFIG_DYNAMIC_SIGFRAME */
linux-master
arch/x86/kernel/signal.c
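The sigaltstack sizing logic above has a user-space counterpart: a program that wants its handlers to keep working with large xstate frames (AVX-512, AMX) should size its alternate stack from the kernel-reported minimum rather than the historical MINSIGSTKSZ. A minimal sketch, assuming a kernel and libc new enough to expose AT_MINSIGSTKSZ in the auxiliary vector (the fallback value 51 below is an assumption taken from the current uapi headers, not part of the file above):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/auxv.h>
#include <unistd.h>

#ifndef AT_MINSIGSTKSZ
#define AT_MINSIGSTKSZ 51	/* assumed to match the kernel's auxv tag */
#endif

static void on_sigusr1(int sig)
{
	const char msg[] = "handler ran on the alternate stack\n";

	(void)sig;
	/* async-signal-safe output only */
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
}

int main(void)
{
	unsigned long min = getauxval(AT_MINSIGSTKSZ);
	stack_t ss = { 0 };
	struct sigaction sa = { 0 };

	/* Prefer the kernel-reported minimum; fall back to the libc constant. */
	ss.ss_size = min > SIGSTKSZ ? min : SIGSTKSZ;
	ss.ss_sp = malloc(ss.ss_size);
	if (!ss.ss_sp)
		return 1;

	/* An undersized ss_size is rejected here; that rejection is the
	 * user-visible side of the checks in sigaltstack_size_valid(). */
	if (sigaltstack(&ss, NULL)) {
		perror("sigaltstack");
		return 1;
	}

	sa.sa_handler = on_sigusr1;
	sa.sa_flags = SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	return 0;
}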
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar * * This file contains the lowest level x86_64-specific interrupt * entry and irq statistics code. All the remaining irq logic is * done by the generic kernel/irq/ code and in the * x86_64-specific irq controller code. (e.g. i8259.c and * io_apic.c.) */ #include <linux/kernel_stat.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/seq_file.h> #include <linux/delay.h> #include <linux/ftrace.h> #include <linux/uaccess.h> #include <linux/smp.h> #include <linux/sched/task_stack.h> #include <asm/cpu_entry_area.h> #include <asm/softirq_stack.h> #include <asm/irq_stack.h> #include <asm/io_apic.h> #include <asm/apic.h> DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible; DECLARE_INIT_PER_CPU(irq_stack_backing_store); #ifdef CONFIG_VMAP_STACK /* * VMAP the backing store with guard pages */ static int map_irq_stack(unsigned int cpu) { char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu); struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE]; void *va; int i; for (i = 0; i < IRQ_STACK_SIZE / PAGE_SIZE; i++) { phys_addr_t pa = per_cpu_ptr_to_phys(stack + (i << PAGE_SHIFT)); pages[i] = pfn_to_page(pa >> PAGE_SHIFT); } va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL); if (!va) return -ENOMEM; /* Store actual TOS to avoid adjustment in the hotpath */ per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8; return 0; } #else /* * If VMAP stacks are disabled due to KASAN, just use the per cpu * backing store without guard pages. */ static int map_irq_stack(unsigned int cpu) { void *va = per_cpu_ptr(&irq_stack_backing_store, cpu); /* Store actual TOS to avoid adjustment in the hotpath */ per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8; return 0; } #endif int irq_init_percpu_irqstack(unsigned int cpu) { if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu)) return 0; return map_irq_stack(cpu); }
linux-master
arch/x86/kernel/irq_64.c
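The VMAP path above exists so that each IRQ stack is surrounded by guard pages and an overflow faults immediately instead of silently corrupting the adjacent per-CPU data. As an illustration only, here is a user-space analogue of the guard-page idea built on mmap/mprotect; it is not the kernel's vmap machinery, just the same concept in miniature:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t size = 16 * 1024;	/* 16 KiB "IRQ stack" for the demo */

	char *base = mmap(NULL, size, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Lowest page becomes the guard page: any access to it now faults. */
	if (mprotect(base, page, PROT_NONE)) {
		perror("mprotect");
		return 1;
	}

	/* Top of stack minus 8 bytes, mirroring the TOS value stored in
	 * hardirq_stack_ptr above. */
	char *tos = base + size - 8;
	printf("usable stack: %p..%p (guard page at %p)\n",
	       (void *)(base + page), (void *)tos, (void *)base);
	return 0;
}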
// SPDX-License-Identifier: GPL-2.0-or-later /* * 8237A DMA controller suspend functions. * * Written by Pierre Ossman, 2005. */ #include <linux/dmi.h> #include <linux/init.h> #include <linux/syscore_ops.h> #include <asm/dma.h> #include <asm/x86_init.h> /* * This module just handles suspend/resume issues with the * 8237A DMA controller (used for ISA and LPC). * Allocation is handled in kernel/dma.c and normal usage is * in asm/dma.h. */ static void i8237A_resume(void) { unsigned long flags; int i; flags = claim_dma_lock(); dma_outb(0, DMA1_RESET_REG); dma_outb(0, DMA2_RESET_REG); for (i = 0; i < 8; i++) { set_dma_addr(i, 0x000000); /* DMA count is a bit weird so this is not 0 */ set_dma_count(i, 1); } /* Enable cascade DMA or channel 0-3 won't work */ enable_dma(4); release_dma_lock(flags); } static struct syscore_ops i8237_syscore_ops = { .resume = i8237A_resume, }; static int __init i8237A_init_ops(void) { /* * From SKL PCH onwards, the legacy DMA device is removed in which the * I/O ports (81h-83h, 87h, 89h-8Bh, 8Fh) related to it are removed * as well. All removed ports must return 0xff for a inb() request. * * Note: DMA_PAGE_2 (port 0x81) should not be checked for detecting * the presence of DMA device since it may be used by BIOS to decode * LPC traffic for POST codes. Original LPC only decodes one byte of * port 0x80 but some BIOS may choose to enhance PCH LPC port 0x8x * decoding. */ if (dma_inb(DMA_PAGE_0) == 0xFF) return -ENODEV; /* * It is not required to load this driver as newer SoC may not * support 8237 DMA or bus mastering from LPC. Platform firmware * must announce the support for such legacy devices via * ACPI_FADT_LEGACY_DEVICES field in FADT table. */ if (x86_pnpbios_disabled() && dmi_get_bios_year() >= 2017) return -ENODEV; register_syscore_ops(&i8237_syscore_ops); return 0; } device_initcall(i8237A_init_ops);
linux-master
arch/x86/kernel/i8237.c
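The second bail-out above keys off the BIOS year reported by DMI. A small user-space sketch of that heuristic follows; it assumes the dmi-id sysfs interface is available at /sys/class/dmi/id/bios_date (usually formatted MM/DD/YYYY) and deliberately ignores the x86_pnpbios_disabled() half of the kernel's check:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[64] = "";
	int year = 0;
	FILE *f = fopen("/sys/class/dmi/id/bios_date", "r");

	if (!f) {
		perror("bios_date");
		return 1;
	}
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* The year is the last '/'-separated field of the date string. */
	char *slash = strrchr(buf, '/');
	if (slash)
		year = atoi(slash + 1);

	printf("BIOS year: %d -> legacy 8237 suspend ops %s be registered\n",
	       year, year >= 2017 ? "would NOT" : "would");
	return 0;
}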
// SPDX-License-Identifier: GPL-2.0-only /* * Kexec bzImage loader * * Copyright (C) 2014 Red Hat Inc. * Authors: * Vivek Goyal <[email protected]> */ #define pr_fmt(fmt) "kexec-bzImage64: " fmt #include <linux/string.h> #include <linux/printk.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/kexec.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/efi.h> #include <linux/random.h> #include <asm/bootparam.h> #include <asm/setup.h> #include <asm/crash.h> #include <asm/efi.h> #include <asm/e820/api.h> #include <asm/kexec-bzimage64.h> #define MAX_ELFCOREHDR_STR_LEN 30 /* elfcorehdr=0x<64bit-value> */ /* * Defines lowest physical address for various segments. Not sure where * exactly these limits came from. Current bzimage64 loader in kexec-tools * uses these so I am retaining it. It can be changed over time as we gain * more insight. */ #define MIN_PURGATORY_ADDR 0x3000 #define MIN_BOOTPARAM_ADDR 0x3000 #define MIN_KERNEL_LOAD_ADDR 0x100000 #define MIN_INITRD_LOAD_ADDR 0x1000000 /* * This is a place holder for all boot loader specific data structure which * gets allocated in one call but gets freed much later during cleanup * time. Right now there is only one field but it can grow as need be. */ struct bzimage64_data { /* * Temporary buffer to hold bootparams buffer. This should be * freed once the bootparam segment has been loaded. */ void *bootparams_buf; }; static int setup_initrd(struct boot_params *params, unsigned long initrd_load_addr, unsigned long initrd_len) { params->hdr.ramdisk_image = initrd_load_addr & 0xffffffffUL; params->hdr.ramdisk_size = initrd_len & 0xffffffffUL; params->ext_ramdisk_image = initrd_load_addr >> 32; params->ext_ramdisk_size = initrd_len >> 32; return 0; } static int setup_cmdline(struct kimage *image, struct boot_params *params, unsigned long bootparams_load_addr, unsigned long cmdline_offset, char *cmdline, unsigned long cmdline_len) { char *cmdline_ptr = ((char *)params) + cmdline_offset; unsigned long cmdline_ptr_phys, len = 0; uint32_t cmdline_low_32, cmdline_ext_32; if (image->type == KEXEC_TYPE_CRASH) { len = sprintf(cmdline_ptr, "elfcorehdr=0x%lx ", image->elf_load_addr); } memcpy(cmdline_ptr + len, cmdline, cmdline_len); cmdline_len += len; cmdline_ptr[cmdline_len - 1] = '\0'; pr_debug("Final command line is: %s\n", cmdline_ptr); cmdline_ptr_phys = bootparams_load_addr + cmdline_offset; cmdline_low_32 = cmdline_ptr_phys & 0xffffffffUL; cmdline_ext_32 = cmdline_ptr_phys >> 32; params->hdr.cmd_line_ptr = cmdline_low_32; if (cmdline_ext_32) params->ext_cmd_line_ptr = cmdline_ext_32; return 0; } static int setup_e820_entries(struct boot_params *params) { unsigned int nr_e820_entries; nr_e820_entries = e820_table_kexec->nr_entries; /* TODO: Pass entries more than E820_MAX_ENTRIES_ZEROPAGE in bootparams setup data */ if (nr_e820_entries > E820_MAX_ENTRIES_ZEROPAGE) nr_e820_entries = E820_MAX_ENTRIES_ZEROPAGE; params->e820_entries = nr_e820_entries; memcpy(&params->e820_table, &e820_table_kexec->entries, nr_e820_entries*sizeof(struct e820_entry)); return 0; } enum { RNG_SEED_LENGTH = 32 }; static void setup_rng_seed(struct boot_params *params, unsigned long params_load_addr, unsigned int rng_seed_setup_data_offset) { struct setup_data *sd = (void *)params + rng_seed_setup_data_offset; unsigned long setup_data_phys; if (!rng_is_initialized()) return; sd->type = SETUP_RNG_SEED; sd->len = RNG_SEED_LENGTH; get_random_bytes(sd->data, RNG_SEED_LENGTH); setup_data_phys = params_load_addr + rng_seed_setup_data_offset; sd->next = 
params->hdr.setup_data; params->hdr.setup_data = setup_data_phys; } #ifdef CONFIG_EFI static int setup_efi_info_memmap(struct boot_params *params, unsigned long params_load_addr, unsigned int efi_map_offset, unsigned int efi_map_sz) { void *efi_map = (void *)params + efi_map_offset; unsigned long efi_map_phys_addr = params_load_addr + efi_map_offset; struct efi_info *ei = &params->efi_info; if (!efi_map_sz) return 0; efi_runtime_map_copy(efi_map, efi_map_sz); ei->efi_memmap = efi_map_phys_addr & 0xffffffff; ei->efi_memmap_hi = efi_map_phys_addr >> 32; ei->efi_memmap_size = efi_map_sz; return 0; } static int prepare_add_efi_setup_data(struct boot_params *params, unsigned long params_load_addr, unsigned int efi_setup_data_offset) { unsigned long setup_data_phys; struct setup_data *sd = (void *)params + efi_setup_data_offset; struct efi_setup_data *esd = (void *)sd + sizeof(struct setup_data); esd->fw_vendor = efi_fw_vendor; esd->tables = efi_config_table; esd->smbios = efi.smbios; sd->type = SETUP_EFI; sd->len = sizeof(struct efi_setup_data); /* Add setup data */ setup_data_phys = params_load_addr + efi_setup_data_offset; sd->next = params->hdr.setup_data; params->hdr.setup_data = setup_data_phys; return 0; } static int setup_efi_state(struct boot_params *params, unsigned long params_load_addr, unsigned int efi_map_offset, unsigned int efi_map_sz, unsigned int efi_setup_data_offset) { struct efi_info *current_ei = &boot_params.efi_info; struct efi_info *ei = &params->efi_info; if (!efi_enabled(EFI_RUNTIME_SERVICES)) return 0; if (!current_ei->efi_memmap_size) return 0; params->secure_boot = boot_params.secure_boot; ei->efi_loader_signature = current_ei->efi_loader_signature; ei->efi_systab = current_ei->efi_systab; ei->efi_systab_hi = current_ei->efi_systab_hi; ei->efi_memdesc_version = current_ei->efi_memdesc_version; ei->efi_memdesc_size = efi_get_runtime_map_desc_size(); setup_efi_info_memmap(params, params_load_addr, efi_map_offset, efi_map_sz); prepare_add_efi_setup_data(params, params_load_addr, efi_setup_data_offset); return 0; } #endif /* CONFIG_EFI */ static void setup_ima_state(const struct kimage *image, struct boot_params *params, unsigned long params_load_addr, unsigned int ima_setup_data_offset) { #ifdef CONFIG_IMA_KEXEC struct setup_data *sd = (void *)params + ima_setup_data_offset; unsigned long setup_data_phys; struct ima_setup_data *ima; if (!image->ima_buffer_size) return; sd->type = SETUP_IMA; sd->len = sizeof(*ima); ima = (void *)sd + sizeof(struct setup_data); ima->addr = image->ima_buffer_addr; ima->size = image->ima_buffer_size; /* Add setup data */ setup_data_phys = params_load_addr + ima_setup_data_offset; sd->next = params->hdr.setup_data; params->hdr.setup_data = setup_data_phys; #endif /* CONFIG_IMA_KEXEC */ } static int setup_boot_parameters(struct kimage *image, struct boot_params *params, unsigned long params_load_addr, unsigned int efi_map_offset, unsigned int efi_map_sz, unsigned int setup_data_offset) { unsigned int nr_e820_entries; unsigned long long mem_k, start, end; int i, ret = 0; /* Get subarch from existing bootparams */ params->hdr.hardware_subarch = boot_params.hdr.hardware_subarch; /* Copying screen_info will do? 
*/ memcpy(&params->screen_info, &screen_info, sizeof(struct screen_info)); /* Fill in memsize later */ params->screen_info.ext_mem_k = 0; params->alt_mem_k = 0; /* Always fill in RSDP: it is either 0 or a valid value */ params->acpi_rsdp_addr = boot_params.acpi_rsdp_addr; /* Default APM info */ memset(&params->apm_bios_info, 0, sizeof(params->apm_bios_info)); /* Default drive info */ memset(&params->hd0_info, 0, sizeof(params->hd0_info)); memset(&params->hd1_info, 0, sizeof(params->hd1_info)); if (image->type == KEXEC_TYPE_CRASH) { ret = crash_setup_memmap_entries(image, params); if (ret) return ret; } else setup_e820_entries(params); nr_e820_entries = params->e820_entries; for (i = 0; i < nr_e820_entries; i++) { if (params->e820_table[i].type != E820_TYPE_RAM) continue; start = params->e820_table[i].addr; end = params->e820_table[i].addr + params->e820_table[i].size - 1; if ((start <= 0x100000) && end > 0x100000) { mem_k = (end >> 10) - (0x100000 >> 10); params->screen_info.ext_mem_k = mem_k; params->alt_mem_k = mem_k; if (mem_k > 0xfc00) params->screen_info.ext_mem_k = 0xfc00; /* 64M*/ if (mem_k > 0xffffffff) params->alt_mem_k = 0xffffffff; } } #ifdef CONFIG_EFI /* Setup EFI state */ setup_efi_state(params, params_load_addr, efi_map_offset, efi_map_sz, setup_data_offset); setup_data_offset += sizeof(struct setup_data) + sizeof(struct efi_setup_data); #endif if (IS_ENABLED(CONFIG_IMA_KEXEC)) { /* Setup IMA log buffer state */ setup_ima_state(image, params, params_load_addr, setup_data_offset); setup_data_offset += sizeof(struct setup_data) + sizeof(struct ima_setup_data); } /* Setup RNG seed */ setup_rng_seed(params, params_load_addr, setup_data_offset); /* Setup EDD info */ memcpy(params->eddbuf, boot_params.eddbuf, EDDMAXNR * sizeof(struct edd_info)); params->eddbuf_entries = boot_params.eddbuf_entries; memcpy(params->edd_mbr_sig_buffer, boot_params.edd_mbr_sig_buffer, EDD_MBR_SIG_MAX * sizeof(unsigned int)); return ret; } static int bzImage64_probe(const char *buf, unsigned long len) { int ret = -ENOEXEC; struct setup_header *header; /* kernel should be at least two sectors long */ if (len < 2 * 512) { pr_err("File is too short to be a bzImage\n"); return ret; } header = (struct setup_header *)(buf + offsetof(struct boot_params, hdr)); if (memcmp((char *)&header->header, "HdrS", 4) != 0) { pr_err("Not a bzImage\n"); return ret; } if (header->boot_flag != 0xAA55) { pr_err("No x86 boot sector present\n"); return ret; } if (header->version < 0x020C) { pr_err("Must be at least protocol version 2.12\n"); return ret; } if (!(header->loadflags & LOADED_HIGH)) { pr_err("zImage not a bzImage\n"); return ret; } if (!(header->xloadflags & XLF_KERNEL_64)) { pr_err("Not a bzImage64. XLF_KERNEL_64 is not set.\n"); return ret; } if (!(header->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G)) { pr_err("XLF_CAN_BE_LOADED_ABOVE_4G is not set.\n"); return ret; } /* * Can't handle 32bit EFI as it does not allow loading kernel * above 4G. This should be handled by 32bit bzImage loader */ if (efi_enabled(EFI_RUNTIME_SERVICES) && !efi_enabled(EFI_64BIT)) { pr_debug("EFI is 32 bit. 
Can't load kernel above 4G.\n"); return ret; } if (!(header->xloadflags & XLF_5LEVEL) && pgtable_l5_enabled()) { pr_err("bzImage cannot handle 5-level paging mode.\n"); return ret; } /* I've got a bzImage */ pr_debug("It's a relocatable bzImage64\n"); ret = 0; return ret; } static void *bzImage64_load(struct kimage *image, char *kernel, unsigned long kernel_len, char *initrd, unsigned long initrd_len, char *cmdline, unsigned long cmdline_len) { struct setup_header *header; int setup_sects, kern16_size, ret = 0; unsigned long setup_header_size, params_cmdline_sz; struct boot_params *params; unsigned long bootparam_load_addr, kernel_load_addr, initrd_load_addr; struct bzimage64_data *ldata; struct kexec_entry64_regs regs64; void *stack; unsigned int setup_hdr_offset = offsetof(struct boot_params, hdr); unsigned int efi_map_offset, efi_map_sz, efi_setup_data_offset; struct kexec_buf kbuf = { .image = image, .buf_max = ULONG_MAX, .top_down = true }; struct kexec_buf pbuf = { .image = image, .buf_min = MIN_PURGATORY_ADDR, .buf_max = ULONG_MAX, .top_down = true }; header = (struct setup_header *)(kernel + setup_hdr_offset); setup_sects = header->setup_sects; if (setup_sects == 0) setup_sects = 4; kern16_size = (setup_sects + 1) * 512; if (kernel_len < kern16_size) { pr_err("bzImage truncated\n"); return ERR_PTR(-ENOEXEC); } if (cmdline_len > header->cmdline_size) { pr_err("Kernel command line too long\n"); return ERR_PTR(-EINVAL); } /* * In case of crash dump, we will append elfcorehdr=<addr> to * command line. Make sure it does not overflow */ if (cmdline_len + MAX_ELFCOREHDR_STR_LEN > header->cmdline_size) { pr_debug("Appending elfcorehdr=<addr> to command line exceeds maximum allowed length\n"); return ERR_PTR(-EINVAL); } /* Allocate and load backup region */ if (image->type == KEXEC_TYPE_CRASH) { ret = crash_load_segments(image); if (ret) return ERR_PTR(ret); } /* * Load purgatory. For 64bit entry point, purgatory code can be * anywhere. */ ret = kexec_load_purgatory(image, &pbuf); if (ret) { pr_err("Loading purgatory failed\n"); return ERR_PTR(ret); } pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem); /* * Load Bootparams and cmdline and space for efi stuff. * * Allocate memory together for multiple data structures so * that they all can go in single area/segment and we don't * have to create separate segment for each. Keeps things * little bit simple */ efi_map_sz = efi_get_runtime_map_size(); params_cmdline_sz = sizeof(struct boot_params) + cmdline_len + MAX_ELFCOREHDR_STR_LEN; params_cmdline_sz = ALIGN(params_cmdline_sz, 16); kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) + sizeof(struct setup_data) + sizeof(struct efi_setup_data) + sizeof(struct setup_data) + RNG_SEED_LENGTH; if (IS_ENABLED(CONFIG_IMA_KEXEC)) kbuf.bufsz += sizeof(struct setup_data) + sizeof(struct ima_setup_data); params = kzalloc(kbuf.bufsz, GFP_KERNEL); if (!params) return ERR_PTR(-ENOMEM); efi_map_offset = params_cmdline_sz; efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16); /* Copy setup header onto bootparams. Documentation/arch/x86/boot.rst */ setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset; /* Is there a limit on setup header size? 
*/ memcpy(&params->hdr, (kernel + setup_hdr_offset), setup_header_size); kbuf.buffer = params; kbuf.memsz = kbuf.bufsz; kbuf.buf_align = 16; kbuf.buf_min = MIN_BOOTPARAM_ADDR; ret = kexec_add_buffer(&kbuf); if (ret) goto out_free_params; bootparam_load_addr = kbuf.mem; pr_debug("Loaded boot_param, command line and misc at 0x%lx bufsz=0x%lx memsz=0x%lx\n", bootparam_load_addr, kbuf.bufsz, kbuf.bufsz); /* Load kernel */ kbuf.buffer = kernel + kern16_size; kbuf.bufsz = kernel_len - kern16_size; kbuf.memsz = PAGE_ALIGN(header->init_size); kbuf.buf_align = header->kernel_alignment; kbuf.buf_min = MIN_KERNEL_LOAD_ADDR; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ret = kexec_add_buffer(&kbuf); if (ret) goto out_free_params; kernel_load_addr = kbuf.mem; pr_debug("Loaded 64bit kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n", kernel_load_addr, kbuf.bufsz, kbuf.memsz); /* Load initrd high */ if (initrd) { kbuf.buffer = initrd; kbuf.bufsz = kbuf.memsz = initrd_len; kbuf.buf_align = PAGE_SIZE; kbuf.buf_min = MIN_INITRD_LOAD_ADDR; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ret = kexec_add_buffer(&kbuf); if (ret) goto out_free_params; initrd_load_addr = kbuf.mem; pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n", initrd_load_addr, initrd_len, initrd_len); setup_initrd(params, initrd_load_addr, initrd_len); } setup_cmdline(image, params, bootparam_load_addr, sizeof(struct boot_params), cmdline, cmdline_len); /* bootloader info. Do we need a separate ID for kexec kernel loader? */ params->hdr.type_of_loader = 0x0D << 4; params->hdr.loadflags = 0; /* Setup purgatory regs for entry */ ret = kexec_purgatory_get_set_symbol(image, "entry64_regs", &regs64, sizeof(regs64), 1); if (ret) goto out_free_params; regs64.rbx = 0; /* Bootstrap Processor */ regs64.rsi = bootparam_load_addr; regs64.rip = kernel_load_addr + 0x200; stack = kexec_purgatory_get_symbol_addr(image, "stack_end"); if (IS_ERR(stack)) { pr_err("Could not find address of symbol stack_end\n"); ret = -EINVAL; goto out_free_params; } regs64.rsp = (unsigned long)stack; ret = kexec_purgatory_get_set_symbol(image, "entry64_regs", &regs64, sizeof(regs64), 0); if (ret) goto out_free_params; ret = setup_boot_parameters(image, params, bootparam_load_addr, efi_map_offset, efi_map_sz, efi_setup_data_offset); if (ret) goto out_free_params; /* Allocate loader specific data */ ldata = kzalloc(sizeof(struct bzimage64_data), GFP_KERNEL); if (!ldata) { ret = -ENOMEM; goto out_free_params; } /* * Store pointer to params so that it could be freed after loading * params segment has been loaded and contents have been copied * somewhere else. */ ldata->bootparams_buf = params; return ldata; out_free_params: kfree(params); return ERR_PTR(ret); } /* This cleanup function is called after various segments have been loaded */ static int bzImage64_cleanup(void *loader_data) { struct bzimage64_data *ldata = loader_data; if (!ldata) return 0; kfree(ldata->bootparams_buf); ldata->bootparams_buf = NULL; return 0; } const struct kexec_file_ops kexec_bzImage64_ops = { .probe = bzImage64_probe, .load = bzImage64_load, .cleanup = bzImage64_cleanup, #ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG .verify_sig = kexec_kernel_verify_pe_sig, #endif };
linux-master
arch/x86/kernel/kexec-bzimage64.c
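bzImage64_probe() above validates the image purely by inspecting the boot-protocol header. Below is a stand-alone sketch of the same sanity checks run against a bzImage file from user space; the offsets (boot_flag at 0x1FE, "HdrS" magic at 0x202, protocol version at 0x206) are taken from the x86 boot protocol documentation, and this is only a format check, not a loader:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(int argc, char **argv)
{
	unsigned char hdr[1024];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <bzImage>\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "rb");
	if (!f) {
		perror(argv[1]);
		return 1;
	}
	/* The kernel requires at least two 512-byte sectors; read that much. */
	if (fread(hdr, 1, sizeof(hdr), f) != sizeof(hdr)) {
		fprintf(stderr, "file too short to be a bzImage\n");
		fclose(f);
		return 1;
	}
	fclose(f);

	uint16_t boot_flag = hdr[0x1FE] | (hdr[0x1FF] << 8);
	uint16_t version   = hdr[0x206] | (hdr[0x207] << 8);

	if (memcmp(&hdr[0x202], "HdrS", 4) || boot_flag != 0xAA55) {
		fprintf(stderr, "not a bzImage\n");
		return 1;
	}
	printf("bzImage, boot protocol %u.%02u (kexec_file_load needs >= 2.12)\n",
	       version >> 8, version & 0xff);
	return 0;
}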
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1994 Linus Torvalds * * 29 dec 2001 - Fixed oopses caused by unchecked access to the vm86 * stack - Manfred Spraul <[email protected]> * * 22 mar 2002 - Manfred detected the stackfaults, but didn't handle * them correctly. Now the emulation will be in a * consistent state after stackfaults - Kasper Dupont * <[email protected]> * * 22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont * <[email protected]> * * ?? ??? 2002 - Fixed premature returns from handle_vm86_fault * caused by Kasper Dupont's changes - Stas Sergeev * * 4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes. * Kasper Dupont <[email protected]> * * 9 apr 2002 - Changed syntax of macros in handle_vm86_fault. * Kasper Dupont <[email protected]> * * 9 apr 2002 - Changed stack access macros to jump to a label * instead of returning to userspace. This simplifies * do_int, and is needed by handle_vm6_fault. Kasper * Dupont <[email protected]> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/capability.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/syscalls.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/highmem.h> #include <linux/ptrace.h> #include <linux/audit.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/uaccess.h> #include <asm/io.h> #include <asm/tlbflush.h> #include <asm/irq.h> #include <asm/traps.h> #include <asm/vm86.h> #include <asm/switch_to.h> /* * Known problems: * * Interrupt handling is not guaranteed: * - a real x86 will disable all interrupts for one instruction * after a "mov ss,xx" to make stack handling atomic even without * the 'lss' instruction. We can't guarantee this in v86 mode, * as the next instruction might result in a page fault or similar. * - a real x86 will have interrupts disabled for one instruction * past the 'sti' that enables them. We don't bother with all the * details yet. * * Let's hope these problems do not actually matter for anything. */ /* * 8- and 16-bit register defines.. */ #define AL(regs) (((unsigned char *)&((regs)->pt.ax))[0]) #define AH(regs) (((unsigned char *)&((regs)->pt.ax))[1]) #define IP(regs) (*(unsigned short *)&((regs)->pt.ip)) #define SP(regs) (*(unsigned short *)&((regs)->pt.sp)) /* * virtual flags (16 and 32-bit versions) */ #define VFLAGS (*(unsigned short *)&(current->thread.vm86->veflags)) #define VEFLAGS (current->thread.vm86->veflags) #define set_flags(X, new, mask) \ ((X) = ((X) & ~(mask)) | ((new) & (mask))) #define SAFE_MASK (0xDD5) #define RETURN_MASK (0xDFF) void save_v86_state(struct kernel_vm86_regs *regs, int retval) { struct task_struct *tsk = current; struct vm86plus_struct __user *user; struct vm86 *vm86 = current->thread.vm86; /* * This gets called from entry.S with interrupts disabled, but * from process context. Enable interrupts here, before trying * to access user space. */ local_irq_enable(); BUG_ON(!vm86); set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask); user = vm86->user_vm86; if (!user_access_begin(user, vm86->vm86plus.is_vm86pus ? 
sizeof(struct vm86plus_struct) : sizeof(struct vm86_struct))) goto Efault; unsafe_put_user(regs->pt.bx, &user->regs.ebx, Efault_end); unsafe_put_user(regs->pt.cx, &user->regs.ecx, Efault_end); unsafe_put_user(regs->pt.dx, &user->regs.edx, Efault_end); unsafe_put_user(regs->pt.si, &user->regs.esi, Efault_end); unsafe_put_user(regs->pt.di, &user->regs.edi, Efault_end); unsafe_put_user(regs->pt.bp, &user->regs.ebp, Efault_end); unsafe_put_user(regs->pt.ax, &user->regs.eax, Efault_end); unsafe_put_user(regs->pt.ip, &user->regs.eip, Efault_end); unsafe_put_user(regs->pt.cs, &user->regs.cs, Efault_end); unsafe_put_user(regs->pt.flags, &user->regs.eflags, Efault_end); unsafe_put_user(regs->pt.sp, &user->regs.esp, Efault_end); unsafe_put_user(regs->pt.ss, &user->regs.ss, Efault_end); unsafe_put_user(regs->es, &user->regs.es, Efault_end); unsafe_put_user(regs->ds, &user->regs.ds, Efault_end); unsafe_put_user(regs->fs, &user->regs.fs, Efault_end); unsafe_put_user(regs->gs, &user->regs.gs, Efault_end); /* * Don't write screen_bitmap in case some user had a value there * and expected it to remain unchanged. */ user_access_end(); exit_vm86: preempt_disable(); tsk->thread.sp0 = vm86->saved_sp0; tsk->thread.sysenter_cs = __KERNEL_CS; update_task_stack(tsk); refresh_sysenter_cs(&tsk->thread); vm86->saved_sp0 = 0; preempt_enable(); memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs)); loadsegment(gs, vm86->regs32.gs); regs->pt.ax = retval; return; Efault_end: user_access_end(); Efault: pr_alert("could not access userspace vm86 info\n"); force_exit_sig(SIGSEGV); goto exit_vm86; } static int do_vm86_irq_handling(int subfunction, int irqnumber); static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus); SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86) { return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false); } SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) { switch (cmd) { case VM86_REQUEST_IRQ: case VM86_FREE_IRQ: case VM86_GET_IRQ_BITS: case VM86_GET_AND_RESET_IRQ: return do_vm86_irq_handling(cmd, (int)arg); case VM86_PLUS_INSTALL_CHECK: /* * NOTE: on old vm86 stuff this will return the error * from access_ok(), because the subfunction is * interpreted as (invalid) address to vm86_struct. * So the installation check works. */ return 0; } /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ return do_sys_vm86((struct vm86plus_struct __user *) arg, true); } static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) { struct task_struct *tsk = current; struct vm86 *vm86 = tsk->thread.vm86; struct kernel_vm86_regs vm86regs; struct pt_regs *regs = current_pt_regs(); unsigned long err = 0; struct vm86_struct v; err = security_mmap_addr(0); if (err) { /* * vm86 cannot virtualize the address space, so vm86 users * need to manage the low 1MB themselves using mmap. Given * that BIOS places important data in the first page, vm86 * is essentially useless if mmap_min_addr != 0. DOSEMU, * for example, won't even bother trying to use vm86 if it * can't map a page at virtual address 0. * * To reduce the available kernel attack surface, simply * disallow vm86(old) for users who cannot mmap at va 0. * * The implementation of security_mmap_addr will allow * suitably privileged users to map va 0 even if * vm.mmap_min_addr is set above 0, and we want this * behavior for vm86 as well, as it ensures that legacy * tools like vbetool will not fail just because of * vm.mmap_min_addr. 
*/ pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n", current->comm, task_pid_nr(current), from_kuid_munged(&init_user_ns, current_uid())); return -EPERM; } if (!vm86) { if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL))) return -ENOMEM; tsk->thread.vm86 = vm86; } if (vm86->saved_sp0) return -EPERM; if (copy_from_user(&v, user_vm86, offsetof(struct vm86_struct, int_revectored))) return -EFAULT; /* VM86_SCREEN_BITMAP had numerous bugs and appears to have no users. */ if (v.flags & VM86_SCREEN_BITMAP) { char comm[TASK_COMM_LEN]; pr_info_once("vm86: '%s' uses VM86_SCREEN_BITMAP, which is no longer supported\n", get_task_comm(comm, current)); return -EINVAL; } memset(&vm86regs, 0, sizeof(vm86regs)); vm86regs.pt.bx = v.regs.ebx; vm86regs.pt.cx = v.regs.ecx; vm86regs.pt.dx = v.regs.edx; vm86regs.pt.si = v.regs.esi; vm86regs.pt.di = v.regs.edi; vm86regs.pt.bp = v.regs.ebp; vm86regs.pt.ax = v.regs.eax; vm86regs.pt.ip = v.regs.eip; vm86regs.pt.cs = v.regs.cs; vm86regs.pt.flags = v.regs.eflags; vm86regs.pt.sp = v.regs.esp; vm86regs.pt.ss = v.regs.ss; vm86regs.es = v.regs.es; vm86regs.ds = v.regs.ds; vm86regs.fs = v.regs.fs; vm86regs.gs = v.regs.gs; vm86->flags = v.flags; vm86->cpu_type = v.cpu_type; if (copy_from_user(&vm86->int_revectored, &user_vm86->int_revectored, sizeof(struct revectored_struct))) return -EFAULT; if (copy_from_user(&vm86->int21_revectored, &user_vm86->int21_revectored, sizeof(struct revectored_struct))) return -EFAULT; if (plus) { if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus, sizeof(struct vm86plus_info_struct))) return -EFAULT; vm86->vm86plus.is_vm86pus = 1; } else memset(&vm86->vm86plus, 0, sizeof(struct vm86plus_info_struct)); memcpy(&vm86->regs32, regs, sizeof(struct pt_regs)); vm86->user_vm86 = user_vm86; /* * The flags register is also special: we cannot trust that the user * has set it up safely, so this makes sure interrupt etc flags are * inherited from protected mode. */ VEFLAGS = vm86regs.pt.flags; vm86regs.pt.flags &= SAFE_MASK; vm86regs.pt.flags |= regs->flags & ~SAFE_MASK; vm86regs.pt.flags |= X86_VM_MASK; vm86regs.pt.orig_ax = regs->orig_ax; switch (vm86->cpu_type) { case CPU_286: vm86->veflags_mask = 0; break; case CPU_386: vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; case CPU_486: vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; default: vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; } /* * Save old state */ vm86->saved_sp0 = tsk->thread.sp0; savesegment(gs, vm86->regs32.gs); /* make room for real-mode segments */ preempt_disable(); tsk->thread.sp0 += 16; if (boot_cpu_has(X86_FEATURE_SEP)) { tsk->thread.sysenter_cs = 0; refresh_sysenter_cs(&tsk->thread); } update_task_stack(tsk); preempt_enable(); memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs)); return regs->ax; } static inline void set_IF(struct kernel_vm86_regs *regs) { VEFLAGS |= X86_EFLAGS_VIF; } static inline void clear_IF(struct kernel_vm86_regs *regs) { VEFLAGS &= ~X86_EFLAGS_VIF; } static inline void clear_TF(struct kernel_vm86_regs *regs) { regs->pt.flags &= ~X86_EFLAGS_TF; } static inline void clear_AC(struct kernel_vm86_regs *regs) { regs->pt.flags &= ~X86_EFLAGS_AC; } /* * It is correct to call set_IF(regs) from the set_vflags_* * functions. However someone forgot to call clear_IF(regs) * in the opposite case. 
* After the command sequence CLI PUSHF STI POPF you should * end up with interrupts disabled, but you ended up with * interrupts enabled. * ( I was testing my own changes, but the only bug I * could find was in a function I had not changed. ) * [KD] */ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs) { set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask); set_flags(regs->pt.flags, flags, SAFE_MASK); if (flags & X86_EFLAGS_IF) set_IF(regs); else clear_IF(regs); } static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs) { set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask); set_flags(regs->pt.flags, flags, SAFE_MASK); if (flags & X86_EFLAGS_IF) set_IF(regs); else clear_IF(regs); } static inline unsigned long get_vflags(struct kernel_vm86_regs *regs) { unsigned long flags = regs->pt.flags & RETURN_MASK; if (VEFLAGS & X86_EFLAGS_VIF) flags |= X86_EFLAGS_IF; flags |= X86_EFLAGS_IOPL; return flags | (VEFLAGS & current->thread.vm86->veflags_mask); } static inline int is_revectored(int nr, struct revectored_struct *bitmap) { return test_bit(nr, bitmap->__map); } #define val_byte(val, n) (((__u8 *)&val)[n]) #define pushb(base, ptr, val, err_label) \ do { \ __u8 __val = val; \ ptr--; \ if (put_user(__val, base + ptr) < 0) \ goto err_label; \ } while (0) #define pushw(base, ptr, val, err_label) \ do { \ __u16 __val = val; \ ptr--; \ if (put_user(val_byte(__val, 1), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 0), base + ptr) < 0) \ goto err_label; \ } while (0) #define pushl(base, ptr, val, err_label) \ do { \ __u32 __val = val; \ ptr--; \ if (put_user(val_byte(__val, 3), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 2), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 1), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 0), base + ptr) < 0) \ goto err_label; \ } while (0) #define popb(base, ptr, err_label) \ ({ \ __u8 __res; \ if (get_user(__res, base + ptr) < 0) \ goto err_label; \ ptr++; \ __res; \ }) #define popw(base, ptr, err_label) \ ({ \ __u16 __res; \ if (get_user(val_byte(__res, 0), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 1), base + ptr) < 0) \ goto err_label; \ ptr++; \ __res; \ }) #define popl(base, ptr, err_label) \ ({ \ __u32 __res; \ if (get_user(val_byte(__res, 0), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 1), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 2), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 3), base + ptr) < 0) \ goto err_label; \ ptr++; \ __res; \ }) /* There are so many possible reasons for this function to return * VM86_INTx, so adding another doesn't bother me. We can expect * userspace programs to be able to handle it. (Getting a problem * in userspace is always better than an Oops anyway.) 
[KD] */ static void do_int(struct kernel_vm86_regs *regs, int i, unsigned char __user *ssp, unsigned short sp) { unsigned long __user *intr_ptr; unsigned long segoffs; struct vm86 *vm86 = current->thread.vm86; if (regs->pt.cs == BIOSSEG) goto cannot_handle; if (is_revectored(i, &vm86->int_revectored)) goto cannot_handle; if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored)) goto cannot_handle; intr_ptr = (unsigned long __user *) (i << 2); if (get_user(segoffs, intr_ptr)) goto cannot_handle; if ((segoffs >> 16) == BIOSSEG) goto cannot_handle; pushw(ssp, sp, get_vflags(regs), cannot_handle); pushw(ssp, sp, regs->pt.cs, cannot_handle); pushw(ssp, sp, IP(regs), cannot_handle); regs->pt.cs = segoffs >> 16; SP(regs) -= 6; IP(regs) = segoffs & 0xffff; clear_TF(regs); clear_IF(regs); clear_AC(regs); return; cannot_handle: save_v86_state(regs, VM86_INTx + (i << 8)); } int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno) { struct vm86 *vm86 = current->thread.vm86; if (vm86->vm86plus.is_vm86pus) { if ((trapno == 3) || (trapno == 1)) { save_v86_state(regs, VM86_TRAP + (trapno << 8)); return 0; } do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); return 0; } if (trapno != 1) return 1; /* we let this handle by the calling routine */ current->thread.trap_nr = trapno; current->thread.error_code = error_code; force_sig(SIGTRAP); return 0; } void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) { unsigned char opcode; unsigned char __user *csp; unsigned char __user *ssp; unsigned short ip, sp, orig_flags; int data32, pref_done; struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus; #define CHECK_IF_IN_TRAP \ if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \ newflags |= X86_EFLAGS_TF orig_flags = *(unsigned short *)&regs->pt.flags; csp = (unsigned char __user *) (regs->pt.cs << 4); ssp = (unsigned char __user *) (regs->pt.ss << 4); sp = SP(regs); ip = IP(regs); data32 = 0; pref_done = 0; do { switch (opcode = popb(csp, ip, simulate_sigsegv)) { case 0x66: /* 32-bit data */ data32 = 1; break; case 0x67: /* 32-bit address */ break; case 0x2e: /* CS */ break; case 0x3e: /* DS */ break; case 0x26: /* ES */ break; case 0x36: /* SS */ break; case 0x65: /* GS */ break; case 0x64: /* FS */ break; case 0xf2: /* repnz */ break; case 0xf3: /* rep */ break; default: pref_done = 1; } } while (!pref_done); switch (opcode) { /* pushf */ case 0x9c: if (data32) { pushl(ssp, sp, get_vflags(regs), simulate_sigsegv); SP(regs) -= 4; } else { pushw(ssp, sp, get_vflags(regs), simulate_sigsegv); SP(regs) -= 2; } IP(regs) = ip; goto vm86_fault_return; /* popf */ case 0x9d: { unsigned long newflags; if (data32) { newflags = popl(ssp, sp, simulate_sigsegv); SP(regs) += 4; } else { newflags = popw(ssp, sp, simulate_sigsegv); SP(regs) += 2; } IP(regs) = ip; CHECK_IF_IN_TRAP; if (data32) set_vflags_long(newflags, regs); else set_vflags_short(newflags, regs); goto check_vip; } /* int xx */ case 0xcd: { int intno = popb(csp, ip, simulate_sigsegv); IP(regs) = ip; if (vmpi->vm86dbg_active) { if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) { save_v86_state(regs, VM86_INTx + (intno << 8)); return; } } do_int(regs, intno, ssp, sp); return; } /* iret */ case 0xcf: { unsigned long newip; unsigned long newcs; unsigned long newflags; if (data32) { newip = popl(ssp, sp, simulate_sigsegv); newcs = popl(ssp, sp, simulate_sigsegv); newflags = popl(ssp, sp, simulate_sigsegv); SP(regs) += 12; } else { newip = popw(ssp, sp, simulate_sigsegv); 
newcs = popw(ssp, sp, simulate_sigsegv); newflags = popw(ssp, sp, simulate_sigsegv); SP(regs) += 6; } IP(regs) = newip; regs->pt.cs = newcs; CHECK_IF_IN_TRAP; if (data32) { set_vflags_long(newflags, regs); } else { set_vflags_short(newflags, regs); } goto check_vip; } /* cli */ case 0xfa: IP(regs) = ip; clear_IF(regs); goto vm86_fault_return; /* sti */ /* * Damn. This is incorrect: the 'sti' instruction should actually * enable interrupts after the /next/ instruction. Not good. * * Probably needs some horsing around with the TF flag. Aiee.. */ case 0xfb: IP(regs) = ip; set_IF(regs); goto check_vip; default: save_v86_state(regs, VM86_UNKNOWN); } return; check_vip: if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) == (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) { save_v86_state(regs, VM86_STI); return; } vm86_fault_return: if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) { save_v86_state(regs, VM86_PICRETURN); return; } if (orig_flags & X86_EFLAGS_TF) handle_vm86_trap(regs, 0, X86_TRAP_DB); return; simulate_sigsegv: /* FIXME: After a long discussion with Stas we finally * agreed, that this is wrong. Here we should * really send a SIGSEGV to the user program. * But how do we create the correct context? We * are inside a general protection fault handler * and has just returned from a page fault handler. * The correct context for the signal handler * should be a mixture of the two, but how do we * get the information? [KD] */ save_v86_state(regs, VM86_UNKNOWN); } /* ---------------- vm86 special IRQ passing stuff ----------------- */ #define VM86_IRQNAME "vm86irq" static struct vm86_irqs { struct task_struct *tsk; int sig; } vm86_irqs[16]; static DEFINE_SPINLOCK(irqbits_lock); static int irqbits; #define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \ | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \ | (1 << SIGUNUSED)) static irqreturn_t irq_handler(int intno, void *dev_id) { int irq_bit; unsigned long flags; spin_lock_irqsave(&irqbits_lock, flags); irq_bit = 1 << intno; if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk) goto out; irqbits |= irq_bit; if (vm86_irqs[intno].sig) send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1); /* * IRQ will be re-enabled when user asks for the irq (whether * polling or as a result of the signal) */ disable_irq_nosync(intno); spin_unlock_irqrestore(&irqbits_lock, flags); return IRQ_HANDLED; out: spin_unlock_irqrestore(&irqbits_lock, flags); return IRQ_NONE; } static inline void free_vm86_irq(int irqnumber) { unsigned long flags; free_irq(irqnumber, NULL); vm86_irqs[irqnumber].tsk = NULL; spin_lock_irqsave(&irqbits_lock, flags); irqbits &= ~(1 << irqnumber); spin_unlock_irqrestore(&irqbits_lock, flags); } void release_vm86_irqs(struct task_struct *task) { int i; for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++) if (vm86_irqs[i].tsk == task) free_vm86_irq(i); } static inline int get_and_reset_irq(int irqnumber) { int bit; unsigned long flags; int ret = 0; if (invalid_vm86_irq(irqnumber)) return 0; if (vm86_irqs[irqnumber].tsk != current) return 0; spin_lock_irqsave(&irqbits_lock, flags); bit = irqbits & (1 << irqnumber); irqbits &= ~bit; if (bit) { enable_irq(irqnumber); ret = 1; } spin_unlock_irqrestore(&irqbits_lock, flags); return ret; } static int do_vm86_irq_handling(int subfunction, int irqnumber) { int ret; switch (subfunction) { case VM86_GET_AND_RESET_IRQ: { return get_and_reset_irq(irqnumber); } case VM86_GET_IRQ_BITS: { return irqbits; } case VM86_REQUEST_IRQ: { int sig = irqnumber >> 8; int irq = irqnumber & 
255; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM; if (invalid_vm86_irq(irq)) return -EPERM; if (vm86_irqs[irq].tsk) return -EPERM; ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL); if (ret) return ret; vm86_irqs[irq].sig = sig; vm86_irqs[irq].tsk = current; return irq; } case VM86_FREE_IRQ: { if (invalid_vm86_irq(irqnumber)) return -EPERM; if (!vm86_irqs[irqnumber].tsk) return 0; if (vm86_irqs[irqnumber].tsk != current) return -EPERM; free_vm86_irq(irqnumber); return 0; } } return -EINVAL; }
linux-master
arch/x86/kernel/vm86_32.c
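The SAFE_MASK merge above is how vm86 keeps an untrusted guest EFLAGS image away from privileged bits. Here is a self-contained demo of the set_flags() semantics, with the macro and mask copied from the file and the flag values invented purely for illustration:

#include <stdio.h>

#define SAFE_MASK 0xDD5
#define set_flags(X, new, mask) \
	((X) = ((X) & ~(mask)) | ((new) & (mask)))

int main(void)
{
	unsigned long kernel_flags = 0x00000246;	/* example value */
	unsigned long user_flags   = 0x00003cd7;	/* example: tries to raise IOPL */

	set_flags(kernel_flags, user_flags, SAFE_MASK);

	/* Bits inside SAFE_MASK follow the user-supplied image; IF, IOPL and
	 * the other system bits keep whatever value the kernel already had. */
	printf("merged flags: %#lx\n", kernel_flags);
	return 0;
}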
// SPDX-License-Identifier: GPL-2.0-or-later /* */ /* * Copyright (C) 2004 Amit S. Kale <[email protected]> * Copyright (C) 2000-2001 VERITAS Software Corporation. * Copyright (C) 2002 Andi Kleen, SuSE Labs * Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd. * Copyright (C) 2007 MontaVista Software, Inc. * Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc. */ /**************************************************************************** * Contributor: Lake Stevens Instrument Division$ * Written by: Glenn Engel $ * Updated by: Amit Kale<[email protected]> * Updated by: Tom Rini <[email protected]> * Updated by: Jason Wessel <[email protected]> * Modified for 386 by Jim Kingdon, Cygnus Support. * Original kgdb, compatibility with 2.1.xx kernel by * David Grothe <[email protected]> * Integrated into 2.2.5 kernel by Tigran Aivazian <[email protected]> * X86_64 changes from Andi Kleen's patch merged by Jim Houston */ #include <linux/spinlock.h> #include <linux/kdebug.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/ptrace.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/kgdb.h> #include <linux/smp.h> #include <linux/nmi.h> #include <linux/hw_breakpoint.h> #include <linux/uaccess.h> #include <linux/memory.h> #include <asm/text-patching.h> #include <asm/debugreg.h> #include <asm/apicdef.h> #include <asm/apic.h> #include <asm/nmi.h> #include <asm/switch_to.h> struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { #ifdef CONFIG_X86_32 { "ax", 4, offsetof(struct pt_regs, ax) }, { "cx", 4, offsetof(struct pt_regs, cx) }, { "dx", 4, offsetof(struct pt_regs, dx) }, { "bx", 4, offsetof(struct pt_regs, bx) }, { "sp", 4, offsetof(struct pt_regs, sp) }, { "bp", 4, offsetof(struct pt_regs, bp) }, { "si", 4, offsetof(struct pt_regs, si) }, { "di", 4, offsetof(struct pt_regs, di) }, { "ip", 4, offsetof(struct pt_regs, ip) }, { "flags", 4, offsetof(struct pt_regs, flags) }, { "cs", 4, offsetof(struct pt_regs, cs) }, { "ss", 4, offsetof(struct pt_regs, ss) }, { "ds", 4, offsetof(struct pt_regs, ds) }, { "es", 4, offsetof(struct pt_regs, es) }, #else { "ax", 8, offsetof(struct pt_regs, ax) }, { "bx", 8, offsetof(struct pt_regs, bx) }, { "cx", 8, offsetof(struct pt_regs, cx) }, { "dx", 8, offsetof(struct pt_regs, dx) }, { "si", 8, offsetof(struct pt_regs, si) }, { "di", 8, offsetof(struct pt_regs, di) }, { "bp", 8, offsetof(struct pt_regs, bp) }, { "sp", 8, offsetof(struct pt_regs, sp) }, { "r8", 8, offsetof(struct pt_regs, r8) }, { "r9", 8, offsetof(struct pt_regs, r9) }, { "r10", 8, offsetof(struct pt_regs, r10) }, { "r11", 8, offsetof(struct pt_regs, r11) }, { "r12", 8, offsetof(struct pt_regs, r12) }, { "r13", 8, offsetof(struct pt_regs, r13) }, { "r14", 8, offsetof(struct pt_regs, r14) }, { "r15", 8, offsetof(struct pt_regs, r15) }, { "ip", 8, offsetof(struct pt_regs, ip) }, { "flags", 4, offsetof(struct pt_regs, flags) }, { "cs", 4, offsetof(struct pt_regs, cs) }, { "ss", 4, offsetof(struct pt_regs, ss) }, { "ds", 4, -1 }, { "es", 4, -1 }, #endif { "fs", 4, -1 }, { "gs", 4, -1 }, }; int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) { if ( #ifdef CONFIG_X86_32 regno == GDB_SS || regno == GDB_FS || regno == GDB_GS || #endif regno == GDB_SP || regno == GDB_ORIG_AX) return 0; if (dbg_reg_def[regno].offset != -1) memcpy((void *)regs + dbg_reg_def[regno].offset, mem, dbg_reg_def[regno].size); return 0; } char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) { if (regno == GDB_ORIG_AX) { memcpy(mem, &regs->orig_ax, sizeof(regs->orig_ax)); 
return "orig_ax"; } if (regno >= DBG_MAX_REG_NUM || regno < 0) return NULL; if (dbg_reg_def[regno].offset != -1) memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, dbg_reg_def[regno].size); #ifdef CONFIG_X86_32 switch (regno) { case GDB_GS: case GDB_FS: *(unsigned long *)mem = 0xFFFF; break; } #endif return dbg_reg_def[regno].name; } /** * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs * @gdb_regs: A pointer to hold the registers in the order GDB wants. * @p: The &struct task_struct of the desired process. * * Convert the register values of the sleeping process in @p to * the format that GDB expects. * This function is called when kgdb does not have access to the * &struct pt_regs and therefore it should fill the gdb registers * @gdb_regs with what has been saved in &struct thread_struct * thread field during switch_to. */ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) { #ifndef CONFIG_X86_32 u32 *gdb_regs32 = (u32 *)gdb_regs; #endif gdb_regs[GDB_AX] = 0; gdb_regs[GDB_BX] = 0; gdb_regs[GDB_CX] = 0; gdb_regs[GDB_DX] = 0; gdb_regs[GDB_SI] = 0; gdb_regs[GDB_DI] = 0; gdb_regs[GDB_BP] = ((struct inactive_task_frame *)p->thread.sp)->bp; #ifdef CONFIG_X86_32 gdb_regs[GDB_DS] = __KERNEL_DS; gdb_regs[GDB_ES] = __KERNEL_DS; gdb_regs[GDB_PS] = 0; gdb_regs[GDB_CS] = __KERNEL_CS; gdb_regs[GDB_SS] = __KERNEL_DS; gdb_regs[GDB_FS] = 0xFFFF; gdb_regs[GDB_GS] = 0xFFFF; #else gdb_regs32[GDB_PS] = 0; gdb_regs32[GDB_CS] = __KERNEL_CS; gdb_regs32[GDB_SS] = __KERNEL_DS; gdb_regs[GDB_R8] = 0; gdb_regs[GDB_R9] = 0; gdb_regs[GDB_R10] = 0; gdb_regs[GDB_R11] = 0; gdb_regs[GDB_R12] = 0; gdb_regs[GDB_R13] = 0; gdb_regs[GDB_R14] = 0; gdb_regs[GDB_R15] = 0; #endif gdb_regs[GDB_PC] = 0; gdb_regs[GDB_SP] = p->thread.sp; } static struct hw_breakpoint { unsigned enabled; unsigned long addr; int len; int type; struct perf_event * __percpu *pev; } breakinfo[HBP_NUM]; static unsigned long early_dr7; static void kgdb_correct_hw_break(void) { int breakno; for (breakno = 0; breakno < HBP_NUM; breakno++) { struct perf_event *bp; struct arch_hw_breakpoint *info; int val; int cpu = raw_smp_processor_id(); if (!breakinfo[breakno].enabled) continue; if (dbg_is_early) { set_debugreg(breakinfo[breakno].addr, breakno); early_dr7 |= encode_dr7(breakno, breakinfo[breakno].len, breakinfo[breakno].type); set_debugreg(early_dr7, 7); continue; } bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu); info = counter_arch_bp(bp); if (bp->attr.disabled != 1) continue; bp->attr.bp_addr = breakinfo[breakno].addr; bp->attr.bp_len = breakinfo[breakno].len; bp->attr.bp_type = breakinfo[breakno].type; info->address = breakinfo[breakno].addr; info->len = breakinfo[breakno].len; info->type = breakinfo[breakno].type; val = arch_install_hw_breakpoint(bp); if (!val) bp->attr.disabled = 0; } if (!dbg_is_early) hw_breakpoint_restore(); } static int hw_break_reserve_slot(int breakno) { int cpu; int cnt = 0; struct perf_event **pevent; if (dbg_is_early) return 0; for_each_online_cpu(cpu) { cnt++; pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); if (dbg_reserve_bp_slot(*pevent)) goto fail; } return 0; fail: for_each_online_cpu(cpu) { cnt--; if (!cnt) break; pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); dbg_release_bp_slot(*pevent); } return -1; } static int hw_break_release_slot(int breakno) { struct perf_event **pevent; int cpu; if (dbg_is_early) return 0; for_each_online_cpu(cpu) { pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); if (dbg_release_bp_slot(*pevent)) /* * The debugger is responsible for handing 
the retry on * remove failure. */ return -1; } return 0; } static int kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) { int i; for (i = 0; i < HBP_NUM; i++) if (breakinfo[i].addr == addr && breakinfo[i].enabled) break; if (i == HBP_NUM) return -1; if (hw_break_release_slot(i)) { printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr); return -1; } breakinfo[i].enabled = 0; return 0; } static void kgdb_remove_all_hw_break(void) { int i; int cpu = raw_smp_processor_id(); struct perf_event *bp; for (i = 0; i < HBP_NUM; i++) { if (!breakinfo[i].enabled) continue; bp = *per_cpu_ptr(breakinfo[i].pev, cpu); if (!bp->attr.disabled) { arch_uninstall_hw_breakpoint(bp); bp->attr.disabled = 1; continue; } if (dbg_is_early) early_dr7 &= ~encode_dr7(i, breakinfo[i].len, breakinfo[i].type); else if (hw_break_release_slot(i)) printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n", breakinfo[i].addr); breakinfo[i].enabled = 0; } } static int kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) { int i; for (i = 0; i < HBP_NUM; i++) if (!breakinfo[i].enabled) break; if (i == HBP_NUM) return -1; switch (bptype) { case BP_HARDWARE_BREAKPOINT: len = 1; breakinfo[i].type = X86_BREAKPOINT_EXECUTE; break; case BP_WRITE_WATCHPOINT: breakinfo[i].type = X86_BREAKPOINT_WRITE; break; case BP_ACCESS_WATCHPOINT: breakinfo[i].type = X86_BREAKPOINT_RW; break; default: return -1; } switch (len) { case 1: breakinfo[i].len = X86_BREAKPOINT_LEN_1; break; case 2: breakinfo[i].len = X86_BREAKPOINT_LEN_2; break; case 4: breakinfo[i].len = X86_BREAKPOINT_LEN_4; break; #ifdef CONFIG_X86_64 case 8: breakinfo[i].len = X86_BREAKPOINT_LEN_8; break; #endif default: return -1; } breakinfo[i].addr = addr; if (hw_break_reserve_slot(i)) { breakinfo[i].addr = 0; return -1; } breakinfo[i].enabled = 1; return 0; } /** * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb. * @regs: Current &struct pt_regs. * * This function will be called if the particular architecture must * disable hardware debugging while it is processing gdb packets or * handling exception. */ static void kgdb_disable_hw_debug(struct pt_regs *regs) { int i; int cpu = raw_smp_processor_id(); struct perf_event *bp; /* Disable hardware debugging while we are in kgdb: */ set_debugreg(0UL, 7); for (i = 0; i < HBP_NUM; i++) { if (!breakinfo[i].enabled) continue; if (dbg_is_early) { early_dr7 &= ~encode_dr7(i, breakinfo[i].len, breakinfo[i].type); continue; } bp = *per_cpu_ptr(breakinfo[i].pev, cpu); if (bp->attr.disabled == 1) continue; arch_uninstall_hw_breakpoint(bp); bp->attr.disabled = 1; } } #ifdef CONFIG_SMP /** * kgdb_roundup_cpus - Get other CPUs into a holding pattern * * On SMP systems, we need to get the attention of the other CPUs * and get them be in a known state. This should do what is needed * to get the other CPUs to call kgdb_wait(). Note that on some arches, * the NMI approach is not used for rounding up all the CPUs. For example, * in case of MIPS, smp_call_function() is used to roundup CPUs. * * On non-SMP systems, this is not called. */ void kgdb_roundup_cpus(void) { apic_send_IPI_allbutself(NMI_VECTOR); } #endif /** * kgdb_arch_handle_exception - Handle architecture specific GDB packets. * @e_vector: The error vector of the exception that happened. * @signo: The signal number of the exception that happened. * @err_code: The error code of the exception that happened. * @remcomInBuffer: The buffer of the packet we have read. * @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into. 
* @linux_regs: The &struct pt_regs of the current process. * * This function MUST handle the 'c' and 's' command packets, * as well packets to set / remove a hardware breakpoint, if used. * If there are additional packets which the hardware needs to handle, * they are handled here. The code should return -1 if it wants to * process more packets, and a %0 or %1 if it wants to exit from the * kgdb callback. */ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, char *remcomInBuffer, char *remcomOutBuffer, struct pt_regs *linux_regs) { unsigned long addr; char *ptr; switch (remcomInBuffer[0]) { case 'c': case 's': /* try to read optional parameter, pc unchanged if no parm */ ptr = &remcomInBuffer[1]; if (kgdb_hex2long(&ptr, &addr)) linux_regs->ip = addr; fallthrough; case 'D': case 'k': /* clear the trace bit */ linux_regs->flags &= ~X86_EFLAGS_TF; atomic_set(&kgdb_cpu_doing_single_step, -1); /* set the trace bit if we're stepping */ if (remcomInBuffer[0] == 's') { linux_regs->flags |= X86_EFLAGS_TF; atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id()); } return 0; } /* this means that we do not want to exit from the handler: */ return -1; } static inline int single_step_cont(struct pt_regs *regs, struct die_args *args) { /* * Single step exception from kernel space to user space so * eat the exception and continue the process: */ printk(KERN_ERR "KGDB: trap/step from kernel to user space, " "resuming...\n"); kgdb_arch_handle_exception(args->trapnr, args->signr, args->err, "c", "", regs); /* * Reset the BS bit in dr6 (pointed by args->err) to * denote completion of processing */ (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; return NOTIFY_STOP; } static DECLARE_BITMAP(was_in_debug_nmi, NR_CPUS); static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs) { int cpu; switch (cmd) { case NMI_LOCAL: if (atomic_read(&kgdb_active) != -1) { /* KGDB CPU roundup */ cpu = raw_smp_processor_id(); kgdb_nmicallback(cpu, regs); set_bit(cpu, was_in_debug_nmi); touch_nmi_watchdog(); return NMI_HANDLED; } break; case NMI_UNKNOWN: cpu = raw_smp_processor_id(); if (__test_and_clear_bit(cpu, was_in_debug_nmi)) return NMI_HANDLED; break; default: /* do nothing */ break; } return NMI_DONE; } static int __kgdb_notify(struct die_args *args, unsigned long cmd) { struct pt_regs *regs = args->regs; switch (cmd) { case DIE_DEBUG: if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { if (user_mode(regs)) return single_step_cont(regs, args); break; } else if (test_thread_flag(TIF_SINGLESTEP)) /* This means a user thread is single stepping * a system call which should be ignored */ return NOTIFY_DONE; fallthrough; default: if (user_mode(regs)) return NOTIFY_DONE; } if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs)) return NOTIFY_DONE; /* Must touch watchdog before return to normal operation */ touch_nmi_watchdog(); return NOTIFY_STOP; } int kgdb_ll_trap(int cmd, const char *str, struct pt_regs *regs, long err, int trap, int sig) { struct die_args args = { .regs = regs, .str = str, .err = err, .trapnr = trap, .signr = sig, }; if (!kgdb_io_module_registered) return NOTIFY_DONE; return __kgdb_notify(&args, cmd); } static int kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) { unsigned long flags; int ret; local_irq_save(flags); ret = __kgdb_notify(ptr, cmd); local_irq_restore(flags); return ret; } static struct notifier_block kgdb_notifier = { .notifier_call = kgdb_notify, }; /** * kgdb_arch_init - Perform any architecture specific initialization. 
* * This function will handle the initialization of any architecture * specific callbacks. */ int kgdb_arch_init(void) { int retval; retval = register_die_notifier(&kgdb_notifier); if (retval) goto out; retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler, 0, "kgdb"); if (retval) goto out1; retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler, 0, "kgdb"); if (retval) goto out2; return retval; out2: unregister_nmi_handler(NMI_LOCAL, "kgdb"); out1: unregister_die_notifier(&kgdb_notifier); out: return retval; } static void kgdb_hw_overflow_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { struct task_struct *tsk = current; int i; for (i = 0; i < 4; i++) { if (breakinfo[i].enabled) tsk->thread.virtual_dr6 |= (DR_TRAP0 << i); } } void kgdb_arch_late(void) { int i, cpu; struct perf_event_attr attr; struct perf_event **pevent; /* * Pre-allocate the hw breakpoint instructions in the non-atomic * portion of kgdb because this operation requires mutexs to * complete. */ hw_breakpoint_init(&attr); attr.bp_addr = (unsigned long)kgdb_arch_init; attr.bp_len = HW_BREAKPOINT_LEN_1; attr.bp_type = HW_BREAKPOINT_W; attr.disabled = 1; for (i = 0; i < HBP_NUM; i++) { if (breakinfo[i].pev) continue; breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL); if (IS_ERR((void * __force)breakinfo[i].pev)) { printk(KERN_ERR "kgdb: Could not allocate hw" "breakpoints\nDisabling the kernel debugger\n"); breakinfo[i].pev = NULL; kgdb_arch_exit(); return; } for_each_online_cpu(cpu) { pevent = per_cpu_ptr(breakinfo[i].pev, cpu); pevent[0]->hw.sample_period = 1; pevent[0]->overflow_handler = kgdb_hw_overflow_handler; if (pevent[0]->destroy != NULL) { pevent[0]->destroy = NULL; release_bp_slot(*pevent); } } } } /** * kgdb_arch_exit - Perform any architecture specific uninitalization. * * This function will handle the uninitalization of any architecture * specific callbacks, for dynamic registration and unregistration. */ void kgdb_arch_exit(void) { int i; for (i = 0; i < 4; i++) { if (breakinfo[i].pev) { unregister_wide_hw_breakpoint(breakinfo[i].pev); breakinfo[i].pev = NULL; } } unregister_nmi_handler(NMI_UNKNOWN, "kgdb"); unregister_nmi_handler(NMI_LOCAL, "kgdb"); unregister_die_notifier(&kgdb_notifier); } /** * * kgdb_skipexception - Bail out of KGDB when we've been triggered. * @exception: Exception vector number * @regs: Current &struct pt_regs. * * On some architectures we need to skip a breakpoint exception when * it occurs after a breakpoint has been removed. * * Skip an int3 exception when it occurs after a breakpoint has been * removed. Backtrack eip by 1 since the int3 would have caused it to * increment by 1. 
*/ int kgdb_skipexception(int exception, struct pt_regs *regs) { if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) { regs->ip -= 1; return 1; } return 0; } unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs) { if (exception == 3) return instruction_pointer(regs) - 1; return instruction_pointer(regs); } void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) { regs->ip = ip; } int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) { int err; bpt->type = BP_BREAKPOINT; err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err) return err; err = copy_to_kernel_nofault((char *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); if (!err) return err; /* * It is safe to call text_poke_kgdb() because normal kernel execution * is stopped on all cores, so long as the text_mutex is not locked. */ if (mutex_is_locked(&text_mutex)) return -EBUSY; text_poke_kgdb((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); bpt->type = BP_POKE_BREAKPOINT; return 0; } int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) { if (bpt->type != BP_POKE_BREAKPOINT) goto knl_write; /* * It is safe to call text_poke_kgdb() because normal kernel execution * is stopped on all cores, so long as the text_mutex is not locked. */ if (mutex_is_locked(&text_mutex)) goto knl_write; text_poke_kgdb((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE); return 0; knl_write: return copy_to_kernel_nofault((char *)bpt->bpt_addr, (char *)bpt->saved_instr, BREAK_INSTR_SIZE); } const struct kgdb_arch arch_kgdb_ops = { /* Breakpoint instruction: */ .gdb_bpt_instr = { 0xcc }, .flags = KGDB_HW_BREAKPOINT, .set_hw_breakpoint = kgdb_set_hw_break, .remove_hw_breakpoint = kgdb_remove_hw_break, .disable_hw_break = kgdb_disable_hw_debug, .remove_all_hw_break = kgdb_remove_all_hw_break, .correct_hw_break = kgdb_correct_hw_break, };
linux-master
arch/x86/kernel/kgdb.c
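The arch breakpoint helpers above reduce to "remember the original instruction bytes, poke an int3 (0xcc), and later restore the saved bytes". Below is a minimal user-space sketch of just that bookkeeping, assuming a plain byte array standing in for kernel text and invented names (soft_bkpt, bkpt_set, bkpt_remove); the real code must additionally go through copy_to_kernel_nofault() or text_poke_kgdb() because kernel text is not directly writable.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BREAK_INSTR_SIZE 1                 /* x86 int3 is a single 0xcc byte */

struct soft_bkpt {
        uintptr_t addr;                        /* offset of the breakpoint     */
        unsigned char saved[BREAK_INSTR_SIZE]; /* original instruction bytes   */
};

/* Plant a breakpoint: remember the old bytes, then write 0xcc. */
static void bkpt_set(struct soft_bkpt *bp, unsigned char *text, uintptr_t addr)
{
        bp->addr = addr;
        memcpy(bp->saved, &text[addr], BREAK_INSTR_SIZE);
        text[addr] = 0xcc;
}

/* Remove it again: put the saved bytes back. */
static void bkpt_remove(struct soft_bkpt *bp, unsigned char *text)
{
        memcpy(&text[bp->addr], bp->saved, BREAK_INSTR_SIZE);
}

int main(void)
{
        unsigned char fake_text[16] = { 0x55, 0x48, 0x89, 0xe5 }; /* push %rbp; mov %rsp,%rbp */
        struct soft_bkpt bp;

        bkpt_set(&bp, fake_text, 0);
        printf("patched byte: 0x%02x (saved 0x%02x)\n", fake_text[0], bp.saved[0]);
        bkpt_remove(&bp, fake_text);
        printf("restored byte: 0x%02x\n", fake_text[0]);
        return 0;
}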
// SPDX-License-Identifier: GPL-2.0 /* * x86 single-step support code, common to 32-bit and 64-bit. */ #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <asm/desc.h> #include <asm/mmu_context.h> unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) { unsigned long addr, seg; addr = regs->ip; seg = regs->cs; if (v8086_mode(regs)) { addr = (addr & 0xffff) + (seg << 4); return addr; } #ifdef CONFIG_MODIFY_LDT_SYSCALL /* * We'll assume that the code segments in the GDT * are all zero-based. That is largely true: the * TLS segments are used for data, and the PNPBIOS * and APM bios ones we just ignore here. */ if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { struct desc_struct *desc; unsigned long base; seg >>= 3; mutex_lock(&child->mm->context.lock); if (unlikely(!child->mm->context.ldt || seg >= child->mm->context.ldt->nr_entries)) addr = -1L; /* bogus selector, access would fault */ else { desc = &child->mm->context.ldt->entries[seg]; base = get_desc_base(desc); /* 16-bit code segment? */ if (!desc->d) addr &= 0xffff; addr += base; } mutex_unlock(&child->mm->context.lock); } #endif return addr; } static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) { int i, copied; unsigned char opcode[15]; unsigned long addr = convert_ip_to_linear(child, regs); copied = access_process_vm(child, addr, opcode, sizeof(opcode), FOLL_FORCE); for (i = 0; i < copied; i++) { switch (opcode[i]) { /* popf and iret */ case 0x9d: case 0xcf: return 1; /* CHECKME: 64 65 */ /* opcode and address size prefixes */ case 0x66: case 0x67: continue; /* irrelevant prefixes (segment overrides and repeats) */ case 0x26: case 0x2e: case 0x36: case 0x3e: case 0x64: case 0x65: case 0xf0: case 0xf2: case 0xf3: continue; #ifdef CONFIG_X86_64 case 0x40 ... 0x4f: if (!user_64bit_mode(regs)) /* 32-bit mode: register increment */ return 0; /* 64-bit mode: REX prefix */ continue; #endif /* CHECKME: f2, f3 */ /* * pushf: NOTE! We should probably not let * the user see the TF bit being set. But * it's more pain than it's worth to avoid * it, and a debugger could emulate this * all in user space if it _really_ cares. */ case 0x9c: default: return 0; } } return 0; } /* * Enable single-stepping. Return nonzero if user mode is not using TF itself. */ static int enable_single_step(struct task_struct *child) { struct pt_regs *regs = task_pt_regs(child); unsigned long oflags; /* * If we stepped into a sysenter/syscall insn, it trapped in * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. * If user-mode had set TF itself, then it's still clear from * do_debug() and we need to set it again to restore the user * state so we don't wrongly set TIF_FORCED_TF below. * If enable_single_step() was used last and that is what * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are * already set and our bookkeeping is fine. */ if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP))) regs->flags |= X86_EFLAGS_TF; /* * Always set TIF_SINGLESTEP. This will also * cause us to set TF when returning to user mode. */ set_tsk_thread_flag(child, TIF_SINGLESTEP); /* * Ensure that a trap is triggered once stepping out of a system * call prior to executing any user instruction. */ set_task_syscall_work(child, SYSCALL_EXIT_TRAP); oflags = regs->flags; /* Set TF on the kernel stack.. 
*/ regs->flags |= X86_EFLAGS_TF; /* * ..but if TF is changed by the instruction we will trace, * don't mark it as being "us" that set it, so that we * won't clear it by hand later. * * Note that if we don't actually execute the popf because * of a signal arriving right now or suchlike, we will lose * track of the fact that it really was "us" that set it. */ if (is_setting_trap_flag(child, regs)) { clear_tsk_thread_flag(child, TIF_FORCED_TF); return 0; } /* * If TF was already set, check whether it was us who set it. * If not, we should never attempt a block step. */ if (oflags & X86_EFLAGS_TF) return test_tsk_thread_flag(child, TIF_FORCED_TF); set_tsk_thread_flag(child, TIF_FORCED_TF); return 1; } void set_task_blockstep(struct task_struct *task, bool on) { unsigned long debugctl; /* * Ensure irq/preemption can't change debugctl in between. * Note also that both TIF_BLOCKSTEP and debugctl should * be changed atomically wrt preemption. * * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if * task is current or it can't be running, otherwise we can race * with __switch_to_xtra(). We rely on ptrace_freeze_traced(). */ local_irq_disable(); debugctl = get_debugctlmsr(); if (on) { debugctl |= DEBUGCTLMSR_BTF; set_tsk_thread_flag(task, TIF_BLOCKSTEP); } else { debugctl &= ~DEBUGCTLMSR_BTF; clear_tsk_thread_flag(task, TIF_BLOCKSTEP); } if (task == current) update_debugctlmsr(debugctl); local_irq_enable(); } /* * Enable single or block step. */ static void enable_step(struct task_struct *child, bool block) { /* * Make sure block stepping (BTF) is not enabled unless it should be. * Note that we don't try to worry about any is_setting_trap_flag() * instructions after the first when using block stepping. * So no one should try to use debugger block stepping in a program * that uses user-mode single stepping itself. */ if (enable_single_step(child) && block) set_task_blockstep(child, true); else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) set_task_blockstep(child, false); } void user_enable_single_step(struct task_struct *child) { enable_step(child, 0); } void user_enable_block_step(struct task_struct *child) { enable_step(child, 1); } void user_disable_single_step(struct task_struct *child) { /* * Make sure block stepping (BTF) is disabled. */ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) set_task_blockstep(child, false); /* Always clear TIF_SINGLESTEP... */ clear_tsk_thread_flag(child, TIF_SINGLESTEP); clear_task_syscall_work(child, SYSCALL_EXIT_TRAP); /* But touch TF only if it was set by us.. */ if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) task_pt_regs(child)->flags &= ~X86_EFLAGS_TF; }
linux-master
arch/x86/kernel/step.c
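user_enable_single_step() above is the kernel side of a debugger's PTRACE_SINGLESTEP request. The small, self-contained tracer below shows how that path is exercised from user space (error handling mostly omitted); each PTRACE_SINGLESTEP leads to TF being set for the child, which then stops with SIGTRAP after one instruction.

#include <stdio.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();

        if (child == 0) {
                /* Child: ask to be traced, then stop so the parent can take over. */
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);
                for (volatile int i = 0; i < 1000; i++)
                        ;                       /* something to step through */
                _exit(0);
        }

        int status;
        waitpid(child, &status, 0);             /* child stopped with SIGSTOP */

        /* Single-step ten instructions; each step ends in a SIGTRAP stop. */
        for (int i = 0; i < 10 && WIFSTOPPED(status); i++) {
                if (ptrace(PTRACE_SINGLESTEP, child, NULL, NULL) == -1) {
                        perror("PTRACE_SINGLESTEP");
                        break;
                }
                waitpid(child, &status, 0);
                printf("step %d: child stopped with signal %d\n",
                       i, WSTOPSIG(status));
        }

        kill(child, SIGKILL);
        waitpid(child, &status, 0);
        return 0;
}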
// SPDX-License-Identifier: GPL-2.0 #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/perf_event.h> #include <linux/bug.h> #include <linux/stddef.h> #include <asm/perf_regs.h> #include <asm/ptrace.h> #ifdef CONFIG_X86_32 #define PERF_REG_X86_MAX PERF_REG_X86_32_MAX #else #define PERF_REG_X86_MAX PERF_REG_X86_64_MAX #endif #define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r) static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = { PT_REGS_OFFSET(PERF_REG_X86_AX, ax), PT_REGS_OFFSET(PERF_REG_X86_BX, bx), PT_REGS_OFFSET(PERF_REG_X86_CX, cx), PT_REGS_OFFSET(PERF_REG_X86_DX, dx), PT_REGS_OFFSET(PERF_REG_X86_SI, si), PT_REGS_OFFSET(PERF_REG_X86_DI, di), PT_REGS_OFFSET(PERF_REG_X86_BP, bp), PT_REGS_OFFSET(PERF_REG_X86_SP, sp), PT_REGS_OFFSET(PERF_REG_X86_IP, ip), PT_REGS_OFFSET(PERF_REG_X86_FLAGS, flags), PT_REGS_OFFSET(PERF_REG_X86_CS, cs), PT_REGS_OFFSET(PERF_REG_X86_SS, ss), #ifdef CONFIG_X86_32 PT_REGS_OFFSET(PERF_REG_X86_DS, ds), PT_REGS_OFFSET(PERF_REG_X86_ES, es), PT_REGS_OFFSET(PERF_REG_X86_FS, fs), PT_REGS_OFFSET(PERF_REG_X86_GS, gs), #else /* * The pt_regs struct does not store * ds, es, fs, gs in 64 bit mode. */ (unsigned int) -1, (unsigned int) -1, (unsigned int) -1, (unsigned int) -1, #endif #ifdef CONFIG_X86_64 PT_REGS_OFFSET(PERF_REG_X86_R8, r8), PT_REGS_OFFSET(PERF_REG_X86_R9, r9), PT_REGS_OFFSET(PERF_REG_X86_R10, r10), PT_REGS_OFFSET(PERF_REG_X86_R11, r11), PT_REGS_OFFSET(PERF_REG_X86_R12, r12), PT_REGS_OFFSET(PERF_REG_X86_R13, r13), PT_REGS_OFFSET(PERF_REG_X86_R14, r14), PT_REGS_OFFSET(PERF_REG_X86_R15, r15), #endif }; u64 perf_reg_value(struct pt_regs *regs, int idx) { struct x86_perf_regs *perf_regs; if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) { perf_regs = container_of(regs, struct x86_perf_regs, regs); if (!perf_regs->xmm_regs) return 0; return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0]; } if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset))) return 0; return regs_get_register(regs, pt_regs_offset[idx]); } #define PERF_REG_X86_RESERVED (((1ULL << PERF_REG_X86_XMM0) - 1) & \ ~((1ULL << PERF_REG_X86_MAX) - 1)) #ifdef CONFIG_X86_32 #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \ (1ULL << PERF_REG_X86_R9) | \ (1ULL << PERF_REG_X86_R10) | \ (1ULL << PERF_REG_X86_R11) | \ (1ULL << PERF_REG_X86_R12) | \ (1ULL << PERF_REG_X86_R13) | \ (1ULL << PERF_REG_X86_R14) | \ (1ULL << PERF_REG_X86_R15)) int perf_reg_validate(u64 mask) { if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED))) return -EINVAL; return 0; } u64 perf_reg_abi(struct task_struct *task) { return PERF_SAMPLE_REGS_ABI_32; } void perf_get_regs_user(struct perf_regs *regs_user, struct pt_regs *regs) { regs_user->regs = task_pt_regs(current); regs_user->abi = perf_reg_abi(current); } #else /* CONFIG_X86_64 */ #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \ (1ULL << PERF_REG_X86_ES) | \ (1ULL << PERF_REG_X86_FS) | \ (1ULL << PERF_REG_X86_GS)) int perf_reg_validate(u64 mask) { if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED))) return -EINVAL; return 0; } u64 perf_reg_abi(struct task_struct *task) { if (!user_64bit_mode(task_pt_regs(task))) return PERF_SAMPLE_REGS_ABI_32; else return PERF_SAMPLE_REGS_ABI_64; } static DEFINE_PER_CPU(struct pt_regs, nmi_user_regs); void perf_get_regs_user(struct perf_regs *regs_user, struct pt_regs *regs) { struct pt_regs *regs_user_copy = this_cpu_ptr(&nmi_user_regs); struct pt_regs *user_regs = task_pt_regs(current); if (!in_nmi()) { regs_user->regs = 
user_regs; regs_user->abi = perf_reg_abi(current); return; } /* * If we're in an NMI that interrupted task_pt_regs setup, then * we can't sample user regs at all. This check isn't really * sufficient, though, as we could be in an NMI inside an interrupt * that happened during task_pt_regs setup. */ if (regs->sp > (unsigned long)&user_regs->r11 && regs->sp <= (unsigned long)(user_regs + 1)) { regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; regs_user->regs = NULL; return; } /* * These registers are always saved on 64-bit syscall entry. * On 32-bit entry points, they are saved too except r8..r11. */ regs_user_copy->ip = user_regs->ip; regs_user_copy->ax = user_regs->ax; regs_user_copy->cx = user_regs->cx; regs_user_copy->dx = user_regs->dx; regs_user_copy->si = user_regs->si; regs_user_copy->di = user_regs->di; regs_user_copy->r8 = user_regs->r8; regs_user_copy->r9 = user_regs->r9; regs_user_copy->r10 = user_regs->r10; regs_user_copy->r11 = user_regs->r11; regs_user_copy->orig_ax = user_regs->orig_ax; regs_user_copy->flags = user_regs->flags; regs_user_copy->sp = user_regs->sp; regs_user_copy->cs = user_regs->cs; regs_user_copy->ss = user_regs->ss; /* * Store user space frame-pointer value on sample * to facilitate stack unwinding for cases when * user space executable code has such support * enabled at compile time: */ regs_user_copy->bp = user_regs->bp; regs_user_copy->bx = -1; regs_user_copy->r12 = -1; regs_user_copy->r13 = -1; regs_user_copy->r14 = -1; regs_user_copy->r15 = -1; /* * For this to be at all useful, we need a reasonable guess for * the ABI. Be careful: we're in NMI context, and we're * considering current to be the current task, so we should * be careful not to look at any other percpu variables that might * change during context switches. */ regs_user->abi = user_64bit_mode(user_regs) ? PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32; regs_user->regs = regs_user_copy; } #endif /* CONFIG_X86_32 */
linux-master
arch/x86/kernel/perf_regs.c
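perf_reg_validate() above rejects sample masks that name registers pt_regs cannot supply. The stand-alone check below mirrors the 64-bit REG_NOSUPPORT test (XMM and reserved-bit handling omitted), assuming the uapi <asm/perf_regs.h> header is installed; the resulting mask is what a tool would place in perf_event_attr.sample_regs_user together with PERF_SAMPLE_REGS_USER.

#include <stdio.h>
#include <stdint.h>
#include <asm/perf_regs.h>   /* PERF_REG_X86_* (uapi header) */

/* 64-bit kernels reject ds/es/fs/gs, mirroring REG_NOSUPPORT above. */
#define DEMO_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
                        (1ULL << PERF_REG_X86_ES) | \
                        (1ULL << PERF_REG_X86_FS) | \
                        (1ULL << PERF_REG_X86_GS))

static int demo_reg_validate(uint64_t mask)
{
        if (!mask || (mask & DEMO_NOSUPPORT))
                return -1;
        return 0;
}

int main(void)
{
        uint64_t mask = (1ULL << PERF_REG_X86_IP) |
                        (1ULL << PERF_REG_X86_SP) |
                        (1ULL << PERF_REG_X86_BP);

        printf("mask=%#llx valid=%d\n", (unsigned long long)mask,
               demo_reg_validate(mask) == 0);
        return 0;
}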
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs * Copyright (C) 2011 Don Zickus Red Hat, Inc. * * Pentium III FXSR, SSE support * Gareth Hughes <[email protected]>, May 2000 */ /* * Handle hardware traps and faults. */ #include <linux/spinlock.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/sched/debug.h> #include <linux/nmi.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/hardirq.h> #include <linux/ratelimit.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/atomic.h> #include <linux/sched/clock.h> #include <asm/cpu_entry_area.h> #include <asm/traps.h> #include <asm/mach_traps.h> #include <asm/nmi.h> #include <asm/x86_init.h> #include <asm/reboot.h> #include <asm/cache.h> #include <asm/nospec-branch.h> #include <asm/sev.h> #define CREATE_TRACE_POINTS #include <trace/events/nmi.h> struct nmi_desc { raw_spinlock_t lock; struct list_head head; }; static struct nmi_desc nmi_desc[NMI_MAX] = { { .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock), .head = LIST_HEAD_INIT(nmi_desc[0].head), }, { .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock), .head = LIST_HEAD_INIT(nmi_desc[1].head), }, { .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock), .head = LIST_HEAD_INIT(nmi_desc[2].head), }, { .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock), .head = LIST_HEAD_INIT(nmi_desc[3].head), }, }; struct nmi_stats { unsigned int normal; unsigned int unknown; unsigned int external; unsigned int swallow; unsigned long recv_jiffies; unsigned long idt_seq; unsigned long idt_nmi_seq; unsigned long idt_ignored; atomic_long_t idt_calls; unsigned long idt_seq_snap; unsigned long idt_nmi_seq_snap; unsigned long idt_ignored_snap; long idt_calls_snap; }; static DEFINE_PER_CPU(struct nmi_stats, nmi_stats); static int ignore_nmis __read_mostly; int unknown_nmi_panic; /* * Prevent NMI reason port (0x61) being accessed simultaneously, can * only be used in NMI handler. */ static DEFINE_RAW_SPINLOCK(nmi_reason_lock); static int __init setup_unknown_nmi_panic(char *str) { unknown_nmi_panic = 1; return 1; } __setup("unknown_nmi_panic", setup_unknown_nmi_panic); #define nmi_to_desc(type) (&nmi_desc[type]) static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC; static int __init nmi_warning_debugfs(void) { debugfs_create_u64("nmi_longest_ns", 0644, arch_debugfs_dir, &nmi_longest_ns); return 0; } fs_initcall(nmi_warning_debugfs); static void nmi_check_duration(struct nmiaction *action, u64 duration) { int remainder_ns, decimal_msecs; if (duration < nmi_longest_ns || duration < action->max_duration) return; action->max_duration = duration; remainder_ns = do_div(duration, (1000 * 1000)); decimal_msecs = remainder_ns / 1000; printk_ratelimited(KERN_INFO "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n", action->handler, duration, decimal_msecs); } static int nmi_handle(unsigned int type, struct pt_regs *regs) { struct nmi_desc *desc = nmi_to_desc(type); struct nmiaction *a; int handled=0; rcu_read_lock(); /* * NMIs are edge-triggered, which means if you have enough * of them concurrently, you can lose some because only one * can be latched at any given time. Walk the whole list * to handle those situations. 
*/ list_for_each_entry_rcu(a, &desc->head, list) { int thishandled; u64 delta; delta = sched_clock(); thishandled = a->handler(type, regs); handled += thishandled; delta = sched_clock() - delta; trace_nmi_handler(a->handler, (int)delta, thishandled); nmi_check_duration(a, delta); } rcu_read_unlock(); /* return total number of NMI events handled */ return handled; } NOKPROBE_SYMBOL(nmi_handle); int __register_nmi_handler(unsigned int type, struct nmiaction *action) { struct nmi_desc *desc = nmi_to_desc(type); unsigned long flags; if (WARN_ON_ONCE(!action->handler || !list_empty(&action->list))) return -EINVAL; raw_spin_lock_irqsave(&desc->lock, flags); /* * Indicate if there are multiple registrations on the * internal NMI handler call chains (SERR and IO_CHECK). */ WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head)); WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head)); /* * some handlers need to be executed first otherwise a fake * event confuses some handlers (kdump uses this flag) */ if (action->flags & NMI_FLAG_FIRST) list_add_rcu(&action->list, &desc->head); else list_add_tail_rcu(&action->list, &desc->head); raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } EXPORT_SYMBOL(__register_nmi_handler); void unregister_nmi_handler(unsigned int type, const char *name) { struct nmi_desc *desc = nmi_to_desc(type); struct nmiaction *n, *found = NULL; unsigned long flags; raw_spin_lock_irqsave(&desc->lock, flags); list_for_each_entry_rcu(n, &desc->head, list) { /* * the name passed in to describe the nmi handler * is used as the lookup key */ if (!strcmp(n->name, name)) { WARN(in_nmi(), "Trying to free NMI (%s) from NMI context!\n", n->name); list_del_rcu(&n->list); found = n; break; } } raw_spin_unlock_irqrestore(&desc->lock, flags); if (found) { synchronize_rcu(); INIT_LIST_HEAD(&found->list); } } EXPORT_SYMBOL_GPL(unregister_nmi_handler); static void pci_serr_error(unsigned char reason, struct pt_regs *regs) { /* check to see if anyone registered against these types of errors */ if (nmi_handle(NMI_SERR, regs)) return; pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n", reason, smp_processor_id()); if (panic_on_unrecovered_nmi) nmi_panic(regs, "NMI: Not continuing"); pr_emerg("Dazed and confused, but trying to continue\n"); /* Clear and disable the PCI SERR error line. */ reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR; outb(reason, NMI_REASON_PORT); } NOKPROBE_SYMBOL(pci_serr_error); static void io_check_error(unsigned char reason, struct pt_regs *regs) { unsigned long i; /* check to see if anyone registered against these types of errors */ if (nmi_handle(NMI_IO_CHECK, regs)) return; pr_emerg( "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n", reason, smp_processor_id()); show_regs(regs); if (panic_on_io_nmi) { nmi_panic(regs, "NMI IOCK error: Not continuing"); /* * If we end up here, it means we have received an NMI while * processing panic(). Simply return without delaying and * re-enabling NMIs. */ return; } /* Re-enable the IOCK line, wait for a few seconds */ reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK; outb(reason, NMI_REASON_PORT); i = 20000; while (--i) { touch_nmi_watchdog(); udelay(100); } reason &= ~NMI_REASON_CLEAR_IOCHK; outb(reason, NMI_REASON_PORT); } NOKPROBE_SYMBOL(io_check_error); static void unknown_nmi_error(unsigned char reason, struct pt_regs *regs) { int handled; /* * Use 'false' as back-to-back NMIs are dealt with one level up. 
* Of course this makes having multiple 'unknown' handlers useless * as only the first one is ever run (unless it can actually determine * if it caused the NMI) */ handled = nmi_handle(NMI_UNKNOWN, regs); if (handled) { __this_cpu_add(nmi_stats.unknown, handled); return; } __this_cpu_add(nmi_stats.unknown, 1); pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", reason, smp_processor_id()); if (unknown_nmi_panic || panic_on_unrecovered_nmi) nmi_panic(regs, "NMI: Not continuing"); pr_emerg("Dazed and confused, but trying to continue\n"); } NOKPROBE_SYMBOL(unknown_nmi_error); static DEFINE_PER_CPU(bool, swallow_nmi); static DEFINE_PER_CPU(unsigned long, last_nmi_rip); static noinstr void default_do_nmi(struct pt_regs *regs) { unsigned char reason = 0; int handled; bool b2b = false; /* * CPU-specific NMI must be processed before non-CPU-specific * NMI, otherwise we may lose it, because the CPU-specific * NMI can not be detected/processed on other CPUs. */ /* * Back-to-back NMIs are interesting because they can either * be two NMI or more than two NMIs (any thing over two is dropped * due to NMI being edge-triggered). If this is the second half * of the back-to-back NMI, assume we dropped things and process * more handlers. Otherwise reset the 'swallow' NMI behaviour */ if (regs->ip == __this_cpu_read(last_nmi_rip)) b2b = true; else __this_cpu_write(swallow_nmi, false); __this_cpu_write(last_nmi_rip, regs->ip); instrumentation_begin(); handled = nmi_handle(NMI_LOCAL, regs); __this_cpu_add(nmi_stats.normal, handled); if (handled) { /* * There are cases when a NMI handler handles multiple * events in the current NMI. One of these events may * be queued for in the next NMI. Because the event is * already handled, the next NMI will result in an unknown * NMI. Instead lets flag this for a potential NMI to * swallow. */ if (handled > 1) __this_cpu_write(swallow_nmi, true); goto out; } /* * Non-CPU-specific NMI: NMI sources can be processed on any CPU. * * Another CPU may be processing panic routines while holding * nmi_reason_lock. Check if the CPU issued the IPI for crash dumping, * and if so, call its callback directly. If there is no CPU preparing * crash dump, we simply loop here. */ while (!raw_spin_trylock(&nmi_reason_lock)) { run_crash_ipi_callback(regs); cpu_relax(); } reason = x86_platform.get_nmi_reason(); if (reason & NMI_REASON_MASK) { if (reason & NMI_REASON_SERR) pci_serr_error(reason, regs); else if (reason & NMI_REASON_IOCHK) io_check_error(reason, regs); #ifdef CONFIG_X86_32 /* * Reassert NMI in case it became active * meanwhile as it's edge-triggered: */ reassert_nmi(); #endif __this_cpu_add(nmi_stats.external, 1); raw_spin_unlock(&nmi_reason_lock); goto out; } raw_spin_unlock(&nmi_reason_lock); /* * Only one NMI can be latched at a time. To handle * this we may process multiple nmi handlers at once to * cover the case where an NMI is dropped. The downside * to this approach is we may process an NMI prematurely, * while its real NMI is sitting latched. This will cause * an unknown NMI on the next run of the NMI processing. * * We tried to flag that condition above, by setting the * swallow_nmi flag when we process more than one event. * This condition is also only present on the second half * of a back-to-back NMI, so we flag that condition too. * * If both are true, we assume we already processed this * NMI previously and we swallow it. Otherwise we reset * the logic. * * There are scenarios where we may accidentally swallow * a 'real' unknown NMI. 
For example, while processing * a perf NMI another perf NMI comes in along with a * 'real' unknown NMI. These two NMIs get combined into * one (as described above). When the next NMI gets * processed, it will be flagged by perf as handled, but * no one will know that there was a 'real' unknown NMI sent * also. As a result it gets swallowed. Or if the first * perf NMI returns two events handled then the second * NMI will get eaten by the logic below, again losing a * 'real' unknown NMI. But this is the best we can do * for now. */ if (b2b && __this_cpu_read(swallow_nmi)) __this_cpu_add(nmi_stats.swallow, 1); else unknown_nmi_error(reason, regs); out: instrumentation_end(); } /* * NMIs can page fault or hit breakpoints which will cause it to lose * its NMI context with the CPU when the breakpoint or page fault does an IRET. * * As a result, NMIs can nest if NMIs get unmasked due an IRET during * NMI processing. On x86_64, the asm glue protects us from nested NMIs * if the outer NMI came from kernel mode, but we can still nest if the * outer NMI came from user mode. * * To handle these nested NMIs, we have three states: * * 1) not running * 2) executing * 3) latched * * When no NMI is in progress, it is in the "not running" state. * When an NMI comes in, it goes into the "executing" state. * Normally, if another NMI is triggered, it does not interrupt * the running NMI and the HW will simply latch it so that when * the first NMI finishes, it will restart the second NMI. * (Note, the latch is binary, thus multiple NMIs triggering, * when one is running, are ignored. Only one NMI is restarted.) * * If an NMI executes an iret, another NMI can preempt it. We do not * want to allow this new NMI to run, but we want to execute it when the * first one finishes. We set the state to "latched", and the exit of * the first NMI will perform a dec_return, if the result is zero * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the * dec_return would have set the state to NMI_EXECUTING (what we want it * to be when we are running). In this case, we simply jump back to * rerun the NMI handler again, and restart the 'latched' NMI. * * No trap (breakpoint or page fault) should be hit before nmi_restart, * thus there is no race between the first check of state for NOT_RUNNING * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs * at this point. * * In case the NMI takes a page fault, we need to save off the CR2 * because the NMI could have preempted another page fault and corrupt * the CR2 that is about to be read. As nested NMIs must be restarted * and they can not take breakpoints or page faults, the update of the * CR2 must be done before converting the nmi state back to NOT_RUNNING. * Otherwise, there would be a race of another nested NMI coming in * after setting state to NOT_RUNNING but before updating the nmi_cr2. */ enum nmi_states { NMI_NOT_RUNNING = 0, NMI_EXECUTING, NMI_LATCHED, }; static DEFINE_PER_CPU(enum nmi_states, nmi_state); static DEFINE_PER_CPU(unsigned long, nmi_cr2); static DEFINE_PER_CPU(unsigned long, nmi_dr7); DEFINE_IDTENTRY_RAW(exc_nmi) { irqentry_state_t irq_state; struct nmi_stats *nsp = this_cpu_ptr(&nmi_stats); /* * Re-enable NMIs right here when running as an SEV-ES guest. This might * cause nested NMIs, but those can be handled safely. 
*/ sev_es_nmi_complete(); if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) raw_atomic_long_inc(&nsp->idt_calls); if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) return; if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { this_cpu_write(nmi_state, NMI_LATCHED); return; } this_cpu_write(nmi_state, NMI_EXECUTING); this_cpu_write(nmi_cr2, read_cr2()); if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) { WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1); WARN_ON_ONCE(!(nsp->idt_seq & 0x1)); WRITE_ONCE(nsp->recv_jiffies, jiffies); } nmi_restart: /* * Needs to happen before DR7 is accessed, because the hypervisor can * intercept DR7 reads/writes, turning those into #VC exceptions. */ sev_es_ist_enter(regs); this_cpu_write(nmi_dr7, local_db_save()); irq_state = irqentry_nmi_enter(regs); inc_irq_stat(__nmi_count); if (IS_ENABLED(CONFIG_NMI_CHECK_CPU) && ignore_nmis) { WRITE_ONCE(nsp->idt_ignored, nsp->idt_ignored + 1); } else if (!ignore_nmis) { if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) { WRITE_ONCE(nsp->idt_nmi_seq, nsp->idt_nmi_seq + 1); WARN_ON_ONCE(!(nsp->idt_nmi_seq & 0x1)); } default_do_nmi(regs); if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) { WRITE_ONCE(nsp->idt_nmi_seq, nsp->idt_nmi_seq + 1); WARN_ON_ONCE(nsp->idt_nmi_seq & 0x1); } } irqentry_nmi_exit(regs, irq_state); local_db_restore(this_cpu_read(nmi_dr7)); sev_es_ist_exit(); if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) write_cr2(this_cpu_read(nmi_cr2)); if (this_cpu_dec_return(nmi_state)) goto nmi_restart; if (user_mode(regs)) mds_user_clear_cpu_buffers(); if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) { WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1); WARN_ON_ONCE(nsp->idt_seq & 0x1); WRITE_ONCE(nsp->recv_jiffies, jiffies); } } #if IS_ENABLED(CONFIG_KVM_INTEL) DEFINE_IDTENTRY_RAW(exc_nmi_kvm_vmx) { exc_nmi(regs); } #if IS_MODULE(CONFIG_KVM_INTEL) EXPORT_SYMBOL_GPL(asm_exc_nmi_kvm_vmx); #endif #endif #ifdef CONFIG_NMI_CHECK_CPU static char *nmi_check_stall_msg[] = { /* */ /* +--------- nsp->idt_seq_snap & 0x1: CPU is in NMI handler. */ /* | +------ cpu_is_offline(cpu) */ /* | | +--- nsp->idt_calls_snap != atomic_long_read(&nsp->idt_calls): */ /* | | | NMI handler has been invoked. 
*/ /* | | | */ /* V V V */ /* 0 0 0 */ "NMIs are not reaching exc_nmi() handler", /* 0 0 1 */ "exc_nmi() handler is ignoring NMIs", /* 0 1 0 */ "CPU is offline and NMIs are not reaching exc_nmi() handler", /* 0 1 1 */ "CPU is offline and exc_nmi() handler is legitimately ignoring NMIs", /* 1 0 0 */ "CPU is in exc_nmi() handler and no further NMIs are reaching handler", /* 1 0 1 */ "CPU is in exc_nmi() handler which is legitimately ignoring NMIs", /* 1 1 0 */ "CPU is offline in exc_nmi() handler and no more NMIs are reaching exc_nmi() handler", /* 1 1 1 */ "CPU is offline in exc_nmi() handler which is legitimately ignoring NMIs", }; void nmi_backtrace_stall_snap(const struct cpumask *btp) { int cpu; struct nmi_stats *nsp; for_each_cpu(cpu, btp) { nsp = per_cpu_ptr(&nmi_stats, cpu); nsp->idt_seq_snap = READ_ONCE(nsp->idt_seq); nsp->idt_nmi_seq_snap = READ_ONCE(nsp->idt_nmi_seq); nsp->idt_ignored_snap = READ_ONCE(nsp->idt_ignored); nsp->idt_calls_snap = atomic_long_read(&nsp->idt_calls); } } void nmi_backtrace_stall_check(const struct cpumask *btp) { int cpu; int idx; unsigned long nmi_seq; unsigned long j = jiffies; char *modp; char *msgp; char *msghp; struct nmi_stats *nsp; for_each_cpu(cpu, btp) { nsp = per_cpu_ptr(&nmi_stats, cpu); modp = ""; msghp = ""; nmi_seq = READ_ONCE(nsp->idt_nmi_seq); if (nsp->idt_nmi_seq_snap + 1 == nmi_seq && (nmi_seq & 0x1)) { msgp = "CPU entered NMI handler function, but has not exited"; } else if ((nsp->idt_nmi_seq_snap & 0x1) != (nmi_seq & 0x1)) { msgp = "CPU is handling NMIs"; } else { idx = ((nsp->idt_seq_snap & 0x1) << 2) | (cpu_is_offline(cpu) << 1) | (nsp->idt_calls_snap != atomic_long_read(&nsp->idt_calls)); msgp = nmi_check_stall_msg[idx]; if (nsp->idt_ignored_snap != READ_ONCE(nsp->idt_ignored) && (idx & 0x1)) modp = ", but OK because ignore_nmis was set"; if (nmi_seq & ~0x1) msghp = " (CPU currently in NMI handler function)"; else if (nsp->idt_nmi_seq_snap + 1 == nmi_seq) msghp = " (CPU exited one NMI handler function)"; } pr_alert("%s: CPU %d: %s%s%s, last activity: %lu jiffies ago.\n", __func__, cpu, msgp, modp, msghp, j - READ_ONCE(nsp->recv_jiffies)); } } #endif void stop_nmi(void) { ignore_nmis++; } void restart_nmi(void) { ignore_nmis--; } /* reset the back-to-back NMI logic */ void local_touch_nmi(void) { __this_cpu_write(last_nmi_rip, 0); } EXPORT_SYMBOL_GPL(local_touch_nmi);
linux-master
arch/x86/kernel/nmi.c
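The NMI nesting comment above describes a three-state machine driven by a decrement-and-test on handler exit. The toy user-space model below captures only that state machine (invented names, a single "CPU", no real NMIs) and shows a latched NMI causing the handler body to rerun exactly once.

#include <stdio.h>

enum nmi_states { NMI_NOT_RUNNING = 0, NMI_EXECUTING, NMI_LATCHED };

static int nmi_state = NMI_NOT_RUNNING;
static int runs;

/* What delivery does in this toy: if a handler is already running,
 * latch the NMI instead of running a nested handler body. */
static int nmi_delivered(void)
{
        if (nmi_state != NMI_NOT_RUNNING) {
                nmi_state = NMI_LATCHED;
                return 0;           /* swallowed into the latch */
        }
        nmi_state = NMI_EXECUTING;
        return 1;                   /* caller should run the handler */
}

static void exc_nmi_skeleton(void)
{
restart:
        runs++;
        if (runs == 1)
                nmi_delivered();    /* pretend a second NMI arrives mid-handler */

        /* The dec-and-test exit: LATCHED(2) -> EXECUTING(1) means rerun,
         * EXECUTING(1) -> NOT_RUNNING(0) means we are really done. */
        if (--nmi_state)
                goto restart;
}

int main(void)
{
        if (nmi_delivered())
                exc_nmi_skeleton();
        printf("handler body ran %d times for 2 delivered NMIs\n", runs); /* 2 */
        return 0;
}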
// SPDX-License-Identifier: GPL-2.0 /* * shstk.c - Intel shadow stack support * * Copyright (c) 2021, Intel Corporation. * Yu-cheng Yu <[email protected]> */ #include <linux/sched.h> #include <linux/bitops.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/sched/signal.h> #include <linux/compat.h> #include <linux/sizes.h> #include <linux/user.h> #include <linux/syscalls.h> #include <asm/msr.h> #include <asm/fpu/xstate.h> #include <asm/fpu/types.h> #include <asm/shstk.h> #include <asm/special_insns.h> #include <asm/fpu/api.h> #include <asm/prctl.h> #define SS_FRAME_SIZE 8 static bool features_enabled(unsigned long features) { return current->thread.features & features; } static void features_set(unsigned long features) { current->thread.features |= features; } static void features_clr(unsigned long features) { current->thread.features &= ~features; } /* * Create a restore token on the shadow stack. A token is always 8-byte * and aligned to 8. */ static int create_rstor_token(unsigned long ssp, unsigned long *token_addr) { unsigned long addr; /* Token must be aligned */ if (!IS_ALIGNED(ssp, 8)) return -EINVAL; addr = ssp - SS_FRAME_SIZE; /* * SSP is aligned, so reserved bits and mode bit are a zero, just mark * the token 64-bit. */ ssp |= BIT(0); if (write_user_shstk_64((u64 __user *)addr, (u64)ssp)) return -EFAULT; if (token_addr) *token_addr = addr; return 0; } /* * VM_SHADOW_STACK will have a guard page. This helps userspace protect * itself from attacks. The reasoning is as follows: * * The shadow stack pointer(SSP) is moved by CALL, RET, and INCSSPQ. The * INCSSP instruction can increment the shadow stack pointer. It is the * shadow stack analog of an instruction like: * * addq $0x80, %rsp * * However, there is one important difference between an ADD on %rsp * and INCSSP. In addition to modifying SSP, INCSSP also reads from the * memory of the first and last elements that were "popped". It can be * thought of as acting like this: * * READ_ONCE(ssp); // read+discard top element on stack * ssp += nr_to_pop * 8; // move the shadow stack * READ_ONCE(ssp-8); // read+discard last popped stack element * * The maximum distance INCSSP can move the SSP is 2040 bytes, before * it would read the memory. Therefore a single page gap will be enough * to prevent any operation from shifting the SSP to an adjacent stack, * since it would have to land in the gap at least once, causing a * fault. */ static unsigned long alloc_shstk(unsigned long addr, unsigned long size, unsigned long token_offset, bool set_res_tok) { int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_ABOVE4G; struct mm_struct *mm = current->mm; unsigned long mapped_addr, unused; if (addr) flags |= MAP_FIXED_NOREPLACE; mmap_write_lock(mm); mapped_addr = do_mmap(NULL, addr, size, PROT_READ, flags, VM_SHADOW_STACK | VM_WRITE, 0, &unused, NULL); mmap_write_unlock(mm); if (!set_res_tok || IS_ERR_VALUE(mapped_addr)) goto out; if (create_rstor_token(mapped_addr + token_offset, NULL)) { vm_munmap(mapped_addr, size); return -EINVAL; } out: return mapped_addr; } static unsigned long adjust_shstk_size(unsigned long size) { if (size) return PAGE_ALIGN(size); return PAGE_ALIGN(min_t(unsigned long long, rlimit(RLIMIT_STACK), SZ_4G)); } static void unmap_shadow_stack(u64 base, u64 size) { int r; r = vm_munmap(base, size); /* * mmap_write_lock_killable() failed with -EINTR. This means * the process is about to die and have it's MM cleaned up. 
* This task shouldn't ever make it back to userspace. In this * case it is ok to leak a shadow stack, so just exit out. */ if (r == -EINTR) return; /* * For all other types of vm_munmap() failure, either the * system is out of memory or there is bug. */ WARN_ON_ONCE(r); } static int shstk_setup(void) { struct thread_shstk *shstk = &current->thread.shstk; unsigned long addr, size; /* Already enabled */ if (features_enabled(ARCH_SHSTK_SHSTK)) return 0; /* Also not supported for 32 bit and x32 */ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) || in_32bit_syscall()) return -EOPNOTSUPP; size = adjust_shstk_size(0); addr = alloc_shstk(0, size, 0, false); if (IS_ERR_VALUE(addr)) return PTR_ERR((void *)addr); fpregs_lock_and_load(); wrmsrl(MSR_IA32_PL3_SSP, addr + size); wrmsrl(MSR_IA32_U_CET, CET_SHSTK_EN); fpregs_unlock(); shstk->base = addr; shstk->size = size; features_set(ARCH_SHSTK_SHSTK); return 0; } void reset_thread_features(void) { memset(&current->thread.shstk, 0, sizeof(struct thread_shstk)); current->thread.features = 0; current->thread.features_locked = 0; } unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long clone_flags, unsigned long stack_size) { struct thread_shstk *shstk = &tsk->thread.shstk; unsigned long addr, size; /* * If shadow stack is not enabled on the new thread, skip any * switch to a new shadow stack. */ if (!features_enabled(ARCH_SHSTK_SHSTK)) return 0; /* * For CLONE_VFORK the child will share the parents shadow stack. * Make sure to clear the internal tracking of the thread shadow * stack so the freeing logic run for child knows to leave it alone. */ if (clone_flags & CLONE_VFORK) { shstk->base = 0; shstk->size = 0; return 0; } /* * For !CLONE_VM the child will use a copy of the parents shadow * stack. */ if (!(clone_flags & CLONE_VM)) return 0; size = adjust_shstk_size(stack_size); addr = alloc_shstk(0, size, 0, false); if (IS_ERR_VALUE(addr)) return addr; shstk->base = addr; shstk->size = size; return addr + size; } static unsigned long get_user_shstk_addr(void) { unsigned long long ssp; fpregs_lock_and_load(); rdmsrl(MSR_IA32_PL3_SSP, ssp); fpregs_unlock(); return ssp; } #define SHSTK_DATA_BIT BIT(63) static int put_shstk_data(u64 __user *addr, u64 data) { if (WARN_ON_ONCE(data & SHSTK_DATA_BIT)) return -EINVAL; /* * Mark the high bit so that the sigframe can't be processed as a * return address. */ if (write_user_shstk_64(addr, data | SHSTK_DATA_BIT)) return -EFAULT; return 0; } static int get_shstk_data(unsigned long *data, unsigned long __user *addr) { unsigned long ldata; if (unlikely(get_user(ldata, addr))) return -EFAULT; if (!(ldata & SHSTK_DATA_BIT)) return -EINVAL; *data = ldata & ~SHSTK_DATA_BIT; return 0; } static int shstk_push_sigframe(unsigned long *ssp) { unsigned long target_ssp = *ssp; /* Token must be aligned */ if (!IS_ALIGNED(target_ssp, 8)) return -EINVAL; *ssp -= SS_FRAME_SIZE; if (put_shstk_data((void __user *)*ssp, target_ssp)) return -EFAULT; return 0; } static int shstk_pop_sigframe(unsigned long *ssp) { struct vm_area_struct *vma; unsigned long token_addr; bool need_to_check_vma; int err = 1; /* * It is possible for the SSP to be off the end of a shadow stack by 4 * or 8 bytes. If the shadow stack is at the start of a page or 4 bytes * before it, it might be this case, so check that the address being * read is actually shadow stack. 
*/ if (!IS_ALIGNED(*ssp, 8)) return -EINVAL; need_to_check_vma = PAGE_ALIGN(*ssp) == *ssp; if (need_to_check_vma) mmap_read_lock_killable(current->mm); err = get_shstk_data(&token_addr, (unsigned long __user *)*ssp); if (unlikely(err)) goto out_err; if (need_to_check_vma) { vma = find_vma(current->mm, *ssp); if (!vma || !(vma->vm_flags & VM_SHADOW_STACK)) { err = -EFAULT; goto out_err; } mmap_read_unlock(current->mm); } /* Restore SSP aligned? */ if (unlikely(!IS_ALIGNED(token_addr, 8))) return -EINVAL; /* SSP in userspace? */ if (unlikely(token_addr >= TASK_SIZE_MAX)) return -EINVAL; *ssp = token_addr; return 0; out_err: if (need_to_check_vma) mmap_read_unlock(current->mm); return err; } int setup_signal_shadow_stack(struct ksignal *ksig) { void __user *restorer = ksig->ka.sa.sa_restorer; unsigned long ssp; int err; if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) || !features_enabled(ARCH_SHSTK_SHSTK)) return 0; if (!restorer) return -EINVAL; ssp = get_user_shstk_addr(); if (unlikely(!ssp)) return -EINVAL; err = shstk_push_sigframe(&ssp); if (unlikely(err)) return err; /* Push restorer address */ ssp -= SS_FRAME_SIZE; err = write_user_shstk_64((u64 __user *)ssp, (u64)restorer); if (unlikely(err)) return -EFAULT; fpregs_lock_and_load(); wrmsrl(MSR_IA32_PL3_SSP, ssp); fpregs_unlock(); return 0; } int restore_signal_shadow_stack(void) { unsigned long ssp; int err; if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) || !features_enabled(ARCH_SHSTK_SHSTK)) return 0; ssp = get_user_shstk_addr(); if (unlikely(!ssp)) return -EINVAL; err = shstk_pop_sigframe(&ssp); if (unlikely(err)) return err; fpregs_lock_and_load(); wrmsrl(MSR_IA32_PL3_SSP, ssp); fpregs_unlock(); return 0; } void shstk_free(struct task_struct *tsk) { struct thread_shstk *shstk = &tsk->thread.shstk; if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) || !features_enabled(ARCH_SHSTK_SHSTK)) return; /* * When fork() with CLONE_VM fails, the child (tsk) already has a * shadow stack allocated, and exit_thread() calls this function to * free it. In this case the parent (current) and the child share * the same mm struct. */ if (!tsk->mm || tsk->mm != current->mm) return; /* * If shstk->base is NULL, then this task is not managing its * own shadow stack (CLONE_VFORK). So skip freeing it. */ if (!shstk->base) return; /* * shstk->base is NULL for CLONE_VFORK child tasks, and so is * normal. But size = 0 on a shstk->base is not normal and * indicated an attempt to free the thread shadow stack twice. * Warn about it. */ if (WARN_ON(!shstk->size)) return; unmap_shadow_stack(shstk->base, shstk->size); shstk->size = 0; } static int wrss_control(bool enable) { u64 msrval; if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK)) return -EOPNOTSUPP; /* * Only enable WRSS if shadow stack is enabled. If shadow stack is not * enabled, WRSS will already be disabled, so don't bother clearing it * when disabling. */ if (!features_enabled(ARCH_SHSTK_SHSTK)) return -EPERM; /* Already enabled/disabled? */ if (features_enabled(ARCH_SHSTK_WRSS) == enable) return 0; fpregs_lock_and_load(); rdmsrl(MSR_IA32_U_CET, msrval); if (enable) { features_set(ARCH_SHSTK_WRSS); msrval |= CET_WRSS_EN; } else { features_clr(ARCH_SHSTK_WRSS); if (!(msrval & CET_WRSS_EN)) goto unlock; msrval &= ~CET_WRSS_EN; } wrmsrl(MSR_IA32_U_CET, msrval); unlock: fpregs_unlock(); return 0; } static int shstk_disable(void) { if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK)) return -EOPNOTSUPP; /* Already disabled? 
*/ if (!features_enabled(ARCH_SHSTK_SHSTK)) return 0; fpregs_lock_and_load(); /* Disable WRSS too when disabling shadow stack */ wrmsrl(MSR_IA32_U_CET, 0); wrmsrl(MSR_IA32_PL3_SSP, 0); fpregs_unlock(); shstk_free(current); features_clr(ARCH_SHSTK_SHSTK | ARCH_SHSTK_WRSS); return 0; } SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags) { bool set_tok = flags & SHADOW_STACK_SET_TOKEN; unsigned long aligned_size; if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK)) return -EOPNOTSUPP; if (flags & ~SHADOW_STACK_SET_TOKEN) return -EINVAL; /* If there isn't space for a token */ if (set_tok && size < 8) return -ENOSPC; if (addr && addr < SZ_4G) return -ERANGE; /* * An overflow would result in attempting to write the restore token * to the wrong location. Not catastrophic, but just return the right * error code and block it. */ aligned_size = PAGE_ALIGN(size); if (aligned_size < size) return -EOVERFLOW; return alloc_shstk(addr, aligned_size, size, set_tok); } long shstk_prctl(struct task_struct *task, int option, unsigned long arg2) { unsigned long features = arg2; if (option == ARCH_SHSTK_STATUS) { return put_user(task->thread.features, (unsigned long __user *)arg2); } if (option == ARCH_SHSTK_LOCK) { task->thread.features_locked |= features; return 0; } /* Only allow via ptrace */ if (task != current) { if (option == ARCH_SHSTK_UNLOCK && IS_ENABLED(CONFIG_CHECKPOINT_RESTORE)) { task->thread.features_locked &= ~features; return 0; } return -EINVAL; } /* Do not allow to change locked features */ if (features & task->thread.features_locked) return -EPERM; /* Only support enabling/disabling one feature at a time. */ if (hweight_long(features) > 1) return -EINVAL; if (option == ARCH_SHSTK_DISABLE) { if (features & ARCH_SHSTK_WRSS) return wrss_control(false); if (features & ARCH_SHSTK_SHSTK) return shstk_disable(); return -EINVAL; } /* Handle ARCH_SHSTK_ENABLE */ if (features & ARCH_SHSTK_SHSTK) return shstk_setup(); if (features & ARCH_SHSTK_WRSS) return wrss_control(true); return -EINVAL; }
linux-master
arch/x86/kernel/shstk.c
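The token handling above is mostly bit arithmetic: a restore token is the old SSP with bit 0 set for 64-bit mode, stored 8 bytes below it, and sigframe words carry bit 63 so they can never be misread as return addresses. A short user-space sketch of those encodings follows (hypothetical SSP value, no shadow-stack hardware required).

#include <stdio.h>
#include <stdint.h>

#define SS_FRAME_SIZE   8
#define TOKEN_MODE_64   (1ULL << 0)
#define SHSTK_DATA_BIT  (1ULL << 63)

int main(void)
{
        uint64_t ssp = 0x7f0000001000ULL;     /* hypothetical aligned SSP */

        /* create_rstor_token(): token lives at ssp - 8 and encodes ssp | mode */
        uint64_t token_addr = ssp - SS_FRAME_SIZE;
        uint64_t token      = ssp | TOKEN_MODE_64;

        /* put_shstk_data()/get_shstk_data(): tag and untag a saved SSP value */
        uint64_t framed   = ssp | SHSTK_DATA_BIT;
        uint64_t restored = framed & ~SHSTK_DATA_BIT;

        printf("token @%#llx = %#llx\n", (unsigned long long)token_addr,
               (unsigned long long)token);
        printf("sigframe word %#llx decodes back to %#llx\n",
               (unsigned long long)framed, (unsigned long long)restored);
        return 0;
}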
// SPDX-License-Identifier: GPL-2.0 /* * TSC frequency enumeration via MSR * * Copyright (C) 2013, 2018 Intel Corporation * Author: Bin Gao <[email protected]> */ #include <linux/kernel.h> #include <linux/thread_info.h> #include <asm/apic.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include <asm/msr.h> #include <asm/param.h> #include <asm/tsc.h> #define MAX_NUM_FREQS 16 /* 4 bits to select the frequency */ /* * The frequency numbers in the SDM are e.g. 83.3 MHz, which does not contain a * lot of accuracy which leads to clock drift. As far as we know Bay Trail SoCs * use a 25 MHz crystal and Cherry Trail uses a 19.2 MHz crystal, the crystal * is the source clk for a root PLL which outputs 1600 and 100 MHz. It is * unclear if the root PLL outputs are used directly by the CPU clock PLL or * if there is another PLL in between. * This does not matter though, we can model the chain of PLLs as a single PLL * with a quotient equal to the quotients of all PLLs in the chain multiplied. * So we can create a simplified model of the CPU clock setup using a reference * clock of 100 MHz plus a quotient which gets us as close to the frequency * from the SDM as possible. * For the 83.3 MHz example from above this would give us 100 MHz * 5 / 6 = * 83 and 1/3 MHz, which matches exactly what has been measured on actual hw. */ #define TSC_REFERENCE_KHZ 100000 struct muldiv { u32 multiplier; u32 divider; }; /* * If MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be * read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40]. * Unfortunately some Intel Atom SoCs aren't quite compliant to this, * so we need manually differentiate SoC families. This is what the * field use_msr_plat does. */ struct freq_desc { bool use_msr_plat; struct muldiv muldiv[MAX_NUM_FREQS]; /* * Some CPU frequencies in the SDM do not map to known PLL freqs, in * that case the muldiv array is empty and the freqs array is used. */ u32 freqs[MAX_NUM_FREQS]; u32 mask; }; /* * Penwell and Clovertrail use spread spectrum clock, * so the freq number is not exactly the same as reported * by MSR based on SDM. 
*/ static const struct freq_desc freq_desc_pnw = { .use_msr_plat = false, .freqs = { 0, 0, 0, 0, 0, 99840, 0, 83200 }, .mask = 0x07, }; static const struct freq_desc freq_desc_clv = { .use_msr_plat = false, .freqs = { 0, 133200, 0, 0, 0, 99840, 0, 83200 }, .mask = 0x07, }; /* * Bay Trail SDM MSR_FSB_FREQ frequencies simplified PLL model: * 000: 100 * 5 / 6 = 83.3333 MHz * 001: 100 * 1 / 1 = 100.0000 MHz * 010: 100 * 4 / 3 = 133.3333 MHz * 011: 100 * 7 / 6 = 116.6667 MHz * 100: 100 * 4 / 5 = 80.0000 MHz */ static const struct freq_desc freq_desc_byt = { .use_msr_plat = true, .muldiv = { { 5, 6 }, { 1, 1 }, { 4, 3 }, { 7, 6 }, { 4, 5 } }, .mask = 0x07, }; /* * Cherry Trail SDM MSR_FSB_FREQ frequencies simplified PLL model: * 0000: 100 * 5 / 6 = 83.3333 MHz * 0001: 100 * 1 / 1 = 100.0000 MHz * 0010: 100 * 4 / 3 = 133.3333 MHz * 0011: 100 * 7 / 6 = 116.6667 MHz * 0100: 100 * 4 / 5 = 80.0000 MHz * 0101: 100 * 14 / 15 = 93.3333 MHz * 0110: 100 * 9 / 10 = 90.0000 MHz * 0111: 100 * 8 / 9 = 88.8889 MHz * 1000: 100 * 7 / 8 = 87.5000 MHz */ static const struct freq_desc freq_desc_cht = { .use_msr_plat = true, .muldiv = { { 5, 6 }, { 1, 1 }, { 4, 3 }, { 7, 6 }, { 4, 5 }, { 14, 15 }, { 9, 10 }, { 8, 9 }, { 7, 8 } }, .mask = 0x0f, }; /* * Merriefield SDM MSR_FSB_FREQ frequencies simplified PLL model: * 0001: 100 * 1 / 1 = 100.0000 MHz * 0010: 100 * 4 / 3 = 133.3333 MHz */ static const struct freq_desc freq_desc_tng = { .use_msr_plat = true, .muldiv = { { 0, 0 }, { 1, 1 }, { 4, 3 } }, .mask = 0x07, }; /* * Moorefield SDM MSR_FSB_FREQ frequencies simplified PLL model: * 0000: 100 * 5 / 6 = 83.3333 MHz * 0001: 100 * 1 / 1 = 100.0000 MHz * 0010: 100 * 4 / 3 = 133.3333 MHz * 0011: 100 * 1 / 1 = 100.0000 MHz */ static const struct freq_desc freq_desc_ann = { .use_msr_plat = true, .muldiv = { { 5, 6 }, { 1, 1 }, { 4, 3 }, { 1, 1 } }, .mask = 0x0f, }; /* * 24 MHz crystal? : 24 * 13 / 4 = 78 MHz * Frequency step for Lightning Mountain SoC is fixed to 78 MHz, * so all the frequency entries are 78000. */ static const struct freq_desc freq_desc_lgm = { .use_msr_plat = true, .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 }, .mask = 0x0f, }; static const struct x86_cpu_id tsc_msr_cpu_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, &freq_desc_pnw), X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_TABLET,&freq_desc_clv), X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &freq_desc_byt), X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &freq_desc_tng), X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &freq_desc_cht), X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_MID, &freq_desc_ann), X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_NP, &freq_desc_lgm), {} }; /* * MSR-based CPU/TSC frequency discovery for certain CPUs. * * Set global "lapic_timer_period" to bus_clock_cycles/jiffy * Return processor base frequency in KHz, or 0 on failure. 
*/ unsigned long cpu_khz_from_msr(void) { u32 lo, hi, ratio, freq, tscref; const struct freq_desc *freq_desc; const struct x86_cpu_id *id; const struct muldiv *md; unsigned long res; int index; id = x86_match_cpu(tsc_msr_cpu_ids); if (!id) return 0; freq_desc = (struct freq_desc *)id->driver_data; if (freq_desc->use_msr_plat) { rdmsr(MSR_PLATFORM_INFO, lo, hi); ratio = (lo >> 8) & 0xff; } else { rdmsr(MSR_IA32_PERF_STATUS, lo, hi); ratio = (hi >> 8) & 0x1f; } /* Get FSB FREQ ID */ rdmsr(MSR_FSB_FREQ, lo, hi); index = lo & freq_desc->mask; md = &freq_desc->muldiv[index]; /* * Note this also catches cases where the index points to an unpopulated * part of muldiv, in that case the else will set freq and res to 0. */ if (md->divider) { tscref = TSC_REFERENCE_KHZ * md->multiplier; freq = DIV_ROUND_CLOSEST(tscref, md->divider); /* * Multiplying by ratio before the division has better * accuracy than just calculating freq * ratio. */ res = DIV_ROUND_CLOSEST(tscref * ratio, md->divider); } else { freq = freq_desc->freqs[index]; res = freq * ratio; } if (freq == 0) pr_err("Error MSR_FSB_FREQ index %d is unknown\n", index); #ifdef CONFIG_X86_LOCAL_APIC lapic_timer_period = (freq * 1000) / HZ; #endif /* * TSC frequency determined by MSR is always considered "known" * because it is reported by HW. * Another fact is that on MSR capable platforms, PIT/HPET is * generally not available so calibration won't work at all. */ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); /* * Unfortunately there is no way for hardware to tell whether the * TSC is reliable. We were told by silicon design team that TSC * on Atom SoCs are always "reliable". TSC is also the only * reliable clocksource on these SoCs (HPET is either not present * or not functional) so mark TSC reliable which removes the * requirement for a watchdog clocksource. */ setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); return res; }
linux-master
arch/x86/kernel/tsc_msr.c
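cpu_khz_from_msr() above multiplies the 100 MHz reference by the PLL quotient and the bus ratio before dividing, to avoid losing precision. The same arithmetic is reproduced in user space below for the Bay Trail index-0 entry (5/6) with a made-up bus ratio of 16.

#include <stdio.h>
#include <stdint.h>

#define TSC_REFERENCE_KHZ 100000ULL

static uint64_t div_round_closest(uint64_t x, uint64_t d)
{
        return (x + d / 2) / d;
}

int main(void)
{
        unsigned int mul = 5, div = 6, ratio = 16;   /* ratio is hypothetical */

        uint64_t tscref = TSC_REFERENCE_KHZ * mul;
        uint64_t fsb    = div_round_closest(tscref, div);          /* 83333 kHz */
        uint64_t rough  = fsb * ratio;                              /* 1333328   */
        uint64_t tsc    = div_round_closest(tscref * ratio, div);   /* 1333333   */

        printf("FSB %llu kHz, TSC %llu kHz (naive %llu kHz)\n",
               (unsigned long long)fsb, (unsigned long long)tsc,
               (unsigned long long)rough);
        return 0;
}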
// SPDX-License-Identifier: GPL-2.0-only
/*
 * EISA specific code
 */
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/io.h>

#include <xen/xen.h>

static __init int eisa_bus_probe(void)
{
	void __iomem *p;

	if (xen_pv_domain() && !xen_initial_domain())
		return 0;

	p = ioremap(0x0FFFD9, 4);
	if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
		EISA_bus = 1;
	iounmap(p);
	return 0;
}
subsys_initcall(eisa_bus_probe);
linux-master
arch/x86/kernel/eisa.c
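The probe above compares four bytes of BIOS ROM with the constant 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24). On a little-endian CPU that constant is exactly the 32-bit value read from memory holding the string "EISA", which the stand-alone check below demonstrates.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        const char sig[4] = { 'E', 'I', 'S', 'A' };
        uint32_t v;

        memcpy(&v, sig, sizeof(v));                 /* what readl() would return */
        uint32_t expected = 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24);

        printf("memory %#x, constant %#x, match=%d\n", v, expected, v == expected);
        return 0;
}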
// SPDX-License-Identifier: GPL-2.0-only /* * Architecture specific debugfs files * * Copyright (C) 2007, Intel Corp. * Huang Ying <[email protected]> */ #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/stat.h> #include <linux/io.h> #include <linux/mm.h> #include <asm/setup.h> struct dentry *arch_debugfs_dir; EXPORT_SYMBOL(arch_debugfs_dir); #ifdef CONFIG_DEBUG_BOOT_PARAMS struct setup_data_node { u64 paddr; u32 type; u32 len; }; static ssize_t setup_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct setup_data_node *node = file->private_data; unsigned long remain; loff_t pos = *ppos; void *p; u64 pa; if (pos < 0) return -EINVAL; if (pos >= node->len) return 0; if (count > node->len - pos) count = node->len - pos; pa = node->paddr + pos; /* Is it direct data or invalid indirect one? */ if (!(node->type & SETUP_INDIRECT) || node->type == SETUP_INDIRECT) pa += sizeof(struct setup_data); p = memremap(pa, count, MEMREMAP_WB); if (!p) return -ENOMEM; remain = copy_to_user(user_buf, p, count); memunmap(p); if (remain) return -EFAULT; *ppos = pos + count; return count; } static const struct file_operations fops_setup_data = { .read = setup_data_read, .open = simple_open, .llseek = default_llseek, }; static void __init create_setup_data_node(struct dentry *parent, int no, struct setup_data_node *node) { struct dentry *d; char buf[16]; sprintf(buf, "%d", no); d = debugfs_create_dir(buf, parent); debugfs_create_x32("type", S_IRUGO, d, &node->type); debugfs_create_file("data", S_IRUGO, d, node, &fops_setup_data); } static int __init create_setup_data_nodes(struct dentry *parent) { struct setup_indirect *indirect; struct setup_data_node *node; struct setup_data *data; u64 pa_data, pa_next; struct dentry *d; int error; u32 len; int no = 0; d = debugfs_create_dir("setup_data", parent); pa_data = boot_params.hdr.setup_data; while (pa_data) { node = kmalloc(sizeof(*node), GFP_KERNEL); if (!node) { error = -ENOMEM; goto err_dir; } data = memremap(pa_data, sizeof(*data), MEMREMAP_WB); if (!data) { kfree(node); error = -ENOMEM; goto err_dir; } pa_next = data->next; if (data->type == SETUP_INDIRECT) { len = sizeof(*data) + data->len; memunmap(data); data = memremap(pa_data, len, MEMREMAP_WB); if (!data) { kfree(node); error = -ENOMEM; goto err_dir; } indirect = (struct setup_indirect *)data->data; if (indirect->type != SETUP_INDIRECT) { node->paddr = indirect->addr; node->type = indirect->type; node->len = indirect->len; } else { node->paddr = pa_data; node->type = data->type; node->len = data->len; } } else { node->paddr = pa_data; node->type = data->type; node->len = data->len; } create_setup_data_node(d, no, node); pa_data = pa_next; memunmap(data); no++; } return 0; err_dir: debugfs_remove_recursive(d); return error; } static struct debugfs_blob_wrapper boot_params_blob = { .data = &boot_params, .size = sizeof(boot_params), }; static int __init boot_params_kdebugfs_init(void) { struct dentry *dbp; int error; dbp = debugfs_create_dir("boot_params", arch_debugfs_dir); debugfs_create_x16("version", S_IRUGO, dbp, &boot_params.hdr.version); debugfs_create_blob("data", S_IRUGO, dbp, &boot_params_blob); error = create_setup_data_nodes(dbp); if (error) debugfs_remove_recursive(dbp); return error; } #endif /* CONFIG_DEBUG_BOOT_PARAMS */ static int __init arch_kdebugfs_init(void) { int error = 0; arch_debugfs_dir = debugfs_create_dir("x86", NULL); #ifdef CONFIG_DEBUG_BOOT_PARAMS error 
= boot_params_kdebugfs_init(); #endif return error; } arch_initcall(arch_kdebugfs_init);
linux-master
arch/x86/kernel/kdebugfs.c
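With debugfs mounted in its usual location, the nodes created above appear under /sys/kernel/debug/x86/boot_params/. Below is a trivial reader of the "version" node; it needs root, and the path assumes the conventional debugfs mount point.

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/x86/boot_params/version";
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);            /* not mounted, not x86, or not root */
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("boot protocol version: %s", buf);
        fclose(f);
        return 0;
}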
// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>

__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

bool pv_is_native_spin_unlock(void)
{
	return pv_ops.lock.queued_spin_unlock.func ==
		__raw_callee_save___native_queued_spin_unlock;
}

__visible bool __native_vcpu_is_preempted(long cpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);

bool pv_is_native_vcpu_is_preempted(void)
{
	return pv_ops.lock.vcpu_is_preempted.func ==
		__raw_callee_save___native_vcpu_is_preempted;
}

void __init paravirt_set_cap(void)
{
	if (!pv_is_native_spin_unlock())
		setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);

	if (!pv_is_native_vcpu_is_preempted())
		setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
}
linux-master
arch/x86/kernel/paravirt-spinlocks.c
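paravirt_set_cap() above only asks whether the ops-table slots still point at the native defaults before advertising the synthetic CPU features. A minimal stand-in for that pattern is sketched below, with invented structure and function names rather than the real pv_ops machinery.

#include <stdio.h>
#include <stdbool.h>

struct lock_ops {
        void (*queued_spin_unlock)(void);
        bool (*vcpu_is_preempted)(long cpu);
};

static void native_unlock(void) { }
static bool native_preempted(long cpu) { (void)cpu; return false; }
static bool hv_preempted(long cpu)     { return cpu & 1; } /* pretend hypervisor op */

static struct lock_ops ops = { native_unlock, native_preempted };

int main(void)
{
        ops.vcpu_is_preempted = hv_preempted;   /* "hypervisor" patches one op */

        printf("PVUNLOCK cap:    %d\n", ops.queued_spin_unlock != native_unlock);
        printf("VCPUPREEMPT cap: %d\n", ops.vcpu_is_preempted != native_preempted);
        return 0;
}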
// SPDX-License-Identifier: GPL-2.0 /* * AMD Encrypted Register State Support * * Author: Joerg Roedel <[email protected]> * * This file is not compiled stand-alone. It contains code shared * between the pre-decompression boot code and the running Linux kernel * and is included directly into both code-bases. */ #ifndef __BOOT_COMPRESSED #define error(v) pr_err(v) #define has_cpuflag(f) boot_cpu_has(f) #else #undef WARN #define WARN(condition, format...) (!!(condition)) #endif /* I/O parameters for CPUID-related helpers */ struct cpuid_leaf { u32 fn; u32 subfn; u32 eax; u32 ebx; u32 ecx; u32 edx; }; /* * Individual entries of the SNP CPUID table, as defined by the SNP * Firmware ABI, Revision 0.9, Section 7.1, Table 14. */ struct snp_cpuid_fn { u32 eax_in; u32 ecx_in; u64 xcr0_in; u64 xss_in; u32 eax; u32 ebx; u32 ecx; u32 edx; u64 __reserved; } __packed; /* * SNP CPUID table, as defined by the SNP Firmware ABI, Revision 0.9, * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit * of 64 entries per CPUID table. */ #define SNP_CPUID_COUNT_MAX 64 struct snp_cpuid_table { u32 count; u32 __reserved1; u64 __reserved2; struct snp_cpuid_fn fn[SNP_CPUID_COUNT_MAX]; } __packed; /* * Since feature negotiation related variables are set early in the boot * process they must reside in the .data section so as not to be zeroed * out when the .bss section is later cleared. * * GHCB protocol version negotiated with the hypervisor. */ static u16 ghcb_version __ro_after_init; /* Copy of the SNP firmware's CPUID page. */ static struct snp_cpuid_table cpuid_table_copy __ro_after_init; /* * These will be initialized based on CPUID table so that non-present * all-zero leaves (for sparse tables) can be differentiated from * invalid/out-of-range leaves. This is needed since all-zero leaves * still need to be post-processed. */ static u32 cpuid_std_range_max __ro_after_init; static u32 cpuid_hyp_range_max __ro_after_init; static u32 cpuid_ext_range_max __ro_after_init; static bool __init sev_es_check_cpu_features(void) { if (!has_cpuflag(X86_FEATURE_RDRAND)) { error("RDRAND instruction not supported - no trusted source of randomness available\n"); return false; } return true; } static void __noreturn sev_es_terminate(unsigned int set, unsigned int reason) { u64 val = GHCB_MSR_TERM_REQ; /* Tell the hypervisor what went wrong. */ val |= GHCB_SEV_TERM_REASON(set, reason); /* Request Guest Termination from Hypvervisor */ sev_es_wr_ghcb_msr(val); VMGEXIT(); while (true) asm volatile("hlt\n" : : : "memory"); } /* * The hypervisor features are available from GHCB version 2 onward. 
*/ static u64 get_hv_features(void) { u64 val; if (ghcb_version < 2) return 0; sev_es_wr_ghcb_msr(GHCB_MSR_HV_FT_REQ); VMGEXIT(); val = sev_es_rd_ghcb_msr(); if (GHCB_RESP_CODE(val) != GHCB_MSR_HV_FT_RESP) return 0; return GHCB_MSR_HV_FT_RESP_VAL(val); } static void snp_register_ghcb_early(unsigned long paddr) { unsigned long pfn = paddr >> PAGE_SHIFT; u64 val; sev_es_wr_ghcb_msr(GHCB_MSR_REG_GPA_REQ_VAL(pfn)); VMGEXIT(); val = sev_es_rd_ghcb_msr(); /* If the response GPA is not ours then abort the guest */ if ((GHCB_RESP_CODE(val) != GHCB_MSR_REG_GPA_RESP) || (GHCB_MSR_REG_GPA_RESP_VAL(val) != pfn)) sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_REGISTER); } static bool sev_es_negotiate_protocol(void) { u64 val; /* Do the GHCB protocol version negotiation */ sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ); VMGEXIT(); val = sev_es_rd_ghcb_msr(); if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP) return false; if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN || GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX) return false; ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val), GHCB_PROTOCOL_MAX); return true; } static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb) { ghcb->save.sw_exit_code = 0; __builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); } static bool vc_decoding_needed(unsigned long exit_code) { /* Exceptions don't require to decode the instruction */ return !(exit_code >= SVM_EXIT_EXCP_BASE && exit_code <= SVM_EXIT_LAST_EXCP); } static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt, struct pt_regs *regs, unsigned long exit_code) { enum es_result ret = ES_OK; memset(ctxt, 0, sizeof(*ctxt)); ctxt->regs = regs; if (vc_decoding_needed(exit_code)) ret = vc_decode_insn(ctxt); return ret; } static void vc_finish_insn(struct es_em_ctxt *ctxt) { ctxt->regs->ip += ctxt->insn.length; } static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { u32 ret; ret = ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0); if (!ret) return ES_OK; if (ret == 1) { u64 info = ghcb->save.sw_exit_info_2; unsigned long v = info & SVM_EVTINJ_VEC_MASK; /* Check if exception information from hypervisor is sane. */ if ((info & SVM_EVTINJ_VALID) && ((v == X86_TRAP_GP) || (v == X86_TRAP_UD)) && ((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) { ctxt->fi.vector = v; if (info & SVM_EVTINJ_VALID_ERR) ctxt->fi.error_code = info >> 32; return ES_EXCEPTION; } } return ES_VMM_ERROR; } static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, struct es_em_ctxt *ctxt, u64 exit_code, u64 exit_info_1, u64 exit_info_2) { /* Fill in protocol and format specifiers */ ghcb->protocol_version = ghcb_version; ghcb->ghcb_usage = GHCB_DEFAULT_USAGE; ghcb_set_sw_exit_code(ghcb, exit_code); ghcb_set_sw_exit_info_1(ghcb, exit_info_1); ghcb_set_sw_exit_info_2(ghcb, exit_info_2); sev_es_wr_ghcb_msr(__pa(ghcb)); VMGEXIT(); return verify_exception_info(ghcb, ctxt); } static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg) { u64 val; sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, reg_idx)); VMGEXIT(); val = sev_es_rd_ghcb_msr(); if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP) return -EIO; *reg = (val >> 32); return 0; } static int sev_cpuid_hv(struct cpuid_leaf *leaf) { int ret; /* * MSR protocol does not support fetching non-zero subfunctions, but is * sufficient to handle current early-boot cases. Should that change, * make sure to report an error rather than ignoring the index and * grabbing random values. 
If this issue arises in the future, handling * can be added here to use GHCB-page protocol for cases that occur late * enough in boot that GHCB page is available. */ if (cpuid_function_is_indexed(leaf->fn) && leaf->subfn) return -EINVAL; ret = __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EAX, &leaf->eax); ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EBX, &leaf->ebx); ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_ECX, &leaf->ecx); ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EDX, &leaf->edx); return ret; } /* * This may be called early while still running on the initial identity * mapping. Use RIP-relative addressing to obtain the correct address * while running with the initial identity mapping as well as the * switch-over to kernel virtual addresses later. */ static const struct snp_cpuid_table *snp_cpuid_get_table(void) { void *ptr; asm ("lea cpuid_table_copy(%%rip), %0" : "=r" (ptr) : "p" (&cpuid_table_copy)); return ptr; } /* * The SNP Firmware ABI, Revision 0.9, Section 7.1, details the use of * XCR0_IN and XSS_IN to encode multiple versions of 0xD subfunctions 0 * and 1 based on the corresponding features enabled by a particular * combination of XCR0 and XSS registers so that a guest can look up the * version corresponding to the features currently enabled in its XCR0/XSS * registers. The only values that differ between these versions/table * entries is the enabled XSAVE area size advertised via EBX. * * While hypervisors may choose to make use of this support, it is more * robust/secure for a guest to simply find the entry corresponding to the * base/legacy XSAVE area size (XCR0=1 or XCR0=3), and then calculate the * XSAVE area size using subfunctions 2 through 64, as documented in APM * Volume 3, Rev 3.31, Appendix E.3.8, which is what is done here. * * Since base/legacy XSAVE area size is documented as 0x240, use that value * directly rather than relying on the base size in the CPUID table. * * Return: XSAVE area size on success, 0 otherwise. */ static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted) { const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table(); u64 xfeatures_found = 0; u32 xsave_size = 0x240; int i; for (i = 0; i < cpuid_table->count; i++) { const struct snp_cpuid_fn *e = &cpuid_table->fn[i]; if (!(e->eax_in == 0xD && e->ecx_in > 1 && e->ecx_in < 64)) continue; if (!(xfeatures_en & (BIT_ULL(e->ecx_in)))) continue; if (xfeatures_found & (BIT_ULL(e->ecx_in))) continue; xfeatures_found |= (BIT_ULL(e->ecx_in)); if (compacted) xsave_size += e->eax; else xsave_size = max(xsave_size, e->eax + e->ebx); } /* * Either the guest set unsupported XCR0/XSS bits, or the corresponding * entries in the CPUID table were not present. This is not a valid * state to be in. */ if (xfeatures_found != (xfeatures_en & GENMASK_ULL(63, 2))) return 0; return xsave_size; } static bool snp_cpuid_get_validated_func(struct cpuid_leaf *leaf) { const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table(); int i; for (i = 0; i < cpuid_table->count; i++) { const struct snp_cpuid_fn *e = &cpuid_table->fn[i]; if (e->eax_in != leaf->fn) continue; if (cpuid_function_is_indexed(leaf->fn) && e->ecx_in != leaf->subfn) continue; /* * For 0xD subfunctions 0 and 1, only use the entry corresponding * to the base/legacy XSAVE area size (XCR0=1 or XCR0=3, XSS=0). * See the comments above snp_cpuid_calc_xsave_size() for more * details. 
*/ if (e->eax_in == 0xD && (e->ecx_in == 0 || e->ecx_in == 1)) if (!(e->xcr0_in == 1 || e->xcr0_in == 3) || e->xss_in) continue; leaf->eax = e->eax; leaf->ebx = e->ebx; leaf->ecx = e->ecx; leaf->edx = e->edx; return true; } return false; } static void snp_cpuid_hv(struct cpuid_leaf *leaf) { if (sev_cpuid_hv(leaf)) sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV); } static int snp_cpuid_postprocess(struct cpuid_leaf *leaf) { struct cpuid_leaf leaf_hv = *leaf; switch (leaf->fn) { case 0x1: snp_cpuid_hv(&leaf_hv); /* initial APIC ID */ leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0)); /* APIC enabled bit */ leaf->edx = (leaf_hv.edx & BIT(9)) | (leaf->edx & ~BIT(9)); /* OSXSAVE enabled bit */ if (native_read_cr4() & X86_CR4_OSXSAVE) leaf->ecx |= BIT(27); break; case 0x7: /* OSPKE enabled bit */ leaf->ecx &= ~BIT(4); if (native_read_cr4() & X86_CR4_PKE) leaf->ecx |= BIT(4); break; case 0xB: leaf_hv.subfn = 0; snp_cpuid_hv(&leaf_hv); /* extended APIC ID */ leaf->edx = leaf_hv.edx; break; case 0xD: { bool compacted = false; u64 xcr0 = 1, xss = 0; u32 xsave_size; if (leaf->subfn != 0 && leaf->subfn != 1) return 0; if (native_read_cr4() & X86_CR4_OSXSAVE) xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); if (leaf->subfn == 1) { /* Get XSS value if XSAVES is enabled. */ if (leaf->eax & BIT(3)) { unsigned long lo, hi; asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (MSR_IA32_XSS)); xss = (hi << 32) | lo; } /* * The PPR and APM aren't clear on what size should be * encoded in 0xD:0x1:EBX when compaction is not enabled * by either XSAVEC (feature bit 1) or XSAVES (feature * bit 3) since SNP-capable hardware has these feature * bits fixed as 1. KVM sets it to 0 in this case, but * to avoid this becoming an issue it's safer to simply * treat this as unsupported for SNP guests. */ if (!(leaf->eax & (BIT(1) | BIT(3)))) return -EINVAL; compacted = true; } xsave_size = snp_cpuid_calc_xsave_size(xcr0 | xss, compacted); if (!xsave_size) return -EINVAL; leaf->ebx = xsave_size; } break; case 0x8000001E: snp_cpuid_hv(&leaf_hv); /* extended APIC ID */ leaf->eax = leaf_hv.eax; /* compute ID */ leaf->ebx = (leaf->ebx & GENMASK(31, 8)) | (leaf_hv.ebx & GENMASK(7, 0)); /* node ID */ leaf->ecx = (leaf->ecx & GENMASK(31, 8)) | (leaf_hv.ecx & GENMASK(7, 0)); break; default: /* No fix-ups needed, use values as-is. */ break; } return 0; } /* * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value * should be treated as fatal by caller. */ static int snp_cpuid(struct cpuid_leaf *leaf) { const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table(); if (!cpuid_table->count) return -EOPNOTSUPP; if (!snp_cpuid_get_validated_func(leaf)) { /* * Some hypervisors will avoid keeping track of CPUID entries * where all values are zero, since they can be handled the * same as out-of-range values (all-zero). This is useful here * as well as it allows virtually all guest configurations to * work using a single SNP CPUID table. * * To allow for this, there is a need to distinguish between * out-of-range entries and in-range zero entries, since the * CPUID table entries are only a template that may need to be * augmented with additional values for things like * CPU-specific information during post-processing. So if it's * not in the table, set the values to zero. Then, if they are * within a valid CPUID range, proceed with post-processing * using zeros as the initial values. Otherwise, skip * post-processing and just return zeros immediately. 
*/ leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0; /* Skip post-processing for out-of-range zero leafs. */ if (!(leaf->fn <= cpuid_std_range_max || (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) || (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max))) return 0; } return snp_cpuid_postprocess(leaf); } /* * Boot VC Handler - This is the first VC handler during boot, there is no GHCB * page yet, so it only supports the MSR based communication with the * hypervisor and only the CPUID exit-code. */ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code) { unsigned int subfn = lower_bits(regs->cx, 32); unsigned int fn = lower_bits(regs->ax, 32); struct cpuid_leaf leaf; int ret; /* Only CPUID is supported via MSR protocol */ if (exit_code != SVM_EXIT_CPUID) goto fail; leaf.fn = fn; leaf.subfn = subfn; ret = snp_cpuid(&leaf); if (!ret) goto cpuid_done; if (ret != -EOPNOTSUPP) goto fail; if (sev_cpuid_hv(&leaf)) goto fail; cpuid_done: regs->ax = leaf.eax; regs->bx = leaf.ebx; regs->cx = leaf.ecx; regs->dx = leaf.edx; /* * This is a VC handler and the #VC is only raised when SEV-ES is * active, which means SEV must be active too. Do sanity checks on the * CPUID results to make sure the hypervisor does not trick the kernel * into the no-sev path. This could map sensitive data unencrypted and * make it accessible to the hypervisor. * * In particular, check for: * - Availability of CPUID leaf 0x8000001f * - SEV CPUID bit. * * The hypervisor might still report the wrong C-bit position, but this * can't be checked here. */ if (fn == 0x80000000 && (regs->ax < 0x8000001f)) /* SEV leaf check */ goto fail; else if ((fn == 0x8000001f && !(regs->ax & BIT(1)))) /* SEV bit */ goto fail; /* Skip over the CPUID two-byte opcode */ regs->ip += 2; return; fail: /* Terminate the guest */ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); } static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt, void *src, char *buf, unsigned int data_size, unsigned int count, bool backwards) { int i, b = backwards ? -1 : 1; enum es_result ret = ES_OK; for (i = 0; i < count; i++) { void *s = src + (i * data_size * b); char *d = buf + (i * data_size); ret = vc_read_mem(ctxt, s, d, data_size); if (ret != ES_OK) break; } return ret; } static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt, void *dst, char *buf, unsigned int data_size, unsigned int count, bool backwards) { int i, s = backwards ? 
-1 : 1; enum es_result ret = ES_OK; for (i = 0; i < count; i++) { void *d = dst + (i * data_size * s); char *b = buf + (i * data_size); ret = vc_write_mem(ctxt, d, b, data_size); if (ret != ES_OK) break; } return ret; } #define IOIO_TYPE_STR BIT(2) #define IOIO_TYPE_IN 1 #define IOIO_TYPE_INS (IOIO_TYPE_IN | IOIO_TYPE_STR) #define IOIO_TYPE_OUT 0 #define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR) #define IOIO_REP BIT(3) #define IOIO_ADDR_64 BIT(9) #define IOIO_ADDR_32 BIT(8) #define IOIO_ADDR_16 BIT(7) #define IOIO_DATA_32 BIT(6) #define IOIO_DATA_16 BIT(5) #define IOIO_DATA_8 BIT(4) #define IOIO_SEG_ES (0 << 10) #define IOIO_SEG_DS (3 << 10) static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo) { struct insn *insn = &ctxt->insn; *exitinfo = 0; switch (insn->opcode.bytes[0]) { /* INS opcodes */ case 0x6c: case 0x6d: *exitinfo |= IOIO_TYPE_INS; *exitinfo |= IOIO_SEG_ES; *exitinfo |= (ctxt->regs->dx & 0xffff) << 16; break; /* OUTS opcodes */ case 0x6e: case 0x6f: *exitinfo |= IOIO_TYPE_OUTS; *exitinfo |= IOIO_SEG_DS; *exitinfo |= (ctxt->regs->dx & 0xffff) << 16; break; /* IN immediate opcodes */ case 0xe4: case 0xe5: *exitinfo |= IOIO_TYPE_IN; *exitinfo |= (u8)insn->immediate.value << 16; break; /* OUT immediate opcodes */ case 0xe6: case 0xe7: *exitinfo |= IOIO_TYPE_OUT; *exitinfo |= (u8)insn->immediate.value << 16; break; /* IN register opcodes */ case 0xec: case 0xed: *exitinfo |= IOIO_TYPE_IN; *exitinfo |= (ctxt->regs->dx & 0xffff) << 16; break; /* OUT register opcodes */ case 0xee: case 0xef: *exitinfo |= IOIO_TYPE_OUT; *exitinfo |= (ctxt->regs->dx & 0xffff) << 16; break; default: return ES_DECODE_FAILED; } switch (insn->opcode.bytes[0]) { case 0x6c: case 0x6e: case 0xe4: case 0xe6: case 0xec: case 0xee: /* Single byte opcodes */ *exitinfo |= IOIO_DATA_8; break; default: /* Length determined by instruction parsing */ *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16 : IOIO_DATA_32; } switch (insn->addr_bytes) { case 2: *exitinfo |= IOIO_ADDR_16; break; case 4: *exitinfo |= IOIO_ADDR_32; break; case 8: *exitinfo |= IOIO_ADDR_64; break; } if (insn_has_rep_prefix(insn)) *exitinfo |= IOIO_REP; return ES_OK; } static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { struct pt_regs *regs = ctxt->regs; u64 exit_info_1, exit_info_2; enum es_result ret; ret = vc_ioio_exitinfo(ctxt, &exit_info_1); if (ret != ES_OK) return ret; if (exit_info_1 & IOIO_TYPE_STR) { /* (REP) INS/OUTS */ bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF); unsigned int io_bytes, exit_bytes; unsigned int ghcb_count, op_count; unsigned long es_base; u64 sw_scratch; /* * For the string variants with rep prefix the amount of in/out * operations per #VC exception is limited so that the kernel * has a chance to take interrupts and re-schedule while the * instruction is emulated. */ io_bytes = (exit_info_1 >> 4) & 0x7; ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes; op_count = (exit_info_1 & IOIO_REP) ? regs->cx : 1; exit_info_2 = min(op_count, ghcb_count); exit_bytes = exit_info_2 * io_bytes; es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES); /* Read bytes of OUTS into the shared buffer */ if (!(exit_info_1 & IOIO_TYPE_IN)) { ret = vc_insn_string_read(ctxt, (void *)(es_base + regs->si), ghcb->shared_buffer, io_bytes, exit_info_2, df); if (ret) return ret; } /* * Issue an VMGEXIT to the HV to consume the bytes from the * shared buffer or to have it write them into the shared buffer * depending on the instruction: OUTS or INS. 
*/ sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer); ghcb_set_sw_scratch(ghcb, sw_scratch); ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, exit_info_2); if (ret != ES_OK) return ret; /* Read bytes from shared buffer into the guest's destination. */ if (exit_info_1 & IOIO_TYPE_IN) { ret = vc_insn_string_write(ctxt, (void *)(es_base + regs->di), ghcb->shared_buffer, io_bytes, exit_info_2, df); if (ret) return ret; if (df) regs->di -= exit_bytes; else regs->di += exit_bytes; } else { if (df) regs->si -= exit_bytes; else regs->si += exit_bytes; } if (exit_info_1 & IOIO_REP) regs->cx -= exit_info_2; ret = regs->cx ? ES_RETRY : ES_OK; } else { /* IN/OUT into/from rAX */ int bits = (exit_info_1 & 0x70) >> 1; u64 rax = 0; if (!(exit_info_1 & IOIO_TYPE_IN)) rax = lower_bits(regs->ax, bits); ghcb_set_rax(ghcb, rax); ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0); if (ret != ES_OK) return ret; if (exit_info_1 & IOIO_TYPE_IN) { if (!ghcb_rax_is_valid(ghcb)) return ES_VMM_ERROR; regs->ax = lower_bits(ghcb->save.rax, bits); } } return ret; } static int vc_handle_cpuid_snp(struct pt_regs *regs) { struct cpuid_leaf leaf; int ret; leaf.fn = regs->ax; leaf.subfn = regs->cx; ret = snp_cpuid(&leaf); if (!ret) { regs->ax = leaf.eax; regs->bx = leaf.ebx; regs->cx = leaf.ecx; regs->dx = leaf.edx; } return ret; } static enum es_result vc_handle_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { struct pt_regs *regs = ctxt->regs; u32 cr4 = native_read_cr4(); enum es_result ret; int snp_cpuid_ret; snp_cpuid_ret = vc_handle_cpuid_snp(regs); if (!snp_cpuid_ret) return ES_OK; if (snp_cpuid_ret != -EOPNOTSUPP) return ES_VMM_ERROR; ghcb_set_rax(ghcb, regs->ax); ghcb_set_rcx(ghcb, regs->cx); if (cr4 & X86_CR4_OSXSAVE) /* Safe to read xcr0 */ ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK)); else /* xgetbv will cause #GP - use reset value for xcr0 */ ghcb_set_xcr0(ghcb, 1); ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0); if (ret != ES_OK) return ret; if (!(ghcb_rax_is_valid(ghcb) && ghcb_rbx_is_valid(ghcb) && ghcb_rcx_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb))) return ES_VMM_ERROR; regs->ax = ghcb->save.rax; regs->bx = ghcb->save.rbx; regs->cx = ghcb->save.rcx; regs->dx = ghcb->save.rdx; return ES_OK; } static enum es_result vc_handle_rdtsc(struct ghcb *ghcb, struct es_em_ctxt *ctxt, unsigned long exit_code) { bool rdtscp = (exit_code == SVM_EXIT_RDTSCP); enum es_result ret; ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0); if (ret != ES_OK) return ret; if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) && (!rdtscp || ghcb_rcx_is_valid(ghcb)))) return ES_VMM_ERROR; ctxt->regs->ax = ghcb->save.rax; ctxt->regs->dx = ghcb->save.rdx; if (rdtscp) ctxt->regs->cx = ghcb->save.rcx; return ES_OK; } struct cc_setup_data { struct setup_data header; u32 cc_blob_address; }; /* * Search for a Confidential Computing blob passed in as a setup_data entry * via the Linux Boot Protocol. */ static struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp) { struct cc_setup_data *sd = NULL; struct setup_data *hdr; hdr = (struct setup_data *)bp->hdr.setup_data; while (hdr) { if (hdr->type == SETUP_CC_BLOB) { sd = (struct cc_setup_data *)hdr; return (struct cc_blob_sev_info *)(unsigned long)sd->cc_blob_address; } hdr = (struct setup_data *)hdr->next; } return NULL; } /* * Initialize the kernel's copy of the SNP CPUID table, and set up the * pointer that will be used to access it. 
* * Maintaining a direct mapping of the SNP CPUID table used by firmware would * be possible as an alternative, but the approach is brittle since the * mapping needs to be updated in sync with all the changes to virtual memory * layout and related mapping facilities throughout the boot process. */ static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info) { const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table; int i; if (!cc_info || !cc_info->cpuid_phys || cc_info->cpuid_len < PAGE_SIZE) sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID); cpuid_table_fw = (const struct snp_cpuid_table *)cc_info->cpuid_phys; if (!cpuid_table_fw->count || cpuid_table_fw->count > SNP_CPUID_COUNT_MAX) sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID); cpuid_table = snp_cpuid_get_table(); memcpy((void *)cpuid_table, cpuid_table_fw, sizeof(*cpuid_table)); /* Initialize CPUID ranges for range-checking. */ for (i = 0; i < cpuid_table->count; i++) { const struct snp_cpuid_fn *fn = &cpuid_table->fn[i]; if (fn->eax_in == 0x0) cpuid_std_range_max = fn->eax; else if (fn->eax_in == 0x40000000) cpuid_hyp_range_max = fn->eax; else if (fn->eax_in == 0x80000000) cpuid_ext_range_max = fn->eax; } } static void pvalidate_pages(struct snp_psc_desc *desc) { struct psc_entry *e; unsigned long vaddr; unsigned int size; unsigned int i; bool validate; int rc; for (i = 0; i <= desc->hdr.end_entry; i++) { e = &desc->entries[i]; vaddr = (unsigned long)pfn_to_kaddr(e->gfn); size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; validate = e->operation == SNP_PAGE_STATE_PRIVATE; rc = pvalidate(vaddr, size, validate); if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) { unsigned long vaddr_end = vaddr + PMD_SIZE; for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) { rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate); if (rc) break; } } if (rc) { WARN(1, "Failed to validate address 0x%lx ret %d", vaddr, rc); sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE); } } } static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc) { int cur_entry, end_entry, ret = 0; struct snp_psc_desc *data; struct es_em_ctxt ctxt; vc_ghcb_invalidate(ghcb); /* Copy the input desc into GHCB shared buffer */ data = (struct snp_psc_desc *)ghcb->shared_buffer; memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc))); /* * As per the GHCB specification, the hypervisor can resume the guest * before processing all the entries. Check whether all the entries * are processed. If not, then keep retrying. Note, the hypervisor * will update the data memory directly to indicate the status, so * reference the data->hdr everywhere. * * The strategy here is to wait for the hypervisor to change the page * state in the RMP table before guest accesses the memory pages. If the * page state change was not successful, then later memory access will * result in a crash. */ cur_entry = data->hdr.cur_entry; end_entry = data->hdr.end_entry; while (data->hdr.cur_entry <= data->hdr.end_entry) { ghcb_set_sw_scratch(ghcb, (u64)__pa(data)); /* This will advance the shared buffer data points to. */ ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0); /* * Page State Change VMGEXIT can pass error code through * exit_info_2. 
*/ if (WARN(ret || ghcb->save.sw_exit_info_2, "SNP: PSC failed ret=%d exit_info_2=%llx\n", ret, ghcb->save.sw_exit_info_2)) { ret = 1; goto out; } /* Verify that the reserved bit is not set */ if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) { ret = 1; goto out; } /* * Sanity check that entry processing is not going backwards. * This will happen only if the hypervisor is tricking us. */ if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry, "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n", end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) { ret = 1; goto out; } } out: return ret; }
linux-master
arch/x86/kernel/sev-shared.c
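Much of sev-shared.c above revolves around the SNP CPUID table: find the entry whose EAX_IN (and, for indexed leaves, ECX_IN) matches the request, otherwise fall back to zeros for in-range leaves. Below is a small stand-alone sketch of that lookup rule under simplified assumptions: the struct is a cut-down stand-in for snp_cpuid_fn, the indexed flag replaces the kernel's cpuid_function_is_indexed() helper, and the table contents are made up.

/*
 * Sketch only: SNP-style CPUID table lookup following the matching rules
 * of snp_cpuid_get_validated_func() in the entry above.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_cpuid_fn {
    uint32_t eax_in, ecx_in;
    uint32_t eax, ebx, ecx, edx;
    bool indexed;                   /* does this leaf use the sub-function (ECX) input? */
};

static const struct demo_cpuid_fn demo_table[] = {
    { .eax_in = 0x0, .eax = 0x1f, .ebx = 0x756e6547 },          /* max std leaf, "Genu" */
    { .eax_in = 0x7, .ecx_in = 0, .ebx = 0x00000001, .indexed = true },
    { .eax_in = 0x7, .ecx_in = 1, .eax = 0x00000010, .indexed = true },
};

static bool demo_lookup(uint32_t fn, uint32_t subfn, struct demo_cpuid_fn *out)
{
    for (size_t i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++) {
        const struct demo_cpuid_fn *e = &demo_table[i];

        if (e->eax_in != fn)
            continue;
        /* Only indexed leaves compare the sub-function input. */
        if (e->indexed && e->ecx_in != subfn)
            continue;
        *out = *e;
        return true;
    }
    return false;
}

int main(void)
{
    struct demo_cpuid_fn leaf;

    if (demo_lookup(0x7, 1, &leaf))
        printf("0x7/0x1 -> eax=%#x\n", (unsigned)leaf.eax);
    if (!demo_lookup(0x12, 0, &leaf))
        printf("0x12 not in table: treat as all-zero if within a known range\n");
    return 0;
}

A real guest takes the table from the firmware-provided CC blob and still post-processes leaves such as 0x1, 0xB and 0xD, as the file above does; the sketch covers only the table match itself.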
// SPDX-License-Identifier: GPL-2.0-only /* * handle transition of Linux booting another kernel * Copyright (C) 2002-2005 Eric Biederman <[email protected]> */ #define pr_fmt(fmt) "kexec: " fmt #include <linux/mm.h> #include <linux/kexec.h> #include <linux/string.h> #include <linux/gfp.h> #include <linux/reboot.h> #include <linux/numa.h> #include <linux/ftrace.h> #include <linux/io.h> #include <linux/suspend.h> #include <linux/vmalloc.h> #include <linux/efi.h> #include <linux/cc_platform.h> #include <asm/init.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/io_apic.h> #include <asm/debugreg.h> #include <asm/kexec-bzimage64.h> #include <asm/setup.h> #include <asm/set_memory.h> #include <asm/cpu.h> #ifdef CONFIG_ACPI /* * Used while adding mapping for ACPI tables. * Can be reused when other iomem regions need be mapped */ struct init_pgtable_data { struct x86_mapping_info *info; pgd_t *level4p; }; static int mem_region_callback(struct resource *res, void *arg) { struct init_pgtable_data *data = arg; unsigned long mstart, mend; mstart = res->start; mend = mstart + resource_size(res) - 1; return kernel_ident_mapping_init(data->info, data->level4p, mstart, mend); } static int map_acpi_tables(struct x86_mapping_info *info, pgd_t *level4p) { struct init_pgtable_data data; unsigned long flags; int ret; data.info = info; data.level4p = level4p; flags = IORESOURCE_MEM | IORESOURCE_BUSY; ret = walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &data, mem_region_callback); if (ret && ret != -EINVAL) return ret; /* ACPI tables could be located in ACPI Non-volatile Storage region */ ret = walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &data, mem_region_callback); if (ret && ret != -EINVAL) return ret; return 0; } #else static int map_acpi_tables(struct x86_mapping_info *info, pgd_t *level4p) { return 0; } #endif #ifdef CONFIG_KEXEC_FILE const struct kexec_file_ops * const kexec_file_loaders[] = { &kexec_bzImage64_ops, NULL }; #endif static int map_efi_systab(struct x86_mapping_info *info, pgd_t *level4p) { #ifdef CONFIG_EFI unsigned long mstart, mend; if (!efi_enabled(EFI_BOOT)) return 0; mstart = (boot_params.efi_info.efi_systab | ((u64)boot_params.efi_info.efi_systab_hi<<32)); if (efi_enabled(EFI_64BIT)) mend = mstart + sizeof(efi_system_table_64_t); else mend = mstart + sizeof(efi_system_table_32_t); if (!mstart) return 0; return kernel_ident_mapping_init(info, level4p, mstart, mend); #endif return 0; } static void free_transition_pgtable(struct kimage *image) { free_page((unsigned long)image->arch.p4d); image->arch.p4d = NULL; free_page((unsigned long)image->arch.pud); image->arch.pud = NULL; free_page((unsigned long)image->arch.pmd); image->arch.pmd = NULL; free_page((unsigned long)image->arch.pte); image->arch.pte = NULL; } static int init_transition_pgtable(struct kimage *image, pgd_t *pgd) { pgprot_t prot = PAGE_KERNEL_EXEC_NOENC; unsigned long vaddr, paddr; int result = -ENOMEM; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; vaddr = (unsigned long)relocate_kernel; paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE); pgd += pgd_index(vaddr); if (!pgd_present(*pgd)) { p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL); if (!p4d) goto err; image->arch.p4d = p4d; set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE)); } p4d = p4d_offset(pgd, vaddr); if (!p4d_present(*p4d)) { pud = (pud_t *)get_zeroed_page(GFP_KERNEL); if (!pud) goto err; image->arch.pud = pud; set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE)); } pud = pud_offset(p4d, vaddr); if 
(!pud_present(*pud)) { pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL); if (!pmd) goto err; image->arch.pmd = pmd; set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); } pmd = pmd_offset(pud, vaddr); if (!pmd_present(*pmd)) { pte = (pte_t *)get_zeroed_page(GFP_KERNEL); if (!pte) goto err; image->arch.pte = pte; set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); } pte = pte_offset_kernel(pmd, vaddr); if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) prot = PAGE_KERNEL_EXEC; set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot)); return 0; err: return result; } static void *alloc_pgt_page(void *data) { struct kimage *image = (struct kimage *)data; struct page *page; void *p = NULL; page = kimage_alloc_control_pages(image, 0); if (page) { p = page_address(page); clear_page(p); } return p; } static int init_pgtable(struct kimage *image, unsigned long start_pgtable) { struct x86_mapping_info info = { .alloc_pgt_page = alloc_pgt_page, .context = image, .page_flag = __PAGE_KERNEL_LARGE_EXEC, .kernpg_flag = _KERNPG_TABLE_NOENC, }; unsigned long mstart, mend; pgd_t *level4p; int result; int i; level4p = (pgd_t *)__va(start_pgtable); clear_page(level4p); if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { info.page_flag |= _PAGE_ENC; info.kernpg_flag |= _PAGE_ENC; } if (direct_gbpages) info.direct_gbpages = true; for (i = 0; i < nr_pfn_mapped; i++) { mstart = pfn_mapped[i].start << PAGE_SHIFT; mend = pfn_mapped[i].end << PAGE_SHIFT; result = kernel_ident_mapping_init(&info, level4p, mstart, mend); if (result) return result; } /* * segments's mem ranges could be outside 0 ~ max_pfn, * for example when jump back to original kernel from kexeced kernel. * or first kernel is booted with user mem map, and second kernel * could be loaded out of that range. */ for (i = 0; i < image->nr_segments; i++) { mstart = image->segment[i].mem; mend = mstart + image->segment[i].memsz; result = kernel_ident_mapping_init(&info, level4p, mstart, mend); if (result) return result; } /* * Prepare EFI systab and ACPI tables for kexec kernel since they are * not covered by pfn_mapped. */ result = map_efi_systab(&info, level4p); if (result) return result; result = map_acpi_tables(&info, level4p); if (result) return result; return init_transition_pgtable(image, level4p); } static void load_segments(void) { __asm__ __volatile__ ( "\tmovl %0,%%ds\n" "\tmovl %0,%%es\n" "\tmovl %0,%%ss\n" "\tmovl %0,%%fs\n" "\tmovl %0,%%gs\n" : : "a" (__KERNEL_DS) : "memory" ); } int machine_kexec_prepare(struct kimage *image) { unsigned long start_pgtable; int result; /* Calculate the offsets */ start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; /* Setup the identity mapped 64bit page table */ result = init_pgtable(image, start_pgtable); if (result) return result; return 0; } void machine_kexec_cleanup(struct kimage *image) { free_transition_pgtable(image); } /* * Do not allocate memory (or fail in any way) in machine_kexec(). * We are past the point of no return, committed to rebooting now. */ void machine_kexec(struct kimage *image) { unsigned long page_list[PAGES_NR]; void *control_page; int save_ftrace_enabled; #ifdef CONFIG_KEXEC_JUMP if (image->preserve_context) save_processor_state(); #endif save_ftrace_enabled = __ftrace_enabled_save(); /* Interrupts aren't acceptable while we reboot */ local_irq_disable(); hw_breakpoint_disable(); cet_disable(); if (image->preserve_context) { #ifdef CONFIG_X86_IO_APIC /* * We need to put APICs in legacy mode so that we can * get timer interrupts in second kernel. 
kexec/kdump * paths already have calls to restore_boot_irq_mode() * in one form or other. The kexec jump path also needs one. */ clear_IO_APIC(); restore_boot_irq_mode(); #endif } control_page = page_address(image->control_code_page) + PAGE_SIZE; __memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE); page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page); page_list[VA_CONTROL_PAGE] = (unsigned long)control_page; page_list[PA_TABLE_PAGE] = (unsigned long)__pa(page_address(image->control_code_page)); if (image->type == KEXEC_TYPE_DEFAULT) page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) << PAGE_SHIFT); /* * The segment registers are funny things, they have both a * visible and an invisible part. Whenever the visible part is * set to a specific selector, the invisible part is loaded * from a table in memory. At no other time is the * descriptor table in memory accessed. * * I take advantage of this here by force loading the * segments, before I zap the gdt with an invalid value. */ load_segments(); /* * The gdt & idt are now invalid. * If you want to load them you must set up your own idt & gdt. */ native_idt_invalidate(); native_gdt_invalidate(); /* now call it */ image->start = relocate_kernel((unsigned long)image->head, (unsigned long)page_list, image->start, image->preserve_context, cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)); #ifdef CONFIG_KEXEC_JUMP if (image->preserve_context) restore_processor_state(); #endif __ftrace_enabled_restore(save_ftrace_enabled); } /* arch-dependent functionality related to kexec file-based syscall */ #ifdef CONFIG_KEXEC_FILE /* * Apply purgatory relocations. * * @pi: Purgatory to be relocated. * @section: Section relocations applying to. * @relsec: Section containing RELAs. * @symtabsec: Corresponding symtab. * * TODO: Some of the code belongs to generic code. Move that in kexec.c. */ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section, const Elf_Shdr *relsec, const Elf_Shdr *symtabsec) { unsigned int i; Elf64_Rela *rel; Elf64_Sym *sym; void *location; unsigned long address, sec_base, value; const char *strtab, *name, *shstrtab; const Elf_Shdr *sechdrs; /* String & section header string table */ sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff; strtab = (char *)pi->ehdr + sechdrs[symtabsec->sh_link].sh_offset; shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset; rel = (void *)pi->ehdr + relsec->sh_offset; pr_debug("Applying relocate section %s to %u\n", shstrtab + relsec->sh_name, relsec->sh_info); for (i = 0; i < relsec->sh_size / sizeof(*rel); i++) { /* * rel[i].r_offset contains byte offset from beginning * of section to the storage unit affected. * * This is the location to update. This is the temporary buffer * where the section is currently loaded. This will finally be * loaded to a different address later, pointed to by * ->sh_addr. kexec takes care of moving it * (kexec_load_segment()). */ location = pi->purgatory_buf; location += section->sh_offset; location += rel[i].r_offset; /* Final address of the location */ address = section->sh_addr + rel[i].r_offset; /* * rel[i].r_info contains information about symbol table index * w.r.t which relocation must be made and type of relocation * to apply. ELF64_R_SYM() and ELF64_R_TYPE() macros get * these respectively. 
*/ sym = (void *)pi->ehdr + symtabsec->sh_offset; sym += ELF64_R_SYM(rel[i].r_info); if (sym->st_name) name = strtab + sym->st_name; else name = shstrtab + sechdrs[sym->st_shndx].sh_name; pr_debug("Symbol: %s info: %02x shndx: %02x value=%llx size: %llx\n", name, sym->st_info, sym->st_shndx, sym->st_value, sym->st_size); if (sym->st_shndx == SHN_UNDEF) { pr_err("Undefined symbol: %s\n", name); return -ENOEXEC; } if (sym->st_shndx == SHN_COMMON) { pr_err("symbol '%s' in common section\n", name); return -ENOEXEC; } if (sym->st_shndx == SHN_ABS) sec_base = 0; else if (sym->st_shndx >= pi->ehdr->e_shnum) { pr_err("Invalid section %d for symbol %s\n", sym->st_shndx, name); return -ENOEXEC; } else sec_base = pi->sechdrs[sym->st_shndx].sh_addr; value = sym->st_value; value += sec_base; value += rel[i].r_addend; switch (ELF64_R_TYPE(rel[i].r_info)) { case R_X86_64_NONE: break; case R_X86_64_64: *(u64 *)location = value; break; case R_X86_64_32: *(u32 *)location = value; if (value != *(u32 *)location) goto overflow; break; case R_X86_64_32S: *(s32 *)location = value; if ((s64)value != *(s32 *)location) goto overflow; break; case R_X86_64_PC32: case R_X86_64_PLT32: value -= (u64)address; *(u32 *)location = value; break; default: pr_err("Unknown rela relocation: %llu\n", ELF64_R_TYPE(rel[i].r_info)); return -ENOEXEC; } } return 0; overflow: pr_err("Overflow in relocation type %d value 0x%lx\n", (int)ELF64_R_TYPE(rel[i].r_info), value); return -ENOEXEC; } int arch_kimage_file_post_load_cleanup(struct kimage *image) { vfree(image->elf_headers); image->elf_headers = NULL; image->elf_headers_sz = 0; return kexec_image_post_load_cleanup_default(image); } #endif /* CONFIG_KEXEC_FILE */ static int kexec_mark_range(unsigned long start, unsigned long end, bool protect) { struct page *page; unsigned int nr_pages; /* * For physical range: [start, end]. We must skip the unassigned * crashk resource with zero-valued "end" member. */ if (!end || start > end) return 0; page = pfn_to_page(start >> PAGE_SHIFT); nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; if (protect) return set_pages_ro(page, nr_pages); else return set_pages_rw(page, nr_pages); } static void kexec_mark_crashkres(bool protect) { unsigned long control; kexec_mark_range(crashk_low_res.start, crashk_low_res.end, protect); /* Don't touch the control code page used in crash_kexec().*/ control = PFN_PHYS(page_to_pfn(kexec_crash_image->control_code_page)); /* Control code page is located in the 2nd page. */ kexec_mark_range(crashk_res.start, control + PAGE_SIZE - 1, protect); control += KEXEC_CONTROL_PAGE_SIZE; kexec_mark_range(control, crashk_res.end, protect); } void arch_kexec_protect_crashkres(void) { kexec_mark_crashkres(true); } void arch_kexec_unprotect_crashkres(void) { kexec_mark_crashkres(false); } /* * During a traditional boot under SME, SME will encrypt the kernel, * so the SME kexec kernel also needs to be un-encrypted in order to * replicate a normal SME boot. * * During a traditional boot under SEV, the kernel has already been * loaded encrypted, so the SEV kexec kernel needs to be encrypted in * order to replicate a normal SEV boot. */ int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) { if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) return 0; /* * If host memory encryption is active we need to be sure that kexec * pages are not encrypted because when we boot to the new kernel the * pages won't be accessed encrypted (initially). 
*/ return set_memory_decrypted((unsigned long)vaddr, pages); } void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) return; /* * If host memory encryption is active we need to reset the pages back * to being an encrypted mapping before freeing them. */ set_memory_encrypted((unsigned long)vaddr, pages); }
linux-master
arch/x86/kernel/machine_kexec_64.c
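arch_kexec_apply_relocations_add() above patches the purgatory with a handful of x86-64 RELA types. The sketch below shows just that patching step against a plain byte buffer, using the standard ELF relocation numbers; the sample addresses and the demo_apply() helper are invented, and real code derives value and address from the symbol table and section headers as the file above does.

/*
 * Sketch only: apply the relocation types handled above (64, 32, 32S,
 * PC32) to a buffer. Constants are the standard ELF x86-64 values.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define R_X86_64_64     1
#define R_X86_64_PC32   2
#define R_X86_64_32     10
#define R_X86_64_32S    11

/* Patch one storage unit at 'location' which will execute at 'address'. */
static int demo_apply(uint32_t type, void *location, uint64_t address, uint64_t value)
{
    switch (type) {
    case R_X86_64_64:
        memcpy(location, &value, 8);
        return 0;
    case R_X86_64_32: {
        uint32_t v32 = (uint32_t)value;

        if (v32 != value)               /* must fit in 32 bits, zero-extended */
            return -1;
        memcpy(location, &v32, 4);
        return 0;
    }
    case R_X86_64_32S: {
        int32_t v32 = (int32_t)value;

        if ((int64_t)value != v32)      /* must fit in 32 bits, sign-extended */
            return -1;
        memcpy(location, &v32, 4);
        return 0;
    }
    case R_X86_64_PC32: {
        uint32_t rel = (uint32_t)(value - address); /* PC-relative: S + A - P */

        memcpy(location, &rel, 4);
        return 0;
    }
    default:
        return -1;                      /* unknown relocation type */
    }
}

int main(void)
{
    unsigned char buf[16] = { 0 };
    /* made-up example: symbol value + addend = 0x1000, patch site lives at 0x2000 */
    int rc = demo_apply(R_X86_64_PC32, buf, 0x2000, 0x1000);

    printf("PC32 rc=%d bytes=%02x %02x %02x %02x\n", rc, buf[0], buf[1], buf[2], buf[3]);
    return 0;
}

The overflow checks for R_X86_64_32 and R_X86_64_32S correspond to the "goto overflow" paths in the kernel function; PC32/PLT32 are written truncated, as above.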
// SPDX-License-Identifier: GPL-2.0-or-later /* Paravirtualization interfaces Copyright (C) 2006 Rusty Russell IBM Corporation 2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc */ #include <linux/errno.h> #include <linux/init.h> #include <linux/export.h> #include <linux/efi.h> #include <linux/bcd.h> #include <linux/highmem.h> #include <linux/kprobes.h> #include <linux/pgtable.h> #include <linux/static_call.h> #include <asm/bug.h> #include <asm/paravirt.h> #include <asm/debugreg.h> #include <asm/desc.h> #include <asm/setup.h> #include <asm/time.h> #include <asm/pgalloc.h> #include <asm/irq.h> #include <asm/delay.h> #include <asm/fixmap.h> #include <asm/apic.h> #include <asm/tlbflush.h> #include <asm/timer.h> #include <asm/special_insns.h> #include <asm/tlb.h> #include <asm/io_bitmap.h> #include <asm/gsseg.h> /* * nop stub, which must not clobber anything *including the stack* to * avoid confusing the entry prologues. */ DEFINE_PARAVIRT_ASM(_paravirt_nop, "", .entry.text); /* stub always returning 0. */ DEFINE_PARAVIRT_ASM(paravirt_ret0, "xor %eax,%eax", .entry.text); void __init default_banner(void) { printk(KERN_INFO "Booting paravirtualized kernel on %s\n", pv_info.name); } /* Undefined instruction for dealing with missing ops pointers. */ noinstr void paravirt_BUG(void) { BUG(); } static unsigned paravirt_patch_call(void *insn_buff, const void *target, unsigned long addr, unsigned len) { __text_gen_insn(insn_buff, CALL_INSN_OPCODE, (void *)addr, target, CALL_INSN_SIZE); return CALL_INSN_SIZE; } #ifdef CONFIG_PARAVIRT_XXL DEFINE_PARAVIRT_ASM(_paravirt_ident_64, "mov %rdi, %rax", .text); DEFINE_PARAVIRT_ASM(pv_native_save_fl, "pushf; pop %rax", .noinstr.text); DEFINE_PARAVIRT_ASM(pv_native_irq_disable, "cli", .noinstr.text); DEFINE_PARAVIRT_ASM(pv_native_irq_enable, "sti", .noinstr.text); DEFINE_PARAVIRT_ASM(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text); #endif DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key); void __init native_pv_lock_init(void) { if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && !boot_cpu_has(X86_FEATURE_HYPERVISOR)) static_branch_disable(&virt_spin_lock_key); } static void native_tlb_remove_table(struct mmu_gather *tlb, void *table) { tlb_remove_page(tlb, table); } unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr, unsigned int len) { /* * Neat trick to map patch type back to the call within the * corresponding structure. */ void *opfunc = *((void **)&pv_ops + type); unsigned ret; if (opfunc == NULL) /* If there's no function, patch it with paravirt_BUG() */ ret = paravirt_patch_call(insn_buff, paravirt_BUG, addr, len); else if (opfunc == _paravirt_nop) ret = 0; else /* Otherwise call the function. */ ret = paravirt_patch_call(insn_buff, opfunc, addr, len); return ret; } struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; static u64 native_steal_clock(int cpu) { return 0; } DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock); void paravirt_set_sched_clock(u64 (*func)(void)) { static_call_update(pv_sched_clock, func); } /* These are in entry.S */ static struct resource reserve_ioports = { .start = 0, .end = IO_SPACE_LIMIT, .name = "paravirt-ioport", .flags = IORESOURCE_IO | IORESOURCE_BUSY, }; /* * Reserve the whole legacy IO space to prevent any legacy drivers * from wasting time probing for their hardware. This is a fairly * brute-force approach to disabling all non-virtual drivers. 
* * Note that this must be called very early to have any effect. */ int paravirt_disable_iospace(void) { return request_resource(&ioport_resource, &reserve_ioports); } #ifdef CONFIG_PARAVIRT_XXL static noinstr void pv_native_write_cr2(unsigned long val) { native_write_cr2(val); } static noinstr unsigned long pv_native_get_debugreg(int regno) { return native_get_debugreg(regno); } static noinstr void pv_native_set_debugreg(int regno, unsigned long val) { native_set_debugreg(regno, val); } noinstr void pv_native_wbinvd(void) { native_wbinvd(); } static noinstr void pv_native_safe_halt(void) { native_safe_halt(); } #endif struct pv_info pv_info = { .name = "bare hardware", #ifdef CONFIG_PARAVIRT_XXL .extra_user_64bit_cs = __USER_CS, #endif }; /* 64-bit pagetable entries */ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) struct paravirt_patch_template pv_ops = { /* Cpu ops. */ .cpu.io_delay = native_io_delay, #ifdef CONFIG_PARAVIRT_XXL .cpu.cpuid = native_cpuid, .cpu.get_debugreg = pv_native_get_debugreg, .cpu.set_debugreg = pv_native_set_debugreg, .cpu.read_cr0 = native_read_cr0, .cpu.write_cr0 = native_write_cr0, .cpu.write_cr4 = native_write_cr4, .cpu.wbinvd = pv_native_wbinvd, .cpu.read_msr = native_read_msr, .cpu.write_msr = native_write_msr, .cpu.read_msr_safe = native_read_msr_safe, .cpu.write_msr_safe = native_write_msr_safe, .cpu.read_pmc = native_read_pmc, .cpu.load_tr_desc = native_load_tr_desc, .cpu.set_ldt = native_set_ldt, .cpu.load_gdt = native_load_gdt, .cpu.load_idt = native_load_idt, .cpu.store_tr = native_store_tr, .cpu.load_tls = native_load_tls, .cpu.load_gs_index = native_load_gs_index, .cpu.write_ldt_entry = native_write_ldt_entry, .cpu.write_gdt_entry = native_write_gdt_entry, .cpu.write_idt_entry = native_write_idt_entry, .cpu.alloc_ldt = paravirt_nop, .cpu.free_ldt = paravirt_nop, .cpu.load_sp0 = native_load_sp0, #ifdef CONFIG_X86_IOPL_IOPERM .cpu.invalidate_io_bitmap = native_tss_invalidate_io_bitmap, .cpu.update_io_bitmap = native_tss_update_io_bitmap, #endif .cpu.start_context_switch = paravirt_nop, .cpu.end_context_switch = paravirt_nop, /* Irq ops. */ .irq.save_fl = __PV_IS_CALLEE_SAVE(pv_native_save_fl), .irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable), .irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable), .irq.safe_halt = pv_native_safe_halt, .irq.halt = native_halt, #endif /* CONFIG_PARAVIRT_XXL */ /* Mmu ops. 
*/ .mmu.flush_tlb_user = native_flush_tlb_local, .mmu.flush_tlb_kernel = native_flush_tlb_global, .mmu.flush_tlb_one_user = native_flush_tlb_one_user, .mmu.flush_tlb_multi = native_flush_tlb_multi, .mmu.tlb_remove_table = native_tlb_remove_table, .mmu.exit_mmap = paravirt_nop, .mmu.notify_page_enc_status_changed = paravirt_nop, #ifdef CONFIG_PARAVIRT_XXL .mmu.read_cr2 = __PV_IS_CALLEE_SAVE(pv_native_read_cr2), .mmu.write_cr2 = pv_native_write_cr2, .mmu.read_cr3 = __native_read_cr3, .mmu.write_cr3 = native_write_cr3, .mmu.pgd_alloc = __paravirt_pgd_alloc, .mmu.pgd_free = paravirt_nop, .mmu.alloc_pte = paravirt_nop, .mmu.alloc_pmd = paravirt_nop, .mmu.alloc_pud = paravirt_nop, .mmu.alloc_p4d = paravirt_nop, .mmu.release_pte = paravirt_nop, .mmu.release_pmd = paravirt_nop, .mmu.release_pud = paravirt_nop, .mmu.release_p4d = paravirt_nop, .mmu.set_pte = native_set_pte, .mmu.set_pmd = native_set_pmd, .mmu.ptep_modify_prot_start = __ptep_modify_prot_start, .mmu.ptep_modify_prot_commit = __ptep_modify_prot_commit, .mmu.set_pud = native_set_pud, .mmu.pmd_val = PTE_IDENT, .mmu.make_pmd = PTE_IDENT, .mmu.pud_val = PTE_IDENT, .mmu.make_pud = PTE_IDENT, .mmu.set_p4d = native_set_p4d, #if CONFIG_PGTABLE_LEVELS >= 5 .mmu.p4d_val = PTE_IDENT, .mmu.make_p4d = PTE_IDENT, .mmu.set_pgd = native_set_pgd, #endif /* CONFIG_PGTABLE_LEVELS >= 5 */ .mmu.pte_val = PTE_IDENT, .mmu.pgd_val = PTE_IDENT, .mmu.make_pte = PTE_IDENT, .mmu.make_pgd = PTE_IDENT, .mmu.enter_mmap = paravirt_nop, .mmu.lazy_mode = { .enter = paravirt_nop, .leave = paravirt_nop, .flush = paravirt_nop, }, .mmu.set_fixmap = native_set_fixmap, #endif /* CONFIG_PARAVIRT_XXL */ #if defined(CONFIG_PARAVIRT_SPINLOCKS) /* Lock ops. */ #ifdef CONFIG_SMP .lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath, .lock.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock), .lock.wait = paravirt_nop, .lock.kick = paravirt_nop, .lock.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted), #endif /* SMP */ #endif }; #ifdef CONFIG_PARAVIRT_XXL NOKPROBE_SYMBOL(native_load_idt); #endif EXPORT_SYMBOL(pv_ops); EXPORT_SYMBOL_GPL(pv_info);
linux-master
arch/x86/kernel/paravirt.c
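paravirt_patch() above maps a patch-site type to its handler with a deliberately blunt trick: it treats pv_ops as a flat array of pointers and indexes into it. The stand-alone sketch below reproduces that trick under the assumption that every member of the ops structure is a pointer of the same size; the demo_ops structure and slot names are invented for the example.

/*
 * Sketch only: index into a structure of function pointers as if it were
 * void *[], the same trick as *((void **)&pv_ops + type) above.
 */
#include <stdio.h>

static void op_save_fl(void)     { puts("save_fl"); }
static void op_irq_disable(void) { puts("irq_disable"); }
static void op_irq_enable(void)  { puts("irq_enable"); }

struct demo_ops {
    void (*save_fl)(void);
    void (*irq_disable)(void);
    void (*irq_enable)(void);
};

static struct demo_ops demo_pv_ops = {
    .save_fl     = op_save_fl,
    .irq_disable = op_irq_disable,
    .irq_enable  = op_irq_enable,
};

/* Slot numbers follow the field order, like the generated patch types do. */
enum { DEMO_SAVE_FL, DEMO_IRQ_DISABLE, DEMO_IRQ_ENABLE };

static void *demo_slot(unsigned int type)
{
    /* Works because every slot is pointer-sized and laid out in order. */
    return *((void **)&demo_pv_ops + type);
}

int main(void)
{
    /* Casting void * back to a function pointer matches what the kernel assumes. */
    void (*fn)(void) = (void (*)(void))demo_slot(DEMO_IRQ_ENABLE);

    fn();                               /* prints "irq_enable" */
    return 0;
}

The scheme only holds because every slot in the template is pointer-sized, which is worth keeping in mind when reading the large pv_ops initializer in the entry above.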
// SPDX-License-Identifier: GPL-2.0-only #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/init.h> #include <linux/export.h> #include <linux/timer.h> #include <linux/acpi_pmtmr.h> #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/clocksource.h> #include <linux/percpu.h> #include <linux/timex.h> #include <linux/static_key.h> #include <linux/static_call.h> #include <asm/hpet.h> #include <asm/timer.h> #include <asm/vgtod.h> #include <asm/time.h> #include <asm/delay.h> #include <asm/hypervisor.h> #include <asm/nmi.h> #include <asm/x86_init.h> #include <asm/geode.h> #include <asm/apic.h> #include <asm/intel-family.h> #include <asm/i8259.h> #include <asm/uv/uv.h> unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ EXPORT_SYMBOL(cpu_khz); unsigned int __read_mostly tsc_khz; EXPORT_SYMBOL(tsc_khz); #define KHZ 1000 /* * TSC can be unstable due to cpufreq or due to unsynced TSCs */ static int __read_mostly tsc_unstable; static unsigned int __initdata tsc_early_khz; static DEFINE_STATIC_KEY_FALSE(__use_tsc); int tsc_clocksource_reliable; static int __read_mostly tsc_force_recalibrate; static u32 art_to_tsc_numerator; static u32 art_to_tsc_denominator; static u64 art_to_tsc_offset; static struct clocksource *art_related_clocksource; struct cyc2ns { struct cyc2ns_data data[2]; /* 0 + 2*16 = 32 */ seqcount_latch_t seq; /* 32 + 4 = 36 */ }; /* fits one cacheline */ static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns); static int __init tsc_early_khz_setup(char *buf) { return kstrtouint(buf, 0, &tsc_early_khz); } early_param("tsc_early_khz", tsc_early_khz_setup); __always_inline void __cyc2ns_read(struct cyc2ns_data *data) { int seq, idx; do { seq = this_cpu_read(cyc2ns.seq.seqcount.sequence); idx = seq & 1; data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset); data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul); data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift); } while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence))); } __always_inline void cyc2ns_read_begin(struct cyc2ns_data *data) { preempt_disable_notrace(); __cyc2ns_read(data); } __always_inline void cyc2ns_read_end(void) { preempt_enable_notrace(); } /* * Accelerators for sched_clock() * convert from cycles(64bits) => nanoseconds (64bits) * basic equation: * ns = cycles / (freq / ns_per_sec) * ns = cycles * (ns_per_sec / freq) * ns = cycles * (10^9 / (cpu_khz * 10^3)) * ns = cycles * (10^6 / cpu_khz) * * Then we use scaling math (suggested by [email protected]) to get: * ns = cycles * (10^6 * SC / cpu_khz) / SC * ns = cycles * cyc2ns_scale / SC * * And since SC is a constant power of two, we can convert the div * into a shift. The larger SC is, the more accurate the conversion, but * cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication * (64-bit result) can be used. * * We can use khz divisor instead of mhz to keep a better precision. * ([email protected]) * * [email protected] "math is hard, lets go shopping!" 
*/ static __always_inline unsigned long long __cycles_2_ns(unsigned long long cyc) { struct cyc2ns_data data; unsigned long long ns; __cyc2ns_read(&data); ns = data.cyc2ns_offset; ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift); return ns; } static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc) { unsigned long long ns; preempt_disable_notrace(); ns = __cycles_2_ns(cyc); preempt_enable_notrace(); return ns; } static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now) { unsigned long long ns_now; struct cyc2ns_data data; struct cyc2ns *c2n; ns_now = cycles_2_ns(tsc_now); /* * Compute a new multiplier as per the above comment and ensure our * time function is continuous; see the comment near struct * cyc2ns_data. */ clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz, NSEC_PER_MSEC, 0); /* * cyc2ns_shift is exported via arch_perf_update_userpage() where it is * not expected to be greater than 31 due to the original published * conversion algorithm shifting a 32-bit value (now specifies a 64-bit * value) - refer perf_event_mmap_page documentation in perf_event.h. */ if (data.cyc2ns_shift == 32) { data.cyc2ns_shift = 31; data.cyc2ns_mul >>= 1; } data.cyc2ns_offset = ns_now - mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift); c2n = per_cpu_ptr(&cyc2ns, cpu); raw_write_seqcount_latch(&c2n->seq); c2n->data[0] = data; raw_write_seqcount_latch(&c2n->seq); c2n->data[1] = data; } static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now) { unsigned long flags; local_irq_save(flags); sched_clock_idle_sleep_event(); if (khz) __set_cyc2ns_scale(khz, cpu, tsc_now); sched_clock_idle_wakeup_event(); local_irq_restore(flags); } /* * Initialize cyc2ns for boot cpu */ static void __init cyc2ns_init_boot_cpu(void) { struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns); seqcount_latch_init(&c2n->seq); __set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc()); } /* * Secondary CPUs do not run through tsc_init(), so set up * all the scale factors for all CPUs, assuming the same * speed as the bootup CPU. */ static void __init cyc2ns_init_secondary_cpus(void) { unsigned int cpu, this_cpu = smp_processor_id(); struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns); struct cyc2ns_data *data = c2n->data; for_each_possible_cpu(cpu) { if (cpu != this_cpu) { seqcount_latch_init(&c2n->seq); c2n = per_cpu_ptr(&cyc2ns, cpu); c2n->data[0] = data[0]; c2n->data[1] = data[1]; } } } /* * Scheduler clock - returns current time in nanosec units. */ noinstr u64 native_sched_clock(void) { if (static_branch_likely(&__use_tsc)) { u64 tsc_now = rdtsc(); /* return the value in ns */ return __cycles_2_ns(tsc_now); } /* * Fall back to jiffies if there's no TSC available: * ( But note that we still use it if the TSC is marked * unstable. We do this because unlike Time Of Day, * the scheduler clock tolerates small errors and it's * very important for it to be as fast as the platform * can achieve it. ) */ /* No locking but a rare wrong value is not a big deal: */ return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); } /* * Generate a sched_clock if you already have a TSC value. 
*/ u64 native_sched_clock_from_tsc(u64 tsc) { return cycles_2_ns(tsc); } /* We need to define a real function for sched_clock, to override the weak default version */ #ifdef CONFIG_PARAVIRT noinstr u64 sched_clock_noinstr(void) { return paravirt_sched_clock(); } bool using_native_sched_clock(void) { return static_call_query(pv_sched_clock) == native_sched_clock; } #else u64 sched_clock_noinstr(void) __attribute__((alias("native_sched_clock"))); bool using_native_sched_clock(void) { return true; } #endif notrace u64 sched_clock(void) { u64 now; preempt_disable_notrace(); now = sched_clock_noinstr(); preempt_enable_notrace(); return now; } int check_tsc_unstable(void) { return tsc_unstable; } EXPORT_SYMBOL_GPL(check_tsc_unstable); #ifdef CONFIG_X86_TSC int __init notsc_setup(char *str) { mark_tsc_unstable("boot parameter notsc"); return 1; } #else /* * disable flag for tsc. Takes effect by clearing the TSC cpu flag * in cpu/common.c */ int __init notsc_setup(char *str) { setup_clear_cpu_cap(X86_FEATURE_TSC); return 1; } #endif __setup("notsc", notsc_setup); static int no_sched_irq_time; static int no_tsc_watchdog; static int tsc_as_watchdog; static int __init tsc_setup(char *str) { if (!strcmp(str, "reliable")) tsc_clocksource_reliable = 1; if (!strncmp(str, "noirqtime", 9)) no_sched_irq_time = 1; if (!strcmp(str, "unstable")) mark_tsc_unstable("boot parameter"); if (!strcmp(str, "nowatchdog")) { no_tsc_watchdog = 1; if (tsc_as_watchdog) pr_alert("%s: Overriding earlier tsc=watchdog with tsc=nowatchdog\n", __func__); tsc_as_watchdog = 0; } if (!strcmp(str, "recalibrate")) tsc_force_recalibrate = 1; if (!strcmp(str, "watchdog")) { if (no_tsc_watchdog) pr_alert("%s: tsc=watchdog overridden by earlier tsc=nowatchdog\n", __func__); else tsc_as_watchdog = 1; } return 1; } __setup("tsc=", tsc_setup); #define MAX_RETRIES 5 #define TSC_DEFAULT_THRESHOLD 0x20000 /* * Read TSC and the reference counters. Take care of any disturbances */ static u64 tsc_read_refs(u64 *p, int hpet) { u64 t1, t2; u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD; int i; for (i = 0; i < MAX_RETRIES; i++) { t1 = get_cycles(); if (hpet) *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF; else *p = acpi_pm_read_early(); t2 = get_cycles(); if ((t2 - t1) < thresh) return t2; } return ULLONG_MAX; } /* * Calculate the TSC frequency from HPET reference */ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2) { u64 tmp; if (hpet2 < hpet1) hpet2 += 0x100000000ULL; hpet2 -= hpet1; tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); do_div(tmp, 1000000); deltatsc = div64_u64(deltatsc, tmp); return (unsigned long) deltatsc; } /* * Calculate the TSC frequency from PMTimer reference */ static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2) { u64 tmp; if (!pm1 && !pm2) return ULONG_MAX; if (pm2 < pm1) pm2 += (u64)ACPI_PM_OVRRUN; pm2 -= pm1; tmp = pm2 * 1000000000LL; do_div(tmp, PMTMR_TICKS_PER_SEC); do_div(deltatsc, tmp); return (unsigned long) deltatsc; } #define CAL_MS 10 #define CAL_LATCH (PIT_TICK_RATE / (1000 / CAL_MS)) #define CAL_PIT_LOOPS 1000 #define CAL2_MS 50 #define CAL2_LATCH (PIT_TICK_RATE / (1000 / CAL2_MS)) #define CAL2_PIT_LOOPS 5000 /* * Try to calibrate the TSC against the Programmable * Interrupt Timer and return the frequency of the TSC * in kHz. * * Return ULONG_MAX on failure to calibrate. 
*/ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin) { u64 tsc, t1, t2, delta; unsigned long tscmin, tscmax; int pitcnt; if (!has_legacy_pic()) { /* * Relies on tsc_early_delay_calibrate() to have given us semi * usable udelay(), wait for the same 50ms we would have with * the PIT loop below. */ udelay(10 * USEC_PER_MSEC); udelay(10 * USEC_PER_MSEC); udelay(10 * USEC_PER_MSEC); udelay(10 * USEC_PER_MSEC); udelay(10 * USEC_PER_MSEC); return ULONG_MAX; } /* Set the Gate high, disable speaker */ outb((inb(0x61) & ~0x02) | 0x01, 0x61); /* * Setup CTC channel 2* for mode 0, (interrupt on terminal * count mode), binary count. Set the latch register to 50ms * (LSB then MSB) to begin countdown. */ outb(0xb0, 0x43); outb(latch & 0xff, 0x42); outb(latch >> 8, 0x42); tsc = t1 = t2 = get_cycles(); pitcnt = 0; tscmax = 0; tscmin = ULONG_MAX; while ((inb(0x61) & 0x20) == 0) { t2 = get_cycles(); delta = t2 - tsc; tsc = t2; if ((unsigned long) delta < tscmin) tscmin = (unsigned int) delta; if ((unsigned long) delta > tscmax) tscmax = (unsigned int) delta; pitcnt++; } /* * Sanity checks: * * If we were not able to read the PIT more than loopmin * times, then we have been hit by a massive SMI * * If the maximum is 10 times larger than the minimum, * then we got hit by an SMI as well. */ if (pitcnt < loopmin || tscmax > 10 * tscmin) return ULONG_MAX; /* Calculate the PIT value */ delta = t2 - t1; do_div(delta, ms); return delta; } /* * This reads the current MSB of the PIT counter, and * checks if we are running on sufficiently fast and * non-virtualized hardware. * * Our expectations are: * * - the PIT is running at roughly 1.19MHz * * - each IO is going to take about 1us on real hardware, * but we allow it to be much faster (by a factor of 10) or * _slightly_ slower (ie we allow up to a 2us read+counter * update - anything else implies a unacceptably slow CPU * or PIT for the fast calibration to work. * * - with 256 PIT ticks to read the value, we have 214us to * see the same MSB (and overhead like doing a single TSC * read per MSB value etc). * * - We're doing 2 reads per loop (LSB, MSB), and we expect * them each to take about a microsecond on real hardware. * So we expect a count value of around 100. But we'll be * generous, and accept anything over 50. * * - if the PIT is stuck, and we see *many* more reads, we * return early (and the next caller of pit_expect_msb() * then consider it a failure when they don't see the * next expected value). * * These expectations mean that we know that we have seen the * transition from one expected value to another with a fairly * high accuracy, and we didn't miss any events. We can thus * use the TSC value at the transitions to calculate a pretty * good value for the TSC frequency. */ static inline int pit_verify_msb(unsigned char val) { /* Ignore LSB */ inb(0x42); return inb(0x42) == val; } static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap) { int count; u64 tsc = 0, prev_tsc = 0; for (count = 0; count < 50000; count++) { if (!pit_verify_msb(val)) break; prev_tsc = tsc; tsc = get_cycles(); } *deltap = get_cycles() - prev_tsc; *tscp = tsc; /* * We require _some_ success, but the quality control * will be based on the error terms on the TSC values. */ return count > 5; } /* * How many MSB values do we want to see? We aim for * a maximum error rate of 500ppm (in practice the * real error is much smaller), but refuse to spend * more than 50ms on it. 
*/ #define MAX_QUICK_PIT_MS 50 #define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256) static unsigned long quick_pit_calibrate(void) { int i; u64 tsc, delta; unsigned long d1, d2; if (!has_legacy_pic()) return 0; /* Set the Gate high, disable speaker */ outb((inb(0x61) & ~0x02) | 0x01, 0x61); /* * Counter 2, mode 0 (one-shot), binary count * * NOTE! Mode 2 decrements by two (and then the * output is flipped each time, giving the same * final output frequency as a decrement-by-one), * so mode 0 is much better when looking at the * individual counts. */ outb(0xb0, 0x43); /* Start at 0xffff */ outb(0xff, 0x42); outb(0xff, 0x42); /* * The PIT starts counting at the next edge, so we * need to delay for a microsecond. The easiest way * to do that is to just read back the 16-bit counter * once from the PIT. */ pit_verify_msb(0); if (pit_expect_msb(0xff, &tsc, &d1)) { for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) { if (!pit_expect_msb(0xff-i, &delta, &d2)) break; delta -= tsc; /* * Extrapolate the error and fail fast if the error will * never be below 500 ppm. */ if (i == 1 && d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11) return 0; /* * Iterate until the error is less than 500 ppm */ if (d1+d2 >= delta >> 11) continue; /* * Check the PIT one more time to verify that * all TSC reads were stable wrt the PIT. * * This also guarantees serialization of the * last cycle read ('d2') in pit_expect_msb. */ if (!pit_verify_msb(0xfe - i)) break; goto success; } } pr_info("Fast TSC calibration failed\n"); return 0; success: /* * Ok, if we get here, then we've seen the * MSB of the PIT decrement 'i' times, and the * error has shrunk to less than 500 ppm. * * As a result, we can depend on there not being * any odd delays anywhere, and the TSC reads are * reliable (within the error). * * kHz = ticks / time-in-seconds / 1000; * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000) */ delta *= PIT_TICK_RATE; do_div(delta, i*256*1000); pr_info("Fast TSC calibration using PIT\n"); return delta; } /** * native_calibrate_tsc * Determine TSC frequency via CPUID, else return 0. */ unsigned long native_calibrate_tsc(void) { unsigned int eax_denominator, ebx_numerator, ecx_hz, edx; unsigned int crystal_khz; if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return 0; if (boot_cpu_data.cpuid_level < 0x15) return 0; eax_denominator = ebx_numerator = ecx_hz = edx = 0; /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */ cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx); if (ebx_numerator == 0 || eax_denominator == 0) return 0; crystal_khz = ecx_hz / 1000; /* * Denverton SoCs don't report crystal clock, and also don't support * CPUID.0x16 for the calculation below, so hardcode the 25MHz crystal * clock. */ if (crystal_khz == 0 && boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_D) crystal_khz = 25000; /* * TSC frequency reported directly by CPUID is a "hardware reported" * frequency and is the most accurate one so far we have. This * is considered a known frequency. */ if (crystal_khz != 0) setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); /* * Some Intel SoCs like Skylake and Kabylake don't report the crystal * clock, but we can easily calculate it to a high degree of accuracy * by considering the crystal ratio and the CPU speed. 
*/ if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= 0x16) { unsigned int eax_base_mhz, ebx, ecx, edx; cpuid(0x16, &eax_base_mhz, &ebx, &ecx, &edx); crystal_khz = eax_base_mhz * 1000 * eax_denominator / ebx_numerator; } if (crystal_khz == 0) return 0; /* * For Atom SoCs TSC is the only reliable clocksource. * Mark TSC reliable so no watchdog on it. */ if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT) setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); #ifdef CONFIG_X86_LOCAL_APIC /* * The local APIC appears to be fed by the core crystal clock * (which sounds entirely sensible). We can set the global * lapic_timer_period here to avoid having to calibrate the APIC * timer later. */ lapic_timer_period = crystal_khz * 1000 / HZ; #endif return crystal_khz * ebx_numerator / eax_denominator; } static unsigned long cpu_khz_from_cpuid(void) { unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx; if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return 0; if (boot_cpu_data.cpuid_level < 0x16) return 0; eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0; cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx); return eax_base_mhz * 1000; } /* * calibrate cpu using pit, hpet, and ptimer methods. They are available * later in boot after acpi is initialized. */ static unsigned long pit_hpet_ptimer_calibrate_cpu(void) { u64 tsc1, tsc2, delta, ref1, ref2; unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX; unsigned long flags, latch, ms; int hpet = is_hpet_enabled(), i, loopmin; /* * Run 5 calibration loops to get the lowest frequency value * (the best estimate). We use two different calibration modes * here: * * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and * load a timeout of 50ms. We read the time right after we * started the timer and wait until the PIT count down reaches * zero. In each wait loop iteration we read the TSC and check * the delta to the previous read. We keep track of the min * and max values of that delta. The delta is mostly defined * by the IO time of the PIT access, so we can detect when * any disturbance happened between the two reads. If the * maximum time is significantly larger than the minimum time, * then we discard the result and have another try. * * 2) Reference counter. If available we use the HPET or the * PMTIMER as a reference to check the sanity of that value. * We use separate TSC readouts and check inside of the * reference read for any possible disturbance. We discard * disturbed values here as well. We do that around the PIT * calibration delay loop as we have to wait for a certain * amount of time anyway. */ /* Preset PIT loop values */ latch = CAL_LATCH; ms = CAL_MS; loopmin = CAL_PIT_LOOPS; for (i = 0; i < 3; i++) { unsigned long tsc_pit_khz; /* * Read the start value and the reference count of * hpet/pmtimer when available. Then do the PIT * calibration, which will take at least 50ms, and * read the end value. */ local_irq_save(flags); tsc1 = tsc_read_refs(&ref1, hpet); tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin); tsc2 = tsc_read_refs(&ref2, hpet); local_irq_restore(flags); /* Pick the lowest PIT TSC calibration so far */ tsc_pit_min = min(tsc_pit_min, tsc_pit_khz); /* hpet or pmtimer available ? 
*/ if (ref1 == ref2) continue; /* Check, whether the sampling was disturbed */ if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) continue; tsc2 = (tsc2 - tsc1) * 1000000LL; if (hpet) tsc2 = calc_hpet_ref(tsc2, ref1, ref2); else tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2); tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2); /* Check the reference deviation */ delta = ((u64) tsc_pit_min) * 100; do_div(delta, tsc_ref_min); /* * If both calibration results are inside a 10% window * then we can be sure, that the calibration * succeeded. We break out of the loop right away. We * use the reference value, as it is more precise. */ if (delta >= 90 && delta <= 110) { pr_info("PIT calibration matches %s. %d loops\n", hpet ? "HPET" : "PMTIMER", i + 1); return tsc_ref_min; } /* * Check whether PIT failed more than once. This * happens in virtualized environments. We need to * give the virtual PC a slightly longer timeframe for * the HPET/PMTIMER to make the result precise. */ if (i == 1 && tsc_pit_min == ULONG_MAX) { latch = CAL2_LATCH; ms = CAL2_MS; loopmin = CAL2_PIT_LOOPS; } } /* * Now check the results. */ if (tsc_pit_min == ULONG_MAX) { /* PIT gave no useful value */ pr_warn("Unable to calibrate against PIT\n"); /* We don't have an alternative source, disable TSC */ if (!hpet && !ref1 && !ref2) { pr_notice("No reference (HPET/PMTIMER) available\n"); return 0; } /* The alternative source failed as well, disable TSC */ if (tsc_ref_min == ULONG_MAX) { pr_warn("HPET/PMTIMER calibration failed\n"); return 0; } /* Use the alternative source */ pr_info("using %s reference calibration\n", hpet ? "HPET" : "PMTIMER"); return tsc_ref_min; } /* We don't have an alternative source, use the PIT calibration value */ if (!hpet && !ref1 && !ref2) { pr_info("Using PIT calibration value\n"); return tsc_pit_min; } /* The alternative source failed, use the PIT calibration value */ if (tsc_ref_min == ULONG_MAX) { pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n"); return tsc_pit_min; } /* * The calibration values differ too much. In doubt, we use * the PIT value as we know that there are PMTIMERs around * running at double speed. At least we let the user know: */ pr_warn("PIT calibration deviates from %s: %lu %lu\n", hpet ? 
"HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min); pr_info("Using PIT calibration value\n"); return tsc_pit_min; } /** * native_calibrate_cpu_early - can calibrate the cpu early in boot */ unsigned long native_calibrate_cpu_early(void) { unsigned long flags, fast_calibrate = cpu_khz_from_cpuid(); if (!fast_calibrate) fast_calibrate = cpu_khz_from_msr(); if (!fast_calibrate) { local_irq_save(flags); fast_calibrate = quick_pit_calibrate(); local_irq_restore(flags); } return fast_calibrate; } /** * native_calibrate_cpu - calibrate the cpu */ static unsigned long native_calibrate_cpu(void) { unsigned long tsc_freq = native_calibrate_cpu_early(); if (!tsc_freq) tsc_freq = pit_hpet_ptimer_calibrate_cpu(); return tsc_freq; } void recalibrate_cpu_khz(void) { #ifndef CONFIG_SMP unsigned long cpu_khz_old = cpu_khz; if (!boot_cpu_has(X86_FEATURE_TSC)) return; cpu_khz = x86_platform.calibrate_cpu(); tsc_khz = x86_platform.calibrate_tsc(); if (tsc_khz == 0) tsc_khz = cpu_khz; else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz) cpu_khz = tsc_khz; cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy, cpu_khz_old, cpu_khz); #endif } EXPORT_SYMBOL_GPL(recalibrate_cpu_khz); static unsigned long long cyc2ns_suspend; void tsc_save_sched_clock_state(void) { if (!sched_clock_stable()) return; cyc2ns_suspend = sched_clock(); } /* * Even on processors with invariant TSC, TSC gets reset in some the * ACPI system sleep states. And in some systems BIOS seem to reinit TSC to * arbitrary value (still sync'd across cpu's) during resume from such sleep * states. To cope up with this, recompute the cyc2ns_offset for each cpu so * that sched_clock() continues from the point where it was left off during * suspend. */ void tsc_restore_sched_clock_state(void) { unsigned long long offset; unsigned long flags; int cpu; if (!sched_clock_stable()) return; local_irq_save(flags); /* * We're coming out of suspend, there's no concurrency yet; don't * bother being nice about the RCU stuff, just write to both * data fields. */ this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0); this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0); offset = cyc2ns_suspend - sched_clock(); for_each_possible_cpu(cpu) { per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset; per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset; } local_irq_restore(flags); } #ifdef CONFIG_CPU_FREQ /* * Frequency scaling support. Adjust the TSC based timer when the CPU frequency * changes. * * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC * as unstable and give up in those cases. * * Should fix up last_tsc too. Currently gettimeofday in the * first tick after the change will be slightly wrong. 
*/ static unsigned int ref_freq; static unsigned long loops_per_jiffy_ref; static unsigned long tsc_khz_ref; static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; if (num_online_cpus() > 1) { mark_tsc_unstable("cpufreq changes on SMP"); return 0; } if (!ref_freq) { ref_freq = freq->old; loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy; tsc_khz_ref = tsc_khz; } if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { boot_cpu_data.loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new); tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); if (!(freq->flags & CPUFREQ_CONST_LOOPS)) mark_tsc_unstable("cpufreq changes"); set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc()); } return 0; } static struct notifier_block time_cpufreq_notifier_block = { .notifier_call = time_cpufreq_notifier }; static int __init cpufreq_register_tsc_scaling(void) { if (!boot_cpu_has(X86_FEATURE_TSC)) return 0; if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) return 0; cpufreq_register_notifier(&time_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); return 0; } core_initcall(cpufreq_register_tsc_scaling); #endif /* CONFIG_CPU_FREQ */ #define ART_CPUID_LEAF (0x15) #define ART_MIN_DENOMINATOR (1) /* * If ART is present detect the numerator:denominator to convert to TSC */ static void __init detect_art(void) { unsigned int unused[2]; if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF) return; /* * Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required, * and the TSC counter resets must not occur asynchronously. */ if (boot_cpu_has(X86_FEATURE_HYPERVISOR) || !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) || !boot_cpu_has(X86_FEATURE_TSC_ADJUST) || tsc_async_resets) return; cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator, &art_to_tsc_numerator, unused, unused+1); if (art_to_tsc_denominator < ART_MIN_DENOMINATOR) return; rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset); /* Make this sticky over multiple CPU init calls */ setup_force_cpu_cap(X86_FEATURE_ART); } /* clocksource code */ static void tsc_resume(struct clocksource *cs) { tsc_verify_tsc_adjust(true); } /* * We used to compare the TSC to the cycle_last value in the clocksource * structure to avoid a nasty time-warp. This can be observed in a * very small window right after one CPU updated cycle_last under * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which * is smaller than the cycle_last reference value due to a TSC which * is slightly behind. This delta is nowhere else observable, but in * that case it results in a forward time jump in the range of hours * due to the unsigned delta calculation of the time keeping core * code, which is necessary to support wrapping clocksources like pm * timer. * * This sanity check is now done in the core timekeeping code. * checking the result of read_tsc() - cycle_last for being negative. * That works because CLOCKSOURCE_MASK(64) does not mask out any bit. 
*/ static u64 read_tsc(struct clocksource *cs) { return (u64)rdtsc_ordered(); } static void tsc_cs_mark_unstable(struct clocksource *cs) { if (tsc_unstable) return; tsc_unstable = 1; if (using_native_sched_clock()) clear_sched_clock_stable(); disable_sched_clock_irqtime(); pr_info("Marking TSC unstable due to clocksource watchdog\n"); } static void tsc_cs_tick_stable(struct clocksource *cs) { if (tsc_unstable) return; if (using_native_sched_clock()) sched_clock_tick_stable(); } static int tsc_cs_enable(struct clocksource *cs) { vclocks_set_used(VDSO_CLOCKMODE_TSC); return 0; } /* * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc() */ static struct clocksource clocksource_tsc_early = { .name = "tsc-early", .rating = 299, .uncertainty_margin = 32 * NSEC_PER_MSEC, .read = read_tsc, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_MUST_VERIFY, .vdso_clock_mode = VDSO_CLOCKMODE_TSC, .enable = tsc_cs_enable, .resume = tsc_resume, .mark_unstable = tsc_cs_mark_unstable, .tick_stable = tsc_cs_tick_stable, .list = LIST_HEAD_INIT(clocksource_tsc_early.list), }; /* * Must mark VALID_FOR_HRES early such that when we unregister tsc_early * this one will immediately take over. We will only register if TSC has * been found good. */ static struct clocksource clocksource_tsc = { .name = "tsc", .rating = 300, .read = read_tsc, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_MUST_VERIFY | CLOCK_SOURCE_VERIFY_PERCPU, .vdso_clock_mode = VDSO_CLOCKMODE_TSC, .enable = tsc_cs_enable, .resume = tsc_resume, .mark_unstable = tsc_cs_mark_unstable, .tick_stable = tsc_cs_tick_stable, .list = LIST_HEAD_INIT(clocksource_tsc.list), }; void mark_tsc_unstable(char *reason) { if (tsc_unstable) return; tsc_unstable = 1; if (using_native_sched_clock()) clear_sched_clock_stable(); disable_sched_clock_irqtime(); pr_info("Marking TSC unstable due to %s\n", reason); clocksource_mark_unstable(&clocksource_tsc_early); clocksource_mark_unstable(&clocksource_tsc); } EXPORT_SYMBOL_GPL(mark_tsc_unstable); static void __init tsc_disable_clocksource_watchdog(void) { clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY; clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; } bool tsc_clocksource_watchdog_disabled(void) { return !(clocksource_tsc.flags & CLOCK_SOURCE_MUST_VERIFY) && tsc_as_watchdog && !no_tsc_watchdog; } static void __init check_system_tsc_reliable(void) { #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC) if (is_geode_lx()) { /* RTSC counts during suspend */ #define RTSC_SUSP 0x100 unsigned long res_low, res_high; rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); /* Geode_LX - the OLPC CPU has a very reliable TSC */ if (res_low & RTSC_SUSP) tsc_clocksource_reliable = 1; } #endif if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) tsc_clocksource_reliable = 1; /* * Disable the clocksource watchdog when the system has: * - TSC running at constant frequency * - TSC which does not stop in C-States * - the TSC_ADJUST register which allows to detect even minimal * modifications * - not more than four sockets, matching the nr_online_nodes check * below. As the number of sockets cannot be * evaluated at the early boot stage where this has to be * invoked, check the number of online memory nodes as a * fallback solution which is a reasonable estimate.
*/ if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && boot_cpu_has(X86_FEATURE_NONSTOP_TSC) && boot_cpu_has(X86_FEATURE_TSC_ADJUST) && nr_online_nodes <= 4) tsc_disable_clocksource_watchdog(); } /* * Make an educated guess if the TSC is trustworthy and synchronized * over all CPUs. */ int unsynchronized_tsc(void) { if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable) return 1; #ifdef CONFIG_SMP if (apic_is_clustered_box()) return 1; #endif if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) return 0; if (tsc_clocksource_reliable) return 0; /* * Intel systems are normally all synchronized. * Exceptions must mark TSC as unstable: */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { /* assume multi socket systems are not synchronized: */ if (num_possible_cpus() > 1) return 1; } return 0; } /* * Convert ART to TSC given numerator/denominator found in detect_art() */ struct system_counterval_t convert_art_to_tsc(u64 art) { u64 tmp, res, rem; rem = do_div(art, art_to_tsc_denominator); res = art * art_to_tsc_numerator; tmp = rem * art_to_tsc_numerator; do_div(tmp, art_to_tsc_denominator); res += tmp + art_to_tsc_offset; return (struct system_counterval_t) {.cs = art_related_clocksource, .cycles = res}; } EXPORT_SYMBOL(convert_art_to_tsc); /** * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC. * @art_ns: ART (Always Running Timer) in unit of nanoseconds * * PTM requires all timestamps to be in units of nanoseconds. When user * software requests a cross-timestamp, this function converts system timestamp * to TSC. * * This is valid when CPU feature flag X86_FEATURE_TSC_KNOWN_FREQ is set * indicating the tsc_khz is derived from CPUID[15H]. Drivers should check * that this flag is set before conversion to TSC is attempted. * * Return: * struct system_counterval_t - system counter value with the pointer to the * corresponding clocksource * @cycles: System counter value * @cs: Clocksource corresponding to system counter value. Used * by timekeeping code to verify comparability of two cycle * values. */ struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns) { u64 tmp, res, rem; rem = do_div(art_ns, USEC_PER_SEC); res = art_ns * tsc_khz; tmp = rem * tsc_khz; do_div(tmp, USEC_PER_SEC); res += tmp; return (struct system_counterval_t) { .cs = art_related_clocksource, .cycles = res}; } EXPORT_SYMBOL(convert_art_ns_to_tsc); static void tsc_refine_calibration_work(struct work_struct *work); static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work); /** * tsc_refine_calibration_work - Further refine tsc freq calibration * @work - ignored. * * This functions uses delayed work over a period of a * second to further refine the TSC freq value. Since this is * timer based, instead of loop based, we don't block the boot * process while this longer calibration is done. * * If there are any calibration anomalies (too many SMIs, etc), * or the refined calibration is off by 1% of the fast early * calibration, we throw out the new calibration and use the * early calibration. */ static void tsc_refine_calibration_work(struct work_struct *work) { static u64 tsc_start = ULLONG_MAX, ref_start; static int hpet; u64 tsc_stop, ref_stop, delta; unsigned long freq; int cpu; /* Don't bother refining TSC on unstable systems */ if (tsc_unstable) goto unreg; /* * Since the work is started early in boot, we may be * delayed the first time we expire. So set the workqueue * again once we know timers are working. 
*/ if (tsc_start == ULLONG_MAX) { restart: /* * Only set hpet once, to avoid mixing hardware * if the hpet becomes enabled later. */ hpet = is_hpet_enabled(); tsc_start = tsc_read_refs(&ref_start, hpet); schedule_delayed_work(&tsc_irqwork, HZ); return; } tsc_stop = tsc_read_refs(&ref_stop, hpet); /* hpet or pmtimer available ? */ if (ref_start == ref_stop) goto out; /* Check, whether the sampling was disturbed */ if (tsc_stop == ULLONG_MAX) goto restart; delta = tsc_stop - tsc_start; delta *= 1000000LL; if (hpet) freq = calc_hpet_ref(delta, ref_start, ref_stop); else freq = calc_pmtimer_ref(delta, ref_start, ref_stop); /* Will hit this only if tsc_force_recalibrate has been set */ if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) { /* Warn if the deviation exceeds 500 ppm */ if (abs(tsc_khz - freq) > (tsc_khz >> 11)) { pr_warn("Warning: TSC freq calibrated by CPUID/MSR differs from what is calibrated by HW timer, please check with vendor!!\n"); pr_info("Previous calibrated TSC freq:\t %lu.%03lu MHz\n", (unsigned long)tsc_khz / 1000, (unsigned long)tsc_khz % 1000); } pr_info("TSC freq recalibrated by [%s]:\t %lu.%03lu MHz\n", hpet ? "HPET" : "PM_TIMER", (unsigned long)freq / 1000, (unsigned long)freq % 1000); return; } /* Make sure we're within 1% */ if (abs(tsc_khz - freq) > tsc_khz/100) goto out; tsc_khz = freq; pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n", (unsigned long)tsc_khz / 1000, (unsigned long)tsc_khz % 1000); /* Inform the TSC deadline clockevent devices about the recalibration */ lapic_update_tsc_freq(); /* Update the sched_clock() rate to match the clocksource one */ for_each_possible_cpu(cpu) set_cyc2ns_scale(tsc_khz, cpu, tsc_stop); out: if (tsc_unstable) goto unreg; if (boot_cpu_has(X86_FEATURE_ART)) art_related_clocksource = &clocksource_tsc; clocksource_register_khz(&clocksource_tsc, tsc_khz); unreg: clocksource_unregister(&clocksource_tsc_early); } static int __init init_tsc_clocksource(void) { if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz) return 0; if (tsc_unstable) { clocksource_unregister(&clocksource_tsc_early); return 0; } if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3)) clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; /* * When TSC frequency is known (retrieved via MSR or CPUID), we skip * the refined calibration and directly register it as a clocksource. */ if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) { if (boot_cpu_has(X86_FEATURE_ART)) art_related_clocksource = &clocksource_tsc; clocksource_register_khz(&clocksource_tsc, tsc_khz); clocksource_unregister(&clocksource_tsc_early); if (!tsc_force_recalibrate) return 0; } schedule_delayed_work(&tsc_irqwork, 0); return 0; } /* * We use device_initcall here, to ensure we run after the hpet * is fully initialized, which may occur at fs_initcall time. */ device_initcall(init_tsc_clocksource); static bool __init determine_cpu_tsc_frequencies(bool early) { /* Make sure that cpu and tsc are not already calibrated */ WARN_ON(cpu_khz || tsc_khz); if (early) { cpu_khz = x86_platform.calibrate_cpu(); if (tsc_early_khz) tsc_khz = tsc_early_khz; else tsc_khz = x86_platform.calibrate_tsc(); } else { /* We should not be here with non-native cpu calibration */ WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu); cpu_khz = pit_hpet_ptimer_calibrate_cpu(); } /* * Trust non-zero tsc_khz as authoritative, * and use it to sanity check cpu_khz, * which will be off if system timer is off. 
*/ if (tsc_khz == 0) tsc_khz = cpu_khz; else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz) cpu_khz = tsc_khz; if (tsc_khz == 0) return false; pr_info("Detected %lu.%03lu MHz processor\n", (unsigned long)cpu_khz / KHZ, (unsigned long)cpu_khz % KHZ); if (cpu_khz != tsc_khz) { pr_info("Detected %lu.%03lu MHz TSC", (unsigned long)tsc_khz / KHZ, (unsigned long)tsc_khz % KHZ); } return true; } static unsigned long __init get_loops_per_jiffy(void) { u64 lpj = (u64)tsc_khz * KHZ; do_div(lpj, HZ); return lpj; } static void __init tsc_enable_sched_clock(void) { loops_per_jiffy = get_loops_per_jiffy(); use_tsc_delay(); /* Sanitize TSC ADJUST before cyc2ns gets initialized */ tsc_store_and_check_tsc_adjust(true); cyc2ns_init_boot_cpu(); static_branch_enable(&__use_tsc); } void __init tsc_early_init(void) { if (!boot_cpu_has(X86_FEATURE_TSC)) return; /* Don't change UV TSC multi-chassis synchronization */ if (is_early_uv_system()) return; if (!determine_cpu_tsc_frequencies(true)) return; tsc_enable_sched_clock(); } void __init tsc_init(void) { if (!cpu_feature_enabled(X86_FEATURE_TSC)) { setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); return; } /* * native_calibrate_cpu_early can only calibrate using methods that are * available early in boot. */ if (x86_platform.calibrate_cpu == native_calibrate_cpu_early) x86_platform.calibrate_cpu = native_calibrate_cpu; if (!tsc_khz) { /* We failed to determine frequencies earlier, try again */ if (!determine_cpu_tsc_frequencies(false)) { mark_tsc_unstable("could not calculate TSC khz"); setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); return; } tsc_enable_sched_clock(); } cyc2ns_init_secondary_cpus(); if (!no_sched_irq_time) enable_sched_clock_irqtime(); lpj_fine = get_loops_per_jiffy(); check_system_tsc_reliable(); if (unsynchronized_tsc()) { mark_tsc_unstable("TSCs unsynchronized"); return; } if (tsc_clocksource_reliable || no_tsc_watchdog) tsc_disable_clocksource_watchdog(); clocksource_register_khz(&clocksource_tsc_early, tsc_khz); detect_art(); } #ifdef CONFIG_SMP /* * Check whether existing calibration data can be reused. */ unsigned long calibrate_delay_is_known(void) { int sibling, cpu = smp_processor_id(); int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC); const struct cpumask *mask = topology_core_cpumask(cpu); /* * If TSC has constant frequency and TSC is synchronized across * sockets then reuse CPU0 calibration. */ if (constant_tsc && !tsc_unstable) return cpu_data(0).loops_per_jiffy; /* * If TSC has constant frequency and TSC is not synchronized across * sockets and this is not the first CPU in the socket, then reuse * the calibration value of an already online CPU on that socket. * * This assumes that CONSTANT_TSC is consistent for all CPUs in a * socket. */ if (!constant_tsc || !mask) return 0; sibling = cpumask_any_but(mask, cpu); if (sibling < nr_cpu_ids) return cpu_data(sibling).loops_per_jiffy; return 0; } #endif
linux-master
arch/x86/kernel/tsc.c
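The quick PIT calibration in tsc.c above boils down to kHz = (tsc_delta * PIT_TICK_RATE) / (i * 256 * 1000). A minimal user-space sketch of that arithmetic, not kernel code, with hypothetical sample numbers:

#include <stdio.h>
#include <stdint.h>

#define PIT_TICK_RATE 1193182ULL	/* PIT input clock, in Hz */

/*
 * Sketch of the quick-PIT frequency formula: tsc_delta TSC cycles were
 * observed while the PIT MSB decremented 'iterations' times, and each
 * MSB step corresponds to 256 PIT ticks.
 */
static uint64_t quick_pit_khz(uint64_t tsc_delta, unsigned int iterations)
{
	return (tsc_delta * PIT_TICK_RATE) / ((uint64_t)iterations * 256 * 1000);
}

int main(void)
{
	/* Made-up sample: ~64.4M TSC cycles over 100 MSB steps (~21.5 ms). */
	printf("estimated TSC: %llu kHz\n",
	       (unsigned long long)quick_pit_khz(64380000ULL, 100));
	return 0;
}

With those sample inputs the formula yields roughly 3,000,000 kHz, i.e. a ~3 GHz TSC.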
// SPDX-License-Identifier: GPL-2.0 /* * arch/x86/kernel/nmi-selftest.c * * Testsuite for NMI: IPIs * * Started by Don Zickus: * (using lib/locking-selftest.c as a guide) * * Copyright (C) 2011 Red Hat, Inc., Don Zickus <[email protected]> */ #include <linux/smp.h> #include <linux/cpumask.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/percpu.h> #include <asm/apic.h> #include <asm/nmi.h> #define SUCCESS 0 #define FAILURE 1 #define TIMEOUT 2 static int __initdata nmi_fail; /* check to see if NMI IPIs work on this machine */ static DECLARE_BITMAP(nmi_ipi_mask, NR_CPUS) __initdata; static int __initdata testcase_total; static int __initdata testcase_successes; static int __initdata expected_testcase_failures; static int __initdata unexpected_testcase_failures; static int __initdata unexpected_testcase_unknowns; static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs) { unexpected_testcase_unknowns++; return NMI_HANDLED; } static void __init init_nmi_testsuite(void) { /* trap all the unknown NMIs we may generate */ register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk", __initdata); } static void __init cleanup_nmi_testsuite(void) { unregister_nmi_handler(NMI_UNKNOWN, "nmi_selftest_unk"); } static int __init test_nmi_ipi_callback(unsigned int val, struct pt_regs *regs) { int cpu = raw_smp_processor_id(); if (cpumask_test_and_clear_cpu(cpu, to_cpumask(nmi_ipi_mask))) return NMI_HANDLED; return NMI_DONE; } static void __init test_nmi_ipi(struct cpumask *mask) { unsigned long timeout; if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback, NMI_FLAG_FIRST, "nmi_selftest", __initdata)) { nmi_fail = FAILURE; return; } /* sync above data before sending NMI */ wmb(); __apic_send_IPI_mask(mask, NMI_VECTOR); /* Don't wait longer than a second */ timeout = USEC_PER_SEC; while (!cpumask_empty(mask) && --timeout) udelay(1); /* What happens if we timeout, do we still unregister?? 
*/ unregister_nmi_handler(NMI_LOCAL, "nmi_selftest"); if (!timeout) nmi_fail = TIMEOUT; return; } static void __init remote_ipi(void) { cpumask_copy(to_cpumask(nmi_ipi_mask), cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), to_cpumask(nmi_ipi_mask)); if (!cpumask_empty(to_cpumask(nmi_ipi_mask))) test_nmi_ipi(to_cpumask(nmi_ipi_mask)); } static void __init local_ipi(void) { cpumask_clear(to_cpumask(nmi_ipi_mask)); cpumask_set_cpu(smp_processor_id(), to_cpumask(nmi_ipi_mask)); test_nmi_ipi(to_cpumask(nmi_ipi_mask)); } static void __init reset_nmi(void) { nmi_fail = 0; } static void __init dotest(void (*testcase_fn)(void), int expected) { testcase_fn(); /* * Filter out expected failures: */ if (nmi_fail != expected) { unexpected_testcase_failures++; if (nmi_fail == FAILURE) printk(KERN_CONT "FAILED |"); else if (nmi_fail == TIMEOUT) printk(KERN_CONT "TIMEOUT|"); else printk(KERN_CONT "ERROR |"); dump_stack(); } else { testcase_successes++; printk(KERN_CONT " ok |"); } testcase_total++; reset_nmi(); } static inline void __init print_testname(const char *testname) { printk("%12s:", testname); } void __init nmi_selftest(void) { init_nmi_testsuite(); /* * Run the testsuite: */ printk("----------------\n"); printk("| NMI testsuite:\n"); printk("--------------------\n"); print_testname("remote IPI"); dotest(remote_ipi, SUCCESS); printk(KERN_CONT "\n"); print_testname("local IPI"); dotest(local_ipi, SUCCESS); printk(KERN_CONT "\n"); cleanup_nmi_testsuite(); if (unexpected_testcase_failures) { printk("--------------------\n"); printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n", unexpected_testcase_failures, testcase_total); printk("-----------------------------------------------------------------\n"); } else if (expected_testcase_failures && testcase_successes) { printk("--------------------\n"); printk("%3d out of %3d testcases failed, as expected. |\n", expected_testcase_failures, testcase_total); printk("----------------------------------------------------\n"); } else if (expected_testcase_failures && !testcase_successes) { printk("--------------------\n"); printk("All %3d testcases failed, as expected. |\n", expected_testcase_failures); printk("----------------------------------------\n"); } else { printk("--------------------\n"); printk("Good, all %3d testcases passed! |\n", testcase_successes); printk("---------------------------------\n"); } }
linux-master
arch/x86/kernel/nmi_selftest.c
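The selftest above funnels each case through dotest(), which compares the observed result against the expected one and tallies passes and unexpected failures. A user-space sketch of the same pattern, not kernel code, with assumed sample cases:

#include <stdio.h>

enum { SUCCESS = 0, FAILURE = 1, TIMEOUT = 2 };

static int nmi_fail;			/* set by each test case */
static int testcase_total;
static int testcase_successes;
static int unexpected_testcase_failures;

/* Hypothetical test cases standing in for remote_ipi()/local_ipi(). */
static void passing_case(void)    { nmi_fail = SUCCESS; }
static void timing_out_case(void) { nmi_fail = TIMEOUT; }

/* Run one case and filter out expected failures, as dotest() does above. */
static void dotest(void (*testcase_fn)(void), int expected)
{
	nmi_fail = SUCCESS;
	testcase_fn();
	if (nmi_fail != expected) {
		unexpected_testcase_failures++;
		printf("FAILED (got %d, expected %d)\n", nmi_fail, expected);
	} else {
		testcase_successes++;
		printf("ok\n");
	}
	testcase_total++;
}

int main(void)
{
	dotest(passing_case, SUCCESS);
	dotest(timing_out_case, SUCCESS);	/* counted as an unexpected failure */
	printf("%d/%d ok, %d unexpected failures\n",
	       testcase_successes, testcase_total, unexpected_testcase_failures);
	return 0;
}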
// SPDX-License-Identifier: GPL-2.0-or-later /* * Intel SMP support routines. * * (c) 1995 Alan Cox, Building #3 <[email protected]> * (c) 1998-99, 2000, 2009 Ingo Molnar <[email protected]> * (c) 2002,2003 Andi Kleen, SuSE Labs. * * i386 and x86_64 integration by Glauber Costa <[email protected]> */ #include <linux/init.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/export.h> #include <linux/kernel_stat.h> #include <linux/mc146818rtc.h> #include <linux/cache.h> #include <linux/interrupt.h> #include <linux/cpu.h> #include <linux/gfp.h> #include <linux/kexec.h> #include <asm/mtrr.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/proto.h> #include <asm/apic.h> #include <asm/cpu.h> #include <asm/idtentry.h> #include <asm/nmi.h> #include <asm/mce.h> #include <asm/trace/irq_vectors.h> #include <asm/kexec.h> #include <asm/reboot.h> /* * Some notes on x86 processor bugs affecting SMP operation: * * Pentium, Pentium Pro, II, III (and all CPUs) have bugs. * The Linux implications for SMP are handled as follows: * * Pentium III / [Xeon] * None of the E1AP-E3AP errata are visible to the user. * * E1AP. see PII A1AP * E2AP. see PII A2AP * E3AP. see PII A3AP * * Pentium II / [Xeon] * None of the A1AP-A3AP errata are visible to the user. * * A1AP. see PPro 1AP * A2AP. see PPro 2AP * A3AP. see PPro 7AP * * Pentium Pro * None of 1AP-9AP errata are visible to the normal user, * except occasional delivery of 'spurious interrupt' as trap #15. * This is very rare and a non-problem. * * 1AP. Linux maps APIC as non-cacheable * 2AP. worked around in hardware * 3AP. fixed in C0 and above steppings microcode update. * Linux does not use excessive STARTUP_IPIs. * 4AP. worked around in hardware * 5AP. symmetric IO mode (normal Linux operation) not affected. * 'noapic' mode has vector 0xf filled out properly. * 6AP. 'noapic' mode might be affected - fixed in later steppings * 7AP. We do not assume writes to the LVT deasserting IRQs * 8AP. We do not enable low power mode (deep sleep) during MP bootup * 9AP. We do not use mixed mode * * Pentium * There is a marginal case where REP MOVS on 100MHz SMP * machines with B stepping processors can fail. XXX should provide * an L1cache=Writethrough or L1cache=off option. * * B stepping CPUs may hang. There are hardware work arounds * for this. We warn about it in case your board doesn't have the work * arounds. Basically that's so I can tell anyone with a B stepping * CPU and SMP problems "tough". * * Specific items [From Pentium Processor Specification Update] * * 1AP. Linux doesn't use remote read * 2AP. Linux doesn't trust APIC errors * 3AP. We work around this * 4AP. Linux never generated 3 interrupts of the same priority * to cause a lost local interrupt. * 5AP. Remote read is never used * 6AP. not affected - worked around in hardware * 7AP. not affected - worked around in hardware * 8AP. worked around in hardware - we get explicit CS errors if not * 9AP. only 'noapic' mode affected. Might generate spurious * interrupts, we log only the first one and count the * rest silently. * 10AP. not affected - worked around in hardware * 11AP. Linux reads the APIC between writes to avoid this, as per * the documentation. Make sure you preserve this as it affects * the C stepping chips too. * 12AP. not affected - worked around in hardware * 13AP. not affected - worked around in hardware * 14AP. we always deassert INIT during bootup * 15AP. not affected - worked around in hardware * 16AP. 
not affected - worked around in hardware * 17AP. not affected - worked around in hardware * 18AP. not affected - worked around in hardware * 19AP. not affected - worked around in BIOS * * If this sounds worrying believe me these bugs are either ___RARE___, * or are signal timing bugs worked around in hardware and there's * about nothing of note with C stepping upwards. */ static atomic_t stopping_cpu = ATOMIC_INIT(-1); static bool smp_no_nmi_ipi = false; static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) { /* We are registered on stopping cpu too, avoid spurious NMI */ if (raw_smp_processor_id() == atomic_read(&stopping_cpu)) return NMI_HANDLED; cpu_emergency_disable_virtualization(); stop_this_cpu(NULL); return NMI_HANDLED; } /* * Disable virtualization, APIC etc. and park the CPU in a HLT loop */ DEFINE_IDTENTRY_SYSVEC(sysvec_reboot) { apic_eoi(); cpu_emergency_disable_virtualization(); stop_this_cpu(NULL); } static int register_stop_handler(void) { return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback, NMI_FLAG_FIRST, "smp_stop"); } static void native_stop_other_cpus(int wait) { unsigned int cpu = smp_processor_id(); unsigned long flags, timeout; if (reboot_force) return; /* Only proceed if this is the first CPU to reach this code */ if (atomic_cmpxchg(&stopping_cpu, -1, cpu) != -1) return; /* For kexec, ensure that offline CPUs are out of MWAIT and in HLT */ if (kexec_in_progress) smp_kick_mwait_play_dead(); /* * 1) Send an IPI on the reboot vector to all other CPUs. * * The other CPUs should react on it after leaving critical * sections and re-enabling interrupts. They might still hold * locks, but there is nothing which can be done about that. * * 2) Wait for all other CPUs to report that they reached the * HLT loop in stop_this_cpu() * * 3) If the system uses INIT/STARTUP for CPU bringup, then * send all present CPUs an INIT vector, which brings them * completely out of the way. * * 4) If #3 is not possible and #2 timed out send an NMI to the * CPUs which did not yet report * * 5) Wait for all other CPUs to report that they reached the * HLT loop in stop_this_cpu() * * #4 can obviously race against a CPU reaching the HLT loop late. * That CPU will have reported already and the "have all CPUs * reached HLT" condition will be true despite the fact that the * other CPU is still handling the NMI. Again, there is no * protection against that as "disabled" APICs still respond to * NMIs. */ cpumask_copy(&cpus_stop_mask, cpu_online_mask); cpumask_clear_cpu(cpu, &cpus_stop_mask); if (!cpumask_empty(&cpus_stop_mask)) { apic_send_IPI_allbutself(REBOOT_VECTOR); /* * Don't wait longer than a second for IPI completion. The * wait request is not checked here because that would * prevent an NMI/INIT shutdown in case that not all * CPUs reach shutdown state. */ timeout = USEC_PER_SEC; while (!cpumask_empty(&cpus_stop_mask) && timeout--) udelay(1); } /* * Park all other CPUs in INIT including "offline" CPUs, if * possible. That's a safe place where they can't resume execution * of HLT and then execute the HLT loop from overwritten text or * page tables. * * The only downside is a broadcast MCE, but up to the point where * the kexec() kernel brought all APs online again an MCE will just * make HLT resume and handle the MCE. The machine crashes and burns * due to overwritten text, page tables and data. So there is a * choice between fire and frying pan. The result is pretty much * the same. Chose frying pan until x86 provides a sane mechanism * to park a CPU. 
*/ if (smp_park_other_cpus_in_init()) goto done; /* * If park with INIT was not possible and the REBOOT_VECTOR didn't * take all secondary CPUs offline, try with the NMI. */ if (!cpumask_empty(&cpus_stop_mask)) { /* * If NMI IPI is enabled, try to register the stop handler * and send the IPI. In any case try to wait for the other * CPUs to stop. */ if (!smp_no_nmi_ipi && !register_stop_handler()) { pr_emerg("Shutting down cpus with NMI\n"); for_each_cpu(cpu, &cpus_stop_mask) __apic_send_IPI(cpu, NMI_VECTOR); } /* * Don't wait longer than 10 ms if the caller didn't * request it. If wait is true, the machine hangs here if * one or more CPUs do not reach shutdown state. */ timeout = USEC_PER_MSEC * 10; while (!cpumask_empty(&cpus_stop_mask) && (wait || timeout--)) udelay(1); } done: local_irq_save(flags); disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); local_irq_restore(flags); /* * Ensure that the cpus_stop_mask cache lines are invalidated on * the other CPUs. See comment vs. SME in stop_this_cpu(). */ cpumask_clear(&cpus_stop_mask); } /* * Reschedule call back. KVM uses this interrupt to force a cpu out of * guest mode. */ DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_reschedule_ipi) { apic_eoi(); trace_reschedule_entry(RESCHEDULE_VECTOR); inc_irq_stat(irq_resched_count); scheduler_ipi(); trace_reschedule_exit(RESCHEDULE_VECTOR); } DEFINE_IDTENTRY_SYSVEC(sysvec_call_function) { apic_eoi(); trace_call_function_entry(CALL_FUNCTION_VECTOR); inc_irq_stat(irq_call_count); generic_smp_call_function_interrupt(); trace_call_function_exit(CALL_FUNCTION_VECTOR); } DEFINE_IDTENTRY_SYSVEC(sysvec_call_function_single) { apic_eoi(); trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR); inc_irq_stat(irq_call_count); generic_smp_call_function_single_interrupt(); trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR); } static int __init nonmi_ipi_setup(char *str) { smp_no_nmi_ipi = true; return 1; } __setup("nonmi_ipi", nonmi_ipi_setup); struct smp_ops smp_ops = { .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, .smp_prepare_cpus = native_smp_prepare_cpus, .smp_cpus_done = native_smp_cpus_done, .stop_other_cpus = native_stop_other_cpus, #if defined(CONFIG_KEXEC_CORE) .crash_stop_other_cpus = kdump_nmi_shootdown_cpus, #endif .smp_send_reschedule = native_smp_send_reschedule, .kick_ap_alive = native_kick_ap, .cpu_disable = native_cpu_disable, .play_dead = native_play_dead, .send_call_func_ipi = native_send_call_func_ipi, .send_call_func_single_ipi = native_send_call_func_single_ipi, }; EXPORT_SYMBOL_GPL(smp_ops);
linux-master
arch/x86/kernel/smp.c
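native_stop_other_cpus() above admits only the first CPU that atomically swaps stopping_cpu from -1 to its own id; every later caller sees the slot taken and returns. A C11 user-space sketch of that first-caller-wins gate, illustrative only, with an assumed helper name:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int stopping_cpu = -1;	/* -1: nobody has claimed the stop yet */

/* Return 1 if this caller won the race, mirroring the cmpxchg gate above. */
static int try_claim_stop(int cpu)
{
	int expected = -1;
	return atomic_compare_exchange_strong(&stopping_cpu, &expected, cpu);
}

int main(void)
{
	printf("cpu 3: %s\n", try_claim_stop(3) ? "claimed shutdown" : "lost the race");
	printf("cpu 7: %s\n", try_claim_stop(7) ? "claimed shutdown" : "lost the race");
	return 0;
}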
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Memory Encryption Support * * Copyright (C) 2019 SUSE * * Author: Joerg Roedel <[email protected]> */ #define pr_fmt(fmt) "SEV: " fmt #include <linux/sched/debug.h> /* For show_regs() */ #include <linux/percpu-defs.h> #include <linux/cc_platform.h> #include <linux/printk.h> #include <linux/mm_types.h> #include <linux/set_memory.h> #include <linux/memblock.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/cpumask.h> #include <linux/efi.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/psp-sev.h> #include <uapi/linux/sev-guest.h> #include <asm/cpu_entry_area.h> #include <asm/stacktrace.h> #include <asm/sev.h> #include <asm/insn-eval.h> #include <asm/fpu/xcr.h> #include <asm/processor.h> #include <asm/realmode.h> #include <asm/setup.h> #include <asm/traps.h> #include <asm/svm.h> #include <asm/smp.h> #include <asm/cpu.h> #include <asm/apic.h> #include <asm/cpuid.h> #include <asm/cmdline.h> #define DR7_RESET_VALUE 0x400 /* AP INIT values as documented in the APM2 section "Processor Initialization State" */ #define AP_INIT_CS_LIMIT 0xffff #define AP_INIT_DS_LIMIT 0xffff #define AP_INIT_LDTR_LIMIT 0xffff #define AP_INIT_GDTR_LIMIT 0xffff #define AP_INIT_IDTR_LIMIT 0xffff #define AP_INIT_TR_LIMIT 0xffff #define AP_INIT_RFLAGS_DEFAULT 0x2 #define AP_INIT_DR6_DEFAULT 0xffff0ff0 #define AP_INIT_GPAT_DEFAULT 0x0007040600070406ULL #define AP_INIT_XCR0_DEFAULT 0x1 #define AP_INIT_X87_FTW_DEFAULT 0x5555 #define AP_INIT_X87_FCW_DEFAULT 0x0040 #define AP_INIT_CR0_DEFAULT 0x60000010 #define AP_INIT_MXCSR_DEFAULT 0x1f80 /* For early boot hypervisor communication in SEV-ES enabled guests */ static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE); /* * Needs to be in the .data section because we need it NULL before bss is * cleared */ static struct ghcb *boot_ghcb __section(".data"); /* Bitmap of SEV features supported by the hypervisor */ static u64 sev_hv_features __ro_after_init; /* #VC handler runtime per-CPU data */ struct sev_es_runtime_data { struct ghcb ghcb_page; /* * Reserve one page per CPU as backup storage for the unencrypted GHCB. * It is needed when an NMI happens while the #VC handler uses the real * GHCB, and the NMI handler itself is causing another #VC exception. In * that case the GHCB content of the first handler needs to be backed up * and restored. */ struct ghcb backup_ghcb; /* * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions. * There is no need for it to be atomic, because nothing is written to * the GHCB between the read and the write of ghcb_active. So it is safe * to use it when a nested #VC exception happens before the write. * * This is necessary for example in the #VC->NMI->#VC case when the NMI * happens while the first #VC handler uses the GHCB. When the NMI code * raises a second #VC handler it might overwrite the contents of the * GHCB written by the first handler. To avoid this the content of the * GHCB is saved and restored when the GHCB is detected to be in use * already. */ bool ghcb_active; bool backup_ghcb_active; /* * Cached DR7 value - write it on DR7 writes and return it on reads. * That value will never make it to the real hardware DR7 as debugging * is currently unsupported in SEV-ES guests. 
*/ unsigned long dr7; }; struct ghcb_state { struct ghcb *ghcb; }; static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data); static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa); struct sev_config { __u64 debug : 1, /* * A flag used by __set_pages_state() that indicates when the * per-CPU GHCB has been created and registered and thus can be * used by the BSP instead of the early boot GHCB. * * For APs, the per-CPU GHCB is created before they are started * and registered upon startup, so this flag can be used globally * for the BSP and APs. */ ghcbs_initialized : 1, __reserved : 62; }; static struct sev_config sev_cfg __read_mostly; static __always_inline bool on_vc_stack(struct pt_regs *regs) { unsigned long sp = regs->sp; /* User-mode RSP is not trusted */ if (user_mode(regs)) return false; /* SYSCALL gap still has user-mode RSP */ if (ip_within_syscall_gap(regs)) return false; return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC))); } /* * This function handles the case when an NMI is raised in the #VC * exception handler entry code, before the #VC handler has switched off * its IST stack. In this case, the IST entry for #VC must be adjusted, * so that any nested #VC exception will not overwrite the stack * contents of the interrupted #VC handler. * * The IST entry is adjusted unconditionally so that it can be also be * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a * nested sev_es_ist_exit() call may adjust back the IST entry too * early. * * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run * on the NMI IST stack, as they are only called from NMI handling code * right now. */ void noinstr __sev_es_ist_enter(struct pt_regs *regs) { unsigned long old_ist, new_ist; /* Read old IST entry */ new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); /* * If NMI happened while on the #VC IST stack, set the new IST * value below regs->sp, so that the interrupted stack frame is * not overwritten by subsequent #VC exceptions. */ if (on_vc_stack(regs)) new_ist = regs->sp; /* * Reserve additional 8 bytes and store old IST value so this * adjustment can be unrolled in __sev_es_ist_exit(). */ new_ist -= sizeof(old_ist); *(unsigned long *)new_ist = old_ist; /* Set new IST entry */ this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist); } void noinstr __sev_es_ist_exit(void) { unsigned long ist; /* Read IST entry */ ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); if (WARN_ON(ist == __this_cpu_ist_top_va(VC))) return; /* Read back old IST entry and write it to the TSS */ this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist); } /* * Nothing shall interrupt this code path while holding the per-CPU * GHCB. The backup GHCB is only for NMIs interrupting this path. * * Callers must disable local interrupts around it. */ static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state) { struct sev_es_runtime_data *data; struct ghcb *ghcb; WARN_ON(!irqs_disabled()); data = this_cpu_read(runtime_data); ghcb = &data->ghcb_page; if (unlikely(data->ghcb_active)) { /* GHCB is already in use - save its contents */ if (unlikely(data->backup_ghcb_active)) { /* * Backup-GHCB is also already in use. There is no way * to continue here so just kill the machine. To make * panic() work, mark GHCBs inactive so that messages * can be printed out. */ data->ghcb_active = false; data->backup_ghcb_active = false; instrumentation_begin(); panic("Unable to handle #VC exception! 
GHCB and Backup GHCB are already in use"); instrumentation_end(); } /* Mark backup_ghcb active before writing to it */ data->backup_ghcb_active = true; state->ghcb = &data->backup_ghcb; /* Backup GHCB content */ *state->ghcb = *ghcb; } else { state->ghcb = NULL; data->ghcb_active = true; } return ghcb; } static inline u64 sev_es_rd_ghcb_msr(void) { return __rdmsr(MSR_AMD64_SEV_ES_GHCB); } static __always_inline void sev_es_wr_ghcb_msr(u64 val) { u32 low, high; low = (u32)(val); high = (u32)(val >> 32); native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high); } static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt, unsigned char *buffer) { return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); } static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt) { char buffer[MAX_INSN_SIZE]; int insn_bytes; insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer); if (insn_bytes == 0) { /* Nothing could be copied */ ctxt->fi.vector = X86_TRAP_PF; ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER; ctxt->fi.cr2 = ctxt->regs->ip; return ES_EXCEPTION; } else if (insn_bytes == -EINVAL) { /* Effective RIP could not be calculated */ ctxt->fi.vector = X86_TRAP_GP; ctxt->fi.error_code = 0; ctxt->fi.cr2 = 0; return ES_EXCEPTION; } if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes)) return ES_DECODE_FAILED; if (ctxt->insn.immediate.got) return ES_OK; else return ES_DECODE_FAILED; } static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt) { char buffer[MAX_INSN_SIZE]; int res, ret; res = vc_fetch_insn_kernel(ctxt, buffer); if (res) { ctxt->fi.vector = X86_TRAP_PF; ctxt->fi.error_code = X86_PF_INSTR; ctxt->fi.cr2 = ctxt->regs->ip; return ES_EXCEPTION; } ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64); if (ret < 0) return ES_DECODE_FAILED; else return ES_OK; } static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt) { if (user_mode(ctxt->regs)) return __vc_decode_user_insn(ctxt); else return __vc_decode_kern_insn(ctxt); } static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, char *dst, char *buf, size_t size) { unsigned long error_code = X86_PF_PROT | X86_PF_WRITE; /* * This function uses __put_user() independent of whether kernel or user * memory is accessed. This works fine because __put_user() does no * sanity checks of the pointer being accessed. All that it does is * to report when the access failed. * * Also, this function runs in atomic context, so __put_user() is not * allowed to sleep. The page-fault handler detects that it is running * in atomic context and will not try to take mmap_sem and handle the * fault, so additional pagefault_enable()/disable() calls are not * needed. * * The access can't be done via copy_to_user() here because * vc_write_mem() must not use string instructions to access unsafe * memory. The reason is that MOVS is emulated by the #VC handler by * splitting the move up into a read and a write and taking a nested #VC * exception on whatever of them is the MMIO access. Using string * instructions here would cause infinite nesting. 
*/ switch (size) { case 1: { u8 d1; u8 __user *target = (u8 __user *)dst; memcpy(&d1, buf, 1); if (__put_user(d1, target)) goto fault; break; } case 2: { u16 d2; u16 __user *target = (u16 __user *)dst; memcpy(&d2, buf, 2); if (__put_user(d2, target)) goto fault; break; } case 4: { u32 d4; u32 __user *target = (u32 __user *)dst; memcpy(&d4, buf, 4); if (__put_user(d4, target)) goto fault; break; } case 8: { u64 d8; u64 __user *target = (u64 __user *)dst; memcpy(&d8, buf, 8); if (__put_user(d8, target)) goto fault; break; } default: WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); return ES_UNSUPPORTED; } return ES_OK; fault: if (user_mode(ctxt->regs)) error_code |= X86_PF_USER; ctxt->fi.vector = X86_TRAP_PF; ctxt->fi.error_code = error_code; ctxt->fi.cr2 = (unsigned long)dst; return ES_EXCEPTION; } static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, char *src, char *buf, size_t size) { unsigned long error_code = X86_PF_PROT; /* * This function uses __get_user() independent of whether kernel or user * memory is accessed. This works fine because __get_user() does no * sanity checks of the pointer being accessed. All that it does is * to report when the access failed. * * Also, this function runs in atomic context, so __get_user() is not * allowed to sleep. The page-fault handler detects that it is running * in atomic context and will not try to take mmap_sem and handle the * fault, so additional pagefault_enable()/disable() calls are not * needed. * * The access can't be done via copy_from_user() here because * vc_read_mem() must not use string instructions to access unsafe * memory. The reason is that MOVS is emulated by the #VC handler by * splitting the move up into a read and a write and taking a nested #VC * exception on whatever of them is the MMIO access. Using string * instructions here would cause infinite nesting. 
*/ switch (size) { case 1: { u8 d1; u8 __user *s = (u8 __user *)src; if (__get_user(d1, s)) goto fault; memcpy(buf, &d1, 1); break; } case 2: { u16 d2; u16 __user *s = (u16 __user *)src; if (__get_user(d2, s)) goto fault; memcpy(buf, &d2, 2); break; } case 4: { u32 d4; u32 __user *s = (u32 __user *)src; if (__get_user(d4, s)) goto fault; memcpy(buf, &d4, 4); break; } case 8: { u64 d8; u64 __user *s = (u64 __user *)src; if (__get_user(d8, s)) goto fault; memcpy(buf, &d8, 8); break; } default: WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); return ES_UNSUPPORTED; } return ES_OK; fault: if (user_mode(ctxt->regs)) error_code |= X86_PF_USER; ctxt->fi.vector = X86_TRAP_PF; ctxt->fi.error_code = error_code; ctxt->fi.cr2 = (unsigned long)src; return ES_EXCEPTION; } static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, unsigned long vaddr, phys_addr_t *paddr) { unsigned long va = (unsigned long)vaddr; unsigned int level; phys_addr_t pa; pgd_t *pgd; pte_t *pte; pgd = __va(read_cr3_pa()); pgd = &pgd[pgd_index(va)]; pte = lookup_address_in_pgd(pgd, va, &level); if (!pte) { ctxt->fi.vector = X86_TRAP_PF; ctxt->fi.cr2 = vaddr; ctxt->fi.error_code = 0; if (user_mode(ctxt->regs)) ctxt->fi.error_code |= X86_PF_USER; return ES_EXCEPTION; } if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC)) /* Emulated MMIO to/from encrypted memory not supported */ return ES_UNSUPPORTED; pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; pa |= va & ~page_level_mask(level); *paddr = pa; return ES_OK; } /* Include code shared with pre-decompression boot stage */ #include "sev-shared.c" static noinstr void __sev_put_ghcb(struct ghcb_state *state) { struct sev_es_runtime_data *data; struct ghcb *ghcb; WARN_ON(!irqs_disabled()); data = this_cpu_read(runtime_data); ghcb = &data->ghcb_page; if (state->ghcb) { /* Restore GHCB from Backup */ *ghcb = *state->ghcb; data->backup_ghcb_active = false; state->ghcb = NULL; } else { /* * Invalidate the GHCB so a VMGEXIT instruction issued * from userspace won't appear to be valid. */ vc_ghcb_invalidate(ghcb); data->ghcb_active = false; } } void noinstr __sev_es_nmi_complete(void) { struct ghcb_state state; struct ghcb *ghcb; ghcb = __sev_get_ghcb(&state); vc_ghcb_invalidate(ghcb); ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE); ghcb_set_sw_exit_info_1(ghcb, 0); ghcb_set_sw_exit_info_2(ghcb, 0); sev_es_wr_ghcb_msr(__pa_nodebug(ghcb)); VMGEXIT(); __sev_put_ghcb(&state); } static u64 __init get_secrets_page(void) { u64 pa_data = boot_params.cc_blob_address; struct cc_blob_sev_info info; void *map; /* * The CC blob contains the address of the secrets page, check if the * blob is present. 
*/ if (!pa_data) return 0; map = early_memremap(pa_data, sizeof(info)); if (!map) { pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n"); return 0; } memcpy(&info, map, sizeof(info)); early_memunmap(map, sizeof(info)); /* smoke-test the secrets page passed */ if (!info.secrets_phys || info.secrets_len != PAGE_SIZE) return 0; return info.secrets_phys; } static u64 __init get_snp_jump_table_addr(void) { struct snp_secrets_page_layout *layout; void __iomem *mem; u64 pa, addr; pa = get_secrets_page(); if (!pa) return 0; mem = ioremap_encrypted(pa, PAGE_SIZE); if (!mem) { pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n"); return 0; } layout = (__force struct snp_secrets_page_layout *)mem; addr = layout->os_area.ap_jump_table_pa; iounmap(mem); return addr; } static u64 __init get_jump_table_addr(void) { struct ghcb_state state; unsigned long flags; struct ghcb *ghcb; u64 ret = 0; if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) return get_snp_jump_table_addr(); local_irq_save(flags); ghcb = __sev_get_ghcb(&state); vc_ghcb_invalidate(ghcb); ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE); ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE); ghcb_set_sw_exit_info_2(ghcb, 0); sev_es_wr_ghcb_msr(__pa(ghcb)); VMGEXIT(); if (ghcb_sw_exit_info_1_is_valid(ghcb) && ghcb_sw_exit_info_2_is_valid(ghcb)) ret = ghcb->save.sw_exit_info_2; __sev_put_ghcb(&state); local_irq_restore(flags); return ret; } static void early_set_pages_state(unsigned long vaddr, unsigned long paddr, unsigned long npages, enum psc_op op) { unsigned long paddr_end; u64 val; int ret; vaddr = vaddr & PAGE_MASK; paddr = paddr & PAGE_MASK; paddr_end = paddr + (npages << PAGE_SHIFT); while (paddr < paddr_end) { if (op == SNP_PAGE_STATE_SHARED) { /* Page validation must be rescinded before changing to shared */ ret = pvalidate(vaddr, RMP_PG_SIZE_4K, false); if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret)) goto e_term; } /* * Use the MSR protocol because this function can be called before * the GHCB is established. */ sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op)); VMGEXIT(); val = sev_es_rd_ghcb_msr(); if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP, "Wrong PSC response code: 0x%x\n", (unsigned int)GHCB_RESP_CODE(val))) goto e_term; if (WARN(GHCB_MSR_PSC_RESP_VAL(val), "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n", op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared", paddr, GHCB_MSR_PSC_RESP_VAL(val))) goto e_term; if (op == SNP_PAGE_STATE_PRIVATE) { /* Page validation must be performed after changing to private */ ret = pvalidate(vaddr, RMP_PG_SIZE_4K, true); if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret)) goto e_term; } vaddr += PAGE_SIZE; paddr += PAGE_SIZE; } return; e_term: sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC); } void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { /* * This can be invoked in early boot while running identity mapped, so * use an open coded check for SNP instead of using cc_platform_has(). * This eliminates worries about jump tables or checking boot_cpu_data * in the cc_platform_has() function. */ if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED)) return; /* * Ask the hypervisor to mark the memory pages as private in the RMP * table. 
*/ early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE); } void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { /* * This can be invoked in early boot while running identity mapped, so * use an open coded check for SNP instead of using cc_platform_has(). * This eliminates worries about jump tables or checking boot_cpu_data * in the cc_platform_has() function. */ if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED)) return; /* Ask hypervisor to mark the memory pages shared in the RMP table. */ early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED); } void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { unsigned long vaddr, npages; vaddr = (unsigned long)__va(paddr); npages = PAGE_ALIGN(sz) >> PAGE_SHIFT; if (op == SNP_PAGE_STATE_PRIVATE) early_snp_set_memory_private(vaddr, paddr, npages); else if (op == SNP_PAGE_STATE_SHARED) early_snp_set_memory_shared(vaddr, paddr, npages); else WARN(1, "invalid memory op %d\n", op); } static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr, unsigned long vaddr_end, int op) { struct ghcb_state state; bool use_large_entry; struct psc_hdr *hdr; struct psc_entry *e; unsigned long flags; unsigned long pfn; struct ghcb *ghcb; int i; hdr = &data->hdr; e = data->entries; memset(data, 0, sizeof(*data)); i = 0; while (vaddr < vaddr_end && i < ARRAY_SIZE(data->entries)) { hdr->end_entry = i; if (is_vmalloc_addr((void *)vaddr)) { pfn = vmalloc_to_pfn((void *)vaddr); use_large_entry = false; } else { pfn = __pa(vaddr) >> PAGE_SHIFT; use_large_entry = true; } e->gfn = pfn; e->operation = op; if (use_large_entry && IS_ALIGNED(vaddr, PMD_SIZE) && (vaddr_end - vaddr) >= PMD_SIZE) { e->pagesize = RMP_PG_SIZE_2M; vaddr += PMD_SIZE; } else { e->pagesize = RMP_PG_SIZE_4K; vaddr += PAGE_SIZE; } e++; i++; } /* Page validation must be rescinded before changing to shared */ if (op == SNP_PAGE_STATE_SHARED) pvalidate_pages(data); local_irq_save(flags); if (sev_cfg.ghcbs_initialized) ghcb = __sev_get_ghcb(&state); else ghcb = boot_ghcb; /* Invoke the hypervisor to perform the page state changes */ if (!ghcb || vmgexit_psc(ghcb, data)) sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC); if (sev_cfg.ghcbs_initialized) __sev_put_ghcb(&state); local_irq_restore(flags); /* Page validation must be performed after changing to private */ if (op == SNP_PAGE_STATE_PRIVATE) pvalidate_pages(data); return vaddr; } static void set_pages_state(unsigned long vaddr, unsigned long npages, int op) { struct snp_psc_desc desc; unsigned long vaddr_end; /* Use the MSR protocol when a GHCB is not available. 
*/ if (!boot_ghcb) return early_set_pages_state(vaddr, __pa(vaddr), npages, op); vaddr = vaddr & PAGE_MASK; vaddr_end = vaddr + (npages << PAGE_SHIFT); while (vaddr < vaddr_end) vaddr = __set_pages_state(&desc, vaddr, vaddr_end, op); } void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) return; set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED); } void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) return; set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE); } void snp_accept_memory(phys_addr_t start, phys_addr_t end) { unsigned long vaddr; unsigned int npages; if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) return; vaddr = (unsigned long)__va(start); npages = (end - start) >> PAGE_SHIFT; set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE); } static int snp_set_vmsa(void *va, bool vmsa) { u64 attrs; /* * Running at VMPL0 allows the kernel to change the VMSA bit for a page * using the RMPADJUST instruction. However, for the instruction to * succeed it must target the permissions of a lesser privileged * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST * instruction in the AMD64 APM Volume 3). */ attrs = 1; if (vmsa) attrs |= RMPADJUST_VMSA_PAGE_BIT; return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs); } #define __ATTR_BASE (SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK) #define INIT_CS_ATTRIBS (__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK) #define INIT_DS_ATTRIBS (__ATTR_BASE | SVM_SELECTOR_WRITE_MASK) #define INIT_LDTR_ATTRIBS (SVM_SELECTOR_P_MASK | 2) #define INIT_TR_ATTRIBS (SVM_SELECTOR_P_MASK | 3) static void *snp_alloc_vmsa_page(void) { struct page *p; /* * Allocate VMSA page to work around the SNP erratum where the CPU will * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB) * collides with the RMP entry of VMSA page. The recommended workaround * is to not use a large page. * * Allocate an 8k page which is also 8k-aligned. */ p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1); if (!p) return NULL; split_page(p, 1); /* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */ __free_page(p); return page_address(p + 1); } static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa) { int err; err = snp_set_vmsa(vmsa, false); if (err) pr_err("clear VMSA page failed (%u), leaking page\n", err); else free_page((unsigned long)vmsa); } static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip) { struct sev_es_save_area *cur_vmsa, *vmsa; struct ghcb_state state; unsigned long flags; struct ghcb *ghcb; u8 sipi_vector; int cpu, ret; u64 cr4; /* * The hypervisor SNP feature support check has happened earlier, just check * the AP_CREATION one here. */ if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION)) return -EOPNOTSUPP; /* * Verify the desired start IP against the known trampoline start IP * to catch any future new trampolines that may be introduced that * would require a new protected guest entry point. 
*/ if (WARN_ONCE(start_ip != real_mode_header->trampoline_start, "Unsupported SNP start_ip: %lx\n", start_ip)) return -EINVAL; /* Override start_ip with known protected guest start IP */ start_ip = real_mode_header->sev_es_trampoline_start; /* Find the logical CPU for the APIC ID */ for_each_present_cpu(cpu) { if (arch_match_cpu_phys_id(cpu, apic_id)) break; } if (cpu >= nr_cpu_ids) return -EINVAL; cur_vmsa = per_cpu(sev_vmsa, cpu); /* * A new VMSA is created each time because there is no guarantee that * the current VMSA is the kernels or that the vCPU is not running. If * an attempt was done to use the current VMSA with a running vCPU, a * #VMEXIT of that vCPU would wipe out all of the settings being done * here. */ vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page(); if (!vmsa) return -ENOMEM; /* CR4 should maintain the MCE value */ cr4 = native_read_cr4() & X86_CR4_MCE; /* Set the CS value based on the start_ip converted to a SIPI vector */ sipi_vector = (start_ip >> 12); vmsa->cs.base = sipi_vector << 12; vmsa->cs.limit = AP_INIT_CS_LIMIT; vmsa->cs.attrib = INIT_CS_ATTRIBS; vmsa->cs.selector = sipi_vector << 8; /* Set the RIP value based on start_ip */ vmsa->rip = start_ip & 0xfff; /* Set AP INIT defaults as documented in the APM */ vmsa->ds.limit = AP_INIT_DS_LIMIT; vmsa->ds.attrib = INIT_DS_ATTRIBS; vmsa->es = vmsa->ds; vmsa->fs = vmsa->ds; vmsa->gs = vmsa->ds; vmsa->ss = vmsa->ds; vmsa->gdtr.limit = AP_INIT_GDTR_LIMIT; vmsa->ldtr.limit = AP_INIT_LDTR_LIMIT; vmsa->ldtr.attrib = INIT_LDTR_ATTRIBS; vmsa->idtr.limit = AP_INIT_IDTR_LIMIT; vmsa->tr.limit = AP_INIT_TR_LIMIT; vmsa->tr.attrib = INIT_TR_ATTRIBS; vmsa->cr4 = cr4; vmsa->cr0 = AP_INIT_CR0_DEFAULT; vmsa->dr7 = DR7_RESET_VALUE; vmsa->dr6 = AP_INIT_DR6_DEFAULT; vmsa->rflags = AP_INIT_RFLAGS_DEFAULT; vmsa->g_pat = AP_INIT_GPAT_DEFAULT; vmsa->xcr0 = AP_INIT_XCR0_DEFAULT; vmsa->mxcsr = AP_INIT_MXCSR_DEFAULT; vmsa->x87_ftw = AP_INIT_X87_FTW_DEFAULT; vmsa->x87_fcw = AP_INIT_X87_FCW_DEFAULT; /* SVME must be set. */ vmsa->efer = EFER_SVME; /* * Set the SNP-specific fields for this VMSA: * VMPL level * SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits) */ vmsa->vmpl = 0; vmsa->sev_features = sev_status >> 2; /* Switch the page over to a VMSA page now that it is initialized */ ret = snp_set_vmsa(vmsa, true); if (ret) { pr_err("set VMSA page failed (%u)\n", ret); free_page((unsigned long)vmsa); return -EINVAL; } /* Issue VMGEXIT AP Creation NAE event */ local_irq_save(flags); ghcb = __sev_get_ghcb(&state); vc_ghcb_invalidate(ghcb); ghcb_set_rax(ghcb, vmsa->sev_features); ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION); ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE); ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa)); sev_es_wr_ghcb_msr(__pa(ghcb)); VMGEXIT(); if (!ghcb_sw_exit_info_1_is_valid(ghcb) || lower_32_bits(ghcb->save.sw_exit_info_1)) { pr_err("SNP AP Creation error\n"); ret = -EINVAL; } __sev_put_ghcb(&state); local_irq_restore(flags); /* Perform cleanup if there was an error */ if (ret) { snp_cleanup_vmsa(vmsa); vmsa = NULL; } /* Free up any previous VMSA page */ if (cur_vmsa) snp_cleanup_vmsa(cur_vmsa); /* Record the current VMSA page */ per_cpu(sev_vmsa, cpu) = vmsa; return ret; } void __init snp_set_wakeup_secondary_cpu(void) { if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) return; /* * Always set this override if SNP is enabled. This makes it the * required method to start APs under SNP. If the hypervisor does * not support AP creation, then no APs will be started. 
*/ apic_update_callback(wakeup_secondary_cpu, wakeup_cpu_via_vmgexit); } int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { u16 startup_cs, startup_ip; phys_addr_t jump_table_pa; u64 jump_table_addr; u16 __iomem *jump_table; jump_table_addr = get_jump_table_addr(); /* On UP guests there is no jump table so this is not a failure */ if (!jump_table_addr) return 0; /* Check if AP Jump Table is page-aligned */ if (jump_table_addr & ~PAGE_MASK) return -EINVAL; jump_table_pa = jump_table_addr & PAGE_MASK; startup_cs = (u16)(rmh->trampoline_start >> 4); startup_ip = (u16)(rmh->sev_es_trampoline_start - rmh->trampoline_start); jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE); if (!jump_table) return -EIO; writew(startup_ip, &jump_table[0]); writew(startup_cs, &jump_table[1]); iounmap(jump_table); return 0; } /* * This is needed by the OVMF UEFI firmware which will use whatever it finds in * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu * runtime GHCBs used by the kernel are also mapped in the EFI page-table. */ int __init sev_es_efi_map_ghcbs(pgd_t *pgd) { struct sev_es_runtime_data *data; unsigned long address, pflags; int cpu; u64 pfn; if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) return 0; pflags = _PAGE_NX | _PAGE_RW; for_each_possible_cpu(cpu) { data = per_cpu(runtime_data, cpu); address = __pa(&data->ghcb_page); pfn = address >> PAGE_SHIFT; if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags)) return 1; } return 0; } static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { struct pt_regs *regs = ctxt->regs; enum es_result ret; u64 exit_info_1; /* Is it a WRMSR? */ exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0; ghcb_set_rcx(ghcb, regs->cx); if (exit_info_1) { ghcb_set_rax(ghcb, regs->ax); ghcb_set_rdx(ghcb, regs->dx); } ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0); if ((ret == ES_OK) && (!exit_info_1)) { regs->ax = ghcb->save.rax; regs->dx = ghcb->save.rdx; } return ret; } static void snp_register_per_cpu_ghcb(void) { struct sev_es_runtime_data *data; struct ghcb *ghcb; data = this_cpu_read(runtime_data); ghcb = &data->ghcb_page; snp_register_ghcb_early(__pa(ghcb)); } void setup_ghcb(void) { if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) return; /* First make sure the hypervisor talks a supported protocol. */ if (!sev_es_negotiate_protocol()) sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); /* * Check whether the runtime #VC exception handler is active. It uses * the per-CPU GHCB page which is set up by sev_es_init_vc_handling(). * * If SNP is active, register the per-CPU GHCB page so that the runtime * exception handler can use it. */ if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) { if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) snp_register_per_cpu_ghcb(); sev_cfg.ghcbs_initialized = true; return; } /* * Clear the boot_ghcb. The first exception comes in before the bss * section is cleared. */ memset(&boot_ghcb_page, 0, PAGE_SIZE); /* Alright - Make the boot-ghcb public */ boot_ghcb = &boot_ghcb_page; /* SNP guest requires that GHCB GPA must be registered. 
*/ if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) snp_register_ghcb_early(__pa(&boot_ghcb_page)); } #ifdef CONFIG_HOTPLUG_CPU static void sev_es_ap_hlt_loop(void) { struct ghcb_state state; struct ghcb *ghcb; ghcb = __sev_get_ghcb(&state); while (true) { vc_ghcb_invalidate(ghcb); ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP); ghcb_set_sw_exit_info_1(ghcb, 0); ghcb_set_sw_exit_info_2(ghcb, 0); sev_es_wr_ghcb_msr(__pa(ghcb)); VMGEXIT(); /* Wakeup signal? */ if (ghcb_sw_exit_info_2_is_valid(ghcb) && ghcb->save.sw_exit_info_2) break; } __sev_put_ghcb(&state); } /* * Play_dead handler when running under SEV-ES. This is needed because * the hypervisor can't deliver an SIPI request to restart the AP. * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the * hypervisor wakes it up again. */ static void sev_es_play_dead(void) { play_dead_common(); /* IRQs now disabled */ sev_es_ap_hlt_loop(); /* * If we get here, the VCPU was woken up again. Jump to CPU * startup code to get it back online. */ soft_restart_cpu(); } #else /* CONFIG_HOTPLUG_CPU */ #define sev_es_play_dead native_play_dead #endif /* CONFIG_HOTPLUG_CPU */ #ifdef CONFIG_SMP static void __init sev_es_setup_play_dead(void) { smp_ops.play_dead = sev_es_play_dead; } #else static inline void sev_es_setup_play_dead(void) { } #endif static void __init alloc_runtime_data(int cpu) { struct sev_es_runtime_data *data; data = memblock_alloc(sizeof(*data), PAGE_SIZE); if (!data) panic("Can't allocate SEV-ES runtime data"); per_cpu(runtime_data, cpu) = data; } static void __init init_ghcb(int cpu) { struct sev_es_runtime_data *data; int err; data = per_cpu(runtime_data, cpu); err = early_set_memory_decrypted((unsigned long)&data->ghcb_page, sizeof(data->ghcb_page)); if (err) panic("Can't map GHCBs unencrypted"); memset(&data->ghcb_page, 0, sizeof(data->ghcb_page)); data->ghcb_active = false; data->backup_ghcb_active = false; } void __init sev_es_init_vc_handling(void) { int cpu; BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE); if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) return; if (!sev_es_check_cpu_features()) panic("SEV-ES CPU Features missing"); /* * SNP is supported in v2 of the GHCB spec which mandates support for HV * features. 
*/ if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) { sev_hv_features = get_hv_features(); if (!(sev_hv_features & GHCB_HV_FT_SNP)) sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED); } /* Initialize per-cpu GHCB pages */ for_each_possible_cpu(cpu) { alloc_runtime_data(cpu); init_ghcb(cpu); } sev_es_setup_play_dead(); /* Secondary CPUs use the runtime #VC handler */ initial_vc_handler = (unsigned long)kernel_exc_vmm_communication; } static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt) { int trapnr = ctxt->fi.vector; if (trapnr == X86_TRAP_PF) native_write_cr2(ctxt->fi.cr2); ctxt->regs->orig_ax = ctxt->fi.error_code; do_early_exception(ctxt->regs, trapnr); } static long *vc_insn_get_rm(struct es_em_ctxt *ctxt) { long *reg_array; int offset; reg_array = (long *)ctxt->regs; offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs); if (offset < 0) return NULL; offset /= sizeof(long); return reg_array + offset; } static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt, unsigned int bytes, bool read) { u64 exit_code, exit_info_1, exit_info_2; unsigned long ghcb_pa = __pa(ghcb); enum es_result res; phys_addr_t paddr; void __user *ref; ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs); if (ref == (void __user *)-1L) return ES_UNSUPPORTED; exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE; res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr); if (res != ES_OK) { if (res == ES_EXCEPTION && !read) ctxt->fi.error_code |= X86_PF_WRITE; return res; } exit_info_1 = paddr; /* Can never be greater than 8 */ exit_info_2 = bytes; ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer)); return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2); } /* * The MOVS instruction has two memory operands, which raises the * problem that it is not known whether the access to the source or the * destination caused the #VC exception (and hence whether an MMIO read * or write operation needs to be emulated). * * Instead of playing games with walking page-tables and trying to guess * whether the source or destination is an MMIO range, split the move * into two operations, a read and a write with only one memory operand. * This will cause a nested #VC exception on the MMIO address which can * then be handled. * * This implementation has the benefit that it also supports MOVS where * source _and_ destination are MMIO regions. * * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a * rare operation. If it turns out to be a performance problem the split * operations can be moved to memcpy_fromio() and memcpy_toio(). 
*/ static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt, unsigned int bytes) { unsigned long ds_base, es_base; unsigned char *src, *dst; unsigned char buffer[8]; enum es_result ret; bool rep; int off; ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS); es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES); if (ds_base == -1L || es_base == -1L) { ctxt->fi.vector = X86_TRAP_GP; ctxt->fi.error_code = 0; return ES_EXCEPTION; } src = ds_base + (unsigned char *)ctxt->regs->si; dst = es_base + (unsigned char *)ctxt->regs->di; ret = vc_read_mem(ctxt, src, buffer, bytes); if (ret != ES_OK) return ret; ret = vc_write_mem(ctxt, dst, buffer, bytes); if (ret != ES_OK) return ret; if (ctxt->regs->flags & X86_EFLAGS_DF) off = -bytes; else off = bytes; ctxt->regs->si += off; ctxt->regs->di += off; rep = insn_has_rep_prefix(&ctxt->insn); if (rep) ctxt->regs->cx -= 1; if (!rep || ctxt->regs->cx == 0) return ES_OK; else return ES_RETRY; } static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { struct insn *insn = &ctxt->insn; enum insn_mmio_type mmio; unsigned int bytes = 0; enum es_result ret; u8 sign_byte; long *reg_data; mmio = insn_decode_mmio(insn, &bytes); if (mmio == INSN_MMIO_DECODE_FAILED) return ES_DECODE_FAILED; if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) { reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs); if (!reg_data) return ES_DECODE_FAILED; } switch (mmio) { case INSN_MMIO_WRITE: memcpy(ghcb->shared_buffer, reg_data, bytes); ret = vc_do_mmio(ghcb, ctxt, bytes, false); break; case INSN_MMIO_WRITE_IMM: memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes); ret = vc_do_mmio(ghcb, ctxt, bytes, false); break; case INSN_MMIO_READ: ret = vc_do_mmio(ghcb, ctxt, bytes, true); if (ret) break; /* Zero-extend for 32-bit operation */ if (bytes == 4) *reg_data = 0; memcpy(reg_data, ghcb->shared_buffer, bytes); break; case INSN_MMIO_READ_ZERO_EXTEND: ret = vc_do_mmio(ghcb, ctxt, bytes, true); if (ret) break; /* Zero extend based on operand size */ memset(reg_data, 0, insn->opnd_bytes); memcpy(reg_data, ghcb->shared_buffer, bytes); break; case INSN_MMIO_READ_SIGN_EXTEND: ret = vc_do_mmio(ghcb, ctxt, bytes, true); if (ret) break; if (bytes == 1) { u8 *val = (u8 *)ghcb->shared_buffer; sign_byte = (*val & 0x80) ? 0xff : 0x00; } else { u16 *val = (u16 *)ghcb->shared_buffer; sign_byte = (*val & 0x8000) ? 
0xff : 0x00; } /* Sign extend based on operand size */ memset(reg_data, sign_byte, insn->opnd_bytes); memcpy(reg_data, ghcb->shared_buffer, bytes); break; case INSN_MMIO_MOVS: ret = vc_handle_mmio_movs(ctxt, bytes); break; default: ret = ES_UNSUPPORTED; break; } return ret; } static enum es_result vc_handle_dr7_write(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { struct sev_es_runtime_data *data = this_cpu_read(runtime_data); long val, *reg = vc_insn_get_rm(ctxt); enum es_result ret; if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP) return ES_VMM_ERROR; if (!reg) return ES_DECODE_FAILED; val = *reg; /* Upper 32 bits must be written as zeroes */ if (val >> 32) { ctxt->fi.vector = X86_TRAP_GP; ctxt->fi.error_code = 0; return ES_EXCEPTION; } /* Clear out other reserved bits and set bit 10 */ val = (val & 0xffff23ffL) | BIT(10); /* Early non-zero writes to DR7 are not supported */ if (!data && (val & ~DR7_RESET_VALUE)) return ES_UNSUPPORTED; /* Using a value of 0 for ExitInfo1 means RAX holds the value */ ghcb_set_rax(ghcb, val); ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0); if (ret != ES_OK) return ret; if (data) data->dr7 = val; return ES_OK; } static enum es_result vc_handle_dr7_read(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { struct sev_es_runtime_data *data = this_cpu_read(runtime_data); long *reg = vc_insn_get_rm(ctxt); if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP) return ES_VMM_ERROR; if (!reg) return ES_DECODE_FAILED; if (data) *reg = data->dr7; else *reg = DR7_RESET_VALUE; return ES_OK; } static enum es_result vc_handle_wbinvd(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0); } static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { enum es_result ret; ghcb_set_rcx(ghcb, ctxt->regs->cx); ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0); if (ret != ES_OK) return ret; if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb))) return ES_VMM_ERROR; ctxt->regs->ax = ghcb->save.rax; ctxt->regs->dx = ghcb->save.rdx; return ES_OK; } static enum es_result vc_handle_monitor(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { /* * Treat it as a NOP and do not leak a physical address to the * hypervisor. */ return ES_OK; } static enum es_result vc_handle_mwait(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { /* Treat the same as MONITOR/MONITORX */ return ES_OK; } static enum es_result vc_handle_vmmcall(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { enum es_result ret; ghcb_set_rax(ghcb, ctxt->regs->ax); ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0); if (x86_platform.hyper.sev_es_hcall_prepare) x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs); ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0); if (ret != ES_OK) return ret; if (!ghcb_rax_is_valid(ghcb)) return ES_VMM_ERROR; ctxt->regs->ax = ghcb->save.rax; /* * Call sev_es_hcall_finish() after regs->ax is already set. * This allows the hypervisor handler to overwrite it again if * necessary. */ if (x86_platform.hyper.sev_es_hcall_finish && !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs)) return ES_VMM_ERROR; return ES_OK; } static enum es_result vc_handle_trap_ac(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { /* * Calling ecx_alignment_check() directly does not work, because it * enables IRQs and the GHCB is active. Forward the exception and call * it later from vc_forward_exception(). 
*/ ctxt->fi.vector = X86_TRAP_AC; ctxt->fi.error_code = 0; return ES_EXCEPTION; } static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt, struct ghcb *ghcb, unsigned long exit_code) { enum es_result result; switch (exit_code) { case SVM_EXIT_READ_DR7: result = vc_handle_dr7_read(ghcb, ctxt); break; case SVM_EXIT_WRITE_DR7: result = vc_handle_dr7_write(ghcb, ctxt); break; case SVM_EXIT_EXCP_BASE + X86_TRAP_AC: result = vc_handle_trap_ac(ghcb, ctxt); break; case SVM_EXIT_RDTSC: case SVM_EXIT_RDTSCP: result = vc_handle_rdtsc(ghcb, ctxt, exit_code); break; case SVM_EXIT_RDPMC: result = vc_handle_rdpmc(ghcb, ctxt); break; case SVM_EXIT_INVD: pr_err_ratelimited("#VC exception for INVD??? Seriously???\n"); result = ES_UNSUPPORTED; break; case SVM_EXIT_CPUID: result = vc_handle_cpuid(ghcb, ctxt); break; case SVM_EXIT_IOIO: result = vc_handle_ioio(ghcb, ctxt); break; case SVM_EXIT_MSR: result = vc_handle_msr(ghcb, ctxt); break; case SVM_EXIT_VMMCALL: result = vc_handle_vmmcall(ghcb, ctxt); break; case SVM_EXIT_WBINVD: result = vc_handle_wbinvd(ghcb, ctxt); break; case SVM_EXIT_MONITOR: result = vc_handle_monitor(ghcb, ctxt); break; case SVM_EXIT_MWAIT: result = vc_handle_mwait(ghcb, ctxt); break; case SVM_EXIT_NPF: result = vc_handle_mmio(ghcb, ctxt); break; default: /* * Unexpected #VC exception */ result = ES_UNSUPPORTED; } return result; } static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt) { long error_code = ctxt->fi.error_code; int trapnr = ctxt->fi.vector; ctxt->regs->orig_ax = ctxt->fi.error_code; switch (trapnr) { case X86_TRAP_GP: exc_general_protection(ctxt->regs, error_code); break; case X86_TRAP_UD: exc_invalid_op(ctxt->regs); break; case X86_TRAP_PF: write_cr2(ctxt->fi.cr2); exc_page_fault(ctxt->regs, error_code); break; case X86_TRAP_AC: exc_alignment_check(ctxt->regs, error_code); break; default: pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n"); BUG(); } } static __always_inline bool is_vc2_stack(unsigned long sp) { return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2)); } static __always_inline bool vc_from_invalid_context(struct pt_regs *regs) { unsigned long sp, prev_sp; sp = (unsigned long)regs; prev_sp = regs->sp; /* * If the code was already executing on the VC2 stack when the #VC * happened, let it proceed to the normal handling routine. This way the * code executing on the VC2 stack can cause #VC exceptions to get handled. 
*/ return is_vc2_stack(sp) && !is_vc2_stack(prev_sp); } static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code) { struct ghcb_state state; struct es_em_ctxt ctxt; enum es_result result; struct ghcb *ghcb; bool ret = true; ghcb = __sev_get_ghcb(&state); vc_ghcb_invalidate(ghcb); result = vc_init_em_ctxt(&ctxt, regs, error_code); if (result == ES_OK) result = vc_handle_exitcode(&ctxt, ghcb, error_code); __sev_put_ghcb(&state); /* Done - now check the result */ switch (result) { case ES_OK: vc_finish_insn(&ctxt); break; case ES_UNSUPPORTED: pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n", error_code, regs->ip); ret = false; break; case ES_VMM_ERROR: pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n", error_code, regs->ip); ret = false; break; case ES_DECODE_FAILED: pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n", error_code, regs->ip); ret = false; break; case ES_EXCEPTION: vc_forward_exception(&ctxt); break; case ES_RETRY: /* Nothing to do */ break; default: pr_emerg("Unknown result in %s():%d\n", __func__, result); /* * Emulating the instruction which caused the #VC exception * failed - can't continue so print debug information */ BUG(); } return ret; } static __always_inline bool vc_is_db(unsigned long error_code) { return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB; } /* * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode * and will panic when an error happens. */ DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication) { irqentry_state_t irq_state; /* * With the current implementation it is always possible to switch to a * safe stack because #VC exceptions only happen at known places, like * intercepted instructions or accesses to MMIO areas/IO ports. They can * also happen with code instrumentation when the hypervisor intercepts * #DB, but the critical paths are forbidden to be instrumented, so #DB * exceptions currently also only happen in safe places. * * But keep this here in case the noinstr annotations are violated due * to bug elsewhere. */ if (unlikely(vc_from_invalid_context(regs))) { instrumentation_begin(); panic("Can't handle #VC exception from unsupported context\n"); instrumentation_end(); } /* * Handle #DB before calling into !noinstr code to avoid recursive #DB. */ if (vc_is_db(error_code)) { exc_debug(regs); return; } irq_state = irqentry_nmi_enter(regs); instrumentation_begin(); if (!vc_raw_handle_exception(regs, error_code)) { /* Show some debug info */ show_regs(regs); /* Ask hypervisor to sev_es_terminate */ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); /* If that fails and we get here - just panic */ panic("Returned from Terminate-Request to Hypervisor\n"); } instrumentation_end(); irqentry_nmi_exit(regs, irq_state); } /* * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode * and will kill the current task with SIGBUS when an error happens. */ DEFINE_IDTENTRY_VC_USER(exc_vmm_communication) { /* * Handle #DB before calling into !noinstr code to avoid recursive #DB. */ if (vc_is_db(error_code)) { noist_exc_debug(regs); return; } irqentry_enter_from_user_mode(regs); instrumentation_begin(); if (!vc_raw_handle_exception(regs, error_code)) { /* * Do not kill the machine if user-space triggered the * exception. Send SIGBUS instead and let user-space deal with * it. 
*/ force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0); } instrumentation_end(); irqentry_exit_to_user_mode(regs); } bool __init handle_vc_boot_ghcb(struct pt_regs *regs) { unsigned long exit_code = regs->orig_ax; struct es_em_ctxt ctxt; enum es_result result; vc_ghcb_invalidate(boot_ghcb); result = vc_init_em_ctxt(&ctxt, regs, exit_code); if (result == ES_OK) result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code); /* Done - now check the result */ switch (result) { case ES_OK: vc_finish_insn(&ctxt); break; case ES_UNSUPPORTED: early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n", exit_code, regs->ip); goto fail; case ES_VMM_ERROR: early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n", exit_code, regs->ip); goto fail; case ES_DECODE_FAILED: early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n", exit_code, regs->ip); goto fail; case ES_EXCEPTION: vc_early_forward_exception(&ctxt); break; case ES_RETRY: /* Nothing to do */ break; default: BUG(); } return true; fail: show_regs(regs); sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); } /* * Initial set up of SNP relies on information provided by the * Confidential Computing blob, which can be passed to the kernel * in the following ways, depending on how it is booted: * * - when booted via the boot/decompress kernel: * - via boot_params * * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH): * - via a setup_data entry, as defined by the Linux Boot Protocol * * Scan for the blob in that order. */ static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp) { struct cc_blob_sev_info *cc_info; /* Boot kernel would have passed the CC blob via boot_params. */ if (bp->cc_blob_address) { cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address; goto found_cc_info; } /* * If kernel was booted directly, without the use of the * boot/decompression kernel, the CC blob may have been passed via * setup_data instead. */ cc_info = find_cc_blob_setup_data(bp); if (!cc_info) return NULL; found_cc_info: if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC) snp_abort(); return cc_info; } bool __init snp_init(struct boot_params *bp) { struct cc_blob_sev_info *cc_info; if (!bp) return false; cc_info = find_cc_blob(bp); if (!cc_info) return false; setup_cpuid_table(cc_info); /* * The CC blob will be used later to access the secrets page. Cache * it here like the boot kernel does. 
*/ bp->cc_blob_address = (u32)(unsigned long)cc_info; return true; } void __init __noreturn snp_abort(void) { sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED); } static void dump_cpuid_table(void) { const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table(); int i = 0; pr_info("count=%d reserved=0x%x reserved2=0x%llx\n", cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2); for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) { const struct snp_cpuid_fn *fn = &cpuid_table->fn[i]; pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n", i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx, fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved); } } /* * It is useful from an auditing/testing perspective to provide an easy way * for the guest owner to know that the CPUID table has been initialized as * expected, but that initialization happens too early in boot to print any * sort of indicator, and there's not really any other good place to do it, * so do it here. */ static int __init report_cpuid_table(void) { const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table(); if (!cpuid_table->count) return 0; pr_info("Using SNP CPUID table, %d entries present.\n", cpuid_table->count); if (sev_cfg.debug) dump_cpuid_table(); return 0; } arch_initcall(report_cpuid_table); static int __init init_sev_config(char *str) { char *s; while ((s = strsep(&str, ","))) { if (!strcmp(s, "debug")) { sev_cfg.debug = true; continue; } pr_info("SEV command-line option '%s' was not recognized\n", s); } return 1; } __setup("sev=", init_sev_config); int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio) { struct ghcb_state state; struct es_em_ctxt ctxt; unsigned long flags; struct ghcb *ghcb; int ret; rio->exitinfo2 = SEV_RET_NO_FW_CALL; /* * __sev_get_ghcb() needs to run with IRQs disabled because it is using * a per-CPU GHCB. */ local_irq_save(flags); ghcb = __sev_get_ghcb(&state); if (!ghcb) { ret = -EIO; goto e_restore_irq; } vc_ghcb_invalidate(ghcb); if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) { ghcb_set_rax(ghcb, input->data_gpa); ghcb_set_rbx(ghcb, input->data_npages); } ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa); if (ret) goto e_put; rio->exitinfo2 = ghcb->save.sw_exit_info_2; switch (rio->exitinfo2) { case 0: break; case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_BUSY): ret = -EAGAIN; break; case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN): /* Number of expected pages are returned in RBX */ if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) { input->data_npages = ghcb_get_rbx(ghcb); ret = -ENOSPC; break; } fallthrough; default: ret = -EIO; break; } e_put: __sev_put_ghcb(&state); e_restore_irq: local_irq_restore(flags); return ret; } EXPORT_SYMBOL_GPL(snp_issue_guest_request); static struct platform_device sev_guest_device = { .name = "sev-guest", .id = -1, }; static int __init snp_init_platform_device(void) { struct sev_guest_platform_data data; u64 gpa; if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) return -ENODEV; gpa = get_secrets_page(); if (!gpa) return -ENODEV; data.secrets_gpa = gpa; if (platform_device_add_data(&sev_guest_device, &data, sizeof(data))) return -ENODEV; if (platform_device_register(&sev_guest_device)) return -ENODEV; pr_info("SNP guest platform device initialized.\n"); return 0; } device_initcall(snp_init_platform_device);
linux-master
arch/x86/kernel/sev.c
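
The __set_pages_state() loop in sev.c above batches a page-state change into a single GHCB request by preferring 2M entries wherever the virtual address is 2M-aligned and at least 2M of the range remains, and falling back to 4K entries otherwise (vmalloc addresses always take the 4K path). Below is a minimal standalone userspace sketch of just that chunking rule, using made-up constants and a made-up address range rather than the kernel's PMD_SIZE/PAGE_SIZE and struct psc_entry; it illustrates the alignment decision only, it is not the kernel code.

#include <stdio.h>

#define SZ_4K	(1UL << 12)
#define SZ_2M	(1UL << 21)

static int is_aligned(unsigned long addr, unsigned long size)
{
	return (addr & (size - 1)) == 0;
}

int main(void)
{
	/* Made-up range: a 4K-aligned start that is not 2M-aligned. */
	unsigned long vaddr = 0x7f0000001000UL;
	unsigned long vaddr_end = vaddr + 5 * SZ_2M;
	unsigned int n2m = 0, n4k = 0;

	while (vaddr < vaddr_end) {
		/*
		 * Use a 2M entry only when the address is 2M-aligned and at
		 * least 2M of the range is left; otherwise emit a 4K entry.
		 * This mirrors the test in __set_pages_state().
		 */
		if (is_aligned(vaddr, SZ_2M) && (vaddr_end - vaddr) >= SZ_2M) {
			vaddr += SZ_2M;
			n2m++;
		} else {
			vaddr += SZ_4K;
			n4k++;
		}
	}

	printf("range covered by %u x 2M + %u x 4K PSC entries\n", n2m, n4k);
	return 0;
}

For the sample range this prints 4 x 2M plus 512 x 4K entries: the unaligned head and the short tail are carved into 4K entries, everything in between goes out as 2M entries, which keeps the number of entries per VMGEXIT small.
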
// SPDX-License-Identifier: GPL-2.0 /* * Intel Multiprocessor Specification 1.1 and 1.4 * compliant MP-table parsing routines. * * (c) 1995 Alan Cox, Building #3 <[email protected]> * (c) 1998, 1999, 2000, 2009 Ingo Molnar <[email protected]> * (c) 2008 Alexey Starikovskiy <[email protected]> */ #include <linux/mm.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/memblock.h> #include <linux/kernel_stat.h> #include <linux/mc146818rtc.h> #include <linux/bitops.h> #include <linux/acpi.h> #include <linux/smp.h> #include <linux/pci.h> #include <asm/i8259.h> #include <asm/io_apic.h> #include <asm/acpi.h> #include <asm/irqdomain.h> #include <asm/mtrr.h> #include <asm/mpspec.h> #include <asm/proto.h> #include <asm/bios_ebda.h> #include <asm/e820/api.h> #include <asm/setup.h> #include <asm/smp.h> #include <asm/apic.h> /* * Checksum an MP configuration block. */ static int __init mpf_checksum(unsigned char *mp, int len) { int sum = 0; while (len--) sum += *mp++; return sum & 0xFF; } static void __init MP_processor_info(struct mpc_cpu *m) { char *bootup_cpu = ""; if (!(m->cpuflag & CPU_ENABLED)) { disabled_cpus++; return; } if (m->cpuflag & CPU_BOOTPROCESSOR) bootup_cpu = " (Bootup-CPU)"; pr_info("Processor #%d%s\n", m->apicid, bootup_cpu); generic_processor_info(m->apicid); } #ifdef CONFIG_X86_IO_APIC static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str) { memcpy(str, m->bustype, 6); str[6] = 0; apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str); } static void __init MP_bus_info(struct mpc_bus *m) { char str[7]; mpc_oem_bus_info(m, str); #if MAX_MP_BUSSES < 256 if (m->busid >= MAX_MP_BUSSES) { pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n", m->busid, str, MAX_MP_BUSSES - 1); return; } #endif set_bit(m->busid, mp_bus_not_pci); if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { #ifdef CONFIG_EISA mp_bus_id_to_type[m->busid] = MP_BUS_ISA; #endif } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { clear_bit(m->busid, mp_bus_not_pci); #ifdef CONFIG_EISA mp_bus_id_to_type[m->busid] = MP_BUS_PCI; } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { mp_bus_id_to_type[m->busid] = MP_BUS_EISA; #endif } else pr_warn("Unknown bustype %s - ignoring\n", str); } static void __init MP_ioapic_info(struct mpc_ioapic *m) { struct ioapic_domain_cfg cfg = { .type = IOAPIC_DOMAIN_LEGACY, .ops = &mp_ioapic_irqdomain_ops, }; if (m->flags & MPC_APIC_USABLE) mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg); } static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq) { apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n", mp_irq->irqtype, mp_irq->irqflag & 3, (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus, mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq); } #else /* CONFIG_X86_IO_APIC */ static inline void __init MP_bus_info(struct mpc_bus *m) {} static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {} #endif /* CONFIG_X86_IO_APIC */ static void __init MP_lintsrc_info(struct mpc_lintsrc *m) { apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n", m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid, m->srcbusirq, m->destapic, m->destapiclint); } /* * Read/parse the MPC */ static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str) { if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) { pr_err("MPTABLE: bad signature [%c%c%c%c]!\n", 
mpc->signature[0], mpc->signature[1], mpc->signature[2], mpc->signature[3]); return 0; } if (mpf_checksum((unsigned char *)mpc, mpc->length)) { pr_err("MPTABLE: checksum error!\n"); return 0; } if (mpc->spec != 0x01 && mpc->spec != 0x04) { pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec); return 0; } if (!mpc->lapic) { pr_err("MPTABLE: null local APIC address!\n"); return 0; } memcpy(oem, mpc->oem, 8); oem[8] = 0; pr_info("MPTABLE: OEM ID: %s\n", oem); memcpy(str, mpc->productid, 12); str[12] = 0; pr_info("MPTABLE: Product ID: %s\n", str); pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic); return 1; } static void skip_entry(unsigned char **ptr, int *count, int size) { *ptr += size; *count += size; } static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt) { pr_err("Your mptable is wrong, contact your HW vendor!\n"); pr_cont("type %x\n", *mpt); print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16, 1, mpc, mpc->length, 1); } static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) { char str[16]; char oem[10]; int count = sizeof(*mpc); unsigned char *mpt = ((unsigned char *)mpc) + count; if (!smp_check_mpc(mpc, oem, str)) return 0; /* Initialize the lapic mapping */ if (!acpi_lapic) register_lapic_address(mpc->lapic); if (early) return 1; /* Now process the configuration blocks. */ while (count < mpc->length) { switch (*mpt) { case MP_PROCESSOR: /* ACPI may have already provided this data */ if (!acpi_lapic) MP_processor_info((struct mpc_cpu *)mpt); skip_entry(&mpt, &count, sizeof(struct mpc_cpu)); break; case MP_BUS: MP_bus_info((struct mpc_bus *)mpt); skip_entry(&mpt, &count, sizeof(struct mpc_bus)); break; case MP_IOAPIC: MP_ioapic_info((struct mpc_ioapic *)mpt); skip_entry(&mpt, &count, sizeof(struct mpc_ioapic)); break; case MP_INTSRC: mp_save_irq((struct mpc_intsrc *)mpt); skip_entry(&mpt, &count, sizeof(struct mpc_intsrc)); break; case MP_LINTSRC: MP_lintsrc_info((struct mpc_lintsrc *)mpt); skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc)); break; default: /* wrong mptable */ smp_dump_mptable(mpc, mpt); count = mpc->length; break; } } if (!num_processors) pr_err("MPTABLE: no processors registered!\n"); return num_processors; } #ifdef CONFIG_X86_IO_APIC static int __init ELCR_trigger(unsigned int irq) { unsigned int port; port = PIC_ELCR1 + (irq >> 3); return (inb(port) >> (irq & 7)) & 1; } static void __init construct_default_ioirq_mptable(int mpc_default_type) { struct mpc_intsrc intsrc; int i; int ELCR_fallback = 0; intsrc.type = MP_INTSRC; intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT; intsrc.srcbus = 0; intsrc.dstapic = mpc_ioapic_id(0); intsrc.irqtype = mp_INT; /* * If true, we have an ISA/PCI system with no IRQ entries * in the MP table. To prevent the PCI interrupts from being set up * incorrectly, we try to use the ELCR. The sanity check to see if * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can * never be level sensitive, so we simply see if the ELCR agrees. * If it does, we assume it's valid. */ if (mpc_default_type == 5) { pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13)) pr_err("ELCR contains invalid data... 
not using ELCR\n"); else { pr_info("Using ELCR to identify PCI interrupts\n"); ELCR_fallback = 1; } } for (i = 0; i < 16; i++) { switch (mpc_default_type) { case 2: if (i == 0 || i == 13) continue; /* IRQ0 & IRQ13 not connected */ fallthrough; default: if (i == 2) continue; /* IRQ2 is never connected */ } if (ELCR_fallback) { /* * If the ELCR indicates a level-sensitive interrupt, we * copy that information over to the MP table in the * irqflag field (level sensitive, active high polarity). */ if (ELCR_trigger(i)) { intsrc.irqflag = MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_HIGH; } else { intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT; } } intsrc.srcbusirq = i; intsrc.dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ mp_save_irq(&intsrc); } intsrc.irqtype = mp_ExtINT; intsrc.srcbusirq = 0; intsrc.dstirq = 0; /* 8259A to INTIN0 */ mp_save_irq(&intsrc); } static void __init construct_ioapic_table(int mpc_default_type) { struct mpc_ioapic ioapic; struct mpc_bus bus; bus.type = MP_BUS; bus.busid = 0; switch (mpc_default_type) { default: pr_err("???\nUnknown standard configuration %d\n", mpc_default_type); fallthrough; case 1: case 5: memcpy(bus.bustype, "ISA ", 6); break; case 2: case 6: case 3: memcpy(bus.bustype, "EISA ", 6); break; } MP_bus_info(&bus); if (mpc_default_type > 4) { bus.busid = 1; memcpy(bus.bustype, "PCI ", 6); MP_bus_info(&bus); } ioapic.type = MP_IOAPIC; ioapic.apicid = 2; ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01; ioapic.flags = MPC_APIC_USABLE; ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE; MP_ioapic_info(&ioapic); /* * We set up most of the low 16 IO-APIC pins according to MPS rules. */ construct_default_ioirq_mptable(mpc_default_type); } #else static inline void __init construct_ioapic_table(int mpc_default_type) { } #endif static inline void __init construct_default_ISA_mptable(int mpc_default_type) { struct mpc_cpu processor; struct mpc_lintsrc lintsrc; int linttypes[2] = { mp_ExtINT, mp_NMI }; int i; /* * 2 CPUs, numbered 0 & 1. */ processor.type = MP_PROCESSOR; /* Either an integrated APIC or a discrete 82489DX. */ processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01; processor.cpuflag = CPU_ENABLED; processor.cpufeature = (boot_cpu_data.x86 << 8) | (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping; processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX]; processor.reserved[0] = 0; processor.reserved[1] = 0; for (i = 0; i < 2; i++) { processor.apicid = i; MP_processor_info(&processor); } construct_ioapic_table(mpc_default_type); lintsrc.type = MP_LINTSRC; lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT; lintsrc.srcbusid = 0; lintsrc.srcbusirq = 0; lintsrc.destapic = MP_APIC_ALL; for (i = 0; i < 2; i++) { lintsrc.irqtype = linttypes[i]; lintsrc.destapiclint = i; MP_lintsrc_info(&lintsrc); } } static unsigned long mpf_base; static bool mpf_found; static unsigned long __init get_mpc_size(unsigned long physptr) { struct mpc_table *mpc; unsigned long size; mpc = early_memremap(physptr, PAGE_SIZE); size = mpc->length; early_memunmap(mpc, PAGE_SIZE); apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size); return size; } static int __init check_physptr(struct mpf_intel *mpf, unsigned int early) { struct mpc_table *mpc; unsigned long size; size = get_mpc_size(mpf->physptr); mpc = early_memremap(mpf->physptr, size); /* * Read the physical hardware table. Anything here will * override the defaults. 
*/ if (!smp_read_mpc(mpc, early)) { #ifdef CONFIG_X86_LOCAL_APIC smp_found_config = 0; #endif pr_err("BIOS bug, MP table errors detected!...\n"); pr_cont("... disabling SMP support. (tell your hw vendor)\n"); early_memunmap(mpc, size); return -1; } early_memunmap(mpc, size); if (early) return -1; #ifdef CONFIG_X86_IO_APIC /* * If there are no explicit MP IRQ entries, then we are * broken. We set up most of the low 16 IO-APIC pins to * ISA defaults and hope it will work. */ if (!mp_irq_entries) { struct mpc_bus bus; pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n"); bus.type = MP_BUS; bus.busid = 0; memcpy(bus.bustype, "ISA ", 6); MP_bus_info(&bus); construct_default_ioirq_mptable(0); } #endif return 0; } /* * Scan the memory blocks for an SMP configuration block. */ void __init default_get_smp_config(unsigned int early) { struct mpf_intel *mpf; if (!smp_found_config) return; if (!mpf_found) return; if (acpi_lapic && early) return; /* * MPS doesn't support hyperthreading, aka only have * thread 0 apic id in MPS table */ if (acpi_lapic && acpi_ioapic) return; mpf = early_memremap(mpf_base, sizeof(*mpf)); if (!mpf) { pr_err("MPTABLE: error mapping MP table\n"); return; } pr_info("Intel MultiProcessor Specification v1.%d\n", mpf->specification); #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) if (mpf->feature2 & (1 << 7)) { pr_info(" IMCR and PIC compatibility mode.\n"); pic_mode = 1; } else { pr_info(" Virtual Wire compatibility mode.\n"); pic_mode = 0; } #endif /* * Now see if we need to read further. */ if (mpf->feature1) { if (early) { /* Local APIC has default address */ register_lapic_address(APIC_DEFAULT_PHYS_BASE); goto out; } pr_info("Default MP configuration #%d\n", mpf->feature1); construct_default_ISA_mptable(mpf->feature1); } else if (mpf->physptr) { if (check_physptr(mpf, early)) goto out; } else BUG(); if (!early) pr_info("Processors: %d\n", num_processors); /* * Only use the first configuration found. */ out: early_memunmap(mpf, sizeof(*mpf)); } static void __init smp_reserve_memory(struct mpf_intel *mpf) { memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr)); } static int __init smp_scan_config(unsigned long base, unsigned long length) { unsigned int *bp; struct mpf_intel *mpf; int ret = 0; apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n", base, base + length - 1); BUILD_BUG_ON(sizeof(*mpf) != 16); while (length > 0) { bp = early_memremap(base, length); mpf = (struct mpf_intel *)bp; if ((*bp == SMP_MAGIC_IDENT) && (mpf->length == 1) && !mpf_checksum((unsigned char *)bp, 16) && ((mpf->specification == 1) || (mpf->specification == 4))) { #ifdef CONFIG_X86_LOCAL_APIC smp_found_config = 1; #endif mpf_base = base; mpf_found = true; pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n", base, base + sizeof(*mpf) - 1); memblock_reserve(base, sizeof(*mpf)); if (mpf->physptr) smp_reserve_memory(mpf); ret = 1; } early_memunmap(bp, length); if (ret) break; base += 16; length -= 16; } return ret; } void __init default_find_smp_config(void) { unsigned int address; /* * FIXME: Linux assumes you have 640K of base ram.. * this continues the error... * * 1) Scan the bottom 1K for a signature * 2) Scan the top 1K of base RAM * 3) Scan the 64K of bios */ if (smp_scan_config(0x0, 0x400) || smp_scan_config(639 * 0x400, 0x400) || smp_scan_config(0xF0000, 0x10000)) return; /* * If it is an SMP machine we should know now, unless the * configuration is in an EISA bus machine with an * extended bios data area. 
* * there is a real-mode segmented pointer pointing to the * 4K EBDA area at 0x40E, calculate and scan it here. * * NOTE! There are Linux loaders that will corrupt the EBDA * area, and as such this kind of SMP config may be less * trustworthy, simply because the SMP table may have been * stomped on during early boot. These loaders are buggy and * should be fixed. * * MP1.4 SPEC states to only scan first 1K of 4K EBDA. */ address = get_bios_ebda(); if (address) smp_scan_config(address, 0x400); } #ifdef CONFIG_X86_IO_APIC static u8 __initdata irq_used[MAX_IRQ_SOURCES]; static int __init get_MP_intsrc_index(struct mpc_intsrc *m) { int i; if (m->irqtype != mp_INT) return 0; if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW)) return 0; /* not legacy */ for (i = 0; i < mp_irq_entries; i++) { if (mp_irqs[i].irqtype != mp_INT) continue; if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW)) continue; if (mp_irqs[i].srcbus != m->srcbus) continue; if (mp_irqs[i].srcbusirq != m->srcbusirq) continue; if (irq_used[i]) { /* already claimed */ return -2; } irq_used[i] = 1; return i; } /* not found */ return -1; } #define SPARE_SLOT_NUM 20 static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM]; static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) { int i; apic_printk(APIC_VERBOSE, "OLD "); print_mp_irq_info(m); i = get_MP_intsrc_index(m); if (i > 0) { memcpy(m, &mp_irqs[i], sizeof(*m)); apic_printk(APIC_VERBOSE, "NEW "); print_mp_irq_info(&mp_irqs[i]); return; } if (!i) { /* legacy, do nothing */ return; } if (*nr_m_spare < SPARE_SLOT_NUM) { /* * not found (-1), or duplicated (-2) are invalid entries, * we need to use the slot later */ m_spare[*nr_m_spare] = m; *nr_m_spare += 1; } } static int __init check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count) { if (!mpc_new_phys || count <= mpc_new_length) { WARN(1, "update_mptable: No spare slots (length: %x)\n", count); return -1; } return 0; } #else /* CONFIG_X86_IO_APIC */ static inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {} #endif /* CONFIG_X86_IO_APIC */ static int __init replace_intsrc_all(struct mpc_table *mpc, unsigned long mpc_new_phys, unsigned long mpc_new_length) { #ifdef CONFIG_X86_IO_APIC int i; #endif int count = sizeof(*mpc); int nr_m_spare = 0; unsigned char *mpt = ((unsigned char *)mpc) + count; pr_info("mpc_length %x\n", mpc->length); while (count < mpc->length) { switch (*mpt) { case MP_PROCESSOR: skip_entry(&mpt, &count, sizeof(struct mpc_cpu)); break; case MP_BUS: skip_entry(&mpt, &count, sizeof(struct mpc_bus)); break; case MP_IOAPIC: skip_entry(&mpt, &count, sizeof(struct mpc_ioapic)); break; case MP_INTSRC: check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare); skip_entry(&mpt, &count, sizeof(struct mpc_intsrc)); break; case MP_LINTSRC: skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc)); break; default: /* wrong mptable */ smp_dump_mptable(mpc, mpt); goto out; } } #ifdef CONFIG_X86_IO_APIC for (i = 0; i < mp_irq_entries; i++) { if (irq_used[i]) continue; if (mp_irqs[i].irqtype != mp_INT) continue; if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW)) continue; if (nr_m_spare > 0) { apic_printk(APIC_VERBOSE, "*NEW* found\n"); nr_m_spare--; memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i])); m_spare[nr_m_spare] = NULL; } else { struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; count += sizeof(struct mpc_intsrc); if (check_slot(mpc_new_phys, mpc_new_length, count) < 0) goto out; memcpy(m, &mp_irqs[i], 
sizeof(*m)); mpc->length = count; mpt += sizeof(struct mpc_intsrc); } print_mp_irq_info(&mp_irqs[i]); } #endif out: /* update checksum */ mpc->checksum = 0; mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length); return 0; } int enable_update_mptable; static int __init update_mptable_setup(char *str) { enable_update_mptable = 1; #ifdef CONFIG_PCI pci_routeirq = 1; #endif return 0; } early_param("update_mptable", update_mptable_setup); static unsigned long __initdata mpc_new_phys; static unsigned long mpc_new_length __initdata = 4096; /* alloc_mptable or alloc_mptable=4k */ static int __initdata alloc_mptable; static int __init parse_alloc_mptable_opt(char *p) { enable_update_mptable = 1; #ifdef CONFIG_PCI pci_routeirq = 1; #endif alloc_mptable = 1; if (!p) return 0; mpc_new_length = memparse(p, &p); return 0; } early_param("alloc_mptable", parse_alloc_mptable_opt); void __init e820__memblock_alloc_reserved_mpc_new(void) { if (enable_update_mptable && alloc_mptable) mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4); } static int __init update_mp_table(void) { char str[16]; char oem[10]; struct mpf_intel *mpf; struct mpc_table *mpc, *mpc_new; unsigned long size; if (!enable_update_mptable) return 0; if (!mpf_found) return 0; mpf = early_memremap(mpf_base, sizeof(*mpf)); if (!mpf) { pr_err("MPTABLE: mpf early_memremap() failed\n"); return 0; } /* * Now see if we need to go further. */ if (mpf->feature1) goto do_unmap_mpf; if (!mpf->physptr) goto do_unmap_mpf; size = get_mpc_size(mpf->physptr); mpc = early_memremap(mpf->physptr, size); if (!mpc) { pr_err("MPTABLE: mpc early_memremap() failed\n"); goto do_unmap_mpf; } if (!smp_check_mpc(mpc, oem, str)) goto do_unmap_mpc; pr_info("mpf: %llx\n", (u64)mpf_base); pr_info("physptr: %x\n", mpf->physptr); if (mpc_new_phys && mpc->length > mpc_new_length) { mpc_new_phys = 0; pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n", mpc_new_length); } if (!mpc_new_phys) { unsigned char old, new; /* check if we can change the position */ mpc->checksum = 0; old = mpf_checksum((unsigned char *)mpc, mpc->length); mpc->checksum = 0xff; new = mpf_checksum((unsigned char *)mpc, mpc->length); if (old == new) { pr_info("mpc is readonly, please try alloc_mptable instead\n"); goto do_unmap_mpc; } pr_info("use in-position replacing\n"); } else { mpc_new = early_memremap(mpc_new_phys, mpc_new_length); if (!mpc_new) { pr_err("MPTABLE: new mpc early_memremap() failed\n"); goto do_unmap_mpc; } mpf->physptr = mpc_new_phys; memcpy(mpc_new, mpc, mpc->length); early_memunmap(mpc, size); mpc = mpc_new; size = mpc_new_length; /* check if we can modify that */ if (mpc_new_phys - mpf->physptr) { struct mpf_intel *mpf_new; /* steal 16 bytes from [0, 1k) */ mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new)); if (!mpf_new) { pr_err("MPTABLE: new mpf early_memremap() failed\n"); goto do_unmap_mpc; } pr_info("mpf new: %x\n", 0x400 - 16); memcpy(mpf_new, mpf, 16); early_memunmap(mpf, sizeof(*mpf)); mpf = mpf_new; mpf->physptr = mpc_new_phys; } mpf->checksum = 0; mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16); pr_info("physptr new: %x\n", mpf->physptr); } /* * only replace the one with mp_INT and * MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW, * already in mp_irqs , stored by ... and mp_config_acpi_gsi, * may need pci=routeirq for all coverage */ replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length); do_unmap_mpc: early_memunmap(mpc, size); do_unmap_mpf: early_memunmap(mpf, sizeof(*mpf)); return 0; } late_initcall(update_mp_table);
linux-master
arch/x86/kernel/mpparse.c
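
Both the scan path (smp_scan_config()/smp_check_mpc()) and the rewrite path (update_mp_table()/replace_intsrc_all()) above rely on the same checksum rule: mpf_checksum() byte-sums a block modulo 256, a well-formed block sums to zero, and a writer regenerates the field by clearing it and storing the negated sum of the rest. The following standalone sketch shows that rule on a made-up 16-byte block; the field offsets are purely illustrative and do not match the real struct mpf_intel layout.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Same arithmetic as mpf_checksum(): byte-wise sum modulo 256. */
static uint8_t mp_sum(const uint8_t *p, size_t len)
{
	unsigned int sum = 0;

	while (len--)
		sum += *p++;

	return sum & 0xff;
}

int main(void)
{
	/*
	 * A made-up 16-byte block with its checksum byte kept at the end,
	 * standing in for an MP structure.
	 */
	uint8_t blk[16] = { '_', 'M', 'P', '_', 0x12, 0x34, 0x01, 0x04 };

	/*
	 * Regenerate the checksum the way update_mp_table() does: zero the
	 * field, then store the value that makes the whole block sum to 0.
	 */
	blk[15] = 0;
	blk[15] = (uint8_t)(0u - mp_sum(blk, sizeof(blk)));

	/*
	 * A consumer (cf. smp_scan_config()/smp_check_mpc()) accepts the
	 * block only when the byte-wise sum over all of it is zero.
	 */
	printf("checksum byte 0x%02x, block is %s\n", blk[15],
	       mp_sum(blk, sizeof(blk)) == 0 ? "valid" : "corrupt");

	return 0;
}
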
/* * umip.c Emulation for instruction protected by the User-Mode Instruction * Prevention feature * * Copyright (c) 2017, Intel Corporation. * Ricardo Neri <[email protected]> */ #include <linux/uaccess.h> #include <asm/umip.h> #include <asm/traps.h> #include <asm/insn.h> #include <asm/insn-eval.h> #include <linux/ratelimit.h> #undef pr_fmt #define pr_fmt(fmt) "umip: " fmt /** DOC: Emulation for User-Mode Instruction Prevention (UMIP) * * User-Mode Instruction Prevention is a security feature present in recent * x86 processors that, when enabled, prevents a group of instructions (SGDT, * SIDT, SLDT, SMSW and STR) from being run in user mode by issuing a general * protection fault if the instruction is executed with CPL > 0. * * Rather than relaying to the user space the general protection fault caused by * the UMIP-protected instructions (in the form of a SIGSEGV signal), it can be * trapped and emulate the result of such instructions to provide dummy values. * This allows to both conserve the current kernel behavior and not reveal the * system resources that UMIP intends to protect (i.e., the locations of the * global descriptor and interrupt descriptor tables, the segment selectors of * the local descriptor table, the value of the task state register and the * contents of the CR0 register). * * This emulation is needed because certain applications (e.g., WineHQ and * DOSEMU2) rely on this subset of instructions to function. * * The instructions protected by UMIP can be split in two groups. Those which * return a kernel memory address (SGDT and SIDT) and those which return a * value (SLDT, STR and SMSW). * * For the instructions that return a kernel memory address, applications * such as WineHQ rely on the result being located in the kernel memory space, * not the actual location of the table. The result is emulated as a hard-coded * value that, lies close to the top of the kernel memory. The limit for the GDT * and the IDT are set to zero. * * The instruction SMSW is emulated to return the value that the register CR0 * has at boot time as set in the head_32. * SLDT and STR are emulated to return the values that the kernel programmatically * assigns: * - SLDT returns (GDT_ENTRY_LDT * 8) if an LDT has been set, 0 if not. * - STR returns (GDT_ENTRY_TSS * 8). * * Emulation is provided for both 32-bit and 64-bit processes. * * Care is taken to appropriately emulate the results when segmentation is * used. That is, rather than relying on USER_DS and USER_CS, the function * insn_get_addr_ref() inspects the segment descriptor pointed by the * registers in pt_regs. This ensures that we correctly obtain the segment * base address and the address and operand sizes even if the user space * application uses a local descriptor table. */ #define UMIP_DUMMY_GDT_BASE 0xfffffffffffe0000ULL #define UMIP_DUMMY_IDT_BASE 0xffffffffffff0000ULL /* * The SGDT and SIDT instructions store the contents of the global descriptor * table and interrupt table registers, respectively. The destination is a * memory operand of X+2 bytes. X bytes are used to store the base address of * the table and 2 bytes are used to store the limit. In 32-bit processes X * has a value of 4, in 64-bit processes X has a value of 8. 
*/ #define UMIP_GDT_IDT_BASE_SIZE_64BIT 8 #define UMIP_GDT_IDT_BASE_SIZE_32BIT 4 #define UMIP_GDT_IDT_LIMIT_SIZE 2 #define UMIP_INST_SGDT 0 /* 0F 01 /0 */ #define UMIP_INST_SIDT 1 /* 0F 01 /1 */ #define UMIP_INST_SMSW 2 /* 0F 01 /4 */ #define UMIP_INST_SLDT 3 /* 0F 00 /0 */ #define UMIP_INST_STR 4 /* 0F 00 /1 */ static const char * const umip_insns[5] = { [UMIP_INST_SGDT] = "SGDT", [UMIP_INST_SIDT] = "SIDT", [UMIP_INST_SMSW] = "SMSW", [UMIP_INST_SLDT] = "SLDT", [UMIP_INST_STR] = "STR", }; #define umip_pr_err(regs, fmt, ...) \ umip_printk(regs, KERN_ERR, fmt, ##__VA_ARGS__) #define umip_pr_debug(regs, fmt, ...) \ umip_printk(regs, KERN_DEBUG, fmt, ##__VA_ARGS__) /** * umip_printk() - Print a rate-limited message * @regs: Register set with the context in which the warning is printed * @log_level: Kernel log level to print the message * @fmt: The text string to print * * Print the text contained in @fmt. The print rate is limited to bursts of 5 * messages every two minutes. The purpose of this customized version of * printk() is to print messages when user space processes use any of the * UMIP-protected instructions. Thus, the printed text is prepended with the * task name and process ID number of the current task as well as the * instruction and stack pointers in @regs as seen when entering kernel mode. * * Returns: * * None. */ static __printf(3, 4) void umip_printk(const struct pt_regs *regs, const char *log_level, const char *fmt, ...) { /* Bursts of 5 messages every two minutes */ static DEFINE_RATELIMIT_STATE(ratelimit, 2 * 60 * HZ, 5); struct task_struct *tsk = current; struct va_format vaf; va_list args; if (!__ratelimit(&ratelimit)) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk("%s" pr_fmt("%s[%d] ip:%lx sp:%lx: %pV"), log_level, tsk->comm, task_pid_nr(tsk), regs->ip, regs->sp, &vaf); va_end(args); } /** * identify_insn() - Identify a UMIP-protected instruction * @insn: Instruction structure with opcode and ModRM byte. * * From the opcode and ModRM.reg in @insn identify, if any, a UMIP-protected * instruction that can be emulated. * * Returns: * * On success, a constant identifying a specific UMIP-protected instruction that * can be emulated. * * -EINVAL on error or when not an UMIP-protected instruction that can be * emulated. */ static int identify_insn(struct insn *insn) { /* By getting modrm we also get the opcode. */ insn_get_modrm(insn); if (!insn->modrm.nbytes) return -EINVAL; /* All the instructions of interest start with 0x0f. */ if (insn->opcode.bytes[0] != 0xf) return -EINVAL; if (insn->opcode.bytes[1] == 0x1) { switch (X86_MODRM_REG(insn->modrm.value)) { case 0: return UMIP_INST_SGDT; case 1: return UMIP_INST_SIDT; case 4: return UMIP_INST_SMSW; default: return -EINVAL; } } else if (insn->opcode.bytes[1] == 0x0) { if (X86_MODRM_REG(insn->modrm.value) == 0) return UMIP_INST_SLDT; else if (X86_MODRM_REG(insn->modrm.value) == 1) return UMIP_INST_STR; else return -EINVAL; } else { return -EINVAL; } } /** * emulate_umip_insn() - Emulate UMIP instructions and return dummy values * @insn: Instruction structure with operands * @umip_inst: A constant indicating the instruction to emulate * @data: Buffer into which the dummy result is stored * @data_size: Size of the emulated result * @x86_64: true if process is 64-bit, false otherwise * * Emulate an instruction protected by UMIP and provide a dummy result. The * result of the emulation is saved in @data. The size of the results depends * on both the instruction and type of operand (register vs memory address). 
* The size of the result is updated in @data_size. Caller is responsible * of providing a @data buffer of at least UMIP_GDT_IDT_BASE_SIZE + * UMIP_GDT_IDT_LIMIT_SIZE bytes. * * Returns: * * 0 on success, -EINVAL on error while emulating. */ static int emulate_umip_insn(struct insn *insn, int umip_inst, unsigned char *data, int *data_size, bool x86_64) { if (!data || !data_size || !insn) return -EINVAL; /* * These two instructions return the base address and limit of the * global and interrupt descriptor table, respectively. According to the * Intel Software Development manual, the base address can be 24-bit, * 32-bit or 64-bit. Limit is always 16-bit. If the operand size is * 16-bit, the returned value of the base address is supposed to be a * zero-extended 24-byte number. However, it seems that a 32-byte number * is always returned irrespective of the operand size. */ if (umip_inst == UMIP_INST_SGDT || umip_inst == UMIP_INST_SIDT) { u64 dummy_base_addr; u16 dummy_limit = 0; /* SGDT and SIDT do not use registers operands. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) return -EINVAL; if (umip_inst == UMIP_INST_SGDT) dummy_base_addr = UMIP_DUMMY_GDT_BASE; else dummy_base_addr = UMIP_DUMMY_IDT_BASE; /* * 64-bit processes use the entire dummy base address. * 32-bit processes use the lower 32 bits of the base address. * dummy_base_addr is always 64 bits, but we memcpy the correct * number of bytes from it to the destination. */ if (x86_64) *data_size = UMIP_GDT_IDT_BASE_SIZE_64BIT; else *data_size = UMIP_GDT_IDT_BASE_SIZE_32BIT; memcpy(data + 2, &dummy_base_addr, *data_size); *data_size += UMIP_GDT_IDT_LIMIT_SIZE; memcpy(data, &dummy_limit, UMIP_GDT_IDT_LIMIT_SIZE); } else if (umip_inst == UMIP_INST_SMSW || umip_inst == UMIP_INST_SLDT || umip_inst == UMIP_INST_STR) { unsigned long dummy_value; if (umip_inst == UMIP_INST_SMSW) { dummy_value = CR0_STATE; } else if (umip_inst == UMIP_INST_STR) { dummy_value = GDT_ENTRY_TSS * 8; } else if (umip_inst == UMIP_INST_SLDT) { #ifdef CONFIG_MODIFY_LDT_SYSCALL down_read(&current->mm->context.ldt_usr_sem); if (current->mm->context.ldt) dummy_value = GDT_ENTRY_LDT * 8; else dummy_value = 0; up_read(&current->mm->context.ldt_usr_sem); #else dummy_value = 0; #endif } /* * For these 3 instructions, the number * of bytes to be copied in the result buffer is determined * by whether the operand is a register or a memory location. * If operand is a register, return as many bytes as the operand * size. If operand is memory, return only the two least * significant bytes. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) *data_size = insn->opnd_bytes; else *data_size = 2; memcpy(data, &dummy_value, *data_size); } else { return -EINVAL; } return 0; } /** * force_sig_info_umip_fault() - Force a SIGSEGV with SEGV_MAPERR * @addr: Address that caused the signal * @regs: Register set containing the instruction pointer * * Force a SIGSEGV signal with SEGV_MAPERR as the error code. This function is * intended to be used to provide a segmentation fault when the result of the * UMIP emulation could not be copied to the user space memory. * * Returns: none */ static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs) { struct task_struct *tsk = current; tsk->thread.cr2 = (unsigned long)addr; tsk->thread.error_code = X86_PF_USER | X86_PF_WRITE; tsk->thread.trap_nr = X86_TRAP_PF; force_sig_fault(SIGSEGV, SEGV_MAPERR, addr); if (!(show_unhandled_signals && unhandled_signal(tsk, SIGSEGV))) return; umip_pr_err(regs, "segfault in emulation. 
error%x\n", X86_PF_USER | X86_PF_WRITE); } /** * fixup_umip_exception() - Fixup a general protection fault caused by UMIP * @regs: Registers as saved when entering the #GP handler * * The instructions SGDT, SIDT, STR, SMSW and SLDT cause a general protection * fault if executed with CPL > 0 (i.e., from user space). This function fixes * the exception up and provides dummy results for SGDT, SIDT and SMSW; STR * and SLDT are not fixed up. * * If operands are memory addresses, results are copied to user-space memory as * indicated by the instruction pointed by eIP using the registers indicated in * the instruction operands. If operands are registers, results are copied into * the context that was saved when entering kernel mode. * * Returns: * * True if emulation was successful; false if not. */ bool fixup_umip_exception(struct pt_regs *regs) { int nr_copied, reg_offset, dummy_data_size, umip_inst; /* 10 bytes is the maximum size of the result of UMIP instructions */ unsigned char dummy_data[10] = { 0 }; unsigned char buf[MAX_INSN_SIZE]; unsigned long *reg_addr; void __user *uaddr; struct insn insn; if (!regs) return false; /* * Give up on emulation if fetching the instruction failed. Should a * page fault or a #GP be issued? */ nr_copied = insn_fetch_from_user(regs, buf); if (nr_copied <= 0) return false; if (!insn_decode_from_regs(&insn, regs, buf, nr_copied)) return false; umip_inst = identify_insn(&insn); if (umip_inst < 0) return false; umip_pr_debug(regs, "%s instruction cannot be used by applications.\n", umip_insns[umip_inst]); umip_pr_debug(regs, "For now, expensive software emulation returns the result.\n"); if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size, user_64bit_mode(regs))) return false; /* * If operand is a register, write result to the copy of the register * value that was pushed to the stack when entering into kernel mode. * Upon exit, the value we write will be restored to the actual hardware * register. */ if (X86_MODRM_MOD(insn.modrm.value) == 3) { reg_offset = insn_get_modrm_rm_off(&insn, regs); /* * Negative values are usually errors. In memory addressing, * the exception is -EDOM. Since we expect a register operand, * all negative values are errors. */ if (reg_offset < 0) return false; reg_addr = (unsigned long *)((unsigned long)regs + reg_offset); memcpy(reg_addr, dummy_data, dummy_data_size); } else { uaddr = insn_get_addr_ref(&insn, regs); if ((unsigned long)uaddr == -1L) return false; nr_copied = copy_to_user(uaddr, dummy_data, dummy_data_size); if (nr_copied > 0) { /* * If copy fails, send a signal and tell caller that * fault was fixed up. */ force_sig_info_umip_fault(uaddr, regs); return true; } } /* increase IP to let the program keep going */ regs->ip += insn.length; return true; }
linux-master
arch/x86/kernel/umip.c
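With the emulation in umip.c above, SGDT, SIDT and SMSW executed from user space on a UMIP-enabled kernel return fixed dummy values instead of faulting. The probe below is a minimal user-space sketch of that behaviour; it assumes x86-64 and GCC/Clang inline assembly and is not part of the kernel sources.

/* User-space probe for the UMIP emulation described above.
 * Assumes x86-64 and GCC/Clang inline asm; purely illustrative. */
#include <stdint.h>
#include <stdio.h>

struct __attribute__((packed)) desc_ptr {
	uint16_t limit;
	uint64_t base;
};

int main(void)
{
	struct desc_ptr gdt = { 0, 0 }, idt = { 0, 0 };
	unsigned long msw = 0;

	asm volatile("sgdt %0" : "=m" (gdt));
	asm volatile("sidt %0" : "=m" (idt));
	asm volatile("smsw %0" : "=r" (msw));

	/* With UMIP emulation active, base/limit are the hard-coded dummy
	 * values and msw reflects the boot-time CR0 image, not live state. */
	printf("SGDT: base=%#lx limit=%#x\n", (unsigned long)gdt.base, gdt.limit);
	printf("SIDT: base=%#lx limit=%#x\n", (unsigned long)idt.base, idt.limit);
	printf("SMSW: %#lx\n", msw);
	return 0;
}

On such a kernel, the printed bases should correspond to the UMIP_DUMMY_GDT_BASE and UMIP_DUMMY_IDT_BASE constants above, with limits of zero.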
// SPDX-License-Identifier: GPL-2.0 /* * I/O delay strategies for inb_p/outb_p * * Allow for a DMI based override of port 0x80, needed for certain HP laptops * and possibly other systems. Also allow for the gradual elimination of * outb_p/inb_p API uses. */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/dmi.h> #include <linux/io.h> #define IO_DELAY_TYPE_0X80 0 #define IO_DELAY_TYPE_0XED 1 #define IO_DELAY_TYPE_UDELAY 2 #define IO_DELAY_TYPE_NONE 3 #if defined(CONFIG_IO_DELAY_0X80) #define DEFAULT_IO_DELAY_TYPE IO_DELAY_TYPE_0X80 #elif defined(CONFIG_IO_DELAY_0XED) #define DEFAULT_IO_DELAY_TYPE IO_DELAY_TYPE_0XED #elif defined(CONFIG_IO_DELAY_UDELAY) #define DEFAULT_IO_DELAY_TYPE IO_DELAY_TYPE_UDELAY #elif defined(CONFIG_IO_DELAY_NONE) #define DEFAULT_IO_DELAY_TYPE IO_DELAY_TYPE_NONE #endif int io_delay_type __read_mostly = DEFAULT_IO_DELAY_TYPE; static int __initdata io_delay_override; /* * Paravirt wants native_io_delay to be a constant. */ void native_io_delay(void) { switch (io_delay_type) { default: case IO_DELAY_TYPE_0X80: asm volatile ("outb %al, $0x80"); break; case IO_DELAY_TYPE_0XED: asm volatile ("outb %al, $0xed"); break; case IO_DELAY_TYPE_UDELAY: /* * 2 usecs is an upper-bound for the outb delay but * note that udelay doesn't have the bus-level * side-effects that outb does, nor does udelay() have * precise timings during very early bootup (the delays * are shorter until calibrated): */ udelay(2); break; case IO_DELAY_TYPE_NONE: break; } } EXPORT_SYMBOL(native_io_delay); static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id) { if (io_delay_type == IO_DELAY_TYPE_0X80) { pr_notice("%s: using 0xed I/O delay port\n", id->ident); io_delay_type = IO_DELAY_TYPE_0XED; } return 0; } /* * Quirk table for systems that misbehave (lock up, etc.) if port * 0x80 is used: */ static const struct dmi_system_id io_delay_0xed_port_dmi_table[] __initconst = { { .callback = dmi_io_delay_0xed_port, .ident = "Compaq Presario V6000", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), DMI_MATCH(DMI_BOARD_NAME, "30B7") } }, { .callback = dmi_io_delay_0xed_port, .ident = "HP Pavilion dv9000z", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), DMI_MATCH(DMI_BOARD_NAME, "30B9") } }, { .callback = dmi_io_delay_0xed_port, .ident = "HP Pavilion dv6000", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), DMI_MATCH(DMI_BOARD_NAME, "30B8") } }, { .callback = dmi_io_delay_0xed_port, .ident = "HP Pavilion tx1000", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), DMI_MATCH(DMI_BOARD_NAME, "30BF") } }, { .callback = dmi_io_delay_0xed_port, .ident = "Presario F700", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"), DMI_MATCH(DMI_BOARD_NAME, "30D3") } }, { } }; void __init io_delay_init(void) { if (!io_delay_override) dmi_check_system(io_delay_0xed_port_dmi_table); } static int __init io_delay_param(char *s) { if (!s) return -EINVAL; if (!strcmp(s, "0x80")) io_delay_type = IO_DELAY_TYPE_0X80; else if (!strcmp(s, "0xed")) io_delay_type = IO_DELAY_TYPE_0XED; else if (!strcmp(s, "udelay")) io_delay_type = IO_DELAY_TYPE_UDELAY; else if (!strcmp(s, "none")) io_delay_type = IO_DELAY_TYPE_NONE; else return -EINVAL; io_delay_override = 1; return 0; } early_param("io_delay", io_delay_param);
linux-master
arch/x86/kernel/io_delay.c
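For comparison with native_io_delay() above, the traditional port-0x80 delay write can be reproduced from user space once I/O permission for that port is granted. This is a minimal sketch using glibc's <sys/io.h> on x86 Linux; it needs root (CAP_SYS_RAWIO) and is illustrative only.

/* Illustrative user-space version of the port-0x80 delay write.
 * Requires x86 Linux, glibc <sys/io.h>, and root/CAP_SYS_RAWIO. */
#include <stdio.h>
#include <sys/io.h>

int main(void)
{
	/* Ask the kernel for access to I/O port 0x80 (one port). */
	if (ioperm(0x80, 1, 1)) {
		perror("ioperm");
		return 1;
	}

	/* The written value is irrelevant; the bus transaction is the delay. */
	outb(0, 0x80);

	ioperm(0x80, 1, 0);	/* drop the permission again */
	return 0;
}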
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (C) 2007 Alan Stern * Copyright (C) 2009 IBM Corporation * Copyright (C) 2009 Frederic Weisbecker <[email protected]> * * Authors: Alan Stern <[email protected]> * K.Prasad <[email protected]> * Frederic Weisbecker <[email protected]> */ /* * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, * using the CPU's debug registers. */ #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <linux/irqflags.h> #include <linux/notifier.h> #include <linux/kallsyms.h> #include <linux/kprobes.h> #include <linux/percpu.h> #include <linux/kdebug.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/smp.h> #include <asm/hw_breakpoint.h> #include <asm/processor.h> #include <asm/debugreg.h> #include <asm/user.h> #include <asm/desc.h> #include <asm/tlbflush.h> /* Per cpu debug control register value */ DEFINE_PER_CPU(unsigned long, cpu_dr7); EXPORT_PER_CPU_SYMBOL(cpu_dr7); /* Per cpu debug address registers values */ static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]); /* * Stores the breakpoints currently in use on each breakpoint address * register for each cpus */ static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]); static inline unsigned long __encode_dr7(int drnum, unsigned int len, unsigned int type) { unsigned long bp_info; bp_info = (len | type) & 0xf; bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE); bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE)); return bp_info; } /* * Encode the length, type, Exact, and Enable bits for a particular breakpoint * as stored in debug register 7. */ unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type) { return __encode_dr7(drnum, len, type) | DR_GLOBAL_SLOWDOWN; } /* * Decode the length and type bits for a particular breakpoint as * stored in debug register 7. Return the "enabled" status. */ int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type) { int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE); *len = (bp_info & 0xc) | 0x40; *type = (bp_info & 0x3) | 0x80; return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3; } /* * Install a perf counter breakpoint. * * We seek a free debug address register and use it for this * breakpoint. Eventually we enable it in the debug control register. * * Atomic: we hold the counter->ctx->lock and we only handle variables * and registers local to this cpu. */ int arch_install_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned long *dr7; int i; lockdep_assert_irqs_disabled(); for (i = 0; i < HBP_NUM; i++) { struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); if (!*slot) { *slot = bp; break; } } if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) return -EBUSY; set_debugreg(info->address, i); __this_cpu_write(cpu_debugreg[i], info->address); dr7 = this_cpu_ptr(&cpu_dr7); *dr7 |= encode_dr7(i, info->len, info->type); /* * Ensure we first write cpu_dr7 before we set the DR7 register. * This ensures an NMI never see cpu_dr7 0 when DR7 is not. */ barrier(); set_debugreg(*dr7, 7); if (info->mask) amd_set_dr_addr_mask(info->mask, i); return 0; } /* * Uninstall the breakpoint contained in the given counter. * * First we search the debug address register it uses and then we disable * it. * * Atomic: we hold the counter->ctx->lock and we only handle variables * and registers local to this cpu. 
*/ void arch_uninstall_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned long dr7; int i; lockdep_assert_irqs_disabled(); for (i = 0; i < HBP_NUM; i++) { struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); if (*slot == bp) { *slot = NULL; break; } } if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) return; dr7 = this_cpu_read(cpu_dr7); dr7 &= ~__encode_dr7(i, info->len, info->type); set_debugreg(dr7, 7); if (info->mask) amd_set_dr_addr_mask(0, i); /* * Ensure the write to cpu_dr7 is after we've set the DR7 register. * This ensures an NMI never see cpu_dr7 0 when DR7 is not. */ barrier(); this_cpu_write(cpu_dr7, dr7); } static int arch_bp_generic_len(int x86_len) { switch (x86_len) { case X86_BREAKPOINT_LEN_1: return HW_BREAKPOINT_LEN_1; case X86_BREAKPOINT_LEN_2: return HW_BREAKPOINT_LEN_2; case X86_BREAKPOINT_LEN_4: return HW_BREAKPOINT_LEN_4; #ifdef CONFIG_X86_64 case X86_BREAKPOINT_LEN_8: return HW_BREAKPOINT_LEN_8; #endif default: return -EINVAL; } } int arch_bp_generic_fields(int x86_len, int x86_type, int *gen_len, int *gen_type) { int len; /* Type */ switch (x86_type) { case X86_BREAKPOINT_EXECUTE: if (x86_len != X86_BREAKPOINT_LEN_X) return -EINVAL; *gen_type = HW_BREAKPOINT_X; *gen_len = sizeof(long); return 0; case X86_BREAKPOINT_WRITE: *gen_type = HW_BREAKPOINT_W; break; case X86_BREAKPOINT_RW: *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; break; default: return -EINVAL; } /* Len */ len = arch_bp_generic_len(x86_len); if (len < 0) return -EINVAL; *gen_len = len; return 0; } /* * Check for virtual address in kernel space. */ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { unsigned long va; int len; va = hw->address; len = arch_bp_generic_len(hw->len); WARN_ON_ONCE(len < 0); /* * We don't need to worry about va + len - 1 overflowing: * we already require that va is aligned to a multiple of len. */ return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX); } /* * Checks whether the range [addr, end], overlaps the area [base, base + size). */ static inline bool within_area(unsigned long addr, unsigned long end, unsigned long base, unsigned long size) { return end >= base && addr < (base + size); } /* * Checks whether the range from addr to end, inclusive, overlaps the fixed * mapped CPU entry area range or other ranges used for CPU entry. */ static inline bool within_cpu_entry(unsigned long addr, unsigned long end) { int cpu; /* CPU entry erea is always used for CPU entry */ if (within_area(addr, end, CPU_ENTRY_AREA_BASE, CPU_ENTRY_AREA_MAP_SIZE)) return true; /* * When FSGSBASE is enabled, paranoid_entry() fetches the per-CPU * GSBASE value via __per_cpu_offset or pcpu_unit_offsets. */ #ifdef CONFIG_SMP if (within_area(addr, end, (unsigned long)__per_cpu_offset, sizeof(unsigned long) * nr_cpu_ids)) return true; #else if (within_area(addr, end, (unsigned long)&pcpu_unit_offsets, sizeof(pcpu_unit_offsets))) return true; #endif for_each_possible_cpu(cpu) { /* The original rw GDT is being used after load_direct_gdt() */ if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu), GDT_SIZE)) return true; /* * cpu_tss_rw is not directly referenced by hardware, but * cpu_tss_rw is also used in CPU entry code, */ if (within_area(addr, end, (unsigned long)&per_cpu(cpu_tss_rw, cpu), sizeof(struct tss_struct))) return true; /* * cpu_tlbstate.user_pcid_flush_mask is used for CPU entry. * If a data breakpoint on it, it will cause an unwanted #DB. 
* Protect the full cpu_tlbstate structure to be sure. */ if (within_area(addr, end, (unsigned long)&per_cpu(cpu_tlbstate, cpu), sizeof(struct tlb_state))) return true; /* * When in guest (X86_FEATURE_HYPERVISOR), local_db_save() * will read per-cpu cpu_dr7 before clear dr7 register. */ if (within_area(addr, end, (unsigned long)&per_cpu(cpu_dr7, cpu), sizeof(cpu_dr7))) return true; } return false; } static int arch_build_bp_info(struct perf_event *bp, const struct perf_event_attr *attr, struct arch_hw_breakpoint *hw) { unsigned long bp_end; bp_end = attr->bp_addr + attr->bp_len - 1; if (bp_end < attr->bp_addr) return -EINVAL; /* * Prevent any breakpoint of any type that overlaps the CPU * entry area and data. This protects the IST stacks and also * reduces the chance that we ever find out what happens if * there's a data breakpoint on the GDT, IDT, or TSS. */ if (within_cpu_entry(attr->bp_addr, bp_end)) return -EINVAL; hw->address = attr->bp_addr; hw->mask = 0; /* Type */ switch (attr->bp_type) { case HW_BREAKPOINT_W: hw->type = X86_BREAKPOINT_WRITE; break; case HW_BREAKPOINT_W | HW_BREAKPOINT_R: hw->type = X86_BREAKPOINT_RW; break; case HW_BREAKPOINT_X: /* * We don't allow kernel breakpoints in places that are not * acceptable for kprobes. On non-kprobes kernels, we don't * allow kernel breakpoints at all. */ if (attr->bp_addr >= TASK_SIZE_MAX) { if (within_kprobe_blacklist(attr->bp_addr)) return -EINVAL; } hw->type = X86_BREAKPOINT_EXECUTE; /* * x86 inst breakpoints need to have a specific undefined len. * But we still need to check userspace is not trying to setup * an unsupported length, to get a range breakpoint for example. */ if (attr->bp_len == sizeof(long)) { hw->len = X86_BREAKPOINT_LEN_X; return 0; } fallthrough; default: return -EINVAL; } /* Len */ switch (attr->bp_len) { case HW_BREAKPOINT_LEN_1: hw->len = X86_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: hw->len = X86_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_4: hw->len = X86_BREAKPOINT_LEN_4; break; #ifdef CONFIG_X86_64 case HW_BREAKPOINT_LEN_8: hw->len = X86_BREAKPOINT_LEN_8; break; #endif default: /* AMD range breakpoint */ if (!is_power_of_2(attr->bp_len)) return -EINVAL; if (attr->bp_addr & (attr->bp_len - 1)) return -EINVAL; if (!boot_cpu_has(X86_FEATURE_BPEXT)) return -EOPNOTSUPP; /* * It's impossible to use a range breakpoint to fake out * user vs kernel detection because bp_len - 1 can't * have the high bit set. If we ever allow range instruction * breakpoints, then we'll have to check for kprobe-blacklisted * addresses anywhere in the range. */ hw->mask = attr->bp_len - 1; hw->len = X86_BREAKPOINT_LEN_1; } return 0; } /* * Validate the arch-specific HW Breakpoint register settings */ int hw_breakpoint_arch_parse(struct perf_event *bp, const struct perf_event_attr *attr, struct arch_hw_breakpoint *hw) { unsigned int align; int ret; ret = arch_build_bp_info(bp, attr, hw); if (ret) return ret; switch (hw->len) { case X86_BREAKPOINT_LEN_1: align = 0; if (hw->mask) align = hw->mask; break; case X86_BREAKPOINT_LEN_2: align = 1; break; case X86_BREAKPOINT_LEN_4: align = 3; break; #ifdef CONFIG_X86_64 case X86_BREAKPOINT_LEN_8: align = 7; break; #endif default: WARN_ON_ONCE(1); return -EINVAL; } /* * Check that the low-order bits of the address are appropriate * for the alignment implied by len. 
*/ if (hw->address & align) return -EINVAL; return 0; } /* * Release the user breakpoints used by ptrace */ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { int i; struct thread_struct *t = &tsk->thread; for (i = 0; i < HBP_NUM; i++) { unregister_hw_breakpoint(t->ptrace_bps[i]); t->ptrace_bps[i] = NULL; } t->virtual_dr6 = 0; t->ptrace_dr7 = 0; } void hw_breakpoint_restore(void) { set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0); set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1); set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2); set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3); set_debugreg(DR6_RESERVED, 6); set_debugreg(__this_cpu_read(cpu_dr7), 7); } EXPORT_SYMBOL_GPL(hw_breakpoint_restore); /* * Handle debug exception notifications. * * Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below. * * NOTIFY_DONE returned if one of the following conditions is true. * i) When the causative address is from user-space and the exception * is a valid one, i.e. not triggered as a result of lazy debug register * switching * ii) When there are more bits than trap<n> set in DR6 register (such * as BD, BS or BT) indicating that more than one debug condition is * met and requires some more action in do_debug(). * * NOTIFY_STOP returned for all other cases * */ static int hw_breakpoint_handler(struct die_args *args) { int i, rc = NOTIFY_STOP; struct perf_event *bp; unsigned long *dr6_p; unsigned long dr6; bool bpx; /* The DR6 value is pointed by args->err */ dr6_p = (unsigned long *)ERR_PTR(args->err); dr6 = *dr6_p; /* Do an early return if no trap bits are set in DR6 */ if ((dr6 & DR_TRAP_BITS) == 0) return NOTIFY_DONE; /* Handle all the breakpoints that were triggered */ for (i = 0; i < HBP_NUM; ++i) { if (likely(!(dr6 & (DR_TRAP0 << i)))) continue; bp = this_cpu_read(bp_per_reg[i]); if (!bp) continue; bpx = bp->hw.info.type == X86_BREAKPOINT_EXECUTE; /* * TF and data breakpoints are traps and can be merged, however * instruction breakpoints are faults and will be raised * separately. * * However DR6 can indicate both TF and instruction * breakpoints. In that case take TF as that has precedence and * delay the instruction breakpoint for the next exception. */ if (bpx && (dr6 & DR_STEP)) continue; /* * Reset the 'i'th TRAP bit in dr6 to denote completion of * exception handling */ (*dr6_p) &= ~(DR_TRAP0 << i); perf_bp_event(bp, args->regs); /* * Set up resume flag to avoid breakpoint recursion when * returning back to origin. */ if (bpx) args->regs->flags |= X86_EFLAGS_RF; } /* * Further processing in do_debug() is needed for a) user-space * breakpoints (to generate signals) and b) when the system has * taken exception due to multiple causes */ if ((current->thread.virtual_dr6 & DR_TRAP_BITS) || (dr6 & (~DR_TRAP_BITS))) rc = NOTIFY_DONE; return rc; } /* * Handle debug exception notifications. */ int hw_breakpoint_exceptions_notify( struct notifier_block *unused, unsigned long val, void *data) { if (val != DIE_DEBUG) return NOTIFY_DONE; return hw_breakpoint_handler(data); } void hw_breakpoint_pmu_read(struct perf_event *bp) { /* TODO */ }
linux-master
arch/x86/kernel/hw_breakpoint.c
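User-space hardware watchpoints reach arch_build_bp_info() and arch_install_hw_breakpoint() above through perf_event_open(). Below is a minimal sketch that requests a 4-byte write breakpoint on a static variable and reads back the hit count; error handling is trimmed and the exact attribute choices are illustrative.

/* Minimal perf_event_open() hardware write-watchpoint, exercising the
 * arch_build_bp_info()/arch_install_hw_breakpoint() path above. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static volatile int watched;	/* the breakpoint target */

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type	= PERF_TYPE_BREAKPOINT;
	attr.size	= sizeof(attr);
	attr.bp_type	= HW_BREAKPOINT_W;	/* becomes X86_BREAKPOINT_WRITE */
	attr.bp_addr	= (unsigned long)&watched;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;	/* 4-byte DR7 length encoding   */
	attr.exclude_kernel = 1;
	attr.exclude_hv	= 1;

	fd = syscall(SYS_perf_event_open, &attr, 0 /* this thread */,
		     -1 /* any cpu */, -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	watched = 42;	/* each write to 'watched' counts as one event */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) != sizeof(count))
		perror("read");
	printf("write hits: %llu\n", count);
	close(fd);
	return 0;
}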
// SPDX-License-Identifier: GPL-2.0 #include <linux/compat.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/syscalls.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/smp.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/utsname.h> #include <linux/personality.h> #include <linux/random.h> #include <linux/uaccess.h> #include <linux/elf.h> #include <asm/elf.h> #include <asm/ia32.h> /* * Align a virtual address to avoid aliasing in the I$ on AMD F15h. */ static unsigned long get_align_mask(void) { /* handle 32- and 64-bit case with a single conditional */ if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32()))) return 0; if (!(current->flags & PF_RANDOMIZE)) return 0; return va_align.mask; } /* * To avoid aliasing in the I$ on AMD F15h, the bits defined by the * va_align.bits, [12:upper_bit), are set to a random value instead of * zeroing them. This random value is computed once per boot. This form * of ASLR is known as "per-boot ASLR". * * To achieve this, the random value is added to the info.align_offset * value before calling vm_unmapped_area() or ORed directly to the * address. */ static unsigned long get_align_bits(void) { return va_align.bits & get_align_mask(); } unsigned long align_vdso_addr(unsigned long addr) { unsigned long align_mask = get_align_mask(); addr = (addr + align_mask) & ~align_mask; return addr | get_align_bits(); } static int __init control_va_addr_alignment(char *str) { /* guard against enabling this on other CPU families */ if (va_align.flags < 0) return 1; if (*str == 0) return 1; if (!strcmp(str, "32")) va_align.flags = ALIGN_VA_32; else if (!strcmp(str, "64")) va_align.flags = ALIGN_VA_64; else if (!strcmp(str, "off")) va_align.flags = 0; else if (!strcmp(str, "on")) va_align.flags = ALIGN_VA_32 | ALIGN_VA_64; else pr_warn("invalid option value: 'align_va_addr=%s'\n", str); return 1; } __setup("align_va_addr=", control_va_addr_alignment); SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, off) { if (off & ~PAGE_MASK) return -EINVAL; return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); } static void find_start_end(unsigned long addr, unsigned long flags, unsigned long *begin, unsigned long *end) { if (!in_32bit_syscall() && (flags & MAP_32BIT)) { /* This is usually used needed to map code in small model, so it needs to be in the first 31bit. Limit it to that. This means we need to move the unmapped base down for this case. This can give conflicts with the heap, but we assume that glibc malloc knows how to fall back to mmap. Give it 1GB of playground for now. 
-AK */ *begin = 0x40000000; *end = 0x80000000; if (current->flags & PF_RANDOMIZE) { *begin = randomize_page(*begin, 0x02000000); } return; } *begin = get_mmap_base(1); if (in_32bit_syscall()) *end = task_size_32bit(); else *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW); } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct vm_unmapped_area_info info; unsigned long begin, end; if (flags & MAP_FIXED) return addr; find_start_end(addr, flags, &begin, &end); if (len > end) return -ENOMEM; if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (end - len >= addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } info.flags = 0; info.length = len; info.low_limit = begin; info.high_limit = end; info.align_mask = 0; info.align_offset = pgoff << PAGE_SHIFT; if (filp) { info.align_mask = get_align_mask(); info.align_offset += get_align_bits(); } return vm_unmapped_area(&info); } unsigned long arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; /* requested length too big for entire address space */ if (len > TASK_SIZE) return -ENOMEM; /* No address checking. See comment at mmap_address_hint_valid() */ if (flags & MAP_FIXED) return addr; /* for MAP_32BIT mappings we force the legacy mmap base */ if (!in_32bit_syscall() && (flags & MAP_32BIT)) goto bottomup; /* requesting a specific address */ if (addr) { addr &= PAGE_MASK; if (!mmap_address_hint_valid(addr, len)) goto get_unmapped_area; vma = find_vma(mm, addr); if (!vma || addr + len <= vm_start_gap(vma)) return addr; } get_unmapped_area: info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; if (!in_32bit_syscall() && (flags & MAP_ABOVE4G)) info.low_limit = SZ_4G; else info.low_limit = PAGE_SIZE; info.high_limit = get_mmap_base(0); /* * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area * in the full address space. * * !in_32bit_syscall() check to avoid high addresses for x32 * (and make it no op on native i386). */ if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall()) info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW; info.align_mask = 0; info.align_offset = pgoff << PAGE_SHIFT; if (filp) { info.align_mask = get_align_mask(); info.align_offset += get_align_bits(); } addr = vm_unmapped_area(&info); if (!(addr & ~PAGE_MASK)) return addr; VM_BUG_ON(addr != -ENOMEM); bottomup: /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags); }
linux-master
arch/x86/kernel/sys_x86_64.c
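find_start_end() above confines MAP_32BIT requests from 64-bit tasks to the 0x40000000-0x80000000 window (plus a small randomized offset when ASLR is on). The sketch below simply asks for such a mapping and prints where it landed; it assumes x86-64 Linux and glibc's MAP_32BIT definition.

/* Observe where a MAP_32BIT request lands (cf. find_start_end() above).
 * Assumes x86-64 Linux; MAP_32BIT needs _GNU_SOURCE with glibc. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;	/* 1 MiB */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Expect an address inside the [0x40000000, 0x80000000) window. */
	printf("MAP_32BIT mapping at %p\n", p);
	munmap(p, len);
	return 0;
}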
// SPDX-License-Identifier: GPL-2.0-only /* * cppc.c: CPPC Interface for x86 * Copyright (c) 2016, Intel Corporation. */ #include <acpi/cppc_acpi.h> #include <asm/msr.h> #include <asm/processor.h> #include <asm/topology.h> /* Refer to drivers/acpi/cppc_acpi.c for the description of functions */ bool cpc_supported_by_cpu(void) { switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: case X86_VENDOR_HYGON: if (boot_cpu_data.x86 == 0x19 && ((boot_cpu_data.x86_model <= 0x0f) || (boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f))) return true; else if (boot_cpu_data.x86 == 0x17 && boot_cpu_data.x86_model >= 0x70 && boot_cpu_data.x86_model <= 0x7f) return true; return boot_cpu_has(X86_FEATURE_CPPC); } return false; } bool cpc_ffh_supported(void) { return true; } int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val) { int err; err = rdmsrl_safe_on_cpu(cpunum, reg->address, val); if (!err) { u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1, reg->bit_offset); *val &= mask; *val >>= reg->bit_offset; } return err; } int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) { u64 rd_val; int err; err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val); if (!err) { u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1, reg->bit_offset); val <<= reg->bit_offset; val &= mask; rd_val &= ~mask; rd_val |= val; err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val); } return err; } static void amd_set_max_freq_ratio(void) { struct cppc_perf_caps perf_caps; u64 highest_perf, nominal_perf; u64 perf_ratio; int rc; rc = cppc_get_perf_caps(0, &perf_caps); if (rc) { pr_debug("Could not retrieve perf counters (%d)\n", rc); return; } highest_perf = amd_get_highest_perf(); nominal_perf = perf_caps.nominal_perf; if (!highest_perf || !nominal_perf) { pr_debug("Could not retrieve highest or nominal performance\n"); return; } perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf); /* midpoint between max_boost and max_P */ perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1; if (!perf_ratio) { pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n"); return; } freq_invariance_set_perf_ratio(perf_ratio, false); } static DEFINE_MUTEX(freq_invariance_lock); void init_freq_invariance_cppc(void) { static bool init_done; if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF)) return; if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) return; mutex_lock(&freq_invariance_lock); if (!init_done) amd_set_max_freq_ratio(); init_done = true; mutex_unlock(&freq_invariance_lock); }
linux-master
arch/x86/kernel/acpi/cppc.c
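cpc_read_ffh() and cpc_write_ffh() above isolate the relevant MSR field with a mask built from bit_offset and bit_width (GENMASK_ULL). The stand-alone helpers below reproduce just that field extraction and insertion; the helper names are made up for illustration.

/* Stand-alone version of the bit-field handling used by cpc_read_ffh()
 * and cpc_write_ffh() above; helper names are illustrative. */
#include <stdint.h>
#include <stdio.h>

/* Equivalent of GENMASK_ULL(off + width - 1, off); assumes 1 <= width <= 64
 * and off + width <= 64. */
static uint64_t field_mask(unsigned int off, unsigned int width)
{
	uint64_t m = (width >= 64) ? ~0ULL : (1ULL << width) - 1;

	return m << off;
}

static uint64_t field_read(uint64_t reg, unsigned int off, unsigned int width)
{
	return (reg & field_mask(off, width)) >> off;
}

static uint64_t field_write(uint64_t reg, unsigned int off, unsigned int width,
			    uint64_t val)
{
	uint64_t mask = field_mask(off, width);

	return (reg & ~mask) | ((val << off) & mask);
}

int main(void)
{
	uint64_t msr = 0x1122334455667788ULL;

	printf("read  bits [15:8] -> 0x%llx\n",
	       (unsigned long long)field_read(msr, 8, 8));
	printf("write bits [15:8] -> 0x%llx\n",
	       (unsigned long long)field_write(msr, 8, 8, 0xab));
	return 0;
}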
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Arch-specific APEI-related functions.
 */

#include <acpi/apei.h>

#include <asm/mce.h>
#include <asm/tlbflush.h>

int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data)
{
#ifdef CONFIG_X86_MCE
	int i;
	struct acpi_hest_ia_corrected *cmc;
	struct acpi_hest_ia_error_bank *mc_bank;

	cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
	if (!cmc->enabled)
		return 0;

	/*
	 * We expect HEST to provide a list of MC banks that report errors
	 * in firmware first mode. Otherwise, return non-zero value to
	 * indicate that we are done parsing HEST.
	 */
	if (!(cmc->flags & ACPI_HEST_FIRMWARE_FIRST) ||
	    !cmc->num_hardware_banks)
		return 1;

	pr_info("HEST: Enabling Firmware First mode for corrected errors.\n");

	mc_bank = (struct acpi_hest_ia_error_bank *)(cmc + 1);
	for (i = 0; i < cmc->num_hardware_banks; i++, mc_bank++)
		mce_disable_bank(mc_bank->bank_number);
#endif
	return 1;
}

void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
{
#ifdef CONFIG_X86_MCE
	apei_mce_report_mem_error(sev, mem_err);
#endif
}

int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id)
{
	return apei_smca_report_x86_error(ctx_info, lapic_id);
}
linux-master
arch/x86/kernel/acpi/apei.c
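arch_apei_enable_cmcff() above walks the HEST MC bank list by treating the memory immediately after the acpi_hest_ia_corrected header as an array of bank records ((cmc + 1)). The toy program below illustrates only that trailing-array idiom; the structures are invented stand-ins, not the ACPI ones.

/* Illustrates the "(header + 1)" trailing-array walk used above for the
 * HEST MC bank list; the structs here are toy stand-ins, not ACPI ones. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_header {
	uint8_t num_banks;
};

struct toy_bank {
	uint8_t bank_number;
};

int main(void)
{
	struct toy_header *hdr;
	struct toy_bank *bank;
	int i;

	/* One header followed immediately by three bank records. */
	hdr = calloc(1, sizeof(*hdr) + 3 * sizeof(*bank));
	if (!hdr)
		return 1;
	hdr->num_banks = 3;
	bank = (struct toy_bank *)(hdr + 1);
	for (i = 0; i < hdr->num_banks; i++)
		bank[i].bank_number = 10 + i;

	/* Walk it the same way arch_apei_enable_cmcff() walks mc_bank. */
	bank = (struct toy_bank *)(hdr + 1);
	for (i = 0; i < hdr->num_banks; i++, bank++)
		printf("bank %u\n", (unsigned)bank->bank_number);

	free(hdr);
	return 0;
}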
// SPDX-License-Identifier: GPL-2.0 /* * sleep.c - x86-specific ACPI sleep support. * * Copyright (C) 2001-2003 Patrick Mochel * Copyright (C) 2001-2003 Pavel Machek <[email protected]> */ #include <linux/acpi.h> #include <linux/memblock.h> #include <linux/dmi.h> #include <linux/cpumask.h> #include <linux/pgtable.h> #include <asm/segment.h> #include <asm/desc.h> #include <asm/cacheflush.h> #include <asm/realmode.h> #include <asm/hypervisor.h> #include <asm/smp.h> #include <linux/ftrace.h> #include "../../realmode/rm/wakeup.h" #include "sleep.h" unsigned long acpi_realmode_flags; #if defined(CONFIG_SMP) && defined(CONFIG_64BIT) static char temp_stack[4096]; #endif /** * acpi_get_wakeup_address - provide physical address for S3 wakeup * * Returns the physical address where the kernel should be resumed after the * system awakes from S3, e.g. for programming into the firmware waking vector. */ unsigned long acpi_get_wakeup_address(void) { return ((unsigned long)(real_mode_header->wakeup_start)); } /** * x86_acpi_enter_sleep_state - enter sleep state * @state: Sleep state to enter. * * Wrapper around acpi_enter_sleep_state() to be called by assembly. */ asmlinkage acpi_status __visible x86_acpi_enter_sleep_state(u8 state) { return acpi_enter_sleep_state(state); } /** * x86_acpi_suspend_lowlevel - save kernel state * * Create an identity mapped page table and copy the wakeup routine to * low memory. */ int x86_acpi_suspend_lowlevel(void) { struct wakeup_header *header = (struct wakeup_header *) __va(real_mode_header->wakeup_header); if (header->signature != WAKEUP_HEADER_SIGNATURE) { printk(KERN_ERR "wakeup header does not match\n"); return -EINVAL; } header->video_mode = saved_video_mode; header->pmode_behavior = 0; #ifndef CONFIG_64BIT native_store_gdt((struct desc_ptr *)&header->pmode_gdt); /* * We have to check that we can write back the value, and not * just read it. At least on 90 nm Pentium M (Family 6, Model * 13), reading an invalid MSR is not guaranteed to trap, see * Erratum X4 in "Intel Pentium M Processor on 90 nm Process * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90 * nm process with 512-KB L2 Cache Specification Update". */ if (!rdmsr_safe(MSR_EFER, &header->pmode_efer_low, &header->pmode_efer_high) && !wrmsr_safe(MSR_EFER, header->pmode_efer_low, header->pmode_efer_high)) header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER); #endif /* !CONFIG_64BIT */ header->pmode_cr0 = read_cr0(); if (__this_cpu_read(cpu_info.cpuid_level) >= 0) { header->pmode_cr4 = __read_cr4(); header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4); } if (!rdmsr_safe(MSR_IA32_MISC_ENABLE, &header->pmode_misc_en_low, &header->pmode_misc_en_high) && !wrmsr_safe(MSR_IA32_MISC_ENABLE, header->pmode_misc_en_low, header->pmode_misc_en_high)) header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE); header->realmode_flags = acpi_realmode_flags; header->real_magic = 0x12345678; #ifndef CONFIG_64BIT header->pmode_entry = (u32)&wakeup_pmode_return; header->pmode_cr3 = (u32)__pa_symbol(initial_page_table); saved_magic = 0x12345678; #else /* CONFIG_64BIT */ #ifdef CONFIG_SMP /* * As each CPU starts up, it will find its own stack pointer * from its current_task->thread.sp. Typically that will be * the idle thread for a newly-started AP, or even the boot * CPU which will find it set to &init_task in the static * per-cpu data. * * Make the resuming CPU use the temporary stack at startup * by setting current->thread.sp to point to that. 
The true * %rsp will be restored with the rest of the CPU context, * by do_suspend_lowlevel(). And unwinders don't care about * the abuse of ->thread.sp because it's a dead variable * while the thread is running on the CPU anyway; the true * value is in the actual %rsp register. */ current->thread.sp = (unsigned long)temp_stack + sizeof(temp_stack); /* * Ensure the CPU knows which one it is when it comes back, if * it isn't in parallel mode and expected to work that out for * itself. */ if (!(smpboot_control & STARTUP_PARALLEL_MASK)) smpboot_control = smp_processor_id(); #endif initial_code = (unsigned long)wakeup_long64; saved_magic = 0x123456789abcdef0L; #endif /* CONFIG_64BIT */ /* * Pause/unpause graph tracing around do_suspend_lowlevel as it has * inconsistent call/return info after it jumps to the wakeup vector. */ pause_graph_tracing(); do_suspend_lowlevel(); unpause_graph_tracing(); return 0; } static int __init acpi_sleep_setup(char *str) { while ((str != NULL) && (*str != '\0')) { if (strncmp(str, "s3_bios", 7) == 0) acpi_realmode_flags |= 1; if (strncmp(str, "s3_mode", 7) == 0) acpi_realmode_flags |= 2; if (strncmp(str, "s3_beep", 7) == 0) acpi_realmode_flags |= 4; #ifdef CONFIG_HIBERNATION if (strncmp(str, "s4_hwsig", 8) == 0) acpi_check_s4_hw_signature = 1; if (strncmp(str, "s4_nohwsig", 10) == 0) acpi_check_s4_hw_signature = 0; #endif if (strncmp(str, "nonvs", 5) == 0) acpi_nvs_nosave(); if (strncmp(str, "nonvs_s3", 8) == 0) acpi_nvs_nosave_s3(); if (strncmp(str, "old_ordering", 12) == 0) acpi_old_suspend_ordering(); if (strncmp(str, "nobl", 4) == 0) acpi_sleep_no_blacklist(); str = strchr(str, ','); if (str != NULL) str += strspn(str, ", \t"); } return 1; } __setup("acpi_sleep=", acpi_sleep_setup); #if defined(CONFIG_HIBERNATION) && defined(CONFIG_HYPERVISOR_GUEST) static int __init init_s4_sigcheck(void) { /* * If running on a hypervisor, honour the ACPI specification * by default and trigger a clean reboot when the hardware * signature in FACS is changed after hibernation. */ if (acpi_check_s4_hw_signature == -1 && !hypervisor_is_type(X86_HYPER_NATIVE)) acpi_check_s4_hw_signature = 1; return 0; } /* This must happen before acpi_init() which is a subsys initcall */ arch_initcall(init_s4_sigcheck); #endif
linux-master
arch/x86/kernel/acpi/sleep.c
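acpi_sleep_setup() above walks a comma-separated option string and ORs bits into acpi_realmode_flags (s3_bios = 1, s3_mode = 2, s3_beep = 4). The user-space re-implementation below mirrors just that parsing loop for illustration; it is not kernel code.

/* Stand-alone re-implementation of the acpi_sleep= flag parsing above
 * (s3_bios -> bit 0, s3_mode -> bit 1, s3_beep -> bit 2). Illustrative only. */
#include <stdio.h>
#include <string.h>

static unsigned long parse_acpi_sleep(const char *str)
{
	unsigned long flags = 0;

	while (str && *str) {
		if (!strncmp(str, "s3_bios", 7))
			flags |= 1;
		if (!strncmp(str, "s3_mode", 7))
			flags |= 2;
		if (!strncmp(str, "s3_beep", 7))
			flags |= 4;

		str = strchr(str, ',');
		if (str)
			str += strspn(str, ", \t");
	}
	return flags;
}

int main(void)
{
	printf("acpi_sleep=s3_bios,s3_mode -> flags %#lx\n",
	       parse_acpi_sleep("s3_bios,s3_mode"));
	return 0;
}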
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005 Intel Corporation * Venkatesh Pallipadi <[email protected]> * - Added _PDC for SMP C-states on Intel CPUs */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/cpu.h> #include <linux/sched.h> #include <acpi/processor.h> #include <asm/mwait.h> #include <asm/special_insns.h> /* * Initialize bm_flags based on the CPU cache properties * On SMP it depends on cache configuration * - When cache is not shared among all CPUs, we flush cache * before entering C3. * - When cache is shared among all CPUs, we use bm_check * mechanism as in UP case * * This routine is called only after all the CPUs are online */ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, unsigned int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); flags->bm_check = 0; if (num_online_cpus() == 1) flags->bm_check = 1; else if (c->x86_vendor == X86_VENDOR_INTEL) { /* * Today all MP CPUs that support C3 share cache. * And caches should not be flushed by software while * entering C3 type state. */ flags->bm_check = 1; } /* * On all recent Intel platforms, ARB_DISABLE is a nop. * So, set bm_control to zero to indicate that ARB_DISABLE * is not required while entering C3 type state on * P4, Core and beyond CPUs */ if (c->x86_vendor == X86_VENDOR_INTEL && (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f))) flags->bm_control = 0; if (c->x86_vendor == X86_VENDOR_CENTAUR) { if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f && c->x86_stepping >= 0x0e)) { /* * For all recent Centaur CPUs, the ucode will make sure that each * core can keep cache coherence with each other while entering C3 * type state. So, set bm_check to 1 to indicate that the kernel * doesn't need to execute a cache flush operation (WBINVD) when * entering C3 type state. */ flags->bm_check = 1; /* * For all recent Centaur platforms, ARB_DISABLE is a nop. * Set bm_control to zero to indicate that ARB_DISABLE is * not required while entering C3 type state. */ flags->bm_control = 0; } } if (c->x86_vendor == X86_VENDOR_ZHAOXIN) { /* * All Zhaoxin CPUs that support C3 share cache. * And caches should not be flushed by software while * entering C3 type state. */ flags->bm_check = 1; /* * On all recent Zhaoxin platforms, ARB_DISABLE is a nop. * So, set bm_control to zero to indicate that ARB_DISABLE * is not required while entering C3 type state. */ flags->bm_control = 0; } if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17) { /* * For all AMD Zen or newer CPUs that support C3, caches * should not be flushed by software while entering C3 * type state. Set bm->check to 1 so that kernel doesn't * need to execute cache flush operation. */ flags->bm_check = 1; /* * In current AMD C state implementation ARB_DIS is no longer * used. So set bm_control to zero to indicate ARB_DIS is not * required while entering C3 type state. 
*/ flags->bm_control = 0; } } EXPORT_SYMBOL(acpi_processor_power_init_bm_check); /* The code below handles cstate entry with monitor-mwait pair on Intel*/ struct cstate_entry { struct { unsigned int eax; unsigned int ecx; } states[ACPI_PROCESSOR_MAX_POWER]; }; static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; #define NATIVE_CSTATE_BEYOND_HALT (2) static long acpi_processor_ffh_cstate_probe_cpu(void *_cx) { struct acpi_processor_cx *cx = _cx; long retval; unsigned int eax, ebx, ecx, edx; unsigned int edx_part; unsigned int cstate_type; /* C-state type and not ACPI C-state type */ unsigned int num_cstate_subtype; cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); /* Check whether this particular cx_type (in CST) is supported or not */ cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE); num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK; retval = 0; /* If the HW does not support any sub-states in this C-state */ if (num_cstate_subtype == 0) { pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n", cx->address, edx_part); retval = -1; goto out; } /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) { retval = -1; goto out; } if (!mwait_supported[cstate_type]) { mwait_supported[cstate_type] = 1; printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d state\n", cx->type); } snprintf(cx->desc, ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x", cx->address); out: return retval; } int acpi_processor_ffh_cstate_probe(unsigned int cpu, struct acpi_processor_cx *cx, struct acpi_power_register *reg) { struct cstate_entry *percpu_entry; struct cpuinfo_x86 *c = &cpu_data(cpu); long retval; if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF) return -1; if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT) return -1; percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); percpu_entry->states[cx->index].eax = 0; percpu_entry->states[cx->index].ecx = 0; /* Make sure we are running on right CPU */ retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx, false); if (retval == 0) { /* Use the hint in CST */ percpu_entry->states[cx->index].eax = cx->address; percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; } /* * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared, * then we should skip checking BM_STS for this C-state. 
* ref: "Intel Processor Vendor-Specific ACPI Interface Specification" */ if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2)) cx->bm_sts_skip = 1; return retval; } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) { unsigned int cpu = smp_processor_id(); struct cstate_entry *percpu_entry; percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); mwait_idle_with_hints(percpu_entry->states[cx->index].eax, percpu_entry->states[cx->index].ecx); } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter); static int __init ffh_cstate_init(void) { struct cpuinfo_x86 *c = &boot_cpu_data; if (c->x86_vendor != X86_VENDOR_INTEL && c->x86_vendor != X86_VENDOR_AMD && c->x86_vendor != X86_VENDOR_HYGON) return -1; cpu_cstate_entry = alloc_percpu(struct cstate_entry); return 0; } static void __exit ffh_cstate_exit(void) { free_percpu(cpu_cstate_entry); cpu_cstate_entry = NULL; } arch_initcall(ffh_cstate_init); __exitcall(ffh_cstate_exit);
linux-master
arch/x86/kernel/acpi/cstate.c
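acpi_processor_ffh_cstate_probe_cpu() above decodes CPUID leaf 5 (MONITOR/MWAIT): ECX bits 0 and 1 advertise the enumeration extension and interrupt-break capability, and EDX packs the number of MWAIT sub-states in 4-bit groups per C-state. The sketch below dumps the same leaf from user space with GCC's <cpuid.h>; it assumes an x86 CPU that implements leaf 5.

/* Dump CPUID leaf 5 (MONITOR/MWAIT) the way the probe above consumes it.
 * Assumes GCC/Clang on x86 and a CPU exposing leaf 5. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	int type;

	if (!__get_cpuid(5, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 5 not available\n");
		return 1;
	}

	printf("MWAIT enumeration extension: %s\n", (ecx & 1) ? "yes" : "no");
	printf("interrupt break-event:       %s\n", (ecx & 2) ? "yes" : "no");

	/* EDX: 4 bits per C-state, number of supported MWAIT sub-states. */
	for (type = 0; type < 8; type++)
		printf("C%d sub-states: %u\n", type,
		       (edx >> (type * 4)) & 0xf);
	return 0;
}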
// SPDX-License-Identifier: GPL-2.0-or-later /* * boot.c - Architecture-Specific Low-Level ACPI Boot Support * * Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]> * Copyright (C) 2001 Jun Nakajima <[email protected]> */ #define pr_fmt(fmt) "ACPI: " fmt #include <linux/init.h> #include <linux/acpi.h> #include <linux/acpi_pmtmr.h> #include <linux/efi.h> #include <linux/cpumask.h> #include <linux/export.h> #include <linux/dmi.h> #include <linux/irq.h> #include <linux/slab.h> #include <linux/memblock.h> #include <linux/ioport.h> #include <linux/pci.h> #include <linux/efi-bgrt.h> #include <linux/serial_core.h> #include <linux/pgtable.h> #include <asm/e820/api.h> #include <asm/irqdomain.h> #include <asm/pci_x86.h> #include <asm/io_apic.h> #include <asm/apic.h> #include <asm/io.h> #include <asm/mpspec.h> #include <asm/smp.h> #include <asm/i8259.h> #include <asm/setup.h> #include "sleep.h" /* To include x86_acpi_suspend_lowlevel */ static int __initdata acpi_force = 0; int acpi_disabled; EXPORT_SYMBOL(acpi_disabled); #ifdef CONFIG_X86_64 # include <asm/proto.h> #endif /* X86 */ int acpi_noirq; /* skip ACPI IRQ initialization */ static int acpi_nobgrt; /* skip ACPI BGRT */ int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */ EXPORT_SYMBOL(acpi_pci_disabled); int acpi_lapic; int acpi_ioapic; int acpi_strict; int acpi_disable_cmcff; bool acpi_int_src_ovr[NR_IRQS_LEGACY]; /* ACPI SCI override configuration */ u8 acpi_sci_flags __initdata; u32 acpi_sci_override_gsi __initdata = INVALID_ACPI_IRQ; int acpi_skip_timer_override __initdata; int acpi_use_timer_override __initdata; int acpi_fix_pin2_polarity __initdata; #ifdef CONFIG_X86_LOCAL_APIC static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; static bool acpi_support_online_capable; #endif #ifdef CONFIG_X86_64 /* Physical address of the Multiprocessor Wakeup Structure mailbox */ static u64 acpi_mp_wake_mailbox_paddr; /* Virtual address of the Multiprocessor Wakeup Structure mailbox */ static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox; #endif #ifdef CONFIG_X86_IO_APIC /* * Locks related to IOAPIC hotplug * Hotplug side: * ->device_hotplug_lock * ->acpi_ioapic_lock * ->ioapic_lock * Interrupt mapping side: * ->acpi_ioapic_lock * ->ioapic_mutex * ->ioapic_lock */ static DEFINE_MUTEX(acpi_ioapic_lock); #endif /* -------------------------------------------------------------------------- Boot-time Configuration -------------------------------------------------------------------------- */ /* * The default interrupt routing model is PIC (8259). This gets * overridden if IOAPICs are enumerated (below). */ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; /* * ISA irqs by default are the first 16 gsis but can be * any gsi as specified by an interrupt source override. */ static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; /* * This is just a simple wrapper around early_memremap(), * with sanity checks for phys == 0 and size == 0. 
*/ void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size) { if (!phys || !size) return NULL; return early_memremap(phys, size); } void __init __acpi_unmap_table(void __iomem *map, unsigned long size) { if (!map || !size) return; early_memunmap(map, size); } #ifdef CONFIG_X86_LOCAL_APIC static int __init acpi_parse_madt(struct acpi_table_header *table) { struct acpi_table_madt *madt = NULL; if (!boot_cpu_has(X86_FEATURE_APIC)) return -EINVAL; madt = (struct acpi_table_madt *)table; if (!madt) { pr_warn("Unable to map MADT\n"); return -ENODEV; } if (madt->address) { acpi_lapic_addr = (u64) madt->address; pr_debug("Local APIC address 0x%08x\n", madt->address); } /* ACPI 6.3 and newer support the online capable bit. */ if (acpi_gbl_FADT.header.revision > 6 || (acpi_gbl_FADT.header.revision == 6 && acpi_gbl_FADT.minor_revision >= 3)) acpi_support_online_capable = true; default_acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); return 0; } /** * acpi_register_lapic - register a local apic and generates a logic cpu number * @id: local apic id to register * @acpiid: ACPI id to register * @enabled: this cpu is enabled or not * * Returns the logic cpu number which maps to the local apic */ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled) { int cpu; if (id >= MAX_LOCAL_APIC) { pr_info("skipped apicid that is too big\n"); return -EINVAL; } if (!enabled) { ++disabled_cpus; return -EINVAL; } cpu = generic_processor_info(id); if (cpu >= 0) early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid; return cpu; } static bool __init acpi_is_processor_usable(u32 lapic_flags) { if (lapic_flags & ACPI_MADT_ENABLED) return true; if (!acpi_support_online_capable || (lapic_flags & ACPI_MADT_ONLINE_CAPABLE)) return true; return false; } static int __init acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_local_x2apic *processor = NULL; #ifdef CONFIG_X86_X2APIC u32 apic_id; u8 enabled; #endif processor = (struct acpi_madt_local_x2apic *)header; if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(&header->common); #ifdef CONFIG_X86_X2APIC apic_id = processor->local_apic_id; enabled = processor->lapic_flags & ACPI_MADT_ENABLED; /* Ignore invalid ID */ if (apic_id == 0xffffffff) return 0; /* don't register processors that cannot be onlined */ if (!acpi_is_processor_usable(processor->lapic_flags)) return 0; /* * We need to register disabled CPU as well to permit * counting disabled CPUs. This allows us to size * cpus_possible_map more accurately, to permit * to not preallocating memory for all NR_CPUS * when we use CPU hotplug. */ if (!apic_id_valid(apic_id)) { if (enabled) pr_warn("x2apic entry ignored\n"); return 0; } acpi_register_lapic(apic_id, processor->uid, enabled); #else pr_warn("x2apic entry ignored\n"); #endif return 0; } static int __init acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_local_apic *processor = NULL; processor = (struct acpi_madt_local_apic *)header; if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(&header->common); /* Ignore invalid ID */ if (processor->id == 0xff) return 0; /* don't register processors that can not be onlined */ if (!acpi_is_processor_usable(processor->lapic_flags)) return 0; /* * We need to register disabled CPU as well to permit * counting disabled CPUs. 
This allows us to size * cpus_possible_map more accurately, to permit * to not preallocating memory for all NR_CPUS * when we use CPU hotplug. */ acpi_register_lapic(processor->id, /* APIC ID */ processor->processor_id, /* ACPI ID */ processor->lapic_flags & ACPI_MADT_ENABLED); return 0; } static int __init acpi_parse_sapic(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_local_sapic *processor = NULL; processor = (struct acpi_madt_local_sapic *)header; if (BAD_MADT_ENTRY(processor, end)) return -EINVAL; acpi_table_print_madt_entry(&header->common); acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */ processor->processor_id, /* ACPI ID */ processor->lapic_flags & ACPI_MADT_ENABLED); return 0; } static int __init acpi_parse_lapic_addr_ovr(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL; lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header; if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) return -EINVAL; acpi_table_print_madt_entry(&header->common); acpi_lapic_addr = lapic_addr_ovr->address; return 0; } static int __init acpi_parse_x2apic_nmi(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL; x2apic_nmi = (struct acpi_madt_local_x2apic_nmi *)header; if (BAD_MADT_ENTRY(x2apic_nmi, end)) return -EINVAL; acpi_table_print_madt_entry(&header->common); if (x2apic_nmi->lint != 1) pr_warn("NMI not connected to LINT 1!\n"); return 0; } static int __init acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_local_apic_nmi *lapic_nmi = NULL; lapic_nmi = (struct acpi_madt_local_apic_nmi *)header; if (BAD_MADT_ENTRY(lapic_nmi, end)) return -EINVAL; acpi_table_print_madt_entry(&header->common); if (lapic_nmi->lint != 1) pr_warn("NMI not connected to LINT 1!\n"); return 0; } #ifdef CONFIG_X86_64 static int acpi_wakeup_cpu(int apicid, unsigned long start_ip) { /* * Remap mailbox memory only for the first call to acpi_wakeup_cpu(). * * Wakeup of secondary CPUs is fully serialized in the core code. * No need to protect acpi_mp_wake_mailbox from concurrent accesses. */ if (!acpi_mp_wake_mailbox) { acpi_mp_wake_mailbox = memremap(acpi_mp_wake_mailbox_paddr, sizeof(*acpi_mp_wake_mailbox), MEMREMAP_WB); } /* * Mailbox memory is shared between the firmware and OS. Firmware will * listen on mailbox command address, and once it receives the wakeup * command, the CPU associated with the given apicid will be booted. * * The value of 'apic_id' and 'wakeup_vector' must be visible to the * firmware before the wakeup command is visible. smp_store_release() * ensures ordering and visibility. */ acpi_mp_wake_mailbox->apic_id = apicid; acpi_mp_wake_mailbox->wakeup_vector = start_ip; smp_store_release(&acpi_mp_wake_mailbox->command, ACPI_MP_WAKE_COMMAND_WAKEUP); /* * Wait for the CPU to wake up. * * The CPU being woken up is essentially in a spin loop waiting to be * woken up. It should not take long for it wake up and acknowledge by * zeroing out ->command. * * ACPI specification doesn't provide any guidance on how long kernel * has to wait for a wake up acknowledgement. It also doesn't provide * a way to cancel a wake up request if it takes too long. * * In TDX environment, the VMM has control over how long it takes to * wake up secondary. It can postpone scheduling secondary vCPU * indefinitely. 
Giving up on wake up request and reporting error opens * possible attack vector for VMM: it can wake up a secondary CPU when * kernel doesn't expect it. Wait until positive result of the wake up * request. */ while (READ_ONCE(acpi_mp_wake_mailbox->command)) cpu_relax(); return 0; } #endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_IO_APIC #define MP_ISA_BUS 0 static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) { /* * Check bus_irq boundary. */ if (bus_irq >= NR_IRQS_LEGACY) { pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq); return; } /* * TBD: This check is for faulty timer entries, where the override * erroneously sets the trigger to level, resulting in a HUGE * increase of timer interrupts! */ if ((bus_irq == 0) && (trigger == 3)) trigger = 1; if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0) return; /* * Reset default identity mapping if gsi is also an legacy IRQ, * otherwise there will be more than one entry with the same GSI * and acpi_isa_irq_to_gsi() may give wrong result. */ if (gsi < nr_legacy_irqs() && isa_irq_to_gsi[gsi] == gsi) isa_irq_to_gsi[gsi] = INVALID_ACPI_IRQ; isa_irq_to_gsi[bus_irq] = gsi; } static void mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { #ifdef CONFIG_X86_MPPARSE struct mpc_intsrc mp_irq; struct pci_dev *pdev; unsigned char number; unsigned int devfn; int ioapic; u8 pin; if (!acpi_ioapic) return; if (!dev || !dev_is_pci(dev)) return; pdev = to_pci_dev(dev); number = pdev->bus->number; devfn = pdev->devfn; pin = pdev->pin; /* print the entry should happen on mptable identically */ mp_irq.type = MP_INTSRC; mp_irq.irqtype = mp_INT; mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) | (polarity == ACPI_ACTIVE_HIGH ? 
1 : 3); mp_irq.srcbus = number; mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3); ioapic = mp_find_ioapic(gsi); mp_irq.dstapic = mpc_ioapic_id(ioapic); mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi); mp_save_irq(&mp_irq); #endif } static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) { struct mpc_intsrc mp_irq; int ioapic, pin; /* Convert 'gsi' to 'ioapic.pin'(INTIN#) */ ioapic = mp_find_ioapic(gsi); if (ioapic < 0) { pr_warn("Failed to find ioapic for gsi : %u\n", gsi); return ioapic; } pin = mp_find_ioapic_pin(ioapic, gsi); mp_irq.type = MP_INTSRC; mp_irq.irqtype = mp_INT; mp_irq.irqflag = (trigger << 2) | polarity; mp_irq.srcbus = MP_ISA_BUS; mp_irq.srcbusirq = bus_irq; mp_irq.dstapic = mpc_ioapic_id(ioapic); mp_irq.dstirq = pin; mp_save_irq(&mp_irq); return 0; } static int __init acpi_parse_ioapic(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_io_apic *ioapic = NULL; struct ioapic_domain_cfg cfg = { .type = IOAPIC_DOMAIN_DYNAMIC, .ops = &mp_ioapic_irqdomain_ops, }; ioapic = (struct acpi_madt_io_apic *)header; if (BAD_MADT_ENTRY(ioapic, end)) return -EINVAL; acpi_table_print_madt_entry(&header->common); /* Statically assign IRQ numbers for IOAPICs hosting legacy IRQs */ if (ioapic->global_irq_base < nr_legacy_irqs()) cfg.type = IOAPIC_DOMAIN_LEGACY; mp_register_ioapic(ioapic->id, ioapic->address, ioapic->global_irq_base, &cfg); return 0; } /* * Parse Interrupt Source Override for the ACPI SCI */ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, u32 gsi) { if (trigger == 0) /* compatible SCI trigger is level */ trigger = 3; if (polarity == 0) /* compatible SCI polarity is low */ polarity = 3; /* Command-line over-ride via acpi_sci= */ if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2; if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK) polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; if (bus_irq < NR_IRQS_LEGACY) mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); else mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi); acpi_penalize_sci_irq(bus_irq, trigger, polarity); /* * stash over-ride to indicate we've been here * and for later update of acpi_gbl_FADT */ acpi_sci_override_gsi = gsi; return; } static int __init acpi_parse_int_src_ovr(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_interrupt_override *intsrc = NULL; intsrc = (struct acpi_madt_interrupt_override *)header; if (BAD_MADT_ENTRY(intsrc, end)) return -EINVAL; acpi_table_print_madt_entry(&header->common); if (intsrc->source_irq < NR_IRQS_LEGACY) acpi_int_src_ovr[intsrc->source_irq] = true; if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) { acpi_sci_ioapic_setup(intsrc->source_irq, intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2, intsrc->global_irq); return 0; } if (intsrc->source_irq == 0) { if (acpi_skip_timer_override) { pr_warn("BIOS IRQ0 override ignored.\n"); return 0; } if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) { intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK; pr_warn("BIOS IRQ0 pin2 override: forcing polarity to high active.\n"); } } mp_override_legacy_irq(intsrc->source_irq, intsrc->inti_flags & ACPI_MADT_POLARITY_MASK, (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2, intsrc->global_irq); return 0; } static int __init acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned 
long end) { struct acpi_madt_nmi_source *nmi_src = NULL; nmi_src = (struct acpi_madt_nmi_source *)header; if (BAD_MADT_ENTRY(nmi_src, end)) return -EINVAL; acpi_table_print_madt_entry(&header->common); /* TBD: Support nimsrc entries? */ return 0; } #endif /* CONFIG_X86_IO_APIC */ /* * acpi_pic_sci_set_trigger() * * use ELCR to set PIC-mode trigger type for SCI * * If a PIC-mode SCI is not recognized or gives spurious IRQ7's * it may require Edge Trigger -- use "acpi_sci=edge" * * Port 0x4d0-4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge. * ELCR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0) * ELCR2 is IRQs 8-15 (IRQ 8, 13 must be 0) */ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) { unsigned int mask = 1 << irq; unsigned int old, new; /* Real old ELCR mask */ old = inb(PIC_ELCR1) | (inb(PIC_ELCR2) << 8); /* * If we use ACPI to set PCI IRQs, then we should clear ELCR * since we will set it correctly as we enable the PCI irq * routing. */ new = acpi_noirq ? old : 0; /* * Update SCI information in the ELCR, it isn't in the PCI * routing tables.. */ switch (trigger) { case 1: /* Edge - clear */ new &= ~mask; break; case 3: /* Level - set */ new |= mask; break; } if (old == new) return; pr_warn("setting ELCR to %04x (from %04x)\n", new, old); outb(new, PIC_ELCR1); outb(new >> 8, PIC_ELCR2); } int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp) { int rc, irq, trigger, polarity; if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { *irqp = gsi; return 0; } rc = acpi_get_override_irq(gsi, &trigger, &polarity); if (rc) return rc; trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; irq = acpi_register_gsi(NULL, gsi, trigger, polarity); if (irq < 0) return irq; *irqp = irq; return 0; } EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) { if (isa_irq < nr_legacy_irqs() && isa_irq_to_gsi[isa_irq] != INVALID_ACPI_IRQ) { *gsi = isa_irq_to_gsi[isa_irq]; return 0; } return -1; } static int acpi_register_gsi_pic(struct device *dev, u32 gsi, int trigger, int polarity) { #ifdef CONFIG_PCI /* * Make sure all (legacy) PCI IRQs are set as level-triggered. */ if (trigger == ACPI_LEVEL_SENSITIVE) elcr_set_level_irq(gsi); #endif return gsi; } #ifdef CONFIG_X86_LOCAL_APIC static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi, int trigger, int polarity) { int irq = gsi; #ifdef CONFIG_X86_IO_APIC int node; struct irq_alloc_info info; node = dev ? dev_to_node(dev) : NUMA_NO_NODE; trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1; polarity = polarity == ACPI_ACTIVE_HIGH ? 
0 : 1; ioapic_set_alloc_attr(&info, node, trigger, polarity); mutex_lock(&acpi_ioapic_lock); irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info); /* Don't set up the ACPI SCI because it's already set up */ if (irq >= 0 && enable_update_mptable && gsi != acpi_gbl_FADT.sci_interrupt) mp_config_acpi_gsi(dev, gsi, trigger, polarity); mutex_unlock(&acpi_ioapic_lock); #endif return irq; } static void acpi_unregister_gsi_ioapic(u32 gsi) { #ifdef CONFIG_X86_IO_APIC int irq; mutex_lock(&acpi_ioapic_lock); irq = mp_map_gsi_to_irq(gsi, 0, NULL); if (irq > 0) mp_unmap_irq(irq); mutex_unlock(&acpi_ioapic_lock); #endif } #endif int (*__acpi_register_gsi)(struct device *dev, u32 gsi, int trigger, int polarity) = acpi_register_gsi_pic; void (*__acpi_unregister_gsi)(u32 gsi) = NULL; #ifdef CONFIG_ACPI_SLEEP int (*acpi_suspend_lowlevel)(void) = x86_acpi_suspend_lowlevel; #else int (*acpi_suspend_lowlevel)(void); #endif /* * success: return IRQ number (>=0) * failure: return < 0 */ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) { return __acpi_register_gsi(dev, gsi, trigger, polarity); } EXPORT_SYMBOL_GPL(acpi_register_gsi); void acpi_unregister_gsi(u32 gsi) { if (__acpi_unregister_gsi) __acpi_unregister_gsi(gsi); } EXPORT_SYMBOL_GPL(acpi_unregister_gsi); #ifdef CONFIG_X86_LOCAL_APIC static void __init acpi_set_irq_model_ioapic(void) { acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; __acpi_register_gsi = acpi_register_gsi_ioapic; __acpi_unregister_gsi = acpi_unregister_gsi_ioapic; acpi_ioapic = 1; } #endif /* * ACPI based hotplug support for CPU */ #ifdef CONFIG_ACPI_HOTPLUG_CPU #include <acpi/processor.h> static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) { #ifdef CONFIG_ACPI_NUMA int nid; nid = acpi_get_node(handle); if (nid != NUMA_NO_NODE) { set_apicid_to_node(physid, nid); numa_set_node(cpu, nid); } #endif return 0; } int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu) { int cpu; cpu = acpi_register_lapic(physid, acpi_id, ACPI_MADT_ENABLED); if (cpu < 0) { pr_info("Unable to map lapic to logical cpu number\n"); return cpu; } acpi_processor_set_pdc(handle); acpi_map_cpu2node(handle, cpu, physid); *pcpu = cpu; return 0; } EXPORT_SYMBOL(acpi_map_cpu); int acpi_unmap_cpu(int cpu) { #ifdef CONFIG_ACPI_NUMA set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE); #endif per_cpu(x86_cpu_to_apicid, cpu) = -1; set_cpu_present(cpu, false); num_processors--; return (0); } EXPORT_SYMBOL(acpi_unmap_cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) { int ret = -ENOSYS; #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int ioapic_id; u64 addr; struct ioapic_domain_cfg cfg = { .type = IOAPIC_DOMAIN_DYNAMIC, .ops = &mp_ioapic_irqdomain_ops, }; ioapic_id = acpi_get_ioapic_id(handle, gsi_base, &addr); if (ioapic_id < 0) { unsigned long long uid; acpi_status status; status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &uid); if (ACPI_FAILURE(status)) { acpi_handle_warn(handle, "failed to get IOAPIC ID.\n"); return -EINVAL; } ioapic_id = (int)uid; } mutex_lock(&acpi_ioapic_lock); ret = mp_register_ioapic(ioapic_id, phys_addr, gsi_base, &cfg); mutex_unlock(&acpi_ioapic_lock); #endif return ret; } EXPORT_SYMBOL(acpi_register_ioapic); int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) { int ret = -ENOSYS; #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC mutex_lock(&acpi_ioapic_lock); ret = mp_unregister_ioapic(gsi_base); mutex_unlock(&acpi_ioapic_lock); #endif return ret; } 
EXPORT_SYMBOL(acpi_unregister_ioapic); /** * acpi_ioapic_registered - Check whether IOAPIC associated with @gsi_base * has been registered * @handle: ACPI handle of the IOAPIC device * @gsi_base: GSI base associated with the IOAPIC * * Assume caller holds some type of lock to serialize acpi_ioapic_registered() * with acpi_register_ioapic()/acpi_unregister_ioapic(). */ int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base) { int ret = 0; #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC mutex_lock(&acpi_ioapic_lock); ret = mp_ioapic_registered(gsi_base); mutex_unlock(&acpi_ioapic_lock); #endif return ret; } static int __init acpi_parse_sbf(struct acpi_table_header *table) { struct acpi_table_boot *sb = (struct acpi_table_boot *)table; sbf_port = sb->cmos_index; /* Save CMOS port */ return 0; } #ifdef CONFIG_HPET_TIMER #include <asm/hpet.h> static struct resource *hpet_res __initdata; static int __init acpi_parse_hpet(struct acpi_table_header *table) { struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table; if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) { pr_warn("HPET timers must be located in memory.\n"); return -1; } hpet_address = hpet_tbl->address.address; hpet_blockid = hpet_tbl->sequence; /* * Some broken BIOSes advertise HPET at 0x0. We really do not * want to allocate a resource there. */ if (!hpet_address) { pr_warn("HPET id: %#x base: %#lx is invalid\n", hpet_tbl->id, hpet_address); return 0; } #ifdef CONFIG_X86_64 /* * Some even more broken BIOSes advertise HPET at * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add * some noise: */ if (hpet_address == 0xfed0000000000000UL) { if (!hpet_force_user) { pr_warn("HPET id: %#x base: 0xfed0000000000000 is bogus, try hpet=force on the kernel command line to fix it up to 0xfed00000.\n", hpet_tbl->id); hpet_address = 0; return 0; } pr_warn("HPET id: %#x base: 0xfed0000000000000 fixed up to 0xfed00000.\n", hpet_tbl->id); hpet_address >>= 32; } #endif pr_info("HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet_address); /* * Allocate and initialize the HPET firmware resource for adding into * the resource tree during the lateinit timeframe. */ #define HPET_RESOURCE_NAME_SIZE 9 hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE, SMP_CACHE_BYTES); if (!hpet_res) panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE); hpet_res->name = (void *)&hpet_res[1]; hpet_res->flags = IORESOURCE_MEM; snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u", hpet_tbl->sequence); hpet_res->start = hpet_address; hpet_res->end = hpet_address + (1 * 1024) - 1; return 0; } /* * hpet_insert_resource inserts the HPET resources used into the resource * tree. 
*/ static __init int hpet_insert_resource(void) { if (!hpet_res) return 1; return insert_resource(&iomem_resource, hpet_res); } late_initcall(hpet_insert_resource); #else #define acpi_parse_hpet NULL #endif static int __init acpi_parse_fadt(struct acpi_table_header *table) { if (!(acpi_gbl_FADT.boot_flags & ACPI_FADT_LEGACY_DEVICES)) { pr_debug("no legacy devices present\n"); x86_platform.legacy.devices.pnpbios = 0; } if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && !(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) && x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) { pr_debug("i8042 controller is absent\n"); x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT; } if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) { pr_debug("not registering RTC platform device\n"); x86_platform.legacy.rtc = 0; } if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_VGA) { pr_debug("probing for VGA not safe\n"); x86_platform.legacy.no_vga = 1; } #ifdef CONFIG_X86_PM_TIMER /* detect the location of the ACPI PM Timer */ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) { /* FADT rev. 2 */ if (acpi_gbl_FADT.xpm_timer_block.space_id != ACPI_ADR_SPACE_SYSTEM_IO) return 0; pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address; /* * "X" fields are optional extensions to the original V1.0 * fields, so we must selectively expand V1.0 fields if the * corresponding X field is zero. */ if (!pmtmr_ioport) pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; } else { /* FADT rev. 1 */ pmtmr_ioport = acpi_gbl_FADT.pm_timer_block; } if (pmtmr_ioport) pr_info("PM-Timer IO Port: %#x\n", pmtmr_ioport); #endif return 0; } #ifdef CONFIG_X86_LOCAL_APIC /* * Parse LAPIC entries in MADT * returns 0 on success, < 0 on error */ static int __init early_acpi_parse_madt_lapic_addr_ovr(void) { int count; if (!boot_cpu_has(X86_FEATURE_APIC)) return -ENODEV; /* * Note that the LAPIC address is obtained from the MADT (32-bit value) * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value). 
*/ count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0); if (count < 0) { pr_err("Error parsing LAPIC address override entry\n"); return count; } register_lapic_address(acpi_lapic_addr); return count; } static int __init acpi_parse_madt_lapic_entries(void) { int count; int x2count = 0; int ret; struct acpi_subtable_proc madt_proc[2]; if (!boot_cpu_has(X86_FEATURE_APIC)) return -ENODEV; count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_sapic, MAX_LOCAL_APIC); if (!count) { memset(madt_proc, 0, sizeof(madt_proc)); madt_proc[0].id = ACPI_MADT_TYPE_LOCAL_APIC; madt_proc[0].handler = acpi_parse_lapic; madt_proc[1].id = ACPI_MADT_TYPE_LOCAL_X2APIC; madt_proc[1].handler = acpi_parse_x2apic; ret = acpi_table_parse_entries_array(ACPI_SIG_MADT, sizeof(struct acpi_table_madt), madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC); if (ret < 0) { pr_err("Error parsing LAPIC/X2APIC entries\n"); return ret; } count = madt_proc[0].count; x2count = madt_proc[1].count; } if (!count && !x2count) { pr_err("No LAPIC entries present\n"); /* TBD: Cleanup to allow fallback to MPS */ return -ENODEV; } else if (count < 0 || x2count < 0) { pr_err("Error parsing LAPIC entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI, acpi_parse_x2apic_nmi, 0); count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0); if (count < 0 || x2count < 0) { pr_err("Error parsing LAPIC NMI entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } return 0; } #ifdef CONFIG_X86_64 static int __init acpi_parse_mp_wake(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_multiproc_wakeup *mp_wake; if (!IS_ENABLED(CONFIG_SMP)) return -ENODEV; mp_wake = (struct acpi_madt_multiproc_wakeup *)header; if (BAD_MADT_ENTRY(mp_wake, end)) return -EINVAL; acpi_table_print_madt_entry(&header->common); acpi_mp_wake_mailbox_paddr = mp_wake->base_address; apic_update_callback(wakeup_secondary_cpu_64, acpi_wakeup_cpu); return 0; } #endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_IO_APIC static void __init mp_config_acpi_legacy_irqs(void) { int i; struct mpc_intsrc mp_irq; #ifdef CONFIG_EISA /* * Fabricate the legacy ISA bus (bus #31). */ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; #endif set_bit(MP_ISA_BUS, mp_bus_not_pci); pr_debug("Bus #%d is ISA (nIRQs: %d)\n", MP_ISA_BUS, nr_legacy_irqs()); /* * Use the default configuration for the IRQs 0-15. Unless * overridden by (MADT) interrupt source override entries. */ for (i = 0; i < nr_legacy_irqs(); i++) { int ioapic, pin; unsigned int dstapic; int idx; u32 gsi; /* Locate the gsi that irq i maps to. */ if (acpi_isa_irq_to_gsi(i, &gsi)) continue; /* * Locate the IOAPIC that manages the ISA IRQ. */ ioapic = mp_find_ioapic(gsi); if (ioapic < 0) continue; pin = mp_find_ioapic_pin(ioapic, gsi); dstapic = mpc_ioapic_id(ioapic); for (idx = 0; idx < mp_irq_entries; idx++) { struct mpc_intsrc *irq = mp_irqs + idx; /* Do we already have a mapping for this ISA IRQ? 
*/ if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i) break; /* Do we already have a mapping for this IOAPIC pin */ if (irq->dstapic == dstapic && irq->dstirq == pin) break; } if (idx != mp_irq_entries) { pr_debug("ACPI: IRQ%d used by override.\n", i); continue; /* IRQ already used */ } mp_irq.type = MP_INTSRC; mp_irq.irqflag = 0; /* Conforming */ mp_irq.srcbus = MP_ISA_BUS; mp_irq.dstapic = dstapic; mp_irq.irqtype = mp_INT; mp_irq.srcbusirq = i; /* Identity mapped */ mp_irq.dstirq = pin; mp_save_irq(&mp_irq); } } /* * Parse IOAPIC related entries in MADT * returns 0 on success, < 0 on error */ static int __init acpi_parse_madt_ioapic_entries(void) { int count; /* * ACPI interpreter is required to complete interrupt setup, * so if it is off, don't enumerate the io-apics with ACPI. * If MPS is present, it will handle them, * otherwise the system will stay in PIC mode */ if (acpi_disabled || acpi_noirq) return -ENODEV; if (!boot_cpu_has(X86_FEATURE_APIC)) return -ENODEV; /* * if "noapic" boot option, don't look for IO-APICs */ if (ioapic_is_disabled) { pr_info("Skipping IOAPIC probe due to 'noapic' option.\n"); return -ENODEV; } count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic, MAX_IO_APICS); if (!count) { pr_err("No IOAPIC entries present\n"); return -ENODEV; } else if (count < 0) { pr_err("Error parsing IOAPIC entry\n"); return count; } count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, nr_irqs); if (count < 0) { pr_err("Error parsing interrupt source overrides entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } /* * If BIOS did not supply an INT_SRC_OVR for the SCI * pretend we got one so we can set the SCI flags. * But ignore setting up SCI on hardware reduced platforms. */ if (acpi_sci_override_gsi == INVALID_ACPI_IRQ && !acpi_gbl_reduced_hardware) acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0, acpi_gbl_FADT.sci_interrupt); /* Fill in identity legacy mappings where no override */ mp_config_acpi_legacy_irqs(); count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, nr_irqs); if (count < 0) { pr_err("Error parsing NMI SRC entry\n"); /* TBD: Cleanup to allow fallback to MPS */ return count; } return 0; } #else static inline int acpi_parse_madt_ioapic_entries(void) { return -1; } #endif /* !CONFIG_X86_IO_APIC */ static void __init early_acpi_process_madt(void) { #ifdef CONFIG_X86_LOCAL_APIC int error; if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { /* * Parse MADT LAPIC entries */ error = early_acpi_parse_madt_lapic_addr_ovr(); if (!error) { acpi_lapic = 1; smp_found_config = 1; } if (error == -EINVAL) { /* * Dell Precision Workstation 410, 610 come here. */ pr_err("Invalid BIOS MADT, disabling ACPI\n"); disable_acpi(); } } #endif } static void __init acpi_process_madt(void) { #ifdef CONFIG_X86_LOCAL_APIC int error; if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { /* * Parse MADT LAPIC entries */ error = acpi_parse_madt_lapic_entries(); if (!error) { acpi_lapic = 1; /* * Parse MADT IO-APIC entries */ mutex_lock(&acpi_ioapic_lock); error = acpi_parse_madt_ioapic_entries(); mutex_unlock(&acpi_ioapic_lock); if (!error) { acpi_set_irq_model_ioapic(); smp_found_config = 1; } #ifdef CONFIG_X86_64 /* * Parse MADT MP Wake entry. */ acpi_table_parse_madt(ACPI_MADT_TYPE_MULTIPROC_WAKEUP, acpi_parse_mp_wake, 1); #endif } if (error == -EINVAL) { /* * Dell Precision Workstation 410, 610 come here. 
*/ pr_err("Invalid BIOS MADT, disabling ACPI\n"); disable_acpi(); } } else { /* * ACPI found no MADT, and so ACPI wants UP PIC mode. * In the event an MPS table was found, forget it. * Boot with "acpi=off" to use MPS on such a system. */ if (smp_found_config) { pr_warn("No APIC-table, disabling MPS\n"); smp_found_config = 0; } } /* * ACPI supports both logical (e.g. Hyper-Threading) and physical * processors, where MPS only supports physical. */ if (acpi_lapic && acpi_ioapic) pr_info("Using ACPI (MADT) for SMP configuration information\n"); else if (acpi_lapic) pr_info("Using ACPI for processor (LAPIC) configuration information\n"); #endif return; } static int __init disable_acpi_irq(const struct dmi_system_id *d) { if (!acpi_force) { pr_notice("%s detected: force use of acpi=noirq\n", d->ident); acpi_noirq_set(); } return 0; } static int __init disable_acpi_pci(const struct dmi_system_id *d) { if (!acpi_force) { pr_notice("%s detected: force use of pci=noacpi\n", d->ident); acpi_disable_pci(); } return 0; } static int __init disable_acpi_xsdt(const struct dmi_system_id *d) { if (!acpi_force) { pr_notice("%s detected: force use of acpi=rsdt\n", d->ident); acpi_gbl_do_not_use_xsdt = TRUE; } else { pr_notice("Warning: DMI blacklist says broken, but acpi XSDT forced\n"); } return 0; } static int __init dmi_disable_acpi(const struct dmi_system_id *d) { if (!acpi_force) { pr_notice("%s detected: acpi off\n", d->ident); disable_acpi(); } else { pr_notice("Warning: DMI blacklist says broken, but acpi forced\n"); } return 0; } /* * Force ignoring BIOS IRQ0 override */ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) { if (!acpi_skip_timer_override) { pr_notice("%s detected: Ignoring BIOS IRQ0 override\n", d->ident); acpi_skip_timer_override = 1; } return 0; } /* * ACPI offers an alternative platform interface model that removes * ACPI hardware requirements for platforms that do not implement * the PC Architecture. * * We initialize the Hardware-reduced ACPI model here: */ void __init acpi_generic_reduced_hw_init(void) { /* * Override x86_init functions and bypass legacy PIC in * hardware reduced ACPI mode. */ x86_init.timers.timer_init = x86_init_noop; x86_init.irqs.pre_vector_init = x86_init_noop; legacy_pic = &null_legacy_pic; } static void __init acpi_reduced_hw_init(void) { if (acpi_gbl_reduced_hardware) x86_init.acpi.reduced_hw_early_init(); } /* * If your system is blacklisted here, but you find that acpi=force * works for you, please contact [email protected] */ static const struct dmi_system_id acpi_dmi_table[] __initconst = { /* * Boxes that need ACPI disabled */ { .callback = dmi_disable_acpi, .ident = "IBM Thinkpad", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), DMI_MATCH(DMI_BOARD_NAME, "2629H1G"), }, }, /* * Boxes that need ACPI PCI IRQ routing disabled */ { .callback = disable_acpi_irq, .ident = "ASUS A7V", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"), DMI_MATCH(DMI_BOARD_NAME, "<A7V>"), /* newer BIOS, Revision 1011, does work */ DMI_MATCH(DMI_BIOS_VERSION, "ASUS A7V ACPI BIOS Revision 1007"), }, }, { /* * Latest BIOS for IBM 600E (1.16) has bad pcinum * for LPC bridge, which is needed for the PCI * interrupt links to work. DSDT fix is in bug 5966. 
* 2645, 2646 model numbers are shared with 600/600E/600X */ .callback = disable_acpi_irq, .ident = "IBM Thinkpad 600 Series 2645", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), DMI_MATCH(DMI_BOARD_NAME, "2645"), }, }, { .callback = disable_acpi_irq, .ident = "IBM Thinkpad 600 Series 2646", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), DMI_MATCH(DMI_BOARD_NAME, "2646"), }, }, /* * Boxes that need ACPI PCI IRQ routing and PCI scan disabled */ { /* _BBN 0 bug */ .callback = disable_acpi_pci, .ident = "ASUS PR-DLS", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"), DMI_MATCH(DMI_BIOS_VERSION, "ASUS PR-DLS ACPI BIOS Revision 1010"), DMI_MATCH(DMI_BIOS_DATE, "03/21/2003") }, }, { .callback = disable_acpi_pci, .ident = "Acer TravelMate 36x Laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, }, /* * Boxes that need ACPI XSDT use disabled due to corrupted tables */ { .callback = disable_acpi_xsdt, .ident = "Advantech DAC-BJ01", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "NEC"), DMI_MATCH(DMI_PRODUCT_NAME, "Bearlake CRB Board"), DMI_MATCH(DMI_BIOS_VERSION, "V1.12"), DMI_MATCH(DMI_BIOS_DATE, "02/01/2011"), }, }, {} }; /* second table for DMI checks that should run after early-quirks */ static const struct dmi_system_id acpi_dmi_table_late[] __initconst = { /* * HP laptops which use a DSDT reporting as HP/SB400/10000, * which includes some code which overrides all temperature * trip points to 16C if the INTIN2 input of the I/O APIC * is enabled. This input is incorrectly designated the * ISA IRQ 0 via an interrupt source override even though * it is wired to the output of the master 8259A and INTIN0 * is not connected at all. Force ignoring BIOS IRQ0 * override in that cases. */ { .callback = dmi_ignore_irq0_timer_override, .ident = "HP nx6115 laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "HP NX6125 laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6125"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "HP NX6325 laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "HP 6715b laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"), }, }, { .callback = dmi_ignore_irq0_timer_override, .ident = "FUJITSU SIEMENS", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"), }, }, {} }; /* * acpi_boot_table_init() and acpi_boot_init() * called from setup_arch(), always. * 1. checksums all tables * 2. enumerates lapics * 3. enumerates io-apics * * acpi_table_init() is separate to allow reading SRAT without * other side effects. * * side effects of acpi_boot_init: * acpi_lapic = 1 if LAPIC found * acpi_ioapic = 1 if IOAPIC found * if (acpi_lapic && acpi_ioapic) smp_found_config = 1; * if acpi_blacklisted() acpi_disabled = 1; * acpi_irq_model=... * ... */ void __init acpi_boot_table_init(void) { dmi_check_system(acpi_dmi_table); /* * If acpi_disabled, bail out */ if (acpi_disabled) return; /* * Initialize the ACPI boot-time table parser. 
*/ if (acpi_locate_initial_tables()) disable_acpi(); else acpi_reserve_initial_tables(); } int __init early_acpi_boot_init(void) { if (acpi_disabled) return 1; acpi_table_init_complete(); acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); /* * blacklist may disable ACPI entirely */ if (acpi_blacklisted()) { if (acpi_force) { pr_warn("acpi=force override\n"); } else { pr_warn("Disabling ACPI support\n"); disable_acpi(); return 1; } } /* * Process the Multiple APIC Description Table (MADT), if present */ early_acpi_process_madt(); /* * Hardware-reduced ACPI mode initialization: */ acpi_reduced_hw_init(); return 0; } int __init acpi_boot_init(void) { /* those are executed after early-quirks are executed */ dmi_check_system(acpi_dmi_table_late); /* * If acpi_disabled, bail out */ if (acpi_disabled) return 1; acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); /* * set sci_int and PM timer address */ acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt); /* * Process the Multiple APIC Description Table (MADT), if present */ acpi_process_madt(); acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet); if (IS_ENABLED(CONFIG_ACPI_BGRT) && !acpi_nobgrt) acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt); if (!acpi_noirq) x86_init.pci.init = pci_acpi_init; /* Do not enable ACPI SPCR console by default */ acpi_parse_spcr(earlycon_acpi_spcr_enable, false); return 0; } static int __init parse_acpi(char *arg) { if (!arg) return -EINVAL; /* "acpi=off" disables both ACPI table parsing and interpreter */ if (strcmp(arg, "off") == 0) { disable_acpi(); } /* acpi=force to over-ride black-list */ else if (strcmp(arg, "force") == 0) { acpi_force = 1; acpi_disabled = 0; } /* acpi=strict disables out-of-spec workarounds */ else if (strcmp(arg, "strict") == 0) { acpi_strict = 1; } /* acpi=rsdt use RSDT instead of XSDT */ else if (strcmp(arg, "rsdt") == 0) { acpi_gbl_do_not_use_xsdt = TRUE; } /* "acpi=noirq" disables ACPI interrupt routing */ else if (strcmp(arg, "noirq") == 0) { acpi_noirq_set(); } /* "acpi=copy_dsdt" copies DSDT */ else if (strcmp(arg, "copy_dsdt") == 0) { acpi_gbl_copy_dsdt_locally = 1; } /* "acpi=nocmcff" disables FF mode for corrected errors */ else if (strcmp(arg, "nocmcff") == 0) { acpi_disable_cmcff = 1; } else { /* Core will printk when we return error. */ return -EINVAL; } return 0; } early_param("acpi", parse_acpi); static int __init parse_acpi_bgrt(char *arg) { acpi_nobgrt = true; return 0; } early_param("bgrt_disable", parse_acpi_bgrt); /* FIXME: Using pci= for an ACPI parameter is a travesty. 
*/ static int __init parse_pci(char *arg) { if (arg && strcmp(arg, "noacpi") == 0) acpi_disable_pci(); return 0; } early_param("pci", parse_pci); int __init acpi_mps_check(void) { #if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE) /* mptable code is not built-in*/ if (acpi_disabled || acpi_noirq) { pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n"); return 1; } #endif return 0; } #ifdef CONFIG_X86_IO_APIC static int __init parse_acpi_skip_timer_override(char *arg) { acpi_skip_timer_override = 1; return 0; } early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override); static int __init parse_acpi_use_timer_override(char *arg) { acpi_use_timer_override = 1; return 0; } early_param("acpi_use_timer_override", parse_acpi_use_timer_override); #endif /* CONFIG_X86_IO_APIC */ static int __init setup_acpi_sci(char *s) { if (!s) return -EINVAL; if (!strcmp(s, "edge")) acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE | (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK); else if (!strcmp(s, "level")) acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL | (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK); else if (!strcmp(s, "high")) acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH | (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK); else if (!strcmp(s, "low")) acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW | (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK); else return -EINVAL; return 0; } early_param("acpi_sci", setup_acpi_sci); int __acpi_acquire_global_lock(unsigned int *lock) { unsigned int old, new, val; old = READ_ONCE(*lock); do { val = (old >> 1) & 0x1; new = (old & ~0x3) + 2 + val; } while (!try_cmpxchg(lock, &old, new)); if (val) return 0; return -1; } int __acpi_release_global_lock(unsigned int *lock) { unsigned int old, new; old = READ_ONCE(*lock); do { new = old & ~0x3; } while (!try_cmpxchg(lock, &old, new)); return old & 0x1; } void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size) { e820__range_add(addr, size, E820_TYPE_NVS); e820__update_table_print(); } void x86_default_set_root_pointer(u64 addr) { boot_params.acpi_rsdp_addr = addr; } u64 x86_default_get_root_pointer(void) { return boot_params.acpi_rsdp_addr; }
linux-master
arch/x86/kernel/acpi/boot.c
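/*
 * Illustrative sketch only -- not part of boot.c above.  It walks the FACS
 * global-lock bit protocol that __acpi_acquire_global_lock() and
 * __acpi_release_global_lock() implement: bit 0 of the lock word is
 * "pending", bit 1 is "owned".  Acquire clears both bits, always sets
 * "owned", and re-sets "pending" when the lock was already owned; it returns
 * -1 when it took ownership and 0 when it only queued as pending, mirroring
 * the return values of the kernel routine.  This version is single-threaded
 * and uses plain stores instead of try_cmpxchg(), purely to show the
 * arithmetic; all names here are invented for the demonstration.
 */
#include <stdio.h>

static int sketch_acquire(unsigned int *lock)
{
	unsigned int old = *lock;
	unsigned int val = (old >> 1) & 0x1;		/* previous "owned" bit */
	unsigned int new = (old & ~0x3) + 2 + val;	/* owned = 1, pending = val */

	*lock = new;
	return val ? 0 : -1;		/* -1: lock taken, 0: wait for release */
}

static int sketch_release(unsigned int *lock)
{
	unsigned int old = *lock;

	*lock = old & ~0x3;		/* clear owned and pending */
	return old & 0x1;		/* report whether a waiter was pending */
}

int main(void)
{
	unsigned int lock = 0;
	int ret;

	ret = sketch_acquire(&lock);
	printf("first acquire:     ret=%d lock=%#x\n", ret, lock);

	ret = sketch_acquire(&lock);
	printf("contended acquire: ret=%d lock=%#x\n", ret, lock);

	ret = sketch_release(&lock);
	printf("release:           pending=%d lock=%#x\n", ret, lock);
	return 0;
}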
// SPDX-License-Identifier: GPL-2.0 #include <linux/bitops.h> #include <linux/delay.h> #include <linux/isa-dma.h> #include <linux/pci.h> #include <asm/dma.h> #include <linux/io.h> #include <asm/processor-cyrix.h> #include <asm/processor-flags.h> #include <linux/timer.h> #include <asm/pci-direct.h> #include <asm/tsc.h> #include <asm/cpufeature.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include "cpu.h" /* * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU */ static void __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) { unsigned char ccr2, ccr3; /* we test for DEVID by checking whether CCR3 is writable */ ccr3 = getCx86(CX86_CCR3); setCx86(CX86_CCR3, ccr3 ^ 0x80); getCx86(0xc0); /* dummy to change bus */ if (getCx86(CX86_CCR3) == ccr3) { /* no DEVID regs. */ ccr2 = getCx86(CX86_CCR2); setCx86(CX86_CCR2, ccr2 ^ 0x04); getCx86(0xc0); /* dummy */ if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */ *dir0 = 0xfd; else { /* Cx486S A step */ setCx86(CX86_CCR2, ccr2); *dir0 = 0xfe; } } else { setCx86(CX86_CCR3, ccr3); /* restore CCR3 */ /* read DIR0 and DIR1 CPU registers */ *dir0 = getCx86(CX86_DIR0); *dir1 = getCx86(CX86_DIR1); } } static void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) { unsigned long flags; local_irq_save(flags); __do_cyrix_devid(dir0, dir1); local_irq_restore(flags); } /* * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in * order to identify the Cyrix CPU model after we're out of setup.c * * Actually since bugs.h doesn't even reference this perhaps someone should * fix the documentation ??? */ static unsigned char Cx86_dir0_msb = 0; static const char Cx86_model[][9] = { "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", "M II ", "Unknown" }; static const char Cx486_name[][5] = { "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", "SRx2", "DRx2" }; static const char Cx486S_name[][4] = { "S", "S2", "Se", "S2e" }; static const char Cx486D_name[][4] = { "DX", "DX2", "?", "?", "?", "DX4" }; static char Cx86_cb[] = "?.5x Core/Bus Clock"; static const char cyrix_model_mult1[] = "12??43"; static const char cyrix_model_mult2[] = "12233445"; /* * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old * BIOSes for compatibility with DOS games. This makes the udelay loop * work correctly, and improves performance. * * FIXME: our newer udelay uses the tsc. 
We don't need to frob with SLOP */ static void check_cx686_slop(struct cpuinfo_x86 *c) { unsigned long flags; if (Cx86_dir0_msb == 3) { unsigned char ccr3, ccr5; local_irq_save(flags); ccr3 = getCx86(CX86_CCR3); setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ ccr5 = getCx86(CX86_CCR5); if (ccr5 & 2) setCx86(CX86_CCR5, ccr5 & 0xfd); /* reset SLOP */ setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ local_irq_restore(flags); if (ccr5 & 2) { /* possible wrong calibration done */ pr_info("Recalibrating delay loop with SLOP bit reset\n"); calibrate_delay(); c->loops_per_jiffy = loops_per_jiffy; } } } static void set_cx86_reorder(void) { u8 ccr3; pr_info("Enable Memory access reorder on Cyrix/NSC processor.\n"); ccr3 = getCx86(CX86_CCR3); setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ /* Load/Store Serialize to mem access disable (=reorder it) */ setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80); /* set load/store serialize from 1GB to 4GB */ ccr3 |= 0xe0; setCx86(CX86_CCR3, ccr3); } static void set_cx86_memwb(void) { pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); /* CCR2 bit 2: unlock NW bit */ setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04); /* set 'Not Write-through' */ write_cr0(read_cr0() | X86_CR0_NW); /* CCR2 bit 2: lock NW bit and set WT1 */ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); } /* * Configure later MediaGX and/or Geode processor. */ static void geode_configure(void) { unsigned long flags; u8 ccr3; local_irq_save(flags); /* Suspend on halt power saving and enable #SUSP pin */ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); ccr3 = getCx86(CX86_CCR3); setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ /* FPU fast, DTE cache, Mem bypass */ setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38); setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ set_cx86_memwb(); set_cx86_reorder(); local_irq_restore(flags); } static void early_init_cyrix(struct cpuinfo_x86 *c) { unsigned char dir0, dir0_msn, dir1 = 0; __do_cyrix_devid(&dir0, &dir1); dir0_msn = dir0 >> 4; /* identifies CPU "family" */ switch (dir0_msn) { case 3: /* 6x86/6x86L */ /* Emulate MTRRs using Cyrix's ARRs. */ set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); break; case 5: /* 6x86MX/M II */ /* Emulate MTRRs using Cyrix's ARRs. */ set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); break; } } static void init_cyrix(struct cpuinfo_x86 *c) { unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0; char *buf = c->x86_model_id; const char *p = NULL; /* * Bit 31 in normal CPUID used for nonstandard 3DNow ID; * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ clear_cpu_cap(c, 0*32+31); /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */ if (test_cpu_cap(c, 1*32+24)) { clear_cpu_cap(c, 1*32+24); set_cpu_cap(c, X86_FEATURE_CXMMX); } do_cyrix_devid(&dir0, &dir1); check_cx686_slop(c); Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family" */ dir0_lsn = dir0 & 0xf; /* model or clock multiplier */ /* common case step number/rev -- exceptions handled below */ c->x86_model = (dir1 >> 4) + 1; c->x86_stepping = dir1 & 0xf; /* Now cook; the original recipe is by Channing Corn, from Cyrix. * We do the same thing for each generation: we work out * the model, multiplier and stepping. Black magic included, * to make the silicon step/rev numbers match the printed ones. */ switch (dir0_msn) { unsigned char tmp; case 0: /* Cx486SLC/DLC/SRx/DRx */ p = Cx486_name[dir0_lsn & 7]; break; case 1: /* Cx486S/DX/DX2/DX4 */ p = (dir0_lsn & 8) ? 
Cx486D_name[dir0_lsn & 5] : Cx486S_name[dir0_lsn & 3]; break; case 2: /* 5x86 */ Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5]; p = Cx86_cb+2; break; case 3: /* 6x86/6x86L */ Cx86_cb[1] = ' '; Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5]; if (dir1 > 0x21) { /* 686L */ Cx86_cb[0] = 'L'; p = Cx86_cb; (c->x86_model)++; } else /* 686 */ p = Cx86_cb+1; /* Emulate MTRRs using Cyrix's ARRs. */ set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); /* 6x86's contain this bug */ set_cpu_bug(c, X86_BUG_COMA); break; case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */ case 11: /* GX1 with inverted Device ID */ #ifdef CONFIG_PCI { u32 vendor, device; /* * It isn't really a PCI quirk directly, but the cure is the * same. The MediaGX has deep magic SMM stuff that handles the * SB emulation. It throws away the fifo on disable_dma() which * is wrong and ruins the audio. * * Bug2: VSA1 has a wrap bug so that using maximum sized DMA * causes bad things. According to NatSemi VSA2 has another * bug to do with 'hlt'. I've not seen any boards using VSA2 * and X doesn't seem to support it either so who cares 8). * VSA1 we work around however. */ pr_info("Working around Cyrix MediaGX virtual DMA bugs.\n"); isa_dma_bridge_buggy = 2; /* We do this before the PCI layer is running. However we are safe here as we know the bridge must be a Cyrix companion and must be present */ vendor = read_pci_config_16(0, 0, 0x12, PCI_VENDOR_ID); device = read_pci_config_16(0, 0, 0x12, PCI_DEVICE_ID); /* * The 5510/5520 companion chips have a funky PIT. */ if (vendor == PCI_VENDOR_ID_CYRIX && (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520)) mark_tsc_unstable("cyrix 5510/5520 detected"); } #endif c->x86_cache_size = 16; /* Yep 16K integrated cache that's it */ /* GXm supports extended cpuid levels 'ala' AMD */ if (c->cpuid_level == 2) { /* Enable cxMMX extensions (GX1 Datasheet 54) */ setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1); /* * GXm : 0x30 ... 0x5f GXm datasheet 51 * GXlv: 0x6x GXlv datasheet 54 * ? : 0x7x * GX1 : 0x8x GX1 datasheet 56 */ if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f)) geode_configure(); return; } else { /* MediaGX */ Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4'; p = Cx86_cb+2; c->x86_model = (dir1 & 0x20) ? 1 : 2; } break; case 5: /* 6x86MX/M II */ if (dir1 > 7) { dir0_msn++; /* M II */ /* Enable MMX extensions (App note 108) */ setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1); } else { /* A 6x86MX - it has the bug. */ set_cpu_bug(c, X86_BUG_COMA); } tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0; Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7]; p = Cx86_cb+tmp; if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20)) (c->x86_model)++; /* Emulate MTRRs using Cyrix's ARRs. */ set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); break; case 0xf: /* Cyrix 486 without DEVID registers */ switch (dir0_lsn) { case 0xd: /* either a 486SLC or DLC w/o DEVID */ dir0_msn = 0; p = Cx486_name[!!boot_cpu_has(X86_FEATURE_FPU)]; break; case 0xe: /* a 486S A step */ dir0_msn = 0; p = Cx486S_name[0]; break; } break; default: /* unknown (shouldn't happen, we know everyone ;-) */ dir0_msn = 7; break; } strcpy(buf, Cx86_model[dir0_msn & 7]); if (p) strcat(buf, p); return; } /* * Handle National Semiconductor branded processors */ static void init_nsc(struct cpuinfo_x86 *c) { /* * There may be GX1 processors in the wild that are branded * NSC and not Cyrix. 
* * This function only handles the GX processor, and kicks every * thing else to the Cyrix init function above - that should * cover any processors that might have been branded differently * after NSC acquired Cyrix. * * If this breaks your GX1 horribly, please e-mail * [email protected] to tell us. */ /* Handle the GX (Formally known as the GX2) */ if (c->x86 == 5 && c->x86_model == 5) cpu_detect_cache_sizes(c); else init_cyrix(c); } /* * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected * by the fact that they preserve the flags across the division of 5/2. * PII and PPro exhibit this behavior too, but they have cpuid available. */ /* * Perform the Cyrix 5/2 test. A Cyrix won't change * the flags, while other 486 chips will. */ static inline int test_cyrix_52div(void) { unsigned int test; __asm__ __volatile__( "sahf\n\t" /* clear flags (%eax = 0x0005) */ "div %b2\n\t" /* divide 5 by 2 */ "lahf" /* store flags into %ah */ : "=a" (test) : "0" (5), "q" (2) : "cc"); /* AH is 0x02 on Cyrix after the divide.. */ return (unsigned char) (test >> 8) == 0x02; } static void cyrix_identify(struct cpuinfo_x86 *c) { /* Detect Cyrix with disabled CPUID */ if (c->x86 == 4 && test_cyrix_52div()) { unsigned char dir0, dir1; strcpy(c->x86_vendor_id, "CyrixInstead"); c->x86_vendor = X86_VENDOR_CYRIX; /* Actually enable cpuid on the older cyrix */ /* Retrieve CPU revisions */ do_cyrix_devid(&dir0, &dir1); dir0 >>= 4; /* Check it is an affected model */ if (dir0 == 5 || dir0 == 3) { unsigned char ccr3; unsigned long flags; pr_info("Enabling CPUID on Cyrix processor.\n"); local_irq_save(flags); ccr3 = getCx86(CX86_CCR3); /* enable MAPEN */ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable cpuid */ setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80); /* disable MAPEN */ setCx86(CX86_CCR3, ccr3); local_irq_restore(flags); } } } static const struct cpu_dev cyrix_cpu_dev = { .c_vendor = "Cyrix", .c_ident = { "CyrixInstead" }, .c_early_init = early_init_cyrix, .c_init = init_cyrix, .c_identify = cyrix_identify, .c_x86_vendor = X86_VENDOR_CYRIX, }; cpu_dev_register(cyrix_cpu_dev); static const struct cpu_dev nsc_cpu_dev = { .c_vendor = "NSC", .c_ident = { "Geode by NSC" }, .c_init = init_nsc, .c_x86_vendor = X86_VENDOR_NSC, }; cpu_dev_register(nsc_cpu_dev);
linux-master
arch/x86/kernel/cpu/cyrix.c
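/*
 * Illustrative sketch only -- not part of cyrix.c above.  It shows the
 * common-case DIR0/DIR1 decode that init_cyrix() starts from, before any of
 * the per-family exceptions: the DIR0 high nibble picks the name from the
 * Cx86_model[] table, DIR1 carries the model (high nibble plus one) and the
 * stepping (low nibble).  The register values below are made up for the
 * demonstration; real values come from getCx86(CX86_DIR0)/getCx86(CX86_DIR1).
 */
#include <stdio.h>

static const char *const model_name[8] = {
	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", "M II ", "Unknown"
};

int main(void)
{
	unsigned char dir0 = 0x31;	/* hypothetical: family nibble 3 -> 6x86 */
	unsigned char dir1 = 0x20;	/* hypothetical: model 3, stepping 0 */

	unsigned char family   = dir0 >> 4;
	unsigned char model    = (dir1 >> 4) + 1;
	unsigned char stepping = dir1 & 0xf;

	printf("%s: family=%u model=%u stepping=%u\n",
	       model_name[family & 7], family, model, stepping);
	return 0;
}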
// SPDX-License-Identifier: GPL-2.0
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/slab.h>

/**
 * x86_match_cpu - match current CPU against an array of x86_cpu_ids
 * @match: Pointer to array of x86_cpu_ids. Last entry terminated with
 *         {}.
 *
 * Return the entry if the current CPU matches the entries in the
 * passed x86_cpu_id match table. Otherwise NULL. The match table
 * contains vendor (X86_VENDOR_*), family, model and feature bits or
 * respective wildcard entries.
 *
 * A typical table entry would be to match a specific CPU
 *
 * X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_BROADWELL,
 *                                    X86_FEATURE_ANY, NULL);
 *
 * Fields can be wildcarded with %X86_VENDOR_ANY, %X86_FAMILY_ANY,
 * %X86_MODEL_ANY, %X86_FEATURE_ANY (except for vendor)
 *
 * asm/cpu_device_id.h contains a set of useful macros which are shortcuts
 * for various common selections. The above can be shortened to:
 *
 * X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, NULL);
 *
 * Arrays used to match for this should also be declared using
 * MODULE_DEVICE_TABLE(x86cpu, ...)
 *
 * This always matches against the boot cpu, assuming models and features are
 * consistent over all CPUs.
 */
const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
{
	const struct x86_cpu_id *m;
	struct cpuinfo_x86 *c = &boot_cpu_data;

	for (m = match;
	     m->vendor | m->family | m->model | m->steppings | m->feature;
	     m++) {
		if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor)
			continue;
		if (m->family != X86_FAMILY_ANY && c->x86 != m->family)
			continue;
		if (m->model != X86_MODEL_ANY && c->x86_model != m->model)
			continue;
		if (m->steppings != X86_STEPPING_ANY &&
		    !(BIT(c->x86_stepping) & m->steppings))
			continue;
		if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature))
			continue;
		return m;
	}
	return NULL;
}
EXPORT_SYMBOL(x86_match_cpu);

static const struct x86_cpu_desc *
x86_match_cpu_with_stepping(const struct x86_cpu_desc *match)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	const struct x86_cpu_desc *m;

	for (m = match; m->x86_family | m->x86_model; m++) {
		if (c->x86_vendor != m->x86_vendor)
			continue;
		if (c->x86 != m->x86_family)
			continue;
		if (c->x86_model != m->x86_model)
			continue;
		if (c->x86_stepping != m->x86_stepping)
			continue;
		return m;
	}
	return NULL;
}

bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table)
{
	const struct x86_cpu_desc *res = x86_match_cpu_with_stepping(table);

	if (!res || res->x86_microcode_rev > boot_cpu_data.microcode)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(x86_cpu_has_min_microcode_rev);
linux-master
arch/x86/kernel/cpu/match.c
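/*
 * Illustrative sketch only -- not part of match.c above.  It is a stand-alone
 * model of the wildcard rules x86_match_cpu() applies: vendor, family and
 * model are compared only when the table entry is not the "any" wildcard,
 * steppings is a bitmask of acceptable stepping values (0 meaning any), and
 * the table ends with an all-zero terminator.  The structs, the ANY constant
 * and the sample data are invented for this demonstration; real kernel tables
 * use struct x86_cpu_id and the macros from asm/cpu_device_id.h.
 */
#include <stdio.h>

#define ANY 0xffff

struct mock_id {
	unsigned short vendor, family, model, steppings;
};

struct mock_cpu {
	unsigned short vendor, family, model, stepping;
};

static const struct mock_id *mock_match(const struct mock_id *m,
					const struct mock_cpu *c)
{
	for (; m->vendor | m->family | m->model | m->steppings; m++) {
		if (m->vendor != ANY && m->vendor != c->vendor)
			continue;
		if (m->family != ANY && m->family != c->family)
			continue;
		if (m->model != ANY && m->model != c->model)
			continue;
		if (m->steppings && !(m->steppings & (1u << c->stepping)))
			continue;
		return m;
	}
	return NULL;
}

int main(void)
{
	static const struct mock_id table[] = {
		{ 1, 6, 0x8e, 1u << 9 },	/* one model, stepping 9 only */
		{ 1, 6, ANY, 0 },		/* any model of family 6 */
		{ 0 }				/* terminator */
	};
	struct mock_cpu cpu = { 1, 6, 0x8e, 9 };
	const struct mock_id *hit = mock_match(table, &cpu);

	if (hit)
		printf("matched table entry %td\n", hit - table);
	else
		printf("no match\n");
	return 0;
}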
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/pgtable.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/smp.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/semaphore.h> #include <linux/thread_info.h> #include <linux/init.h> #include <linux/uaccess.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/cpuhotplug.h> #include <asm/cpufeature.h> #include <asm/msr.h> #include <asm/bugs.h> #include <asm/cpu.h> #include <asm/intel-family.h> #include <asm/microcode.h> #include <asm/hwcap2.h> #include <asm/elf.h> #include <asm/cpu_device_id.h> #include <asm/cmdline.h> #include <asm/traps.h> #include <asm/resctrl.h> #include <asm/numa.h> #include <asm/thermal.h> #ifdef CONFIG_X86_64 #include <linux/topology.h> #endif #include "cpu.h" #ifdef CONFIG_X86_LOCAL_APIC #include <asm/mpspec.h> #include <asm/apic.h> #endif enum split_lock_detect_state { sld_off = 0, sld_warn, sld_fatal, sld_ratelimit, }; /* * Default to sld_off because most systems do not support split lock detection. * sld_state_setup() will switch this to sld_warn on systems that support * split lock/bus lock detect, unless there is a command line override. */ static enum split_lock_detect_state sld_state __ro_after_init = sld_off; static u64 msr_test_ctrl_cache __ro_after_init; /* * With a name like MSR_TEST_CTL it should go without saying, but don't touch * MSR_TEST_CTL unless the CPU is one of the whitelisted models. Writing it * on CPUs that do not support SLD can cause fireworks, even when writing '0'. */ static bool cpu_model_supports_sld __ro_after_init; /* * Processors which have self-snooping capability can handle conflicting * memory type across CPUs by snooping its own cache. However, there exists * CPU models in which having conflicting memory types still leads to * unpredictable behavior, machine check errors, or hangs. Clear this * feature to prevent its use on machines with known erratas. */ static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c) { switch (c->x86_model) { case INTEL_FAM6_CORE_YONAH: case INTEL_FAM6_CORE2_MEROM: case INTEL_FAM6_CORE2_MEROM_L: case INTEL_FAM6_CORE2_PENRYN: case INTEL_FAM6_CORE2_DUNNINGTON: case INTEL_FAM6_NEHALEM: case INTEL_FAM6_NEHALEM_G: case INTEL_FAM6_NEHALEM_EP: case INTEL_FAM6_NEHALEM_EX: case INTEL_FAM6_WESTMERE: case INTEL_FAM6_WESTMERE_EP: case INTEL_FAM6_SANDYBRIDGE: setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP); } } static bool ring3mwait_disabled __read_mostly; static int __init ring3mwait_disable(char *__unused) { ring3mwait_disabled = true; return 1; } __setup("ring3mwait=disable", ring3mwait_disable); static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c) { /* * Ring 3 MONITOR/MWAIT feature cannot be detected without * cpu model and family comparison. */ if (c->x86 != 6) return; switch (c->x86_model) { case INTEL_FAM6_XEON_PHI_KNL: case INTEL_FAM6_XEON_PHI_KNM: break; default: return; } if (ring3mwait_disabled) return; set_cpu_cap(c, X86_FEATURE_RING3MWAIT); this_cpu_or(msr_misc_features_shadow, 1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT); if (c == &boot_cpu_data) ELF_HWCAP2 |= HWCAP2_RING3MWAIT; } /* * Early microcode releases for the Spectre v2 mitigation were broken. 
* Information taken from; * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf * - https://kb.vmware.com/s/article/52345 * - Microcode revisions observed in the wild * - Release note from 20180108 microcode release */ struct sku_microcode { u8 model; u8 stepping; u32 microcode; }; static const struct sku_microcode spectre_bad_microcodes[] = { { INTEL_FAM6_KABYLAKE, 0x0B, 0x80 }, { INTEL_FAM6_KABYLAKE, 0x0A, 0x80 }, { INTEL_FAM6_KABYLAKE, 0x09, 0x80 }, { INTEL_FAM6_KABYLAKE_L, 0x0A, 0x80 }, { INTEL_FAM6_KABYLAKE_L, 0x09, 0x80 }, { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, { INTEL_FAM6_BROADWELL, 0x04, 0x28 }, { INTEL_FAM6_BROADWELL_G, 0x01, 0x1b }, { INTEL_FAM6_BROADWELL_D, 0x02, 0x14 }, { INTEL_FAM6_BROADWELL_D, 0x03, 0x07000011 }, { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 }, { INTEL_FAM6_HASWELL_L, 0x01, 0x21 }, { INTEL_FAM6_HASWELL_G, 0x01, 0x18 }, { INTEL_FAM6_HASWELL, 0x03, 0x23 }, { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, /* Observed in the wild */ { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b }, { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 }, }; static bool bad_spectre_microcode(struct cpuinfo_x86 *c) { int i; /* * We know that the hypervisor lie to us on the microcode version so * we may as well hope that it is running the correct version. */ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) return false; if (c->x86 != 6) return false; for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { if (c->x86_model == spectre_bad_microcodes[i].model && c->x86_stepping == spectre_bad_microcodes[i].stepping) return (c->microcode <= spectre_bad_microcodes[i].microcode); } return false; } static void early_init_intel(struct cpuinfo_x86 *c) { u64 misc_enable; /* Unmask CPUID levels if masked: */ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) { c->cpuid_level = cpuid_eax(0); get_cpu_cap(c); } } if ((c->x86 == 0xf && c->x86_model >= 0x03) || (c->x86 == 0x6 && c->x86_model >= 0x0e)) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) c->microcode = intel_get_microcode_revision(); /* Now if any of them are set, check the blacklist and clear the lot */ if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) || cpu_has(c, X86_FEATURE_INTEL_STIBP) || cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) || cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) { pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n"); setup_clear_cpu_cap(X86_FEATURE_IBRS); setup_clear_cpu_cap(X86_FEATURE_IBPB); setup_clear_cpu_cap(X86_FEATURE_STIBP); setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL); setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL); setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); setup_clear_cpu_cap(X86_FEATURE_SSBD); setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD); } /* * Atom erratum AAE44/AAF40/AAG38/AAH41: * * A race condition between speculative fetches and invalidating * a large page. This is worked around in microcode, but we * need the microcode to have already been loaded... so if it is * not, recommend a BIOS update and disable large pages. 
*/ if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 && c->microcode < 0x20e) { pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n"); clear_cpu_cap(c, X86_FEATURE_PSE); } #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSENTER32); #else /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ if (c->x86 == 15 && c->x86_cache_alignment == 64) c->x86_cache_alignment = 128; #endif /* CPUID workaround for 0F33/0F34 CPU */ if (c->x86 == 0xF && c->x86_model == 0x3 && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4)) c->x86_phys_bits = 36; /* * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate * with P/T states and does not stop in deep C-states. * * It is also reliable across cores and sockets. (but not across * cabinets - we turn it off in that case explicitly.) */ if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ if (c->x86 == 6) { switch (c->x86_model) { case INTEL_FAM6_ATOM_SALTWELL_MID: case INTEL_FAM6_ATOM_SALTWELL_TABLET: case INTEL_FAM6_ATOM_SILVERMONT_MID: case INTEL_FAM6_ATOM_AIRMONT_NP: set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3); break; default: break; } } /* * There is a known erratum on Pentium III and Core Solo * and Core Duo CPUs. * " Page with PAT set to WC while associated MTRR is UC * may consolidate to UC " * Because of this erratum, it is better to stick with * setting WC in MTRR rather than using PAT on these CPUs. * * Enable PAT WC only on P4, Core 2 or later CPUs. */ if (c->x86 == 6 && c->x86_model < 15) clear_cpu_cap(c, X86_FEATURE_PAT); /* * If fast string is not enabled in IA32_MISC_ENABLE for any reason, * clear the fast string and enhanced fast string CPU capabilities. */ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) { pr_info("Disabled fast string operations\n"); setup_clear_cpu_cap(X86_FEATURE_REP_GOOD); setup_clear_cpu_cap(X86_FEATURE_ERMS); } } /* * Intel Quark Core DevMan_001.pdf section 6.4.11 * "The operating system also is required to invalidate (i.e., flush) * the TLB when any changes are made to any of the page table entries. * The operating system must reload CR3 to cause the TLB to be flushed" * * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE * to be modified. */ if (c->x86 == 5 && c->x86_model == 9) { pr_info("Disabling PGE capability bit\n"); setup_clear_cpu_cap(X86_FEATURE_PGE); } if (c->cpuid_level >= 0x00000001) { u32 eax, ebx, ecx, edx; cpuid(0x00000001, &eax, &ebx, &ecx, &edx); /* * If HTT (EDX[28]) is set EBX[16:23] contain the number of * apicids which are reserved per package. Store the resulting * shift value for the package management code. */ if (edx & (1U << 28)) c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); } check_memory_type_self_snoop_errata(c); /* * Get the number of SMT siblings early from the extended topology * leaf, if available. Otherwise try the legacy SMT detection. 
*/ if (detect_extended_topology_early(c) < 0) detect_ht_early(c); } static void bsp_init_intel(struct cpuinfo_x86 *c) { resctrl_cpu_detect(c); } #ifdef CONFIG_X86_32 /* * Early probe support logic for ppro memory erratum #50 * * This is called before we do cpu ident work */ int ppro_with_ram_bug(void) { /* Uses data from early_cpu_detect now */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 1 && boot_cpu_data.x86_stepping < 8) { pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n"); return 1; } return 0; } static void intel_smp_check(struct cpuinfo_x86 *c) { /* calling is from identify_secondary_cpu() ? */ if (!c->cpu_index) return; /* * Mask B, Pentium, but not Pentium MMX */ if (c->x86 == 5 && c->x86_stepping >= 1 && c->x86_stepping <= 4 && c->x86_model <= 3) { /* * Remember we have B step Pentia with bugs */ WARN_ONCE(1, "WARNING: SMP operation may be unreliable" "with B stepping processors.\n"); } } static int forcepae; static int __init forcepae_setup(char *__unused) { forcepae = 1; return 1; } __setup("forcepae", forcepae_setup); static void intel_workarounds(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_F00F_BUG /* * All models of Pentium and Pentium with MMX technology CPUs * have the F0 0F bug, which lets nonprivileged users lock up the * system. Announce that the fault handler will be checking for it. * The Quark is also family 5, but does not have the same bug. */ clear_cpu_bug(c, X86_BUG_F00F); if (c->x86 == 5 && c->x86_model < 9) { static int f00f_workaround_enabled; set_cpu_bug(c, X86_BUG_F00F); if (!f00f_workaround_enabled) { pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n"); f00f_workaround_enabled = 1; } } #endif /* * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until * model 3 mask 3 */ if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633) clear_cpu_cap(c, X86_FEATURE_SEP); /* * PAE CPUID issue: many Pentium M report no PAE but may have a * functionally usable PAE implementation. * Forcefully enable PAE if kernel parameter "forcepae" is present. */ if (forcepae) { pr_warn("PAE forced!\n"); set_cpu_cap(c, X86_FEATURE_PAE); add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE); } /* * P4 Xeon erratum 037 workaround. * Hardware prefetcher may cause stale data to be loaded into the cache. */ if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) { if (msr_set_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) { pr_info("CPU: C0 stepping P4 Xeon detected.\n"); pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n"); } } /* * See if we have a good local APIC by checking for buggy Pentia, * i.e. all B steppings and the C2 stepping of P54C when using their * integrated APIC (see 11AP erratum in "Pentium Processor * Specification Update"). 
*/ if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 && (c->x86_stepping < 0x6 || c->x86_stepping == 0xb)) set_cpu_bug(c, X86_BUG_11AP); #ifdef CONFIG_X86_INTEL_USERCOPY /* * Set up the preferred alignment for movsl bulk memory moves */ switch (c->x86) { case 4: /* 486: untested */ break; case 5: /* Old Pentia: untested */ break; case 6: /* PII/PIII only like movsl with 8-byte alignment */ movsl_mask.mask = 7; break; case 15: /* P4 is OK down to 8-byte alignment */ movsl_mask.mask = 7; break; } #endif intel_smp_check(c); } #else static void intel_workarounds(struct cpuinfo_x86 *c) { } #endif static void srat_detect_node(struct cpuinfo_x86 *c) { #ifdef CONFIG_NUMA unsigned node; int cpu = smp_processor_id(); /* Don't do the funky fallback heuristics the AMD version employs for now. */ node = numa_cpu_node(cpu); if (node == NUMA_NO_NODE || !node_online(node)) { /* reuse the value from init_cpu_to_node() */ node = cpu_to_node(cpu); } numa_set_node(cpu, node); #endif } #define MSR_IA32_TME_ACTIVATE 0x982 /* Helpers to access TME_ACTIVATE MSR */ #define TME_ACTIVATE_LOCKED(x) (x & 0x1) #define TME_ACTIVATE_ENABLED(x) (x & 0x2) #define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */ #define TME_ACTIVATE_POLICY_AES_XTS_128 0 #define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */ #define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */ #define TME_ACTIVATE_CRYPTO_AES_XTS_128 1 /* Values for mktme_status (SW only construct) */ #define MKTME_ENABLED 0 #define MKTME_DISABLED 1 #define MKTME_UNINITIALIZED 2 static int mktme_status = MKTME_UNINITIALIZED; static void detect_tme(struct cpuinfo_x86 *c) { u64 tme_activate, tme_policy, tme_crypto_algs; int keyid_bits = 0, nr_keyids = 0; static u64 tme_activate_cpu0 = 0; rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate); if (mktme_status != MKTME_UNINITIALIZED) { if (tme_activate != tme_activate_cpu0) { /* Broken BIOS? */ pr_err_once("x86/tme: configuration is inconsistent between CPUs\n"); pr_err_once("x86/tme: MKTME is not usable\n"); mktme_status = MKTME_DISABLED; /* Proceed. We may need to exclude bits from x86_phys_bits. */ } } else { tme_activate_cpu0 = tme_activate; } if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) { pr_info_once("x86/tme: not enabled by BIOS\n"); mktme_status = MKTME_DISABLED; return; } if (mktme_status != MKTME_UNINITIALIZED) goto detect_keyid_bits; pr_info("x86/tme: enabled by BIOS\n"); tme_policy = TME_ACTIVATE_POLICY(tme_activate); if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128) pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy); tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate); if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) { pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n", tme_crypto_algs); mktme_status = MKTME_DISABLED; } detect_keyid_bits: keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate); nr_keyids = (1UL << keyid_bits) - 1; if (nr_keyids) { pr_info_once("x86/mktme: enabled by BIOS\n"); pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids); } else { pr_info_once("x86/mktme: disabled by BIOS\n"); } if (mktme_status == MKTME_UNINITIALIZED) { /* MKTME is usable */ mktme_status = MKTME_ENABLED; } /* * KeyID bits effectively lower the number of physical address * bits. Update cpuinfo_x86::x86_phys_bits accordingly. 
*/ c->x86_phys_bits -= keyid_bits; } static void init_cpuid_fault(struct cpuinfo_x86 *c) { u64 msr; if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) { if (msr & MSR_PLATFORM_INFO_CPUID_FAULT) set_cpu_cap(c, X86_FEATURE_CPUID_FAULT); } } static void init_intel_misc_features(struct cpuinfo_x86 *c) { u64 msr; if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr)) return; /* Clear all MISC features */ this_cpu_write(msr_misc_features_shadow, 0); /* Check features and update capabilities and shadow control bits */ init_cpuid_fault(c); probe_xeon_phi_r3mwait(c); msr = this_cpu_read(msr_misc_features_shadow); wrmsrl(MSR_MISC_FEATURES_ENABLES, msr); } static void split_lock_init(void); static void bus_lock_init(void); static void init_intel(struct cpuinfo_x86 *c) { early_init_intel(c); intel_workarounds(c); /* * Detect the extended topology information if available. This * will reinitialise the initial_apicid which will be used * in init_intel_cacheinfo() */ detect_extended_topology(c); if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { /* * let's use the legacy cpuid vector 0x1 and 0x4 for topology * detection. */ detect_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif } init_intel_cacheinfo(c); if (c->cpuid_level > 9) { unsigned eax = cpuid_eax(10); /* Check for version and the number of counters */ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); } if (cpu_has(c, X86_FEATURE_XMM2)) set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); if (boot_cpu_has(X86_FEATURE_DS)) { unsigned int l1, l2; rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); if (!(l1 & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)) set_cpu_cap(c, X86_FEATURE_BTS); if (!(l1 & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL)) set_cpu_cap(c, X86_FEATURE_PEBS); } if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) && (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR); if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) && ((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT))) set_cpu_bug(c, X86_BUG_MONITOR); #ifdef CONFIG_X86_64 if (c->x86 == 15) c->x86_cache_alignment = c->x86_clflush_size * 2; if (c->x86 == 6) set_cpu_cap(c, X86_FEATURE_REP_GOOD); #else /* * Names for the Pentium II/Celeron processors * detectable only by also checking the cache size. * Dixon is NOT a Celeron. */ if (c->x86 == 6) { unsigned int l2 = c->x86_cache_size; char *p = NULL; switch (c->x86_model) { case 5: if (l2 == 0) p = "Celeron (Covington)"; else if (l2 == 256) p = "Mobile Pentium II (Dixon)"; break; case 6: if (l2 == 128) p = "Celeron (Mendocino)"; else if (c->x86_stepping == 0 || c->x86_stepping == 5) p = "Celeron-A"; break; case 8: if (l2 == 128) p = "Celeron (Coppermine)"; break; } if (p) strcpy(c->x86_model_id, p); } if (c->x86 == 15) set_cpu_cap(c, X86_FEATURE_P4); if (c->x86 == 6) set_cpu_cap(c, X86_FEATURE_P3); #endif /* Work around errata */ srat_detect_node(c); init_ia32_feat_ctl(c); if (cpu_has(c, X86_FEATURE_TME)) detect_tme(c); init_intel_misc_features(c); split_lock_init(); bus_lock_init(); intel_init_thermal(c); } #ifdef CONFIG_X86_32 static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) { /* * Intel PIII Tualatin. This comes in two flavours. * One has 256kb of cache, the other 512. We have no way * to determine which, so we use a boottime override * for the 512kb model, and assume 256 otherwise. 
*/ if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0)) size = 256; /* * Intel Quark SoC X1000 contains a 4-way set associative * 16K cache with a 16 byte cache line and 256 lines per tag */ if ((c->x86 == 5) && (c->x86_model == 9)) size = 16; return size; } #endif #define TLB_INST_4K 0x01 #define TLB_INST_4M 0x02 #define TLB_INST_2M_4M 0x03 #define TLB_INST_ALL 0x05 #define TLB_INST_1G 0x06 #define TLB_DATA_4K 0x11 #define TLB_DATA_4M 0x12 #define TLB_DATA_2M_4M 0x13 #define TLB_DATA_4K_4M 0x14 #define TLB_DATA_1G 0x16 #define TLB_DATA0_4K 0x21 #define TLB_DATA0_4M 0x22 #define TLB_DATA0_2M_4M 0x23 #define STLB_4K 0x41 #define STLB_4K_2M 0x42 static const struct _tlb_table intel_tlb_table[] = { { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" }, { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" }, { 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" }, { 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" }, { 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" }, { 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages" }, { 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" }, { 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" }, { 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" }, { 0x55, TLB_INST_2M_4M, 7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, { 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" }, { 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" }, { 0x59, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, fully associative" }, { 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" }, { 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" }, { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" }, { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" }, { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" }, { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" }, { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" }, { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" }, { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" }, { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" }, { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" }, { 0xc2, TLB_DATA_2M_4M, 16, " TLB_DATA 2 MByte/4MByte pages, 4-way associative" }, { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" }, { 0x00, 0, 0 } }; static void intel_tlb_lookup(const unsigned char 
desc) { unsigned char k; if (desc == 0) return; /* look up this descriptor in the table */ for (k = 0; intel_tlb_table[k].descriptor != desc && intel_tlb_table[k].descriptor != 0; k++) ; if (intel_tlb_table[k].tlb_type == 0) return; switch (intel_tlb_table[k].tlb_type) { case STLB_4K: if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; break; case STLB_4K_2M: if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_INST_ALL: if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_INST_4K: if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_INST_4M: if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_INST_2M_4M: if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_DATA_4K: case TLB_DATA0_4K: if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_DATA_4M: case TLB_DATA0_4M: if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_DATA_2M_4M: case TLB_DATA0_2M_4M: if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_DATA_4K_4M: if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_DATA_1G: if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries; break; } } static void intel_detect_tlb(struct cpuinfo_x86 *c) { int i, j, n; unsigned int regs[4]; unsigned char *desc = (unsigned char *)regs; if (c->cpuid_level < 2) return; /* Number of times to iterate */ n = cpuid_eax(2) & 0xFF; for (i = 0 ; i < n ; i++) { cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); /* If bit 31 is set, this is an unknown format */ for (j = 0 ; j < 3 ; j++) if (regs[j] & (1 << 31)) regs[j] = 0; /* Byte 0 is level count, not a descriptor */ for (j = 1 ; j < 16 ; j++) intel_tlb_lookup(desc[j]); } } static const struct cpu_dev intel_cpu_dev = { .c_vendor = "Intel", .c_ident = { "GenuineIntel" }, #ifdef CONFIG_X86_32 
.legacy_models = { { .family = 4, .model_names = { [0] = "486 DX-25/33", [1] = "486 DX-50", [2] = "486 SX", [3] = "486 DX/2", [4] = "486 SL", [5] = "486 SX/2", [7] = "486 DX/2-WB", [8] = "486 DX/4", [9] = "486 DX/4-WB" } }, { .family = 5, .model_names = { [0] = "Pentium 60/66 A-step", [1] = "Pentium 60/66", [2] = "Pentium 75 - 200", [3] = "OverDrive PODP5V83", [4] = "Pentium MMX", [7] = "Mobile Pentium 75 - 200", [8] = "Mobile Pentium MMX", [9] = "Quark SoC X1000", } }, { .family = 6, .model_names = { [0] = "Pentium Pro A-step", [1] = "Pentium Pro", [3] = "Pentium II (Klamath)", [4] = "Pentium II (Deschutes)", [5] = "Pentium II (Deschutes)", [6] = "Mobile Pentium II", [7] = "Pentium III (Katmai)", [8] = "Pentium III (Coppermine)", [10] = "Pentium III (Cascades)", [11] = "Pentium III (Tualatin)", } }, { .family = 15, .model_names = { [0] = "Pentium 4 (Unknown)", [1] = "Pentium 4 (Willamette)", [2] = "Pentium 4 (Northwood)", [4] = "Pentium 4 (Foster)", [5] = "Pentium 4 (Foster)", } }, }, .legacy_cache_size = intel_size_cache, #endif .c_detect_tlb = intel_detect_tlb, .c_early_init = early_init_intel, .c_bsp_init = bsp_init_intel, .c_init = init_intel, .c_x86_vendor = X86_VENDOR_INTEL, }; cpu_dev_register(intel_cpu_dev); #undef pr_fmt #define pr_fmt(fmt) "x86/split lock detection: " fmt static const struct { const char *option; enum split_lock_detect_state state; } sld_options[] __initconst = { { "off", sld_off }, { "warn", sld_warn }, { "fatal", sld_fatal }, { "ratelimit:", sld_ratelimit }, }; static struct ratelimit_state bld_ratelimit; static unsigned int sysctl_sld_mitigate = 1; static DEFINE_SEMAPHORE(buslock_sem, 1); #ifdef CONFIG_PROC_SYSCTL static struct ctl_table sld_sysctls[] = { { .procname = "split_lock_mitigate", .data = &sysctl_sld_mitigate, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_douintvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, {} }; static int __init sld_mitigate_sysctl_init(void) { register_sysctl_init("kernel", sld_sysctls); return 0; } late_initcall(sld_mitigate_sysctl_init); #endif static inline bool match_option(const char *arg, int arglen, const char *opt) { int len = strlen(opt), ratelimit; if (strncmp(arg, opt, len)) return false; /* * Min ratelimit is 1 bus lock/sec. * Max ratelimit is 1000 bus locks/sec. 
*/ if (sscanf(arg, "ratelimit:%d", &ratelimit) == 1 && ratelimit > 0 && ratelimit <= 1000) { ratelimit_state_init(&bld_ratelimit, HZ, ratelimit); ratelimit_set_flags(&bld_ratelimit, RATELIMIT_MSG_ON_RELEASE); return true; } return len == arglen; } static bool split_lock_verify_msr(bool on) { u64 ctrl, tmp; if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl)) return false; if (on) ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT; else ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT; if (wrmsrl_safe(MSR_TEST_CTRL, ctrl)) return false; rdmsrl(MSR_TEST_CTRL, tmp); return ctrl == tmp; } static void __init sld_state_setup(void) { enum split_lock_detect_state state = sld_warn; char arg[20]; int i, ret; if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && !boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) return; ret = cmdline_find_option(boot_command_line, "split_lock_detect", arg, sizeof(arg)); if (ret >= 0) { for (i = 0; i < ARRAY_SIZE(sld_options); i++) { if (match_option(arg, ret, sld_options[i].option)) { state = sld_options[i].state; break; } } } sld_state = state; } static void __init __split_lock_setup(void) { if (!split_lock_verify_msr(false)) { pr_info("MSR access failed: Disabled\n"); return; } rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache); if (!split_lock_verify_msr(true)) { pr_info("MSR access failed: Disabled\n"); return; } /* Restore the MSR to its cached value. */ wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache); setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT); } /* * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking * is not implemented as one thread could undo the setting of the other * thread immediately after dropping the lock anyway. */ static void sld_update_msr(bool on) { u64 test_ctrl_val = msr_test_ctrl_cache; if (on) test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT; wrmsrl(MSR_TEST_CTRL, test_ctrl_val); } static void split_lock_init(void) { /* * #DB for bus lock handles ratelimit and #AC for split lock is * disabled. */ if (sld_state == sld_ratelimit) { split_lock_verify_msr(false); return; } if (cpu_model_supports_sld) split_lock_verify_msr(sld_state != sld_off); } static void __split_lock_reenable_unlock(struct work_struct *work) { sld_update_msr(true); up(&buslock_sem); } static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock); static void __split_lock_reenable(struct work_struct *work) { sld_update_msr(true); } static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable); /* * If a CPU goes offline with pending delayed work to re-enable split lock * detection then the delayed work will be executed on some other CPU. That * handles releasing the buslock_sem, but because it executes on a * different CPU probably won't re-enable split lock detection. This is a * problem on HT systems since the sibling CPU on the same core may then be * left running with split lock detection disabled. * * Unconditionally re-enable detection here. */ static int splitlock_cpu_offline(unsigned int cpu) { sld_update_msr(true); return 0; } static void split_lock_warn(unsigned long ip) { struct delayed_work *work; int cpu; if (!current->reported_split_lock) pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n", current->comm, current->pid, ip); current->reported_split_lock = 1; if (sysctl_sld_mitigate) { /* * misery factor #1: * sleep 10ms before trying to execute split lock. */ if (msleep_interruptible(10) > 0) return; /* * Misery factor #2: * only allow one buslocked disabled core at a time. 
*/ if (down_interruptible(&buslock_sem) == -EINTR) return; work = &sl_reenable_unlock; } else { work = &sl_reenable; } cpu = get_cpu(); schedule_delayed_work_on(cpu, work, 2); /* Disable split lock detection on this CPU to make progress */ sld_update_msr(false); put_cpu(); } bool handle_guest_split_lock(unsigned long ip) { if (sld_state == sld_warn) { split_lock_warn(ip); return true; } pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n", current->comm, current->pid, sld_state == sld_fatal ? "fatal" : "bogus", ip); current->thread.error_code = 0; current->thread.trap_nr = X86_TRAP_AC; force_sig_fault(SIGBUS, BUS_ADRALN, NULL); return false; } EXPORT_SYMBOL_GPL(handle_guest_split_lock); static void bus_lock_init(void) { u64 val; if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) return; rdmsrl(MSR_IA32_DEBUGCTLMSR, val); if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && (sld_state == sld_warn || sld_state == sld_fatal)) || sld_state == sld_off) { /* * Warn and fatal are handled by #AC for split lock if #AC for * split lock is supported. */ val &= ~DEBUGCTLMSR_BUS_LOCK_DETECT; } else { val |= DEBUGCTLMSR_BUS_LOCK_DETECT; } wrmsrl(MSR_IA32_DEBUGCTLMSR, val); } bool handle_user_split_lock(struct pt_regs *regs, long error_code) { if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal) return false; split_lock_warn(regs->ip); return true; } void handle_bus_lock(struct pt_regs *regs) { switch (sld_state) { case sld_off: break; case sld_ratelimit: /* Enforce no more than bld_ratelimit bus locks/sec. */ while (!__ratelimit(&bld_ratelimit)) msleep(20); /* Warn on the bus lock. */ fallthrough; case sld_warn: pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n", current->comm, current->pid, regs->ip); break; case sld_fatal: force_sig_fault(SIGBUS, BUS_ADRALN, NULL); break; } } /* * CPU models that are known to have the per-core split-lock detection * feature even though they do not enumerate IA32_CORE_CAPABILITIES. */ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0), {} }; static void __init split_lock_setup(struct cpuinfo_x86 *c) { const struct x86_cpu_id *m; u64 ia32_core_caps; if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) return; /* Check for CPUs that have support but do not enumerate it: */ m = x86_match_cpu(split_lock_cpu_ids); if (m) goto supported; if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES)) return; /* * Not all bits in MSR_IA32_CORE_CAPS are architectural, but * MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is. All CPUs that set * it have split lock detection. 
*/ rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps); if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT) goto supported; /* CPU is not in the model list and does not have the MSR bit: */ return; supported: cpu_model_supports_sld = true; __split_lock_setup(); } static void sld_state_show(void) { if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) && !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) return; switch (sld_state) { case sld_off: pr_info("disabled\n"); break; case sld_warn: if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) { pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n"); if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/splitlock", NULL, splitlock_cpu_offline) < 0) pr_warn("No splitlock CPU offline handler\n"); } else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) { pr_info("#DB: warning on user-space bus_locks\n"); } break; case sld_fatal: if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) { pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n"); } else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) { pr_info("#DB: sending SIGBUS on user-space bus_locks%s\n", boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) ? " from non-WB" : ""); } break; case sld_ratelimit: if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst); break; } } void __init sld_setup(struct cpuinfo_x86 *c) { split_lock_setup(c); sld_state_setup(); sld_state_show(); } #define X86_HYBRID_CPU_TYPE_ID_SHIFT 24 /** * get_this_hybrid_cpu_type() - Get the type of this hybrid CPU * * Returns the CPU type [31:24] (i.e., Atom or Core) of a CPU in * a hybrid processor. If the processor is not hybrid, returns 0. */ u8 get_this_hybrid_cpu_type(void) { if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) return 0; return cpuid_eax(0x0000001a) >> X86_HYBRID_CPU_TYPE_ID_SHIFT; }
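Editorial aside, not part of the kernel source above: the TME_ACTIVATE_* helpers and detect_tme() reduce the usable physical-address width by the number of KeyID bits reported in MSR 0x982. Below is a minimal user-space sketch of that decode under stated assumptions: the MSR value is made up, the 46-bit physical address width is assumed, and reading the real MSR would need ring 0 (or /dev/cpu/*/msr).
#include <stdio.h>
#include <stdint.h>

/* Same bit layout as the TME_ACTIVATE_* helpers in intel.c */
#define TME_LOCKED(x)		((x) & 0x1)
#define TME_ENABLED(x)		((x) & 0x2)
#define TME_POLICY(x)		(((x) >> 4) & 0xf)	/* bits 7:4 */
#define TME_KEYID_BITS(x)	(((x) >> 32) & 0xf)	/* bits 35:32 */
#define TME_CRYPTO_ALGS(x)	(((x) >> 48) & 0xffff)	/* bits 63:48 */

int main(void)
{
	/* Hypothetical value: locked + enabled, AES-XTS-128, 6 KeyID bits */
	uint64_t tme_activate = 0x3ULL | (6ULL << 32) | (1ULL << 48);
	int phys_bits = 46;	/* assumed CPUID 0x80000008 result */
	int keyid_bits = (int)TME_KEYID_BITS(tme_activate);
	int nr_keyids = (1 << keyid_bits) - 1;	/* KeyID 0 is the default TME key */

	printf("locked=%d enabled=%d policy=%#llx algs=%#llx\n",
	       TME_LOCKED(tme_activate) ? 1 : 0,
	       TME_ENABLED(tme_activate) ? 1 : 0,
	       (unsigned long long)TME_POLICY(tme_activate),
	       (unsigned long long)TME_CRYPTO_ALGS(tme_activate));
	/* detect_tme() applies the same subtraction to c->x86_phys_bits */
	printf("%d KeyIDs, usable physical address bits: %d\n",
	       nr_keyids, phys_bits - keyid_bits);
	return 0;
}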
linux-master
arch/x86/kernel/cpu/intel.c
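Another editorial aside, outside the kernel sources on either side: intel_detect_tlb() above walks CPUID leaf 0x2, whose low EAX byte is an iteration count and whose remaining bytes are one-byte TLB/cache descriptors (bit 31 set in a register means it carries no descriptors). The hedged user-space sketch below performs the same walk but only prints the raw descriptors instead of filling the tlb_ll*[] tables; it relies on the compiler-provided <cpuid.h> helper rather than the kernel's cpuid() wrapper and is meant purely as illustration.
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;
	int i, j, n;

	if (!__get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]))
		return 1;	/* leaf 0x2 not available */

	/* Low byte of EAX: number of times leaf 0x2 must be queried */
	n = regs[0] & 0xff;

	for (i = 0; i < n; i++) {
		__get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* Mirror the check above: bit 31 set means no descriptors */
		for (j = 0; j < 3; j++)
			if (regs[j] & (1u << 31))
				regs[j] = 0;

		/* Byte 0 is the iteration count, not a descriptor */
		for (j = 1; j < 16; j++)
			if (desc[j])
				printf("descriptor 0x%02x\n", desc[j]);
	}
	return 0;
}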
// SPDX-License-Identifier: GPL-2.0-only /* cpu_feature_enabled() cannot be used this early */ #define USE_EARLY_PGTABLE_L5 #include <linux/memblock.h> #include <linux/linkage.h> #include <linux/bitops.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/percpu.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/sched/mm.h> #include <linux/sched/clock.h> #include <linux/sched/task.h> #include <linux/sched/smt.h> #include <linux/init.h> #include <linux/kprobes.h> #include <linux/kgdb.h> #include <linux/mem_encrypt.h> #include <linux/smp.h> #include <linux/cpu.h> #include <linux/io.h> #include <linux/syscore_ops.h> #include <linux/pgtable.h> #include <linux/stackprotector.h> #include <linux/utsname.h> #include <asm/alternative.h> #include <asm/cmdline.h> #include <asm/perf_event.h> #include <asm/mmu_context.h> #include <asm/doublefault.h> #include <asm/archrandom.h> #include <asm/hypervisor.h> #include <asm/processor.h> #include <asm/tlbflush.h> #include <asm/debugreg.h> #include <asm/sections.h> #include <asm/vsyscall.h> #include <linux/topology.h> #include <linux/cpumask.h> #include <linux/atomic.h> #include <asm/proto.h> #include <asm/setup.h> #include <asm/apic.h> #include <asm/desc.h> #include <asm/fpu/api.h> #include <asm/mtrr.h> #include <asm/hwcap2.h> #include <linux/numa.h> #include <asm/numa.h> #include <asm/asm.h> #include <asm/bugs.h> #include <asm/cpu.h> #include <asm/mce.h> #include <asm/msr.h> #include <asm/cacheinfo.h> #include <asm/memtype.h> #include <asm/microcode.h> #include <asm/intel-family.h> #include <asm/cpu_device_id.h> #include <asm/uv/uv.h> #include <asm/set_memory.h> #include <asm/traps.h> #include <asm/sev.h> #include "cpu.h" u32 elf_hwcap2 __read_mostly; /* Number of siblings per CPU package */ int smp_num_siblings = 1; EXPORT_SYMBOL(smp_num_siblings); /* Last level cache ID of each logical CPU */ DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID; u16 get_llc_id(unsigned int cpu) { return per_cpu(cpu_llc_id, cpu); } EXPORT_SYMBOL_GPL(get_llc_id); /* L2 cache ID of each logical CPU */ DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id) = BAD_APICID; static struct ppin_info { int feature; int msr_ppin_ctl; int msr_ppin; } ppin_info[] = { [X86_VENDOR_INTEL] = { .feature = X86_FEATURE_INTEL_PPIN, .msr_ppin_ctl = MSR_PPIN_CTL, .msr_ppin = MSR_PPIN }, [X86_VENDOR_AMD] = { .feature = X86_FEATURE_AMD_PPIN, .msr_ppin_ctl = MSR_AMD_PPIN_CTL, .msr_ppin = MSR_AMD_PPIN }, }; static const struct x86_cpu_id ppin_cpuids[] = { X86_MATCH_FEATURE(X86_FEATURE_AMD_PPIN, &ppin_info[X86_VENDOR_AMD]), X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]), /* Legacy models without CPUID enumeration */ X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]), X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &ppin_info[X86_VENDOR_INTEL]), X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]), X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]), X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]), X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]), {} }; static void ppin_init(struct 
cpuinfo_x86 *c) { const struct x86_cpu_id *id; unsigned long long val; struct ppin_info *info; id = x86_match_cpu(ppin_cpuids); if (!id) return; /* * Testing the presence of the MSR is not enough. Need to check * that the PPIN_CTL allows reading of the PPIN. */ info = (struct ppin_info *)id->driver_data; if (rdmsrl_safe(info->msr_ppin_ctl, &val)) goto clear_ppin; if ((val & 3UL) == 1UL) { /* PPIN locked in disabled mode */ goto clear_ppin; } /* If PPIN is disabled, try to enable */ if (!(val & 2UL)) { wrmsrl_safe(info->msr_ppin_ctl, val | 2UL); rdmsrl_safe(info->msr_ppin_ctl, &val); } /* Is the enable bit set? */ if (val & 2UL) { c->ppin = __rdmsr(info->msr_ppin); set_cpu_cap(c, info->feature); return; } clear_ppin: clear_cpu_cap(c, info->feature); } static void default_init(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_64 cpu_detect_cache_sizes(c); #else /* Not much we can do here... */ /* Check if at least it has cpuid */ if (c->cpuid_level == -1) { /* No cpuid. It must be an ancient CPU */ if (c->x86 == 4) strcpy(c->x86_model_id, "486"); else if (c->x86 == 3) strcpy(c->x86_model_id, "386"); } #endif } static const struct cpu_dev default_cpu = { .c_init = default_init, .c_vendor = "Unknown", .c_x86_vendor = X86_VENDOR_UNKNOWN, }; static const struct cpu_dev *this_cpu = &default_cpu; DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { #ifdef CONFIG_X86_64 /* * We need valid kernel segments for data and code in long mode too * IRET will check the segment types kkeil 2000/10/28 * Also sysret mandates a special GDT layout * * TLS descriptors are currently at a different place compared to i386. * Hopefully nobody expects them at a fixed place (Wine?) */ [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), #else [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), /* * Segments used for calling PnP BIOS have byte granularity. * They code segments and data segments have fixed 64k limits, * the transfer segment sizes are set at run time. */ /* 32-bit code */ [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), /* 16-bit code */ [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), /* 16-bit data */ [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), /* 16-bit data */ [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), /* 16-bit data */ [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), /* * The APM segments have byte granularity and their bases * are set at run time. All have 64k limits. 
*/ /* 32-bit code */ [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), /* 16-bit code */ [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), /* data */ [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), #endif } }; EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); #ifdef CONFIG_X86_64 static int __init x86_nopcid_setup(char *s) { /* nopcid doesn't accept parameters */ if (s) return -EINVAL; /* do not emit a message if the feature is not present */ if (!boot_cpu_has(X86_FEATURE_PCID)) return 0; setup_clear_cpu_cap(X86_FEATURE_PCID); pr_info("nopcid: PCID feature disabled\n"); return 0; } early_param("nopcid", x86_nopcid_setup); #endif static int __init x86_noinvpcid_setup(char *s) { /* noinvpcid doesn't accept parameters */ if (s) return -EINVAL; /* do not emit a message if the feature is not present */ if (!boot_cpu_has(X86_FEATURE_INVPCID)) return 0; setup_clear_cpu_cap(X86_FEATURE_INVPCID); pr_info("noinvpcid: INVPCID feature disabled\n"); return 0; } early_param("noinvpcid", x86_noinvpcid_setup); #ifdef CONFIG_X86_32 static int cachesize_override = -1; static int disable_x86_serial_nr = 1; static int __init cachesize_setup(char *str) { get_option(&str, &cachesize_override); return 1; } __setup("cachesize=", cachesize_setup); /* Standard macro to see if a specific flag is changeable */ static inline int flag_is_changeable_p(u32 flag) { u32 f1, f2; /* * Cyrix and IDT cpus allow disabling of CPUID * so the code below may return different results * when it is executed before and after enabling * the CPUID. Add "volatile" to not allow gcc to * optimize the subsequent calls to this function. */ asm volatile ("pushfl \n\t" "pushfl \n\t" "popl %0 \n\t" "movl %0, %1 \n\t" "xorl %2, %0 \n\t" "pushl %0 \n\t" "popfl \n\t" "pushfl \n\t" "popl %0 \n\t" "popfl \n\t" : "=&r" (f1), "=&r" (f2) : "ir" (flag)); return ((f1^f2) & flag) != 0; } /* Probe for the CPUID instruction */ int have_cpuid_p(void) { return flag_is_changeable_p(X86_EFLAGS_ID); } static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) { unsigned long lo, hi; if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr) return; /* Disable processor serial number: */ rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); lo |= 0x200000; wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); pr_notice("CPU serial number disabled.\n"); clear_cpu_cap(c, X86_FEATURE_PN); /* Disabling the serial number may affect the cpuid level */ c->cpuid_level = cpuid_eax(0); } static int __init x86_serial_nr_setup(char *s) { disable_x86_serial_nr = 0; return 1; } __setup("serialnumber", x86_serial_nr_setup); #else static inline int flag_is_changeable_p(u32 flag) { return 1; } static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) { } #endif static __always_inline void setup_smep(struct cpuinfo_x86 *c) { if (cpu_has(c, X86_FEATURE_SMEP)) cr4_set_bits(X86_CR4_SMEP); } static __always_inline void setup_smap(struct cpuinfo_x86 *c) { unsigned long eflags = native_save_fl(); /* This should have been cleared long ago */ BUG_ON(eflags & X86_EFLAGS_AC); if (cpu_has(c, X86_FEATURE_SMAP)) cr4_set_bits(X86_CR4_SMAP); } static __always_inline void setup_umip(struct cpuinfo_x86 *c) { /* Check the boot processor, plus build option for UMIP. */ if (!cpu_feature_enabled(X86_FEATURE_UMIP)) goto out; /* Check the current processor's cpuid bits. 
*/ if (!cpu_has(c, X86_FEATURE_UMIP)) goto out; cr4_set_bits(X86_CR4_UMIP); pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n"); return; out: /* * Make sure UMIP is disabled in case it was enabled in a * previous boot (e.g., via kexec). */ cr4_clear_bits(X86_CR4_UMIP); } /* These bits should not change their value after CPU init is finished. */ static const unsigned long cr4_pinned_mask = X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE | X86_CR4_CET; static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning); static unsigned long cr4_pinned_bits __ro_after_init; void native_write_cr0(unsigned long val) { unsigned long bits_missing = 0; set_register: asm volatile("mov %0,%%cr0": "+r" (val) : : "memory"); if (static_branch_likely(&cr_pinning)) { if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) { bits_missing = X86_CR0_WP; val |= bits_missing; goto set_register; } /* Warn after we've set the missing bits. */ WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n"); } } EXPORT_SYMBOL(native_write_cr0); void __no_profile native_write_cr4(unsigned long val) { unsigned long bits_changed = 0; set_register: asm volatile("mov %0,%%cr4": "+r" (val) : : "memory"); if (static_branch_likely(&cr_pinning)) { if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) { bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits; val = (val & ~cr4_pinned_mask) | cr4_pinned_bits; goto set_register; } /* Warn after we've corrected the changed bits. */ WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n", bits_changed); } } #if IS_MODULE(CONFIG_LKDTM) EXPORT_SYMBOL_GPL(native_write_cr4); #endif void cr4_update_irqsoff(unsigned long set, unsigned long clear) { unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4); lockdep_assert_irqs_disabled(); newval = (cr4 & ~clear) | set; if (newval != cr4) { this_cpu_write(cpu_tlbstate.cr4, newval); __write_cr4(newval); } } EXPORT_SYMBOL(cr4_update_irqsoff); /* Read the CR4 shadow. */ unsigned long cr4_read_shadow(void) { return this_cpu_read(cpu_tlbstate.cr4); } EXPORT_SYMBOL_GPL(cr4_read_shadow); void cr4_init(void) { unsigned long cr4 = __read_cr4(); if (boot_cpu_has(X86_FEATURE_PCID)) cr4 |= X86_CR4_PCIDE; if (static_branch_likely(&cr_pinning)) cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits; __write_cr4(cr4); /* Initialize cr4 shadow for this CPU. */ this_cpu_write(cpu_tlbstate.cr4, cr4); } /* * Once CPU feature detection is finished (and boot params have been * parsed), record any of the sensitive CR bits that are set, and * enable CR pinning. */ static void __init setup_cr_pinning(void) { cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask; static_key_enable(&cr_pinning.key); } static __init int x86_nofsgsbase_setup(char *arg) { /* Require an exact match without trailing characters. */ if (strlen(arg)) return 0; /* Do not emit a message if the feature is not present. */ if (!boot_cpu_has(X86_FEATURE_FSGSBASE)) return 1; setup_clear_cpu_cap(X86_FEATURE_FSGSBASE); pr_info("FSGSBASE disabled via kernel command line\n"); return 1; } __setup("nofsgsbase", x86_nofsgsbase_setup); /* * Protection Keys are not available in 32-bit mode. */ static bool pku_disabled; static __always_inline void setup_pku(struct cpuinfo_x86 *c) { if (c == &boot_cpu_data) { if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU)) return; /* * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid * bit to be set. Enforce it. 
*/ setup_force_cpu_cap(X86_FEATURE_OSPKE); } else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) { return; } cr4_set_bits(X86_CR4_PKE); /* Load the default PKRU value */ pkru_write_default(); } #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS static __init int setup_disable_pku(char *arg) { /* * Do not clear the X86_FEATURE_PKU bit. All of the * runtime checks are against OSPKE so clearing the * bit does nothing. * * This way, we will see "pku" in cpuinfo, but not * "ospke", which is exactly what we want. It shows * that the CPU has PKU, but the OS has not enabled it. * This happens to be exactly how a system would look * if we disabled the config option. */ pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n"); pku_disabled = true; return 1; } __setup("nopku", setup_disable_pku); #endif #ifdef CONFIG_X86_KERNEL_IBT __noendbr u64 ibt_save(bool disable) { u64 msr = 0; if (cpu_feature_enabled(X86_FEATURE_IBT)) { rdmsrl(MSR_IA32_S_CET, msr); if (disable) wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN); } return msr; } __noendbr void ibt_restore(u64 save) { u64 msr; if (cpu_feature_enabled(X86_FEATURE_IBT)) { rdmsrl(MSR_IA32_S_CET, msr); msr &= ~CET_ENDBR_EN; msr |= (save & CET_ENDBR_EN); wrmsrl(MSR_IA32_S_CET, msr); } } #endif static __always_inline void setup_cet(struct cpuinfo_x86 *c) { bool user_shstk, kernel_ibt; if (!IS_ENABLED(CONFIG_X86_CET)) return; kernel_ibt = HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT); user_shstk = cpu_feature_enabled(X86_FEATURE_SHSTK) && IS_ENABLED(CONFIG_X86_USER_SHADOW_STACK); if (!kernel_ibt && !user_shstk) return; if (user_shstk) set_cpu_cap(c, X86_FEATURE_USER_SHSTK); if (kernel_ibt) wrmsrl(MSR_IA32_S_CET, CET_ENDBR_EN); else wrmsrl(MSR_IA32_S_CET, 0); cr4_set_bits(X86_CR4_CET); if (kernel_ibt && ibt_selftest()) { pr_err("IBT selftest: Failed!\n"); wrmsrl(MSR_IA32_S_CET, 0); setup_clear_cpu_cap(X86_FEATURE_IBT); } } __noendbr void cet_disable(void) { if (!(cpu_feature_enabled(X86_FEATURE_IBT) || cpu_feature_enabled(X86_FEATURE_SHSTK))) return; wrmsrl(MSR_IA32_S_CET, 0); wrmsrl(MSR_IA32_U_CET, 0); } /* * Some CPU features depend on higher CPUID levels, which may not always * be available due to CPUID level capping or broken virtualization * software. Add those features to this table to auto-disable them. */ struct cpuid_dependent_feature { u32 feature; u32 level; }; static const struct cpuid_dependent_feature cpuid_dependent_features[] = { { X86_FEATURE_MWAIT, 0x00000005 }, { X86_FEATURE_DCA, 0x00000009 }, { X86_FEATURE_XSAVE, 0x0000000d }, { 0, 0 } }; static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) { const struct cpuid_dependent_feature *df; for (df = cpuid_dependent_features; df->feature; df++) { if (!cpu_has(c, df->feature)) continue; /* * Note: cpuid_level is set to -1 if unavailable, but * extended_extended_level is set to 0 if unavailable * and the legitimate extended levels are all negative * when signed; hence the weird messing around with * signs here... */ if (!((s32)df->level < 0 ? (u32)df->level > (u32)c->extended_cpuid_level : (s32)df->level > (s32)c->cpuid_level)) continue; clear_cpu_cap(c, df->feature); if (!warn) continue; pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n", x86_cap_flag(df->feature), df->level); } } /* * Naming convention should be: <Name> [(<Codename>)] * This table only is used unless init_<vendor>() below doesn't set it; * in particular, if CPUID levels 0x80000002..4 are supported, this * isn't used */ /* Look up CPU names by table lookup. 
*/ static const char *table_lookup_model(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_32 const struct legacy_cpu_model_info *info; if (c->x86_model >= 16) return NULL; /* Range check */ if (!this_cpu) return NULL; info = this_cpu->legacy_models; while (info->family) { if (info->family == c->x86) return info->model_names[c->x86_model]; info++; } #endif return NULL; /* Not found */ } /* Aligned to unsigned long to avoid split lock in atomic bitmap ops */ __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long)); __u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long)); #ifdef CONFIG_X86_32 /* The 32-bit entry code needs to find cpu_entry_area. */ DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); #endif /* Load the original GDT from the per-cpu structure */ void load_direct_gdt(int cpu) { struct desc_ptr gdt_descr; gdt_descr.address = (long)get_cpu_gdt_rw(cpu); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); } EXPORT_SYMBOL_GPL(load_direct_gdt); /* Load a fixmap remapping of the per-cpu GDT */ void load_fixmap_gdt(int cpu) { struct desc_ptr gdt_descr; gdt_descr.address = (long)get_cpu_gdt_ro(cpu); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); } EXPORT_SYMBOL_GPL(load_fixmap_gdt); /** * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base * @cpu: The CPU number for which this is invoked * * Invoked during early boot to switch from early GDT and early per CPU to * the direct GDT and the runtime per CPU area. On 32-bit the percpu base * switch is implicit by loading the direct GDT. On 64bit this requires * to update GSBASE. */ void __init switch_gdt_and_percpu_base(int cpu) { load_direct_gdt(cpu); #ifdef CONFIG_X86_64 /* * No need to load %gs. It is already correct. * * Writing %gs on 64bit would zero GSBASE which would make any per * CPU operation up to the point of the wrmsrl() fault. * * Set GSBASE to the new offset. Until the wrmsrl() happens the * early mapping is still valid. That means the GSBASE update will * lose any prior per CPU data which was not copied over in * setup_per_cpu_areas(). * * This works even with stackprotector enabled because the * per CPU stack canary is 0 in both per CPU areas. */ wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu)); #else /* * %fs is already set to __KERNEL_PERCPU, but after switching GDT * it is required to load FS again so that the 'hidden' part is * updated from the new GDT. Up to this point the early per CPU * translation is active. Any content of the early per CPU data * which was not copied over in setup_per_cpu_areas() is lost. 
*/ loadsegment(fs, __KERNEL_PERCPU); #endif } static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; static void get_model_name(struct cpuinfo_x86 *c) { unsigned int *v; char *p, *q, *s; if (c->extended_cpuid_level < 0x80000004) return; v = (unsigned int *)c->x86_model_id; cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); c->x86_model_id[48] = 0; /* Trim whitespace */ p = q = s = &c->x86_model_id[0]; while (*p == ' ') p++; while (*p) { /* Note the last non-whitespace index */ if (!isspace(*p)) s = q; *q++ = *p++; } *(s + 1) = '\0'; } void detect_num_cpu_cores(struct cpuinfo_x86 *c) { unsigned int eax, ebx, ecx, edx; c->x86_max_cores = 1; if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4) return; cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); if (eax & 0x1f) c->x86_max_cores = (eax >> 26) + 1; } void cpu_detect_cache_sizes(struct cpuinfo_x86 *c) { unsigned int n, dummy, ebx, ecx, edx, l2size; n = c->extended_cpuid_level; if (n >= 0x80000005) { cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); c->x86_cache_size = (ecx>>24) + (edx>>24); #ifdef CONFIG_X86_64 /* On K8 L1 TLB is inclusive, so don't count it */ c->x86_tlbsize = 0; #endif } if (n < 0x80000006) /* Some chips just has a large L1. */ return; cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); l2size = ecx >> 16; #ifdef CONFIG_X86_64 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); #else /* do processor-specific cache resizing */ if (this_cpu->legacy_cache_size) l2size = this_cpu->legacy_cache_size(c, l2size); /* Allow user to override all this if necessary. */ if (cachesize_override != -1) l2size = cachesize_override; if (l2size == 0) return; /* Again, no L2 cache is possible */ #endif c->x86_cache_size = l2size; } u16 __read_mostly tlb_lli_4k[NR_INFO]; u16 __read_mostly tlb_lli_2m[NR_INFO]; u16 __read_mostly tlb_lli_4m[NR_INFO]; u16 __read_mostly tlb_lld_4k[NR_INFO]; u16 __read_mostly tlb_lld_2m[NR_INFO]; u16 __read_mostly tlb_lld_4m[NR_INFO]; u16 __read_mostly tlb_lld_1g[NR_INFO]; static void cpu_detect_tlb(struct cpuinfo_x86 *c) { if (this_cpu->c_detect_tlb) this_cpu->c_detect_tlb(c); pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n", tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES], tlb_lli_4m[ENTRIES]); pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n", tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]); } int detect_ht_early(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP u32 eax, ebx, ecx, edx; if (!cpu_has(c, X86_FEATURE_HT)) return -1; if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) return -1; if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) return -1; cpuid(1, &eax, &ebx, &ecx, &edx); smp_num_siblings = (ebx & 0xff0000) >> 16; if (smp_num_siblings == 1) pr_info_once("CPU0: Hyper-Threading is disabled\n"); #endif return 0; } void detect_ht(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP int index_msb, core_bits; if (detect_ht_early(c) < 0) return; index_msb = get_count_order(smp_num_siblings); c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); smp_num_siblings = smp_num_siblings / c->x86_max_cores; index_msb = get_count_order(smp_num_siblings); core_bits = get_count_order(c->x86_max_cores); c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & ((1 << core_bits) - 1); #endif } static void get_cpu_vendor(struct cpuinfo_x86 *c) { char *v = c->x86_vendor_id; int i; for (i = 0; i < X86_VENDOR_NUM; i++) { if (!cpu_devs[i]) break; if (!strcmp(v, cpu_devs[i]->c_ident[0]) || 
(cpu_devs[i]->c_ident[1] && !strcmp(v, cpu_devs[i]->c_ident[1]))) { this_cpu = cpu_devs[i]; c->x86_vendor = this_cpu->c_x86_vendor; return; } } pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \ "CPU: Your system may be unstable.\n", v); c->x86_vendor = X86_VENDOR_UNKNOWN; this_cpu = &default_cpu; } void cpu_detect(struct cpuinfo_x86 *c) { /* Get vendor name */ cpuid(0x00000000, (unsigned int *)&c->cpuid_level, (unsigned int *)&c->x86_vendor_id[0], (unsigned int *)&c->x86_vendor_id[8], (unsigned int *)&c->x86_vendor_id[4]); c->x86 = 4; /* Intel-defined flags: level 0x00000001 */ if (c->cpuid_level >= 0x00000001) { u32 junk, tfms, cap0, misc; cpuid(0x00000001, &tfms, &misc, &junk, &cap0); c->x86 = x86_family(tfms); c->x86_model = x86_model(tfms); c->x86_stepping = x86_stepping(tfms); if (cap0 & (1<<19)) { c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; c->x86_cache_alignment = c->x86_clflush_size; } } } static void apply_forced_caps(struct cpuinfo_x86 *c) { int i; for (i = 0; i < NCAPINTS + NBUGINTS; i++) { c->x86_capability[i] &= ~cpu_caps_cleared[i]; c->x86_capability[i] |= cpu_caps_set[i]; } } static void init_speculation_control(struct cpuinfo_x86 *c) { /* * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support, * and they also have a different bit for STIBP support. Also, * a hypervisor might have set the individual AMD bits even on * Intel CPUs, for finer-grained selection of what's available. */ if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { set_cpu_cap(c, X86_FEATURE_IBRS); set_cpu_cap(c, X86_FEATURE_IBPB); set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); } if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) set_cpu_cap(c, X86_FEATURE_STIBP); if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) || cpu_has(c, X86_FEATURE_VIRT_SSBD)) set_cpu_cap(c, X86_FEATURE_SSBD); if (cpu_has(c, X86_FEATURE_AMD_IBRS)) { set_cpu_cap(c, X86_FEATURE_IBRS); set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); } if (cpu_has(c, X86_FEATURE_AMD_IBPB)) set_cpu_cap(c, X86_FEATURE_IBPB); if (cpu_has(c, X86_FEATURE_AMD_STIBP)) { set_cpu_cap(c, X86_FEATURE_STIBP); set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); } if (cpu_has(c, X86_FEATURE_AMD_SSBD)) { set_cpu_cap(c, X86_FEATURE_SSBD); set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD); } } void get_cpu_cap(struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; /* Intel-defined flags: level 0x00000001 */ if (c->cpuid_level >= 0x00000001) { cpuid(0x00000001, &eax, &ebx, &ecx, &edx); c->x86_capability[CPUID_1_ECX] = ecx; c->x86_capability[CPUID_1_EDX] = edx; } /* Thermal and Power Management Leaf: level 0x00000006 (eax) */ if (c->cpuid_level >= 0x00000006) c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); /* Additional Intel-defined flags: level 0x00000007 */ if (c->cpuid_level >= 0x00000007) { cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); c->x86_capability[CPUID_7_0_EBX] = ebx; c->x86_capability[CPUID_7_ECX] = ecx; c->x86_capability[CPUID_7_EDX] = edx; /* Check valid sub-leaf index before accessing it */ if (eax >= 1) { cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx); c->x86_capability[CPUID_7_1_EAX] = eax; } } /* Extended state features: level 0x0000000d */ if (c->cpuid_level >= 0x0000000d) { cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); c->x86_capability[CPUID_D_1_EAX] = eax; } /* AMD-defined flags: level 0x80000001 */ eax = cpuid_eax(0x80000000); c->extended_cpuid_level = eax; if ((eax & 0xffff0000) == 0x80000000) { if (eax >= 0x80000001) { cpuid(0x80000001, &eax, &ebx, &ecx, &edx); c->x86_capability[CPUID_8000_0001_ECX] = ecx; 
c->x86_capability[CPUID_8000_0001_EDX] = edx; } } if (c->extended_cpuid_level >= 0x80000007) { cpuid(0x80000007, &eax, &ebx, &ecx, &edx); c->x86_capability[CPUID_8000_0007_EBX] = ebx; c->x86_power = edx; } if (c->extended_cpuid_level >= 0x80000008) { cpuid(0x80000008, &eax, &ebx, &ecx, &edx); c->x86_capability[CPUID_8000_0008_EBX] = ebx; } if (c->extended_cpuid_level >= 0x8000000a) c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); if (c->extended_cpuid_level >= 0x8000001f) c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f); if (c->extended_cpuid_level >= 0x80000021) c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021); init_scattered_cpuid_features(c); init_speculation_control(c); /* * Clear/Set all flags overridden by options, after probe. * This needs to happen each time we re-probe, which may happen * several times during CPU initialization. */ apply_forced_caps(c); } void get_cpu_address_sizes(struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; if (c->extended_cpuid_level >= 0x80000008) { cpuid(0x80000008, &eax, &ebx, &ecx, &edx); c->x86_virt_bits = (eax >> 8) & 0xff; c->x86_phys_bits = eax & 0xff; } #ifdef CONFIG_X86_32 else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) c->x86_phys_bits = 36; #endif c->x86_cache_bits = c->x86_phys_bits; } static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_32 int i; /* * First of all, decide if this is a 486 or higher * It's a 486 if we can modify the AC flag */ if (flag_is_changeable_p(X86_EFLAGS_AC)) c->x86 = 4; else c->x86 = 3; for (i = 0; i < X86_VENDOR_NUM; i++) if (cpu_devs[i] && cpu_devs[i]->c_identify) { c->x86_vendor_id[0] = 0; cpu_devs[i]->c_identify(c); if (c->x86_vendor_id[0]) { get_cpu_vendor(c); break; } } #endif } #define NO_SPECULATION BIT(0) #define NO_MELTDOWN BIT(1) #define NO_SSB BIT(2) #define NO_L1TF BIT(3) #define NO_MDS BIT(4) #define MSBDS_ONLY BIT(5) #define NO_SWAPGS BIT(6) #define NO_ITLB_MULTIHIT BIT(7) #define NO_SPECTRE_V2 BIT(8) #define NO_MMIO BIT(9) #define NO_EIBRS_PBRSB BIT(10) #define VULNWL(vendor, family, model, whitelist) \ X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist) #define VULNWL_INTEL(model, whitelist) \ VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist) #define VULNWL_AMD(family, whitelist) \ VULNWL(AMD, family, X86_MODEL_ANY, whitelist) #define VULNWL_HYGON(family, whitelist) \ VULNWL(HYGON, family, X86_MODEL_ANY, whitelist) static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { VULNWL(ANY, 4, X86_MODEL_ANY, NO_SPECULATION), VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION), VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION), VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION), VULNWL(VORTEX, 5, X86_MODEL_ANY, NO_SPECULATION), VULNWL(VORTEX, 6, X86_MODEL_ANY, NO_SPECULATION), /* Intel Family 6 */ VULNWL_INTEL(TIGERLAKE, NO_MMIO), VULNWL_INTEL(TIGERLAKE_L, NO_MMIO), VULNWL_INTEL(ALDERLAKE, NO_MMIO), VULNWL_INTEL(ALDERLAKE_L, NO_MMIO), VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_SILVERMONT_D, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY 
| NO_SWAPGS | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), VULNWL_INTEL(CORE_YONAH, NO_SSB), VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB), /* * Technically, swapgs isn't serializing on AMD (despite it previously * being documented as such in the APM). But according to AMD, %gs is * updated non-speculatively, and the issuing of %gs-relative memory * operands will be blocked until the %gs update completes, which is * good enough for our purposes. */ VULNWL_INTEL(ATOM_TREMONT, NO_EIBRS_PBRSB), VULNWL_INTEL(ATOM_TREMONT_L, NO_EIBRS_PBRSB), VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB), /* AMD Family 0xf - 0x12 */ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB), VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB), /* Zhaoxin Family 7 */ VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO), VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO), {} }; #define VULNBL(vendor, family, model, blacklist) \ X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist) #define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \ X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \ INTEL_FAM6_##model, steppings, \ X86_FEATURE_ANY, issues) #define VULNBL_AMD(family, blacklist) \ VULNBL(AMD, family, X86_MODEL_ANY, blacklist) #define VULNBL_HYGON(family, blacklist) \ VULNBL(HYGON, family, X86_MODEL_ANY, blacklist) #define SRBDS BIT(0) /* CPU is affected by X86_BUG_MMIO_STALE_DATA */ #define MMIO BIT(1) /* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */ #define MMIO_SBDS BIT(2) /* CPU is affected by RETbleed, speculating where you would not expect it */ #define RETBLEED BIT(3) /* CPU is affected by SMT (cross-thread) return predictions */ #define SMT_RSB BIT(4) /* CPU is affected by SRSO */ #define SRSO BIT(5) /* CPU is affected by GDS */ #define GDS BIT(6) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL_X, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPING_ANY, MMIO), 
VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED), VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS), VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS), VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED), VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS), VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS), VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS), VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS), VULNBL_AMD(0x15, RETBLEED), VULNBL_AMD(0x16, RETBLEED), VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), VULNBL_HYGON(0x18, RETBLEED | SMT_RSB), VULNBL_AMD(0x19, SRSO), {} }; static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which) { const struct x86_cpu_id *m = x86_match_cpu(table); return m && !!(m->driver_data & which); } u64 x86_read_arch_cap_msr(void) { u64 ia32_cap = 0; if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); return ia32_cap; } static bool arch_cap_mmio_immune(u64 ia32_cap) { return (ia32_cap & ARCH_CAP_FBSDP_NO && ia32_cap & ARCH_CAP_PSDP_NO && ia32_cap & ARCH_CAP_SBDR_SSDP_NO); } static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) { u64 ia32_cap = x86_read_arch_cap_msr(); /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */ if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT); if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION)) return; setup_force_cpu_bug(X86_BUG_SPECTRE_V1); if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2)) setup_force_cpu_bug(X86_BUG_SPECTRE_V2); if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) && !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); /* * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature * flag and protect from vendor-specific bugs via the whitelist. 
*/ if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) { setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) && !(ia32_cap & ARCH_CAP_PBRSB_NO)) setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB); } if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) { setup_force_cpu_bug(X86_BUG_MDS); if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY)) setup_force_cpu_bug(X86_BUG_MSBDS_ONLY); } if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS)) setup_force_cpu_bug(X86_BUG_SWAPGS); /* * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when: * - TSX is supported or * - TSX_CTRL is present * * TSX_CTRL check is needed for cases when TSX could be disabled before * the kernel boot e.g. kexec. * TSX_CTRL check alone is not sufficient for cases when the microcode * update is not present or running as guest that don't get TSX_CTRL. */ if (!(ia32_cap & ARCH_CAP_TAA_NO) && (cpu_has(c, X86_FEATURE_RTM) || (ia32_cap & ARCH_CAP_TSX_CTRL_MSR))) setup_force_cpu_bug(X86_BUG_TAA); /* * SRBDS affects CPUs which support RDRAND or RDSEED and are listed * in the vulnerability blacklist. * * Some of the implications and mitigation of Shared Buffers Data * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as * SRBDS. */ if ((cpu_has(c, X86_FEATURE_RDRAND) || cpu_has(c, X86_FEATURE_RDSEED)) && cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS)) setup_force_cpu_bug(X86_BUG_SRBDS); /* * Processor MMIO Stale Data bug enumeration * * Affected CPU list is generally enough to enumerate the vulnerability, * but for virtualization case check for ARCH_CAP MSR bits also, VMM may * not want the guest to enumerate the bug. * * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist, * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits. */ if (!arch_cap_mmio_immune(ia32_cap)) { if (cpu_matches(cpu_vuln_blacklist, MMIO)) setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO)) setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN); } if (!cpu_has(c, X86_FEATURE_BTC_NO)) { if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA)) setup_force_cpu_bug(X86_BUG_RETBLEED); } if (cpu_matches(cpu_vuln_blacklist, SMT_RSB)) setup_force_cpu_bug(X86_BUG_SMT_RSB); if (!cpu_has(c, X86_FEATURE_SRSO_NO)) { if (cpu_matches(cpu_vuln_blacklist, SRSO)) setup_force_cpu_bug(X86_BUG_SRSO); } /* * Check if CPU is vulnerable to GDS. If running in a virtual machine on * an affected processor, the VMM may have disabled the use of GATHER by * disabling AVX2. The only way to do this in HW is to clear XCR0[2], * which means that AVX will be disabled. */ if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) && boot_cpu_has(X86_FEATURE_AVX)) setup_force_cpu_bug(X86_BUG_GDS); if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; /* Rogue Data Cache Load? No! */ if (ia32_cap & ARCH_CAP_RDCL_NO) return; setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); if (cpu_matches(cpu_vuln_whitelist, NO_L1TF)) return; setup_force_cpu_bug(X86_BUG_L1TF); } /* * The NOPL instruction is supposed to exist on all CPUs of family >= 6; * unfortunately, that's not true in practice because of early VIA * chips and (more importantly) broken virtualizers that are not easy * to detect. In the latter case it doesn't even *fail* reliably, so * probing for it doesn't even work. Disable it completely on 32-bit * unless we can find a reliable way to detect all the broken cases. 
* Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). */ static void detect_nopl(void) { #ifdef CONFIG_X86_32 setup_clear_cpu_cap(X86_FEATURE_NOPL); #else setup_force_cpu_cap(X86_FEATURE_NOPL); #endif } /* * We parse cpu parameters early because fpu__init_system() is executed * before parse_early_param(). */ static void __init cpu_parse_early_param(void) { char arg[128]; char *argptr = arg, *opt; int arglen, taint = 0; #ifdef CONFIG_X86_32 if (cmdline_find_option_bool(boot_command_line, "no387")) #ifdef CONFIG_MATH_EMULATION setup_clear_cpu_cap(X86_FEATURE_FPU); #else pr_err("Option 'no387' required CONFIG_MATH_EMULATION enabled.\n"); #endif if (cmdline_find_option_bool(boot_command_line, "nofxsr")) setup_clear_cpu_cap(X86_FEATURE_FXSR); #endif if (cmdline_find_option_bool(boot_command_line, "noxsave")) setup_clear_cpu_cap(X86_FEATURE_XSAVE); if (cmdline_find_option_bool(boot_command_line, "noxsaveopt")) setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); if (cmdline_find_option_bool(boot_command_line, "noxsaves")) setup_clear_cpu_cap(X86_FEATURE_XSAVES); if (cmdline_find_option_bool(boot_command_line, "nousershstk")) setup_clear_cpu_cap(X86_FEATURE_USER_SHSTK); arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg)); if (arglen <= 0) return; pr_info("Clearing CPUID bits:"); while (argptr) { bool found __maybe_unused = false; unsigned int bit; opt = strsep(&argptr, ","); /* * Handle naked numbers first for feature flags which don't * have names. */ if (!kstrtouint(opt, 10, &bit)) { if (bit < NCAPINTS * 32) { /* empty-string, i.e., ""-defined feature flags */ if (!x86_cap_flags[bit]) pr_cont(" " X86_CAP_FMT_NUM, x86_cap_flag_num(bit)); else pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit)); setup_clear_cpu_cap(bit); taint++; } /* * The assumption is that there are no feature names with only * numbers in the name thus go to the next argument. */ continue; } for (bit = 0; bit < 32 * NCAPINTS; bit++) { if (!x86_cap_flag(bit)) continue; if (strcmp(x86_cap_flag(bit), opt)) continue; pr_cont(" %s", opt); setup_clear_cpu_cap(bit); taint++; found = true; break; } if (!found) pr_cont(" (unknown: %s)", opt); } pr_cont("\n"); if (taint) add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); } /* * Do minimum CPU detection early. * Fields really needed: vendor, cpuid_level, family, model, mask, * cache alignment. * The others are not touched to avoid unwanted side effects. * * WARNING: this function is only called on the boot CPU. Don't add code * here that is supposed to run on all CPUs. 
*/ static void __init early_identify_cpu(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_64 c->x86_clflush_size = 64; c->x86_phys_bits = 36; c->x86_virt_bits = 48; #else c->x86_clflush_size = 32; c->x86_phys_bits = 32; c->x86_virt_bits = 32; #endif c->x86_cache_alignment = c->x86_clflush_size; memset(&c->x86_capability, 0, sizeof(c->x86_capability)); c->extended_cpuid_level = 0; if (!have_cpuid_p()) identify_cpu_without_cpuid(c); /* cyrix could have cpuid enabled via c_identify()*/ if (have_cpuid_p()) { cpu_detect(c); get_cpu_vendor(c); get_cpu_cap(c); get_cpu_address_sizes(c); setup_force_cpu_cap(X86_FEATURE_CPUID); cpu_parse_early_param(); if (this_cpu->c_early_init) this_cpu->c_early_init(c); c->cpu_index = 0; filter_cpuid_features(c, false); if (this_cpu->c_bsp_init) this_cpu->c_bsp_init(c); } else { setup_clear_cpu_cap(X86_FEATURE_CPUID); } setup_force_cpu_cap(X86_FEATURE_ALWAYS); cpu_set_bug_bits(c); sld_setup(c); #ifdef CONFIG_X86_32 /* * Regardless of whether PCID is enumerated, the SDM says * that it can't be enabled in 32-bit mode. */ setup_clear_cpu_cap(X86_FEATURE_PCID); #endif /* * Later in the boot process pgtable_l5_enabled() relies on * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not * enabled by this point we need to clear the feature bit to avoid * false-positives at the later stage. * * pgtable_l5_enabled() can be false here for several reasons: * - 5-level paging is disabled compile-time; * - it's 32-bit kernel; * - machine doesn't support 5-level paging; * - user specified 'no5lvl' in kernel command line. */ if (!pgtable_l5_enabled()) setup_clear_cpu_cap(X86_FEATURE_LA57); detect_nopl(); } void __init early_cpu_init(void) { const struct cpu_dev *const *cdev; int count = 0; #ifdef CONFIG_PROCESSOR_SELECT pr_info("KERNEL supported cpus:\n"); #endif for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { const struct cpu_dev *cpudev = *cdev; if (count >= X86_VENDOR_NUM) break; cpu_devs[count] = cpudev; count++; #ifdef CONFIG_PROCESSOR_SELECT { unsigned int j; for (j = 0; j < 2; j++) { if (!cpudev->c_ident[j]) continue; pr_info(" %s %s\n", cpudev->c_vendor, cpudev->c_ident[j]); } } #endif } early_identify_cpu(&boot_cpu_data); } static bool detect_null_seg_behavior(void) { /* * Empirically, writing zero to a segment selector on AMD does * not clear the base, whereas writing zero to a segment * selector on Intel does clear the base. Intel's behavior * allows slightly faster context switches in the common case * where GS is unused by the prev and next threads. * * Since neither vendor documents this anywhere that I can see, * detect it directly instead of hard-coding the choice by * vendor. * * I've designated AMD's behavior as the "bug" because it's * counterintuitive and less friendly. */ unsigned long old_base, tmp; rdmsrl(MSR_FS_BASE, old_base); wrmsrl(MSR_FS_BASE, 1); loadsegment(fs, 0); rdmsrl(MSR_FS_BASE, tmp); wrmsrl(MSR_FS_BASE, old_base); return tmp == 0; } void check_null_seg_clears_base(struct cpuinfo_x86 *c) { /* BUG_NULL_SEG is only relevant with 64bit userspace */ if (!IS_ENABLED(CONFIG_X86_64)) return; if (cpu_has(c, X86_FEATURE_NULL_SEL_CLR_BASE)) return; /* * CPUID bit above wasn't set. If this kernel is still running * as a HV guest, then the HV has decided not to advertize * that CPUID bit for whatever reason. For example, one * member of the migration pool might be vulnerable. Which * means, the bug is present: set the BUG flag and return. 
*/ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) { set_cpu_bug(c, X86_BUG_NULL_SEG); return; } /* * Zen2 CPUs also have this behaviour, but no CPUID bit. * 0x18 is the respective family for Hygon. */ if ((c->x86 == 0x17 || c->x86 == 0x18) && detect_null_seg_behavior()) return; /* All the remaining ones are affected */ set_cpu_bug(c, X86_BUG_NULL_SEG); } static void generic_identify(struct cpuinfo_x86 *c) { c->extended_cpuid_level = 0; if (!have_cpuid_p()) identify_cpu_without_cpuid(c); /* cyrix could have cpuid enabled via c_identify()*/ if (!have_cpuid_p()) return; cpu_detect(c); get_cpu_vendor(c); get_cpu_cap(c); get_cpu_address_sizes(c); if (c->cpuid_level >= 0x00000001) { c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; #ifdef CONFIG_X86_32 # ifdef CONFIG_SMP c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); # else c->apicid = c->initial_apicid; # endif #endif c->phys_proc_id = c->initial_apicid; } get_model_name(c); /* Default name */ /* * ESPFIX is a strange bug. All real CPUs have it. Paravirt * systems that run Linux at CPL > 0 may or may not have the * issue, but, even if they have the issue, there's absolutely * nothing we can do about it because we can't use the real IRET * instruction. * * NB: For the time being, only 32-bit kernels support * X86_BUG_ESPFIX as such. 64-bit kernels directly choose * whether to apply espfix using paravirt hooks. If any * non-paravirt system ever shows up that does *not* have the * ESPFIX issue, we can change this. */ #ifdef CONFIG_X86_32 set_cpu_bug(c, X86_BUG_ESPFIX); #endif } /* * Validate that ACPI/mptables have the same information about the * effective APIC id and update the package map. */ static void validate_apic_and_package_id(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP unsigned int apicid, cpu = smp_processor_id(); apicid = apic->cpu_present_to_apicid(cpu); if (apicid != c->apicid) { pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n", cpu, apicid, c->initial_apicid); } BUG_ON(topology_update_package_map(c->phys_proc_id, cpu)); BUG_ON(topology_update_die_map(c->cpu_die_id, cpu)); #else c->logical_proc_id = 0; #endif } /* * This does the hard work of actually picking apart the CPU stuff... */ static void identify_cpu(struct cpuinfo_x86 *c) { int i; c->loops_per_jiffy = loops_per_jiffy; c->x86_cache_size = 0; c->x86_vendor = X86_VENDOR_UNKNOWN; c->x86_model = c->x86_stepping = 0; /* So far unknown... */ c->x86_vendor_id[0] = '\0'; /* Unset */ c->x86_model_id[0] = '\0'; /* Unset */ c->x86_max_cores = 1; c->x86_coreid_bits = 0; c->cu_id = 0xff; #ifdef CONFIG_X86_64 c->x86_clflush_size = 64; c->x86_phys_bits = 36; c->x86_virt_bits = 48; #else c->cpuid_level = -1; /* CPUID not detected */ c->x86_clflush_size = 32; c->x86_phys_bits = 32; c->x86_virt_bits = 32; #endif c->x86_cache_alignment = c->x86_clflush_size; memset(&c->x86_capability, 0, sizeof(c->x86_capability)); #ifdef CONFIG_X86_VMX_FEATURE_NAMES memset(&c->vmx_capability, 0, sizeof(c->vmx_capability)); #endif generic_identify(c); if (this_cpu->c_identify) this_cpu->c_identify(c); /* Clear/Set all flags overridden by options, after probe */ apply_forced_caps(c); #ifdef CONFIG_X86_64 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); #endif /* * Vendor-specific initialization. In this section we * canonicalize the feature flags, meaning if there are * features a certain CPU supports which CPUID doesn't * tell us, CPUID claiming incorrect flags, or other bugs, * we handle them here. 
* * At the end of this section, c->x86_capability better * indicate the features this CPU genuinely supports! */ if (this_cpu->c_init) this_cpu->c_init(c); /* Disable the PN if appropriate */ squash_the_stupid_serial_number(c); /* Set up SMEP/SMAP/UMIP */ setup_smep(c); setup_smap(c); setup_umip(c); /* Enable FSGSBASE instructions if available. */ if (cpu_has(c, X86_FEATURE_FSGSBASE)) { cr4_set_bits(X86_CR4_FSGSBASE); elf_hwcap2 |= HWCAP2_FSGSBASE; } /* * The vendor-specific functions might have changed features. * Now we do "generic changes." */ /* Filter out anything that depends on CPUID levels we don't have */ filter_cpuid_features(c, true); /* If the model name is still unset, do table lookup. */ if (!c->x86_model_id[0]) { const char *p; p = table_lookup_model(c); if (p) strcpy(c->x86_model_id, p); else /* Last resort... */ sprintf(c->x86_model_id, "%02x/%02x", c->x86, c->x86_model); } #ifdef CONFIG_X86_64 detect_ht(c); #endif x86_init_rdrand(c); setup_pku(c); setup_cet(c); /* * Clear/Set all flags overridden by options, need do it * before following smp all cpus cap AND. */ apply_forced_caps(c); /* * On SMP, boot_cpu_data holds the common feature set between * all CPUs; so make sure that we indicate which features are * common between the CPUs. The first time this routine gets * executed, c == &boot_cpu_data. */ if (c != &boot_cpu_data) { /* AND the already accumulated flags with these */ for (i = 0; i < NCAPINTS; i++) boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; /* OR, i.e. replicate the bug flags */ for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++) c->x86_capability[i] |= boot_cpu_data.x86_capability[i]; } ppin_init(c); /* Init Machine Check Exception if available. */ mcheck_cpu_init(c); select_idle_routine(c); #ifdef CONFIG_NUMA numa_add_cpu(smp_processor_id()); #endif } /* * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions * on 32-bit kernels: */ #ifdef CONFIG_X86_32 void enable_sep_cpu(void) { struct tss_struct *tss; int cpu; if (!boot_cpu_has(X86_FEATURE_SEP)) return; cpu = get_cpu(); tss = &per_cpu(cpu_tss_rw, cpu); /* * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- * see the big comment in struct x86_hw_tss's definition. 
*/ tss->x86_tss.ss1 = __KERNEL_CS; wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); put_cpu(); } #endif static __init void identify_boot_cpu(void) { identify_cpu(&boot_cpu_data); if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT)) pr_info("CET detected: Indirect Branch Tracking enabled\n"); #ifdef CONFIG_X86_32 enable_sep_cpu(); #endif cpu_detect_tlb(&boot_cpu_data); setup_cr_pinning(); tsx_init(); lkgs_init(); } void identify_secondary_cpu(struct cpuinfo_x86 *c) { BUG_ON(c == &boot_cpu_data); identify_cpu(c); #ifdef CONFIG_X86_32 enable_sep_cpu(); #endif validate_apic_and_package_id(c); x86_spec_ctrl_setup_ap(); update_srbds_msr(); if (boot_cpu_has_bug(X86_BUG_GDS)) update_gds_msr(); tsx_ap_init(); } void print_cpu_info(struct cpuinfo_x86 *c) { const char *vendor = NULL; if (c->x86_vendor < X86_VENDOR_NUM) { vendor = this_cpu->c_vendor; } else { if (c->cpuid_level >= 0) vendor = c->x86_vendor_id; } if (vendor && !strstr(c->x86_model_id, vendor)) pr_cont("%s ", vendor); if (c->x86_model_id[0]) pr_cont("%s", c->x86_model_id); else pr_cont("%d86", c->x86); pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); if (c->x86_stepping || c->cpuid_level >= 0) pr_cont(", stepping: 0x%x)\n", c->x86_stepping); else pr_cont(")\n"); } /* * clearcpuid= was already parsed in cpu_parse_early_param(). This dummy * function prevents it from becoming an environment variable for init. */ static __init int setup_clearcpuid(char *arg) { return 1; } __setup("clearcpuid=", setup_clearcpuid); DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = { .current_task = &init_task, .preempt_count = INIT_PREEMPT_COUNT, .top_of_stack = TOP_OF_INIT_STACK, }; EXPORT_PER_CPU_SYMBOL(pcpu_hot); #ifdef CONFIG_X86_64 DEFINE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __aligned(PAGE_SIZE) __visible; EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data); static void wrmsrl_cstar(unsigned long val) { /* * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR * is so far ignored by the CPU, but raises a #VE trap in a TDX * guest. Avoid the pointless write on all Intel CPUs. */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) wrmsrl(MSR_CSTAR, val); } /* May not be marked __init: used by software suspend */ void syscall_init(void) { wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); #ifdef CONFIG_IA32_EMULATION wrmsrl_cstar((unsigned long)entry_SYSCALL_compat); /* * This only works on Intel CPUs. * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. * This does not cause SYSENTER to jump to the wrong location, because * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). */ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1)); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); #else wrmsrl_cstar((unsigned long)ignore_sysret); wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); #endif /* * Flags to clear on syscall; clear as much as possible * to minimize user space-kernel interference. 
*/ wrmsrl(MSR_SYSCALL_MASK, X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF| X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF| X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF| X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF| X86_EFLAGS_AC|X86_EFLAGS_ID); } #else /* CONFIG_X86_64 */ #ifdef CONFIG_STACKPROTECTOR DEFINE_PER_CPU(unsigned long, __stack_chk_guard); EXPORT_PER_CPU_SYMBOL(__stack_chk_guard); #endif #endif /* CONFIG_X86_64 */ /* * Clear all 6 debug registers: */ static void clear_all_debug_regs(void) { int i; for (i = 0; i < 8; i++) { /* Ignore db4, db5 */ if ((i == 4) || (i == 5)) continue; set_debugreg(0, i); } } #ifdef CONFIG_KGDB /* * Restore debug regs if using kgdbwait and you have a kernel debugger * connection established. */ static void dbg_restore_debug_regs(void) { if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break)) arch_kgdb_ops.correct_hw_break(); } #else /* ! CONFIG_KGDB */ #define dbg_restore_debug_regs() #endif /* ! CONFIG_KGDB */ static inline void setup_getcpu(int cpu) { unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu)); struct desc_struct d = { }; if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID)) wrmsr(MSR_TSC_AUX, cpudata, 0); /* Store CPU and node number in limit. */ d.limit0 = cpudata; d.limit1 = cpudata >> 16; d.type = 5; /* RO data, expand down, accessed */ d.dpl = 3; /* Visible to user code */ d.s = 1; /* Not a system segment */ d.p = 1; /* Present */ d.d = 1; /* 32-bit */ write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S); } #ifdef CONFIG_X86_64 static inline void ucode_cpu_init(int cpu) { } static inline void tss_setup_ist(struct tss_struct *tss) { /* Set up the per-CPU TSS IST stacks */ tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF); tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI); tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB); tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE); /* Only mapped when SEV-ES is active */ tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC); } #else /* CONFIG_X86_64 */ static inline void ucode_cpu_init(int cpu) { show_ucode_info_early(); } static inline void tss_setup_ist(struct tss_struct *tss) { } #endif /* !CONFIG_X86_64 */ static inline void tss_setup_io_bitmap(struct tss_struct *tss) { tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID; #ifdef CONFIG_X86_IOPL_IOPERM tss->io_bitmap.prev_max = 0; tss->io_bitmap.prev_sequence = 0; memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap)); /* * Invalidate the extra array entry past the end of the all * permission bitmap as required by the hardware. */ tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL; #endif } /* * Setup everything needed to handle exceptions from the IDT, including the IST * exceptions which use paranoid_entry(). */ void cpu_init_exception_handling(void) { struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); int cpu = raw_smp_processor_id(); /* paranoid_entry() gets the CPU number from the GDT */ setup_getcpu(cpu); /* IST vectors need TSS to be set up. */ tss_setup_ist(tss); tss_setup_io_bitmap(tss); set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); load_TR_desc(); /* GHCB needs to be setup to handle #VC. */ setup_ghcb(); /* Finally load the IDT */ load_current_idt(); } /* * cpu_init() initializes state that is per-CPU. Some data is already * initialized (naturally) in the bootstrap process, such as the GDT. We * reload it nevertheless, this function acts as a 'CPU state barrier', * nothing should get across. 
*/ void cpu_init(void) { struct task_struct *cur = current; int cpu = raw_smp_processor_id(); ucode_cpu_init(cpu); #ifdef CONFIG_NUMA if (this_cpu_read(numa_node) == 0 && early_cpu_to_node(cpu) != NUMA_NO_NODE) set_numa_node(early_cpu_to_node(cpu)); #endif pr_debug("Initializing CPU#%d\n", cpu); if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) || boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE)) cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); if (IS_ENABLED(CONFIG_X86_64)) { loadsegment(fs, 0); memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); syscall_init(); wrmsrl(MSR_FS_BASE, 0); wrmsrl(MSR_KERNEL_GS_BASE, 0); barrier(); x2apic_setup(); } mmgrab(&init_mm); cur->active_mm = &init_mm; BUG_ON(cur->mm); initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, cur); /* * sp0 points to the entry trampoline stack regardless of what task * is running. */ load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); load_mm_ldt(&init_mm); clear_all_debug_regs(); dbg_restore_debug_regs(); doublefault_init_cpu_tss(); if (is_uv_system()) uv_cpu_init(); load_fixmap_gdt(cpu); } #ifdef CONFIG_MICROCODE_LATE_LOADING /** * store_cpu_caps() - Store a snapshot of CPU capabilities * @curr_info: Pointer where to store it * * Returns: None */ void store_cpu_caps(struct cpuinfo_x86 *curr_info) { /* Reload CPUID max function as it might've changed. */ curr_info->cpuid_level = cpuid_eax(0); /* Copy all capability leafs and pick up the synthetic ones. */ memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability, sizeof(curr_info->x86_capability)); /* Get the hardware CPUID leafs */ get_cpu_cap(curr_info); } /** * microcode_check() - Check if any CPU capabilities changed after an update. * @prev_info: CPU capabilities stored before an update. * * The microcode loader calls this upon late microcode load to recheck features, * only when microcode has been updated. Caller holds and CPU hotplug lock. * * Return: None */ void microcode_check(struct cpuinfo_x86 *prev_info) { struct cpuinfo_x86 curr_info; perf_check_microcode(); amd_check_microcode(); store_cpu_caps(&curr_info); if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability, sizeof(prev_info->x86_capability))) return; pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); } #endif /* * Invoked from core CPU hotplug code after hotplug operations */ void arch_smt_update(void) { /* Handle the speculative execution misfeatures */ cpu_bugs_smt_update(); /* Check whether IPI broadcasting can be enabled */ apic_smt_update(); } void __init arch_cpu_finalize_init(void) { identify_boot_cpu(); /* * identify_boot_cpu() initialized SMT support information, let the * core code know. */ cpu_smt_set_num_threads(smp_num_siblings, smp_num_siblings); if (!IS_ENABLED(CONFIG_SMP)) { pr_info("CPU: "); print_cpu_info(&boot_cpu_data); } cpu_select_mitigations(); arch_smt_update(); if (IS_ENABLED(CONFIG_X86_32)) { /* * Check whether this is a real i386 which is not longer * supported and fixup the utsname. */ if (boot_cpu_data.x86 < 4) panic("Kernel requires i486+ for 'invlpg' and other features"); init_utsname()->machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); } /* * Must be before alternatives because it might set or clear * feature bits. 
*/ fpu__init_system(); fpu__init_cpu(); alternative_instructions(); if (IS_ENABLED(CONFIG_X86_64)) { /* * Make sure the first 2MB area is not mapped by huge pages * There are typically fixed size MTRRs in there and overlapping * MTRRs into large pages causes slow downs. * * Right now we don't do that with gbpages because there seems * very little benefit for that case. */ if (!direct_gbpages) set_memory_4k((unsigned long)__va(0), 1); } else { fpu__init_check_bugs(); } /* * This needs to be called before any devices perform DMA * operations that might use the SWIOTLB bounce buffers. It will * mark the bounce buffers as decrypted so that their usage will * not cause "plain-text" data to be decrypted when accessed. It * must be called after late_time_init() so that Hyper-V x86/x64 * hypercalls work when the SWIOTLB bounce buffers are decrypted. */ mem_encrypt_init(); }
linux-master
arch/x86/kernel/cpu/common.c
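The following is a minimal user-space sketch (not the kernel's x86_match_cpu() machinery; the table contents, model numbers and helper names are illustrative) of how the NO_* whitelist bits in common.c above combine with a family/model table hit in cpu_matches() before cpu_set_bug_bits() forces a bug flag:

/*
 * Standalone sketch of the whitelist check used by cpu_set_bug_bits():
 * each table entry carries a bitmask of "not affected" flags, and
 * cpu_matches() tests the requested bits on the matching entry.
 * Table contents below are made up, not the kernel's real data.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NO_SPECULATION	(1u << 0)
#define NO_MDS		(1u << 4)

struct vuln_entry {
	uint8_t  family;
	uint16_t model;		/* 0xffff == any model */
	uint32_t flags;
};

static const struct vuln_entry whitelist[] = {
	{ 0x05, 0xffff, NO_SPECULATION },	/* old in-order parts */
	{ 0x06, 0x005c, NO_MDS },		/* hypothetical Atom model */
	{ 0 }					/* terminator */
};

/* Mirror of cpu_matches(): find a table hit, then test the requested bits. */
static bool cpu_matches(uint8_t family, uint16_t model, uint32_t which)
{
	for (const struct vuln_entry *e = whitelist; e->family; e++) {
		if (e->family != family)
			continue;
		if (e->model != 0xffff && e->model != model)
			continue;
		return (e->flags & which) != 0;
	}
	return false;
}

int main(void)
{
	/* A CPU not whitelisted for MDS would get the synthetic bug bit. */
	if (!cpu_matches(0x06, 0x008e, NO_MDS))
		printf("would set X86_BUG_MDS\n");
	return 0;
}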
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>

#include <asm/processor.h>

#include "cpu.h"

/*
 * UMC chips appear to be only either 386 or 486,
 * so no special init takes place.
 */
static const struct cpu_dev umc_cpu_dev = {
	.c_vendor	= "UMC",
	.c_ident	= { "UMC UMC UMC" },
	.legacy_models	= {
		{ .family = 4, .model_names =
		  {
			  [1] = "U5D",
			  [2] = "U5S",
		  }
		},
	},
	.c_x86_vendor	= X86_VENDOR_UMC,
};

cpu_dev_register(umc_cpu_dev);
linux-master
arch/x86/kernel/cpu/umc.c
// SPDX-License-Identifier: GPL-2.0-only /* * x86 APERF/MPERF KHz calculation for * /sys/.../cpufreq/scaling_cur_freq * * Copyright (C) 2017 Intel Corp. * Author: Len Brown <[email protected]> */ #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/ktime.h> #include <linux/math64.h> #include <linux/percpu.h> #include <linux/rcupdate.h> #include <linux/sched/isolation.h> #include <linux/sched/topology.h> #include <linux/smp.h> #include <linux/syscore_ops.h> #include <asm/cpu.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include "cpu.h" struct aperfmperf { seqcount_t seq; unsigned long last_update; u64 acnt; u64 mcnt; u64 aperf; u64 mperf; }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct aperfmperf, cpu_samples) = { .seq = SEQCNT_ZERO(cpu_samples.seq) }; static void init_counter_refs(void) { u64 aperf, mperf; rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); this_cpu_write(cpu_samples.aperf, aperf); this_cpu_write(cpu_samples.mperf, mperf); } #if defined(CONFIG_X86_64) && defined(CONFIG_SMP) /* * APERF/MPERF frequency ratio computation. * * The scheduler wants to do frequency invariant accounting and needs a <1 * ratio to account for the 'current' frequency, corresponding to * freq_curr / freq_max. * * Since the frequency freq_curr on x86 is controlled by micro-controller and * our P-state setting is little more than a request/hint, we need to observe * the effective frequency 'BusyMHz', i.e. the average frequency over a time * interval after discarding idle time. This is given by: * * BusyMHz = delta_APERF / delta_MPERF * freq_base * * where freq_base is the max non-turbo P-state. * * The freq_max term has to be set to a somewhat arbitrary value, because we * can't know which turbo states will be available at a given point in time: * it all depends on the thermal headroom of the entire package. We set it to * the turbo level with 4 cores active. * * Benchmarks show that's a good compromise between the 1C turbo ratio * (freq_curr/freq_max would rarely reach 1) and something close to freq_base, * which would ignore the entire turbo range (a conspicuous part, making * freq_curr/freq_max always maxed out). * * An exception to the heuristic above is the Atom uarch, where we choose the * highest turbo level for freq_max since Atom's are generally oriented towards * power efficiency. * * Setting freq_max to anything less than the 1C turbo ratio makes the ratio * freq_curr / freq_max to eventually grow >1, in which case we clip it to 1. */ DEFINE_STATIC_KEY_FALSE(arch_scale_freq_key); static u64 arch_turbo_freq_ratio = SCHED_CAPACITY_SCALE; static u64 arch_max_freq_ratio = SCHED_CAPACITY_SCALE; void arch_set_max_freq_ratio(bool turbo_disabled) { arch_max_freq_ratio = turbo_disabled ? 
SCHED_CAPACITY_SCALE : arch_turbo_freq_ratio; } EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio); static bool __init turbo_disabled(void) { u64 misc_en; int err; err = rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_en); if (err) return false; return (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); } static bool __init slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq) { int err; err = rdmsrl_safe(MSR_ATOM_CORE_RATIOS, base_freq); if (err) return false; err = rdmsrl_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq); if (err) return false; *base_freq = (*base_freq >> 16) & 0x3F; /* max P state */ *turbo_freq = *turbo_freq & 0x3F; /* 1C turbo */ return true; } #define X86_MATCH(model) \ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \ INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL) static const struct x86_cpu_id has_knl_turbo_ratio_limits[] __initconst = { X86_MATCH(XEON_PHI_KNL), X86_MATCH(XEON_PHI_KNM), {} }; static const struct x86_cpu_id has_skx_turbo_ratio_limits[] __initconst = { X86_MATCH(SKYLAKE_X), {} }; static const struct x86_cpu_id has_glm_turbo_ratio_limits[] __initconst = { X86_MATCH(ATOM_GOLDMONT), X86_MATCH(ATOM_GOLDMONT_D), X86_MATCH(ATOM_GOLDMONT_PLUS), {} }; static bool __init knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int num_delta_fratio) { int fratio, delta_fratio, found; int err, i; u64 msr; err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq); if (err) return false; *base_freq = (*base_freq >> 8) & 0xFF; /* max P state */ err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr); if (err) return false; fratio = (msr >> 8) & 0xFF; i = 16; found = 0; do { if (found >= num_delta_fratio) { *turbo_freq = fratio; return true; } delta_fratio = (msr >> (i + 5)) & 0x7; if (delta_fratio) { found += 1; fratio -= delta_fratio; } i += 8; } while (i < 64); return true; } static bool __init skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int size) { u64 ratios, counts; u32 group_size; int err, i; err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq); if (err) return false; *base_freq = (*base_freq >> 8) & 0xFF; /* max P state */ err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &ratios); if (err) return false; err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT1, &counts); if (err) return false; for (i = 0; i < 64; i += 8) { group_size = (counts >> i) & 0xFF; if (group_size >= size) { *turbo_freq = (ratios >> i) & 0xFF; return true; } } return false; } static bool __init core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq) { u64 msr; int err; err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq); if (err) return false; err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr); if (err) return false; *base_freq = (*base_freq >> 8) & 0xFF; /* max P state */ *turbo_freq = (msr >> 24) & 0xFF; /* 4C turbo */ /* The CPU may have less than 4 cores */ if (!*turbo_freq) *turbo_freq = msr & 0xFF; /* 1C turbo */ return true; } static bool __init intel_set_max_freq_ratio(void) { u64 base_freq, turbo_freq; u64 turbo_ratio; if (slv_set_max_freq_ratio(&base_freq, &turbo_freq)) goto out; if (x86_match_cpu(has_glm_turbo_ratio_limits) && skx_set_max_freq_ratio(&base_freq, &turbo_freq, 1)) goto out; if (x86_match_cpu(has_knl_turbo_ratio_limits) && knl_set_max_freq_ratio(&base_freq, &turbo_freq, 1)) goto out; if (x86_match_cpu(has_skx_turbo_ratio_limits) && skx_set_max_freq_ratio(&base_freq, &turbo_freq, 4)) goto out; if (core_set_max_freq_ratio(&base_freq, &turbo_freq)) goto out; return false; out: /* * Some hypervisors advertise X86_FEATURE_APERFMPERF * but then fill all MSR's with zeroes. 
* Some CPUs have turbo boost but don't declare any turbo ratio * in MSR_TURBO_RATIO_LIMIT. */ if (!base_freq || !turbo_freq) { pr_debug("Couldn't determine cpu base or turbo frequency, necessary for scale-invariant accounting.\n"); return false; } turbo_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE, base_freq); if (!turbo_ratio) { pr_debug("Non-zero turbo and base frequencies led to a 0 ratio.\n"); return false; } arch_turbo_freq_ratio = turbo_ratio; arch_set_max_freq_ratio(turbo_disabled()); return true; } #ifdef CONFIG_PM_SLEEP static struct syscore_ops freq_invariance_syscore_ops = { .resume = init_counter_refs, }; static void register_freq_invariance_syscore_ops(void) { register_syscore_ops(&freq_invariance_syscore_ops); } #else static inline void register_freq_invariance_syscore_ops(void) {} #endif static void freq_invariance_enable(void) { if (static_branch_unlikely(&arch_scale_freq_key)) { WARN_ON_ONCE(1); return; } static_branch_enable(&arch_scale_freq_key); register_freq_invariance_syscore_ops(); pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio); } void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled) { arch_turbo_freq_ratio = ratio; arch_set_max_freq_ratio(turbo_disabled); freq_invariance_enable(); } static void __init bp_init_freq_invariance(void) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return; if (intel_set_max_freq_ratio()) freq_invariance_enable(); } static void disable_freq_invariance_workfn(struct work_struct *work) { int cpu; static_branch_disable(&arch_scale_freq_key); /* * Set arch_freq_scale to a default value on all cpus * This negates the effect of scaling */ for_each_possible_cpu(cpu) per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE; } static DECLARE_WORK(disable_freq_invariance_work, disable_freq_invariance_workfn); DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE; static void scale_freq_tick(u64 acnt, u64 mcnt) { u64 freq_scale; if (!arch_scale_freq_invariant()) return; if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt)) goto error; if (check_mul_overflow(mcnt, arch_max_freq_ratio, &mcnt) || !mcnt) goto error; freq_scale = div64_u64(acnt, mcnt); if (!freq_scale) goto error; if (freq_scale > SCHED_CAPACITY_SCALE) freq_scale = SCHED_CAPACITY_SCALE; this_cpu_write(arch_freq_scale, freq_scale); return; error: pr_warn("Scheduler frequency invariance went wobbly, disabling!\n"); schedule_work(&disable_freq_invariance_work); } #else static inline void bp_init_freq_invariance(void) { } static inline void scale_freq_tick(u64 acnt, u64 mcnt) { } #endif /* CONFIG_X86_64 && CONFIG_SMP */ void arch_scale_freq_tick(void) { struct aperfmperf *s = this_cpu_ptr(&cpu_samples); u64 acnt, mcnt, aperf, mperf; if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF)) return; rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); acnt = aperf - s->aperf; mcnt = mperf - s->mperf; s->aperf = aperf; s->mperf = mperf; raw_write_seqcount_begin(&s->seq); s->last_update = jiffies; s->acnt = acnt; s->mcnt = mcnt; raw_write_seqcount_end(&s->seq); scale_freq_tick(acnt, mcnt); } /* * Discard samples older than the define maximum sample age of 20ms. There * is no point in sending IPIs in such a case. If the scheduler tick was * not running then the CPU is either idle or isolated. 
*/ #define MAX_SAMPLE_AGE ((unsigned long)HZ / 50) unsigned int arch_freq_get_on_cpu(int cpu) { struct aperfmperf *s = per_cpu_ptr(&cpu_samples, cpu); unsigned int seq, freq; unsigned long last; u64 acnt, mcnt; if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF)) goto fallback; do { seq = raw_read_seqcount_begin(&s->seq); last = s->last_update; acnt = s->acnt; mcnt = s->mcnt; } while (read_seqcount_retry(&s->seq, seq)); /* * Bail on invalid count and when the last update was too long ago, * which covers idle and NOHZ full CPUs. */ if (!mcnt || (jiffies - last) > MAX_SAMPLE_AGE) goto fallback; return div64_u64((cpu_khz * acnt), mcnt); fallback: freq = cpufreq_quick_get(cpu); return freq ? freq : cpu_khz; } static int __init bp_init_aperfmperf(void) { if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF)) return 0; init_counter_refs(); bp_init_freq_invariance(); return 0; } early_initcall(bp_init_aperfmperf); void ap_init_aperfmperf(void) { if (cpu_feature_enabled(X86_FEATURE_APERFMPERF)) init_counter_refs(); }
linux-master
arch/x86/kernel/cpu/aperfmperf.c
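A minimal sketch of the frequency-scale arithmetic from scale_freq_tick() in aperfmperf.c above, assuming arch_max_freq_ratio is already expressed in SCHED_CAPACITY_SCALE units (turbo/base * 1024); the sample deltas are made up and the overflow checks are reduced to a simple fallback:

/*
 * scale = (delta_APERF << 2*SCHED_CAPACITY_SHIFT) /
 *         (delta_MPERF * max_freq_ratio), clamped to SCHED_CAPACITY_SCALE.
 */
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

static unsigned long freq_scale(uint64_t acnt, uint64_t mcnt,
				uint64_t max_freq_ratio)
{
	uint64_t scale;

	if (!mcnt || !max_freq_ratio)
		return SCHED_CAPACITY_SCALE;	/* fall back to "running at max" */

	scale = (acnt << (2 * SCHED_CAPACITY_SHIFT)) / (mcnt * max_freq_ratio);
	if (scale > SCHED_CAPACITY_SCALE)
		scale = SCHED_CAPACITY_SCALE;
	return (unsigned long)scale;
}

int main(void)
{
	/* CPU ran at ~80% of base clock; turbo ratio 1.25x => ratio ~1280. */
	printf("scale = %lu / %lu\n",
	       freq_scale(8000, 10000, 1280), SCHED_CAPACITY_SCALE);
	return 0;
}

With those inputs the result is about 655/1024, i.e. roughly 0.8/1.25 of the assumed maximum frequency, which is what the scheduler's frequency-invariant accounting consumes.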
// SPDX-License-Identifier: GPL-2.0 /* * local apic based NMI watchdog for various CPUs. * * This file also handles reservation of performance counters for coordination * with other users. * * Note that these events normally don't tick when the CPU idles. This means * the frequency varies with CPU load. * * Original code for K7/P6 written by Keith Owens * */ #include <linux/percpu.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/smp.h> #include <asm/nmi.h> #include <linux/kprobes.h> #include <asm/apic.h> #include <asm/perf_event.h> /* * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's * offset from MSR_P4_BSU_ESCR0. * * It will be the max for all platforms (for now) */ #define NMI_MAX_COUNTER_BITS 66 /* * perfctr_nmi_owner tracks the ownership of the perfctr registers: * evtsel_nmi_owner tracks the ownership of the event selection * - different performance counters/ event selection may be reserved for * different subsystems this reservation system just tries to coordinate * things a little */ static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS); static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS); /* converts an msr to an appropriate reservation bit */ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) { /* returns the bit offset of the performance counter register */ switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_HYGON: case X86_VENDOR_AMD: if (msr >= MSR_F15H_PERF_CTR) return (msr - MSR_F15H_PERF_CTR) >> 1; return msr - MSR_K7_PERFCTR0; case X86_VENDOR_INTEL: if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) return msr - MSR_ARCH_PERFMON_PERFCTR0; switch (boot_cpu_data.x86) { case 6: return msr - MSR_P6_PERFCTR0; case 11: return msr - MSR_KNC_PERFCTR0; case 15: return msr - MSR_P4_BPU_PERFCTR0; } break; case X86_VENDOR_ZHAOXIN: case X86_VENDOR_CENTAUR: return msr - MSR_ARCH_PERFMON_PERFCTR0; } return 0; } /* * converts an msr to an appropriate reservation bit * returns the bit offset of the event selection register */ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) { /* returns the bit offset of the event selection register */ switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_HYGON: case X86_VENDOR_AMD: if (msr >= MSR_F15H_PERF_CTL) return (msr - MSR_F15H_PERF_CTL) >> 1; return msr - MSR_K7_EVNTSEL0; case X86_VENDOR_INTEL: if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) return msr - MSR_ARCH_PERFMON_EVENTSEL0; switch (boot_cpu_data.x86) { case 6: return msr - MSR_P6_EVNTSEL0; case 11: return msr - MSR_KNC_EVNTSEL0; case 15: return msr - MSR_P4_BSU_ESCR0; } break; case X86_VENDOR_ZHAOXIN: case X86_VENDOR_CENTAUR: return msr - MSR_ARCH_PERFMON_EVENTSEL0; } return 0; } int reserve_perfctr_nmi(unsigned int msr) { unsigned int counter; counter = nmi_perfctr_msr_to_bit(msr); /* register not managed by the allocator? */ if (counter > NMI_MAX_COUNTER_BITS) return 1; if (!test_and_set_bit(counter, perfctr_nmi_owner)) return 1; return 0; } EXPORT_SYMBOL(reserve_perfctr_nmi); void release_perfctr_nmi(unsigned int msr) { unsigned int counter; counter = nmi_perfctr_msr_to_bit(msr); /* register not managed by the allocator? */ if (counter > NMI_MAX_COUNTER_BITS) return; clear_bit(counter, perfctr_nmi_owner); } EXPORT_SYMBOL(release_perfctr_nmi); int reserve_evntsel_nmi(unsigned int msr) { unsigned int counter; counter = nmi_evntsel_msr_to_bit(msr); /* register not managed by the allocator? 
*/ if (counter > NMI_MAX_COUNTER_BITS) return 1; if (!test_and_set_bit(counter, evntsel_nmi_owner)) return 1; return 0; } EXPORT_SYMBOL(reserve_evntsel_nmi); void release_evntsel_nmi(unsigned int msr) { unsigned int counter; counter = nmi_evntsel_msr_to_bit(msr); /* register not managed by the allocator? */ if (counter > NMI_MAX_COUNTER_BITS) return; clear_bit(counter, evntsel_nmi_owner); } EXPORT_SYMBOL(release_evntsel_nmi);
linux-master
arch/x86/kernel/cpu/perfctr-watchdog.c
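A sketch of the reservation scheme in perfctr-watchdog.c above: each perfctr/evntsel MSR maps to one bit, and reserve/release are a test-and-set/clear on that bit. This user-space version substitutes C11 atomics for the kernel's test_and_set_bit()/clear_bit(); it is an illustration, not the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NMI_MAX_COUNTER_BITS	66
#define BITS_PER_WORD		(8 * sizeof(unsigned long))
#define NWORDS			((NMI_MAX_COUNTER_BITS + BITS_PER_WORD - 1) / BITS_PER_WORD)

static _Atomic unsigned long perfctr_nmi_owner[NWORDS];

/* Like reserve_perfctr_nmi(): true if the bit was free and is now owned;
 * out-of-range bits are treated as "not managed by the allocator". */
static bool reserve_counter(unsigned int bit)
{
	unsigned long mask, old;

	if (bit > NMI_MAX_COUNTER_BITS)
		return true;
	mask = 1UL << (bit % BITS_PER_WORD);
	old = atomic_fetch_or(&perfctr_nmi_owner[bit / BITS_PER_WORD], mask);
	return !(old & mask);
}

static void release_counter(unsigned int bit)
{
	if (bit > NMI_MAX_COUNTER_BITS)
		return;
	atomic_fetch_and(&perfctr_nmi_owner[bit / BITS_PER_WORD],
			 ~(1UL << (bit % BITS_PER_WORD)));
}

int main(void)
{
	printf("first reserve: %d\n", reserve_counter(0));	/* 1: now owned */
	printf("second reserve: %d\n", reserve_counter(0));	/* 0: already taken */
	release_counter(0);
	return 0;
}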
// SPDX-License-Identifier: GPL-2.0 /* * Check for extended topology enumeration cpuid leaf 0xb and if it * exists, use it for populating initial_apicid and cpu topology * detection. */ #include <linux/cpu.h> #include <asm/apic.h> #include <asm/memtype.h> #include <asm/processor.h> #include "cpu.h" /* leaf 0xb SMT level */ #define SMT_LEVEL 0 /* extended topology sub-leaf types */ #define INVALID_TYPE 0 #define SMT_TYPE 1 #define CORE_TYPE 2 #define DIE_TYPE 5 #define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff) #define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f) #define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff) unsigned int __max_die_per_package __read_mostly = 1; EXPORT_SYMBOL(__max_die_per_package); #ifdef CONFIG_SMP /* * Check if given CPUID extended topology "leaf" is implemented */ static int check_extended_topology_leaf(int leaf) { unsigned int eax, ebx, ecx, edx; cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx); if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) return -1; return 0; } /* * Return best CPUID Extended Topology Leaf supported */ static int detect_extended_topology_leaf(struct cpuinfo_x86 *c) { if (c->cpuid_level >= 0x1f) { if (check_extended_topology_leaf(0x1f) == 0) return 0x1f; } if (c->cpuid_level >= 0xb) { if (check_extended_topology_leaf(0xb) == 0) return 0xb; } return -1; } #endif int detect_extended_topology_early(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP unsigned int eax, ebx, ecx, edx; int leaf; leaf = detect_extended_topology_leaf(c); if (leaf < 0) return -1; set_cpu_cap(c, X86_FEATURE_XTOPOLOGY); cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx); /* * initial apic id, which also represents 32-bit extended x2apic id. */ c->initial_apicid = edx; smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx)); #endif return 0; } /* * Check for extended topology enumeration cpuid leaf, and if it * exists, use it for populating initial_apicid and cpu topology * detection. */ int detect_extended_topology(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP unsigned int eax, ebx, ecx, edx, sub_index; unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width; unsigned int core_select_mask, core_level_siblings; unsigned int die_select_mask, die_level_siblings; unsigned int pkg_mask_width; bool die_level_present = false; int leaf; leaf = detect_extended_topology_leaf(c); if (leaf < 0) return -1; /* * Populate HT related information from sub-leaf level 0. */ cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx); c->initial_apicid = edx; core_level_siblings = LEVEL_MAX_SIBLINGS(ebx); smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx)); core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); die_level_siblings = LEVEL_MAX_SIBLINGS(ebx); pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); sub_index = 1; while (true) { cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx); /* * Check for the Core type in the implemented sub leaves. 
*/ if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) { core_level_siblings = LEVEL_MAX_SIBLINGS(ebx); core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); die_level_siblings = core_level_siblings; die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); } if (LEAFB_SUBTYPE(ecx) == DIE_TYPE) { die_level_present = true; die_level_siblings = LEVEL_MAX_SIBLINGS(ebx); die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); } if (LEAFB_SUBTYPE(ecx) != INVALID_TYPE) pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); else break; sub_index++; } core_select_mask = (~(-1 << pkg_mask_width)) >> ht_mask_width; die_select_mask = (~(-1 << die_plus_mask_width)) >> core_plus_mask_width; c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width) & core_select_mask; if (die_level_present) { c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width) & die_select_mask; } c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, pkg_mask_width); /* * Reinit the apicid, now that we have extended initial_apicid. */ c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); c->x86_max_cores = (core_level_siblings / smp_num_siblings); __max_die_per_package = (die_level_siblings / core_level_siblings); #endif return 0; }
linux-master
arch/x86/kernel/cpu/topology.c
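A small sketch of the CPUID leaf 0xb/0x1f field decoding used in topology.c above. The register values are made-up sub-leaf outputs; the field macros match the kernel's, and the mask derivation computes the same value as the kernel's (~(-1 << pkg_mask_width)) >> ht_mask_width, written with unsigned arithmetic here:

#include <stdio.h>

#define LEAFB_SUBTYPE(ecx)		(((ecx) >> 8) & 0xff)
#define BITS_SHIFT_NEXT_LEVEL(eax)	((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx)		((ebx) & 0xffff)

int main(void)
{
	/* Pretend sub-leaf 0 (SMT level): 2 threads, next-level shift = 1. */
	unsigned int eax0 = 0x1, ebx0 = 0x2, ecx0 = 0x100;
	/* Pretend sub-leaf 1 (core level): shift = 4, i.e. 16 APIC IDs/package. */
	unsigned int eax1 = 0x4;

	unsigned int ht_mask_width  = BITS_SHIFT_NEXT_LEVEL(eax0);
	unsigned int pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax1);

	/* Core-ID select mask: package bits with the SMT bits shifted out. */
	unsigned int core_select_mask =
		(~(~0u << pkg_mask_width)) >> ht_mask_width;

	printf("subtype=%u siblings=%u core_select_mask=0x%x\n",
	       LEAFB_SUBTYPE(ecx0), LEVEL_MAX_SIBLINGS(ebx0), core_select_mask);
	return 0;
}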
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>

#include <asm/processor.h>

#include "cpu.h"

/*
 * No special init required for Vortex processors.
 */
static const struct cpu_dev vortex_cpu_dev = {
	.c_vendor	= "Vortex",
	.c_ident	= { "Vortex86 SoC" },
	.legacy_models	= {
		{
			.family = 5,
			.model_names = {
				[2] = "Vortex86DX",
				[8] = "Vortex86MX",
			},
		},
		{
			.family = 6,
			.model_names = {
				/*
				 * Both the Vortex86EX and the Vortex86EX2
				 * have the same family and model id.
				 *
				 * However, the -EX2 supports the product name
				 * CPUID call, so this name will only be used
				 * for the -EX, which does not.
				 */
				[0] = "Vortex86EX",
			},
		},
	},
	.c_x86_vendor	= X86_VENDOR_VORTEX,
};

cpu_dev_register(vortex_cpu_dev);
linux-master
arch/x86/kernel/cpu/vortex.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/syscore_ops.h> #include <linux/suspend.h> #include <linux/cpu.h> #include <asm/msr.h> #include <asm/mwait.h> #define UMWAIT_C02_ENABLE 0 #define UMWAIT_CTRL_VAL(max_time, c02_disable) \ (((max_time) & MSR_IA32_UMWAIT_CONTROL_TIME_MASK) | \ ((c02_disable) & MSR_IA32_UMWAIT_CONTROL_C02_DISABLE)) /* * Cache IA32_UMWAIT_CONTROL MSR. This is a systemwide control. By default, * umwait max time is 100000 in TSC-quanta and C0.2 is enabled */ static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE); /* * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by * hardware or BIOS before kernel boot. */ static u32 orig_umwait_control_cached __ro_after_init; /* * Serialize access to umwait_control_cached and IA32_UMWAIT_CONTROL MSR in * the sysfs write functions. */ static DEFINE_MUTEX(umwait_lock); static void umwait_update_control_msr(void * unused) { lockdep_assert_irqs_disabled(); wrmsr(MSR_IA32_UMWAIT_CONTROL, READ_ONCE(umwait_control_cached), 0); } /* * The CPU hotplug callback sets the control MSR to the global control * value. * * Disable interrupts so the read of umwait_control_cached and the WRMSR * are protected against a concurrent sysfs write. Otherwise the sysfs * write could update the cached value after it had been read on this CPU * and issue the IPI before the old value had been written. The IPI would * interrupt, write the new value and after return from IPI the previous * value would be written by this CPU. * * With interrupts disabled the upcoming CPU either sees the new control * value or the IPI is updating this CPU to the new control value after * interrupts have been reenabled. */ static int umwait_cpu_online(unsigned int cpu) { local_irq_disable(); umwait_update_control_msr(NULL); local_irq_enable(); return 0; } /* * The CPU hotplug callback sets the control MSR to the original control * value. */ static int umwait_cpu_offline(unsigned int cpu) { /* * This code is protected by the CPU hotplug already and * orig_umwait_control_cached is never changed after it caches * the original control MSR value in umwait_init(). So there * is no race condition here. */ wrmsr(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached, 0); return 0; } /* * On resume, restore IA32_UMWAIT_CONTROL MSR on the boot processor which * is the only active CPU at this time. The MSR is set up on the APs via the * CPU hotplug callback. * * This function is invoked on resume from suspend and hibernation. On * resume from suspend the restore should be not required, but we neither * trust the firmware nor does it matter if the same value is written * again. */ static void umwait_syscore_resume(void) { umwait_update_control_msr(NULL); } static struct syscore_ops umwait_syscore_ops = { .resume = umwait_syscore_resume, }; /* sysfs interface */ /* * When bit 0 in IA32_UMWAIT_CONTROL MSR is 1, C0.2 is disabled. * Otherwise, C0.2 is enabled. 
*/ static inline bool umwait_ctrl_c02_enabled(u32 ctrl) { return !(ctrl & MSR_IA32_UMWAIT_CONTROL_C02_DISABLE); } static inline u32 umwait_ctrl_max_time(u32 ctrl) { return ctrl & MSR_IA32_UMWAIT_CONTROL_TIME_MASK; } static inline void umwait_update_control(u32 maxtime, bool c02_enable) { u32 ctrl = maxtime & MSR_IA32_UMWAIT_CONTROL_TIME_MASK; if (!c02_enable) ctrl |= MSR_IA32_UMWAIT_CONTROL_C02_DISABLE; WRITE_ONCE(umwait_control_cached, ctrl); /* Propagate to all CPUs */ on_each_cpu(umwait_update_control_msr, NULL, 1); } static ssize_t enable_c02_show(struct device *dev, struct device_attribute *attr, char *buf) { u32 ctrl = READ_ONCE(umwait_control_cached); return sprintf(buf, "%d\n", umwait_ctrl_c02_enabled(ctrl)); } static ssize_t enable_c02_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { bool c02_enable; u32 ctrl; int ret; ret = kstrtobool(buf, &c02_enable); if (ret) return ret; mutex_lock(&umwait_lock); ctrl = READ_ONCE(umwait_control_cached); if (c02_enable != umwait_ctrl_c02_enabled(ctrl)) umwait_update_control(ctrl, c02_enable); mutex_unlock(&umwait_lock); return count; } static DEVICE_ATTR_RW(enable_c02); static ssize_t max_time_show(struct device *kobj, struct device_attribute *attr, char *buf) { u32 ctrl = READ_ONCE(umwait_control_cached); return sprintf(buf, "%u\n", umwait_ctrl_max_time(ctrl)); } static ssize_t max_time_store(struct device *kobj, struct device_attribute *attr, const char *buf, size_t count) { u32 max_time, ctrl; int ret; ret = kstrtou32(buf, 0, &max_time); if (ret) return ret; /* bits[1:0] must be zero */ if (max_time & ~MSR_IA32_UMWAIT_CONTROL_TIME_MASK) return -EINVAL; mutex_lock(&umwait_lock); ctrl = READ_ONCE(umwait_control_cached); if (max_time != umwait_ctrl_max_time(ctrl)) umwait_update_control(max_time, umwait_ctrl_c02_enabled(ctrl)); mutex_unlock(&umwait_lock); return count; } static DEVICE_ATTR_RW(max_time); static struct attribute *umwait_attrs[] = { &dev_attr_enable_c02.attr, &dev_attr_max_time.attr, NULL }; static struct attribute_group umwait_attr_group = { .attrs = umwait_attrs, .name = "umwait_control", }; static int __init umwait_init(void) { struct device *dev; int ret; if (!boot_cpu_has(X86_FEATURE_WAITPKG)) return -ENODEV; /* * Cache the original control MSR value before the control MSR is * changed. This is the only place where orig_umwait_control_cached * is modified. */ rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached); ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online", umwait_cpu_online, umwait_cpu_offline); if (ret < 0) { /* * On failure, the control MSR on all CPUs has the * original control value. */ return ret; } register_syscore_ops(&umwait_syscore_ops); /* * Add umwait control interface. Ignore failure, so at least the * default values are set up in case the machine manages to boot. */ dev = bus_get_dev_root(&cpu_subsys); if (dev) { ret = sysfs_create_group(&dev->kobj, &umwait_attr_group); put_device(dev); } return ret; } device_initcall(umwait_init);
linux-master
arch/x86/kernel/cpu/umwait.c
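A minimal user-space sketch (not part of the kernel tree) of how the IA32_UMWAIT_CONTROL value handled by the driver above is packed, mirroring its UMWAIT_CTRL_VAL() helper. The UMWAIT_TIME_MASK and UMWAIT_C02_DISABLE constants are assumptions standing in for the kernel's MSR_IA32_UMWAIT_CONTROL_* definitions (time in bits [31:2], C0.2 disable in bit 0).

#include <stdio.h>

/* Assumed layout of IA32_UMWAIT_CONTROL: bit 0 disables C0.2 and the
 * maximum wait time (in TSC quanta) occupies bits [31:2]. */
#define UMWAIT_TIME_MASK	0xfffffffcu
#define UMWAIT_C02_DISABLE	0x00000001u

static unsigned int umwait_ctrl_val(unsigned int max_time, int c02_disable)
{
	/* Same packing as the driver's UMWAIT_CTRL_VAL() macro. */
	return (max_time & UMWAIT_TIME_MASK) | (c02_disable ? UMWAIT_C02_DISABLE : 0);
}

int main(void)
{
	/* The driver's boot default: 100000 TSC quanta, C0.2 enabled. */
	printf("0x%08x\n", umwait_ctrl_val(100000, 0)); /* prints 0x000186a0 */
	return 0;
}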
// SPDX-License-Identifier: GPL-2.0 #include <linux/sched.h> #include <linux/sched/clock.h> #include <asm/cpu.h> #include <asm/cpufeature.h> #include "cpu.h" #define MSR_ZHAOXIN_FCR57 0x00001257 #define ACE_PRESENT (1 << 6) #define ACE_ENABLED (1 << 7) #define ACE_FCR (1 << 7) /* MSR_ZHAOXIN_FCR */ #define RNG_PRESENT (1 << 2) #define RNG_ENABLED (1 << 3) #define RNG_ENABLE (1 << 8) /* MSR_ZHAOXIN_RNG */ static void init_zhaoxin_cap(struct cpuinfo_x86 *c) { u32 lo, hi; /* Test for Extended Feature Flags presence */ if (cpuid_eax(0xC0000000) >= 0xC0000001) { u32 tmp = cpuid_edx(0xC0000001); /* Enable ACE unit, if present and disabled */ if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) { rdmsr(MSR_ZHAOXIN_FCR57, lo, hi); /* Enable ACE unit */ lo |= ACE_FCR; wrmsr(MSR_ZHAOXIN_FCR57, lo, hi); pr_info("CPU: Enabled ACE h/w crypto\n"); } /* Enable RNG unit, if present and disabled */ if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) { rdmsr(MSR_ZHAOXIN_FCR57, lo, hi); /* Enable RNG unit */ lo |= RNG_ENABLE; wrmsr(MSR_ZHAOXIN_FCR57, lo, hi); pr_info("CPU: Enabled h/w RNG\n"); } /* * Store Extended Feature Flags as word 5 of the CPU * capability bit array */ c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001); } if (c->x86 >= 0x6) set_cpu_cap(c, X86_FEATURE_REP_GOOD); } static void early_init_zhaoxin(struct cpuinfo_x86 *c) { if (c->x86 >= 0x6) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSENTER32); #endif if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } if (c->cpuid_level >= 0x00000001) { u32 eax, ebx, ecx, edx; cpuid(0x00000001, &eax, &ebx, &ecx, &edx); /* * If HTT (EDX[28]) is set EBX[16:23] contain the number of * apicids which are reserved per package. Store the resulting * shift value for the package management code. */ if (edx & (1U << 28)) c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); } } static void init_zhaoxin(struct cpuinfo_x86 *c) { early_init_zhaoxin(c); init_intel_cacheinfo(c); detect_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif if (c->cpuid_level > 9) { unsigned int eax = cpuid_eax(10); /* * Check for version and the number of counters * Version(eax[7:0]) can't be 0; * Counters(eax[15:8]) should be greater than 1; */ if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1)) set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); } if (c->x86 >= 0x6) init_zhaoxin_cap(c); #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); #endif init_ia32_feat_ctl(c); } #ifdef CONFIG_X86_32 static unsigned int zhaoxin_size_cache(struct cpuinfo_x86 *c, unsigned int size) { return size; } #endif static const struct cpu_dev zhaoxin_cpu_dev = { .c_vendor = "zhaoxin", .c_ident = { " Shanghai " }, .c_early_init = early_init_zhaoxin, .c_init = init_zhaoxin, #ifdef CONFIG_X86_32 .legacy_cache_size = zhaoxin_size_cache, #endif .c_x86_vendor = X86_VENDOR_ZHAOXIN, }; cpu_dev_register(zhaoxin_cpu_dev);
linux-master
arch/x86/kernel/cpu/zhaoxin.c
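A stand-alone sketch (not kernel code) of the "present but not yet enabled" bitmask test that init_zhaoxin_cap() above applies before switching on the ACE and RNG units; UNIT_PRESENT and UNIT_ENABLED are illustrative names reusing the ACE bit positions from that file.

#include <stdio.h>

/* Bit positions taken from the ACE definitions in the file above. */
#define UNIT_PRESENT	(1 << 6)
#define UNIT_ENABLED	(1 << 7)

static int needs_enabling(unsigned int feature_flags)
{
	/* True only when the unit is present and its enable bit is still clear. */
	return (feature_flags & (UNIT_PRESENT | UNIT_ENABLED)) == UNIT_PRESENT;
}

int main(void)
{
	printf("%d %d %d\n",
	       needs_enabling(UNIT_PRESENT),			/* 1: present, disabled */
	       needs_enabling(UNIT_PRESENT | UNIT_ENABLED),	/* 0: already enabled */
	       needs_enabling(0));				/* 0: not present */
	return 0;
}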
// SPDX-License-Identifier: GPL-2.0-only #include <linux/export.h> #include <linux/bitops.h> #include <linux/elf.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/random.h> #include <linux/topology.h> #include <asm/processor.h> #include <asm/apic.h> #include <asm/cacheinfo.h> #include <asm/cpu.h> #include <asm/spec-ctrl.h> #include <asm/smp.h> #include <asm/numa.h> #include <asm/pci-direct.h> #include <asm/delay.h> #include <asm/debugreg.h> #include <asm/resctrl.h> #ifdef CONFIG_X86_64 # include <asm/mmconfig.h> #endif #include "cpu.h" /* * nodes_per_socket: Stores the number of nodes per socket. * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX * Node Identifiers[10:8] */ static u32 nodes_per_socket = 1; /* * AMD errata checking * * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that * have an OSVW id assigned, which it takes as first argument. Both take a * variable number of family-specific model-stepping ranges created by * AMD_MODEL_RANGE(). * * Example: * * const int amd_erratum_319[] = * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); */ #define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } #define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 } #define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) #define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) #define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) #define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) static const int amd_erratum_400[] = AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); static const int amd_erratum_383[] = AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); /* #1054: Instructions Retired Performance Counter May Be Inaccurate */ static const int amd_erratum_1054[] = AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); static const int amd_zenbleed[] = AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf), AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf), AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); static const int amd_div0[] = AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { int osvw_id = *erratum++; u32 range; u32 ms; if (osvw_id >= 0 && osvw_id < 65536 && cpu_has(cpu, X86_FEATURE_OSVW)) { u64 osvw_len; rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); if (osvw_id < osvw_len) { u64 osvw_bits; rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), osvw_bits); return osvw_bits & (1ULL << (osvw_id & 0x3f)); } } /* OSVW unavailable or ID unknown, match family-model-stepping range */ ms = (cpu->x86_model << 4) | cpu->x86_stepping; while ((range = *erratum++)) if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && (ms >= AMD_MODEL_RANGE_START(range)) && (ms <= AMD_MODEL_RANGE_END(range))) return true; return false; } static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) { u32 gprs[8] = { 0 }; int err; WARN_ONCE((boot_cpu_data.x86 != 0xf), "%s should only be used on K8!\n", __func__); gprs[1] = msr; gprs[7] = 0x9c5a203a; err = rdmsr_safe_regs(gprs); *p = gprs[0] | ((u64)gprs[2] 
<< 32); return err; } static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) { u32 gprs[8] = { 0 }; WARN_ONCE((boot_cpu_data.x86 != 0xf), "%s should only be used on K8!\n", __func__); gprs[0] = (u32)val; gprs[1] = msr; gprs[2] = val >> 32; gprs[7] = 0x9c5a203a; return wrmsr_safe_regs(gprs); } /* * B step AMD K6 before B 9730xxxx have hardware bugs that can cause * misexecution of code under Linux. Owners of such processors should * contact AMD for precise details and a CPU swap. * * See http://www.multimania.com/poulot/k6bug.html * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6" * (Publication # 21266 Issue Date: August 1998) * * The following test is erm.. interesting. AMD neglected to up * the chip setting when fixing the bug but they also tweaked some * performance at the same time.. */ #ifdef CONFIG_X86_32 extern __visible void vide(void); __asm__(".text\n" ".globl vide\n" ".type vide, @function\n" ".align 4\n" "vide: ret\n"); #endif static void init_amd_k5(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_32 /* * General Systems BIOSen alias the cpu frequency registers * of the Elan at 0x000df000. Unfortunately, one of the Linux * drivers subsequently pokes it, and changes the CPU speed. * Workaround : Remove the unneeded alias. */ #define CBAR (0xfffc) /* Configuration Base Address (32-bit) */ #define CBAR_ENB (0x80000000) #define CBAR_KEY (0X000000CB) if (c->x86_model == 9 || c->x86_model == 10) { if (inl(CBAR) & CBAR_ENB) outl(0 | CBAR_KEY, CBAR); } #endif } static void init_amd_k6(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_32 u32 l, h; int mbytes = get_num_physpages() >> (20-PAGE_SHIFT); if (c->x86_model < 6) { /* Based on AMD doc 20734R - June 2000 */ if (c->x86_model == 0) { clear_cpu_cap(c, X86_FEATURE_APIC); set_cpu_cap(c, X86_FEATURE_PGE); } return; } if (c->x86_model == 6 && c->x86_stepping == 1) { const int K6_BUG_LOOP = 1000000; int n; void (*f_vide)(void); u64 d, d2; pr_info("AMD K6 stepping B detected - "); /* * It looks like AMD fixed the 2.6.2 bug and improved indirect * calls at the same time. */ n = K6_BUG_LOOP; f_vide = vide; OPTIMIZER_HIDE_VAR(f_vide); d = rdtsc(); while (n--) f_vide(); d2 = rdtsc(); d = d2-d; if (d > 20*K6_BUG_LOOP) pr_cont("system stability may be impaired when more than 32 MB are used.\n"); else pr_cont("probably OK (after B9730xxxx).\n"); } /* K6 with old style WHCR */ if (c->x86_model < 8 || (c->x86_model == 8 && c->x86_stepping < 8)) { /* We can only write allocate on the low 508Mb */ if (mbytes > 508) mbytes = 508; rdmsr(MSR_K6_WHCR, l, h); if ((l&0x0000FFFF) == 0) { unsigned long flags; l = (1<<0)|((mbytes/4)<<1); local_irq_save(flags); wbinvd(); wrmsr(MSR_K6_WHCR, l, h); local_irq_restore(flags); pr_info("Enabling old style K6 write allocation for %d Mb\n", mbytes); } return; } if ((c->x86_model == 8 && c->x86_stepping > 7) || c->x86_model == 9 || c->x86_model == 13) { /* The more serious chips .. */ if (mbytes > 4092) mbytes = 4092; rdmsr(MSR_K6_WHCR, l, h); if ((l&0xFFFF0000) == 0) { unsigned long flags; l = ((mbytes>>2)<<22)|(1<<16); local_irq_save(flags); wbinvd(); wrmsr(MSR_K6_WHCR, l, h); local_irq_restore(flags); pr_info("Enabling new style K6 write allocation for %d Mb\n", mbytes); } return; } if (c->x86_model == 10) { /* AMD Geode LX is model 10 */ /* placeholder for any needed mods */ return; } #endif } static void init_amd_k7(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_32 u32 l, h; /* * Bit 15 of Athlon specific MSR 15, needs to be 0 * to enable SSE on Palomino/Morgan/Barton CPU's. 
* If the BIOS didn't enable it already, enable it here. */ if (c->x86_model >= 6 && c->x86_model <= 10) { if (!cpu_has(c, X86_FEATURE_XMM)) { pr_info("Enabling disabled K7/SSE Support.\n"); msr_clear_bit(MSR_K7_HWCR, 15); set_cpu_cap(c, X86_FEATURE_XMM); } } /* * It's been determined by AMD that Athlons since model 8 stepping 1 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx * As per AMD technical note 27212 0.2 */ if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) { rdmsr(MSR_K7_CLK_CTL, l, h); if ((l & 0xfff00000) != 0x20000000) { pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l, ((l & 0x000fffff)|0x20000000)); wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); } } /* calling is from identify_secondary_cpu() ? */ if (!c->cpu_index) return; /* * Certain Athlons might work (for various values of 'work') in SMP * but they are not certified as MP capable. */ /* Athlon 660/661 is valid. */ if ((c->x86_model == 6) && ((c->x86_stepping == 0) || (c->x86_stepping == 1))) return; /* Duron 670 is valid */ if ((c->x86_model == 7) && (c->x86_stepping == 0)) return; /* * Athlon 662, Duron 671, and Athlon >model 7 have capability * bit. It's worth noting that the A5 stepping (662) of some * Athlon XP's have the MP bit set. * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for * more. */ if (((c->x86_model == 6) && (c->x86_stepping >= 2)) || ((c->x86_model == 7) && (c->x86_stepping >= 1)) || (c->x86_model > 7)) if (cpu_has(c, X86_FEATURE_MP)) return; /* If we get here, not a certified SMP capable AMD system. */ /* * Don't taint if we are running SMP kernel on a single non-MP * approved Athlon */ WARN_ONCE(1, "WARNING: This combination of AMD" " processors is not suitable for SMP.\n"); add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE); #endif } #ifdef CONFIG_NUMA /* * To workaround broken NUMA config. Read the comment in * srat_detect_node(). */ static int nearby_node(int apicid) { int i, node; for (i = apicid - 1; i >= 0; i--) { node = __apicid_to_node[i]; if (node != NUMA_NO_NODE && node_online(node)) return node; } for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) { node = __apicid_to_node[i]; if (node != NUMA_NO_NODE && node_online(node)) return node; } return first_node(node_online_map); /* Shouldn't happen */ } #endif /* * Fix up cpu_core_id for pre-F17h systems to be in the * [0 .. cores_per_node - 1] range. Not really needed but * kept so as not to break existing setups. */ static void legacy_fixup_core_id(struct cpuinfo_x86 *c) { u32 cus_per_node; if (c->x86 >= 0x17) return; cus_per_node = c->x86_max_cores / nodes_per_socket; c->cpu_core_id %= cus_per_node; } /* * Fixup core topology information for * (1) AMD multi-node processors * Assumption: Number of cores in each internal node is the same. * (2) AMD processors supporting compute units */ static void amd_get_topology(struct cpuinfo_x86 *c) { int cpu = smp_processor_id(); /* get information required for multi-node processors */ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { int err; u32 eax, ebx, ecx, edx; cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); c->cpu_die_id = ecx & 0xff; if (c->x86 == 0x15) c->cu_id = ebx & 0xff; if (c->x86 >= 0x17) { c->cpu_core_id = ebx & 0xff; if (smp_num_siblings > 1) c->x86_max_cores /= smp_num_siblings; } /* * In case leaf B is available, use it to derive * topology information. 
*/ err = detect_extended_topology(c); if (!err) c->x86_coreid_bits = get_count_order(c->x86_max_cores); cacheinfo_amd_init_llc_id(c, cpu); } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { u64 value; rdmsrl(MSR_FAM10H_NODE_ID, value); c->cpu_die_id = value & 7; per_cpu(cpu_llc_id, cpu) = c->cpu_die_id; } else return; if (nodes_per_socket > 1) { set_cpu_cap(c, X86_FEATURE_AMD_DCM); legacy_fixup_core_id(c); } } /* * On a AMD dual core setup the lower bits of the APIC id distinguish the cores. * Assumes number of cores is a power of two. */ static void amd_detect_cmp(struct cpuinfo_x86 *c) { unsigned bits; int cpu = smp_processor_id(); bits = c->x86_coreid_bits; /* Low order bits define the core id (index of core in socket) */ c->cpu_core_id = c->initial_apicid & ((1 << bits)-1); /* Convert the initial APIC ID into the socket ID */ c->phys_proc_id = c->initial_apicid >> bits; /* use socket ID also for last level cache */ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id; } u32 amd_get_nodes_per_socket(void) { return nodes_per_socket; } EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket); static void srat_detect_node(struct cpuinfo_x86 *c) { #ifdef CONFIG_NUMA int cpu = smp_processor_id(); int node; unsigned apicid = c->apicid; node = numa_cpu_node(cpu); if (node == NUMA_NO_NODE) node = get_llc_id(cpu); /* * On multi-fabric platform (e.g. Numascale NumaChip) a * platform-specific handler needs to be called to fixup some * IDs of the CPU. */ if (x86_cpuinit.fixup_cpu_id) x86_cpuinit.fixup_cpu_id(c, node); if (!node_online(node)) { /* * Two possibilities here: * * - The CPU is missing memory and no node was created. In * that case try picking one from a nearby CPU. * * - The APIC IDs differ from the HyperTransport node IDs * which the K8 northbridge parsing fills in. Assume * they are all increased by a constant offset, but in * the same order as the HT nodeids. If that doesn't * result in a usable node fall back to the path for the * previous case. * * This workaround operates directly on the mapping between * APIC ID and NUMA node, assuming certain relationship * between APIC ID, HT node ID and NUMA topology. As going * through CPU mapping may alter the outcome, directly * access __apicid_to_node[]. */ int ht_nodeid = c->initial_apicid; if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE) node = __apicid_to_node[ht_nodeid]; /* Pick a nearby node */ if (!node_online(node)) node = nearby_node(apicid); } numa_set_node(cpu, node); #endif } static void early_init_amd_mc(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP unsigned bits, ecx; /* Multi core CPU? */ if (c->extended_cpuid_level < 0x80000008) return; ecx = cpuid_ecx(0x80000008); c->x86_max_cores = (ecx & 0xff) + 1; /* CPU telling us the core id bits shift? 
*/ bits = (ecx >> 12) & 0xF; /* Otherwise recompute */ if (bits == 0) { while ((1 << bits) < c->x86_max_cores) bits++; } c->x86_coreid_bits = bits; #endif } static void bsp_init_amd(struct cpuinfo_x86 *c) { if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { if (c->x86 > 0x10 || (c->x86 == 0x10 && c->x86_model >= 0x2)) { u64 val; rdmsrl(MSR_K7_HWCR, val); if (!(val & BIT(24))) pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n"); } } if (c->x86 == 0x15) { unsigned long upperbit; u32 cpuid, assoc; cpuid = cpuid_edx(0x80000005); assoc = cpuid >> 16 & 0xff; upperbit = ((cpuid >> 24) << 10) / assoc; va_align.mask = (upperbit - 1) & PAGE_MASK; va_align.flags = ALIGN_VA_32 | ALIGN_VA_64; /* A random value per boot for bit slice [12:upper_bit) */ va_align.bits = get_random_u32() & va_align.mask; } if (cpu_has(c, X86_FEATURE_MWAITX)) use_mwaitx_delay(); if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { u32 ecx; ecx = cpuid_ecx(0x8000001e); __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1; } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) { u64 value; rdmsrl(MSR_FAM10H_NODE_ID, value); __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1; } if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) && !boot_cpu_has(X86_FEATURE_VIRT_SSBD) && c->x86 >= 0x15 && c->x86 <= 0x17) { unsigned int bit; switch (c->x86) { case 0x15: bit = 54; break; case 0x16: bit = 33; break; case 0x17: bit = 10; break; default: return; } /* * Try to cache the base value so further operations can * avoid RMW. If that faults, do not enable SSBD. */ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) { setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD); setup_force_cpu_cap(X86_FEATURE_SSBD); x86_amd_ls_cfg_ssbd_mask = 1ULL << bit; } } resctrl_cpu_detect(c); } static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) { u64 msr; /* * BIOS support is required for SME and SEV. * For SME: If BIOS has enabled SME then adjust x86_phys_bits by * the SME physical address space reduction value. * If BIOS has not enabled SME then don't advertise the * SME feature (set in scattered.c). * If the kernel has not enabled SME via any means then * don't advertise the SME feature. * For SEV: If BIOS has not enabled SEV then don't advertise the * SEV and SEV_ES feature (set in scattered.c). * * In all cases, since support for SME and SEV requires long mode, * don't advertise the feature under CONFIG_X86_32. */ if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) { /* Check if memory encryption is enabled */ rdmsrl(MSR_AMD64_SYSCFG, msr); if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) goto clear_all; /* * Always adjust physical address bits. Even though this * will be a value above 32-bits this is still done for * CONFIG_X86_32 so that accurate values are reported. */ c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f; if (IS_ENABLED(CONFIG_X86_32)) goto clear_all; if (!sme_me_mask) setup_clear_cpu_cap(X86_FEATURE_SME); rdmsrl(MSR_K7_HWCR, msr); if (!(msr & MSR_K7_HWCR_SMMLOCK)) goto clear_sev; return; clear_all: setup_clear_cpu_cap(X86_FEATURE_SME); clear_sev: setup_clear_cpu_cap(X86_FEATURE_SEV); setup_clear_cpu_cap(X86_FEATURE_SEV_ES); } } static void early_init_amd(struct cpuinfo_x86 *c) { u64 value; u32 dummy; early_init_amd_mc(c); if (c->x86 >= 0xf) set_cpu_cap(c, X86_FEATURE_K8); rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); /* * c->x86_power is 8000_0007 edx. 
Bit 8 is TSC runs at constant rate * with P/T states and does not stop in deep C-states */ if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */ if (c->x86_power & BIT(12)) set_cpu_cap(c, X86_FEATURE_ACC_POWER); /* Bit 14 indicates the Runtime Average Power Limit interface. */ if (c->x86_power & BIT(14)) set_cpu_cap(c, X86_FEATURE_RAPL); #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSCALL32); #else /* Set MTRR capability flag if appropriate */ if (c->x86 == 5) if (c->x86_model == 13 || c->x86_model == 9 || (c->x86_model == 8 && c->x86_stepping >= 8)) set_cpu_cap(c, X86_FEATURE_K6_MTRR); #endif #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) /* * ApicID can always be treated as an 8-bit value for AMD APIC versions * >= 0x10, but even old K8s came out of reset with version 0x10. So, we * can safely set X86_FEATURE_EXTD_APICID unconditionally for families * after 16h. */ if (boot_cpu_has(X86_FEATURE_APIC)) { if (c->x86 > 0x16) set_cpu_cap(c, X86_FEATURE_EXTD_APICID); else if (c->x86 >= 0xf) { /* check CPU config space for extended APIC ID */ unsigned int val; val = read_pci_config(0, 24, 0, 0x68); if ((val >> 17 & 0x3) == 0x3) set_cpu_cap(c, X86_FEATURE_EXTD_APICID); } } #endif /* * This is only needed to tell the kernel whether to use VMCALL * and VMMCALL. VMMCALL is never executed except under virt, so * we can set it unconditionally. */ set_cpu_cap(c, X86_FEATURE_VMMCALL); /* F16h erratum 793, CVE-2013-6885 */ if (c->x86 == 0x16 && c->x86_model <= 0xf) msr_set_bit(MSR_AMD64_LS_CFG, 15); /* * Check whether the machine is affected by erratum 400. This is * used to select the proper idle routine and to enable the check * whether the machine is affected in arch_post_acpi_init(), which * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check. */ if (cpu_has_amd_erratum(c, amd_erratum_400)) set_cpu_bug(c, X86_BUG_AMD_E400); early_detect_mem_encrypt(c); /* Re-enable TopologyExtensions if switched off by BIOS */ if (c->x86 == 0x15 && (c->x86_model >= 0x10 && c->x86_model <= 0x6f) && !cpu_has(c, X86_FEATURE_TOPOEXT)) { if (msr_set_bit(0xc0011005, 54) > 0) { rdmsrl(0xc0011005, value); if (value & BIT_64(54)) { set_cpu_cap(c, X86_FEATURE_TOPOEXT); pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); } } } if (cpu_has(c, X86_FEATURE_TOPOEXT)) smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1; if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) { if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB)) setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE); else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) { setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE); setup_force_cpu_cap(X86_FEATURE_SBPB); } } } static void init_amd_k8(struct cpuinfo_x86 *c) { u32 level; u64 value; /* On C+ stepping K8 rep microcode works well for copy/memset */ level = cpuid_eax(1); if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) set_cpu_cap(c, X86_FEATURE_REP_GOOD); /* * Some BIOSes incorrectly force this feature, but only K8 revision D * (model = 0x14) and later actually support it. * (AMD Erratum #110, docId: 25759). 
*/ if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) { clear_cpu_cap(c, X86_FEATURE_LAHF_LM); if (!rdmsrl_amd_safe(0xc001100d, &value)) { value &= ~BIT_64(32); wrmsrl_amd_safe(0xc001100d, value); } } if (!c->x86_model_id[0]) strcpy(c->x86_model_id, "Hammer"); #ifdef CONFIG_SMP /* * Disable TLB flush filter by setting HWCR.FFDIS on K8 * bit 6 of msr C001_0015 * * Errata 63 for SH-B3 steppings * Errata 122 for all steppings (F+ have it disabled by default) */ msr_set_bit(MSR_K7_HWCR, 6); #endif set_cpu_bug(c, X86_BUG_SWAPGS_FENCE); } static void init_amd_gh(struct cpuinfo_x86 *c) { #ifdef CONFIG_MMCONF_FAM10H /* do this for boot cpu */ if (c == &boot_cpu_data) check_enable_amd_mmconf_dmi(); fam10h_check_enable_mmcfg(); #endif /* * Disable GART TLB Walk Errors on Fam10h. We do this here because this * is always needed when GART is enabled, even in a kernel which has no * MCE support built in. BIOS should disable GartTlbWlk Errors already. * If it doesn't, we do it here as suggested by the BKDG. * * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 */ msr_set_bit(MSR_AMD64_MCx_MASK(4), 10); /* * On family 10h BIOS may not have properly enabled WC+ support, causing * it to be converted to CD memtype. This may result in performance * degradation for certain nested-paging guests. Prevent this conversion * by clearing bit 24 in MSR_AMD64_BU_CFG2. * * NOTE: we want to use the _safe accessors so as not to #GP kvm * guests on older kvm hosts. */ msr_clear_bit(MSR_AMD64_BU_CFG2, 24); if (cpu_has_amd_erratum(c, amd_erratum_383)) set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); } static void init_amd_ln(struct cpuinfo_x86 *c) { /* * Apply erratum 665 fix unconditionally so machines without a BIOS * fix work. */ msr_set_bit(MSR_AMD64_DE_CFG, 31); } static bool rdrand_force; static int __init rdrand_cmdline(char *str) { if (!str) return -EINVAL; if (!strcmp(str, "force")) rdrand_force = true; else return -EINVAL; return 0; } early_param("rdrand", rdrand_cmdline); static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c) { /* * Saving of the MSR used to hide the RDRAND support during * suspend/resume is done by arch/x86/power/cpu.c, which is * dependent on CONFIG_PM_SLEEP. */ if (!IS_ENABLED(CONFIG_PM_SLEEP)) return; /* * The self-test can clear X86_FEATURE_RDRAND, so check for * RDRAND support using the CPUID function directly. */ if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force) return; msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62); /* * Verify that the CPUID change has occurred in case the kernel is * running virtualized and the hypervisor doesn't support the MSR. */ if (cpuid_ecx(1) & BIT(30)) { pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n"); return; } clear_cpu_cap(c, X86_FEATURE_RDRAND); pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n"); } static void init_amd_jg(struct cpuinfo_x86 *c) { /* * Some BIOS implementations do not restore proper RDRAND support * across suspend and resume. Check on whether to hide the RDRAND * instruction support via CPUID. */ clear_rdrand_cpuid_bit(c); } static void init_amd_bd(struct cpuinfo_x86 *c) { u64 value; /* * The way access filter has a performance penalty on some workloads. * Disable it on the affected CPUs. 
*/ if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) { if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) { value |= 0x1E; wrmsrl_safe(MSR_F15H_IC_CFG, value); } } /* * Some BIOS implementations do not restore proper RDRAND support * across suspend and resume. Check on whether to hide the RDRAND * instruction support via CPUID. */ clear_rdrand_cpuid_bit(c); } void init_spectral_chicken(struct cpuinfo_x86 *c) { #ifdef CONFIG_CPU_UNRET_ENTRY u64 value; /* * On Zen2 we offer this chicken (bit) on the altar of Speculation. * * This suppresses speculation from the middle of a basic block, i.e. it * suppresses non-branch predictions. * * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H */ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) { if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) { value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT; wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value); } } #endif /* * Work around Erratum 1386. The XSAVES instruction malfunctions in * certain circumstances on Zen1/2 uarch, and not all parts have had * updated microcode at the time of writing (March 2023). * * Affected parts all have no supervisor XSAVE states, meaning that * the XSAVEC instruction (which works fine) is equivalent. */ clear_cpu_cap(c, X86_FEATURE_XSAVES); } static void init_amd_zn(struct cpuinfo_x86 *c) { set_cpu_cap(c, X86_FEATURE_ZEN); #ifdef CONFIG_NUMA node_reclaim_distance = 32; #endif /* Fix up CPUID bits, but only if not virtualised. */ if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) { /* Erratum 1076: CPB feature bit not being set in CPUID. */ if (!cpu_has(c, X86_FEATURE_CPB)) set_cpu_cap(c, X86_FEATURE_CPB); /* * Zen3 (Fam19 model < 0x10) parts are not susceptible to * Branch Type Confusion, but predate the allocation of the * BTC_NO bit. */ if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO)) set_cpu_cap(c, X86_FEATURE_BTC_NO); } } static bool cpu_has_zenbleed_microcode(void) { u32 good_rev = 0; switch (boot_cpu_data.x86_model) { case 0x30 ... 0x3f: good_rev = 0x0830107a; break; case 0x60 ... 0x67: good_rev = 0x0860010b; break; case 0x68 ... 0x6f: good_rev = 0x08608105; break; case 0x70 ... 0x7f: good_rev = 0x08701032; break; case 0xa0 ... 
0xaf: good_rev = 0x08a00008; break; default: return false; break; } if (boot_cpu_data.microcode < good_rev) return false; return true; } static void zenbleed_check(struct cpuinfo_x86 *c) { if (!cpu_has_amd_erratum(c, amd_zenbleed)) return; if (cpu_has(c, X86_FEATURE_HYPERVISOR)) return; if (!cpu_has(c, X86_FEATURE_AVX)) return; if (!cpu_has_zenbleed_microcode()) { pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n"); msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); } else { msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); } } static void init_amd(struct cpuinfo_x86 *c) { early_init_amd(c); /* * Bit 31 in normal CPUID used for nonstandard 3DNow ID; * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ clear_cpu_cap(c, 0*32+31); if (c->x86 >= 0x10) set_cpu_cap(c, X86_FEATURE_REP_GOOD); /* AMD FSRM also implies FSRS */ if (cpu_has(c, X86_FEATURE_FSRM)) set_cpu_cap(c, X86_FEATURE_FSRS); /* get apicid instead of initial apic id from cpuid */ c->apicid = read_apic_id(); /* K6s reports MCEs but don't actually have all the MSRs */ if (c->x86 < 6) clear_cpu_cap(c, X86_FEATURE_MCE); switch (c->x86) { case 4: init_amd_k5(c); break; case 5: init_amd_k6(c); break; case 6: init_amd_k7(c); break; case 0xf: init_amd_k8(c); break; case 0x10: init_amd_gh(c); break; case 0x12: init_amd_ln(c); break; case 0x15: init_amd_bd(c); break; case 0x16: init_amd_jg(c); break; case 0x17: init_spectral_chicken(c); fallthrough; case 0x19: init_amd_zn(c); break; } /* * Enable workaround for FXSAVE leak on CPUs * without a XSaveErPtr feature */ if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR))) set_cpu_bug(c, X86_BUG_FXSAVE_LEAK); cpu_detect_cache_sizes(c); amd_detect_cmp(c); amd_get_topology(c); srat_detect_node(c); init_amd_cacheinfo(c); if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) { /* * Use LFENCE for execution serialization. On families which * don't have that MSR, LFENCE is already serializing. * msr_set_bit() uses the safe accessors, too, even if the MSR * is not present. */ msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT); /* A serializing LFENCE stops RDTSC speculation */ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); } /* * Family 0x12 and above processors have APIC timer * running in deep C states. */ if (c->x86 > 0x11) set_cpu_cap(c, X86_FEATURE_ARAT); /* 3DNow or LM implies PREFETCHW */ if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH)) if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM)) set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH); /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */ if (!cpu_feature_enabled(X86_FEATURE_XENPV)) set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); /* * Turn on the Instructions Retired free counter on machines not * susceptible to erratum #1054 "Instructions Retired Performance * Counter May Be Inaccurate". */ if (cpu_has(c, X86_FEATURE_IRPERF) && !cpu_has_amd_erratum(c, amd_erratum_1054)) msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); check_null_seg_clears_base(c); /* * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up * using the trampoline code and as part of it, MSR_EFER gets prepared there in * order to be replicated onto them. Regardless, set it here again, if not set, * to protect against any future refactoring/code reorganization which might * miss setting this important bit. 
*/ if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && cpu_has(c, X86_FEATURE_AUTOIBRS)) WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS)); zenbleed_check(c); if (cpu_has_amd_erratum(c, amd_div0)) { pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); setup_force_cpu_bug(X86_BUG_DIV0); } } #ifdef CONFIG_X86_32 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) { /* AMD errata T13 (order #21922) */ if (c->x86 == 6) { /* Duron Rev A0 */ if (c->x86_model == 3 && c->x86_stepping == 0) size = 64; /* Tbird rev A1/A2 */ if (c->x86_model == 4 && (c->x86_stepping == 0 || c->x86_stepping == 1)) size = 256; } return size; } #endif static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) { u32 ebx, eax, ecx, edx; u16 mask = 0xfff; if (c->x86 < 0xf) return; if (c->extended_cpuid_level < 0x80000006) return; cpuid(0x80000006, &eax, &ebx, &ecx, &edx); tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask; tlb_lli_4k[ENTRIES] = ebx & mask; /* * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB * characteristics from the CPUID function 0x80000005 instead. */ if (c->x86 == 0xf) { cpuid(0x80000005, &eax, &ebx, &ecx, &edx); mask = 0xff; } /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */ if (!((eax >> 16) & mask)) tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff; else tlb_lld_2m[ENTRIES] = (eax >> 16) & mask; /* a 4M entry uses two 2M entries */ tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1; /* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */ if (!(eax & mask)) { /* Erratum 658 */ if (c->x86 == 0x15 && c->x86_model <= 0x1f) { tlb_lli_2m[ENTRIES] = 1024; } else { cpuid(0x80000005, &eax, &ebx, &ecx, &edx); tlb_lli_2m[ENTRIES] = eax & 0xff; } } else tlb_lli_2m[ENTRIES] = eax & mask; tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1; } static const struct cpu_dev amd_cpu_dev = { .c_vendor = "AMD", .c_ident = { "AuthenticAMD" }, #ifdef CONFIG_X86_32 .legacy_models = { { .family = 4, .model_names = { [3] = "486 DX/2", [7] = "486 DX/2-WB", [8] = "486 DX/4", [9] = "486 DX/4-WB", [14] = "Am5x86-WT", [15] = "Am5x86-WB" } }, }, .legacy_cache_size = amd_size_cache, #endif .c_early_init = early_init_amd, .c_detect_tlb = cpu_detect_tlb_amd, .c_bsp_init = bsp_init_amd, .c_init = init_amd, .c_x86_vendor = X86_VENDOR_AMD, }; cpu_dev_register(amd_cpu_dev); static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask); static unsigned int amd_msr_dr_addr_masks[] = { MSR_F16H_DR0_ADDR_MASK, MSR_F16H_DR1_ADDR_MASK, MSR_F16H_DR1_ADDR_MASK + 1, MSR_F16H_DR1_ADDR_MASK + 2 }; void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr) { int cpu = smp_processor_id(); if (!cpu_feature_enabled(X86_FEATURE_BPEXT)) return; if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks))) return; if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask) return; wrmsr(amd_msr_dr_addr_masks[dr], mask, 0); per_cpu(amd_dr_addr_mask, cpu)[dr] = mask; } unsigned long amd_get_dr_addr_mask(unsigned int dr) { if (!cpu_feature_enabled(X86_FEATURE_BPEXT)) return 0; if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks))) return 0; return per_cpu(amd_dr_addr_mask[dr], smp_processor_id()); } EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask); u32 amd_get_highest_perf(void) { struct cpuinfo_x86 *c = &boot_cpu_data; if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) || (c->x86_model >= 0x70 && c->x86_model < 0x80))) return 166; if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) || (c->x86_model >= 0x40 && c->x86_model < 0x70))) return 
166; return 255; } EXPORT_SYMBOL_GPL(amd_get_highest_perf); static void zenbleed_check_cpu(void *unused) { struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); zenbleed_check(c); } void amd_check_microcode(void) { on_each_cpu(zenbleed_check_cpu, NULL, 1); } /* * Issue a DIV 0/1 insn to clear any division data from previous DIV * operations. */ void noinstr amd_clear_divider(void) { asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) :: "a" (0), "d" (0), "r" (1)); } EXPORT_SYMBOL_GPL(amd_clear_divider);
linux-master
arch/x86/kernel/cpu/amd.c
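A compilable sketch of how cpu_has_amd_erratum() in the file above matches a family/model/stepping against the AMD_MODEL_RANGE() encoding, using the first Zenbleed range as a worked example. The three macros are copied from the file; range_matches() and main() are illustrative additions.

#include <stdint.h>
#include <stdio.h>

/* Copied from arch/x86/kernel/cpu/amd.c above. */
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	(((f) << 24) | ((m_start) << 16) | ((s_start) << 12) | ((m_end) << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

static int range_matches(uint32_t range, unsigned int family,
			 unsigned int model, unsigned int stepping)
{
	/* cpu_has_amd_erratum() packs model/stepping the same way. */
	uint32_t ms = (model << 4) | stepping;

	return family == AMD_MODEL_RANGE_FAMILY(range) &&
	       ms >= AMD_MODEL_RANGE_START(range) &&
	       ms <= AMD_MODEL_RANGE_END(range);
}

int main(void)
{
	/* First range of amd_zenbleed[]: family 0x17, models 0x30-0x4f, any stepping. */
	uint32_t range = AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf);

	printf("17h/31h/0 matches: %d\n", range_matches(range, 0x17, 0x31, 0x0)); /* 1 */
	printf("17h/71h/0 matches: %d\n", range_matches(range, 0x17, 0x71, 0x0)); /* 0 */
	return 0;
}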
// SPDX-License-Identifier: GPL-2.0+ /* * Hygon Processor Support for Linux * * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd. * * Author: Pu Wen <[email protected]> */ #include <linux/io.h> #include <asm/apic.h> #include <asm/cpu.h> #include <asm/smp.h> #include <asm/numa.h> #include <asm/cacheinfo.h> #include <asm/spec-ctrl.h> #include <asm/delay.h> #include "cpu.h" #define APICID_SOCKET_ID_BIT 6 /* * nodes_per_socket: Stores the number of nodes per socket. * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8] */ static u32 nodes_per_socket = 1; #ifdef CONFIG_NUMA /* * To workaround broken NUMA config. Read the comment in * srat_detect_node(). */ static int nearby_node(int apicid) { int i, node; for (i = apicid - 1; i >= 0; i--) { node = __apicid_to_node[i]; if (node != NUMA_NO_NODE && node_online(node)) return node; } for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) { node = __apicid_to_node[i]; if (node != NUMA_NO_NODE && node_online(node)) return node; } return first_node(node_online_map); /* Shouldn't happen */ } #endif static void hygon_get_topology_early(struct cpuinfo_x86 *c) { if (cpu_has(c, X86_FEATURE_TOPOEXT)) smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1; } /* * Fixup core topology information for * (1) Hygon multi-node processors * Assumption: Number of cores in each internal node is the same. * (2) Hygon processors supporting compute units */ static void hygon_get_topology(struct cpuinfo_x86 *c) { int cpu = smp_processor_id(); /* get information required for multi-node processors */ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { int err; u32 eax, ebx, ecx, edx; cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); c->cpu_die_id = ecx & 0xff; c->cpu_core_id = ebx & 0xff; if (smp_num_siblings > 1) c->x86_max_cores /= smp_num_siblings; /* * In case leaf B is available, use it to derive * topology information. */ err = detect_extended_topology(c); if (!err) c->x86_coreid_bits = get_count_order(c->x86_max_cores); /* Socket ID is ApicId[6] for these processors. */ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT; cacheinfo_hygon_init_llc_id(c, cpu); } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { u64 value; rdmsrl(MSR_FAM10H_NODE_ID, value); c->cpu_die_id = value & 7; per_cpu(cpu_llc_id, cpu) = c->cpu_die_id; } else return; if (nodes_per_socket > 1) set_cpu_cap(c, X86_FEATURE_AMD_DCM); } /* * On Hygon setup the lower bits of the APIC id distinguish the cores. * Assumes number of cores is a power of two. */ static void hygon_detect_cmp(struct cpuinfo_x86 *c) { unsigned int bits; int cpu = smp_processor_id(); bits = c->x86_coreid_bits; /* Low order bits define the core id (index of core in socket) */ c->cpu_core_id = c->initial_apicid & ((1 << bits)-1); /* Convert the initial APIC ID into the socket ID */ c->phys_proc_id = c->initial_apicid >> bits; /* use socket ID also for last level cache */ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id; } static void srat_detect_node(struct cpuinfo_x86 *c) { #ifdef CONFIG_NUMA int cpu = smp_processor_id(); int node; unsigned int apicid = c->apicid; node = numa_cpu_node(cpu); if (node == NUMA_NO_NODE) node = per_cpu(cpu_llc_id, cpu); /* * On multi-fabric platform (e.g. Numascale NumaChip) a * platform-specific handler needs to be called to fixup some * IDs of the CPU. */ if (x86_cpuinit.fixup_cpu_id) x86_cpuinit.fixup_cpu_id(c, node); if (!node_online(node)) { /* * Two possibilities here: * * - The CPU is missing memory and no node was created. In * that case try picking one from a nearby CPU. 
* * - The APIC IDs differ from the HyperTransport node IDs. * Assume they are all increased by a constant offset, but * in the same order as the HT nodeids. If that doesn't * result in a usable node fall back to the path for the * previous case. * * This workaround operates directly on the mapping between * APIC ID and NUMA node, assuming certain relationship * between APIC ID, HT node ID and NUMA topology. As going * through CPU mapping may alter the outcome, directly * access __apicid_to_node[]. */ int ht_nodeid = c->initial_apicid; if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE) node = __apicid_to_node[ht_nodeid]; /* Pick a nearby node */ if (!node_online(node)) node = nearby_node(apicid); } numa_set_node(cpu, node); #endif } static void early_init_hygon_mc(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP unsigned int bits, ecx; /* Multi core CPU? */ if (c->extended_cpuid_level < 0x80000008) return; ecx = cpuid_ecx(0x80000008); c->x86_max_cores = (ecx & 0xff) + 1; /* CPU telling us the core id bits shift? */ bits = (ecx >> 12) & 0xF; /* Otherwise recompute */ if (bits == 0) { while ((1 << bits) < c->x86_max_cores) bits++; } c->x86_coreid_bits = bits; #endif } static void bsp_init_hygon(struct cpuinfo_x86 *c) { if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { u64 val; rdmsrl(MSR_K7_HWCR, val); if (!(val & BIT(24))) pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n"); } if (cpu_has(c, X86_FEATURE_MWAITX)) use_mwaitx_delay(); if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { u32 ecx; ecx = cpuid_ecx(0x8000001e); __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1; } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) { u64 value; rdmsrl(MSR_FAM10H_NODE_ID, value); __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1; } if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) && !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) { /* * Try to cache the base value so further operations can * avoid RMW. If that faults, do not enable SSBD. */ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) { setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD); setup_force_cpu_cap(X86_FEATURE_SSBD); x86_amd_ls_cfg_ssbd_mask = 1ULL << 10; } } } static void early_init_hygon(struct cpuinfo_x86 *c) { u32 dummy; early_init_hygon_mc(c); set_cpu_cap(c, X86_FEATURE_K8); rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); /* * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate * with P/T states and does not stop in deep C-states */ if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */ if (c->x86_power & BIT(12)) set_cpu_cap(c, X86_FEATURE_ACC_POWER); /* Bit 14 indicates the Runtime Average Power Limit interface. */ if (c->x86_power & BIT(14)) set_cpu_cap(c, X86_FEATURE_RAPL); #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSCALL32); #endif #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) /* * ApicID can always be treated as an 8-bit value for Hygon APIC So, we * can safely set X86_FEATURE_EXTD_APICID unconditionally. */ if (boot_cpu_has(X86_FEATURE_APIC)) set_cpu_cap(c, X86_FEATURE_EXTD_APICID); #endif /* * This is only needed to tell the kernel whether to use VMCALL * and VMMCALL. VMMCALL is never executed except under virt, so * we can set it unconditionally. 
*/ set_cpu_cap(c, X86_FEATURE_VMMCALL); hygon_get_topology_early(c); } static void init_hygon(struct cpuinfo_x86 *c) { early_init_hygon(c); /* * Bit 31 in normal CPUID used for nonstandard 3DNow ID; * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ clear_cpu_cap(c, 0*32+31); set_cpu_cap(c, X86_FEATURE_REP_GOOD); /* get apicid instead of initial apic id from cpuid */ c->apicid = read_apic_id(); /* * XXX someone from Hygon needs to confirm this DTRT * init_spectral_chicken(c); */ set_cpu_cap(c, X86_FEATURE_ZEN); set_cpu_cap(c, X86_FEATURE_CPB); cpu_detect_cache_sizes(c); hygon_detect_cmp(c); hygon_get_topology(c); srat_detect_node(c); init_hygon_cacheinfo(c); if (cpu_has(c, X86_FEATURE_XMM2)) { /* * Use LFENCE for execution serialization. On families which * don't have that MSR, LFENCE is already serializing. * msr_set_bit() uses the safe accessors, too, even if the MSR * is not present. */ msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT); /* A serializing LFENCE stops RDTSC speculation */ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); } /* * Hygon processors have APIC timer running in deep C states. */ set_cpu_cap(c, X86_FEATURE_ARAT); /* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */ if (!cpu_feature_enabled(X86_FEATURE_XENPV)) set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); check_null_seg_clears_base(c); } static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c) { u32 ebx, eax, ecx, edx; u16 mask = 0xfff; if (c->extended_cpuid_level < 0x80000006) return; cpuid(0x80000006, &eax, &ebx, &ecx, &edx); tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask; tlb_lli_4k[ENTRIES] = ebx & mask; /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */ if (!((eax >> 16) & mask)) tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff; else tlb_lld_2m[ENTRIES] = (eax >> 16) & mask; /* a 4M entry uses two 2M entries */ tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1; /* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */ if (!(eax & mask)) { cpuid(0x80000005, &eax, &ebx, &ecx, &edx); tlb_lli_2m[ENTRIES] = eax & 0xff; } else tlb_lli_2m[ENTRIES] = eax & mask; tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1; } static const struct cpu_dev hygon_cpu_dev = { .c_vendor = "Hygon", .c_ident = { "HygonGenuine" }, .c_early_init = early_init_hygon, .c_detect_tlb = cpu_detect_tlb_hygon, .c_bsp_init = bsp_init_hygon, .c_init = init_hygon, .c_x86_vendor = X86_VENDOR_HYGON, }; cpu_dev_register(hygon_cpu_dev);
linux-master
arch/x86/kernel/cpu/hygon.c
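A short sketch of the APIC-ID split performed by amd_detect_cmp()/hygon_detect_cmp() above: the low x86_coreid_bits bits give the core index within a socket and the remaining bits give the package ID. The 3-bit, APIC-ID-0x0d example is hypothetical.

#include <stdio.h>

static void split_apicid(unsigned int apicid, unsigned int coreid_bits,
			 unsigned int *core_id, unsigned int *pkg_id)
{
	*core_id = apicid & ((1u << coreid_bits) - 1);	/* low bits: core within the socket */
	*pkg_id = apicid >> coreid_bits;		/* remaining bits: socket/package ID */
}

int main(void)
{
	unsigned int core, pkg;

	/* Hypothetical part with 3 core-id bits and initial APIC ID 0x0d. */
	split_apicid(0x0d, 3, &core, &pkg);
	printf("core %u, package %u\n", core, pkg);	/* core 5, package 1 */
	return 0;
}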
/* * VMware Detection code. * * Copyright (C) 2008, VMware, Inc. * Author : Alok N Kataria <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/dmi.h> #include <linux/init.h> #include <linux/export.h> #include <linux/clocksource.h> #include <linux/cpu.h> #include <linux/reboot.h> #include <linux/static_call.h> #include <asm/div64.h> #include <asm/x86_init.h> #include <asm/hypervisor.h> #include <asm/timer.h> #include <asm/apic.h> #include <asm/vmware.h> #include <asm/svm.h> #undef pr_fmt #define pr_fmt(fmt) "vmware: " fmt #define CPUID_VMWARE_INFO_LEAF 0x40000000 #define CPUID_VMWARE_FEATURES_LEAF 0x40000010 #define CPUID_VMWARE_FEATURES_ECX_VMMCALL BIT(0) #define CPUID_VMWARE_FEATURES_ECX_VMCALL BIT(1) #define VMWARE_HYPERVISOR_MAGIC 0x564D5868 #define VMWARE_CMD_GETVERSION 10 #define VMWARE_CMD_GETHZ 45 #define VMWARE_CMD_GETVCPU_INFO 68 #define VMWARE_CMD_LEGACY_X2APIC 3 #define VMWARE_CMD_VCPU_RESERVED 31 #define VMWARE_CMD_STEALCLOCK 91 #define STEALCLOCK_NOT_AVAILABLE (-1) #define STEALCLOCK_DISABLED 0 #define STEALCLOCK_ENABLED 1 #define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \ __asm__("inl (%%dx), %%eax" : \ "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \ "a"(VMWARE_HYPERVISOR_MAGIC), \ "c"(VMWARE_CMD_##cmd), \ "d"(VMWARE_HYPERVISOR_PORT), "b"(UINT_MAX) : \ "memory") #define VMWARE_VMCALL(cmd, eax, ebx, ecx, edx) \ __asm__("vmcall" : \ "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \ "a"(VMWARE_HYPERVISOR_MAGIC), \ "c"(VMWARE_CMD_##cmd), \ "d"(0), "b"(UINT_MAX) : \ "memory") #define VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx) \ __asm__("vmmcall" : \ "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \ "a"(VMWARE_HYPERVISOR_MAGIC), \ "c"(VMWARE_CMD_##cmd), \ "d"(0), "b"(UINT_MAX) : \ "memory") #define VMWARE_CMD(cmd, eax, ebx, ecx, edx) do { \ switch (vmware_hypercall_mode) { \ case CPUID_VMWARE_FEATURES_ECX_VMCALL: \ VMWARE_VMCALL(cmd, eax, ebx, ecx, edx); \ break; \ case CPUID_VMWARE_FEATURES_ECX_VMMCALL: \ VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx); \ break; \ default: \ VMWARE_PORT(cmd, eax, ebx, ecx, edx); \ break; \ } \ } while (0) struct vmware_steal_time { union { uint64_t clock; /* stolen time counter in units of vtsc */ struct { /* only for little-endian */ uint32_t clock_low; uint32_t clock_high; }; }; uint64_t reserved[7]; }; static unsigned long vmware_tsc_khz __ro_after_init; static u8 vmware_hypercall_mode __ro_after_init; static inline int __vmware_platform(void) { uint32_t eax, ebx, ecx, edx; VMWARE_CMD(GETVERSION, eax, ebx, ecx, edx); return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC; } static unsigned long vmware_get_tsc_khz(void) { return vmware_tsc_khz; } #ifdef CONFIG_PARAVIRT static struct cyc2ns_data vmware_cyc2ns __ro_after_init; static bool vmw_sched_clock __initdata = true; static DEFINE_PER_CPU_DECRYPTED(struct vmware_steal_time, vmw_steal_time) 
__aligned(64); static bool has_steal_clock; static bool steal_acc __initdata = true; /* steal time accounting */ static __init int setup_vmw_sched_clock(char *s) { vmw_sched_clock = false; return 0; } early_param("no-vmw-sched-clock", setup_vmw_sched_clock); static __init int parse_no_stealacc(char *arg) { steal_acc = false; return 0; } early_param("no-steal-acc", parse_no_stealacc); static noinstr u64 vmware_sched_clock(void) { unsigned long long ns; ns = mul_u64_u32_shr(rdtsc(), vmware_cyc2ns.cyc2ns_mul, vmware_cyc2ns.cyc2ns_shift); ns -= vmware_cyc2ns.cyc2ns_offset; return ns; } static void __init vmware_cyc2ns_setup(void) { struct cyc2ns_data *d = &vmware_cyc2ns; unsigned long long tsc_now = rdtsc(); clocks_calc_mult_shift(&d->cyc2ns_mul, &d->cyc2ns_shift, vmware_tsc_khz, NSEC_PER_MSEC, 0); d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul, d->cyc2ns_shift); pr_info("using clock offset of %llu ns\n", d->cyc2ns_offset); } static int vmware_cmd_stealclock(uint32_t arg1, uint32_t arg2) { uint32_t result, info; asm volatile (VMWARE_HYPERCALL : "=a"(result), "=c"(info) : "a"(VMWARE_HYPERVISOR_MAGIC), "b"(0), "c"(VMWARE_CMD_STEALCLOCK), "d"(0), "S"(arg1), "D"(arg2) : "memory"); return result; } static bool stealclock_enable(phys_addr_t pa) { return vmware_cmd_stealclock(upper_32_bits(pa), lower_32_bits(pa)) == STEALCLOCK_ENABLED; } static int __stealclock_disable(void) { return vmware_cmd_stealclock(0, 1); } static void stealclock_disable(void) { __stealclock_disable(); } static bool vmware_is_stealclock_available(void) { return __stealclock_disable() != STEALCLOCK_NOT_AVAILABLE; } /** * vmware_steal_clock() - read the per-cpu steal clock * @cpu: the cpu number whose steal clock we want to read * * The function reads the steal clock if we are on a 64-bit system, otherwise * reads it in parts, checking that the high part didn't change in the * meantime. * * Return: * The steal clock reading in ns. 
*/ static uint64_t vmware_steal_clock(int cpu) { struct vmware_steal_time *steal = &per_cpu(vmw_steal_time, cpu); uint64_t clock; if (IS_ENABLED(CONFIG_64BIT)) clock = READ_ONCE(steal->clock); else { uint32_t initial_high, low, high; do { initial_high = READ_ONCE(steal->clock_high); /* Do not reorder initial_high and high readings */ virt_rmb(); low = READ_ONCE(steal->clock_low); /* Keep low reading in between */ virt_rmb(); high = READ_ONCE(steal->clock_high); } while (initial_high != high); clock = ((uint64_t)high << 32) | low; } return mul_u64_u32_shr(clock, vmware_cyc2ns.cyc2ns_mul, vmware_cyc2ns.cyc2ns_shift); } static void vmware_register_steal_time(void) { int cpu = smp_processor_id(); struct vmware_steal_time *st = &per_cpu(vmw_steal_time, cpu); if (!has_steal_clock) return; if (!stealclock_enable(slow_virt_to_phys(st))) { has_steal_clock = false; return; } pr_info("vmware-stealtime: cpu %d, pa %llx\n", cpu, (unsigned long long) slow_virt_to_phys(st)); } static void vmware_disable_steal_time(void) { if (!has_steal_clock) return; stealclock_disable(); } static void vmware_guest_cpu_init(void) { if (has_steal_clock) vmware_register_steal_time(); } static void vmware_pv_guest_cpu_reboot(void *unused) { vmware_disable_steal_time(); } static int vmware_pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused) { if (code == SYS_RESTART) on_each_cpu(vmware_pv_guest_cpu_reboot, NULL, 1); return NOTIFY_DONE; } static struct notifier_block vmware_pv_reboot_nb = { .notifier_call = vmware_pv_reboot_notify, }; #ifdef CONFIG_SMP static void __init vmware_smp_prepare_boot_cpu(void) { vmware_guest_cpu_init(); native_smp_prepare_boot_cpu(); } static int vmware_cpu_online(unsigned int cpu) { local_irq_disable(); vmware_guest_cpu_init(); local_irq_enable(); return 0; } static int vmware_cpu_down_prepare(unsigned int cpu) { local_irq_disable(); vmware_disable_steal_time(); local_irq_enable(); return 0; } #endif static __init int activate_jump_labels(void) { if (has_steal_clock) { static_key_slow_inc(&paravirt_steal_enabled); if (steal_acc) static_key_slow_inc(&paravirt_steal_rq_enabled); } return 0; } arch_initcall(activate_jump_labels); static void __init vmware_paravirt_ops_setup(void) { pv_info.name = "VMware hypervisor"; pv_ops.cpu.io_delay = paravirt_nop; if (vmware_tsc_khz == 0) return; vmware_cyc2ns_setup(); if (vmw_sched_clock) paravirt_set_sched_clock(vmware_sched_clock); if (vmware_is_stealclock_available()) { has_steal_clock = true; static_call_update(pv_steal_clock, vmware_steal_clock); /* We use reboot notifier only to disable steal clock */ register_reboot_notifier(&vmware_pv_reboot_nb); #ifdef CONFIG_SMP smp_ops.smp_prepare_boot_cpu = vmware_smp_prepare_boot_cpu; if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/vmware:online", vmware_cpu_online, vmware_cpu_down_prepare) < 0) pr_err("vmware_guest: Failed to install cpu hotplug callbacks\n"); #else vmware_guest_cpu_init(); #endif } } #else #define vmware_paravirt_ops_setup() do {} while (0) #endif /* * VMware hypervisor takes care of exporting a reliable TSC to the guest. * Still, due to timing difference when running on virtual cpus, the TSC can * be marked as unstable in some cases. For example, the TSC sync check at * bootup can fail due to a marginal offset between vcpus' TSCs (though the * TSCs do not drift from each other). 
Also, the ACPI PM timer clocksource * is not suitable as a watchdog when running on a hypervisor because the * kernel may miss a wrap of the counter if the vcpu is descheduled for a * long time. To skip these checks at runtime we set these capability bits, * so that the kernel could just trust the hypervisor with providing a * reliable virtual TSC that is suitable for timekeeping. */ static void __init vmware_set_capabilities(void) { setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC); setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); if (vmware_tsc_khz) setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMCALL) setup_force_cpu_cap(X86_FEATURE_VMCALL); else if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMMCALL) setup_force_cpu_cap(X86_FEATURE_VMW_VMMCALL); } static void __init vmware_platform_setup(void) { uint32_t eax, ebx, ecx, edx; uint64_t lpj, tsc_khz; VMWARE_CMD(GETHZ, eax, ebx, ecx, edx); if (ebx != UINT_MAX) { lpj = tsc_khz = eax | (((uint64_t)ebx) << 32); do_div(tsc_khz, 1000); WARN_ON(tsc_khz >> 32); pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n", (unsigned long) tsc_khz / 1000, (unsigned long) tsc_khz % 1000); if (!preset_lpj) { do_div(lpj, HZ); preset_lpj = lpj; } vmware_tsc_khz = tsc_khz; x86_platform.calibrate_tsc = vmware_get_tsc_khz; x86_platform.calibrate_cpu = vmware_get_tsc_khz; #ifdef CONFIG_X86_LOCAL_APIC /* Skip lapic calibration since we know the bus frequency. */ lapic_timer_period = ecx / HZ; pr_info("Host bus clock speed read from hypervisor : %u Hz\n", ecx); #endif } else { pr_warn("Failed to get TSC freq from the hypervisor\n"); } vmware_paravirt_ops_setup(); #ifdef CONFIG_X86_IO_APIC no_timer_check = 1; #endif vmware_set_capabilities(); } static u8 __init vmware_select_hypercall(void) { int eax, ebx, ecx, edx; cpuid(CPUID_VMWARE_FEATURES_LEAF, &eax, &ebx, &ecx, &edx); return (ecx & (CPUID_VMWARE_FEATURES_ECX_VMMCALL | CPUID_VMWARE_FEATURES_ECX_VMCALL)); } /* * While checking the dmi string information, just checking the product * serial key should be enough, as this will always have a VMware * specific string when running under VMware hypervisor. * If !boot_cpu_has(X86_FEATURE_HYPERVISOR), vmware_hypercall_mode * intentionally defaults to 0. */ static uint32_t __init vmware_platform(void) { if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { unsigned int eax; unsigned int hyper_vendor_id[3]; cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0], &hyper_vendor_id[1], &hyper_vendor_id[2]); if (!memcmp(hyper_vendor_id, "VMwareVMware", 12)) { if (eax >= CPUID_VMWARE_FEATURES_LEAF) vmware_hypercall_mode = vmware_select_hypercall(); pr_info("hypercall mode: 0x%02x\n", (unsigned int) vmware_hypercall_mode); return CPUID_VMWARE_INFO_LEAF; } } else if (dmi_available && dmi_name_in_serial("VMware") && __vmware_platform()) return 1; return 0; } /* Checks if hypervisor supports x2apic without VT-D interrupt remapping. 
*/ static bool __init vmware_legacy_x2apic_available(void) { uint32_t eax, ebx, ecx, edx; VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx); return !(eax & BIT(VMWARE_CMD_VCPU_RESERVED)) && (eax & BIT(VMWARE_CMD_LEGACY_X2APIC)); } #ifdef CONFIG_AMD_MEM_ENCRYPT static void vmware_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs) { /* Copy VMWARE specific Hypercall parameters to the GHCB */ ghcb_set_rip(ghcb, regs->ip); ghcb_set_rbx(ghcb, regs->bx); ghcb_set_rcx(ghcb, regs->cx); ghcb_set_rdx(ghcb, regs->dx); ghcb_set_rsi(ghcb, regs->si); ghcb_set_rdi(ghcb, regs->di); ghcb_set_rbp(ghcb, regs->bp); } static bool vmware_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs) { if (!(ghcb_rbx_is_valid(ghcb) && ghcb_rcx_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) && ghcb_rsi_is_valid(ghcb) && ghcb_rdi_is_valid(ghcb) && ghcb_rbp_is_valid(ghcb))) return false; regs->bx = ghcb_get_rbx(ghcb); regs->cx = ghcb_get_rcx(ghcb); regs->dx = ghcb_get_rdx(ghcb); regs->si = ghcb_get_rsi(ghcb); regs->di = ghcb_get_rdi(ghcb); regs->bp = ghcb_get_rbp(ghcb); return true; } #endif const __initconst struct hypervisor_x86 x86_hyper_vmware = { .name = "VMware", .detect = vmware_platform, .type = X86_HYPER_VMWARE, .init.init_platform = vmware_platform_setup, .init.x2apic_available = vmware_legacy_x2apic_available, #ifdef CONFIG_AMD_MEM_ENCRYPT .runtime.sev_es_hcall_prepare = vmware_sev_es_hcall_prepare, .runtime.sev_es_hcall_finish = vmware_sev_es_hcall_finish, #endif };
linux-master
arch/x86/kernel/cpu/vmware.c
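The detection path in vmware_platform() above boils down to a CPUID signature compare against "VMwareVMware". The user-space sketch below mirrors that check under two assumptions: that CPUID_VMWARE_INFO_LEAF refers to the conventional hypervisor information leaf 0x40000000, and that testing the CPUID.1:ECX hypervisor bit is an acceptable stand-in for boot_cpu_has(X86_FEATURE_HYPERVISOR). It is an illustration only; the DMI serial-number fallback is not reproduced.

/*
 * Sketch of the hypervisor signature check, mirroring the CPUID
 * branch of vmware_platform().  Assumes leaf 0x40000000 is the
 * hypervisor information leaf named CPUID_VMWARE_INFO_LEAF in the
 * kernel source.  Build with a GCC/Clang toolchain on x86.
 */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char sig[13] = { 0 };

	/* CPUID.1:ECX bit 31 is set when running under a hypervisor. */
	__cpuid(1, eax, ebx, ecx, edx);
	if (!(ecx & (1u << 31))) {
		puts("no hypervisor CPUID bit set");
		return 1;
	}

	/* The vendor signature is returned in EBX, ECX, EDX of the base leaf. */
	__cpuid(0x40000000, eax, ebx, ecx, edx);
	memcpy(sig + 0, &ebx, 4);
	memcpy(sig + 4, &ecx, 4);
	memcpy(sig + 8, &edx, 4);

	printf("hypervisor signature: \"%s\"\n", sig);
	return memcmp(sig, "VMwareVMware", 12) ? 1 : 0;
}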
// SPDX-License-Identifier: GPL-2.0 /* * ACRN detection support * * Copyright (C) 2019 Intel Corporation. All rights reserved. * * Jason Chen CJ <[email protected]> * Zhao Yakui <[email protected]> * */ #include <linux/interrupt.h> #include <asm/acrn.h> #include <asm/apic.h> #include <asm/cpufeatures.h> #include <asm/desc.h> #include <asm/hypervisor.h> #include <asm/idtentry.h> #include <asm/irq_regs.h> static u32 __init acrn_detect(void) { return acrn_cpuid_base(); } static void __init acrn_init_platform(void) { /* Setup the IDT for ACRN hypervisor callback */ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_acrn_hv_callback); x86_platform.calibrate_tsc = acrn_get_tsc_khz; x86_platform.calibrate_cpu = acrn_get_tsc_khz; } static bool acrn_x2apic_available(void) { return boot_cpu_has(X86_FEATURE_X2APIC); } static void (*acrn_intr_handler)(void); DEFINE_IDTENTRY_SYSVEC(sysvec_acrn_hv_callback) { struct pt_regs *old_regs = set_irq_regs(regs); /* * The hypervisor requires that the APIC EOI should be acked. * If the APIC EOI is not acked, the APIC ISR bit for the * HYPERVISOR_CALLBACK_VECTOR will not be cleared and then it * will block the interrupt whose vector is lower than * HYPERVISOR_CALLBACK_VECTOR. */ apic_eoi(); inc_irq_stat(irq_hv_callback_count); if (acrn_intr_handler) acrn_intr_handler(); set_irq_regs(old_regs); } void acrn_setup_intr_handler(void (*handler)(void)) { acrn_intr_handler = handler; } EXPORT_SYMBOL_GPL(acrn_setup_intr_handler); void acrn_remove_intr_handler(void) { acrn_intr_handler = NULL; } EXPORT_SYMBOL_GPL(acrn_remove_intr_handler); const __initconst struct hypervisor_x86 x86_hyper_acrn = { .name = "ACRN", .detect = acrn_detect, .type = X86_HYPER_ACRN, .init.init_platform = acrn_init_platform, .init.x2apic_available = acrn_x2apic_available, };
linux-master
arch/x86/kernel/cpu/acrn.c
// SPDX-License-Identifier: GPL-2.0 /* * Intel Transactional Synchronization Extensions (TSX) control. * * Copyright (C) 2019-2021 Intel Corporation * * Author: * Pawan Gupta <[email protected]> */ #include <linux/cpufeature.h> #include <asm/cmdline.h> #include <asm/cpu.h> #include "cpu.h" #undef pr_fmt #define pr_fmt(fmt) "tsx: " fmt enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED; static void tsx_disable(void) { u64 tsx; rdmsrl(MSR_IA32_TSX_CTRL, tsx); /* Force all transactions to immediately abort */ tsx |= TSX_CTRL_RTM_DISABLE; /* * Ensure TSX support is not enumerated in CPUID. * This is visible to userspace and will ensure they * do not waste resources trying TSX transactions that * will always abort. */ tsx |= TSX_CTRL_CPUID_CLEAR; wrmsrl(MSR_IA32_TSX_CTRL, tsx); } static void tsx_enable(void) { u64 tsx; rdmsrl(MSR_IA32_TSX_CTRL, tsx); /* Enable the RTM feature in the cpu */ tsx &= ~TSX_CTRL_RTM_DISABLE; /* * Ensure TSX support is enumerated in CPUID. * This is visible to userspace and will ensure they * can enumerate and use the TSX feature. */ tsx &= ~TSX_CTRL_CPUID_CLEAR; wrmsrl(MSR_IA32_TSX_CTRL, tsx); } static enum tsx_ctrl_states x86_get_tsx_auto_mode(void) { if (boot_cpu_has_bug(X86_BUG_TAA)) return TSX_CTRL_DISABLE; return TSX_CTRL_ENABLE; } /* * Disabling TSX is not a trivial business. * * First of all, there's a CPUID bit: X86_FEATURE_RTM_ALWAYS_ABORT * which says that TSX is practically disabled (all transactions are * aborted by default). When that bit is set, the kernel unconditionally * disables TSX. * * In order to do that, however, it needs to dance a bit: * * 1. The first method to disable it is through MSR_TSX_FORCE_ABORT and * the MSR is present only when *two* CPUID bits are set: * * - X86_FEATURE_RTM_ALWAYS_ABORT * - X86_FEATURE_TSX_FORCE_ABORT * * 2. The second method is for CPUs which do not have the above-mentioned * MSR: those use a different MSR - MSR_IA32_TSX_CTRL and disable TSX * through that one. Those CPUs can also have the initially mentioned * CPUID bit X86_FEATURE_RTM_ALWAYS_ABORT set and for those the same strategy * applies: TSX gets disabled unconditionally. * * When either of the two methods are present, the kernel disables TSX and * clears the respective RTM and HLE feature flags. * * An additional twist in the whole thing presents late microcode loading * which, when done, may cause for the X86_FEATURE_RTM_ALWAYS_ABORT CPUID * bit to be set after the update. * * A subsequent hotplug operation on any logical CPU except the BSP will * cause for the supported CPUID feature bits to get re-detected and, if * RTM and HLE get cleared all of a sudden, but, userspace did consult * them before the update, then funny explosions will happen. Long story * short: the kernel doesn't modify CPUID feature bits after booting. * * That's why, this function's call in init_intel() doesn't clear the * feature flags. */ static void tsx_clear_cpuid(void) { u64 msr; /* * MSR_TFA_TSX_CPUID_CLEAR bit is only present when both CPUID * bits RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are present. 
*/ if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) && boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) { rdmsrl(MSR_TSX_FORCE_ABORT, msr); msr |= MSR_TFA_TSX_CPUID_CLEAR; wrmsrl(MSR_TSX_FORCE_ABORT, msr); } else if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL)) { rdmsrl(MSR_IA32_TSX_CTRL, msr); msr |= TSX_CTRL_CPUID_CLEAR; wrmsrl(MSR_IA32_TSX_CTRL, msr); } } /* * Disable TSX development mode * * When the microcode released in Feb 2022 is applied, TSX will be disabled by * default on some processors. MSR 0x122 (TSX_CTRL) and MSR 0x123 * (IA32_MCU_OPT_CTRL) can be used to re-enable TSX for development, doing so is * not recommended for production deployments. In particular, applying MD_CLEAR * flows for mitigation of the Intel TSX Asynchronous Abort (TAA) transient * execution attack may not be effective on these processors when Intel TSX is * enabled with updated microcode. */ static void tsx_dev_mode_disable(void) { u64 mcu_opt_ctrl; /* Check if RTM_ALLOW exists */ if (!boot_cpu_has_bug(X86_BUG_TAA) || !cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL) || !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL)) return; rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl); if (mcu_opt_ctrl & RTM_ALLOW) { mcu_opt_ctrl &= ~RTM_ALLOW; wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl); setup_force_cpu_cap(X86_FEATURE_RTM_ALWAYS_ABORT); } } void __init tsx_init(void) { char arg[5] = {}; int ret; tsx_dev_mode_disable(); /* * Hardware will always abort a TSX transaction when the CPUID bit * RTM_ALWAYS_ABORT is set. In this case, it is better not to enumerate * CPUID.RTM and CPUID.HLE bits. Clear them here. */ if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) { tsx_ctrl_state = TSX_CTRL_RTM_ALWAYS_ABORT; tsx_clear_cpuid(); setup_clear_cpu_cap(X86_FEATURE_RTM); setup_clear_cpu_cap(X86_FEATURE_HLE); return; } /* * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES. * * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get * MSR_IA32_TSX_CTRL support even after a microcode update. Thus, * tsx= cmdline requests will do nothing on CPUs without * MSR_IA32_TSX_CTRL support. */ if (x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR) { setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL); } else { tsx_ctrl_state = TSX_CTRL_NOT_SUPPORTED; return; } ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg)); if (ret >= 0) { if (!strcmp(arg, "on")) { tsx_ctrl_state = TSX_CTRL_ENABLE; } else if (!strcmp(arg, "off")) { tsx_ctrl_state = TSX_CTRL_DISABLE; } else if (!strcmp(arg, "auto")) { tsx_ctrl_state = x86_get_tsx_auto_mode(); } else { tsx_ctrl_state = TSX_CTRL_DISABLE; pr_err("invalid option, defaulting to off\n"); } } else { /* tsx= not provided */ if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_AUTO)) tsx_ctrl_state = x86_get_tsx_auto_mode(); else if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_OFF)) tsx_ctrl_state = TSX_CTRL_DISABLE; else tsx_ctrl_state = TSX_CTRL_ENABLE; } if (tsx_ctrl_state == TSX_CTRL_DISABLE) { tsx_disable(); /* * tsx_disable() will change the state of the RTM and HLE CPUID * bits. Clear them here since they are now expected to be not * set. */ setup_clear_cpu_cap(X86_FEATURE_RTM); setup_clear_cpu_cap(X86_FEATURE_HLE); } else if (tsx_ctrl_state == TSX_CTRL_ENABLE) { /* * HW defaults TSX to be enabled at bootup. 
* We may still need the TSX enable support * during init for special cases like * kexec after TSX is disabled. */ tsx_enable(); /* * tsx_enable() will change the state of the RTM and HLE CPUID * bits. Force them here since they are now expected to be set. */ setup_force_cpu_cap(X86_FEATURE_RTM); setup_force_cpu_cap(X86_FEATURE_HLE); } } void tsx_ap_init(void) { tsx_dev_mode_disable(); if (tsx_ctrl_state == TSX_CTRL_ENABLE) tsx_enable(); else if (tsx_ctrl_state == TSX_CTRL_DISABLE) tsx_disable(); else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT) /* See comment over that function for more details. */ tsx_clear_cpuid(); }
linux-master
arch/x86/kernel/cpu/tsx.c
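To inspect the result of tsx_init() from user space, the sketch below reads MSR 0x122 (the TSX_CTRL address given in the comment above) through the generic /dev/cpu/N/msr interface. The decoded bit layout (bit 0: RTM_DISABLE, bit 1: TSX_CPUID_CLEAR) is taken from Intel's IA32_TSX_CTRL definition and is an assumption of this example, as is the availability of the msr driver; run it as root.

/*
 * Rough user-space check of the TSX_CTRL state programmed by the
 * code above.  Assumes the msr driver is loaded (/dev/cpu/0/msr)
 * and the Intel IA32_TSX_CTRL bit layout: bit 0 = RTM_DISABLE,
 * bit 1 = TSX_CPUID_CLEAR.  Needs root.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr (is the msr module loaded?)");
		return 1;
	}
	if (pread(fd, &val, sizeof(val), 0x122) != (ssize_t)sizeof(val)) {
		perror("read MSR 0x122 (no TSX_CTRL support?)");
		close(fd);
		return 1;
	}
	printf("TSX_CTRL = %#llx: RTM %s, CPUID enumeration %s\n",
	       (unsigned long long)val,
	       (val & 0x1) ? "force-aborted" : "enabled",
	       (val & 0x2) ? "hidden" : "visible");
	close(fd);
	return 0;
}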
// SPDX-License-Identifier: GPL-2.0 #include <linux/sched.h> #include <linux/sched/clock.h> #include <asm/cpu.h> #include <asm/cpufeature.h> #include <asm/e820/api.h> #include <asm/mtrr.h> #include <asm/msr.h> #include "cpu.h" #define ACE_PRESENT (1 << 6) #define ACE_ENABLED (1 << 7) #define ACE_FCR (1 << 28) /* MSR_VIA_FCR */ #define RNG_PRESENT (1 << 2) #define RNG_ENABLED (1 << 3) #define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */ static void init_c3(struct cpuinfo_x86 *c) { u32 lo, hi; /* Test for Centaur Extended Feature Flags presence */ if (cpuid_eax(0xC0000000) >= 0xC0000001) { u32 tmp = cpuid_edx(0xC0000001); /* enable ACE unit, if present and disabled */ if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) { rdmsr(MSR_VIA_FCR, lo, hi); lo |= ACE_FCR; /* enable ACE unit */ wrmsr(MSR_VIA_FCR, lo, hi); pr_info("CPU: Enabled ACE h/w crypto\n"); } /* enable RNG unit, if present and disabled */ if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) { rdmsr(MSR_VIA_RNG, lo, hi); lo |= RNG_ENABLE; /* enable RNG unit */ wrmsr(MSR_VIA_RNG, lo, hi); pr_info("CPU: Enabled h/w RNG\n"); } /* store Centaur Extended Feature Flags as * word 5 of the CPU capability bit array */ c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001); } #ifdef CONFIG_X86_32 /* Cyrix III family needs CX8 & PGE explicitly enabled. */ if (c->x86_model >= 6 && c->x86_model <= 13) { rdmsr(MSR_VIA_FCR, lo, hi); lo |= (1<<1 | 1<<7); wrmsr(MSR_VIA_FCR, lo, hi); set_cpu_cap(c, X86_FEATURE_CX8); } /* Before Nehemiah, the C3's had 3dNOW! */ if (c->x86_model >= 6 && c->x86_model < 9) set_cpu_cap(c, X86_FEATURE_3DNOW); #endif if (c->x86 == 0x6 && c->x86_model >= 0xf) { c->x86_cache_alignment = c->x86_clflush_size * 2; set_cpu_cap(c, X86_FEATURE_REP_GOOD); } if (c->x86 >= 7) set_cpu_cap(c, X86_FEATURE_REP_GOOD); } enum { ECX8 = 1<<1, EIERRINT = 1<<2, DPM = 1<<3, DMCE = 1<<4, DSTPCLK = 1<<5, ELINEAR = 1<<6, DSMC = 1<<7, DTLOCK = 1<<8, EDCTLB = 1<<8, EMMX = 1<<9, DPDC = 1<<11, EBRPRED = 1<<12, DIC = 1<<13, DDC = 1<<14, DNA = 1<<15, ERETSTK = 1<<16, E2MMX = 1<<19, EAMD3D = 1<<20, }; static void early_init_centaur(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_32 /* Emulate MTRRs using Centaur's MCR. 
*/ if (c->x86 == 5) set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); #endif if ((c->x86 == 6 && c->x86_model >= 0xf) || (c->x86 >= 7)) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSENTER32); #endif if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } } static void init_centaur(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_32 char *name; u32 fcr_set = 0; u32 fcr_clr = 0; u32 lo, hi, newlo; u32 aa, bb, cc, dd; /* * Bit 31 in normal CPUID used for nonstandard 3DNow ID; * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ clear_cpu_cap(c, 0*32+31); #endif early_init_centaur(c); init_intel_cacheinfo(c); detect_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif if (c->cpuid_level > 9) { unsigned int eax = cpuid_eax(10); /* * Check for version and the number of counters * Version(eax[7:0]) can't be 0; * Counters(eax[15:8]) should be greater than 1; */ if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1)) set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); } #ifdef CONFIG_X86_32 if (c->x86 == 5) { switch (c->x86_model) { case 4: name = "C6"; fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK; fcr_clr = DPDC; pr_notice("Disabling bugged TSC.\n"); clear_cpu_cap(c, X86_FEATURE_TSC); break; case 8: switch (c->x86_stepping) { default: name = "2"; break; case 7 ... 9: name = "2A"; break; case 10 ... 15: name = "2B"; break; } fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| E2MMX|EAMD3D; fcr_clr = DPDC; break; case 9: name = "3"; fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| E2MMX|EAMD3D; fcr_clr = DPDC; break; default: name = "??"; } rdmsr(MSR_IDT_FCR1, lo, hi); newlo = (lo|fcr_set) & (~fcr_clr); if (newlo != lo) { pr_info("Centaur FCR was 0x%X now 0x%X\n", lo, newlo); wrmsr(MSR_IDT_FCR1, newlo, hi); } else { pr_info("Centaur FCR is 0x%X\n", lo); } /* Emulate MTRRs using Centaur's MCR. */ set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); /* Report CX8 */ set_cpu_cap(c, X86_FEATURE_CX8); /* Set 3DNow! on Winchip 2 and above. */ if (c->x86_model >= 8) set_cpu_cap(c, X86_FEATURE_3DNOW); /* See if we can find out some more. */ if (cpuid_eax(0x80000000) >= 0x80000005) { /* Yes, we can. */ cpuid(0x80000005, &aa, &bb, &cc, &dd); /* Add L1 data and code cache sizes. */ c->x86_cache_size = (cc>>24)+(dd>>24); } sprintf(c->x86_model_id, "WinChip %s", name); } #endif if (c->x86 == 6 || c->x86 >= 7) init_c3(c); #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); #endif init_ia32_feat_ctl(c); } #ifdef CONFIG_X86_32 static unsigned int centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) { /* VIA C3 CPUs (670-68F) need further shifting. */ if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8))) size >>= 8; /* * There's also an erratum in Nehemiah stepping 1, which * returns '65KB' instead of '64KB' * - Note, it seems this may only be in engineering samples. */ if ((c->x86 == 6) && (c->x86_model == 9) && (c->x86_stepping == 1) && (size == 65)) size -= 1; return size; } #endif static const struct cpu_dev centaur_cpu_dev = { .c_vendor = "Centaur", .c_ident = { "CentaurHauls" }, .c_early_init = early_init_centaur, .c_init = init_centaur, #ifdef CONFIG_X86_32 .legacy_cache_size = centaur_size_cache, #endif .c_x86_vendor = X86_VENDOR_CENTAUR, }; cpu_dev_register(centaur_cpu_dev);
linux-master
arch/x86/kernel/cpu/centaur.c
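The WinChip branch above derives x86_cache_size by summing the top bytes of ECX and EDX from extended leaf 0x80000005. Below is a small user-space sketch of the same read, assuming the AMD/Centaur convention that those bytes carry the L1 data and instruction cache sizes in KB.

/*
 * User-space read of extended leaf 0x80000005, mirroring the
 * (cc>>24)+(dd>>24) computation above.  The top-byte-is-KB layout
 * is the AMD/Centaur convention assumed for this sketch.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000005, &eax, &ebx, &ecx, &edx)) {
		puts("extended leaf 0x80000005 not available");
		return 1;
	}
	printf("L1D: %u KB, L1I: %u KB, total: %u KB\n",
	       ecx >> 24, edx >> 24, (ecx >> 24) + (edx >> 24));
	return 0;
}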
// SPDX-License-Identifier: GPL-2.0 /* * Routines to identify caches on Intel CPU. * * Changes: * Venkatesh Pallipadi : Adding cache identification through cpuid(4) * Ashok Raj <[email protected]>: Work with CPU hotplug infrastructure. * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. */ #include <linux/slab.h> #include <linux/cacheinfo.h> #include <linux/cpu.h> #include <linux/cpuhotplug.h> #include <linux/sched.h> #include <linux/capability.h> #include <linux/sysfs.h> #include <linux/pci.h> #include <linux/stop_machine.h> #include <asm/cpufeature.h> #include <asm/cacheinfo.h> #include <asm/amd_nb.h> #include <asm/smp.h> #include <asm/mtrr.h> #include <asm/tlbflush.h> #include "cpu.h" #define LVL_1_INST 1 #define LVL_1_DATA 2 #define LVL_2 3 #define LVL_3 4 #define LVL_TRACE 5 /* Shared last level cache maps */ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); /* Shared L2 cache maps */ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map); static cpumask_var_t cpu_cacheinfo_mask; /* Kernel controls MTRR and/or PAT MSRs. */ unsigned int memory_caching_control __ro_after_init; struct _cache_table { unsigned char descriptor; char cache_type; short size; }; #define MB(x) ((x) * 1024) /* All the cache descriptor types we care about (no TLB or trace cache entries) */ static const struct _cache_table cache_table[] = { { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */ { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */ { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */ { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */ { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */ { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */ { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */ { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */ { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */ { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */ { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */ { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */ { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */ { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */ { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */ { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */ { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */ { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */ { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */ { 0x4b, LVL_3, 
MB(8) }, /* 16-way set assoc, 64 byte line size */ { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */ { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */ { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */ { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */ { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */ { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */ { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */ { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */ { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */ { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */ { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */ { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */ { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */ { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */ { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */ { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */ { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */ { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */ { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */ { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */ { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */ { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */ { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */ { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */ { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */ { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */ { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */ { 0x00, 0, 0} }; enum _cache_type { CTYPE_NULL = 0, CTYPE_DATA = 1, CTYPE_INST = 2, CTYPE_UNIFIED = 3 }; union _cpuid4_leaf_eax { struct { enum _cache_type type:5; unsigned int level:3; unsigned int is_self_initializing:1; unsigned int is_fully_associative:1; unsigned int reserved:4; unsigned int num_threads_sharing:12; unsigned int num_cores_on_die:6; } split; u32 full; }; union _cpuid4_leaf_ebx { struct { unsigned int coherency_line_size:12; unsigned int physical_line_partition:10; unsigned int ways_of_associativity:10; } split; u32 full; }; union _cpuid4_leaf_ecx { struct { unsigned int number_of_sets:32; } split; u32 full; }; struct _cpuid4_info_regs { union _cpuid4_leaf_eax eax; union _cpuid4_leaf_ebx ebx; union _cpuid4_leaf_ecx ecx; unsigned int id; unsigned long size; struct amd_northbridge *nb; }; static unsigned short 
num_cache_leaves; /* AMD doesn't have CPUID4. Emulate it here to report the same information to the user. This makes some assumptions about the machine: L2 not shared, no SMT etc. that is currently true on AMD CPUs. In theory the TLBs could be reported as fake type (they are in "dummy"). Maybe later */ union l1_cache { struct { unsigned line_size:8; unsigned lines_per_tag:8; unsigned assoc:8; unsigned size_in_kb:8; }; unsigned val; }; union l2_cache { struct { unsigned line_size:8; unsigned lines_per_tag:4; unsigned assoc:4; unsigned size_in_kb:16; }; unsigned val; }; union l3_cache { struct { unsigned line_size:8; unsigned lines_per_tag:4; unsigned assoc:4; unsigned res:2; unsigned size_encoded:14; }; unsigned val; }; static const unsigned short assocs[] = { [1] = 1, [2] = 2, [4] = 4, [6] = 8, [8] = 16, [0xa] = 32, [0xb] = 48, [0xc] = 64, [0xd] = 96, [0xe] = 128, [0xf] = 0xffff /* fully associative - no way to show this currently */ }; static const unsigned char levels[] = { 1, 1, 2, 3 }; static const unsigned char types[] = { 1, 2, 3, 3 }; static const enum cache_type cache_type_map[] = { [CTYPE_NULL] = CACHE_TYPE_NOCACHE, [CTYPE_DATA] = CACHE_TYPE_DATA, [CTYPE_INST] = CACHE_TYPE_INST, [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED, }; static void amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, union _cpuid4_leaf_ebx *ebx, union _cpuid4_leaf_ecx *ecx) { unsigned dummy; unsigned line_size, lines_per_tag, assoc, size_in_kb; union l1_cache l1i, l1d; union l2_cache l2; union l3_cache l3; union l1_cache *l1 = &l1d; eax->full = 0; ebx->full = 0; ecx->full = 0; cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val); cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val); switch (leaf) { case 1: l1 = &l1i; fallthrough; case 0: if (!l1->val) return; assoc = assocs[l1->assoc]; line_size = l1->line_size; lines_per_tag = l1->lines_per_tag; size_in_kb = l1->size_in_kb; break; case 2: if (!l2.val) return; assoc = assocs[l2.assoc]; line_size = l2.line_size; lines_per_tag = l2.lines_per_tag; /* cpu_data has errata corrections for K7 applied */ size_in_kb = __this_cpu_read(cpu_info.x86_cache_size); break; case 3: if (!l3.val) return; assoc = assocs[l3.assoc]; line_size = l3.line_size; lines_per_tag = l3.lines_per_tag; size_in_kb = l3.size_encoded * 512; if (boot_cpu_has(X86_FEATURE_AMD_DCM)) { size_in_kb = size_in_kb >> 1; assoc = assoc >> 1; } break; default: return; } eax->split.is_self_initializing = 1; eax->split.type = types[leaf]; eax->split.level = levels[leaf]; eax->split.num_threads_sharing = 0; eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1; if (assoc == 0xffff) eax->split.is_fully_associative = 1; ebx->split.coherency_line_size = line_size - 1; ebx->split.ways_of_associativity = assoc - 1; ebx->split.physical_line_partition = lines_per_tag - 1; ecx->split.number_of_sets = (size_in_kb * 1024) / line_size / (ebx->split.ways_of_associativity + 1) - 1; } #if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS) /* * L3 cache descriptors */ static void amd_calc_l3_indices(struct amd_northbridge *nb) { struct amd_l3_cache *l3 = &nb->l3_cache; unsigned int sc0, sc1, sc2, sc3; u32 val = 0; pci_read_config_dword(nb->misc, 0x1C4, &val); /* calculate subcache sizes */ l3->subcaches[0] = sc0 = !(val & BIT(0)); l3->subcaches[1] = sc1 = !(val & BIT(4)); if (boot_cpu_data.x86 == 0x15) { l3->subcaches[0] = sc0 += !(val & BIT(1)); l3->subcaches[1] = sc1 += !(val & BIT(5)); } l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9)); l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13)); 
l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; } /* * check whether a slot used for disabling an L3 index is occupied. * @l3: L3 cache descriptor * @slot: slot number (0..1) * * @returns: the disabled index if used or negative value if slot free. */ static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot) { unsigned int reg = 0; pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg); /* check whether this slot is activated already */ if (reg & (3UL << 30)) return reg & 0xfff; return -1; } static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf, unsigned int slot) { int index; struct amd_northbridge *nb = this_leaf->priv; index = amd_get_l3_disable_slot(nb, slot); if (index >= 0) return sprintf(buf, "%d\n", index); return sprintf(buf, "FREE\n"); } #define SHOW_CACHE_DISABLE(slot) \ static ssize_t \ cache_disable_##slot##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct cacheinfo *this_leaf = dev_get_drvdata(dev); \ return show_cache_disable(this_leaf, buf, slot); \ } SHOW_CACHE_DISABLE(0) SHOW_CACHE_DISABLE(1) static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu, unsigned slot, unsigned long idx) { int i; idx |= BIT(30); /* * disable index in all 4 subcaches */ for (i = 0; i < 4; i++) { u32 reg = idx | (i << 20); if (!nb->l3_cache.subcaches[i]) continue; pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg); /* * We need to WBINVD on a core on the node containing the L3 * cache which indices we disable therefore a simple wbinvd() * is not sufficient. */ wbinvd_on_cpu(cpu); reg |= BIT(31); pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg); } } /* * disable a L3 cache index by using a disable-slot * * @l3: L3 cache descriptor * @cpu: A CPU on the node containing the L3 cache * @slot: slot number (0..1) * @index: index to disable * * @return: 0 on success, error status on failure */ static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot, unsigned long index) { int ret = 0; /* check if @slot is already used or the index is already disabled */ ret = amd_get_l3_disable_slot(nb, slot); if (ret >= 0) return -EEXIST; if (index > nb->l3_cache.indices) return -EINVAL; /* check whether the other slot has disabled the same index already */ if (index == amd_get_l3_disable_slot(nb, !slot)) return -EEXIST; amd_l3_disable_index(nb, cpu, slot, index); return 0; } static ssize_t store_cache_disable(struct cacheinfo *this_leaf, const char *buf, size_t count, unsigned int slot) { unsigned long val = 0; int cpu, err = 0; struct amd_northbridge *nb = this_leaf->priv; if (!capable(CAP_SYS_ADMIN)) return -EPERM; cpu = cpumask_first(&this_leaf->shared_cpu_map); if (kstrtoul(buf, 10, &val) < 0) return -EINVAL; err = amd_set_l3_disable_slot(nb, cpu, slot, val); if (err) { if (err == -EEXIST) pr_warn("L3 slot %d in use/index already disabled!\n", slot); return err; } return count; } #define STORE_CACHE_DISABLE(slot) \ static ssize_t \ cache_disable_##slot##_store(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct cacheinfo *this_leaf = dev_get_drvdata(dev); \ return store_cache_disable(this_leaf, buf, count, slot); \ } STORE_CACHE_DISABLE(0) STORE_CACHE_DISABLE(1) static ssize_t subcaches_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cacheinfo *this_leaf = dev_get_drvdata(dev); int cpu = cpumask_first(&this_leaf->shared_cpu_map); return sprintf(buf, "%x\n", amd_get_subcaches(cpu)); } static ssize_t 
subcaches_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cacheinfo *this_leaf = dev_get_drvdata(dev); int cpu = cpumask_first(&this_leaf->shared_cpu_map); unsigned long val; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (kstrtoul(buf, 16, &val) < 0) return -EINVAL; if (amd_set_subcaches(cpu, val)) return -EINVAL; return count; } static DEVICE_ATTR_RW(cache_disable_0); static DEVICE_ATTR_RW(cache_disable_1); static DEVICE_ATTR_RW(subcaches); static umode_t cache_private_attrs_is_visible(struct kobject *kobj, struct attribute *attr, int unused) { struct device *dev = kobj_to_dev(kobj); struct cacheinfo *this_leaf = dev_get_drvdata(dev); umode_t mode = attr->mode; if (!this_leaf->priv) return 0; if ((attr == &dev_attr_subcaches.attr) && amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) return mode; if ((attr == &dev_attr_cache_disable_0.attr || attr == &dev_attr_cache_disable_1.attr) && amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) return mode; return 0; } static struct attribute_group cache_private_group = { .is_visible = cache_private_attrs_is_visible, }; static void init_amd_l3_attrs(void) { int n = 1; static struct attribute **amd_l3_attrs; if (amd_l3_attrs) /* already initialized */ return; if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) n += 2; if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) n += 1; amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL); if (!amd_l3_attrs) return; n = 0; if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) { amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr; amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr; } if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) amd_l3_attrs[n++] = &dev_attr_subcaches.attr; cache_private_group.attrs = amd_l3_attrs; } const struct attribute_group * cache_get_priv_group(struct cacheinfo *this_leaf) { struct amd_northbridge *nb = this_leaf->priv; if (this_leaf->level < 3 || !nb) return NULL; if (nb && nb->l3_cache.indices) init_amd_l3_attrs(); return &cache_private_group; } static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) { int node; /* only for L3, and not in virtualized environments */ if (index < 3) return; node = topology_die_id(smp_processor_id()); this_leaf->nb = node_to_amd_nb(node); if (this_leaf->nb && !this_leaf->nb->l3_cache.indices) amd_calc_l3_indices(this_leaf->nb); } #else #define amd_init_l3_cache(x, y) #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ static int cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf) { union _cpuid4_leaf_eax eax; union _cpuid4_leaf_ebx ebx; union _cpuid4_leaf_ecx ecx; unsigned edx; if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { if (boot_cpu_has(X86_FEATURE_TOPOEXT)) cpuid_count(0x8000001d, index, &eax.full, &ebx.full, &ecx.full, &edx); else amd_cpuid4(index, &eax, &ebx, &ecx); amd_init_l3_cache(this_leaf, index); } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { cpuid_count(0x8000001d, index, &eax.full, &ebx.full, &ecx.full, &edx); amd_init_l3_cache(this_leaf, index); } else { cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); } if (eax.split.type == CTYPE_NULL) return -EIO; /* better error ? 
*/ this_leaf->eax = eax; this_leaf->ebx = ebx; this_leaf->ecx = ecx; this_leaf->size = (ecx.split.number_of_sets + 1) * (ebx.split.coherency_line_size + 1) * (ebx.split.physical_line_partition + 1) * (ebx.split.ways_of_associativity + 1); return 0; } static int find_num_cache_leaves(struct cpuinfo_x86 *c) { unsigned int eax, ebx, ecx, edx, op; union _cpuid4_leaf_eax cache_eax; int i = -1; if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) op = 0x8000001d; else op = 4; do { ++i; /* Do cpuid(op) loop to find out num_cache_leaves */ cpuid_count(op, i, &eax, &ebx, &ecx, &edx); cache_eax.full = eax; } while (cache_eax.split.type != CTYPE_NULL); return i; } void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu) { /* * We may have multiple LLCs if L3 caches exist, so check if we * have an L3 cache by looking at the L3 cache CPUID leaf. */ if (!cpuid_edx(0x80000006)) return; if (c->x86 < 0x17) { /* LLC is at the node level. */ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id; } else if (c->x86 == 0x17 && c->x86_model <= 0x1F) { /* * LLC is at the core complex level. * Core complex ID is ApicId[3] for these processors. */ per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; } else { /* * LLC ID is calculated from the number of threads sharing the * cache. * */ u32 eax, ebx, ecx, edx, num_sharing_cache = 0; u32 llc_index = find_num_cache_leaves(c) - 1; cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx); if (eax) num_sharing_cache = ((eax >> 14) & 0xfff) + 1; if (num_sharing_cache) { int bits = get_count_order(num_sharing_cache); per_cpu(cpu_llc_id, cpu) = c->apicid >> bits; } } } void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu) { /* * We may have multiple LLCs if L3 caches exist, so check if we * have an L3 cache by looking at the L3 cache CPUID leaf. */ if (!cpuid_edx(0x80000006)) return; /* * LLC is at the core complex level. * Core complex ID is ApicId[3] for these processors. 
*/ per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; } void init_amd_cacheinfo(struct cpuinfo_x86 *c) { if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { num_cache_leaves = find_num_cache_leaves(c); } else if (c->extended_cpuid_level >= 0x80000006) { if (cpuid_edx(0x80000006) & 0xf000) num_cache_leaves = 4; else num_cache_leaves = 3; } } void init_hygon_cacheinfo(struct cpuinfo_x86 *c) { num_cache_leaves = find_num_cache_leaves(c); } void init_intel_cacheinfo(struct cpuinfo_x86 *c) { /* Cache sizes */ unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0; unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; #ifdef CONFIG_SMP unsigned int cpu = c->cpu_index; #endif if (c->cpuid_level > 3) { static int is_initialized; if (is_initialized == 0) { /* Init num_cache_leaves from boot CPU */ num_cache_leaves = find_num_cache_leaves(c); is_initialized++; } /* * Whenever possible use cpuid(4), deterministic cache * parameters cpuid leaf to find the cache details */ for (i = 0; i < num_cache_leaves; i++) { struct _cpuid4_info_regs this_leaf = {}; int retval; retval = cpuid4_cache_lookup_regs(i, &this_leaf); if (retval < 0) continue; switch (this_leaf.eax.split.level) { case 1: if (this_leaf.eax.split.type == CTYPE_DATA) new_l1d = this_leaf.size/1024; else if (this_leaf.eax.split.type == CTYPE_INST) new_l1i = this_leaf.size/1024; break; case 2: new_l2 = this_leaf.size/1024; num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; index_msb = get_count_order(num_threads_sharing); l2_id = c->apicid & ~((1 << index_msb) - 1); break; case 3: new_l3 = this_leaf.size/1024; num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; index_msb = get_count_order(num_threads_sharing); l3_id = c->apicid & ~((1 << index_msb) - 1); break; default: break; } } } /* * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for * trace cache */ if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) { /* supports eax=2 call */ int j, n; unsigned int regs[4]; unsigned char *dp = (unsigned char *)regs; int only_trace = 0; if (num_cache_leaves != 0 && c->x86 == 15) only_trace = 1; /* Number of times to iterate */ n = cpuid_eax(2) & 0xFF; for (i = 0 ; i < n ; i++) { cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); /* If bit 31 is set, this is an unknown format */ for (j = 0 ; j < 3 ; j++) if (regs[j] & (1 << 31)) regs[j] = 0; /* Byte 0 is level count, not a descriptor */ for (j = 1 ; j < 16 ; j++) { unsigned char des = dp[j]; unsigned char k = 0; /* look up this descriptor in the table */ while (cache_table[k].descriptor != 0) { if (cache_table[k].descriptor == des) { if (only_trace && cache_table[k].cache_type != LVL_TRACE) break; switch (cache_table[k].cache_type) { case LVL_1_INST: l1i += cache_table[k].size; break; case LVL_1_DATA: l1d += cache_table[k].size; break; case LVL_2: l2 += cache_table[k].size; break; case LVL_3: l3 += cache_table[k].size; break; } break; } k++; } } } } if (new_l1d) l1d = new_l1d; if (new_l1i) l1i = new_l1i; if (new_l2) { l2 = new_l2; #ifdef CONFIG_SMP per_cpu(cpu_llc_id, cpu) = l2_id; per_cpu(cpu_l2c_id, cpu) = l2_id; #endif } if (new_l3) { l3 = new_l3; #ifdef CONFIG_SMP per_cpu(cpu_llc_id, cpu) = l3_id; #endif } #ifdef CONFIG_SMP /* * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in * turns means that the only possibility is SMT (as indicated in * cpuid1). 
Since cpuid2 doesn't specify shared caches, and we know * that SMT shares all caches, we can unconditionally set cpu_llc_id to * c->phys_proc_id. */ if (per_cpu(cpu_llc_id, cpu) == BAD_APICID) per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; #endif c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); if (!l2) cpu_detect_cache_sizes(c); } static int __cache_amd_cpumap_setup(unsigned int cpu, int index, struct _cpuid4_info_regs *base) { struct cpu_cacheinfo *this_cpu_ci; struct cacheinfo *this_leaf; int i, sibling; /* * For L3, always use the pre-calculated cpu_llc_shared_mask * to derive shared_cpu_map. */ if (index == 3) { for_each_cpu(i, cpu_llc_shared_mask(cpu)) { this_cpu_ci = get_cpu_cacheinfo(i); if (!this_cpu_ci->info_list) continue; this_leaf = this_cpu_ci->info_list + index; for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) { if (!cpu_online(sibling)) continue; cpumask_set_cpu(sibling, &this_leaf->shared_cpu_map); } } } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { unsigned int apicid, nshared, first, last; nshared = base->eax.split.num_threads_sharing + 1; apicid = cpu_data(cpu).apicid; first = apicid - (apicid % nshared); last = first + nshared - 1; for_each_online_cpu(i) { this_cpu_ci = get_cpu_cacheinfo(i); if (!this_cpu_ci->info_list) continue; apicid = cpu_data(i).apicid; if ((apicid < first) || (apicid > last)) continue; this_leaf = this_cpu_ci->info_list + index; for_each_online_cpu(sibling) { apicid = cpu_data(sibling).apicid; if ((apicid < first) || (apicid > last)) continue; cpumask_set_cpu(sibling, &this_leaf->shared_cpu_map); } } } else return 0; return 1; } static void __cache_cpumap_setup(unsigned int cpu, int index, struct _cpuid4_info_regs *base) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct cacheinfo *this_leaf, *sibling_leaf; unsigned long num_threads_sharing; int index_msb, i; struct cpuinfo_x86 *c = &cpu_data(cpu); if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) { if (__cache_amd_cpumap_setup(cpu, index, base)) return; } this_leaf = this_cpu_ci->info_list + index; num_threads_sharing = 1 + base->eax.split.num_threads_sharing; cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); if (num_threads_sharing == 1) return; index_msb = get_count_order(num_threads_sharing); for_each_online_cpu(i) if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) { struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i); if (i == cpu || !sib_cpu_ci->info_list) continue;/* skip if itself or no cacheinfo */ sibling_leaf = sib_cpu_ci->info_list + index; cpumask_set_cpu(i, &this_leaf->shared_cpu_map); cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map); } } static void ci_leaf_init(struct cacheinfo *this_leaf, struct _cpuid4_info_regs *base) { this_leaf->id = base->id; this_leaf->attributes = CACHE_ID; this_leaf->level = base->eax.split.level; this_leaf->type = cache_type_map[base->eax.split.type]; this_leaf->coherency_line_size = base->ebx.split.coherency_line_size + 1; this_leaf->ways_of_associativity = base->ebx.split.ways_of_associativity + 1; this_leaf->size = base->size; this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1; this_leaf->physical_line_partition = base->ebx.split.physical_line_partition + 1; this_leaf->priv = base->nb; } int init_cache_level(unsigned int cpu) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); if (!num_cache_leaves) return -ENOENT; if (!this_cpu_ci) return -EINVAL; this_cpu_ci->num_levels = 3; this_cpu_ci->num_leaves = num_cache_leaves; return 0; } /* * The max shared threads number 
comes from CPUID.4:EAX[25-14] with input * ECX as cache index. Then right shift apicid by the number's order to get * cache id for this cache node. */ static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs) { struct cpuinfo_x86 *c = &cpu_data(cpu); unsigned long num_threads_sharing; int index_msb; num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing; index_msb = get_count_order(num_threads_sharing); id4_regs->id = c->apicid >> index_msb; } int populate_cache_leaves(unsigned int cpu) { unsigned int idx, ret; struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct cacheinfo *this_leaf = this_cpu_ci->info_list; struct _cpuid4_info_regs id4_regs = {}; for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { ret = cpuid4_cache_lookup_regs(idx, &id4_regs); if (ret) return ret; get_cache_id(cpu, &id4_regs); ci_leaf_init(this_leaf++, &id4_regs); __cache_cpumap_setup(cpu, idx, &id4_regs); } this_cpu_ci->cpu_map_populated = true; return 0; } /* * Disable and enable caches. Needed for changing MTRRs and the PAT MSR. * * Since we are disabling the cache don't allow any interrupts, * they would run extremely slow and would only increase the pain. * * The caller must ensure that local interrupts are disabled and * are reenabled after cache_enable() has been called. */ static unsigned long saved_cr4; static DEFINE_RAW_SPINLOCK(cache_disable_lock); void cache_disable(void) __acquires(cache_disable_lock) { unsigned long cr0; /* * Note that this is not ideal * since the cache is only flushed/disabled for this CPU while the * MTRRs are changed, but changing this requires more invasive * changes to the way the kernel boots */ raw_spin_lock(&cache_disable_lock); /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ cr0 = read_cr0() | X86_CR0_CD; write_cr0(cr0); /* * Cache flushing is the most time-consuming step when programming * the MTRRs. Fortunately, as per the Intel Software Development * Manual, we can skip it if the processor supports cache self- * snooping. */ if (!static_cpu_has(X86_FEATURE_SELFSNOOP)) wbinvd(); /* Save value of CR4 and clear Page Global Enable (bit 7) */ if (cpu_feature_enabled(X86_FEATURE_PGE)) { saved_cr4 = __read_cr4(); __write_cr4(saved_cr4 & ~X86_CR4_PGE); } /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); flush_tlb_local(); if (cpu_feature_enabled(X86_FEATURE_MTRR)) mtrr_disable(); /* Again, only flush caches if we have to. 
*/ if (!static_cpu_has(X86_FEATURE_SELFSNOOP)) wbinvd(); } void cache_enable(void) __releases(cache_disable_lock) { /* Flush TLBs (no need to flush caches - they are disabled) */ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); flush_tlb_local(); if (cpu_feature_enabled(X86_FEATURE_MTRR)) mtrr_enable(); /* Enable caches */ write_cr0(read_cr0() & ~X86_CR0_CD); /* Restore value of CR4 */ if (cpu_feature_enabled(X86_FEATURE_PGE)) __write_cr4(saved_cr4); raw_spin_unlock(&cache_disable_lock); } static void cache_cpu_init(void) { unsigned long flags; local_irq_save(flags); cache_disable(); if (memory_caching_control & CACHE_MTRR) mtrr_generic_set_state(); if (memory_caching_control & CACHE_PAT) pat_cpu_init(); cache_enable(); local_irq_restore(flags); } static bool cache_aps_delayed_init = true; void set_cache_aps_delayed_init(bool val) { cache_aps_delayed_init = val; } bool get_cache_aps_delayed_init(void) { return cache_aps_delayed_init; } static int cache_rendezvous_handler(void *unused) { if (get_cache_aps_delayed_init() || !cpu_online(smp_processor_id())) cache_cpu_init(); return 0; } void __init cache_bp_init(void) { mtrr_bp_init(); pat_bp_init(); if (memory_caching_control) cache_cpu_init(); } void cache_bp_restore(void) { if (memory_caching_control) cache_cpu_init(); } static int cache_ap_online(unsigned int cpu) { cpumask_set_cpu(cpu, cpu_cacheinfo_mask); if (!memory_caching_control || get_cache_aps_delayed_init()) return 0; /* * Ideally we should hold mtrr_mutex here to avoid MTRR entries * changed, but this routine will be called in CPU boot time, * holding the lock breaks it. * * This routine is called in two cases: * * 1. very early time of software resume, when there absolutely * isn't MTRR entry changes; * * 2. CPU hotadd time. We let mtrr_add/del_page hold cpuhotplug * lock to prevent MTRR entry changes */ stop_machine_from_inactive_cpu(cache_rendezvous_handler, NULL, cpu_cacheinfo_mask); return 0; } static int cache_ap_offline(unsigned int cpu) { cpumask_clear_cpu(cpu, cpu_cacheinfo_mask); return 0; } /* * Delayed cache initialization for all AP's */ void cache_aps_init(void) { if (!memory_caching_control || !get_cache_aps_delayed_init()) return; stop_machine(cache_rendezvous_handler, NULL, cpu_online_mask); set_cache_aps_delayed_init(false); } static int __init cache_ap_register(void) { zalloc_cpumask_var(&cpu_cacheinfo_mask, GFP_KERNEL); cpumask_set_cpu(smp_processor_id(), cpu_cacheinfo_mask); cpuhp_setup_state_nocalls(CPUHP_AP_CACHECTRL_STARTING, "x86/cachectrl:starting", cache_ap_online, cache_ap_offline); return 0; } early_initcall(cache_ap_register);
linux-master
arch/x86/kernel/cpu/cacheinfo.c
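The size computed in cpuid4_cache_lookup_regs() is simply sets * line size * partitions * ways, each taken from the deterministic cache parameters leaf. The sketch below performs the same enumeration from user space for the Intel case (leaf 4); AMD and Hygon would use leaf 0x8000001d as the kernel code does. The field offsets follow the union _cpuid4_leaf_* bitfields above, and a reasonably recent <cpuid.h> providing __get_cpuid_count is assumed.

/*
 * Walk the deterministic cache parameters leaf and recompute each
 * cache size the way cpuid4_cache_lookup_regs() does:
 * (sets) * (line size) * (partitions) * (ways).
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	for (unsigned int i = 0; ; i++) {
		if (!__get_cpuid_count(4, i, &eax, &ebx, &ecx, &edx))
			break;

		unsigned int type = eax & 0x1f;		/* CTYPE_* */
		if (type == 0)				/* CTYPE_NULL: no more caches */
			break;

		unsigned int level = (eax >> 5) & 0x7;
		unsigned int ways  = ((ebx >> 22) & 0x3ff) + 1;
		unsigned int parts = ((ebx >> 12) & 0x3ff) + 1;
		unsigned int line  = (ebx & 0xfff) + 1;
		unsigned int sets  = ecx + 1;

		printf("L%u %s: %u KB (%u-way, %u-byte lines, %u sets)\n",
		       level,
		       type == 1 ? "data" : type == 2 ? "inst" : "unified",
		       ways * parts * line * sets / 1024, ways, line, sets);
	}
	return 0;
}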
/* Declare dependencies between CPUIDs */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <asm/cpufeature.h> struct cpuid_dep { unsigned int feature; unsigned int depends; }; /* * Table of CPUID features that depend on others. * * This only includes dependencies that can be usefully disabled, not * features part of the base set (like FPU). * * Note this all is not __init / __initdata because it can be * called from cpu hotplug. It shouldn't do anything in this case, * but it's difficult to tell that to the init reference checker. */ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_FXSR, X86_FEATURE_FPU }, { X86_FEATURE_XSAVEOPT, X86_FEATURE_XSAVE }, { X86_FEATURE_XSAVEC, X86_FEATURE_XSAVE }, { X86_FEATURE_XSAVES, X86_FEATURE_XSAVE }, { X86_FEATURE_AVX, X86_FEATURE_XSAVE }, { X86_FEATURE_PKU, X86_FEATURE_XSAVE }, { X86_FEATURE_MPX, X86_FEATURE_XSAVE }, { X86_FEATURE_XGETBV1, X86_FEATURE_XSAVE }, { X86_FEATURE_CMOV, X86_FEATURE_FXSR }, { X86_FEATURE_MMX, X86_FEATURE_FXSR }, { X86_FEATURE_MMXEXT, X86_FEATURE_MMX }, { X86_FEATURE_FXSR_OPT, X86_FEATURE_FXSR }, { X86_FEATURE_XSAVE, X86_FEATURE_FXSR }, { X86_FEATURE_XMM, X86_FEATURE_FXSR }, { X86_FEATURE_XMM2, X86_FEATURE_XMM }, { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, { X86_FEATURE_XMM4_1, X86_FEATURE_XMM2 }, { X86_FEATURE_XMM4_2, X86_FEATURE_XMM2 }, { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, { X86_FEATURE_PCLMULQDQ, X86_FEATURE_XMM2 }, { X86_FEATURE_SSSE3, X86_FEATURE_XMM2, }, { X86_FEATURE_F16C, X86_FEATURE_XMM2, }, { X86_FEATURE_AES, X86_FEATURE_XMM2 }, { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 }, { X86_FEATURE_FMA, X86_FEATURE_AVX }, { X86_FEATURE_AVX2, X86_FEATURE_AVX, }, { X86_FEATURE_AVX512F, X86_FEATURE_AVX, }, { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512PF, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512ER, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512CD, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512DQ, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512BW, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL }, { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL }, { X86_FEATURE_VAES, X86_FEATURE_AVX512VL }, { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL }, { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL }, { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL }, { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512_VP2INTERSECT, X86_FEATURE_AVX512VL }, { X86_FEATURE_CQM_OCCUP_LLC, X86_FEATURE_CQM_LLC }, { X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC }, { X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC }, { X86_FEATURE_BMEC, X86_FEATURE_CQM_MBM_TOTAL }, { X86_FEATURE_BMEC, X86_FEATURE_CQM_MBM_LOCAL }, { X86_FEATURE_AVX512_BF16, X86_FEATURE_AVX512VL }, { X86_FEATURE_AVX512_FP16, X86_FEATURE_AVX512BW }, { X86_FEATURE_ENQCMD, X86_FEATURE_XSAVES }, { X86_FEATURE_PER_THREAD_MBA, X86_FEATURE_MBA }, { X86_FEATURE_SGX_LC, X86_FEATURE_SGX }, { X86_FEATURE_SGX1, X86_FEATURE_SGX }, { X86_FEATURE_SGX2, X86_FEATURE_SGX1 }, { X86_FEATURE_SGX_EDECCSSA, X86_FEATURE_SGX1 }, { X86_FEATURE_XFD, X86_FEATURE_XSAVES }, { X86_FEATURE_XFD, X86_FEATURE_XGETBV1 }, { X86_FEATURE_AMX_TILE, X86_FEATURE_XFD }, { X86_FEATURE_SHSTK, X86_FEATURE_XSAVES }, {} }; static inline void clear_feature(struct cpuinfo_x86 *c, unsigned int feature) { /* * Note: This could use the non atomic __*_bit() 
variants, but the * rest of the cpufeature code uses atomics as well, so keep it for * consistency. Cleanup all of it separately. */ if (!c) { clear_cpu_cap(&boot_cpu_data, feature); set_bit(feature, (unsigned long *)cpu_caps_cleared); } else { clear_bit(feature, (unsigned long *)c->x86_capability); } } /* Take the capabilities and the BUG bits into account */ #define MAX_FEATURE_BITS ((NCAPINTS + NBUGINTS) * sizeof(u32) * 8) static void do_clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature) { DECLARE_BITMAP(disable, MAX_FEATURE_BITS); const struct cpuid_dep *d; bool changed; if (WARN_ON(feature >= MAX_FEATURE_BITS)) return; clear_feature(c, feature); /* Collect all features to disable, handling dependencies */ memset(disable, 0, sizeof(disable)); __set_bit(feature, disable); /* Loop until we get a stable state. */ do { changed = false; for (d = cpuid_deps; d->feature; d++) { if (!test_bit(d->depends, disable)) continue; if (__test_and_set_bit(d->feature, disable)) continue; changed = true; clear_feature(c, d->feature); } } while (changed); } void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature) { do_clear_cpu_cap(c, feature); } void setup_clear_cpu_cap(unsigned int feature) { do_clear_cpu_cap(NULL, feature); }
linux-master
arch/x86/kernel/cpu/cpuid-deps.c
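do_clear_cpu_cap() resolves transitive dependencies by looping over the table until no further feature gets disabled. The stand-alone sketch below demonstrates that fixed-point loop with a made-up five-entry feature set; the feature names and the mini dependency table are hypothetical and exist only to illustrate the algorithm.

/*
 * Stand-alone illustration of the fixed-point loop used by
 * do_clear_cpu_cap(): disabling one feature transitively disables
 * everything that depends on it.  The feature IDs and dependency
 * table below are invented for the example.
 */
#include <stdbool.h>
#include <stdio.h>

enum { FXSR, XSAVE, AVX, AVX2, AVX512F, NFEAT };

static const char *names[NFEAT] = { "fxsr", "xsave", "avx", "avx2", "avx512f" };

/* { feature, depends }: feature requires depends, as in struct cpuid_dep. */
static const int deps[][2] = {
	{ XSAVE,   FXSR  },
	{ AVX,     XSAVE },
	{ AVX2,    AVX   },
	{ AVX512F, AVX   },
	{ -1, -1 },
};

static void clear_with_deps(bool *disabled, int feature)
{
	bool changed;

	disabled[feature] = true;
	do {		/* iterate until no new feature gets disabled */
		changed = false;
		for (int i = 0; deps[i][0] >= 0; i++) {
			if (!disabled[deps[i][1]] || disabled[deps[i][0]])
				continue;
			disabled[deps[i][0]] = true;
			changed = true;
		}
	} while (changed);
}

int main(void)
{
	bool disabled[NFEAT] = { false };

	clear_with_deps(disabled, XSAVE);	/* e.g. "noxsave" on the cmdline */
	for (int i = 0; i < NFEAT; i++)
		if (disabled[i])
			printf("cleared: %s\n", names[i]);
	return 0;
}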
// SPDX-License-Identifier: GPL-2.0 /* * Intel Performance and Energy Bias Hint support. * * Copyright (C) 2019 Intel Corporation * * Author: * Rafael J. Wysocki <[email protected]> */ #include <linux/cpuhotplug.h> #include <linux/cpu.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/syscore_ops.h> #include <linux/pm.h> #include <asm/cpu_device_id.h> #include <asm/cpufeature.h> #include <asm/msr.h> /** * DOC: overview * * The Performance and Energy Bias Hint (EPB) allows software to specify its * preference with respect to the power-performance tradeoffs present in the * processor. Generally, the EPB is expected to be set by user space (directly * via sysfs or with the help of the x86_energy_perf_policy tool), but there are * two reasons for the kernel to update it. * * First, there are systems where the platform firmware resets the EPB during * system-wide transitions from sleep states back into the working state * effectively causing the previous EPB updates by user space to be lost. * Thus the kernel needs to save the current EPB values for all CPUs during * system-wide transitions to sleep states and restore them on the way back to * the working state. That can be achieved by saving EPB for secondary CPUs * when they are taken offline during transitions into system sleep states and * for the boot CPU in a syscore suspend operation, so that it can be restored * for the boot CPU in a syscore resume operation and for the other CPUs when * they are brought back online. However, CPUs that are already offline when * a system-wide PM transition is started are not taken offline again, but their * EPB values may still be reset by the platform firmware during the transition, * so in fact it is necessary to save the EPB of any CPU taken offline and to * restore it when the given CPU goes back online at all times. * * Second, on many systems the initial EPB value coming from the platform * firmware is 0 ('performance') and at least on some of them that is because * the platform firmware does not initialize EPB at all with the assumption that * the OS will do that anyway. That sometimes is problematic, as it may cause * the system battery to drain too fast, for example, so it is better to adjust * it on CPU bring-up and if the initial EPB value for a given CPU is 0, the * kernel changes it to 6 ('normal'). */ static DEFINE_PER_CPU(u8, saved_epb); #define EPB_MASK 0x0fULL #define EPB_SAVED 0x10ULL #define MAX_EPB EPB_MASK enum energy_perf_value_index { EPB_INDEX_PERFORMANCE, EPB_INDEX_BALANCE_PERFORMANCE, EPB_INDEX_NORMAL, EPB_INDEX_BALANCE_POWERSAVE, EPB_INDEX_POWERSAVE, }; static u8 energ_perf_values[] = { [EPB_INDEX_PERFORMANCE] = ENERGY_PERF_BIAS_PERFORMANCE, [EPB_INDEX_BALANCE_PERFORMANCE] = ENERGY_PERF_BIAS_BALANCE_PERFORMANCE, [EPB_INDEX_NORMAL] = ENERGY_PERF_BIAS_NORMAL, [EPB_INDEX_BALANCE_POWERSAVE] = ENERGY_PERF_BIAS_BALANCE_POWERSAVE, [EPB_INDEX_POWERSAVE] = ENERGY_PERF_BIAS_POWERSAVE, }; static int intel_epb_save(void) { u64 epb; rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); /* * Ensure that saved_epb will always be nonzero after this write even if * the EPB value read from the MSR is 0. 
*/ this_cpu_write(saved_epb, (epb & EPB_MASK) | EPB_SAVED); return 0; } static void intel_epb_restore(void) { u64 val = this_cpu_read(saved_epb); u64 epb; rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); if (val) { val &= EPB_MASK; } else { /* * Because intel_epb_save() has not run for the current CPU yet, * it is going online for the first time, so if its EPB value is * 0 ('performance') at this point, assume that it has not been * initialized by the platform firmware and set it to 6 * ('normal'). */ val = epb & EPB_MASK; if (val == ENERGY_PERF_BIAS_PERFORMANCE) { val = energ_perf_values[EPB_INDEX_NORMAL]; pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n"); } } wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val); } static struct syscore_ops intel_epb_syscore_ops = { .suspend = intel_epb_save, .resume = intel_epb_restore, }; static const char * const energy_perf_strings[] = { [EPB_INDEX_PERFORMANCE] = "performance", [EPB_INDEX_BALANCE_PERFORMANCE] = "balance-performance", [EPB_INDEX_NORMAL] = "normal", [EPB_INDEX_BALANCE_POWERSAVE] = "balance-power", [EPB_INDEX_POWERSAVE] = "power", }; static ssize_t energy_perf_bias_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int cpu = dev->id; u64 epb; int ret; ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); if (ret < 0) return ret; return sprintf(buf, "%llu\n", epb); } static ssize_t energy_perf_bias_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int cpu = dev->id; u64 epb, val; int ret; ret = __sysfs_match_string(energy_perf_strings, ARRAY_SIZE(energy_perf_strings), buf); if (ret >= 0) val = energ_perf_values[ret]; else if (kstrtou64(buf, 0, &val) || val > MAX_EPB) return -EINVAL; ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); if (ret < 0) return ret; ret = wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val); if (ret < 0) return ret; return count; } static DEVICE_ATTR_RW(energy_perf_bias); static struct attribute *intel_epb_attrs[] = { &dev_attr_energy_perf_bias.attr, NULL }; static const struct attribute_group intel_epb_attr_group = { .name = power_group_name, .attrs = intel_epb_attrs }; static int intel_epb_online(unsigned int cpu) { struct device *cpu_dev = get_cpu_device(cpu); intel_epb_restore(); if (!cpuhp_tasks_frozen) sysfs_merge_group(&cpu_dev->kobj, &intel_epb_attr_group); return 0; } static int intel_epb_offline(unsigned int cpu) { struct device *cpu_dev = get_cpu_device(cpu); if (!cpuhp_tasks_frozen) sysfs_unmerge_group(&cpu_dev->kobj, &intel_epb_attr_group); intel_epb_save(); return 0; } static const struct x86_cpu_id intel_epb_normal[] = { X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, ENERGY_PERF_BIAS_NORMAL_POWERSAVE), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, ENERGY_PERF_BIAS_NORMAL_POWERSAVE), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, ENERGY_PERF_BIAS_NORMAL_POWERSAVE), {} }; static __init int intel_epb_init(void) { const struct x86_cpu_id *id = x86_match_cpu(intel_epb_normal); int ret; if (!boot_cpu_has(X86_FEATURE_EPB)) return -ENODEV; if (id) energ_perf_values[EPB_INDEX_NORMAL] = id->driver_data; ret = cpuhp_setup_state(CPUHP_AP_X86_INTEL_EPB_ONLINE, "x86/intel/epb:online", intel_epb_online, intel_epb_offline); if (ret < 0) goto err_out_online; register_syscore_ops(&intel_epb_syscore_ops); return 0; err_out_online: cpuhp_remove_state(CPUHP_AP_X86_INTEL_EPB_ONLINE); return ret; } subsys_initcall(intel_epb_init);
linux-master
arch/x86/kernel/cpu/intel_epb.c
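The intel_epb.c entry above wires energy_perf_bias into each CPU device's "power" sysfs group, so the hint can be read and written per CPU from user space either as a raw 0..15 value or as one of the named policies. The sketch below is a minimal user-space consumer of that interface; the /sys/devices/system/cpu/cpuN/power/energy_perf_bias path and the choice of cpu0 and "balance-performance" are illustrative assumptions, not taken from the file.

/*
 * Minimal user-space sketch of the sysfs interface created above.
 * The path is an assumption based on the "power" attribute group being
 * merged into the per-CPU device; cpu0 and "balance-performance" are
 * only examples.
 */
#include <stdio.h>

static int read_epb(unsigned int cpu)
{
	char path[96];
	FILE *f;
	int val = -1;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%u/power/energy_perf_bias", cpu);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

static int write_epb(unsigned int cpu, const char *policy_or_value)
{
	char path[96];
	FILE *f;
	int ret;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%u/power/energy_perf_bias", cpu);
	f = fopen(path, "w");
	if (!f)
		return -1;
	/* The store callback accepts a policy name or a raw 0..15 value. */
	ret = fprintf(f, "%s\n", policy_or_value) < 0 ? -1 : 0;
	if (fclose(f))
		ret = -1;
	return ret;
}

int main(void)
{
	printf("cpu0 EPB before: %d\n", read_epb(0));
	if (write_epb(0, "balance-performance"))
		perror("write_epb");
	printf("cpu0 EPB after:  %d\n", read_epb(0));
	return 0;
}

Writing normally requires root; an out-of-range or unrecognized value is rejected by the store callback and surfaces here as a failed write/close.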
// SPDX-License-Identifier: GPL-2.0-only /* * HyperV Detection code. * * Copyright (C) 2010, Novell, Inc. * Author : K. Y. Srinivasan <[email protected]> */ #include <linux/types.h> #include <linux/time.h> #include <linux/clocksource.h> #include <linux/init.h> #include <linux/export.h> #include <linux/hardirq.h> #include <linux/efi.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kexec.h> #include <linux/i8253.h> #include <linux/random.h> #include <asm/processor.h> #include <asm/hypervisor.h> #include <asm/hyperv-tlfs.h> #include <asm/mshyperv.h> #include <asm/desc.h> #include <asm/idtentry.h> #include <asm/irq_regs.h> #include <asm/i8259.h> #include <asm/apic.h> #include <asm/timer.h> #include <asm/reboot.h> #include <asm/nmi.h> #include <clocksource/hyperv_timer.h> #include <asm/numa.h> #include <asm/svm.h> /* Is Linux running as the root partition? */ bool hv_root_partition; /* Is Linux running on nested Microsoft Hypervisor */ bool hv_nested; struct ms_hyperv_info ms_hyperv; /* Used in modules via hv_do_hypercall(): see arch/x86/include/asm/mshyperv.h */ bool hyperv_paravisor_present __ro_after_init; EXPORT_SYMBOL_GPL(hyperv_paravisor_present); #if IS_ENABLED(CONFIG_HYPERV) static inline unsigned int hv_get_nested_reg(unsigned int reg) { if (hv_is_sint_reg(reg)) return reg - HV_REGISTER_SINT0 + HV_REGISTER_NESTED_SINT0; switch (reg) { case HV_REGISTER_SIMP: return HV_REGISTER_NESTED_SIMP; case HV_REGISTER_SIEFP: return HV_REGISTER_NESTED_SIEFP; case HV_REGISTER_SVERSION: return HV_REGISTER_NESTED_SVERSION; case HV_REGISTER_SCONTROL: return HV_REGISTER_NESTED_SCONTROL; case HV_REGISTER_EOM: return HV_REGISTER_NESTED_EOM; default: return reg; } } u64 hv_get_non_nested_register(unsigned int reg) { u64 value; if (hv_is_synic_reg(reg) && ms_hyperv.paravisor_present) hv_ivm_msr_read(reg, &value); else rdmsrl(reg, value); return value; } EXPORT_SYMBOL_GPL(hv_get_non_nested_register); void hv_set_non_nested_register(unsigned int reg, u64 value) { if (hv_is_synic_reg(reg) && ms_hyperv.paravisor_present) { hv_ivm_msr_write(reg, value); /* Write proxy bit via wrmsl instruction */ if (hv_is_sint_reg(reg)) wrmsrl(reg, value | 1 << 20); } else { wrmsrl(reg, value); } } EXPORT_SYMBOL_GPL(hv_set_non_nested_register); u64 hv_get_register(unsigned int reg) { if (hv_nested) reg = hv_get_nested_reg(reg); return hv_get_non_nested_register(reg); } EXPORT_SYMBOL_GPL(hv_get_register); void hv_set_register(unsigned int reg, u64 value) { if (hv_nested) reg = hv_get_nested_reg(reg); hv_set_non_nested_register(reg, value); } EXPORT_SYMBOL_GPL(hv_set_register); static void (*vmbus_handler)(void); static void (*hv_stimer0_handler)(void); static void (*hv_kexec_handler)(void); static void (*hv_crash_handler)(struct pt_regs *regs); DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback) { struct pt_regs *old_regs = set_irq_regs(regs); inc_irq_stat(irq_hv_callback_count); if (vmbus_handler) vmbus_handler(); if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED) apic_eoi(); set_irq_regs(old_regs); } void hv_setup_vmbus_handler(void (*handler)(void)) { vmbus_handler = handler; } void hv_remove_vmbus_handler(void) { /* We have no way to deallocate the interrupt gate */ vmbus_handler = NULL; } /* * Routines to do per-architecture handling of stimer0 * interrupts when in Direct Mode */ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0) { struct pt_regs *old_regs = set_irq_regs(regs); inc_irq_stat(hyperv_stimer0_count); if (hv_stimer0_handler) hv_stimer0_handler(); add_interrupt_randomness(HYPERV_STIMER0_VECTOR); 
apic_eoi(); set_irq_regs(old_regs); } /* For x86/x64, override weak placeholders in hyperv_timer.c */ void hv_setup_stimer0_handler(void (*handler)(void)) { hv_stimer0_handler = handler; } void hv_remove_stimer0_handler(void) { /* We have no way to deallocate the interrupt gate */ hv_stimer0_handler = NULL; } void hv_setup_kexec_handler(void (*handler)(void)) { hv_kexec_handler = handler; } void hv_remove_kexec_handler(void) { hv_kexec_handler = NULL; } void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs)) { hv_crash_handler = handler; } void hv_remove_crash_handler(void) { hv_crash_handler = NULL; } #ifdef CONFIG_KEXEC_CORE static void hv_machine_shutdown(void) { if (kexec_in_progress && hv_kexec_handler) hv_kexec_handler(); /* * Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor * corrupts the old VP Assist Pages and can crash the kexec kernel. */ if (kexec_in_progress && hyperv_init_cpuhp > 0) cpuhp_remove_state(hyperv_init_cpuhp); /* The function calls stop_other_cpus(). */ native_machine_shutdown(); /* Disable the hypercall page when there is only 1 active CPU. */ if (kexec_in_progress) hyperv_cleanup(); } static void hv_machine_crash_shutdown(struct pt_regs *regs) { if (hv_crash_handler) hv_crash_handler(regs); /* The function calls crash_smp_send_stop(). */ native_machine_crash_shutdown(regs); /* Disable the hypercall page when there is only 1 active CPU. */ hyperv_cleanup(); } #endif /* CONFIG_KEXEC_CORE */ #endif /* CONFIG_HYPERV */ static uint32_t __init ms_hyperv_platform(void) { u32 eax; u32 hyp_signature[3]; if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) return 0; cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); if (eax < HYPERV_CPUID_MIN || eax > HYPERV_CPUID_MAX || memcmp("Microsoft Hv", hyp_signature, 12)) return 0; /* HYPERCALL and VP_INDEX MSRs are mandatory for all features. */ eax = cpuid_eax(HYPERV_CPUID_FEATURES); if (!(eax & HV_MSR_HYPERCALL_AVAILABLE)) { pr_warn("x86/hyperv: HYPERCALL MSR not available.\n"); return 0; } if (!(eax & HV_MSR_VP_INDEX_AVAILABLE)) { pr_warn("x86/hyperv: VP_INDEX MSR not available.\n"); return 0; } return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS; } #ifdef CONFIG_X86_LOCAL_APIC /* * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle * unknown NMI on the first CPU which gets it. */ static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs) { static atomic_t nmi_cpu = ATOMIC_INIT(-1); if (!unknown_nmi_panic) return NMI_DONE; if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1) return NMI_HANDLED; return NMI_DONE; } #endif static unsigned long hv_get_tsc_khz(void) { unsigned long freq; rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq); return freq / 1000; } #if defined(CONFIG_SMP) && IS_ENABLED(CONFIG_HYPERV) static void __init hv_smp_prepare_boot_cpu(void) { native_smp_prepare_boot_cpu(); #if defined(CONFIG_X86_64) && defined(CONFIG_PARAVIRT_SPINLOCKS) hv_init_spinlocks(); #endif } static void __init hv_smp_prepare_cpus(unsigned int max_cpus) { #ifdef CONFIG_X86_64 int i; int ret; #endif native_smp_prepare_cpus(max_cpus); /* * Override wakeup_secondary_cpu_64 callback for SEV-SNP * enlightened guest. 
*/ if (!ms_hyperv.paravisor_present && hv_isolation_type_snp()) { apic->wakeup_secondary_cpu_64 = hv_snp_boot_ap; return; } #ifdef CONFIG_X86_64 for_each_present_cpu(i) { if (i == 0) continue; ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i)); BUG_ON(ret); } for_each_present_cpu(i) { if (i == 0) continue; ret = hv_call_create_vp(numa_cpu_node(i), hv_current_partition_id, i, i); BUG_ON(ret); } #endif } #endif /* * When a fully enlightened TDX VM runs on Hyper-V, the firmware sets the * HW_REDUCED flag: refer to acpi_tb_create_local_fadt(). Consequently ttyS0 * interrupts can't work because request_irq() -> ... -> irq_to_desc() returns * NULL for ttyS0. This happens because mp_config_acpi_legacy_irqs() sees a * nr_legacy_irqs() of 0, so it doesn't initialize the array 'mp_irqs[]', and * later setup_IO_APIC_irqs() -> find_irq_entry() fails to find the legacy irqs * from the array and hence doesn't create the necessary irq description info. * * Clone arch/x86/kernel/acpi/boot.c: acpi_generic_reduced_hw_init() here, * except don't change 'legacy_pic', which keeps its default value * 'default_legacy_pic'. This way, mp_config_acpi_legacy_irqs() sees a non-zero * nr_legacy_irqs() and eventually serial console interrupts works properly. */ static void __init reduced_hw_init(void) { x86_init.timers.timer_init = x86_init_noop; x86_init.irqs.pre_vector_init = x86_init_noop; } static void __init ms_hyperv_init_platform(void) { int hv_max_functions_eax; int hv_host_info_eax; int hv_host_info_ebx; int hv_host_info_ecx; int hv_host_info_edx; #ifdef CONFIG_PARAVIRT pv_info.name = "Hyper-V"; #endif /* * Extract the features and hints */ ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES); ms_hyperv.priv_high = cpuid_ebx(HYPERV_CPUID_FEATURES); ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES); ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); hv_max_functions_eax = cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS); pr_info("Hyper-V: privilege flags low 0x%x, high 0x%x, hints 0x%x, misc 0x%x\n", ms_hyperv.features, ms_hyperv.priv_high, ms_hyperv.hints, ms_hyperv.misc_features); ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS); ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS); pr_debug("Hyper-V: max %u virtual processors, %u logical processors\n", ms_hyperv.max_vp_index, ms_hyperv.max_lp_index); /* * Check CPU management privilege. * * To mirror what Windows does we should extract CPU management * features and use the ReservedIdentityBit to detect if Linux is the * root partition. But that requires negotiating CPU management * interface (a process to be finalized). For now, use the privilege * flag as the indicator for running as root. * * Hyper-V should never specify running as root and as a Confidential * VM. But to protect against a compromised/malicious Hyper-V trying * to exploit root behavior to expose Confidential VM memory, ignore * the root partition setting if also a Confidential VM. */ if ((ms_hyperv.priv_high & HV_CPU_MANAGEMENT) && !(ms_hyperv.priv_high & HV_ISOLATION)) { hv_root_partition = true; pr_info("Hyper-V: running as root partition\n"); } if (ms_hyperv.hints & HV_X64_HYPERV_NESTED) { hv_nested = true; pr_info("Hyper-V: running on a nested hypervisor\n"); } /* * Extract host information. 
*/ if (hv_max_functions_eax >= HYPERV_CPUID_VERSION) { hv_host_info_eax = cpuid_eax(HYPERV_CPUID_VERSION); hv_host_info_ebx = cpuid_ebx(HYPERV_CPUID_VERSION); hv_host_info_ecx = cpuid_ecx(HYPERV_CPUID_VERSION); hv_host_info_edx = cpuid_edx(HYPERV_CPUID_VERSION); pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n", hv_host_info_ebx >> 16, hv_host_info_ebx & 0xFFFF, hv_host_info_eax, hv_host_info_edx & 0xFFFFFF, hv_host_info_ecx, hv_host_info_edx >> 24); } if (ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS && ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) { x86_platform.calibrate_tsc = hv_get_tsc_khz; x86_platform.calibrate_cpu = hv_get_tsc_khz; } if (ms_hyperv.priv_high & HV_ISOLATION) { ms_hyperv.isolation_config_a = cpuid_eax(HYPERV_CPUID_ISOLATION_CONFIG); ms_hyperv.isolation_config_b = cpuid_ebx(HYPERV_CPUID_ISOLATION_CONFIG); if (ms_hyperv.shared_gpa_boundary_active) ms_hyperv.shared_gpa_boundary = BIT_ULL(ms_hyperv.shared_gpa_boundary_bits); hyperv_paravisor_present = !!ms_hyperv.paravisor_present; pr_info("Hyper-V: Isolation Config: Group A 0x%x, Group B 0x%x\n", ms_hyperv.isolation_config_a, ms_hyperv.isolation_config_b); if (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP) { static_branch_enable(&isolation_type_snp); } else if (hv_get_isolation_type() == HV_ISOLATION_TYPE_TDX) { static_branch_enable(&isolation_type_tdx); /* A TDX VM must use x2APIC and doesn't use lazy EOI. */ ms_hyperv.hints &= ~HV_X64_APIC_ACCESS_RECOMMENDED; if (!ms_hyperv.paravisor_present) { /* To be supported: more work is required. */ ms_hyperv.features &= ~HV_MSR_REFERENCE_TSC_AVAILABLE; /* HV_REGISTER_CRASH_CTL is unsupported. */ ms_hyperv.misc_features &= ~HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE; /* Don't trust Hyper-V's TLB-flushing hypercalls. */ ms_hyperv.hints &= ~HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; x86_init.acpi.reduced_hw_early_init = reduced_hw_init; } } } if (hv_max_functions_eax >= HYPERV_CPUID_NESTED_FEATURES) { ms_hyperv.nested_features = cpuid_eax(HYPERV_CPUID_NESTED_FEATURES); pr_info("Hyper-V: Nested features: 0x%x\n", ms_hyperv.nested_features); } #ifdef CONFIG_X86_LOCAL_APIC if (ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS && ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) { /* * Get the APIC frequency. */ u64 hv_lapic_frequency; rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency); hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ); lapic_timer_period = hv_lapic_frequency; pr_info("Hyper-V: LAPIC Timer Frequency: %#x\n", lapic_timer_period); } register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST, "hv_nmi_unknown"); #endif #ifdef CONFIG_X86_IO_APIC no_timer_check = 1; #endif #if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE) machine_ops.shutdown = hv_machine_shutdown; machine_ops.crash_shutdown = hv_machine_crash_shutdown; #endif if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) { /* * Writing to synthetic MSR 0x40000118 updates/changes the * guest visible CPUIDs. Setting bit 0 of this MSR enables * guests to report invariant TSC feature through CPUID * instruction, CPUID 0x800000007/EDX, bit 8. See code in * early_init_intel() where this bit is examined. The * setting of this MSR bit should happen before init_intel() * is called. */ wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_EXPOSE_INVARIANT_TSC); setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); } /* * Generation 2 instances don't support reading the NMI status from * 0x61 port. 
*/ if (efi_enabled(EFI_BOOT)) x86_platform.get_nmi_reason = hv_get_nmi_reason; /* * Hyper-V VMs have a PIT emulation quirk such that zeroing the * counter register during PIT shutdown restarts the PIT. So it * continues to interrupt @18.2 HZ. Setting i8253_clear_counter * to false tells pit_shutdown() not to zero the counter so that * the PIT really is shutdown. Generation 2 VMs don't have a PIT, * and setting this value has no effect. */ i8253_clear_counter_on_shutdown = false; #if IS_ENABLED(CONFIG_HYPERV) if ((hv_get_isolation_type() == HV_ISOLATION_TYPE_VBS) || ms_hyperv.paravisor_present) hv_vtom_init(); /* * Setup the hook to get control post apic initialization. */ x86_platform.apic_post_init = hyperv_init; hyperv_setup_mmu_ops(); /* Setup the IDT for hypervisor callback */ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_hyperv_callback); /* Setup the IDT for reenlightenment notifications */ if (ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT) { alloc_intr_gate(HYPERV_REENLIGHTENMENT_VECTOR, asm_sysvec_hyperv_reenlightenment); } /* Setup the IDT for stimer0 */ if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE) { alloc_intr_gate(HYPERV_STIMER0_VECTOR, asm_sysvec_hyperv_stimer0); } # ifdef CONFIG_SMP smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu; if (hv_root_partition || (!ms_hyperv.paravisor_present && hv_isolation_type_snp())) smp_ops.smp_prepare_cpus = hv_smp_prepare_cpus; # endif /* * Hyper-V doesn't provide irq remapping for IO-APIC. To enable x2apic, * set x2apic destination mode to physical mode when x2apic is available * and Hyper-V IOMMU driver makes sure cpus assigned with IO-APIC irqs * have 8-bit APIC id. */ # ifdef CONFIG_X86_X2APIC if (x2apic_supported()) x2apic_phys = 1; # endif /* Register Hyper-V specific clocksource */ hv_init_clocksource(); hv_vtl_init_platform(); #endif /* * TSC should be marked as unstable only after Hyper-V * clocksource has been initialized. This ensures that the * stability of the sched_clock is not altered. */ if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT)) mark_tsc_unstable("running on Hyper-V"); hardlockup_detector_disable(); } static bool __init ms_hyperv_x2apic_available(void) { return x2apic_supported(); } /* * If ms_hyperv_msi_ext_dest_id() returns true, hyperv_prepare_irq_remapping() * returns -ENODEV and the Hyper-V IOMMU driver is not used; instead, the * generic support of the 15-bit APIC ID is used: see __irq_msi_compose_msg(). * * Note: for a VM on Hyper-V, the I/O-APIC is the only device which * (logically) generates MSIs directly to the system APIC irq domain. * There is no HPET, and PCI MSI/MSI-X interrupts are remapped by the * pci-hyperv host bridge. * * Note: for a Hyper-V root partition, this will always return false. * The hypervisor doesn't expose these HYPERV_CPUID_VIRT_STACK_* cpuids by * default, they are implemented as intercepts by the Windows Hyper-V stack. * Even a nested root partition (L2 root) will not get them because the * nested (L1) hypervisor filters them out. 
*/ static bool __init ms_hyperv_msi_ext_dest_id(void) { u32 eax; eax = cpuid_eax(HYPERV_CPUID_VIRT_STACK_INTERFACE); if (eax != HYPERV_VS_INTERFACE_EAX_SIGNATURE) return false; eax = cpuid_eax(HYPERV_CPUID_VIRT_STACK_PROPERTIES); return eax & HYPERV_VS_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE; } #ifdef CONFIG_AMD_MEM_ENCRYPT static void hv_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs) { /* RAX and CPL are already in the GHCB */ ghcb_set_rcx(ghcb, regs->cx); ghcb_set_rdx(ghcb, regs->dx); ghcb_set_r8(ghcb, regs->r8); } static bool hv_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs) { /* No checking of the return state needed */ return true; } #endif const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = { .name = "Microsoft Hyper-V", .detect = ms_hyperv_platform, .type = X86_HYPER_MS_HYPERV, .init.x2apic_available = ms_hyperv_x2apic_available, .init.msi_ext_dest_id = ms_hyperv_msi_ext_dest_id, .init.init_platform = ms_hyperv_init_platform, #ifdef CONFIG_AMD_MEM_ENCRYPT .runtime.sev_es_hcall_prepare = hv_sev_es_hcall_prepare, .runtime.sev_es_hcall_finish = hv_sev_es_hcall_finish, #endif };
linux-master
arch/x86/kernel/cpu/mshyperv.c
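ms_hyperv_platform() in the mshyperv.c entry above detects Hyper-V by checking the hypervisor-present CPUID bit and then comparing the 12-byte "Microsoft Hv" signature returned in EBX/ECX/EDX of the vendor leaf. The fragment below reproduces just that signature check from user space as a sketch; 0x40000000 is used as the conventional value of HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, and the driver's additional HYPERCALL/VP_INDEX feature-bit checks are omitted.

/*
 * User-space sketch of the signature test done by ms_hyperv_platform().
 * 0x40000000 is assumed to be HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS; the
 * real driver also insists on the HYPERCALL and VP_INDEX MSR feature bits.
 */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int a, b, c, d;
	unsigned int sig[3];

	/* CPUID.1:ECX bit 31 is the hypervisor-present bit (X86_FEATURE_HYPERVISOR). */
	if (!__get_cpuid(1, &a, &b, &c, &d) || !(c & (1u << 31))) {
		puts("no hypervisor present");
		return 0;
	}

	/* The hypervisor leaf range sits above the basic maximum, so query it directly. */
	__cpuid(0x40000000, a, sig[0], sig[1], sig[2]);
	if (!memcmp("Microsoft Hv", sig, 12))
		printf("Hyper-V detected, max hypervisor leaf 0x%x\n", a);
	else
		puts("hypervisor is not Hyper-V");
	return 0;
}

As in the kernel code, the signature is compared as 12 raw bytes spread across the three output registers rather than as a NUL-terminated string.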
// SPDX-License-Identifier: GPL-2.0 #include <linux/smp.h> #include <linux/timex.h> #include <linux/string.h> #include <linux/seq_file.h> #include <linux/cpufreq.h> #include <asm/prctl.h> #include <linux/proc_fs.h> #include "cpu.h" #ifdef CONFIG_X86_VMX_FEATURE_NAMES extern const char * const x86_vmx_flags[NVMXINTS*32]; #endif /* * Get CPU information for use by the procfs. */ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, unsigned int cpu) { #ifdef CONFIG_SMP seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); seq_printf(m, "siblings\t: %d\n", cpumask_weight(topology_core_cpumask(cpu))); seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); seq_printf(m, "apicid\t\t: %d\n", c->apicid); seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); #endif } #ifdef CONFIG_X86_32 static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) { seq_printf(m, "fdiv_bug\t: %s\n" "f00f_bug\t: %s\n" "coma_bug\t: %s\n" "fpu\t\t: %s\n" "fpu_exception\t: %s\n" "cpuid level\t: %d\n" "wp\t\t: yes\n", boot_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no", boot_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no", boot_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no", boot_cpu_has(X86_FEATURE_FPU) ? "yes" : "no", boot_cpu_has(X86_FEATURE_FPU) ? "yes" : "no", c->cpuid_level); } #else static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) { seq_printf(m, "fpu\t\t: yes\n" "fpu_exception\t: yes\n" "cpuid level\t: %d\n" "wp\t\t: yes\n", c->cpuid_level); } #endif static int show_cpuinfo(struct seq_file *m, void *v) { struct cpuinfo_x86 *c = v; unsigned int cpu; int i; cpu = c->cpu_index; seq_printf(m, "processor\t: %u\n" "vendor_id\t: %s\n" "cpu family\t: %d\n" "model\t\t: %u\n" "model name\t: %s\n", cpu, c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", c->x86, c->x86_model, c->x86_model_id[0] ? 
c->x86_model_id : "unknown"); if (c->x86_stepping || c->cpuid_level >= 0) seq_printf(m, "stepping\t: %d\n", c->x86_stepping); else seq_puts(m, "stepping\t: unknown\n"); if (c->microcode) seq_printf(m, "microcode\t: 0x%x\n", c->microcode); if (cpu_has(c, X86_FEATURE_TSC)) { unsigned int freq = arch_freq_get_on_cpu(cpu); seq_printf(m, "cpu MHz\t\t: %u.%03u\n", freq / 1000, (freq % 1000)); } /* Cache size */ if (c->x86_cache_size) seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size); show_cpuinfo_core(m, c, cpu); show_cpuinfo_misc(m, c); seq_puts(m, "flags\t\t:"); for (i = 0; i < 32*NCAPINTS; i++) if (cpu_has(c, i) && x86_cap_flags[i] != NULL) seq_printf(m, " %s", x86_cap_flags[i]); #ifdef CONFIG_X86_VMX_FEATURE_NAMES if (cpu_has(c, X86_FEATURE_VMX) && c->vmx_capability[0]) { seq_puts(m, "\nvmx flags\t:"); for (i = 0; i < 32*NVMXINTS; i++) { if (test_bit(i, (unsigned long *)c->vmx_capability) && x86_vmx_flags[i] != NULL) seq_printf(m, " %s", x86_vmx_flags[i]); } } #endif seq_puts(m, "\nbugs\t\t:"); for (i = 0; i < 32*NBUGINTS; i++) { unsigned int bug_bit = 32*NCAPINTS + i; if (cpu_has_bug(c, bug_bit) && x86_bug_flags[i]) seq_printf(m, " %s", x86_bug_flags[i]); } seq_printf(m, "\nbogomips\t: %lu.%02lu\n", c->loops_per_jiffy/(500000/HZ), (c->loops_per_jiffy/(5000/HZ)) % 100); #ifdef CONFIG_X86_64 if (c->x86_tlbsize > 0) seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); #endif seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", c->x86_phys_bits, c->x86_virt_bits); seq_puts(m, "power management:"); for (i = 0; i < 32; i++) { if (c->x86_power & (1 << i)) { if (i < ARRAY_SIZE(x86_power_flags) && x86_power_flags[i]) seq_printf(m, "%s%s", x86_power_flags[i][0] ? " " : "", x86_power_flags[i]); else seq_printf(m, " [%d]", i); } } seq_puts(m, "\n\n"); return 0; } static void *c_start(struct seq_file *m, loff_t *pos) { *pos = cpumask_next(*pos - 1, cpu_online_mask); if ((*pos) < nr_cpu_ids) return &cpu_data(*pos); return NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { (*pos)++; return c_start(m, pos); } static void c_stop(struct seq_file *m, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = show_cpuinfo, }; #ifdef CONFIG_X86_USER_SHADOW_STACK static void dump_x86_features(struct seq_file *m, unsigned long features) { if (features & ARCH_SHSTK_SHSTK) seq_puts(m, "shstk "); if (features & ARCH_SHSTK_WRSS) seq_puts(m, "wrss "); } void arch_proc_pid_thread_features(struct seq_file *m, struct task_struct *task) { seq_puts(m, "x86_Thread_features:\t"); dump_x86_features(m, task->thread.features); seq_putc(m, '\n'); seq_puts(m, "x86_Thread_features_locked:\t"); dump_x86_features(m, task->thread.features_locked); seq_putc(m, '\n'); } #endif /* CONFIG_X86_USER_SHADOW_STACK */
linux-master
arch/x86/kernel/cpu/proc.c
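show_cpuinfo() in the proc.c entry above emits one record per online CPU, with the feature names from x86_cap_flags printed as a space-separated list on the "flags" line. The helper below is a small consumer-side sketch that scans the first processor's flags line for a given name; the choice of "hypervisor" as the probe flag is only an example.

/*
 * Consumer-side sketch for the /proc/cpuinfo format produced by
 * show_cpuinfo(): the "flags" key is followed by ':' and a list of
 * space-separated feature names.  The probed flag name is only an example.
 */
#include <stdio.h>
#include <string.h>

static int cpu_flag_present(const char *flag)
{
	char line[8192];
	FILE *f = fopen("/proc/cpuinfo", "r");
	int found = 0;

	if (!f)
		return -1;

	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "flags", 5))
			continue;
		/* Walk the space-separated list after the ':'. */
		for (char *tok = strtok(strchr(line, ':') + 1, " \t\n");
		     tok; tok = strtok(NULL, " \t\n")) {
			if (!strcmp(tok, flag)) {
				found = 1;
				break;
			}
		}
		break;	/* the first processor's flags line is enough */
	}
	fclose(f);
	return found;
}

int main(void)
{
	printf("hypervisor flag: %s\n",
	       cpu_flag_present("hypervisor") == 1 ? "present" : "absent");
	return 0;
}

Only the first record is inspected because the flags line is identical across CPUs in the common case; a per-CPU check would keep scanning instead of breaking after the first match.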
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1994 Linus Torvalds * * Cyrix stuff, June 1998 by: * - Rafael R. Reilova (moved everything from head.S), * <[email protected]> * - Channing Corn (tests & fixes), * - Andrew D. Balsa (code cleanup). */ #include <linux/init.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/nospec.h> #include <linux/prctl.h> #include <linux/sched/smt.h> #include <linux/pgtable.h> #include <linux/bpf.h> #include <asm/spec-ctrl.h> #include <asm/cmdline.h> #include <asm/bugs.h> #include <asm/processor.h> #include <asm/processor-flags.h> #include <asm/fpu/api.h> #include <asm/msr.h> #include <asm/vmx.h> #include <asm/paravirt.h> #include <asm/intel-family.h> #include <asm/e820/api.h> #include <asm/hypervisor.h> #include <asm/tlbflush.h> #include <asm/cpu.h> #include "cpu.h" static void __init spectre_v1_select_mitigation(void); static void __init spectre_v2_select_mitigation(void); static void __init retbleed_select_mitigation(void); static void __init spectre_v2_user_select_mitigation(void); static void __init ssb_select_mitigation(void); static void __init l1tf_select_mitigation(void); static void __init mds_select_mitigation(void); static void __init md_clear_update_mitigation(void); static void __init md_clear_select_mitigation(void); static void __init taa_select_mitigation(void); static void __init mmio_select_mitigation(void); static void __init srbds_select_mitigation(void); static void __init l1d_flush_select_mitigation(void); static void __init srso_select_mitigation(void); static void __init gds_select_mitigation(void); /* The base value of the SPEC_CTRL MSR without task-specific bits set */ u64 x86_spec_ctrl_base; EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); /* The current value of the SPEC_CTRL MSR with task-specific bits set */ DEFINE_PER_CPU(u64, x86_spec_ctrl_current); EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB; EXPORT_SYMBOL_GPL(x86_pred_cmd); static DEFINE_MUTEX(spec_ctrl_mutex); void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; /* Update SPEC_CTRL MSR and its cached copy unconditionally */ static void update_spec_ctrl(u64 val) { this_cpu_write(x86_spec_ctrl_current, val); wrmsrl(MSR_IA32_SPEC_CTRL, val); } /* * Keep track of the SPEC_CTRL MSR value for the current task, which may differ * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update(). */ void update_spec_ctrl_cond(u64 val) { if (this_cpu_read(x86_spec_ctrl_current) == val) return; this_cpu_write(x86_spec_ctrl_current, val); /* * When KERNEL_IBRS this MSR is written on return-to-user, unless * forced the update can be delayed until that time. */ if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) wrmsrl(MSR_IA32_SPEC_CTRL, val); } noinstr u64 spec_ctrl_current(void) { return this_cpu_read(x86_spec_ctrl_current); } EXPORT_SYMBOL_GPL(spec_ctrl_current); /* * AMD specific MSR info for Speculative Store Bypass control. * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu(). 
*/ u64 __ro_after_init x86_amd_ls_cfg_base; u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; /* Control conditional STIBP in switch_to() */ DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); /* Control conditional IBPB in switch_mm() */ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); /* Control unconditional IBPB in switch_mm() */ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); /* Control MDS CPU buffer clear before returning to user space */ DEFINE_STATIC_KEY_FALSE(mds_user_clear); EXPORT_SYMBOL_GPL(mds_user_clear); /* Control MDS CPU buffer clear before idling (halt, mwait) */ DEFINE_STATIC_KEY_FALSE(mds_idle_clear); EXPORT_SYMBOL_GPL(mds_idle_clear); /* * Controls whether l1d flush based mitigations are enabled, * based on hw features and admin setting via boot parameter * defaults to false */ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); /* Controls CPU Fill buffer clear before KVM guest MMIO accesses */ DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); EXPORT_SYMBOL_GPL(mmio_stale_data_clear); void __init cpu_select_mitigations(void) { /* * Read the SPEC_CTRL MSR to account for reserved bits which may * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD * init code as it is not enumerated and depends on the family. */ if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) { rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); /* * Previously running kernel (kexec), may have some controls * turned ON. Clear them and let the mitigations setup below * rediscover them based on configuration. */ x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK; } /* Select the proper CPU mitigations before patching alternatives: */ spectre_v1_select_mitigation(); spectre_v2_select_mitigation(); /* * retbleed_select_mitigation() relies on the state set by * spectre_v2_select_mitigation(); specifically it wants to know about * spectre_v2=ibrs. */ retbleed_select_mitigation(); /* * spectre_v2_user_select_mitigation() relies on the state set by * retbleed_select_mitigation(); specifically the STIBP selection is * forced for UNRET or IBPB. */ spectre_v2_user_select_mitigation(); ssb_select_mitigation(); l1tf_select_mitigation(); md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); /* * srso_select_mitigation() depends and must run after * retbleed_select_mitigation(). */ srso_select_mitigation(); gds_select_mitigation(); } /* * NOTE: This function is *only* called for SVM, since Intel uses * MSR_IA32_SPEC_CTRL for SSBD. */ void x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest) { u64 guestval, hostval; struct thread_info *ti = current_thread_info(); /* * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported. */ if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) && !static_cpu_has(X86_FEATURE_VIRT_SSBD)) return; /* * If the host has SSBD mitigation enabled, force it in the host's * virtual MSR value. If its not permanently enabled, evaluate * current's TIF_SSBD thread flag. */ if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE)) hostval = SPEC_CTRL_SSBD; else hostval = ssbd_tif_to_spec_ctrl(ti->flags); /* Sanitize the guest value */ guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD; if (hostval != guestval) { unsigned long tif; tif = setguest ? 
ssbd_spec_ctrl_to_tif(guestval) : ssbd_spec_ctrl_to_tif(hostval); speculation_ctrl_update(tif); } } EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); static void x86_amd_ssb_disable(void) { u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) wrmsrl(MSR_AMD64_LS_CFG, msrval); } #undef pr_fmt #define pr_fmt(fmt) "MDS: " fmt /* Default mitigation for MDS-affected CPUs */ static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL; static bool mds_nosmt __ro_after_init = false; static const char * const mds_strings[] = { [MDS_MITIGATION_OFF] = "Vulnerable", [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode", }; static void __init mds_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) { mds_mitigation = MDS_MITIGATION_OFF; return; } if (mds_mitigation == MDS_MITIGATION_FULL) { if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) mds_mitigation = MDS_MITIGATION_VMWERV; static_branch_enable(&mds_user_clear); if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && (mds_nosmt || cpu_mitigations_auto_nosmt())) cpu_smt_disable(false); } } static int __init mds_cmdline(char *str) { if (!boot_cpu_has_bug(X86_BUG_MDS)) return 0; if (!str) return -EINVAL; if (!strcmp(str, "off")) mds_mitigation = MDS_MITIGATION_OFF; else if (!strcmp(str, "full")) mds_mitigation = MDS_MITIGATION_FULL; else if (!strcmp(str, "full,nosmt")) { mds_mitigation = MDS_MITIGATION_FULL; mds_nosmt = true; } return 0; } early_param("mds", mds_cmdline); #undef pr_fmt #define pr_fmt(fmt) "TAA: " fmt enum taa_mitigations { TAA_MITIGATION_OFF, TAA_MITIGATION_UCODE_NEEDED, TAA_MITIGATION_VERW, TAA_MITIGATION_TSX_DISABLED, }; /* Default mitigation for TAA-affected CPUs */ static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW; static bool taa_nosmt __ro_after_init; static const char * const taa_strings[] = { [TAA_MITIGATION_OFF] = "Vulnerable", [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled", }; static void __init taa_select_mitigation(void) { u64 ia32_cap; if (!boot_cpu_has_bug(X86_BUG_TAA)) { taa_mitigation = TAA_MITIGATION_OFF; return; } /* TSX previously disabled by tsx=off */ if (!boot_cpu_has(X86_FEATURE_RTM)) { taa_mitigation = TAA_MITIGATION_TSX_DISABLED; return; } if (cpu_mitigations_off()) { taa_mitigation = TAA_MITIGATION_OFF; return; } /* * TAA mitigation via VERW is turned off if both * tsx_async_abort=off and mds=off are specified. */ if (taa_mitigation == TAA_MITIGATION_OFF && mds_mitigation == MDS_MITIGATION_OFF) return; if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) taa_mitigation = TAA_MITIGATION_VERW; else taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; /* * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1. * A microcode update fixes this behavior to clear CPU buffers. It also * adds support for MSR_IA32_TSX_CTRL which is enumerated by the * ARCH_CAP_TSX_CTRL_MSR bit. * * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode * update is required. 
*/ ia32_cap = x86_read_arch_cap_msr(); if ( (ia32_cap & ARCH_CAP_MDS_NO) && !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR)) taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; /* * TSX is enabled, select alternate mitigation for TAA which is * the same as MDS. Enable MDS static branch to clear CPU buffers. * * For guests that can't determine whether the correct microcode is * present on host, enable the mitigation for UCODE_NEEDED as well. */ static_branch_enable(&mds_user_clear); if (taa_nosmt || cpu_mitigations_auto_nosmt()) cpu_smt_disable(false); } static int __init tsx_async_abort_parse_cmdline(char *str) { if (!boot_cpu_has_bug(X86_BUG_TAA)) return 0; if (!str) return -EINVAL; if (!strcmp(str, "off")) { taa_mitigation = TAA_MITIGATION_OFF; } else if (!strcmp(str, "full")) { taa_mitigation = TAA_MITIGATION_VERW; } else if (!strcmp(str, "full,nosmt")) { taa_mitigation = TAA_MITIGATION_VERW; taa_nosmt = true; } return 0; } early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); #undef pr_fmt #define pr_fmt(fmt) "MMIO Stale Data: " fmt enum mmio_mitigations { MMIO_MITIGATION_OFF, MMIO_MITIGATION_UCODE_NEEDED, MMIO_MITIGATION_VERW, }; /* Default mitigation for Processor MMIO Stale Data vulnerabilities */ static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW; static bool mmio_nosmt __ro_after_init = false; static const char * const mmio_strings[] = { [MMIO_MITIGATION_OFF] = "Vulnerable", [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", }; static void __init mmio_select_mitigation(void) { u64 ia32_cap; if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) || cpu_mitigations_off()) { mmio_mitigation = MMIO_MITIGATION_OFF; return; } if (mmio_mitigation == MMIO_MITIGATION_OFF) return; ia32_cap = x86_read_arch_cap_msr(); /* * Enable CPU buffer clear mitigation for host and VMM, if also affected * by MDS or TAA. Otherwise, enable mitigation for VMM only. */ if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM))) static_branch_enable(&mds_user_clear); else static_branch_enable(&mmio_stale_data_clear); /* * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can * be propagated to uncore buffers, clearing the Fill buffers on idle * is required irrespective of SMT state. */ if (!(ia32_cap & ARCH_CAP_FBSDP_NO)) static_branch_enable(&mds_idle_clear); /* * Check if the system has the right microcode. * * CPU Fill buffer clear mitigation is enumerated by either an explicit * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS * affected systems. 
*/ if ((ia32_cap & ARCH_CAP_FB_CLEAR) || (boot_cpu_has(X86_FEATURE_MD_CLEAR) && boot_cpu_has(X86_FEATURE_FLUSH_L1D) && !(ia32_cap & ARCH_CAP_MDS_NO))) mmio_mitigation = MMIO_MITIGATION_VERW; else mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; if (mmio_nosmt || cpu_mitigations_auto_nosmt()) cpu_smt_disable(false); } static int __init mmio_stale_data_parse_cmdline(char *str) { if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) return 0; if (!str) return -EINVAL; if (!strcmp(str, "off")) { mmio_mitigation = MMIO_MITIGATION_OFF; } else if (!strcmp(str, "full")) { mmio_mitigation = MMIO_MITIGATION_VERW; } else if (!strcmp(str, "full,nosmt")) { mmio_mitigation = MMIO_MITIGATION_VERW; mmio_nosmt = true; } return 0; } early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); #undef pr_fmt #define pr_fmt(fmt) "" fmt static void __init md_clear_update_mitigation(void) { if (cpu_mitigations_off()) return; if (!static_key_enabled(&mds_user_clear)) goto out; /* * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data * mitigation, if necessary. */ if (mds_mitigation == MDS_MITIGATION_OFF && boot_cpu_has_bug(X86_BUG_MDS)) { mds_mitigation = MDS_MITIGATION_FULL; mds_select_mitigation(); } if (taa_mitigation == TAA_MITIGATION_OFF && boot_cpu_has_bug(X86_BUG_TAA)) { taa_mitigation = TAA_MITIGATION_VERW; taa_select_mitigation(); } if (mmio_mitigation == MMIO_MITIGATION_OFF && boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { mmio_mitigation = MMIO_MITIGATION_VERW; mmio_select_mitigation(); } out: if (boot_cpu_has_bug(X86_BUG_MDS)) pr_info("MDS: %s\n", mds_strings[mds_mitigation]); if (boot_cpu_has_bug(X86_BUG_TAA)) pr_info("TAA: %s\n", taa_strings[taa_mitigation]); if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) pr_info("MMIO Stale Data: Unknown: No mitigations\n"); } static void __init md_clear_select_mitigation(void) { mds_select_mitigation(); taa_select_mitigation(); mmio_select_mitigation(); /* * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update * and print their mitigation after MDS, TAA and MMIO Stale Data * mitigation selection is done. */ md_clear_update_mitigation(); } #undef pr_fmt #define pr_fmt(fmt) "SRBDS: " fmt enum srbds_mitigations { SRBDS_MITIGATION_OFF, SRBDS_MITIGATION_UCODE_NEEDED, SRBDS_MITIGATION_FULL, SRBDS_MITIGATION_TSX_OFF, SRBDS_MITIGATION_HYPERVISOR, }; static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL; static const char * const srbds_strings[] = { [SRBDS_MITIGATION_OFF] = "Vulnerable", [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode", [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled", [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", }; static bool srbds_off; void update_srbds_msr(void) { u64 mcu_ctrl; if (!boot_cpu_has_bug(X86_BUG_SRBDS)) return; if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) return; if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) return; /* * A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX * being disabled and it hasn't received the SRBDS MSR microcode. 
*/ if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) return; rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); switch (srbds_mitigation) { case SRBDS_MITIGATION_OFF: case SRBDS_MITIGATION_TSX_OFF: mcu_ctrl |= RNGDS_MITG_DIS; break; case SRBDS_MITIGATION_FULL: mcu_ctrl &= ~RNGDS_MITG_DIS; break; default: break; } wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); } static void __init srbds_select_mitigation(void) { u64 ia32_cap; if (!boot_cpu_has_bug(X86_BUG_SRBDS)) return; /* * Check to see if this is one of the MDS_NO systems supporting TSX that * are only exposed to SRBDS when TSX is enabled or when CPU is affected * by Processor MMIO Stale Data vulnerability. */ ia32_cap = x86_read_arch_cap_msr(); if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; else if (cpu_mitigations_off() || srbds_off) srbds_mitigation = SRBDS_MITIGATION_OFF; update_srbds_msr(); pr_info("%s\n", srbds_strings[srbds_mitigation]); } static int __init srbds_parse_cmdline(char *str) { if (!str) return -EINVAL; if (!boot_cpu_has_bug(X86_BUG_SRBDS)) return 0; srbds_off = !strcmp(str, "off"); return 0; } early_param("srbds", srbds_parse_cmdline); #undef pr_fmt #define pr_fmt(fmt) "L1D Flush : " fmt enum l1d_flush_mitigations { L1D_FLUSH_OFF = 0, L1D_FLUSH_ON, }; static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF; static void __init l1d_flush_select_mitigation(void) { if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) return; static_branch_enable(&switch_mm_cond_l1d_flush); pr_info("Conditional flush on switch_mm() enabled\n"); } static int __init l1d_flush_parse_cmdline(char *str) { if (!strcmp(str, "on")) l1d_flush_mitigation = L1D_FLUSH_ON; return 0; } early_param("l1d_flush", l1d_flush_parse_cmdline); #undef pr_fmt #define pr_fmt(fmt) "GDS: " fmt enum gds_mitigations { GDS_MITIGATION_OFF, GDS_MITIGATION_UCODE_NEEDED, GDS_MITIGATION_FORCE, GDS_MITIGATION_FULL, GDS_MITIGATION_FULL_LOCKED, GDS_MITIGATION_HYPERVISOR, }; #if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION) static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE; #else static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL; #endif static const char * const gds_strings[] = { [GDS_MITIGATION_OFF] = "Vulnerable", [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode", [GDS_MITIGATION_FULL] = "Mitigation: Microcode", [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)", [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", }; bool gds_ucode_mitigated(void) { return (gds_mitigation == GDS_MITIGATION_FULL || gds_mitigation == GDS_MITIGATION_FULL_LOCKED); } EXPORT_SYMBOL_GPL(gds_ucode_mitigated); void update_gds_msr(void) { u64 mcu_ctrl_after; u64 mcu_ctrl; switch (gds_mitigation) { case GDS_MITIGATION_OFF: rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); mcu_ctrl |= GDS_MITG_DIS; break; case GDS_MITIGATION_FULL_LOCKED: /* * The LOCKED state comes from the boot CPU. APs might not have * the same state. Make sure the mitigation is enabled on all * CPUs. 
*/ case GDS_MITIGATION_FULL: rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); mcu_ctrl &= ~GDS_MITG_DIS; break; case GDS_MITIGATION_FORCE: case GDS_MITIGATION_UCODE_NEEDED: case GDS_MITIGATION_HYPERVISOR: return; }; wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); /* * Check to make sure that the WRMSR value was not ignored. Writes to * GDS_MITG_DIS will be ignored if this processor is locked but the boot * processor was not. */ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after); } static void __init gds_select_mitigation(void) { u64 mcu_ctrl; if (!boot_cpu_has_bug(X86_BUG_GDS)) return; if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { gds_mitigation = GDS_MITIGATION_HYPERVISOR; goto out; } if (cpu_mitigations_off()) gds_mitigation = GDS_MITIGATION_OFF; /* Will verify below that mitigation _can_ be disabled */ /* No microcode */ if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) { if (gds_mitigation == GDS_MITIGATION_FORCE) { /* * This only needs to be done on the boot CPU so do it * here rather than in update_gds_msr() */ setup_clear_cpu_cap(X86_FEATURE_AVX); pr_warn("Microcode update needed! Disabling AVX as mitigation.\n"); } else { gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; } goto out; } /* Microcode has mitigation, use it */ if (gds_mitigation == GDS_MITIGATION_FORCE) gds_mitigation = GDS_MITIGATION_FULL; rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); if (mcu_ctrl & GDS_MITG_LOCKED) { if (gds_mitigation == GDS_MITIGATION_OFF) pr_warn("Mitigation locked. Disable failed.\n"); /* * The mitigation is selected from the boot CPU. All other CPUs * _should_ have the same state. If the boot CPU isn't locked * but others are then update_gds_msr() will WARN() of the state * mismatch. If the boot CPU is locked update_gds_msr() will * ensure the other CPUs have the mitigation enabled. */ gds_mitigation = GDS_MITIGATION_FULL_LOCKED; } update_gds_msr(); out: pr_info("%s\n", gds_strings[gds_mitigation]); } static int __init gds_parse_cmdline(char *str) { if (!str) return -EINVAL; if (!boot_cpu_has_bug(X86_BUG_GDS)) return 0; if (!strcmp(str, "off")) gds_mitigation = GDS_MITIGATION_OFF; else if (!strcmp(str, "force")) gds_mitigation = GDS_MITIGATION_FORCE; return 0; } early_param("gather_data_sampling", gds_parse_cmdline); #undef pr_fmt #define pr_fmt(fmt) "Spectre V1 : " fmt enum spectre_v1_mitigation { SPECTRE_V1_MITIGATION_NONE, SPECTRE_V1_MITIGATION_AUTO, }; static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init = SPECTRE_V1_MITIGATION_AUTO; static const char * const spectre_v1_strings[] = { [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers", [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization", }; /* * Does SMAP provide full mitigation against speculative kernel access to * userspace? */ static bool smap_works_speculatively(void) { if (!boot_cpu_has(X86_FEATURE_SMAP)) return false; /* * On CPUs which are vulnerable to Meltdown, SMAP does not * prevent speculative access to user data in the L1 cache. * Consider SMAP to be non-functional as a mitigation on these * CPUs. 
*/ if (boot_cpu_has(X86_BUG_CPU_MELTDOWN)) return false; return true; } static void __init spectre_v1_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) { spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; return; } if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) { /* * With Spectre v1, a user can speculatively control either * path of a conditional swapgs with a user-controlled GS * value. The mitigation is to add lfences to both code paths. * * If FSGSBASE is enabled, the user can put a kernel address in * GS, in which case SMAP provides no protection. * * If FSGSBASE is disabled, the user can only put a user space * address in GS. That makes an attack harder, but still * possible if there's no SMAP protection. */ if (boot_cpu_has(X86_FEATURE_FSGSBASE) || !smap_works_speculatively()) { /* * Mitigation can be provided from SWAPGS itself or * PTI as the CR3 write in the Meltdown mitigation * is serializing. * * If neither is there, mitigate with an LFENCE to * stop speculation through swapgs. */ if (boot_cpu_has_bug(X86_BUG_SWAPGS) && !boot_cpu_has(X86_FEATURE_PTI)) setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER); /* * Enable lfences in the kernel entry (non-swapgs) * paths, to prevent user entry from speculatively * skipping swapgs. */ setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL); } } pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]); } static int __init nospectre_v1_cmdline(char *str) { spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; return 0; } early_param("nospectre_v1", nospectre_v1_cmdline); enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE; #undef pr_fmt #define pr_fmt(fmt) "RETBleed: " fmt enum retbleed_mitigation { RETBLEED_MITIGATION_NONE, RETBLEED_MITIGATION_UNRET, RETBLEED_MITIGATION_IBPB, RETBLEED_MITIGATION_IBRS, RETBLEED_MITIGATION_EIBRS, RETBLEED_MITIGATION_STUFF, }; enum retbleed_mitigation_cmd { RETBLEED_CMD_OFF, RETBLEED_CMD_AUTO, RETBLEED_CMD_UNRET, RETBLEED_CMD_IBPB, RETBLEED_CMD_STUFF, }; static const char * const retbleed_strings[] = { [RETBLEED_MITIGATION_NONE] = "Vulnerable", [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk", [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB", [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing", }; static enum retbleed_mitigation retbleed_mitigation __ro_after_init = RETBLEED_MITIGATION_NONE; static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init = RETBLEED_CMD_AUTO; static int __ro_after_init retbleed_nosmt = false; static int __init retbleed_parse_cmdline(char *str) { if (!str) return -EINVAL; while (str) { char *next = strchr(str, ','); if (next) { *next = 0; next++; } if (!strcmp(str, "off")) { retbleed_cmd = RETBLEED_CMD_OFF; } else if (!strcmp(str, "auto")) { retbleed_cmd = RETBLEED_CMD_AUTO; } else if (!strcmp(str, "unret")) { retbleed_cmd = RETBLEED_CMD_UNRET; } else if (!strcmp(str, "ibpb")) { retbleed_cmd = RETBLEED_CMD_IBPB; } else if (!strcmp(str, "stuff")) { retbleed_cmd = RETBLEED_CMD_STUFF; } else if (!strcmp(str, "nosmt")) { retbleed_nosmt = true; } else if (!strcmp(str, "force")) { setup_force_cpu_bug(X86_BUG_RETBLEED); } else { pr_err("Ignoring unknown retbleed option (%s).", str); } str = next; } return 0; } early_param("retbleed", retbleed_parse_cmdline); #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" #define 
RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n" static void __init retbleed_select_mitigation(void) { bool mitigate_smt = false; if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) return; switch (retbleed_cmd) { case RETBLEED_CMD_OFF: return; case RETBLEED_CMD_UNRET: if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) { retbleed_mitigation = RETBLEED_MITIGATION_UNRET; } else { pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n"); goto do_cmd_auto; } break; case RETBLEED_CMD_IBPB: if (!boot_cpu_has(X86_FEATURE_IBPB)) { pr_err("WARNING: CPU does not support IBPB.\n"); goto do_cmd_auto; } else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { retbleed_mitigation = RETBLEED_MITIGATION_IBPB; } else { pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); goto do_cmd_auto; } break; case RETBLEED_CMD_STUFF: if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING) && spectre_v2_enabled == SPECTRE_V2_RETPOLINE) { retbleed_mitigation = RETBLEED_MITIGATION_STUFF; } else { if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING)) pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n"); else pr_err("WARNING: kernel not compiled with CALL_DEPTH_TRACKING.\n"); goto do_cmd_auto; } break; do_cmd_auto: case RETBLEED_CMD_AUTO: default: if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) retbleed_mitigation = RETBLEED_MITIGATION_UNRET; else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB)) retbleed_mitigation = RETBLEED_MITIGATION_IBPB; } /* * The Intel mitigation (IBRS or eIBRS) was already selected in * spectre_v2_select_mitigation(). 'retbleed_mitigation' will * be set accordingly below. */ break; } switch (retbleed_mitigation) { case RETBLEED_MITIGATION_UNRET: setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_UNRET); if (IS_ENABLED(CONFIG_RETHUNK)) x86_return_thunk = retbleed_return_thunk; if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) pr_err(RETBLEED_UNTRAIN_MSG); mitigate_smt = true; break; case RETBLEED_MITIGATION_IBPB: setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); mitigate_smt = true; break; case RETBLEED_MITIGATION_STUFF: setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); x86_set_skl_return_thunk(); break; default: break; } if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) && (retbleed_nosmt || cpu_mitigations_auto_nosmt())) cpu_smt_disable(false); /* * Let IBRS trump all on Intel without affecting the effects of the * retbleed= cmdline option except for call depth based stuffing */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { switch (spectre_v2_enabled) { case SPECTRE_V2_IBRS: retbleed_mitigation = RETBLEED_MITIGATION_IBRS; break; case SPECTRE_V2_EIBRS: case SPECTRE_V2_EIBRS_RETPOLINE: case SPECTRE_V2_EIBRS_LFENCE: retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; break; default: if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) pr_err(RETBLEED_INTEL_MSG); } } pr_info("%s\n", retbleed_strings[retbleed_mitigation]); } #undef pr_fmt #define pr_fmt(fmt) "Spectre V2 : " fmt static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = SPECTRE_V2_USER_NONE; static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = SPECTRE_V2_USER_NONE; #ifdef CONFIG_RETPOLINE static bool spectre_v2_bad_module; bool retpoline_module_ok(bool 
has_retpoline) { if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) return true; pr_err("System may be vulnerable to spectre v2\n"); spectre_v2_bad_module = true; return false; } static inline const char *spectre_v2_module_string(void) { return spectre_v2_bad_module ? " - vulnerable module loaded" : ""; } #else static inline const char *spectre_v2_module_string(void) { return ""; } #endif #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" #ifdef CONFIG_BPF_SYSCALL void unpriv_ebpf_notify(int new_state) { if (new_state) return; /* Unprivileged eBPF is enabled */ switch (spectre_v2_enabled) { case SPECTRE_V2_EIBRS: pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); break; case SPECTRE_V2_EIBRS_LFENCE: if (sched_smt_active()) pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); break; default: break; } } #endif static inline bool match_option(const char *arg, int arglen, const char *opt) { int len = strlen(opt); return len == arglen && !strncmp(arg, opt, len); } /* The kernel command line selection for spectre v2 */ enum spectre_v2_mitigation_cmd { SPECTRE_V2_CMD_NONE, SPECTRE_V2_CMD_AUTO, SPECTRE_V2_CMD_FORCE, SPECTRE_V2_CMD_RETPOLINE, SPECTRE_V2_CMD_RETPOLINE_GENERIC, SPECTRE_V2_CMD_RETPOLINE_LFENCE, SPECTRE_V2_CMD_EIBRS, SPECTRE_V2_CMD_EIBRS_RETPOLINE, SPECTRE_V2_CMD_EIBRS_LFENCE, SPECTRE_V2_CMD_IBRS, }; enum spectre_v2_user_cmd { SPECTRE_V2_USER_CMD_NONE, SPECTRE_V2_USER_CMD_AUTO, SPECTRE_V2_USER_CMD_FORCE, SPECTRE_V2_USER_CMD_PRCTL, SPECTRE_V2_USER_CMD_PRCTL_IBPB, SPECTRE_V2_USER_CMD_SECCOMP, SPECTRE_V2_USER_CMD_SECCOMP_IBPB, }; static const char * const spectre_v2_user_strings[] = { [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", }; static const struct { const char *option; enum spectre_v2_user_cmd cmd; bool secure; } v2_user_options[] __initconst = { { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, { "off", SPECTRE_V2_USER_CMD_NONE, false }, { "on", SPECTRE_V2_USER_CMD_FORCE, true }, { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, }; static void __init spec_v2_user_print_cond(const char *reason, bool secure) { if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) pr_info("spectre_v2_user=%s forced on command line.\n", reason); } static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd; static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void) { char arg[20]; int ret, i; switch (spectre_v2_cmd) { case SPECTRE_V2_CMD_NONE: return SPECTRE_V2_USER_CMD_NONE; case SPECTRE_V2_CMD_FORCE: return SPECTRE_V2_USER_CMD_FORCE; default: break; } ret = cmdline_find_option(boot_command_line, "spectre_v2_user", arg, 
sizeof(arg)); if (ret < 0) return SPECTRE_V2_USER_CMD_AUTO; for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { if (match_option(arg, ret, v2_user_options[i].option)) { spec_v2_user_print_cond(v2_user_options[i].option, v2_user_options[i].secure); return v2_user_options[i].cmd; } } pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg); return SPECTRE_V2_USER_CMD_AUTO; } static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) { return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS; } static void __init spectre_v2_user_select_mitigation(void) { enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; bool smt_possible = IS_ENABLED(CONFIG_SMP); enum spectre_v2_user_cmd cmd; if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) return; if (cpu_smt_control == CPU_SMT_FORCE_DISABLED || cpu_smt_control == CPU_SMT_NOT_SUPPORTED) smt_possible = false; cmd = spectre_v2_parse_user_cmdline(); switch (cmd) { case SPECTRE_V2_USER_CMD_NONE: goto set_mode; case SPECTRE_V2_USER_CMD_FORCE: mode = SPECTRE_V2_USER_STRICT; break; case SPECTRE_V2_USER_CMD_AUTO: case SPECTRE_V2_USER_CMD_PRCTL: case SPECTRE_V2_USER_CMD_PRCTL_IBPB: mode = SPECTRE_V2_USER_PRCTL; break; case SPECTRE_V2_USER_CMD_SECCOMP: case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: if (IS_ENABLED(CONFIG_SECCOMP)) mode = SPECTRE_V2_USER_SECCOMP; else mode = SPECTRE_V2_USER_PRCTL; break; } /* Initialize Indirect Branch Prediction Barrier */ if (boot_cpu_has(X86_FEATURE_IBPB)) { setup_force_cpu_cap(X86_FEATURE_USE_IBPB); spectre_v2_user_ibpb = mode; switch (cmd) { case SPECTRE_V2_USER_CMD_FORCE: case SPECTRE_V2_USER_CMD_PRCTL_IBPB: case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: static_branch_enable(&switch_mm_always_ibpb); spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; break; case SPECTRE_V2_USER_CMD_PRCTL: case SPECTRE_V2_USER_CMD_AUTO: case SPECTRE_V2_USER_CMD_SECCOMP: static_branch_enable(&switch_mm_cond_ibpb); break; default: break; } pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", static_key_enabled(&switch_mm_always_ibpb) ? "always-on" : "conditional"); } /* * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP * is not required. * * Intel's Enhanced IBRS also protects against cross-thread branch target * injection in user-mode as the IBRS bit remains always set which * implicitly enables cross-thread protections. However, in legacy IBRS * mode, the IBRS bit is set only on kernel entry and cleared on return * to userspace. AMD Automatic IBRS also does not protect userspace. * These modes therefore disable the implicit cross-thread protection, * so allow for STIBP to be selected in those cases. */ if (!boot_cpu_has(X86_FEATURE_STIBP) || !smt_possible || (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && !boot_cpu_has(X86_FEATURE_AUTOIBRS))) return; /* * At this point, an STIBP mode other than "off" has been set. * If STIBP support is not being forced, check if STIBP always-on * is preferred. 
*/ if (mode != SPECTRE_V2_USER_STRICT && boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) mode = SPECTRE_V2_USER_STRICT_PREFERRED; if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { if (mode != SPECTRE_V2_USER_STRICT && mode != SPECTRE_V2_USER_STRICT_PREFERRED) pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n"); mode = SPECTRE_V2_USER_STRICT_PREFERRED; } spectre_v2_user_stibp = mode; set_mode: pr_info("%s\n", spectre_v2_user_strings[mode]); } static const char * const spectre_v2_strings[] = { [SPECTRE_V2_NONE] = "Vulnerable", [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS", [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE", [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines", [SPECTRE_V2_IBRS] = "Mitigation: IBRS", }; static const struct { const char *option; enum spectre_v2_mitigation_cmd cmd; bool secure; } mitigation_options[] __initconst = { { "off", SPECTRE_V2_CMD_NONE, false }, { "on", SPECTRE_V2_CMD_FORCE, true }, { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, { "eibrs", SPECTRE_V2_CMD_EIBRS, false }, { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false }, { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false }, { "auto", SPECTRE_V2_CMD_AUTO, false }, { "ibrs", SPECTRE_V2_CMD_IBRS, false }, }; static void __init spec_v2_print_cond(const char *reason, bool secure) { if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) pr_info("%s selected on command line.\n", reason); } static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) { enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; char arg[20]; int ret, i; if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") || cpu_mitigations_off()) return SPECTRE_V2_CMD_NONE; ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); if (ret < 0) return SPECTRE_V2_CMD_AUTO; for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { if (!match_option(arg, ret, mitigation_options[i].option)) continue; cmd = mitigation_options[i].cmd; break; } if (i >= ARRAY_SIZE(mitigation_options)) { pr_err("unknown option (%s). Switching to AUTO select\n", arg); return SPECTRE_V2_CMD_AUTO; } if ((cmd == SPECTRE_V2_CMD_RETPOLINE || cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && !IS_ENABLED(CONFIG_RETPOLINE)) { pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } if ((cmd == SPECTRE_V2_CMD_EIBRS || cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n", mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { pr_err("%s selected, but CPU doesn't have a serializing LFENCE. 
Switching to AUTO select\n", mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) { pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n", mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) { pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n", mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } spec_v2_print_cond(mitigation_options[i].option, mitigation_options[i].secure); return cmd; } static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) { if (!IS_ENABLED(CONFIG_RETPOLINE)) { pr_err("Kernel not compiled with retpoline; no mitigation available!"); return SPECTRE_V2_NONE; } return SPECTRE_V2_RETPOLINE; } /* Disable in-kernel use of non-RSB RET predictors */ static void __init spec_ctrl_disable_kernel_rrsba(void) { u64 ia32_cap; if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) return; ia32_cap = x86_read_arch_cap_msr(); if (ia32_cap & ARCH_CAP_RRSBA) { x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; update_spec_ctrl(x86_spec_ctrl_base); } } static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode) { /* * Similar to context switches, there are two types of RSB attacks * after VM exit: * * 1) RSB underflow * * 2) Poisoned RSB entry * * When retpoline is enabled, both are mitigated by filling/clearing * the RSB. * * When IBRS is enabled, while #1 would be mitigated by the IBRS branch * prediction isolation protections, RSB still needs to be cleared * because of #2. Note that SMEP provides no protection here, unlike * user-space-poisoned RSB entries. * * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB * bug is present then a LITE version of RSB protection is required, * just a single call needs to retire before a RET is executed. */ switch (mode) { case SPECTRE_V2_NONE: return; case SPECTRE_V2_EIBRS_LFENCE: case SPECTRE_V2_EIBRS: if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); } return; case SPECTRE_V2_EIBRS_RETPOLINE: case SPECTRE_V2_RETPOLINE: case SPECTRE_V2_LFENCE: case SPECTRE_V2_IBRS: setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n"); return; } pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit"); dump_stack(); } static void __init spectre_v2_select_mitigation(void) { enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; /* * If the CPU is not affected and the command line mode is NONE or AUTO * then nothing to do. 
*/ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) return; switch (cmd) { case SPECTRE_V2_CMD_NONE: return; case SPECTRE_V2_CMD_FORCE: case SPECTRE_V2_CMD_AUTO: if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { mode = SPECTRE_V2_EIBRS; break; } if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) && boot_cpu_has_bug(X86_BUG_RETBLEED) && retbleed_cmd != RETBLEED_CMD_OFF && retbleed_cmd != RETBLEED_CMD_STUFF && boot_cpu_has(X86_FEATURE_IBRS) && boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { mode = SPECTRE_V2_IBRS; break; } mode = spectre_v2_select_retpoline(); break; case SPECTRE_V2_CMD_RETPOLINE_LFENCE: pr_err(SPECTRE_V2_LFENCE_MSG); mode = SPECTRE_V2_LFENCE; break; case SPECTRE_V2_CMD_RETPOLINE_GENERIC: mode = SPECTRE_V2_RETPOLINE; break; case SPECTRE_V2_CMD_RETPOLINE: mode = spectre_v2_select_retpoline(); break; case SPECTRE_V2_CMD_IBRS: mode = SPECTRE_V2_IBRS; break; case SPECTRE_V2_CMD_EIBRS: mode = SPECTRE_V2_EIBRS; break; case SPECTRE_V2_CMD_EIBRS_LFENCE: mode = SPECTRE_V2_EIBRS_LFENCE; break; case SPECTRE_V2_CMD_EIBRS_RETPOLINE: mode = SPECTRE_V2_EIBRS_RETPOLINE; break; } if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); if (spectre_v2_in_ibrs_mode(mode)) { if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); } else { x86_spec_ctrl_base |= SPEC_CTRL_IBRS; update_spec_ctrl(x86_spec_ctrl_base); } } switch (mode) { case SPECTRE_V2_NONE: case SPECTRE_V2_EIBRS: break; case SPECTRE_V2_IBRS: setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) pr_warn(SPECTRE_V2_IBRS_PERF_MSG); break; case SPECTRE_V2_LFENCE: case SPECTRE_V2_EIBRS_LFENCE: setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); fallthrough; case SPECTRE_V2_RETPOLINE: case SPECTRE_V2_EIBRS_RETPOLINE: setup_force_cpu_cap(X86_FEATURE_RETPOLINE); break; } /* * Disable alternate RSB predictions in kernel when indirect CALLs and * JMPs gets protection against BHI and Intramode-BTI, but RET * prediction from a non-RSB predictor is still a risk. */ if (mode == SPECTRE_V2_EIBRS_LFENCE || mode == SPECTRE_V2_EIBRS_RETPOLINE || mode == SPECTRE_V2_RETPOLINE) spec_ctrl_disable_kernel_rrsba(); spectre_v2_enabled = mode; pr_info("%s\n", spectre_v2_strings[mode]); /* * If Spectre v2 protection has been enabled, fill the RSB during a * context switch. In general there are two types of RSB attacks * across context switches, for which the CALLs/RETs may be unbalanced. * * 1) RSB underflow * * Some Intel parts have "bottomless RSB". When the RSB is empty, * speculated return targets may come from the branch predictor, * which could have a user-poisoned BTB or BHB entry. * * AMD has it even worse: *all* returns are speculated from the BTB, * regardless of the state of the RSB. * * When IBRS or eIBRS is enabled, the "user -> kernel" attack * scenario is mitigated by the IBRS branch prediction isolation * properties, so the RSB buffer filling wouldn't be necessary to * protect against this type of attack. * * The "user -> user" attack scenario is mitigated by RSB filling. * * 2) Poisoned RSB entry * * If the 'next' in-kernel return stack is shorter than 'prev', * 'next' could be tricked into speculating with a user-poisoned RSB * entry. * * The "user -> kernel" attack scenario is mitigated by SMEP and * eIBRS. * * The "user -> user" scenario, also known as SpectreBHB, requires * RSB clearing. * * So to mitigate all cases, unconditionally fill RSB on context * switches. 
* * FIXME: Is this pointless for retbleed-affected AMD? */ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); spectre_v2_determine_rsb_fill_type_at_vmexit(mode); /* * Retpoline protects the kernel, but doesn't protect firmware. IBRS * and Enhanced IBRS protect firmware too, so enable IBRS around * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't * otherwise enabled. * * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because * the user might select retpoline on the kernel command line and if * the CPU supports Enhanced IBRS, kernel might un-intentionally not * enable IBRS around firmware calls. */ if (boot_cpu_has_bug(X86_BUG_RETBLEED) && boot_cpu_has(X86_FEATURE_IBPB) && (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { if (retbleed_cmd != RETBLEED_CMD_IBPB) { setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); pr_info("Enabling Speculation Barrier for firmware calls\n"); } } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); pr_info("Enabling Restricted Speculation for firmware calls\n"); } /* Set up IBPB and STIBP depending on the general spectre V2 command */ spectre_v2_cmd = cmd; } static void update_stibp_msr(void * __unused) { u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); update_spec_ctrl(val); } /* Update x86_spec_ctrl_base in case SMT state changed. */ static void update_stibp_strict(void) { u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; if (sched_smt_active()) mask |= SPEC_CTRL_STIBP; if (mask == x86_spec_ctrl_base) return; pr_info("Update user space SMT mitigation: STIBP %s\n", mask & SPEC_CTRL_STIBP ? "always-on" : "off"); x86_spec_ctrl_base = mask; on_each_cpu(update_stibp_msr, NULL, 1); } /* Update the static key controlling the evaluation of TIF_SPEC_IB */ static void update_indir_branch_cond(void) { if (sched_smt_active()) static_branch_enable(&switch_to_cond_stibp); else static_branch_disable(&switch_to_cond_stibp); } #undef pr_fmt #define pr_fmt(fmt) fmt /* Update the static key controlling the MDS CPU buffer clear in idle */ static void update_mds_branch_idle(void) { u64 ia32_cap = x86_read_arch_cap_msr(); /* * Enable the idle clearing if SMT is active on CPUs which are * affected only by MSBDS and not any other MDS variant. * * The other variants cannot be mitigated when SMT is enabled, so * clearing the buffers on idle just to prevent the Store Buffer * repartitioning leak would be a window dressing exercise. */ if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) return; if (sched_smt_active()) { static_branch_enable(&mds_idle_clear); } else if (mmio_mitigation == MMIO_MITIGATION_OFF || (ia32_cap & ARCH_CAP_FBSDP_NO)) { static_branch_disable(&mds_idle_clear); } } #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" void cpu_bugs_smt_update(void) { mutex_lock(&spec_ctrl_mutex); if (sched_smt_active() && unprivileged_ebpf_enabled() && spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); switch (spectre_v2_user_stibp) { case SPECTRE_V2_USER_NONE: break; case SPECTRE_V2_USER_STRICT: case SPECTRE_V2_USER_STRICT_PREFERRED: update_stibp_strict(); break; case SPECTRE_V2_USER_PRCTL: case SPECTRE_V2_USER_SECCOMP: update_indir_branch_cond(); break; } switch (mds_mitigation) { case MDS_MITIGATION_FULL: case MDS_MITIGATION_VMWERV: if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) pr_warn_once(MDS_MSG_SMT); update_mds_branch_idle(); break; case MDS_MITIGATION_OFF: break; } switch (taa_mitigation) { case TAA_MITIGATION_VERW: case TAA_MITIGATION_UCODE_NEEDED: if (sched_smt_active()) pr_warn_once(TAA_MSG_SMT); break; case TAA_MITIGATION_TSX_DISABLED: case TAA_MITIGATION_OFF: break; } switch (mmio_mitigation) { case MMIO_MITIGATION_VERW: case MMIO_MITIGATION_UCODE_NEEDED: if (sched_smt_active()) pr_warn_once(MMIO_MSG_SMT); break; case MMIO_MITIGATION_OFF: break; } mutex_unlock(&spec_ctrl_mutex); } #undef pr_fmt #define pr_fmt(fmt) "Speculative Store Bypass: " fmt static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; /* The kernel command line selection */ enum ssb_mitigation_cmd { SPEC_STORE_BYPASS_CMD_NONE, SPEC_STORE_BYPASS_CMD_AUTO, SPEC_STORE_BYPASS_CMD_ON, SPEC_STORE_BYPASS_CMD_PRCTL, SPEC_STORE_BYPASS_CMD_SECCOMP, }; static const char * const ssb_strings[] = { [SPEC_STORE_BYPASS_NONE] = "Vulnerable", [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", }; static const struct { const char *option; enum ssb_mitigation_cmd cmd; } ssb_mitigation_options[] __initconst = { { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ }; static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) { enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; char arg[20]; int ret, i; if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || cpu_mitigations_off()) { return SPEC_STORE_BYPASS_CMD_NONE; } else { ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", arg, sizeof(arg)); if (ret < 0) return SPEC_STORE_BYPASS_CMD_AUTO; for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { if (!match_option(arg, ret, ssb_mitigation_options[i].option)) continue; cmd = ssb_mitigation_options[i].cmd; break; } if (i >= ARRAY_SIZE(ssb_mitigation_options)) { pr_err("unknown option (%s). 
Switching to AUTO select\n", arg); return SPEC_STORE_BYPASS_CMD_AUTO; } } return cmd; } static enum ssb_mitigation __init __ssb_select_mitigation(void) { enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; enum ssb_mitigation_cmd cmd; if (!boot_cpu_has(X86_FEATURE_SSBD)) return mode; cmd = ssb_parse_cmdline(); if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && (cmd == SPEC_STORE_BYPASS_CMD_NONE || cmd == SPEC_STORE_BYPASS_CMD_AUTO)) return mode; switch (cmd) { case SPEC_STORE_BYPASS_CMD_SECCOMP: /* * Choose prctl+seccomp as the default mode if seccomp is * enabled. */ if (IS_ENABLED(CONFIG_SECCOMP)) mode = SPEC_STORE_BYPASS_SECCOMP; else mode = SPEC_STORE_BYPASS_PRCTL; break; case SPEC_STORE_BYPASS_CMD_ON: mode = SPEC_STORE_BYPASS_DISABLE; break; case SPEC_STORE_BYPASS_CMD_AUTO: case SPEC_STORE_BYPASS_CMD_PRCTL: mode = SPEC_STORE_BYPASS_PRCTL; break; case SPEC_STORE_BYPASS_CMD_NONE: break; } /* * We have three CPU feature flags that are in play here: * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation */ if (mode == SPEC_STORE_BYPASS_DISABLE) { setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); /* * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may * use a completely different MSR and bit dependent on family. */ if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && !static_cpu_has(X86_FEATURE_AMD_SSBD)) { x86_amd_ssb_disable(); } else { x86_spec_ctrl_base |= SPEC_CTRL_SSBD; update_spec_ctrl(x86_spec_ctrl_base); } } return mode; } static void ssb_select_mitigation(void) { ssb_mode = __ssb_select_mitigation(); if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) pr_info("%s\n", ssb_strings[ssb_mode]); } #undef pr_fmt #define pr_fmt(fmt) "Speculation prctl: " fmt static void task_update_spec_tif(struct task_struct *tsk) { /* Force the update of the real TIF bits */ set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); /* * Immediately update the speculation control MSRs for the current * task, but for a non-current task delay setting the CPU * mitigation until it is scheduled next. * * This can only happen for SECCOMP mitigation. For PRCTL it's * always the current task. 
*/ if (tsk == current) speculation_ctrl_update_current(); } static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl) { if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) return -EPERM; switch (ctrl) { case PR_SPEC_ENABLE: set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); return 0; case PR_SPEC_DISABLE: clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); return 0; default: return -ERANGE; } } static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) { if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && ssb_mode != SPEC_STORE_BYPASS_SECCOMP) return -ENXIO; switch (ctrl) { case PR_SPEC_ENABLE: /* If speculation is force disabled, enable is not allowed */ if (task_spec_ssb_force_disable(task)) return -EPERM; task_clear_spec_ssb_disable(task); task_clear_spec_ssb_noexec(task); task_update_spec_tif(task); break; case PR_SPEC_DISABLE: task_set_spec_ssb_disable(task); task_clear_spec_ssb_noexec(task); task_update_spec_tif(task); break; case PR_SPEC_FORCE_DISABLE: task_set_spec_ssb_disable(task); task_set_spec_ssb_force_disable(task); task_clear_spec_ssb_noexec(task); task_update_spec_tif(task); break; case PR_SPEC_DISABLE_NOEXEC: if (task_spec_ssb_force_disable(task)) return -EPERM; task_set_spec_ssb_disable(task); task_set_spec_ssb_noexec(task); task_update_spec_tif(task); break; default: return -ERANGE; } return 0; } static bool is_spec_ib_user_controlled(void) { return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; } static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) { switch (ctrl) { case PR_SPEC_ENABLE: if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return 0; /* * With strict mode for both IBPB and STIBP, the instruction * code paths avoid checking this task flag and instead, * unconditionally run the instruction. However, STIBP and IBPB * are independent and either can be set to conditionally * enabled regardless of the mode of the other. * * If either is set to conditional, allow the task flag to be * updated, unless it was force-disabled by a previous prctl * call. Currently, this is possible on an AMD CPU which has the * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the * kernel is booted with 'spectre_v2_user=seccomp', then * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. */ if (!is_spec_ib_user_controlled() || task_spec_ib_force_disable(task)) return -EPERM; task_clear_spec_ib_disable(task); task_update_spec_tif(task); break; case PR_SPEC_DISABLE: case PR_SPEC_FORCE_DISABLE: /* * Indirect branch speculation is always allowed when * mitigation is force disabled. 
*/ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return -EPERM; if (!is_spec_ib_user_controlled()) return 0; task_set_spec_ib_disable(task); if (ctrl == PR_SPEC_FORCE_DISABLE) task_set_spec_ib_force_disable(task); task_update_spec_tif(task); if (task == current) indirect_branch_prediction_barrier(); break; default: return -ERANGE; } return 0; } int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, unsigned long ctrl) { switch (which) { case PR_SPEC_STORE_BYPASS: return ssb_prctl_set(task, ctrl); case PR_SPEC_INDIRECT_BRANCH: return ib_prctl_set(task, ctrl); case PR_SPEC_L1D_FLUSH: return l1d_flush_prctl_set(task, ctrl); default: return -ENODEV; } } #ifdef CONFIG_SECCOMP void arch_seccomp_spec_mitigate(struct task_struct *task) { if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); } #endif static int l1d_flush_prctl_get(struct task_struct *task) { if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) return PR_SPEC_FORCE_DISABLE; if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH)) return PR_SPEC_PRCTL | PR_SPEC_ENABLE; else return PR_SPEC_PRCTL | PR_SPEC_DISABLE; } static int ssb_prctl_get(struct task_struct *task) { switch (ssb_mode) { case SPEC_STORE_BYPASS_DISABLE: return PR_SPEC_DISABLE; case SPEC_STORE_BYPASS_SECCOMP: case SPEC_STORE_BYPASS_PRCTL: if (task_spec_ssb_force_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; if (task_spec_ssb_noexec(task)) return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; if (task_spec_ssb_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_DISABLE; return PR_SPEC_PRCTL | PR_SPEC_ENABLE; default: if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) return PR_SPEC_ENABLE; return PR_SPEC_NOT_AFFECTED; } } static int ib_prctl_get(struct task_struct *task) { if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) return PR_SPEC_NOT_AFFECTED; if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return PR_SPEC_ENABLE; else if (is_spec_ib_user_controlled()) { if (task_spec_ib_force_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; if (task_spec_ib_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_DISABLE; return PR_SPEC_PRCTL | PR_SPEC_ENABLE; } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) return PR_SPEC_DISABLE; else return PR_SPEC_NOT_AFFECTED; } int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) { switch (which) { case PR_SPEC_STORE_BYPASS: return ssb_prctl_get(task); case PR_SPEC_INDIRECT_BRANCH: return ib_prctl_get(task); case PR_SPEC_L1D_FLUSH: return l1d_flush_prctl_get(task); default: return -ENODEV; } } void x86_spec_ctrl_setup_ap(void) { if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) update_spec_ctrl(x86_spec_ctrl_base); if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) x86_amd_ssb_disable(); } bool itlb_multihit_kvm_mitigation; EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); #undef pr_fmt #define pr_fmt(fmt) "L1TF: " fmt /* Default mitigation for L1TF-affected CPUs */ enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH; #if IS_ENABLED(CONFIG_KVM_INTEL) EXPORT_SYMBOL_GPL(l1tf_mitigation); #endif enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; 
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); /* * These CPUs all support 44bits physical address space internally in the * cache but CPUID can report a smaller number of physical address bits. * * The L1TF mitigation uses the top most address bit for the inversion of * non present PTEs. When the installed memory reaches into the top most * address bit due to memory holes, which has been observed on machines * which report 36bits physical address bits and have 32G RAM installed, * then the mitigation range check in l1tf_select_mitigation() triggers. * This is a false positive because the mitigation is still possible due to * the fact that the cache uses 44bit internally. Use the cache bits * instead of the reported physical bits and adjust them on the affected * machines to 44bit if the reported bits are less than 44. */ static void override_cache_bits(struct cpuinfo_x86 *c) { if (c->x86 != 6) return; switch (c->x86_model) { case INTEL_FAM6_NEHALEM: case INTEL_FAM6_WESTMERE: case INTEL_FAM6_SANDYBRIDGE: case INTEL_FAM6_IVYBRIDGE: case INTEL_FAM6_HASWELL: case INTEL_FAM6_HASWELL_L: case INTEL_FAM6_HASWELL_G: case INTEL_FAM6_BROADWELL: case INTEL_FAM6_BROADWELL_G: case INTEL_FAM6_SKYLAKE_L: case INTEL_FAM6_SKYLAKE: case INTEL_FAM6_KABYLAKE_L: case INTEL_FAM6_KABYLAKE: if (c->x86_cache_bits < 44) c->x86_cache_bits = 44; break; } } static void __init l1tf_select_mitigation(void) { u64 half_pa; if (!boot_cpu_has_bug(X86_BUG_L1TF)) return; if (cpu_mitigations_off()) l1tf_mitigation = L1TF_MITIGATION_OFF; else if (cpu_mitigations_auto_nosmt()) l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; override_cache_bits(&boot_cpu_data); switch (l1tf_mitigation) { case L1TF_MITIGATION_OFF: case L1TF_MITIGATION_FLUSH_NOWARN: case L1TF_MITIGATION_FLUSH: break; case L1TF_MITIGATION_FLUSH_NOSMT: case L1TF_MITIGATION_FULL: cpu_smt_disable(false); break; case L1TF_MITIGATION_FULL_FORCE: cpu_smt_disable(true); break; } #if CONFIG_PGTABLE_LEVELS == 2 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n"); return; #endif half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; if (l1tf_mitigation != L1TF_MITIGATION_OFF && e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { pr_warn("System has more than MAX_PA/2 memory. 
L1TF mitigation not effective.\n"); pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", half_pa); pr_info("However, doing so will make a part of your RAM unusable.\n"); pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n"); return; } setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); } static int __init l1tf_cmdline(char *str) { if (!boot_cpu_has_bug(X86_BUG_L1TF)) return 0; if (!str) return -EINVAL; if (!strcmp(str, "off")) l1tf_mitigation = L1TF_MITIGATION_OFF; else if (!strcmp(str, "flush,nowarn")) l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; else if (!strcmp(str, "flush")) l1tf_mitigation = L1TF_MITIGATION_FLUSH; else if (!strcmp(str, "flush,nosmt")) l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; else if (!strcmp(str, "full")) l1tf_mitigation = L1TF_MITIGATION_FULL; else if (!strcmp(str, "full,force")) l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; return 0; } early_param("l1tf", l1tf_cmdline); #undef pr_fmt #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt enum srso_mitigation { SRSO_MITIGATION_NONE, SRSO_MITIGATION_MICROCODE, SRSO_MITIGATION_SAFE_RET, SRSO_MITIGATION_IBPB, SRSO_MITIGATION_IBPB_ON_VMEXIT, }; enum srso_mitigation_cmd { SRSO_CMD_OFF, SRSO_CMD_MICROCODE, SRSO_CMD_SAFE_RET, SRSO_CMD_IBPB, SRSO_CMD_IBPB_ON_VMEXIT, }; static const char * const srso_strings[] = { [SRSO_MITIGATION_NONE] = "Vulnerable", [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode", [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET", [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only" }; static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET; static int __init srso_parse_cmdline(char *str) { if (!str) return -EINVAL; if (!strcmp(str, "off")) srso_cmd = SRSO_CMD_OFF; else if (!strcmp(str, "microcode")) srso_cmd = SRSO_CMD_MICROCODE; else if (!strcmp(str, "safe-ret")) srso_cmd = SRSO_CMD_SAFE_RET; else if (!strcmp(str, "ibpb")) srso_cmd = SRSO_CMD_IBPB; else if (!strcmp(str, "ibpb-vmexit")) srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT; else pr_err("Ignoring unknown SRSO option (%s).", str); return 0; } early_param("spec_rstack_overflow", srso_parse_cmdline); #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options." static void __init srso_select_mitigation(void) { bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE); if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) goto pred_cmd; if (!has_microcode) { pr_warn("IBPB-extending microcode not applied!\n"); pr_warn(SRSO_NOTICE); } else { /* * Zen1/2 with SMT off aren't vulnerable after the right * IBPB microcode has been applied. */ if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { setup_force_cpu_cap(X86_FEATURE_SRSO_NO); return; } } if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { if (has_microcode) { pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n"); srso_mitigation = SRSO_MITIGATION_IBPB; goto pred_cmd; } } switch (srso_cmd) { case SRSO_CMD_OFF: goto pred_cmd; case SRSO_CMD_MICROCODE: if (has_microcode) { srso_mitigation = SRSO_MITIGATION_MICROCODE; pr_warn(SRSO_NOTICE); } break; case SRSO_CMD_SAFE_RET: if (IS_ENABLED(CONFIG_CPU_SRSO)) { /* * Enable the return thunk for generated code * like ftrace, static_call, etc. 
*/ setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_UNRET); if (boot_cpu_data.x86 == 0x19) { setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); x86_return_thunk = srso_alias_return_thunk; } else { setup_force_cpu_cap(X86_FEATURE_SRSO); x86_return_thunk = srso_return_thunk; } srso_mitigation = SRSO_MITIGATION_SAFE_RET; } else { pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); goto pred_cmd; } break; case SRSO_CMD_IBPB: if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { if (has_microcode) { setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); srso_mitigation = SRSO_MITIGATION_IBPB; } } else { pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); goto pred_cmd; } break; case SRSO_CMD_IBPB_ON_VMEXIT: if (IS_ENABLED(CONFIG_CPU_SRSO)) { if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) { setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; } } else { pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); goto pred_cmd; } break; default: break; } pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode")); pred_cmd: if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) && boot_cpu_has(X86_FEATURE_SBPB)) x86_pred_cmd = PRED_CMD_SBPB; } #undef pr_fmt #define pr_fmt(fmt) fmt #ifdef CONFIG_SYSFS #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" #if IS_ENABLED(CONFIG_KVM_INTEL) static const char * const l1tf_vmx_states[] = { [VMENTER_L1D_FLUSH_AUTO] = "auto", [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" }; static ssize_t l1tf_show_state(char *buf) { if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && sched_smt_active())) { return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, l1tf_vmx_states[l1tf_vmx_mitigation]); } return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, l1tf_vmx_states[l1tf_vmx_mitigation], sched_smt_active() ? "vulnerable" : "disabled"); } static ssize_t itlb_multihit_show_state(char *buf) { if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || !boot_cpu_has(X86_FEATURE_VMX)) return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n"); else if (!(cr4_read_shadow() & X86_CR4_VMXE)) return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n"); else if (itlb_multihit_kvm_mitigation) return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n"); else return sysfs_emit(buf, "KVM: Vulnerable\n"); } #else static ssize_t l1tf_show_state(char *buf) { return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); } static ssize_t itlb_multihit_show_state(char *buf) { return sysfs_emit(buf, "Processor vulnerable\n"); } #endif static ssize_t mds_show_state(char *buf) { if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { return sysfs_emit(buf, "%s; SMT Host state unknown\n", mds_strings[mds_mitigation]); } if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : sched_smt_active() ? "mitigated" : "disabled")); } return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], sched_smt_active() ? 
"vulnerable" : "disabled"); } static ssize_t tsx_async_abort_show_state(char *buf) { if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || (taa_mitigation == TAA_MITIGATION_OFF)) return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]); if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { return sysfs_emit(buf, "%s; SMT Host state unknown\n", taa_strings[taa_mitigation]); } return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], sched_smt_active() ? "vulnerable" : "disabled"); } static ssize_t mmio_stale_data_show_state(char *buf) { if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) return sysfs_emit(buf, "Unknown: No mitigations\n"); if (mmio_mitigation == MMIO_MITIGATION_OFF) return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { return sysfs_emit(buf, "%s; SMT Host state unknown\n", mmio_strings[mmio_mitigation]); } return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], sched_smt_active() ? "vulnerable" : "disabled"); } static char *stibp_state(void) { if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && !boot_cpu_has(X86_FEATURE_AUTOIBRS)) return ""; switch (spectre_v2_user_stibp) { case SPECTRE_V2_USER_NONE: return ", STIBP: disabled"; case SPECTRE_V2_USER_STRICT: return ", STIBP: forced"; case SPECTRE_V2_USER_STRICT_PREFERRED: return ", STIBP: always-on"; case SPECTRE_V2_USER_PRCTL: case SPECTRE_V2_USER_SECCOMP: if (static_key_enabled(&switch_to_cond_stibp)) return ", STIBP: conditional"; } return ""; } static char *ibpb_state(void) { if (boot_cpu_has(X86_FEATURE_IBPB)) { if (static_key_enabled(&switch_mm_always_ibpb)) return ", IBPB: always-on"; if (static_key_enabled(&switch_mm_cond_ibpb)) return ", IBPB: conditional"; return ", IBPB: disabled"; } return ""; } static char *pbrsb_eibrs_state(void) { if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) return ", PBRSB-eIBRS: SW sequence"; else return ", PBRSB-eIBRS: Vulnerable"; } else { return ", PBRSB-eIBRS: Not affected"; } } static ssize_t spectre_v2_show_state(char *buf) { if (spectre_v2_enabled == SPECTRE_V2_LFENCE) return sysfs_emit(buf, "Vulnerable: LFENCE\n"); if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); if (sched_smt_active() && unprivileged_ebpf_enabled() && spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); return sysfs_emit(buf, "%s%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], ibpb_state(), boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", stibp_state(), boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", pbrsb_eibrs_state(), spectre_v2_module_string()); } static ssize_t srbds_show_state(char *buf) { return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]); } static ssize_t retbleed_show_state(char *buf) { if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation], !sched_smt_active() ? "disabled" : spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? 
"enabled with STIBP protection" : "vulnerable"); } return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); } static ssize_t srso_show_state(char *buf) { if (boot_cpu_has(X86_FEATURE_SRSO_NO)) return sysfs_emit(buf, "Mitigation: SMT disabled\n"); return sysfs_emit(buf, "%s%s\n", srso_strings[srso_mitigation], boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode"); } static ssize_t gds_show_state(char *buf) { return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); } static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { if (!boot_cpu_has_bug(bug)) return sysfs_emit(buf, "Not affected\n"); switch (bug) { case X86_BUG_CPU_MELTDOWN: if (boot_cpu_has(X86_FEATURE_PTI)) return sysfs_emit(buf, "Mitigation: PTI\n"); if (hypervisor_is_type(X86_HYPER_XEN_PV)) return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); break; case X86_BUG_SPECTRE_V1: return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); case X86_BUG_SPECTRE_V2: return spectre_v2_show_state(buf); case X86_BUG_SPEC_STORE_BYPASS: return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]); case X86_BUG_L1TF: if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) return l1tf_show_state(buf); break; case X86_BUG_MDS: return mds_show_state(buf); case X86_BUG_TAA: return tsx_async_abort_show_state(buf); case X86_BUG_ITLB_MULTIHIT: return itlb_multihit_show_state(buf); case X86_BUG_SRBDS: return srbds_show_state(buf); case X86_BUG_MMIO_STALE_DATA: case X86_BUG_MMIO_UNKNOWN: return mmio_stale_data_show_state(buf); case X86_BUG_RETBLEED: return retbleed_show_state(buf); case X86_BUG_SRSO: return srso_show_state(buf); case X86_BUG_GDS: return gds_show_state(buf); default: break; } return sysfs_emit(buf, "Vulnerable\n"); } ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); } ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); } ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); } ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); } ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); } ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_MDS); } ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_TAA); } ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); } ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); } ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) { if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN); else return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); } ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) { return 
cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); } ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_SRSO); } ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_GDS); } #endif
linux-master
arch/x86/kernel/cpu/bugs.c
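The ssb_prctl_set()/ib_prctl_set() and matching *_get() handlers in bugs.c above are reached from user space via prctl(PR_SET_SPECULATION_CTRL, ...) and prctl(PR_GET_SPECULATION_CTRL, ...), and the cpu_show_*() functions at the end of the file back the per-vulnerability entries under /sys/devices/system/cpu/vulnerabilities/. Below is a minimal user-space sketch of the prctl path, assuming a kernel with these handlers and the PR_SPEC_* constants from <linux/prctl.h>; it reports the Speculative Store Bypass state for the calling task and then requests PR_SPEC_DISABLE.

/*
 * spec_ssb.c - query and restrict Speculative Store Bypass for the calling
 * task through the speculation-control prctl.
 * Build: cc -O2 -o spec_ssb spec_ssb.c
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>
#include <linux/prctl.h>        /* PR_SPEC_* flag values */

static void show_ssb_state(void)
{
        /* Returns a PR_SPEC_* bitmask on success, -1 with errno on failure. */
        long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

        if (state < 0) {
                printf("PR_GET_SPECULATION_CTRL failed: %s\n", strerror(errno));
                return;
        }
        if (state == PR_SPEC_NOT_AFFECTED) {
                printf("SSB: not affected\n");
                return;
        }
        printf("SSB state:%s%s%s%s\n",
               (state & PR_SPEC_PRCTL)         ? " prctl-controlled" : "",
               (state & PR_SPEC_ENABLE)        ? " speculation-enabled" : "",
               (state & PR_SPEC_DISABLE)       ? " speculation-disabled" : "",
               (state & PR_SPEC_FORCE_DISABLE) ? " (force)" : "");
}

int main(void)
{
        show_ssb_state();

        /* Ask the kernel to disable store-bypass speculation for this task. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0) < 0)
                printf("PR_SET_SPECULATION_CTRL failed: %s\n", strerror(errno));

        show_ssb_state();
        return 0;
}

The same calls with PR_SPEC_INDIRECT_BRANCH exercise ib_prctl_set()/ib_prctl_get(), and PR_SPEC_L1D_FLUSH maps to the l1d_flush_prctl_*() handlers; as the code above shows, expect -ENXIO or -EPERM when the boot-time mitigation mode does not allow per-task control.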
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/mm.h> #include <asm/cpufeature.h> #include <asm/msr.h> #include "cpu.h" static void early_init_transmeta(struct cpuinfo_x86 *c) { u32 xlvl; /* Transmeta-defined flags: level 0x80860001 */ xlvl = cpuid_eax(0x80860000); if ((xlvl & 0xffff0000) == 0x80860000) { if (xlvl >= 0x80860001) c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001); } } static void init_transmeta(struct cpuinfo_x86 *c) { unsigned int cap_mask, uk, max, dummy; unsigned int cms_rev1, cms_rev2; unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev; char cpu_info[65]; early_init_transmeta(c); cpu_detect_cache_sizes(c); /* Print CMS and CPU revision */ max = cpuid_eax(0x80860000); cpu_rev = 0; if (max >= 0x80860001) { cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); if (cpu_rev != 0x02000000) { pr_info("CPU: Processor revision %u.%u.%u.%u, %u MHz\n", (cpu_rev >> 24) & 0xff, (cpu_rev >> 16) & 0xff, (cpu_rev >> 8) & 0xff, cpu_rev & 0xff, cpu_freq); } } if (max >= 0x80860002) { cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy); if (cpu_rev == 0x02000000) { pr_info("CPU: Processor revision %08X, %u MHz\n", new_cpu_rev, cpu_freq); } pr_info("CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n", (cms_rev1 >> 24) & 0xff, (cms_rev1 >> 16) & 0xff, (cms_rev1 >> 8) & 0xff, cms_rev1 & 0xff, cms_rev2); } if (max >= 0x80860006) { cpuid(0x80860003, (void *)&cpu_info[0], (void *)&cpu_info[4], (void *)&cpu_info[8], (void *)&cpu_info[12]); cpuid(0x80860004, (void *)&cpu_info[16], (void *)&cpu_info[20], (void *)&cpu_info[24], (void *)&cpu_info[28]); cpuid(0x80860005, (void *)&cpu_info[32], (void *)&cpu_info[36], (void *)&cpu_info[40], (void *)&cpu_info[44]); cpuid(0x80860006, (void *)&cpu_info[48], (void *)&cpu_info[52], (void *)&cpu_info[56], (void *)&cpu_info[60]); cpu_info[64] = '\0'; pr_info("CPU: %s\n", cpu_info); } /* Unhide possibly hidden capability flags */ rdmsr(0x80860004, cap_mask, uk); wrmsr(0x80860004, ~0, uk); c->x86_capability[CPUID_1_EDX] = cpuid_edx(0x00000001); wrmsr(0x80860004, cap_mask, uk); /* All Transmeta CPUs have a constant TSC */ set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); #ifdef CONFIG_SYSCTL /* * randomize_va_space slows us down enormously; * it probably triggers retranslation of x86->native bytecode */ randomize_va_space = 0; #endif } static const struct cpu_dev transmeta_cpu_dev = { .c_vendor = "Transmeta", .c_ident = { "GenuineTMx86", "TransmetaCPU" }, .c_early_init = early_init_transmeta, .c_init = init_transmeta, .c_x86_vendor = X86_VENDOR_TRANSMETA, }; cpu_dev_register(transmeta_cpu_dev);
linux-master
arch/x86/kernel/cpu/transmeta.c
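init_transmeta() above assembles a 64-byte processor information string from CPUID leaves 0x80860003..0x80860006 after validating leaf 0x80860000. A hedged user-space sketch of the same readout follows, using the __cpuid macro from GCC/clang's <cpuid.h>; the leaf numbers are taken from the code above and the output is only meaningful on Transmeta hardware.

/*
 * tm_info.c - read the Transmeta processor info string, mirroring the
 * CPUID sequence in init_transmeta(). Build: cc -O2 -o tm_info tm_info.c
 */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx, leaf;
        char info[65];

        /* Leaf 0x80860000 reports the maximum Transmeta-defined leaf. */
        __cpuid(0x80860000, eax, ebx, ecx, edx);
        if ((eax & 0xffff0000) != 0x80860000 || eax < 0x80860006) {
                puts("No Transmeta CPUID leaves on this CPU");
                return 1;
        }

        /* Leaves 0x80860003..6 each return 16 bytes of the info string. */
        for (leaf = 0; leaf < 4; leaf++) {
                __cpuid(0x80860003 + leaf, eax, ebx, ecx, edx);
                memcpy(info + 16 * leaf +  0, &eax, 4);
                memcpy(info + 16 * leaf +  4, &ebx, 4);
                memcpy(info + 16 * leaf +  8, &ecx, 4);
                memcpy(info + 16 * leaf + 12, &edx, 4);
        }
        info[64] = '\0';
        printf("CPU: %s\n", info);
        return 0;
}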
/* * Common hypervisor code * * Copyright (C) 2008, VMware, Inc. * Author : Alok N Kataria <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/init.h> #include <linux/export.h> #include <asm/processor.h> #include <asm/hypervisor.h> static const __initconst struct hypervisor_x86 * const hypervisors[] = { #ifdef CONFIG_XEN_PV &x86_hyper_xen_pv, #endif #ifdef CONFIG_XEN_PVHVM &x86_hyper_xen_hvm, #endif &x86_hyper_vmware, &x86_hyper_ms_hyperv, #ifdef CONFIG_KVM_GUEST &x86_hyper_kvm, #endif #ifdef CONFIG_JAILHOUSE_GUEST &x86_hyper_jailhouse, #endif #ifdef CONFIG_ACRN_GUEST &x86_hyper_acrn, #endif }; enum x86_hypervisor_type x86_hyper_type; EXPORT_SYMBOL(x86_hyper_type); bool __initdata nopv; static __init int parse_nopv(char *arg) { nopv = true; return 0; } early_param("nopv", parse_nopv); static inline const struct hypervisor_x86 * __init detect_hypervisor_vendor(void) { const struct hypervisor_x86 *h = NULL, * const *p; uint32_t pri, max_pri = 0; for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) { if (unlikely(nopv) && !(*p)->ignore_nopv) continue; pri = (*p)->detect(); if (pri > max_pri) { max_pri = pri; h = *p; } } if (h) pr_info("Hypervisor detected: %s\n", h->name); return h; } static void __init copy_array(const void *src, void *target, unsigned int size) { unsigned int i, n = size / sizeof(void *); const void * const *from = (const void * const *)src; const void **to = (const void **)target; for (i = 0; i < n; i++) if (from[i]) to[i] = from[i]; } void __init init_hypervisor_platform(void) { const struct hypervisor_x86 *h; h = detect_hypervisor_vendor(); if (!h) return; copy_array(&h->init, &x86_init.hyper, sizeof(h->init)); copy_array(&h->runtime, &x86_platform.hyper, sizeof(h->runtime)); x86_hyper_type = h->type; x86_init.hyper.init_platform(); }
linux-master
arch/x86/kernel/cpu/hypervisor.c
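detect_hypervisor_vendor() above picks whichever registered hypervisor's detect() callback returns the highest priority. As a rough user-space approximation of the usual convention those callbacks rely on (an assumption about the common scheme, not a restatement of any particular detect() implementation): CPUID.1:ECX bit 31 advertises that a hypervisor is present, and leaf 0x40000000 returns a vendor signature in EBX/ECX/EDX.

/*
 * hv_sig.c - print the hypervisor vendor signature, if any.
 * Build: cc -O2 -o hv_sig hv_sig.c   (x86, GCC/clang <cpuid.h>)
 */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        char sig[13];

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;

        if (!(ecx & (1u << 31))) {      /* "hypervisor present" bit */
                puts("No hypervisor advertised");
                return 0;
        }

        /* Vendor signature lives in EBX/ECX/EDX of leaf 0x40000000. */
        __cpuid(0x40000000, eax, ebx, ecx, edx);
        memcpy(sig + 0, &ebx, 4);
        memcpy(sig + 4, &ecx, 4);
        memcpy(sig + 8, &edx, 4);
        sig[12] = '\0';
        printf("Hypervisor signature: \"%s\" (max leaf 0x%x)\n", sig, eax);
        return 0;
}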
/* * Routines to identify additional cpu features that are scattered in * cpuid space. */ #include <linux/cpu.h> #include <asm/memtype.h> #include <asm/apic.h> #include <asm/processor.h> #include "cpu.h" struct cpuid_bit { u16 feature; u8 reg; u8 bit; u32 level; u32 sub_leaf; }; /* * Please keep the leaf sorted by cpuid_bit.level for faster search. * X86_FEATURE_MBA is supported by both Intel and AMD. But the CPUID * levels are different and there is a separate entry for each. */ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, { X86_FEATURE_INTEL_PPIN, CPUID_EBX, 0, 0x00000007, 1 }, { X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 }, { X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 }, { X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 }, { X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 }, { X86_FEATURE_CQM_MBM_LOCAL, CPUID_EDX, 2, 0x0000000f, 1 }, { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 }, { X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 }, { X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 }, { X86_FEATURE_CDP_L2, CPUID_ECX, 2, 0x00000010, 2 }, { X86_FEATURE_MBA, CPUID_EBX, 3, 0x00000010, 0 }, { X86_FEATURE_PER_THREAD_MBA, CPUID_ECX, 0, 0x00000010, 3 }, { X86_FEATURE_SGX1, CPUID_EAX, 0, 0x00000012, 0 }, { X86_FEATURE_SGX2, CPUID_EAX, 1, 0x00000012, 0 }, { X86_FEATURE_SGX_EDECCSSA, CPUID_EAX, 11, 0x00000012, 0 }, { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 }, { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 }, { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 }, { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, { X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 }, { X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 }, { X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 }, { X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 }, { 0, 0, 0, 0, 0 } }; void init_scattered_cpuid_features(struct cpuinfo_x86 *c) { u32 max_level; u32 regs[4]; const struct cpuid_bit *cb; for (cb = cpuid_bits; cb->feature; cb++) { /* Verify that the level is valid */ max_level = cpuid_eax(cb->level & 0xffff0000); if (max_level < cb->level || max_level > (cb->level | 0xffff)) continue; cpuid_count(cb->level, cb->sub_leaf, &regs[CPUID_EAX], &regs[CPUID_EBX], &regs[CPUID_ECX], &regs[CPUID_EDX]); if (regs[cb->reg] & (1 << cb->bit)) set_cpu_cap(c, cb->feature); } }
linux-master
arch/x86/kernel/cpu/scattered.c
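Each cpuid_bits[] entry above names a (leaf, sub-leaf, register, bit) tuple, and init_scattered_cpuid_features() first range-checks the leaf before testing the bit. The same probe can be done from user space with __cpuid_count(); the sketch below uses one entry, X86_FEATURE_EPB (leaf 0x6, ECX bit 3), purely as an illustration, and cpuid_bit_set() is a hypothetical helper, not a kernel API.

/*
 * epb_probe.c - check CPUID.06H:ECX[3] (Energy Performance Bias), mirroring
 * the { leaf, sub-leaf, reg, bit } lookup in cpuid_bits[].
 * Build: cc -O2 -o epb_probe epb_probe.c
 */
#include <stdio.h>
#include <cpuid.h>

static int cpuid_bit_set(unsigned int leaf, unsigned int subleaf,
                         int reg /* 0=eax 1=ebx 2=ecx 3=edx */, int bit)
{
        unsigned int r[4];

        /* Leaf must not exceed the maximum supported leaf in its range. */
        if (__get_cpuid_max(leaf & 0x80000000, 0) < leaf)
                return 0;

        __cpuid_count(leaf, subleaf, r[0], r[1], r[2], r[3]);
        return (r[reg] >> bit) & 1;
}

int main(void)
{
        printf("EPB (CPUID.06H:ECX[3]): %s\n",
               cpuid_bit_set(0x6, 0, 2, 3) ? "present" : "absent");
        return 0;
}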
// SPDX-License-Identifier: GPL-2.0 /* * Strings for the various x86 power flags * * This file must not contain any executable code. */ #include <asm/cpufeature.h> const char *const x86_power_flags[32] = { "ts", /* temperature sensor */ "fid", /* frequency id control */ "vid", /* voltage id control */ "ttp", /* thermal trip */ "tm", /* hardware thermal control */ "stc", /* software thermal control */ "100mhzsteps", /* 100 MHz multiplier control */ "hwpstate", /* hardware P-state control */ "", /* tsc invariant mapped to constant_tsc */ "cpb", /* core performance boost */ "eff_freq_ro", /* Readonly aperf/mperf */ "proc_feedback", /* processor feedback interface */ "acc_power", /* accumulated power mechanism */ };
linux-master
arch/x86/kernel/cpu/powerflags.c
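The x86_power_flags[] table above is indexed by bit position of the advanced power management word, which the kernel fills from CPUID leaf 0x80000007 EDX and reports in /proc/cpuinfo under "power management". A small hedged user-space decoder along the same lines, reusing the table's names:

/*
 * powerflags_dump.c - decode CPUID 0x80000007 EDX with the same names as the
 * kernel's x86_power_flags[] table. Build: cc -O2 -o powerflags_dump powerflags_dump.c
 */
#include <stdio.h>
#include <cpuid.h>

static const char *const power_flags[32] = {
        "ts", "fid", "vid", "ttp", "tm", "stc", "100mhzsteps", "hwpstate",
        "",     /* invariant TSC; the kernel maps it to constant_tsc */
        "cpb", "eff_freq_ro", "proc_feedback", "acc_power",
};

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        int bit;

        if (__get_cpuid_max(0x80000000, 0) < 0x80000007)
                return 1;

        __cpuid(0x80000007, eax, ebx, ecx, edx);
        printf("power management:");
        for (bit = 0; bit < 32; bit++) {
                if (!((edx >> bit) & 1))
                        continue;
                if (power_flags[bit] && power_flags[bit][0])
                        printf(" %s", power_flags[bit]);
                else
                        printf(" [%d]", bit);  /* unnamed bit, print its index */
        }
        printf("\n");
        return 0;
}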
// SPDX-License-Identifier: GPL-2.0 /* * Intel PCONFIG instruction support. * * Copyright (C) 2017 Intel Corporation * * Author: * Kirill A. Shutemov <[email protected]> */ #include <asm/cpufeature.h> #include <asm/intel_pconfig.h> #define PCONFIG_CPUID 0x1b #define PCONFIG_CPUID_SUBLEAF_MASK ((1 << 12) - 1) /* Subleaf type (EAX) for PCONFIG CPUID leaf (0x1B) */ enum { PCONFIG_CPUID_SUBLEAF_INVALID = 0, PCONFIG_CPUID_SUBLEAF_TARGETID = 1, }; /* Bitmask of supported targets */ static u64 targets_supported __read_mostly; int pconfig_target_supported(enum pconfig_target target) { /* * We would need to re-think the implementation once we get > 64 * PCONFIG targets. Spec allows up to 2^32 targets. */ BUILD_BUG_ON(PCONFIG_TARGET_NR >= 64); if (WARN_ON_ONCE(target >= 64)) return 0; return targets_supported & (1ULL << target); } static int __init intel_pconfig_init(void) { int subleaf; if (!boot_cpu_has(X86_FEATURE_PCONFIG)) return 0; /* * Scan subleafs of PCONFIG CPUID leaf. * * Subleafs of the same type need not to be consecutive. * * Stop on the first invalid subleaf type. All subleafs after the first * invalid are invalid too. */ for (subleaf = 0; subleaf < INT_MAX; subleaf++) { struct cpuid_regs regs; cpuid_count(PCONFIG_CPUID, subleaf, &regs.eax, &regs.ebx, &regs.ecx, &regs.edx); switch (regs.eax & PCONFIG_CPUID_SUBLEAF_MASK) { case PCONFIG_CPUID_SUBLEAF_INVALID: /* Stop on the first invalid subleaf */ goto out; case PCONFIG_CPUID_SUBLEAF_TARGETID: /* Mark supported PCONFIG targets */ if (regs.ebx < 64) targets_supported |= (1ULL << regs.ebx); if (regs.ecx < 64) targets_supported |= (1ULL << regs.ecx); if (regs.edx < 64) targets_supported |= (1ULL << regs.edx); break; default: /* Unknown CPUID.PCONFIG subleaf: ignore */ break; } } out: return 0; } arch_initcall(intel_pconfig_init);
linux-master
arch/x86/kernel/cpu/intel_pconfig.c
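pconfig_target_supported() is the only query the file above exports, so an in-kernel user would consult the bitmask before issuing the PCONFIG instruction. A minimal hedged sketch of such a caller follows; mktme_pconfig_usable() is hypothetical, and PCONFIG_TARGET_MKTME is assumed to be the MKTME enumerator in <asm/intel_pconfig.h>.

/* Hypothetical in-kernel caller sketch (not part of the file above). */
#include <linux/types.h>
#include <asm/intel_pconfig.h>

static bool mktme_pconfig_usable(void)
{
        /* targets_supported is populated by intel_pconfig_init() at arch_initcall time. */
        return pconfig_target_supported(PCONFIG_TARGET_MKTME);
}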
// SPDX-License-Identifier: GPL-2.0-only /* * This file is part of the Linux kernel. * * Copyright (c) 2011, Intel Corporation * Authors: Fenghua Yu <[email protected]>, * H. Peter Anvin <[email protected]> */ #include <asm/processor.h> #include <asm/archrandom.h> #include <asm/sections.h> /* * RDRAND has Built-In-Self-Test (BIST) that runs on every invocation. * Run the instruction a few times as a sanity check. Also make sure * it's not outputting the same value over and over, which has happened * as a result of past CPU bugs. * * If it fails, it is simple to disable RDRAND and RDSEED here. */ void x86_init_rdrand(struct cpuinfo_x86 *c) { enum { SAMPLES = 8, MIN_CHANGE = 5 }; unsigned long sample, prev; bool failure = false; size_t i, changed; if (!cpu_has(c, X86_FEATURE_RDRAND)) return; for (changed = 0, i = 0; i < SAMPLES; ++i) { if (!rdrand_long(&sample)) { failure = true; break; } changed += i && sample != prev; prev = sample; } if (changed < MIN_CHANGE) failure = true; if (failure) { clear_cpu_cap(c, X86_FEATURE_RDRAND); clear_cpu_cap(c, X86_FEATURE_RDSEED); pr_emerg("RDRAND is not reliable on this platform; disabling.\n"); } }
linux-master
arch/x86/kernel/cpu/rdrand.c
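The BIST-style sanity check in x86_init_rdrand() above can be reproduced in user space with the RDRAND intrinsic. A sketch, assuming a compiler providing <immintrin.h> and built with -mrdrnd; the SAMPLES/MIN_CHANGE thresholds are copied from the kernel code, not mandated by hardware, and the program assumes the CPU actually has RDRAND (otherwise the instruction traps).

/*
 * rdrand_check.c - sample RDRAND a few times and require the value to change,
 * mirroring x86_init_rdrand().
 * Build: cc -O2 -mrdrnd -o rdrand_check rdrand_check.c   (x86-64)
 */
#include <stdio.h>
#include <immintrin.h>

int main(void)
{
        enum { SAMPLES = 8, MIN_CHANGE = 5 };
        unsigned long long sample, prev = 0;
        int i, changed = 0, ok = 1;

        for (i = 0; i < SAMPLES; i++) {
                /* _rdrand64_step() returns 0 when the DRNG has no data ready. */
                if (!_rdrand64_step(&sample)) {
                        ok = 0;
                        break;
                }
                changed += (i && sample != prev);
                prev = sample;
        }

        if (!ok || changed < MIN_CHANGE)
                puts("RDRAND looks unreliable on this CPU");
        else
                puts("RDRAND passed the basic sanity check");
        return 0;
}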
// SPDX-License-Identifier: GPL-2.0 #include <linux/tboot.h> #include <asm/cpu.h> #include <asm/cpufeature.h> #include <asm/msr-index.h> #include <asm/processor.h> #include <asm/vmx.h> #undef pr_fmt #define pr_fmt(fmt) "x86/cpu: " fmt #ifdef CONFIG_X86_VMX_FEATURE_NAMES enum vmx_feature_leafs { MISC_FEATURES = 0, PRIMARY_CTLS, SECONDARY_CTLS, TERTIARY_CTLS_LOW, TERTIARY_CTLS_HIGH, NR_VMX_FEATURE_WORDS, }; #define VMX_F(x) BIT(VMX_FEATURE_##x & 0x1f) static void init_vmx_capabilities(struct cpuinfo_x86 *c) { u32 supported, funcs, ept, vpid, ign, low, high; BUILD_BUG_ON(NVMXINTS != NR_VMX_FEATURE_WORDS); /* * The high bits contain the allowed-1 settings, i.e. features that can * be turned on. The low bits contain the allowed-0 settings, i.e. * features that can be turned off. Ignore the allowed-0 settings, * if a feature can be turned on then it's supported. * * Use raw rdmsr() for primary processor controls and pin controls MSRs * as they exist on any CPU that supports VMX, i.e. we want the WARN if * the RDMSR faults. */ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, ign, supported); c->vmx_capability[PRIMARY_CTLS] = supported; rdmsr_safe(MSR_IA32_VMX_PROCBASED_CTLS2, &ign, &supported); c->vmx_capability[SECONDARY_CTLS] = supported; /* All 64 bits of tertiary controls MSR are allowed-1 settings. */ rdmsr_safe(MSR_IA32_VMX_PROCBASED_CTLS3, &low, &high); c->vmx_capability[TERTIARY_CTLS_LOW] = low; c->vmx_capability[TERTIARY_CTLS_HIGH] = high; rdmsr(MSR_IA32_VMX_PINBASED_CTLS, ign, supported); rdmsr_safe(MSR_IA32_VMX_VMFUNC, &ign, &funcs); /* * Except for EPT+VPID, which enumerates support for both in a single * MSR, low for EPT, high for VPID. */ rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP, &ept, &vpid); /* Pin, EPT, VPID and VM-Func are merged into a single word. */ WARN_ON_ONCE(supported >> 16); WARN_ON_ONCE(funcs >> 4); c->vmx_capability[MISC_FEATURES] = (supported & 0xffff) | ((vpid & 0x1) << 16) | ((funcs & 0xf) << 28); /* EPT bits are full on scattered and must be manually handled. */ if (ept & VMX_EPT_EXECUTE_ONLY_BIT) c->vmx_capability[MISC_FEATURES] |= VMX_F(EPT_EXECUTE_ONLY); if (ept & VMX_EPT_AD_BIT) c->vmx_capability[MISC_FEATURES] |= VMX_F(EPT_AD); if (ept & VMX_EPT_1GB_PAGE_BIT) c->vmx_capability[MISC_FEATURES] |= VMX_F(EPT_1GB); /* Synthetic APIC features that are aggregates of multiple features. */ if ((c->vmx_capability[PRIMARY_CTLS] & VMX_F(VIRTUAL_TPR)) && (c->vmx_capability[SECONDARY_CTLS] & VMX_F(VIRT_APIC_ACCESSES))) c->vmx_capability[MISC_FEATURES] |= VMX_F(FLEXPRIORITY); if ((c->vmx_capability[PRIMARY_CTLS] & VMX_F(VIRTUAL_TPR)) && (c->vmx_capability[SECONDARY_CTLS] & VMX_F(APIC_REGISTER_VIRT)) && (c->vmx_capability[SECONDARY_CTLS] & VMX_F(VIRT_INTR_DELIVERY)) && (c->vmx_capability[MISC_FEATURES] & VMX_F(POSTED_INTR))) c->vmx_capability[MISC_FEATURES] |= VMX_F(APICV); /* Set the synthetic cpufeatures to preserve /proc/cpuinfo's ABI. 
*/ if (c->vmx_capability[PRIMARY_CTLS] & VMX_F(VIRTUAL_TPR)) set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); if (c->vmx_capability[MISC_FEATURES] & VMX_F(FLEXPRIORITY)) set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); if (c->vmx_capability[MISC_FEATURES] & VMX_F(VIRTUAL_NMIS)) set_cpu_cap(c, X86_FEATURE_VNMI); if (c->vmx_capability[SECONDARY_CTLS] & VMX_F(EPT)) set_cpu_cap(c, X86_FEATURE_EPT); if (c->vmx_capability[MISC_FEATURES] & VMX_F(EPT_AD)) set_cpu_cap(c, X86_FEATURE_EPT_AD); if (c->vmx_capability[MISC_FEATURES] & VMX_F(VPID)) set_cpu_cap(c, X86_FEATURE_VPID); } #endif /* CONFIG_X86_VMX_FEATURE_NAMES */ static int __init nosgx(char *str) { setup_clear_cpu_cap(X86_FEATURE_SGX); return 0; } early_param("nosgx", nosgx); void init_ia32_feat_ctl(struct cpuinfo_x86 *c) { bool enable_sgx_kvm = false, enable_sgx_driver = false; bool tboot = tboot_enabled(); bool enable_vmx; u64 msr; if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) { clear_cpu_cap(c, X86_FEATURE_VMX); clear_cpu_cap(c, X86_FEATURE_SGX); return; } enable_vmx = cpu_has(c, X86_FEATURE_VMX) && IS_ENABLED(CONFIG_KVM_INTEL); if (cpu_has(c, X86_FEATURE_SGX) && IS_ENABLED(CONFIG_X86_SGX)) { /* * Separate out SGX driver enabling from KVM. This allows KVM * guests to use SGX even if the kernel SGX driver refuses to * use it. This happens if flexible Launch Control is not * available. */ enable_sgx_driver = cpu_has(c, X86_FEATURE_SGX_LC); enable_sgx_kvm = enable_vmx && IS_ENABLED(CONFIG_X86_SGX_KVM); } if (msr & FEAT_CTL_LOCKED) goto update_caps; /* * Ignore whatever value BIOS left in the MSR to avoid enabling random * features or faulting on the WRMSR. */ msr = FEAT_CTL_LOCKED; /* * Enable VMX if and only if the kernel may do VMXON at some point, * i.e. KVM is enabled, to avoid unnecessarily adding an attack vector * for the kernel, e.g. using VMX to hide malicious code. */ if (enable_vmx) { msr |= FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; if (tboot) msr |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX; } if (enable_sgx_kvm || enable_sgx_driver) { msr |= FEAT_CTL_SGX_ENABLED; if (enable_sgx_driver) msr |= FEAT_CTL_SGX_LC_ENABLED; } wrmsrl(MSR_IA32_FEAT_CTL, msr); update_caps: set_cpu_cap(c, X86_FEATURE_MSR_IA32_FEAT_CTL); if (!cpu_has(c, X86_FEATURE_VMX)) goto update_sgx; if ( (tboot && !(msr & FEAT_CTL_VMX_ENABLED_INSIDE_SMX)) || (!tboot && !(msr & FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX))) { if (IS_ENABLED(CONFIG_KVM_INTEL)) pr_err_once("VMX (%s TXT) disabled by BIOS\n", tboot ? "inside" : "outside"); clear_cpu_cap(c, X86_FEATURE_VMX); } else { #ifdef CONFIG_X86_VMX_FEATURE_NAMES init_vmx_capabilities(c); #endif } update_sgx: if (!(msr & FEAT_CTL_SGX_ENABLED)) { if (enable_sgx_kvm || enable_sgx_driver) pr_err_once("SGX disabled by BIOS.\n"); clear_cpu_cap(c, X86_FEATURE_SGX); return; } /* * VMX feature bit may be cleared due to being disabled in BIOS, * in which case SGX virtualization cannot be supported either. */ if (!cpu_has(c, X86_FEATURE_VMX) && enable_sgx_kvm) { pr_err_once("SGX virtualization disabled due to lack of VMX.\n"); enable_sgx_kvm = 0; } if (!(msr & FEAT_CTL_SGX_LC_ENABLED) && enable_sgx_driver) { if (!enable_sgx_kvm) { pr_err_once("SGX Launch Control is locked. Disable SGX.\n"); clear_cpu_cap(c, X86_FEATURE_SGX); } else { pr_err_once("SGX Launch Control is locked. Support SGX virtualization only.\n"); clear_cpu_cap(c, X86_FEATURE_SGX_LC); } } }
linux-master
arch/x86/kernel/cpu/feat_ctl.c
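The lock and enable bits that init_ia32_feat_ctl() programs can be inspected from user space once the msr driver is loaded. Below is a minimal, illustrative sketch (not part of the kernel sources): it assumes root access to /dev/cpu/0/msr and takes the bit positions (lock = 0, VMX inside/outside SMX = 1/2, SGX launch control = 17, SGX = 18, LMCE = 20) from asm/msr-index.h; treat those bit numbers as assumptions to verify there.

/* Minimal userspace sketch: read IA32_FEAT_CTL (MSR 0x3a) through the msr
 * driver and decode the bits that init_ia32_feat_ctl() cares about.
 * Requires root and a loaded msr.ko; illustrative only. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_FEAT_CTL 0x3a

int main(void)
{
	uint64_t msr;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &msr, sizeof(msr), MSR_IA32_FEAT_CTL) != sizeof(msr)) {
		perror("rdmsr IA32_FEAT_CTL");
		return 1;
	}

	printf("locked:             %llu\n", (unsigned long long)(msr & 1));
	printf("VMX inside SMX:     %llu\n", (unsigned long long)((msr >> 1) & 1));
	printf("VMX outside SMX:    %llu\n", (unsigned long long)((msr >> 2) & 1));
	printf("SGX launch control: %llu\n", (unsigned long long)((msr >> 17) & 1));
	printf("SGX enabled:        %llu\n", (unsigned long long)((msr >> 18) & 1));
	printf("LMCE enabled:       %llu\n", (unsigned long long)((msr >> 20) & 1));

	close(fd);
	return 0;
}

Once the lock bit is set, only a reboot (and BIOS) can change the remaining bits, which is why the kernel writes and locks the MSR itself when firmware left it unlocked.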
// SPDX-License-Identifier: GPL-2.0 /* * Intel specific MCE features. * Copyright 2004 Zwane Mwaikambo <[email protected]> * Copyright (C) 2008, 2009 Intel Corporation * Author: Andi Kleen */ #include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/percpu.h> #include <linux/sched.h> #include <linux/cpumask.h> #include <asm/apic.h> #include <asm/cpufeature.h> #include <asm/intel-family.h> #include <asm/processor.h> #include <asm/msr.h> #include <asm/mce.h> #include "internal.h" /* * Support for Intel Correct Machine Check Interrupts. This allows * the CPU to raise an interrupt when a corrected machine check happened. * Normally we pick those up using a regular polling timer. * Also supports reliable discovery of shared banks. */ /* * CMCI can be delivered to multiple cpus that share a machine check bank * so we need to designate a single cpu to process errors logged in each bank * in the interrupt handler (otherwise we would have many races and potential * double reporting of the same error). * Note that this can change when a cpu is offlined or brought online since * some MCA banks are shared across cpus. When a cpu is offlined, cmci_clear() * disables CMCI on all banks owned by the cpu and clears this bitfield. At * this point, cmci_rediscover() kicks in and a different cpu may end up * taking ownership of some of the shared MCA banks that were previously * owned by the offlined cpu. */ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); /* * CMCI storm detection backoff counter * * During storm, we reset this counter to INITIAL_CHECK_INTERVAL in case we've * encountered an error. If not, we decrement it by one. We signal the end of * the CMCI storm when it reaches 0. */ static DEFINE_PER_CPU(int, cmci_backoff_cnt); /* * cmci_discover_lock protects against parallel discovery attempts * which could race against each other. */ static DEFINE_RAW_SPINLOCK(cmci_discover_lock); /* * On systems that do support CMCI but it's disabled, polling for MCEs can * cause the same event to be reported multiple times because IA32_MCi_STATUS * is shared by the same package. */ static DEFINE_SPINLOCK(cmci_poll_lock); #define CMCI_THRESHOLD 1 #define CMCI_POLL_INTERVAL (30 * HZ) #define CMCI_STORM_INTERVAL (HZ) #define CMCI_STORM_THRESHOLD 15 static DEFINE_PER_CPU(unsigned long, cmci_time_stamp); static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt); static DEFINE_PER_CPU(unsigned int, cmci_storm_state); enum { CMCI_STORM_NONE, CMCI_STORM_ACTIVE, CMCI_STORM_SUBSIDED, }; static atomic_t cmci_storm_on_cpus; static int cmci_supported(int *banks) { u64 cap; if (mca_cfg.cmci_disabled || mca_cfg.ignore_ce) return 0; /* * Vendor check is not strictly needed, but the initial * initialization is vendor keyed and this * makes sure none of the backdoors are entered otherwise. */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) return 0; if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6) return 0; rdmsrl(MSR_IA32_MCG_CAP, cap); *banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff); return !!(cap & MCG_CMCI_P); } static bool lmce_supported(void) { u64 tmp; if (mca_cfg.lmce_disabled) return false; rdmsrl(MSR_IA32_MCG_CAP, tmp); /* * LMCE depends on recovery support in the processor. Hence both * MCG_SER_P and MCG_LMCE_P should be present in MCG_CAP. 
*/ if ((tmp & (MCG_SER_P | MCG_LMCE_P)) != (MCG_SER_P | MCG_LMCE_P)) return false; /* * BIOS should indicate support for LMCE by setting bit 20 in * IA32_FEAT_CTL without which touching MCG_EXT_CTL will generate a #GP * fault. The MSR must also be locked for LMCE_ENABLED to take effect. * WARN if the MSR isn't locked as init_ia32_feat_ctl() unconditionally * locks the MSR in the event that it wasn't already locked by BIOS. */ rdmsrl(MSR_IA32_FEAT_CTL, tmp); if (WARN_ON_ONCE(!(tmp & FEAT_CTL_LOCKED))) return false; return tmp & FEAT_CTL_LMCE_ENABLED; } bool mce_intel_cmci_poll(void) { if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) return false; /* * Reset the counter if we've logged an error in the last poll * during the storm. */ if (machine_check_poll(0, this_cpu_ptr(&mce_banks_owned))) this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL); else this_cpu_dec(cmci_backoff_cnt); return true; } void mce_intel_hcpu_update(unsigned long cpu) { if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE) atomic_dec(&cmci_storm_on_cpus); per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE; } static void cmci_toggle_interrupt_mode(bool on) { unsigned long flags, *owned; int bank; u64 val; raw_spin_lock_irqsave(&cmci_discover_lock, flags); owned = this_cpu_ptr(mce_banks_owned); for_each_set_bit(bank, owned, MAX_NR_BANKS) { rdmsrl(MSR_IA32_MCx_CTL2(bank), val); if (on) val |= MCI_CTL2_CMCI_EN; else val &= ~MCI_CTL2_CMCI_EN; wrmsrl(MSR_IA32_MCx_CTL2(bank), val); } raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); } unsigned long cmci_intel_adjust_timer(unsigned long interval) { if ((this_cpu_read(cmci_backoff_cnt) > 0) && (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) { mce_notify_irq(); return CMCI_STORM_INTERVAL; } switch (__this_cpu_read(cmci_storm_state)) { case CMCI_STORM_ACTIVE: /* * We switch back to interrupt mode once the poll timer has * silenced itself. That means no events recorded and the timer * interval is back to our poll interval. */ __this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED); if (!atomic_sub_return(1, &cmci_storm_on_cpus)) pr_notice("CMCI storm subsided: switching to interrupt mode\n"); fallthrough; case CMCI_STORM_SUBSIDED: /* * We wait for all CPUs to go back to SUBSIDED state. When that * happens we switch back to interrupt mode. */ if (!atomic_read(&cmci_storm_on_cpus)) { __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE); cmci_toggle_interrupt_mode(true); cmci_recheck(); } return CMCI_POLL_INTERVAL; default: /* We have shiny weather. Let the poll do whatever it thinks. */ return interval; } } static bool cmci_storm_detect(void) { unsigned int cnt = __this_cpu_read(cmci_storm_cnt); unsigned long ts = __this_cpu_read(cmci_time_stamp); unsigned long now = jiffies; int r; if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE) return true; if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) { cnt++; } else { cnt = 1; __this_cpu_write(cmci_time_stamp, now); } __this_cpu_write(cmci_storm_cnt, cnt); if (cnt <= CMCI_STORM_THRESHOLD) return false; cmci_toggle_interrupt_mode(false); __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE); r = atomic_add_return(1, &cmci_storm_on_cpus); mce_timer_kick(CMCI_STORM_INTERVAL); this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL); if (r == 1) pr_notice("CMCI storm detected: switching to poll mode\n"); return true; } /* * The interrupt handler. This is called on every event. * Just call the poller directly to log any events. 
* This could in theory increase the threshold under high load, * but doesn't for now. */ static void intel_threshold_interrupt(void) { if (cmci_storm_detect()) return; machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); } /* * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks * on this CPU. Use the algorithm recommended in the SDM to discover shared * banks. */ static void cmci_discover(int banks) { unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned); unsigned long flags; int i; int bios_wrong_thresh = 0; raw_spin_lock_irqsave(&cmci_discover_lock, flags); for (i = 0; i < banks; i++) { u64 val; int bios_zero_thresh = 0; if (test_bit(i, owned)) continue; /* Skip banks in firmware first mode */ if (test_bit(i, mce_banks_ce_disabled)) continue; rdmsrl(MSR_IA32_MCx_CTL2(i), val); /* Already owned by someone else? */ if (val & MCI_CTL2_CMCI_EN) { clear_bit(i, owned); __clear_bit(i, this_cpu_ptr(mce_poll_banks)); continue; } if (!mca_cfg.bios_cmci_threshold) { val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK; val |= CMCI_THRESHOLD; } else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) { /* * If bios_cmci_threshold boot option was specified * but the threshold is zero, we'll try to initialize * it to 1. */ bios_zero_thresh = 1; val |= CMCI_THRESHOLD; } val |= MCI_CTL2_CMCI_EN; wrmsrl(MSR_IA32_MCx_CTL2(i), val); rdmsrl(MSR_IA32_MCx_CTL2(i), val); /* Did the enable bit stick? -- the bank supports CMCI */ if (val & MCI_CTL2_CMCI_EN) { set_bit(i, owned); __clear_bit(i, this_cpu_ptr(mce_poll_banks)); /* * We are able to set thresholds for some banks that * had a threshold of 0. This means the BIOS has not * set the thresholds properly or does not work with * this boot option. Note down now and report later. */ if (mca_cfg.bios_cmci_threshold && bios_zero_thresh && (val & MCI_CTL2_CMCI_THRESHOLD_MASK)) bios_wrong_thresh = 1; } else { WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks))); } } raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) { pr_info_once( "bios_cmci_threshold: Some banks do not have valid thresholds set\n"); pr_info_once( "bios_cmci_threshold: Make sure your BIOS supports this boot option\n"); } } /* * Just in case we missed an event during initialization check * all the CMCI owned banks. */ void cmci_recheck(void) { unsigned long flags; int banks; if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) return; local_irq_save(flags); machine_check_poll(0, this_cpu_ptr(&mce_banks_owned)); local_irq_restore(flags); } /* Caller must hold the lock on cmci_discover_lock */ static void __cmci_disable_bank(int bank) { u64 val; if (!test_bit(bank, this_cpu_ptr(mce_banks_owned))) return; rdmsrl(MSR_IA32_MCx_CTL2(bank), val); val &= ~MCI_CTL2_CMCI_EN; wrmsrl(MSR_IA32_MCx_CTL2(bank), val); __clear_bit(bank, this_cpu_ptr(mce_banks_owned)); } /* * Disable CMCI on this CPU for all banks it owns when it goes down. * This allows other CPUs to claim the banks on rediscovery. 
*/ void cmci_clear(void) { unsigned long flags; int i; int banks; if (!cmci_supported(&banks)) return; raw_spin_lock_irqsave(&cmci_discover_lock, flags); for (i = 0; i < banks; i++) __cmci_disable_bank(i); raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); } static void cmci_rediscover_work_func(void *arg) { int banks; /* Recheck banks in case CPUs don't all have the same */ if (cmci_supported(&banks)) cmci_discover(banks); } /* After a CPU went down cycle through all the others and rediscover */ void cmci_rediscover(void) { int banks; if (!cmci_supported(&banks)) return; on_each_cpu(cmci_rediscover_work_func, NULL, 1); } /* * Reenable CMCI on this CPU in case a CPU down failed. */ void cmci_reenable(void) { int banks; if (cmci_supported(&banks)) cmci_discover(banks); } void cmci_disable_bank(int bank) { int banks; unsigned long flags; if (!cmci_supported(&banks)) return; raw_spin_lock_irqsave(&cmci_discover_lock, flags); __cmci_disable_bank(bank); raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); } /* Bank polling function when CMCI is disabled. */ static void cmci_mc_poll_banks(void) { spin_lock(&cmci_poll_lock); machine_check_poll(0, this_cpu_ptr(&mce_poll_banks)); spin_unlock(&cmci_poll_lock); } void intel_init_cmci(void) { int banks; if (!cmci_supported(&banks)) { mc_poll_banks = cmci_mc_poll_banks; return; } mce_threshold_vector = intel_threshold_interrupt; cmci_discover(banks); /* * For CPU #0 this runs with still disabled APIC, but that's * ok because only the vector is set up. We still do another * check for the banks later for CPU #0 just to make sure * to not miss any events. */ apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED); cmci_recheck(); } void intel_init_lmce(void) { u64 val; if (!lmce_supported()) return; rdmsrl(MSR_IA32_MCG_EXT_CTL, val); if (!(val & MCG_EXT_CTL_LMCE_EN)) wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN); } void intel_clear_lmce(void) { u64 val; if (!lmce_supported()) return; rdmsrl(MSR_IA32_MCG_EXT_CTL, val); val &= ~MCG_EXT_CTL_LMCE_EN; wrmsrl(MSR_IA32_MCG_EXT_CTL, val); } /* * Enable additional error logs from the integrated * memory controller on processors that support this. */ static void intel_imc_init(struct cpuinfo_x86 *c) { u64 error_control; switch (c->x86_model) { case INTEL_FAM6_SANDYBRIDGE_X: case INTEL_FAM6_IVYBRIDGE_X: case INTEL_FAM6_HASWELL_X: if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control)) return; error_control |= 2; wrmsrl_safe(MSR_ERROR_CONTROL, error_control); break; } } void mce_intel_feature_init(struct cpuinfo_x86 *c) { intel_init_cmci(); intel_init_lmce(); intel_imc_init(c); } void mce_intel_feature_clear(struct cpuinfo_x86 *c) { intel_clear_lmce(); } bool intel_filter_mce(struct mce *m) { struct cpuinfo_x86 *c = &boot_cpu_data; /* MCE errata HSD131, HSM142, HSW131, BDM48, HSM142 and SKX37 */ if ((c->x86 == 6) && ((c->x86_model == INTEL_FAM6_HASWELL) || (c->x86_model == INTEL_FAM6_HASWELL_L) || (c->x86_model == INTEL_FAM6_BROADWELL) || (c->x86_model == INTEL_FAM6_HASWELL_G) || (c->x86_model == INTEL_FAM6_SKYLAKE_X)) && (m->bank == 0) && ((m->status & 0xa0000000ffffffff) == 0x80000000000f0005)) return true; return false; }
linux-master
arch/x86/kernel/cpu/mce/intel.c
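The storm heuristic in cmci_storm_detect() is a sliding one-second window with a count threshold. The sketch below re-implements that window as a standalone userspace program, with time(2) standing in for jiffies and the per-CPU state, locking and MSR writes deliberately omitted; it is illustrative only, not kernel code.

/* Standalone sketch of the CMCI storm heuristic: count interrupts inside a
 * one-second window and declare a storm once more than
 * CMCI_STORM_THRESHOLD arrive within that window. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define CMCI_STORM_THRESHOLD 15

static unsigned int storm_cnt;
static time_t storm_stamp;
static bool storm_active;

static bool storm_detect(time_t now)
{
	if (storm_active)
		return true;

	if (now <= storm_stamp + 1) {	/* still inside the window */
		storm_cnt++;
	} else {			/* window expired: restart the count */
		storm_cnt = 1;
		storm_stamp = now;
	}

	if (storm_cnt <= CMCI_STORM_THRESHOLD)
		return false;

	storm_active = true;		/* the kernel now masks CMCI and polls */
	return true;
}

int main(void)
{
	time_t now = time(NULL);

	for (int i = 0; i < 20; i++)	/* a burst of events in one window */
		printf("event %2d -> storm=%d\n", i, storm_detect(now));
	return 0;
}

In the real handler, declaring a storm also disables the CMCI enable bit on all owned banks and kicks the poll timer; the reverse transition happens in cmci_intel_adjust_timer() once the backoff counter has drained.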
// SPDX-License-Identifier: GPL-2.0 /* * IDT Winchip specific Machine Check Exception Reporting * (C) Copyright 2002 Alan Cox <[email protected]> */ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/hardirq.h> #include <asm/processor.h> #include <asm/traps.h> #include <asm/tlbflush.h> #include <asm/mce.h> #include <asm/msr.h> #include "internal.h" /* Machine check handler for WinChip C6: */ noinstr void winchip_machine_check(struct pt_regs *regs) { instrumentation_begin(); pr_emerg("CPU0: Machine Check Exception.\n"); add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); instrumentation_end(); } /* Set up machine check reporting on the Winchip C6 series */ void winchip_mcheck_init(struct cpuinfo_x86 *c) { u32 lo, hi; rdmsr(MSR_IDT_FCR1, lo, hi); lo |= (1<<2); /* Enable EIERRINT (int 18 MCE) */ lo &= ~(1<<4); /* Enable MCE */ wrmsr(MSR_IDT_FCR1, lo, hi); cr4_set_bits(X86_CR4_MCE); pr_info("Winchip machine check reporting enabled on CPU#0.\n"); }
linux-master
arch/x86/kernel/cpu/mce/winchip.c
// SPDX-License-Identifier: GPL-2.0 /* * P5 specific Machine Check Exception Reporting * (C) Copyright 2002 Alan Cox <[email protected]> */ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/smp.h> #include <linux/hardirq.h> #include <asm/processor.h> #include <asm/traps.h> #include <asm/tlbflush.h> #include <asm/mce.h> #include <asm/msr.h> #include "internal.h" /* By default disabled */ int mce_p5_enabled __read_mostly; /* Machine check handler for Pentium class Intel CPUs: */ noinstr void pentium_machine_check(struct pt_regs *regs) { u32 loaddr, hi, lotype; instrumentation_begin(); rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi); pr_emerg("CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n", smp_processor_id(), loaddr, lotype); if (lotype & (1<<5)) { pr_emerg("CPU#%d: Possible thermal failure (CPU on fire ?).\n", smp_processor_id()); } add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); instrumentation_end(); } /* Set up machine check reporting for processors with Intel style MCE: */ void intel_p5_mcheck_init(struct cpuinfo_x86 *c) { u32 l, h; /* Default P5 to off as its often misconnected: */ if (!mce_p5_enabled) return; /* Check for MCE support: */ if (!cpu_has(c, X86_FEATURE_MCE)) return; /* Read registers before enabling: */ rdmsr(MSR_IA32_P5_MC_ADDR, l, h); rdmsr(MSR_IA32_P5_MC_TYPE, l, h); pr_info("Intel old style machine check architecture supported.\n"); /* Enable MCE: */ cr4_set_bits(X86_CR4_MCE); pr_info("Intel old style machine check reporting enabled on CPU#%d.\n", smp_processor_id()); }
linux-master
arch/x86/kernel/cpu/mce/p5.c
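For completeness, a tiny illustrative decode of the MC_TYPE word that pentium_machine_check() prints; the value used here is made up, and only the bit-5 "possible thermal failure" interpretation comes from the code above.

/* Toy decode of a P5 MC_TYPE value; purely illustrative. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lotype = 1u << 5;	/* pretend value read from MSR_IA32_P5_MC_TYPE */

	printf("MC type 0x%08x%s\n", lotype,
	       (lotype & (1u << 5)) ? " (possible thermal failure)" : "");
	return 0;
}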
// SPDX-License-Identifier: GPL-2.0-only /* * MCE event pool management in MCE context * * Copyright (C) 2015 Intel Corp. * Author: Chen, Gong <[email protected]> */ #include <linux/smp.h> #include <linux/mm.h> #include <linux/genalloc.h> #include <linux/llist.h> #include "internal.h" /* * printk() is not safe in MCE context. This is a lock-less memory allocator * used to save error information organized in a lock-less list. * * This memory pool is only to be used to save MCE records in MCE context. * MCE events are rare, so a fixed size memory pool should be enough. Use * 2 pages to save MCE events for now (~80 MCE records at most). */ #define MCE_POOLSZ (2 * PAGE_SIZE) static struct gen_pool *mce_evt_pool; static LLIST_HEAD(mce_event_llist); static char gen_pool_buf[MCE_POOLSZ]; /* * Compare the record "t" with each of the records on list "l" to see if * an equivalent one is present in the list. */ static bool is_duplicate_mce_record(struct mce_evt_llist *t, struct mce_evt_llist *l) { struct mce_evt_llist *node; struct mce *m1, *m2; m1 = &t->mce; llist_for_each_entry(node, &l->llnode, llnode) { m2 = &node->mce; if (!mce_cmp(m1, m2)) return true; } return false; } /* * The system has panicked - we'd like to peruse the list of MCE records * that have been queued, but not seen by anyone yet. The list is in * reverse time order, so we need to reverse it. While doing that we can * also drop duplicate records (these were logged because some banks are * shared between cores or by all threads on a socket). */ struct llist_node *mce_gen_pool_prepare_records(void) { struct llist_node *head; LLIST_HEAD(new_head); struct mce_evt_llist *node, *t; head = llist_del_all(&mce_event_llist); if (!head) return NULL; /* squeeze out duplicates while reversing order */ llist_for_each_entry_safe(node, t, head, llnode) { if (!is_duplicate_mce_record(node, t)) llist_add(&node->llnode, &new_head); } return new_head.first; } void mce_gen_pool_process(struct work_struct *__unused) { struct llist_node *head; struct mce_evt_llist *node, *tmp; struct mce *mce; head = llist_del_all(&mce_event_llist); if (!head) return; head = llist_reverse_order(head); llist_for_each_entry_safe(node, tmp, head, llnode) { mce = &node->mce; blocking_notifier_call_chain(&x86_mce_decoder_chain, 0, mce); gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node)); } } bool mce_gen_pool_empty(void) { return llist_empty(&mce_event_llist); } int mce_gen_pool_add(struct mce *mce) { struct mce_evt_llist *node; if (filter_mce(mce)) return -EINVAL; if (!mce_evt_pool) return -EINVAL; node = (void *)gen_pool_alloc(mce_evt_pool, sizeof(*node)); if (!node) { pr_warn_ratelimited("MCE records pool full!\n"); return -ENOMEM; } memcpy(&node->mce, mce, sizeof(*mce)); llist_add(&node->llnode, &mce_event_llist); return 0; } static int mce_gen_pool_create(void) { struct gen_pool *tmpp; int ret = -ENOMEM; tmpp = gen_pool_create(ilog2(sizeof(struct mce_evt_llist)), -1); if (!tmpp) goto out; ret = gen_pool_add(tmpp, (unsigned long)gen_pool_buf, MCE_POOLSZ, -1); if (ret) { gen_pool_destroy(tmpp); goto out; } mce_evt_pool = tmpp; out: return ret; } int mce_gen_pool_init(void) { /* Just init mce_gen_pool once. */ if (mce_evt_pool) return 0; return mce_gen_pool_create(); }
linux-master
arch/x86/kernel/cpu/mce/genpool.c
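mce_gen_pool_prepare_records() reverses the lock-less list while squeezing out duplicates in a single pass. The userspace sketch below shows the same pattern on an ordinary singly linked list of ints standing in for struct mce_evt_llist; the llist primitives, memory ordering and the MCE comparison are deliberately left out.

/* Sketch: walk a newest-first list, drop any node that has a duplicate
 * later in the list (i.e. an older copy), and push the survivors onto a
 * new head, which reverses the order to oldest-first. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

static bool is_duplicate(const struct node *n, const struct node *rest)
{
	for (; rest; rest = rest->next)
		if (rest->val == n->val)
			return true;
	return false;
}

static struct node *prepare(struct node *head)
{
	struct node *out = NULL, *next;

	for (struct node *n = head; n; n = next) {
		next = n->next;
		if (is_duplicate(n, next)) {	/* an older copy exists: drop it */
			free(n);
			continue;
		}
		n->next = out;			/* push keeper onto the new head */
		out = n;
	}
	return out;				/* oldest first, deduplicated */
}

int main(void)
{
	int vals[] = { 3, 2, 3, 1 };		/* newest first, "3" duplicated */
	struct node *head = NULL;

	for (int i = sizeof(vals) / sizeof(vals[0]) - 1; i >= 0; i--) {
		struct node *n = malloc(sizeof(*n));
		n->val = vals[i];
		n->next = head;
		head = n;
	}

	for (struct node *n = prepare(head); n; n = n->next)
		printf("%d ", n->val);		/* prints: 1 3 2 */
	putchar('\n');
	return 0;
}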
// SPDX-License-Identifier: GPL-2.0-only /* * (c) 2005-2016 Advanced Micro Devices, Inc. * * Written by Jacob Shin - AMD, Inc. * Maintained by: Borislav Petkov <[email protected]> * * All MC4_MISCi registers are shared between cores on a node. */ #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/kobject.h> #include <linux/percpu.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/sysfs.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/string.h> #include <asm/amd_nb.h> #include <asm/traps.h> #include <asm/apic.h> #include <asm/mce.h> #include <asm/msr.h> #include <asm/trace/irq_vectors.h> #include "internal.h" #define NR_BLOCKS 5 #define THRESHOLD_MAX 0xFFF #define INT_TYPE_APIC 0x00020000 #define MASK_VALID_HI 0x80000000 #define MASK_CNTP_HI 0x40000000 #define MASK_LOCKED_HI 0x20000000 #define MASK_LVTOFF_HI 0x00F00000 #define MASK_COUNT_EN_HI 0x00080000 #define MASK_INT_TYPE_HI 0x00060000 #define MASK_OVERFLOW_HI 0x00010000 #define MASK_ERR_COUNT_HI 0x00000FFF #define MASK_BLKPTR_LO 0xFF000000 #define MCG_XBLK_ADDR 0xC0000400 /* Deferred error settings */ #define MSR_CU_DEF_ERR 0xC0000410 #define MASK_DEF_LVTOFF 0x000000F0 #define MASK_DEF_INT_TYPE 0x00000006 #define DEF_LVT_OFF 0x2 #define DEF_INT_TYPE_APIC 0x2 /* Scalable MCA: */ /* Threshold LVT offset is at MSR0xC0000410[15:12] */ #define SMCA_THR_LVT_OFF 0xF000 static bool thresholding_irq_en; static const char * const th_names[] = { "load_store", "insn_fetch", "combined_unit", "decode_unit", "northbridge", "execution_unit", }; static const char * const smca_umc_block_names[] = { "dram_ecc", "misc_umc" }; #define HWID_MCATYPE(hwid, mcatype) (((hwid) << 16) | (mcatype)) struct smca_hwid { unsigned int bank_type; /* Use with smca_bank_types for easy indexing. */ u32 hwid_mcatype; /* (hwid,mcatype) tuple */ }; struct smca_bank { const struct smca_hwid *hwid; u32 id; /* Value of MCA_IPID[InstanceId]. */ u8 sysfs_id; /* Value used for sysfs name. */ }; static DEFINE_PER_CPU_READ_MOSTLY(struct smca_bank[MAX_NR_BANKS], smca_banks); static DEFINE_PER_CPU_READ_MOSTLY(u8[N_SMCA_BANK_TYPES], smca_bank_counts); struct smca_bank_name { const char *name; /* Short name for sysfs */ const char *long_name; /* Long name for pretty-printing */ }; static struct smca_bank_name smca_names[] = { [SMCA_LS ... SMCA_LS_V2] = { "load_store", "Load Store Unit" }, [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" }, [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" }, [SMCA_DE] = { "decode_unit", "Decode Unit" }, [SMCA_RESERVED] = { "reserved", "Reserved" }, [SMCA_EX] = { "execution_unit", "Execution Unit" }, [SMCA_FP] = { "floating_point", "Floating Point Unit" }, [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" }, [SMCA_CS ... SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" }, [SMCA_PIE] = { "pie", "Power, Interrupts, etc." }, /* UMC v2 is separate because both of them can exist in a single system. */ [SMCA_UMC] = { "umc", "Unified Memory Controller" }, [SMCA_UMC_V2] = { "umc_v2", "Unified Memory Controller v2" }, [SMCA_PB] = { "param_block", "Parameter Block" }, [SMCA_PSP ... SMCA_PSP_V2] = { "psp", "Platform Security Processor" }, [SMCA_SMU ... SMCA_SMU_V2] = { "smu", "System Management Unit" }, [SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" }, [SMCA_MPDMA] = { "mpdma", "MPDMA Unit" }, [SMCA_NBIO] = { "nbio", "Northbridge IO Unit" }, [SMCA_PCIE ... 
SMCA_PCIE_V2] = { "pcie", "PCI Express Unit" }, [SMCA_XGMI_PCS] = { "xgmi_pcs", "Ext Global Memory Interconnect PCS Unit" }, [SMCA_NBIF] = { "nbif", "NBIF Unit" }, [SMCA_SHUB] = { "shub", "System Hub Unit" }, [SMCA_SATA] = { "sata", "SATA Unit" }, [SMCA_USB] = { "usb", "USB Unit" }, [SMCA_GMI_PCS] = { "gmi_pcs", "Global Memory Interconnect PCS Unit" }, [SMCA_XGMI_PHY] = { "xgmi_phy", "Ext Global Memory Interconnect PHY Unit" }, [SMCA_WAFL_PHY] = { "wafl_phy", "WAFL PHY Unit" }, [SMCA_GMI_PHY] = { "gmi_phy", "Global Memory Interconnect PHY Unit" }, }; static const char *smca_get_name(enum smca_bank_types t) { if (t >= N_SMCA_BANK_TYPES) return NULL; return smca_names[t].name; } const char *smca_get_long_name(enum smca_bank_types t) { if (t >= N_SMCA_BANK_TYPES) return NULL; return smca_names[t].long_name; } EXPORT_SYMBOL_GPL(smca_get_long_name); enum smca_bank_types smca_get_bank_type(unsigned int cpu, unsigned int bank) { struct smca_bank *b; if (bank >= MAX_NR_BANKS) return N_SMCA_BANK_TYPES; b = &per_cpu(smca_banks, cpu)[bank]; if (!b->hwid) return N_SMCA_BANK_TYPES; return b->hwid->bank_type; } EXPORT_SYMBOL_GPL(smca_get_bank_type); static const struct smca_hwid smca_hwid_mcatypes[] = { /* { bank_type, hwid_mcatype } */ /* Reserved type */ { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0) }, /* ZN Core (HWID=0xB0) MCA types */ { SMCA_LS, HWID_MCATYPE(0xB0, 0x0) }, { SMCA_LS_V2, HWID_MCATYPE(0xB0, 0x10) }, { SMCA_IF, HWID_MCATYPE(0xB0, 0x1) }, { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2) }, { SMCA_DE, HWID_MCATYPE(0xB0, 0x3) }, /* HWID 0xB0 MCATYPE 0x4 is Reserved */ { SMCA_EX, HWID_MCATYPE(0xB0, 0x5) }, { SMCA_FP, HWID_MCATYPE(0xB0, 0x6) }, { SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7) }, /* Data Fabric MCA types */ { SMCA_CS, HWID_MCATYPE(0x2E, 0x0) }, { SMCA_PIE, HWID_MCATYPE(0x2E, 0x1) }, { SMCA_CS_V2, HWID_MCATYPE(0x2E, 0x2) }, /* Unified Memory Controller MCA type */ { SMCA_UMC, HWID_MCATYPE(0x96, 0x0) }, { SMCA_UMC_V2, HWID_MCATYPE(0x96, 0x1) }, /* Parameter Block MCA type */ { SMCA_PB, HWID_MCATYPE(0x05, 0x0) }, /* Platform Security Processor MCA type */ { SMCA_PSP, HWID_MCATYPE(0xFF, 0x0) }, { SMCA_PSP_V2, HWID_MCATYPE(0xFF, 0x1) }, /* System Management Unit MCA type */ { SMCA_SMU, HWID_MCATYPE(0x01, 0x0) }, { SMCA_SMU_V2, HWID_MCATYPE(0x01, 0x1) }, /* Microprocessor 5 Unit MCA type */ { SMCA_MP5, HWID_MCATYPE(0x01, 0x2) }, /* MPDMA MCA type */ { SMCA_MPDMA, HWID_MCATYPE(0x01, 0x3) }, /* Northbridge IO Unit MCA type */ { SMCA_NBIO, HWID_MCATYPE(0x18, 0x0) }, /* PCI Express Unit MCA type */ { SMCA_PCIE, HWID_MCATYPE(0x46, 0x0) }, { SMCA_PCIE_V2, HWID_MCATYPE(0x46, 0x1) }, { SMCA_XGMI_PCS, HWID_MCATYPE(0x50, 0x0) }, { SMCA_NBIF, HWID_MCATYPE(0x6C, 0x0) }, { SMCA_SHUB, HWID_MCATYPE(0x80, 0x0) }, { SMCA_SATA, HWID_MCATYPE(0xA8, 0x0) }, { SMCA_USB, HWID_MCATYPE(0xAA, 0x0) }, { SMCA_GMI_PCS, HWID_MCATYPE(0x241, 0x0) }, { SMCA_XGMI_PHY, HWID_MCATYPE(0x259, 0x0) }, { SMCA_WAFL_PHY, HWID_MCATYPE(0x267, 0x0) }, { SMCA_GMI_PHY, HWID_MCATYPE(0x269, 0x0) }, }; /* * In SMCA enabled processors, we can have multiple banks for a given IP type. * So to define a unique name for each bank, we use a temp c-string to append * the MCA_IPID[InstanceId] to type's name in get_name(). * * InstanceId is 32 bits which is 8 characters. Make sure MAX_MCATYPE_NAME_LEN * is greater than 8 plus 1 (for underscore) plus length of longest type name. 
*/ #define MAX_MCATYPE_NAME_LEN 30 static char buf_mcatype[MAX_MCATYPE_NAME_LEN]; static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks); /* * A list of the banks enabled on each logical CPU. Controls which respective * descriptors to initialize later in mce_threshold_create_device(). */ static DEFINE_PER_CPU(u64, bank_map); /* Map of banks that have more than MCA_MISC0 available. */ static DEFINE_PER_CPU(u64, smca_misc_banks_map); static void amd_threshold_interrupt(void); static void amd_deferred_error_interrupt(void); static void default_deferred_error_interrupt(void) { pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR); } void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt; static void smca_set_misc_banks_map(unsigned int bank, unsigned int cpu) { u32 low, high; /* * For SMCA enabled processors, BLKPTR field of the first MISC register * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4). */ if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) return; if (!(low & MCI_CONFIG_MCAX)) return; if (rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high)) return; if (low & MASK_BLKPTR_LO) per_cpu(smca_misc_banks_map, cpu) |= BIT_ULL(bank); } static void smca_configure(unsigned int bank, unsigned int cpu) { u8 *bank_counts = this_cpu_ptr(smca_bank_counts); const struct smca_hwid *s_hwid; unsigned int i, hwid_mcatype; u32 high, low; u32 smca_config = MSR_AMD64_SMCA_MCx_CONFIG(bank); /* Set appropriate bits in MCA_CONFIG */ if (!rdmsr_safe(smca_config, &low, &high)) { /* * OS is required to set the MCAX bit to acknowledge that it is * now using the new MSR ranges and new registers under each * bank. It also means that the OS will configure deferred * errors in the new MCx_CONFIG register. If the bit is not set, * uncorrectable errors will cause a system panic. * * MCA_CONFIG[MCAX] is bit 32 (0 in the high portion of the MSR.) */ high |= BIT(0); /* * SMCA sets the Deferred Error Interrupt type per bank. * * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us * if the DeferredIntType bit field is available. * * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the * high portion of the MSR). OS should set this to 0x1 to enable * APIC based interrupt. First, check that no interrupt has been * set. */ if ((low & BIT(5)) && !((high >> 5) & 0x3)) high |= BIT(5); this_cpu_ptr(mce_banks_array)[bank].lsb_in_status = !!(low & BIT(8)); wrmsr(smca_config, low, high); } smca_set_misc_banks_map(bank, cpu); if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) { pr_warn("Failed to read MCA_IPID for bank %d\n", bank); return; } hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID, (high & MCI_IPID_MCATYPE) >> 16); for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) { s_hwid = &smca_hwid_mcatypes[i]; if (hwid_mcatype == s_hwid->hwid_mcatype) { this_cpu_ptr(smca_banks)[bank].hwid = s_hwid; this_cpu_ptr(smca_banks)[bank].id = low; this_cpu_ptr(smca_banks)[bank].sysfs_id = bank_counts[s_hwid->bank_type]++; break; } } } struct thresh_restart { struct threshold_block *b; int reset; int set_lvt_off; int lvt_off; u16 old_limit; }; static inline bool is_shared_bank(int bank) { /* * Scalable MCA provides for only one core to have access to the MSRs of * a shared bank. 
*/ if (mce_flags.smca) return false; /* Bank 4 is for northbridge reporting and is thus shared */ return (bank == 4); } static const char *bank4_names(const struct threshold_block *b) { switch (b->address) { /* MSR4_MISC0 */ case 0x00000413: return "dram"; case 0xc0000408: return "ht_links"; case 0xc0000409: return "l3_cache"; default: WARN(1, "Funny MSR: 0x%08x\n", b->address); return ""; } }; static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits) { /* * bank 4 supports APIC LVT interrupts implicitly since forever. */ if (bank == 4) return true; /* * IntP: interrupt present; if this bit is set, the thresholding * bank can generate APIC LVT interrupts */ return msr_high_bits & BIT(28); } static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi) { int msr = (hi & MASK_LVTOFF_HI) >> 20; if (apic < 0) { pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt " "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu, b->bank, b->block, b->address, hi, lo); return 0; } if (apic != msr) { /* * On SMCA CPUs, LVT offset is programmed at a different MSR, and * the BIOS provides the value. The original field where LVT offset * was set is reserved. Return early here: */ if (mce_flags.smca) return 0; pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d " "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu, apic, b->bank, b->block, b->address, hi, lo); return 0; } return 1; }; /* Reprogram MCx_MISC MSR behind this threshold bank. */ static void threshold_restart_bank(void *_tr) { struct thresh_restart *tr = _tr; u32 hi, lo; /* sysfs write might race against an offline operation */ if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off) return; rdmsr(tr->b->address, lo, hi); if (tr->b->threshold_limit < (hi & THRESHOLD_MAX)) tr->reset = 1; /* limit cannot be lower than err count */ if (tr->reset) { /* reset err count and overflow bit */ hi = (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) | (THRESHOLD_MAX - tr->b->threshold_limit); } else if (tr->old_limit) { /* change limit w/o reset */ int new_count = (hi & THRESHOLD_MAX) + (tr->old_limit - tr->b->threshold_limit); hi = (hi & ~MASK_ERR_COUNT_HI) | (new_count & THRESHOLD_MAX); } /* clear IntType */ hi &= ~MASK_INT_TYPE_HI; if (!tr->b->interrupt_capable) goto done; if (tr->set_lvt_off) { if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) { /* set new lvt offset */ hi &= ~MASK_LVTOFF_HI; hi |= tr->lvt_off << 20; } } if (tr->b->interrupt_enable) hi |= INT_TYPE_APIC; done: hi |= MASK_COUNT_EN_HI; wrmsr(tr->b->address, lo, hi); } static void mce_threshold_block_init(struct threshold_block *b, int offset) { struct thresh_restart tr = { .b = b, .set_lvt_off = 1, .lvt_off = offset, }; b->threshold_limit = THRESHOLD_MAX; threshold_restart_bank(&tr); }; static int setup_APIC_mce_threshold(int reserved, int new) { if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR, APIC_EILVT_MSG_FIX, 0)) return new; return reserved; } static int setup_APIC_deferred_error(int reserved, int new) { if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR, APIC_EILVT_MSG_FIX, 0)) return new; return reserved; } static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c) { u32 low = 0, high = 0; int def_offset = -1, def_new; if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high)) return; def_new = (low & MASK_DEF_LVTOFF) >> 4; if (!(low & MASK_DEF_LVTOFF)) { pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n"); def_new = DEF_LVT_OFF; low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF 
<< 4); } def_offset = setup_APIC_deferred_error(def_offset, def_new); if ((def_offset == def_new) && (deferred_error_int_vector != amd_deferred_error_interrupt)) deferred_error_int_vector = amd_deferred_error_interrupt; if (!mce_flags.smca) low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC; wrmsr(MSR_CU_DEF_ERR, low, high); } static u32 smca_get_block_address(unsigned int bank, unsigned int block, unsigned int cpu) { if (!block) return MSR_AMD64_SMCA_MCx_MISC(bank); if (!(per_cpu(smca_misc_banks_map, cpu) & BIT_ULL(bank))) return 0; return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); } static u32 get_block_address(u32 current_addr, u32 low, u32 high, unsigned int bank, unsigned int block, unsigned int cpu) { u32 addr = 0, offset = 0; if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS)) return addr; if (mce_flags.smca) return smca_get_block_address(bank, block, cpu); /* Fall back to method we used for older processors: */ switch (block) { case 0: addr = mca_msr_reg(bank, MCA_MISC); break; case 1: offset = ((low & MASK_BLKPTR_LO) >> 21); if (offset) addr = MCG_XBLK_ADDR + offset; break; default: addr = ++current_addr; } return addr; } static int prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr, int offset, u32 misc_high) { unsigned int cpu = smp_processor_id(); u32 smca_low, smca_high; struct threshold_block b; int new; if (!block) per_cpu(bank_map, cpu) |= BIT_ULL(bank); memset(&b, 0, sizeof(b)); b.cpu = cpu; b.bank = bank; b.block = block; b.address = addr; b.interrupt_capable = lvt_interrupt_supported(bank, misc_high); if (!b.interrupt_capable) goto done; b.interrupt_enable = 1; if (!mce_flags.smca) { new = (misc_high & MASK_LVTOFF_HI) >> 20; goto set_offset; } /* Gather LVT offset for thresholding: */ if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high)) goto out; new = (smca_low & SMCA_THR_LVT_OFF) >> 12; set_offset: offset = setup_APIC_mce_threshold(offset, new); if (offset == new) thresholding_irq_en = true; done: mce_threshold_block_init(&b, offset); out: return offset; } bool amd_filter_mce(struct mce *m) { enum smca_bank_types bank_type = smca_get_bank_type(m->extcpu, m->bank); struct cpuinfo_x86 *c = &boot_cpu_data; /* See Family 17h Models 10h-2Fh Erratum #1114. */ if (c->x86 == 0x17 && c->x86_model >= 0x10 && c->x86_model <= 0x2F && bank_type == SMCA_IF && XEC(m->status, 0x3f) == 10) return true; /* NB GART TLB error reporting is disabled by default. */ if (c->x86 < 0x17) { if (m->bank == 4 && XEC(m->status, 0x1f) == 0x5) return true; } return false; } /* * Turn off thresholding banks for the following conditions: * - MC4_MISC thresholding is not supported on Family 0x15. * - Prevent possible spurious interrupts from the IF bank on Family 0x17 * Models 0x10-0x2F due to Erratum #1114. 
*/ static void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank) { int i, num_msrs; u64 hwcr; bool need_toggle; u32 msrs[NR_BLOCKS]; if (c->x86 == 0x15 && bank == 4) { msrs[0] = 0x00000413; /* MC4_MISC0 */ msrs[1] = 0xc0000408; /* MC4_MISC1 */ num_msrs = 2; } else if (c->x86 == 0x17 && (c->x86_model >= 0x10 && c->x86_model <= 0x2F)) { if (smca_get_bank_type(smp_processor_id(), bank) != SMCA_IF) return; msrs[0] = MSR_AMD64_SMCA_MCx_MISC(bank); num_msrs = 1; } else { return; } rdmsrl(MSR_K7_HWCR, hwcr); /* McStatusWrEn has to be set */ need_toggle = !(hwcr & BIT(18)); if (need_toggle) wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); /* Clear CntP bit safely */ for (i = 0; i < num_msrs; i++) msr_clear_bit(msrs[i], 62); /* restore old settings */ if (need_toggle) wrmsrl(MSR_K7_HWCR, hwcr); } /* cpu init entry point, called from mce.c with preempt off */ void mce_amd_feature_init(struct cpuinfo_x86 *c) { unsigned int bank, block, cpu = smp_processor_id(); u32 low = 0, high = 0, address = 0; int offset = -1; for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) { if (mce_flags.smca) smca_configure(bank, cpu); disable_err_thresholding(c, bank); for (block = 0; block < NR_BLOCKS; ++block) { address = get_block_address(address, low, high, bank, block, cpu); if (!address) break; if (rdmsr_safe(address, &low, &high)) break; if (!(high & MASK_VALID_HI)) continue; if (!(high & MASK_CNTP_HI) || (high & MASK_LOCKED_HI)) continue; offset = prepare_threshold_block(bank, block, address, offset, high); } } if (mce_flags.succor) deferred_error_interrupt_enable(c); } bool amd_mce_is_memory_error(struct mce *m) { enum smca_bank_types bank_type; /* ErrCodeExt[20:16] */ u8 xec = (m->status >> 16) & 0x1f; bank_type = smca_get_bank_type(m->extcpu, m->bank); if (mce_flags.smca) return (bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) && xec == 0x0; return m->bank == 4 && xec == 0x8; } static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc) { struct mce m; mce_setup(&m); m.status = status; m.misc = misc; m.bank = bank; m.tsc = rdtsc(); if (m.status & MCI_STATUS_ADDRV) { m.addr = addr; smca_extract_err_addr(&m); } if (mce_flags.smca) { rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid); if (m.status & MCI_STATUS_SYNDV) rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd); } mce_log(&m); } DEFINE_IDTENTRY_SYSVEC(sysvec_deferred_error) { trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR); inc_irq_stat(irq_deferred_error_count); deferred_error_int_vector(); trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR); apic_eoi(); } /* * Returns true if the logged error is deferred. False, otherwise. */ static inline bool _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) { u64 status, addr = 0; rdmsrl(msr_stat, status); if (!(status & MCI_STATUS_VAL)) return false; if (status & MCI_STATUS_ADDRV) rdmsrl(msr_addr, addr); __log_error(bank, status, addr, misc); wrmsrl(msr_stat, 0); return status & MCI_STATUS_DEFERRED; } static bool _log_error_deferred(unsigned int bank, u32 misc) { if (!_log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS), mca_msr_reg(bank, MCA_ADDR), misc)) return false; /* * Non-SMCA systems don't have MCA_DESTAT/MCA_DEADDR registers. * Return true here to avoid accessing these registers. */ if (!mce_flags.smca) return true; /* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. 
*/ wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); return true; } /* * We have three scenarios for checking for Deferred errors: * * 1) Non-SMCA systems check MCA_STATUS and log error if found. * 2) SMCA systems check MCA_STATUS. If error is found then log it and also * clear MCA_DESTAT. * 3) SMCA systems check MCA_DESTAT, if error was not found in MCA_STATUS, and * log it. */ static void log_error_deferred(unsigned int bank) { if (_log_error_deferred(bank, 0)) return; /* * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check * for a valid error. */ _log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank), MSR_AMD64_SMCA_MCx_DEADDR(bank), 0); } /* APIC interrupt handler for deferred errors */ static void amd_deferred_error_interrupt(void) { unsigned int bank; for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) log_error_deferred(bank); } static void log_error_thresholding(unsigned int bank, u64 misc) { _log_error_deferred(bank, misc); } static void log_and_reset_block(struct threshold_block *block) { struct thresh_restart tr; u32 low = 0, high = 0; if (!block) return; if (rdmsr_safe(block->address, &low, &high)) return; if (!(high & MASK_OVERFLOW_HI)) return; /* Log the MCE which caused the threshold event. */ log_error_thresholding(block->bank, ((u64)high << 32) | low); /* Reset threshold block after logging error. */ memset(&tr, 0, sizeof(tr)); tr.b = block; threshold_restart_bank(&tr); } /* * Threshold interrupt handler will service THRESHOLD_APIC_VECTOR. The interrupt * goes off when error_count reaches threshold_limit. */ static void amd_threshold_interrupt(void) { struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL; struct threshold_bank **bp = this_cpu_read(threshold_banks); unsigned int bank, cpu = smp_processor_id(); /* * Validate that the threshold bank has been initialized already. The * handler is installed at boot time, but on a hotplug event the * interrupt might fire before the data has been initialized. */ if (!bp) return; for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) { if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank))) continue; first_block = bp[bank]->blocks; if (!first_block) continue; /* * The first block is also the head of the list. Check it first * before iterating over the rest. 
*/ log_and_reset_block(first_block); list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj) log_and_reset_block(block); } } /* * Sysfs Interface */ struct threshold_attr { struct attribute attr; ssize_t (*show) (struct threshold_block *, char *); ssize_t (*store) (struct threshold_block *, const char *, size_t count); }; #define SHOW_FIELDS(name) \ static ssize_t show_ ## name(struct threshold_block *b, char *buf) \ { \ return sprintf(buf, "%lu\n", (unsigned long) b->name); \ } SHOW_FIELDS(interrupt_enable) SHOW_FIELDS(threshold_limit) static ssize_t store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size) { struct thresh_restart tr; unsigned long new; if (!b->interrupt_capable) return -EINVAL; if (kstrtoul(buf, 0, &new) < 0) return -EINVAL; b->interrupt_enable = !!new; memset(&tr, 0, sizeof(tr)); tr.b = b; if (smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1)) return -ENODEV; return size; } static ssize_t store_threshold_limit(struct threshold_block *b, const char *buf, size_t size) { struct thresh_restart tr; unsigned long new; if (kstrtoul(buf, 0, &new) < 0) return -EINVAL; if (new > THRESHOLD_MAX) new = THRESHOLD_MAX; if (new < 1) new = 1; memset(&tr, 0, sizeof(tr)); tr.old_limit = b->threshold_limit; b->threshold_limit = new; tr.b = b; if (smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1)) return -ENODEV; return size; } static ssize_t show_error_count(struct threshold_block *b, char *buf) { u32 lo, hi; /* CPU might be offline by now */ if (rdmsr_on_cpu(b->cpu, b->address, &lo, &hi)) return -ENODEV; return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) - (THRESHOLD_MAX - b->threshold_limit))); } static struct threshold_attr error_count = { .attr = {.name = __stringify(error_count), .mode = 0444 }, .show = show_error_count, }; #define RW_ATTR(val) \ static struct threshold_attr val = { \ .attr = {.name = __stringify(val), .mode = 0644 }, \ .show = show_## val, \ .store = store_## val, \ }; RW_ATTR(interrupt_enable); RW_ATTR(threshold_limit); static struct attribute *default_attrs[] = { &threshold_limit.attr, &error_count.attr, NULL, /* possibly interrupt_enable if supported, see below */ NULL, }; ATTRIBUTE_GROUPS(default); #define to_block(k) container_of(k, struct threshold_block, kobj) #define to_attr(a) container_of(a, struct threshold_attr, attr) static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) { struct threshold_block *b = to_block(kobj); struct threshold_attr *a = to_attr(attr); ssize_t ret; ret = a->show ? a->show(b, buf) : -EIO; return ret; } static ssize_t store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct threshold_block *b = to_block(kobj); struct threshold_attr *a = to_attr(attr); ssize_t ret; ret = a->store ? 
a->store(b, buf, count) : -EIO; return ret; } static const struct sysfs_ops threshold_ops = { .show = show, .store = store, }; static void threshold_block_release(struct kobject *kobj); static const struct kobj_type threshold_ktype = { .sysfs_ops = &threshold_ops, .default_groups = default_groups, .release = threshold_block_release, }; static const char *get_name(unsigned int cpu, unsigned int bank, struct threshold_block *b) { enum smca_bank_types bank_type; if (!mce_flags.smca) { if (b && bank == 4) return bank4_names(b); return th_names[bank]; } bank_type = smca_get_bank_type(cpu, bank); if (bank_type >= N_SMCA_BANK_TYPES) return NULL; if (b && (bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2)) { if (b->block < ARRAY_SIZE(smca_umc_block_names)) return smca_umc_block_names[b->block]; return NULL; } if (per_cpu(smca_bank_counts, cpu)[bank_type] == 1) return smca_get_name(bank_type); snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN, "%s_%u", smca_get_name(bank_type), per_cpu(smca_banks, cpu)[bank].sysfs_id); return buf_mcatype; } static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb, unsigned int bank, unsigned int block, u32 address) { struct threshold_block *b = NULL; u32 low, high; int err; if ((bank >= this_cpu_read(mce_num_banks)) || (block >= NR_BLOCKS)) return 0; if (rdmsr_safe(address, &low, &high)) return 0; if (!(high & MASK_VALID_HI)) { if (block) goto recurse; else return 0; } if (!(high & MASK_CNTP_HI) || (high & MASK_LOCKED_HI)) goto recurse; b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL); if (!b) return -ENOMEM; b->block = block; b->bank = bank; b->cpu = cpu; b->address = address; b->interrupt_enable = 0; b->interrupt_capable = lvt_interrupt_supported(bank, high); b->threshold_limit = THRESHOLD_MAX; if (b->interrupt_capable) { default_attrs[2] = &interrupt_enable.attr; b->interrupt_enable = 1; } else { default_attrs[2] = NULL; } INIT_LIST_HEAD(&b->miscj); /* This is safe as @tb is not visible yet */ if (tb->blocks) list_add(&b->miscj, &tb->blocks->miscj); else tb->blocks = b; err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(cpu, bank, b)); if (err) goto out_free; recurse: address = get_block_address(address, low, high, bank, ++block, cpu); if (!address) return 0; err = allocate_threshold_blocks(cpu, tb, bank, block, address); if (err) goto out_free; if (b) kobject_uevent(&b->kobj, KOBJ_ADD); return 0; out_free: if (b) { list_del(&b->miscj); kobject_put(&b->kobj); } return err; } static int __threshold_add_blocks(struct threshold_bank *b) { struct list_head *head = &b->blocks->miscj; struct threshold_block *pos = NULL; struct threshold_block *tmp = NULL; int err = 0; err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name); if (err) return err; list_for_each_entry_safe(pos, tmp, head, miscj) { err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name); if (err) { list_for_each_entry_safe_reverse(pos, tmp, head, miscj) kobject_del(&pos->kobj); return err; } } return err; } static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu, unsigned int bank) { struct device *dev = this_cpu_read(mce_device); struct amd_northbridge *nb = NULL; struct threshold_bank *b = NULL; const char *name = get_name(cpu, bank, NULL); int err = 0; if (!dev) return -ENODEV; if (is_shared_bank(bank)) { nb = node_to_amd_nb(topology_die_id(cpu)); /* threshold descriptor already initialized on this node? 
*/ if (nb && nb->bank4) { /* yes, use it */ b = nb->bank4; err = kobject_add(b->kobj, &dev->kobj, name); if (err) goto out; bp[bank] = b; refcount_inc(&b->cpus); err = __threshold_add_blocks(b); goto out; } } b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL); if (!b) { err = -ENOMEM; goto out; } /* Associate the bank with the per-CPU MCE device */ b->kobj = kobject_create_and_add(name, &dev->kobj); if (!b->kobj) { err = -EINVAL; goto out_free; } if (is_shared_bank(bank)) { b->shared = 1; refcount_set(&b->cpus, 1); /* nb is already initialized, see above */ if (nb) { WARN_ON(nb->bank4); nb->bank4 = b; } } err = allocate_threshold_blocks(cpu, b, bank, 0, mca_msr_reg(bank, MCA_MISC)); if (err) goto out_kobj; bp[bank] = b; return 0; out_kobj: kobject_put(b->kobj); out_free: kfree(b); out: return err; } static void threshold_block_release(struct kobject *kobj) { kfree(to_block(kobj)); } static void deallocate_threshold_blocks(struct threshold_bank *bank) { struct threshold_block *pos, *tmp; list_for_each_entry_safe(pos, tmp, &bank->blocks->miscj, miscj) { list_del(&pos->miscj); kobject_put(&pos->kobj); } kobject_put(&bank->blocks->kobj); } static void __threshold_remove_blocks(struct threshold_bank *b) { struct threshold_block *pos = NULL; struct threshold_block *tmp = NULL; kobject_put(b->kobj); list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj) kobject_put(b->kobj); } static void threshold_remove_bank(struct threshold_bank *bank) { struct amd_northbridge *nb; if (!bank->blocks) goto out_free; if (!bank->shared) goto out_dealloc; if (!refcount_dec_and_test(&bank->cpus)) { __threshold_remove_blocks(bank); return; } else { /* * The last CPU on this node using the shared bank is going * away, remove that bank now. */ nb = node_to_amd_nb(topology_die_id(smp_processor_id())); nb->bank4 = NULL; } out_dealloc: deallocate_threshold_blocks(bank); out_free: kobject_put(bank->kobj); kfree(bank); } static void __threshold_remove_device(struct threshold_bank **bp) { unsigned int bank, numbanks = this_cpu_read(mce_num_banks); for (bank = 0; bank < numbanks; bank++) { if (!bp[bank]) continue; threshold_remove_bank(bp[bank]); bp[bank] = NULL; } kfree(bp); } int mce_threshold_remove_device(unsigned int cpu) { struct threshold_bank **bp = this_cpu_read(threshold_banks); if (!bp) return 0; /* * Clear the pointer before cleaning up, so that the interrupt won't * touch anything of this. */ this_cpu_write(threshold_banks, NULL); __threshold_remove_device(bp); return 0; } /** * mce_threshold_create_device - Create the per-CPU MCE threshold device * @cpu: The plugged in CPU * * Create directories and files for all valid threshold banks. * * This is invoked from the CPU hotplug callback which was installed in * mcheck_init_device(). The invocation happens in context of the hotplug * thread running on @cpu. The callback is invoked on all CPUs which are * online when the callback is installed or during a real hotplug event. 
*/ int mce_threshold_create_device(unsigned int cpu) { unsigned int numbanks, bank; struct threshold_bank **bp; int err; if (!mce_flags.amd_threshold) return 0; bp = this_cpu_read(threshold_banks); if (bp) return 0; numbanks = this_cpu_read(mce_num_banks); bp = kcalloc(numbanks, sizeof(*bp), GFP_KERNEL); if (!bp) return -ENOMEM; for (bank = 0; bank < numbanks; ++bank) { if (!(this_cpu_read(bank_map) & BIT_ULL(bank))) continue; err = threshold_create_bank(bp, cpu, bank); if (err) { __threshold_remove_device(bp); return err; } } this_cpu_write(threshold_banks, bp); if (thresholding_irq_en) mce_threshold_vector = amd_threshold_interrupt; return 0; }
linux-master
arch/x86/kernel/cpu/mce/amd.c
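The threshold_limit/error_count plumbing relies on the 12-bit hardware counter being preloaded with THRESHOLD_MAX - threshold_limit, so it tops out exactly when threshold_limit errors have been counted. The short standalone sketch below reproduces that arithmetic, using the same expressions as threshold_restart_bank() and show_error_count(); the limit value is arbitrary.

/* Sketch of the threshold counter bias: preload with (MAX - limit), then
 * recover the number of errors seen by undoing the bias. */
#include <stdint.h>
#include <stdio.h>

#define THRESHOLD_MAX 0xFFF

static uint32_t preload(uint32_t limit)
{
	return THRESHOLD_MAX - limit;		/* value programmed into MCi_MISC */
}

static uint32_t decode(uint32_t hw_count, uint32_t limit)
{
	/* same expression as show_error_count() */
	return (hw_count & THRESHOLD_MAX) - (THRESHOLD_MAX - limit);
}

int main(void)
{
	uint32_t limit = 10;
	uint32_t hw = preload(limit);

	for (int errors = 0; errors <= 10; errors++, hw++)
		printf("hw=0x%03x seen=%u%s\n", (unsigned)hw,
		       (unsigned)decode(hw, limit),
		       decode(hw, limit) == limit ? "  <- limit reached" : "");
	return 0;
}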
// SPDX-License-Identifier: GPL-2.0-only /* * Bridge between MCE and APEI * * On some machine, corrected memory errors are reported via APEI * generic hardware error source (GHES) instead of corrected Machine * Check. These corrected memory errors can be reported to user space * through /dev/mcelog via faking a corrected Machine Check, so that * the error memory page can be offlined by /sbin/mcelog if the error * count for one page is beyond the threshold. * * For fatal MCE, save MCE record into persistent storage via ERST, so * that the MCE record can be logged after reboot via ERST. * * Copyright 2010 Intel Corp. * Author: Huang Ying <[email protected]> */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/acpi.h> #include <linux/cper.h> #include <acpi/apei.h> #include <acpi/ghes.h> #include <asm/mce.h> #include "internal.h" void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) { struct mce m; int lsb; if (!(mem_err->validation_bits & CPER_MEM_VALID_PA)) return; /* * Even if the ->validation_bits are set for address mask, * to be extra safe, check and reject an error radius '0', * and fall back to the default page size. */ if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK) lsb = find_first_bit((void *)&mem_err->physical_addr_mask, PAGE_SHIFT); else lsb = PAGE_SHIFT; mce_setup(&m); m.bank = -1; /* Fake a memory read error with unknown channel */ m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | MCI_STATUS_MISCV | 0x9f; m.misc = (MCI_MISC_ADDR_PHYS << 6) | lsb; if (severity >= GHES_SEV_RECOVERABLE) m.status |= MCI_STATUS_UC; if (severity >= GHES_SEV_PANIC) { m.status |= MCI_STATUS_PCC; m.tsc = rdtsc(); } m.addr = mem_err->physical_addr; mce_log(&m); } EXPORT_SYMBOL_GPL(apei_mce_report_mem_error); int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id) { const u64 *i_mce = ((const u64 *) (ctx_info + 1)); unsigned int cpu; struct mce m; if (!boot_cpu_has(X86_FEATURE_SMCA)) return -EINVAL; /* * The starting address of the register array extracted from BERT must * match with the first expected register in the register layout of * SMCA address space. This address corresponds to banks's MCA_STATUS * register. * * Match any MCi_STATUS register by turning off bank numbers. */ if ((ctx_info->msr_addr & MSR_AMD64_SMCA_MC0_STATUS) != MSR_AMD64_SMCA_MC0_STATUS) return -EINVAL; /* * The register array size must be large enough to include all the * SMCA registers which need to be extracted. * * The number of registers in the register array is determined by * Register Array Size/8 as defined in UEFI spec v2.8, sec N.2.4.2.2. * The register layout is fixed and currently the raw data in the * register array includes 6 SMCA registers which the kernel can * extract. 
*/ if (ctx_info->reg_arr_size < 48) return -EINVAL; mce_setup(&m); m.extcpu = -1; m.socketid = -1; for_each_possible_cpu(cpu) { if (cpu_data(cpu).initial_apicid == lapic_id) { m.extcpu = cpu; m.socketid = cpu_data(m.extcpu).phys_proc_id; break; } } m.apicid = lapic_id; m.bank = (ctx_info->msr_addr >> 4) & 0xFF; m.status = *i_mce; m.addr = *(i_mce + 1); m.misc = *(i_mce + 2); /* Skipping MCA_CONFIG */ m.ipid = *(i_mce + 4); m.synd = *(i_mce + 5); mce_log(&m); return 0; } #define CPER_CREATOR_MCE \ GUID_INIT(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \ 0x64, 0x90, 0xb8, 0x9d) #define CPER_SECTION_TYPE_MCE \ GUID_INIT(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \ 0x04, 0x4a, 0x38, 0xfc) /* * CPER specification (in UEFI specification 2.3 appendix N) requires * byte-packed. */ struct cper_mce_record { struct cper_record_header hdr; struct cper_section_descriptor sec_hdr; struct mce mce; } __packed; int apei_write_mce(struct mce *m) { struct cper_mce_record rcd; memset(&rcd, 0, sizeof(rcd)); memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE); rcd.hdr.revision = CPER_RECORD_REV; rcd.hdr.signature_end = CPER_SIG_END; rcd.hdr.section_count = 1; rcd.hdr.error_severity = CPER_SEV_FATAL; /* timestamp, platform_id, partition_id are all invalid */ rcd.hdr.validation_bits = 0; rcd.hdr.record_length = sizeof(rcd); rcd.hdr.creator_id = CPER_CREATOR_MCE; rcd.hdr.notification_type = CPER_NOTIFY_MCE; rcd.hdr.record_id = cper_next_record_id(); rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR; rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd; rcd.sec_hdr.section_length = sizeof(rcd.mce); rcd.sec_hdr.revision = CPER_SEC_REV; /* fru_id and fru_text is invalid */ rcd.sec_hdr.validation_bits = 0; rcd.sec_hdr.flags = CPER_SEC_PRIMARY; rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE; rcd.sec_hdr.section_severity = CPER_SEV_FATAL; memcpy(&rcd.mce, m, sizeof(*m)); return erst_write(&rcd.hdr); } ssize_t apei_read_mce(struct mce *m, u64 *record_id) { struct cper_mce_record rcd; int rc, pos; rc = erst_get_record_id_begin(&pos); if (rc) return rc; retry: rc = erst_get_record_id_next(&pos, record_id); if (rc) goto out; /* no more record */ if (*record_id == APEI_ERST_INVALID_RECORD_ID) goto out; rc = erst_read_record(*record_id, &rcd.hdr, sizeof(rcd), sizeof(rcd), &CPER_CREATOR_MCE); /* someone else has cleared the record, try next one */ if (rc == -ENOENT) goto retry; else if (rc < 0) goto out; memcpy(m, &rcd.mce, sizeof(*m)); rc = sizeof(*m); out: erst_get_record_id_end(); return rc; } /* Check whether there is record in ERST */ int apei_check_mce(void) { return erst_get_record_count(); } int apei_clear_mce(u64 record_id) { return erst_clear(record_id); }
linux-master
arch/x86/kernel/cpu/mce/apei.c
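apei_mce_report_mem_error() derives the error grain from the lowest set bit of physical_addr_mask and falls back to page granularity when the mask is absent or coarser than a page. Below is a standalone sketch of that calculation, using the GCC/Clang builtin __builtin_ctzll() in place of find_first_bit() and assuming PAGE_SHIFT is 12.

/* Sketch: map a CPER physical_addr_mask to the lsb value stored in
 * MCi_MISC; anything without a set bit below PAGE_SHIFT (or no mask at
 * all) is treated as page-granular. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static int addr_mask_to_lsb(uint64_t mask, int mask_valid)
{
	if (!mask_valid || !(mask & ((1ULL << PAGE_SHIFT) - 1)))
		return PAGE_SHIFT;		/* reject radius 0 / huge grain */
	return __builtin_ctzll(mask);		/* position of lowest set bit */
}

int main(void)
{
	/* 64-byte cacheline mask -> lsb 6; missing mask -> lsb 12 (page) */
	printf("cacheline mask: lsb=%d\n", addr_mask_to_lsb(~0x3fULL, 1));
	printf("no mask field:  lsb=%d\n", addr_mask_to_lsb(0, 0));
	return 0;
}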
// SPDX-License-Identifier: GPL-2.0-only /* * Machine check injection support. * Copyright 2008 Intel Corporation. * * Authors: * Andi Kleen * Ying Huang * * The AMD part (from mce_amd_inj.c): a simple MCE injection facility * for testing different aspects of the RAS code. This driver should be * built as module so that it can be loaded on production kernels for * testing purposes. * * Copyright (c) 2010-17: Borislav Petkov <[email protected]> * Advanced Micro Devices Inc. */ #include <linux/cpu.h> #include <linux/debugfs.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/pci.h> #include <linux/uaccess.h> #include <asm/amd_nb.h> #include <asm/apic.h> #include <asm/irq_vectors.h> #include <asm/mce.h> #include <asm/nmi.h> #include <asm/smp.h> #include "internal.h" static bool hw_injection_possible = true; /* * Collect all the MCi_XXX settings */ static struct mce i_mce; static struct dentry *dfs_inj; #define MAX_FLAG_OPT_SIZE 4 #define NBCFG 0x44 enum injection_type { SW_INJ = 0, /* SW injection, simply decode the error */ HW_INJ, /* Trigger a #MC */ DFR_INT_INJ, /* Trigger Deferred error interrupt */ THR_INT_INJ, /* Trigger threshold interrupt */ N_INJ_TYPES, }; static const char * const flags_options[] = { [SW_INJ] = "sw", [HW_INJ] = "hw", [DFR_INT_INJ] = "df", [THR_INT_INJ] = "th", NULL }; /* Set default injection to SW_INJ */ static enum injection_type inj_type = SW_INJ; #define MCE_INJECT_SET(reg) \ static int inj_##reg##_set(void *data, u64 val) \ { \ struct mce *m = (struct mce *)data; \ \ m->reg = val; \ return 0; \ } MCE_INJECT_SET(status); MCE_INJECT_SET(misc); MCE_INJECT_SET(addr); MCE_INJECT_SET(synd); #define MCE_INJECT_GET(reg) \ static int inj_##reg##_get(void *data, u64 *val) \ { \ struct mce *m = (struct mce *)data; \ \ *val = m->reg; \ return 0; \ } MCE_INJECT_GET(status); MCE_INJECT_GET(misc); MCE_INJECT_GET(addr); MCE_INJECT_GET(synd); MCE_INJECT_GET(ipid); DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n"); DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n"); DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n"); DEFINE_SIMPLE_ATTRIBUTE(synd_fops, inj_synd_get, inj_synd_set, "%llx\n"); /* Use the user provided IPID value on a sw injection. */ static int inj_ipid_set(void *data, u64 val) { struct mce *m = (struct mce *)data; if (cpu_feature_enabled(X86_FEATURE_SMCA)) { if (inj_type == SW_INJ) m->ipid = val; } return 0; } DEFINE_SIMPLE_ATTRIBUTE(ipid_fops, inj_ipid_get, inj_ipid_set, "%llx\n"); static void setup_inj_struct(struct mce *m) { memset(m, 0, sizeof(struct mce)); m->cpuvendor = boot_cpu_data.x86_vendor; m->time = ktime_get_real_seconds(); m->cpuid = cpuid_eax(1); m->microcode = boot_cpu_data.microcode; } /* Update fake mce registers on current CPU. 
*/ static void inject_mce(struct mce *m) { struct mce *i = &per_cpu(injectm, m->extcpu); /* Make sure no one reads partially written injectm */ i->finished = 0; mb(); m->finished = 0; /* First set the fields after finished */ i->extcpu = m->extcpu; mb(); /* Now write record in order, finished last (except above) */ memcpy(i, m, sizeof(struct mce)); /* Finally activate it */ mb(); i->finished = 1; } static void raise_poll(struct mce *m) { unsigned long flags; mce_banks_t b; memset(&b, 0xff, sizeof(mce_banks_t)); local_irq_save(flags); machine_check_poll(0, &b); local_irq_restore(flags); m->finished = 0; } static void raise_exception(struct mce *m, struct pt_regs *pregs) { struct pt_regs regs; unsigned long flags; if (!pregs) { memset(&regs, 0, sizeof(struct pt_regs)); regs.ip = m->ip; regs.cs = m->cs; pregs = &regs; } /* do_machine_check() expects interrupts disabled -- at least */ local_irq_save(flags); do_machine_check(pregs); local_irq_restore(flags); m->finished = 0; } static cpumask_var_t mce_inject_cpumask; static DEFINE_MUTEX(mce_inject_mutex); static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) { int cpu = smp_processor_id(); struct mce *m = this_cpu_ptr(&injectm); if (!cpumask_test_cpu(cpu, mce_inject_cpumask)) return NMI_DONE; cpumask_clear_cpu(cpu, mce_inject_cpumask); if (m->inject_flags & MCJ_EXCEPTION) raise_exception(m, regs); else if (m->status) raise_poll(m); return NMI_HANDLED; } static void mce_irq_ipi(void *info) { int cpu = smp_processor_id(); struct mce *m = this_cpu_ptr(&injectm); if (cpumask_test_cpu(cpu, mce_inject_cpumask) && m->inject_flags & MCJ_EXCEPTION) { cpumask_clear_cpu(cpu, mce_inject_cpumask); raise_exception(m, NULL); } } /* Inject mce on current CPU */ static int raise_local(void) { struct mce *m = this_cpu_ptr(&injectm); int context = MCJ_CTX(m->inject_flags); int ret = 0; int cpu = m->extcpu; if (m->inject_flags & MCJ_EXCEPTION) { pr_info("Triggering MCE exception on CPU %d\n", cpu); switch (context) { case MCJ_CTX_IRQ: /* * Could do more to fake interrupts like * calling irq_enter, but the necessary * machinery isn't exported currently. 
*/ fallthrough; case MCJ_CTX_PROCESS: raise_exception(m, NULL); break; default: pr_info("Invalid MCE context\n"); ret = -EINVAL; } pr_info("MCE exception done on CPU %d\n", cpu); } else if (m->status) { pr_info("Starting machine check poll CPU %d\n", cpu); raise_poll(m); mce_notify_irq(); pr_info("Machine check poll done on CPU %d\n", cpu); } else m->finished = 0; return ret; } static void __maybe_unused raise_mce(struct mce *m) { int context = MCJ_CTX(m->inject_flags); inject_mce(m); if (context == MCJ_CTX_RANDOM) return; if (m->inject_flags & (MCJ_IRQ_BROADCAST | MCJ_NMI_BROADCAST)) { unsigned long start; int cpu; cpus_read_lock(); cpumask_copy(mce_inject_cpumask, cpu_online_mask); cpumask_clear_cpu(get_cpu(), mce_inject_cpumask); for_each_online_cpu(cpu) { struct mce *mcpu = &per_cpu(injectm, cpu); if (!mcpu->finished || MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM) cpumask_clear_cpu(cpu, mce_inject_cpumask); } if (!cpumask_empty(mce_inject_cpumask)) { if (m->inject_flags & MCJ_IRQ_BROADCAST) { /* * don't wait because mce_irq_ipi is necessary * to be sync with following raise_local */ preempt_disable(); smp_call_function_many(mce_inject_cpumask, mce_irq_ipi, NULL, 0); preempt_enable(); } else if (m->inject_flags & MCJ_NMI_BROADCAST) __apic_send_IPI_mask(mce_inject_cpumask, NMI_VECTOR); } start = jiffies; while (!cpumask_empty(mce_inject_cpumask)) { if (!time_before(jiffies, start + 2*HZ)) { pr_err("Timeout waiting for mce inject %lx\n", *cpumask_bits(mce_inject_cpumask)); break; } cpu_relax(); } raise_local(); put_cpu(); cpus_read_unlock(); } else { preempt_disable(); raise_local(); preempt_enable(); } } static int mce_inject_raise(struct notifier_block *nb, unsigned long val, void *data) { struct mce *m = (struct mce *)data; if (!m) return NOTIFY_DONE; mutex_lock(&mce_inject_mutex); raise_mce(m); mutex_unlock(&mce_inject_mutex); return NOTIFY_DONE; } static struct notifier_block inject_nb = { .notifier_call = mce_inject_raise, }; /* * Caller needs to be make sure this cpu doesn't disappear * from under us, i.e.: get_cpu/put_cpu. */ static int toggle_hw_mce_inject(unsigned int cpu, bool enable) { u32 l, h; int err; err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h); if (err) { pr_err("%s: error reading HWCR\n", __func__); return err; } enable ? (l |= BIT(18)) : (l &= ~BIT(18)); err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h); if (err) pr_err("%s: error writing HWCR\n", __func__); return err; } static int __set_inj(const char *buf) { int i; for (i = 0; i < N_INJ_TYPES; i++) { if (!strncmp(flags_options[i], buf, strlen(flags_options[i]))) { if (i > SW_INJ && !hw_injection_possible) continue; inj_type = i; return 0; } } return -EINVAL; } static ssize_t flags_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[MAX_FLAG_OPT_SIZE]; int n; n = sprintf(buf, "%s\n", flags_options[inj_type]); return simple_read_from_buffer(ubuf, cnt, ppos, buf, n); } static ssize_t flags_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[MAX_FLAG_OPT_SIZE], *__buf; int err; if (!cnt || cnt > MAX_FLAG_OPT_SIZE) return -EINVAL; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt - 1] = 0; /* strip whitespace */ __buf = strstrip(buf); err = __set_inj(__buf); if (err) { pr_err("%s: Invalid flags value: %s\n", __func__, __buf); return err; } *ppos += cnt; return cnt; } static const struct file_operations flags_fops = { .read = flags_read, .write = flags_write, .llseek = generic_file_llseek, }; /* * On which CPU to inject? 
*/ MCE_INJECT_GET(extcpu); static int inj_extcpu_set(void *data, u64 val) { struct mce *m = (struct mce *)data; if (val >= nr_cpu_ids || !cpu_online(val)) { pr_err("%s: Invalid CPU: %llu\n", __func__, val); return -EINVAL; } m->extcpu = val; return 0; } DEFINE_SIMPLE_ATTRIBUTE(extcpu_fops, inj_extcpu_get, inj_extcpu_set, "%llu\n"); static void trigger_mce(void *info) { asm volatile("int $18"); } static void trigger_dfr_int(void *info) { asm volatile("int %0" :: "i" (DEFERRED_ERROR_VECTOR)); } static void trigger_thr_int(void *info) { asm volatile("int %0" :: "i" (THRESHOLD_APIC_VECTOR)); } static u32 get_nbc_for_node(int node_id) { struct cpuinfo_x86 *c = &boot_cpu_data; u32 cores_per_node; cores_per_node = (c->x86_max_cores * smp_num_siblings) / amd_get_nodes_per_socket(); return cores_per_node * node_id; } static void toggle_nb_mca_mst_cpu(u16 nid) { struct amd_northbridge *nb; struct pci_dev *F3; u32 val; int err; nb = node_to_amd_nb(nid); if (!nb) return; F3 = nb->misc; if (!F3) return; err = pci_read_config_dword(F3, NBCFG, &val); if (err) { pr_err("%s: Error reading F%dx%03x.\n", __func__, PCI_FUNC(F3->devfn), NBCFG); return; } if (val & BIT(27)) return; pr_err("%s: Set D18F3x44[NbMcaToMstCpuEn] which BIOS hasn't done.\n", __func__); val |= BIT(27); err = pci_write_config_dword(F3, NBCFG, val); if (err) pr_err("%s: Error writing F%dx%03x.\n", __func__, PCI_FUNC(F3->devfn), NBCFG); } static void prepare_msrs(void *info) { struct mce m = *(struct mce *)info; u8 b = m.bank; wrmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); if (boot_cpu_has(X86_FEATURE_SMCA)) { if (m.inject_flags == DFR_INT_INJ) { wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status); wrmsrl(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr); } else { wrmsrl(MSR_AMD64_SMCA_MCx_STATUS(b), m.status); wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr); } wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), m.misc); wrmsrl(MSR_AMD64_SMCA_MCx_SYND(b), m.synd); } else { wrmsrl(MSR_IA32_MCx_STATUS(b), m.status); wrmsrl(MSR_IA32_MCx_ADDR(b), m.addr); wrmsrl(MSR_IA32_MCx_MISC(b), m.misc); } } static void do_inject(void) { u64 mcg_status = 0; unsigned int cpu = i_mce.extcpu; u8 b = i_mce.bank; i_mce.tsc = rdtsc_ordered(); i_mce.status |= MCI_STATUS_VAL; if (i_mce.misc) i_mce.status |= MCI_STATUS_MISCV; if (i_mce.synd) i_mce.status |= MCI_STATUS_SYNDV; if (inj_type == SW_INJ) { mce_log(&i_mce); return; } /* prep MCE global settings for the injection */ mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV; if (!(i_mce.status & MCI_STATUS_PCC)) mcg_status |= MCG_STATUS_RIPV; /* * Ensure necessary status bits for deferred errors: * - MCx_STATUS[Deferred]: make sure it is a deferred error * - MCx_STATUS[UC] cleared: deferred errors are _not_ UC */ if (inj_type == DFR_INT_INJ) { i_mce.status |= MCI_STATUS_DEFERRED; i_mce.status &= ~MCI_STATUS_UC; } /* * For multi node CPUs, logging and reporting of bank 4 errors happens * only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for * Fam10h and later BKDGs. 
*/ if (boot_cpu_has(X86_FEATURE_AMD_DCM) && b == 4 && boot_cpu_data.x86 < 0x17) { toggle_nb_mca_mst_cpu(topology_die_id(cpu)); cpu = get_nbc_for_node(topology_die_id(cpu)); } cpus_read_lock(); if (!cpu_online(cpu)) goto err; toggle_hw_mce_inject(cpu, true); i_mce.mcgstatus = mcg_status; i_mce.inject_flags = inj_type; smp_call_function_single(cpu, prepare_msrs, &i_mce, 0); toggle_hw_mce_inject(cpu, false); switch (inj_type) { case DFR_INT_INJ: smp_call_function_single(cpu, trigger_dfr_int, NULL, 0); break; case THR_INT_INJ: smp_call_function_single(cpu, trigger_thr_int, NULL, 0); break; default: smp_call_function_single(cpu, trigger_mce, NULL, 0); } err: cpus_read_unlock(); } /* * This denotes into which bank we're injecting and triggers * the injection, at the same time. */ static int inj_bank_set(void *data, u64 val) { struct mce *m = (struct mce *)data; u8 n_banks; u64 cap; /* Get bank count on target CPU so we can handle non-uniform values. */ rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap); n_banks = cap & MCG_BANKCNT_MASK; if (val >= n_banks) { pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu); return -EINVAL; } m->bank = val; /* * sw-only injection allows to write arbitrary values into the MCA * registers because it tests only the decoding paths. */ if (inj_type == SW_INJ) goto inject; /* * Read IPID value to determine if a bank is populated on the target * CPU. */ if (cpu_feature_enabled(X86_FEATURE_SMCA)) { u64 ipid; if (rdmsrl_on_cpu(m->extcpu, MSR_AMD64_SMCA_MCx_IPID(val), &ipid)) { pr_err("Error reading IPID on CPU%d\n", m->extcpu); return -EINVAL; } if (!ipid) { pr_err("Cannot inject into unpopulated bank %llu\n", val); return -ENODEV; } } inject: do_inject(); /* Reset injection struct */ setup_inj_struct(&i_mce); return 0; } MCE_INJECT_GET(bank); DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n"); static const char readme_msg[] = "Description of the files and their usages:\n" "\n" "Note1: i refers to the bank number below.\n" "Note2: See respective BKDGs for the exact bit definitions of the files below\n" "as they mirror the hardware registers.\n" "\n" "status:\t Set MCi_STATUS: the bits in that MSR control the error type and\n" "\t attributes of the error which caused the MCE.\n" "\n" "misc:\t Set MCi_MISC: provide auxiliary info about the error. It is mostly\n" "\t used for error thresholding purposes and its validity is indicated by\n" "\t MCi_STATUS[MiscV].\n" "\n" "synd:\t Set MCi_SYND: provide syndrome info about the error. Only valid on\n" "\t Scalable MCA systems, and its validity is indicated by MCi_STATUS[SyndV].\n" "\n" "addr:\t Error address value to be written to MCi_ADDR. Log address information\n" "\t associated with the error.\n" "\n" "cpu:\t The CPU to inject the error on.\n" "\n" "bank:\t Specify the bank you want to inject the error into: the number of\n" "\t banks in a processor varies and is family/model-specific, therefore, the\n" "\t supplied value is sanity-checked. Setting the bank value also triggers the\n" "\t injection.\n" "\n" "flags:\t Injection type to be performed. Writing to this file will trigger a\n" "\t real machine check, an APIC interrupt or invoke the error decoder routines\n" "\t for AMD processors.\n" "\n" "\t Allowed error injection types:\n" "\t - \"sw\": Software error injection. Decode error to a human-readable \n" "\t format only. Safe to use.\n" "\t - \"hw\": Hardware error injection. Causes the #MC exception handler to \n" "\t handle the error. 
Be warned: might cause system panic if MCi_STATUS[PCC] \n" "\t is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n" "\t before injecting.\n" "\t - \"df\": Trigger APIC interrupt for Deferred error. Causes deferred \n" "\t error APIC interrupt handler to handle the error if the feature is \n" "\t is present in hardware. \n" "\t - \"th\": Trigger APIC interrupt for Threshold errors. Causes threshold \n" "\t APIC interrupt handler to handle the error. \n" "\n" "ipid:\t IPID (AMD-specific)\n" "\n"; static ssize_t inj_readme_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return simple_read_from_buffer(ubuf, cnt, ppos, readme_msg, strlen(readme_msg)); } static const struct file_operations readme_fops = { .read = inj_readme_read, }; static struct dfs_node { char *name; const struct file_operations *fops; umode_t perm; } dfs_fls[] = { { .name = "status", .fops = &status_fops, .perm = S_IRUSR | S_IWUSR }, { .name = "misc", .fops = &misc_fops, .perm = S_IRUSR | S_IWUSR }, { .name = "addr", .fops = &addr_fops, .perm = S_IRUSR | S_IWUSR }, { .name = "synd", .fops = &synd_fops, .perm = S_IRUSR | S_IWUSR }, { .name = "ipid", .fops = &ipid_fops, .perm = S_IRUSR | S_IWUSR }, { .name = "bank", .fops = &bank_fops, .perm = S_IRUSR | S_IWUSR }, { .name = "flags", .fops = &flags_fops, .perm = S_IRUSR | S_IWUSR }, { .name = "cpu", .fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR }, { .name = "README", .fops = &readme_fops, .perm = S_IRUSR | S_IRGRP | S_IROTH }, }; static void __init debugfs_init(void) { unsigned int i; dfs_inj = debugfs_create_dir("mce-inject", NULL); for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) debugfs_create_file(dfs_fls[i].name, dfs_fls[i].perm, dfs_inj, &i_mce, dfs_fls[i].fops); } static void check_hw_inj_possible(void) { int cpu; u8 bank; /* * This behavior exists only on SMCA systems though its not directly * related to SMCA. */ if (!cpu_feature_enabled(X86_FEATURE_SMCA)) return; cpu = get_cpu(); for (bank = 0; bank < MAX_NR_BANKS; ++bank) { u64 status = MCI_STATUS_VAL, ipid; /* Check whether bank is populated */ rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), ipid); if (!ipid) continue; toggle_hw_mce_inject(cpu, true); wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status); rdmsrl_safe(mca_msr_reg(bank, MCA_STATUS), &status); if (!status) { hw_injection_possible = false; pr_warn("Platform does not allow *hardware* error injection." "Try using APEI EINJ instead.\n"); } toggle_hw_mce_inject(cpu, false); break; } put_cpu(); } static int __init inject_init(void) { if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL)) return -ENOMEM; check_hw_inj_possible(); debugfs_init(); register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0, "mce_notify"); mce_register_injector_chain(&inject_nb); setup_inj_struct(&i_mce); pr_info("Machine check injector initialized\n"); return 0; } static void __exit inject_exit(void) { mce_unregister_injector_chain(&inject_nb); unregister_nmi_handler(NMI_LOCAL, "mce_notify"); debugfs_remove_recursive(dfs_inj); dfs_inj = NULL; memset(&dfs_fls, 0, sizeof(dfs_fls)); free_cpumask_var(mce_inject_cpumask); } module_init(inject_init); module_exit(inject_exit); MODULE_LICENSE("GPL");
linux-master
arch/x86/kernel/cpu/mce/inject.c
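The injector above is driven entirely through the debugfs files it creates under <debugfs>/mce-inject/. The following user-space sketch is illustrative only and not part of the kernel tree: it performs a software-only ("sw") injection by filling in the register files and then writing the bank number, which triggers inj_bank_set(). The /sys/kernel/debug mount point, the chosen CPU and bank, and the status value are assumptions for demonstration; meaningful status bits must follow the register layout described in the README file.

/*
 * Illustrative user-space sketch, not part of the kernel tree. Must be
 * run as root with debugfs mounted at /sys/kernel/debug. The CPU, bank
 * and status values below are placeholders for demonstration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int inj_write(const char *file, const char *val)
{
	char path[128];
	ssize_t ret;
	int fd;

	snprintf(path, sizeof(path), "/sys/kernel/debug/mce-inject/%s", file);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, val, strlen(val));
	close(fd);
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	/* "sw": decode-only injection, no #MC exception is raised. */
	inj_write("flags", "sw\n");
	inj_write("cpu", "0\n");
	/* Placeholder MCi_STATUS value (VAL bit + error code); see README. */
	inj_write("status", "0x8000000000000001\n");
	/* Writing the bank number triggers the injection (inj_bank_set()). */
	inj_write("bank", "1\n");
	return 0;
}

Note that flags_write() strips the last byte before matching, so the value written to "flags" needs the trailing newline; the numeric files are parsed with base auto-detection, so the "0x" prefix selects hexadecimal.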
// SPDX-License-Identifier: GPL-2.0-only /* * Machine check handler. * * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. * Rest from unknown author(s). * 2004 Andi Kleen. Rewrote most of it. * Copyright 2008 Intel Corporation * Author: Andi Kleen */ #include <linux/thread_info.h> #include <linux/capability.h> #include <linux/miscdevice.h> #include <linux/ratelimit.h> #include <linux/rcupdate.h> #include <linux/kobject.h> #include <linux/uaccess.h> #include <linux/kdebug.h> #include <linux/kernel.h> #include <linux/percpu.h> #include <linux/string.h> #include <linux/device.h> #include <linux/syscore_ops.h> #include <linux/delay.h> #include <linux/ctype.h> #include <linux/sched.h> #include <linux/sysfs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/poll.h> #include <linux/nmi.h> #include <linux/cpu.h> #include <linux/ras.h> #include <linux/smp.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/debugfs.h> #include <linux/irq_work.h> #include <linux/export.h> #include <linux/set_memory.h> #include <linux/sync_core.h> #include <linux/task_work.h> #include <linux/hardirq.h> #include <asm/intel-family.h> #include <asm/processor.h> #include <asm/traps.h> #include <asm/tlbflush.h> #include <asm/mce.h> #include <asm/msr.h> #include <asm/reboot.h> #include "internal.h" /* sysfs synchronization */ static DEFINE_MUTEX(mce_sysfs_mutex); #define CREATE_TRACE_POINTS #include <trace/events/mce.h> #define SPINUNIT 100 /* 100ns */ DEFINE_PER_CPU(unsigned, mce_exception_count); DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks); DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array); #define ATTR_LEN 16 /* One object for each MCE bank, shared by all CPUs */ struct mce_bank_dev { struct device_attribute attr; /* device attribute */ char attrname[ATTR_LEN]; /* attribute name */ u8 bank; /* bank number */ }; static struct mce_bank_dev mce_bank_devs[MAX_NR_BANKS]; struct mce_vendor_flags mce_flags __read_mostly; struct mca_config mca_cfg __read_mostly = { .bootlog = -1, .monarch_timeout = -1 }; static DEFINE_PER_CPU(struct mce, mces_seen); static unsigned long mce_need_notify; /* * MCA banks polled by the period polling timer for corrected events. * With Intel CMCI, this only has MCA banks which do not support CMCI (if any). */ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL }; /* * MCA banks controlled through firmware first for corrected errors. * This is a global list of banks for which we won't enable CMCI and we * won't poll. Firmware controls these banks and is responsible for * reporting corrected errors through GHES. Uncorrected/recoverable * errors are still notified through a machine check. */ mce_banks_t mce_banks_ce_disabled; static struct work_struct mce_work; static struct irq_work mce_irq_work; /* * CPU/chipset specific EDAC code can register a notifier call here to print * MCE errors in a human-readable form. 
*/ BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain); /* Do initial initialization of a struct mce */ void mce_setup(struct mce *m) { memset(m, 0, sizeof(struct mce)); m->cpu = m->extcpu = smp_processor_id(); /* need the internal __ version to avoid deadlocks */ m->time = __ktime_get_real_seconds(); m->cpuvendor = boot_cpu_data.x86_vendor; m->cpuid = cpuid_eax(1); m->socketid = cpu_data(m->extcpu).phys_proc_id; m->apicid = cpu_data(m->extcpu).initial_apicid; m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP); m->ppin = cpu_data(m->extcpu).ppin; m->microcode = boot_cpu_data.microcode; } DEFINE_PER_CPU(struct mce, injectm); EXPORT_PER_CPU_SYMBOL_GPL(injectm); void mce_log(struct mce *m) { if (!mce_gen_pool_add(m)) irq_work_queue(&mce_irq_work); } EXPORT_SYMBOL_GPL(mce_log); void mce_register_decode_chain(struct notifier_block *nb) { if (WARN_ON(nb->priority < MCE_PRIO_LOWEST || nb->priority > MCE_PRIO_HIGHEST)) return; blocking_notifier_chain_register(&x86_mce_decoder_chain, nb); } EXPORT_SYMBOL_GPL(mce_register_decode_chain); void mce_unregister_decode_chain(struct notifier_block *nb) { blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb); } EXPORT_SYMBOL_GPL(mce_unregister_decode_chain); static void __print_mce(struct mce *m) { pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n", m->extcpu, (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""), m->mcgstatus, m->bank, m->status); if (m->ip) { pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ", !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", m->cs, m->ip); if (m->cs == __KERNEL_CS) pr_cont("{%pS}", (void *)(unsigned long)m->ip); pr_cont("\n"); } pr_emerg(HW_ERR "TSC %llx ", m->tsc); if (m->addr) pr_cont("ADDR %llx ", m->addr); if (m->misc) pr_cont("MISC %llx ", m->misc); if (m->ppin) pr_cont("PPIN %llx ", m->ppin); if (mce_flags.smca) { if (m->synd) pr_cont("SYND %llx ", m->synd); if (m->ipid) pr_cont("IPID %llx ", m->ipid); } pr_cont("\n"); /* * Note this output is parsed by external tools and old fields * should not be changed. */ pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n", m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid, m->microcode); } static void print_mce(struct mce *m) { __print_mce(m); if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON) pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n"); } #define PANIC_TIMEOUT 5 /* 5 seconds */ static atomic_t mce_panicked; static int fake_panic; static atomic_t mce_fake_panicked; /* Panic in progress. Enable interrupts and wait for final IPI */ static void wait_for_panic(void) { long timeout = PANIC_TIMEOUT*USEC_PER_SEC; preempt_disable(); local_irq_enable(); while (timeout-- > 0) udelay(1); if (panic_timeout == 0) panic_timeout = mca_cfg.panic_timeout; panic("Panicing machine check CPU died"); } static noinstr void mce_panic(const char *msg, struct mce *final, char *exp) { struct llist_node *pending; struct mce_evt_llist *l; int apei_err = 0; /* * Allow instrumentation around external facilities usage. Not that it * matters a whole lot since the machine is going to panic anyway. 
*/ instrumentation_begin(); if (!fake_panic) { /* * Make sure only one CPU runs in machine check panic */ if (atomic_inc_return(&mce_panicked) > 1) wait_for_panic(); barrier(); bust_spinlocks(1); console_verbose(); } else { /* Don't log too much for fake panic */ if (atomic_inc_return(&mce_fake_panicked) > 1) goto out; } pending = mce_gen_pool_prepare_records(); /* First print corrected ones that are still unlogged */ llist_for_each_entry(l, pending, llnode) { struct mce *m = &l->mce; if (!(m->status & MCI_STATUS_UC)) { print_mce(m); if (!apei_err) apei_err = apei_write_mce(m); } } /* Now print uncorrected but with the final one last */ llist_for_each_entry(l, pending, llnode) { struct mce *m = &l->mce; if (!(m->status & MCI_STATUS_UC)) continue; if (!final || mce_cmp(m, final)) { print_mce(m); if (!apei_err) apei_err = apei_write_mce(m); } } if (final) { print_mce(final); if (!apei_err) apei_err = apei_write_mce(final); } if (exp) pr_emerg(HW_ERR "Machine check: %s\n", exp); if (!fake_panic) { if (panic_timeout == 0) panic_timeout = mca_cfg.panic_timeout; panic(msg); } else pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg); out: instrumentation_end(); } /* Support code for software error injection */ static int msr_to_offset(u32 msr) { unsigned bank = __this_cpu_read(injectm.bank); if (msr == mca_cfg.rip_msr) return offsetof(struct mce, ip); if (msr == mca_msr_reg(bank, MCA_STATUS)) return offsetof(struct mce, status); if (msr == mca_msr_reg(bank, MCA_ADDR)) return offsetof(struct mce, addr); if (msr == mca_msr_reg(bank, MCA_MISC)) return offsetof(struct mce, misc); if (msr == MSR_IA32_MCG_STATUS) return offsetof(struct mce, mcgstatus); return -1; } void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr) { if (wrmsr) { pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n", (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax, regs->ip, (void *)regs->ip); } else { pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n", (unsigned int)regs->cx, regs->ip, (void *)regs->ip); } show_stack_regs(regs); panic("MCA architectural violation!\n"); while (true) cpu_relax(); } /* MSR access wrappers used for error injection */ noinstr u64 mce_rdmsrl(u32 msr) { DECLARE_ARGS(val, low, high); if (__this_cpu_read(injectm.finished)) { int offset; u64 ret; instrumentation_begin(); offset = msr_to_offset(msr); if (offset < 0) ret = 0; else ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset); instrumentation_end(); return ret; } /* * RDMSR on MCA MSRs should not fault. If they do, this is very much an * architectural violation and needs to be reported to hw vendor. Panic * the box to not allow any further progress. */ asm volatile("1: rdmsr\n" "2:\n" _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR_IN_MCE) : EAX_EDX_RET(val, low, high) : "c" (msr)); return EAX_EDX_VAL(val, low, high); } static noinstr void mce_wrmsrl(u32 msr, u64 v) { u32 low, high; if (__this_cpu_read(injectm.finished)) { int offset; instrumentation_begin(); offset = msr_to_offset(msr); if (offset >= 0) *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v; instrumentation_end(); return; } low = (u32)v; high = (u32)(v >> 32); /* See comment in mce_rdmsrl() */ asm volatile("1: wrmsr\n" "2:\n" _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR_IN_MCE) : : "c" (msr), "a"(low), "d" (high) : "memory"); } /* * Collect all global (w.r.t. 
this processor) status about this machine * check into our "mce" struct so that we can use it later to assess * the severity of the problem as we read per-bank specific details. */ static noinstr void mce_gather_info(struct mce *m, struct pt_regs *regs) { /* * Enable instrumentation around mce_setup() which calls external * facilities. */ instrumentation_begin(); mce_setup(m); instrumentation_end(); m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); if (regs) { /* * Get the address of the instruction at the time of * the machine check error. */ if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) { m->ip = regs->ip; m->cs = regs->cs; /* * When in VM86 mode make the cs look like ring 3 * always. This is a lie, but it's better than passing * the additional vm86 bit around everywhere. */ if (v8086_mode(regs)) m->cs |= 3; } /* Use accurate RIP reporting if available. */ if (mca_cfg.rip_msr) m->ip = mce_rdmsrl(mca_cfg.rip_msr); } } int mce_available(struct cpuinfo_x86 *c) { if (mca_cfg.disabled) return 0; return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); } static void mce_schedule_work(void) { if (!mce_gen_pool_empty()) schedule_work(&mce_work); } static void mce_irq_work_cb(struct irq_work *entry) { mce_schedule_work(); } /* * Check if the address reported by the CPU is in a format we can parse. * It would be possible to add code for most other cases, but all would * be somewhat complicated (e.g. segment offset would require an instruction * parser). So only support physical addresses up to page granularity for now. */ int mce_usable_address(struct mce *m) { if (!(m->status & MCI_STATUS_ADDRV)) return 0; /* Checks after this one are Intel/Zhaoxin-specific: */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) return 1; if (!(m->status & MCI_STATUS_MISCV)) return 0; if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT) return 0; if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS) return 0; return 1; } EXPORT_SYMBOL_GPL(mce_usable_address); bool mce_is_memory_error(struct mce *m) { switch (m->cpuvendor) { case X86_VENDOR_AMD: case X86_VENDOR_HYGON: return amd_mce_is_memory_error(m); case X86_VENDOR_INTEL: case X86_VENDOR_ZHAOXIN: /* * Intel SDM Volume 3B - 15.9.2 Compound Error Codes * * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for * indicating a memory error. Bit 8 is used for indicating a * cache hierarchy error. The combination of bit 2 and bit 3 * is used for indicating a `generic' cache hierarchy error * But we can't just blindly check the above bits, because if * bit 11 is set, then it is a bus/interconnect error - and * either way the above bits just gives more detail on what * bus/interconnect error happened. Note that bit 12 can be * ignored, as it's the "filter" bit. 
*/ return (m->status & 0xef80) == BIT(7) || (m->status & 0xef00) == BIT(8) || (m->status & 0xeffc) == 0xc; default: return false; } } EXPORT_SYMBOL_GPL(mce_is_memory_error); static bool whole_page(struct mce *m) { if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV)) return true; return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT; } bool mce_is_correctable(struct mce *m) { if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED) return false; if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED) return false; if (m->status & MCI_STATUS_UC) return false; return true; } EXPORT_SYMBOL_GPL(mce_is_correctable); static int mce_early_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct mce *m = (struct mce *)data; if (!m) return NOTIFY_DONE; /* Emit the trace record: */ trace_mce_record(m); set_bit(0, &mce_need_notify); mce_notify_irq(); return NOTIFY_DONE; } static struct notifier_block early_nb = { .notifier_call = mce_early_notifier, .priority = MCE_PRIO_EARLY, }; static int uc_decode_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct mce *mce = (struct mce *)data; unsigned long pfn; if (!mce || !mce_usable_address(mce)) return NOTIFY_DONE; if (mce->severity != MCE_AO_SEVERITY && mce->severity != MCE_DEFERRED_SEVERITY) return NOTIFY_DONE; pfn = (mce->addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT; if (!memory_failure(pfn, 0)) { set_mce_nospec(pfn); mce->kflags |= MCE_HANDLED_UC; } return NOTIFY_OK; } static struct notifier_block mce_uc_nb = { .notifier_call = uc_decode_notifier, .priority = MCE_PRIO_UC, }; static int mce_default_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct mce *m = (struct mce *)data; if (!m) return NOTIFY_DONE; if (mca_cfg.print_all || !m->kflags) __print_mce(m); return NOTIFY_DONE; } static struct notifier_block mce_default_nb = { .notifier_call = mce_default_notifier, /* lowest prio, we want it to run last. */ .priority = MCE_PRIO_LOWEST, }; /* * Read ADDR and MISC registers. */ static noinstr void mce_read_aux(struct mce *m, int i) { if (m->status & MCI_STATUS_MISCV) m->misc = mce_rdmsrl(mca_msr_reg(i, MCA_MISC)); if (m->status & MCI_STATUS_ADDRV) { m->addr = mce_rdmsrl(mca_msr_reg(i, MCA_ADDR)); /* * Mask the reported address by the reported granularity. */ if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) { u8 shift = MCI_MISC_ADDR_LSB(m->misc); m->addr >>= shift; m->addr <<= shift; } smca_extract_err_addr(m); } if (mce_flags.smca) { m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i)); if (m->status & MCI_STATUS_SYNDV) m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i)); } } DEFINE_PER_CPU(unsigned, mce_poll_count); /* * Poll for corrected events or events that happened before reset. * Those are just logged through /dev/mcelog. * * This is executed in standard interrupt context. * * Note: spec recommends to panic for fatal unsignalled * errors here. However this would be quite problematic -- * we would need to reimplement the Monarch handling and * it would mess up the exclusion between exception handler * and poll handler -- * so we skip this for now. * These cases should not happen anyways, or only when the CPU * is already totally * confused. In this case it's likely it will * not fully execute the machine check handler either. 
*/ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); bool error_seen = false; struct mce m; int i; this_cpu_inc(mce_poll_count); mce_gather_info(&m, NULL); if (flags & MCP_TIMESTAMP) m.tsc = rdtsc(); for (i = 0; i < this_cpu_read(mce_num_banks); i++) { if (!mce_banks[i].ctl || !test_bit(i, *b)) continue; m.misc = 0; m.addr = 0; m.bank = i; barrier(); m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); /* If this entry is not valid, ignore it */ if (!(m.status & MCI_STATUS_VAL)) continue; /* * If we are logging everything (at CPU online) or this * is a corrected error, then we must log it. */ if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC)) goto log_it; /* * Newer Intel systems that support software error * recovery need to make additional checks. Other * CPUs should skip over uncorrected errors, but log * everything else. */ if (!mca_cfg.ser) { if (m.status & MCI_STATUS_UC) continue; goto log_it; } /* Log "not enabled" (speculative) errors */ if (!(m.status & MCI_STATUS_EN)) goto log_it; /* * Log UCNA (SDM: 15.6.3 "UCR Error Classification") * UC == 1 && PCC == 0 && S == 0 */ if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S)) goto log_it; /* * Skip anything else. Presumption is that our read of this * bank is racing with a machine check. Leave the log alone * for do_machine_check() to deal with it. */ continue; log_it: error_seen = true; if (flags & MCP_DONTLOG) goto clear_it; mce_read_aux(&m, i); m.severity = mce_severity(&m, NULL, NULL, false); /* * Don't get the IP here because it's unlikely to * have anything to do with the actual error location. */ if (mca_cfg.dont_log_ce && !mce_usable_address(&m)) goto clear_it; if (flags & MCP_QUEUE_LOG) mce_gen_pool_add(&m); else mce_log(&m); clear_it: /* * Clear state for this bank. */ mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); } /* * Don't clear MCG_STATUS here because it's only defined for * exceptions. */ sync_core(); return error_seen; } EXPORT_SYMBOL_GPL(machine_check_poll); /* * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM * Vol 3B Table 15-20). But this confuses both the code that determines * whether the machine check occurred in kernel or user mode, and also * the severity assessment code. Pretend that EIPV was set, and take the * ip/cs values from the pt_regs that mce_gather_info() ignored earlier. */ static __always_inline void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) { if (bank != 0) return; if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0) return; if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC| MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV| MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR| MCACOD)) != (MCI_STATUS_UC|MCI_STATUS_EN| MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S| MCI_STATUS_AR|MCACOD_INSTR)) return; m->mcgstatus |= MCG_STATUS_EIPV; m->ip = regs->ip; m->cs = regs->cs; } /* * Disable fast string copy and return from the MCE handler upon the first SRAR * MCE on bank 1 due to a CPU erratum on Intel Skylake/Cascade Lake/Cooper Lake * CPUs. * The fast string copy instructions ("REP; MOVS*") could consume an * uncorrectable memory error in the cache line _right after_ the desired region * to copy and raise an MCE with RIP pointing to the instruction _after_ the * "REP; MOVS*". * This mitigation addresses the issue completely with the caveat of performance * degradation on the CPU affected. 
This is still better than the OS crashing on * MCEs raised on an irrelevant process due to "REP; MOVS*" accesses from a * kernel context (e.g., copy_page). * * Returns true when fast string copy on CPU has been disabled. */ static noinstr bool quirk_skylake_repmov(void) { u64 mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); u64 misc_enable = mce_rdmsrl(MSR_IA32_MISC_ENABLE); u64 mc1_status; /* * Apply the quirk only to local machine checks, i.e., no broadcast * sync is needed. */ if (!(mcgstatus & MCG_STATUS_LMCES) || !(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) return false; mc1_status = mce_rdmsrl(MSR_IA32_MCx_STATUS(1)); /* Check for a software-recoverable data fetch error. */ if ((mc1_status & (MCI_STATUS_VAL | MCI_STATUS_OVER | MCI_STATUS_UC | MCI_STATUS_EN | MCI_STATUS_ADDRV | MCI_STATUS_MISCV | MCI_STATUS_PCC | MCI_STATUS_AR | MCI_STATUS_S)) == (MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | MCI_STATUS_ADDRV | MCI_STATUS_MISCV | MCI_STATUS_AR | MCI_STATUS_S)) { misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING; mce_wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); mce_wrmsrl(MSR_IA32_MCx_STATUS(1), 0); instrumentation_begin(); pr_err_once("Erratum detected, disable fast string copy instructions.\n"); instrumentation_end(); return true; } return false; } /* * Some Zen-based Instruction Fetch Units set EIPV=RIPV=0 on poison consumption * errors. This means mce_gather_info() will not save the "ip" and "cs" registers. * * However, the context is still valid, so save the "cs" register for later use. * * The "ip" register is truly unknown, so don't save it or fixup EIPV/RIPV. * * The Instruction Fetch Unit is at MCA bank 1 for all affected systems. */ static __always_inline void quirk_zen_ifu(int bank, struct mce *m, struct pt_regs *regs) { if (bank != 1) return; if (!(m->status & MCI_STATUS_POISON)) return; m->cs = regs->cs; } /* * Do a quick check if any of the events requires a panic. * This decides if we keep the events around or clear them. */ static __always_inline int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, struct pt_regs *regs) { char *tmp = *msg; int i; for (i = 0; i < this_cpu_read(mce_num_banks); i++) { m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); if (!(m->status & MCI_STATUS_VAL)) continue; arch___set_bit(i, validp); if (mce_flags.snb_ifu_quirk) quirk_sandybridge_ifu(i, m, regs); if (mce_flags.zen_ifu_quirk) quirk_zen_ifu(i, m, regs); m->bank = i; if (mce_severity(m, regs, &tmp, true) >= MCE_PANIC_SEVERITY) { mce_read_aux(m, i); *msg = tmp; return 1; } } return 0; } /* * Variable to establish order between CPUs while scanning. * Each CPU spins initially until executing is equal its number. */ static atomic_t mce_executing; /* * Defines order of CPUs on entry. First CPU becomes Monarch. */ static atomic_t mce_callin; /* * Track which CPUs entered the MCA broadcast synchronization and which not in * order to print holdouts. */ static cpumask_t mce_missing_cpus = CPU_MASK_ALL; /* * Check if a timeout waiting for other CPUs happened. */ static noinstr int mce_timed_out(u64 *t, const char *msg) { int ret = 0; /* Enable instrumentation around calls to external facilities */ instrumentation_begin(); /* * The others already did panic for some reason. * Bail out like in a timeout. * rmb() to tell the compiler that system_state * might have been modified by someone else. 
*/ rmb(); if (atomic_read(&mce_panicked)) wait_for_panic(); if (!mca_cfg.monarch_timeout) goto out; if ((s64)*t < SPINUNIT) { if (cpumask_and(&mce_missing_cpus, cpu_online_mask, &mce_missing_cpus)) pr_emerg("CPUs not responding to MCE broadcast (may include false positives): %*pbl\n", cpumask_pr_args(&mce_missing_cpus)); mce_panic(msg, NULL, NULL); ret = 1; goto out; } *t -= SPINUNIT; out: touch_nmi_watchdog(); instrumentation_end(); return ret; } /* * The Monarch's reign. The Monarch is the CPU who entered * the machine check handler first. It waits for the others to * raise the exception too and then grades them. When any * error is fatal panic. Only then let the others continue. * * The other CPUs entering the MCE handler will be controlled by the * Monarch. They are called Subjects. * * This way we prevent any potential data corruption in a unrecoverable case * and also makes sure always all CPU's errors are examined. * * Also this detects the case of a machine check event coming from outer * space (not detected by any CPUs) In this case some external agent wants * us to shut down, so panic too. * * The other CPUs might still decide to panic if the handler happens * in a unrecoverable place, but in this case the system is in a semi-stable * state and won't corrupt anything by itself. It's ok to let the others * continue for a bit first. * * All the spin loops have timeouts; when a timeout happens a CPU * typically elects itself to be Monarch. */ static void mce_reign(void) { int cpu; struct mce *m = NULL; int global_worst = 0; char *msg = NULL; /* * This CPU is the Monarch and the other CPUs have run * through their handlers. * Grade the severity of the errors of all the CPUs. */ for_each_possible_cpu(cpu) { struct mce *mtmp = &per_cpu(mces_seen, cpu); if (mtmp->severity > global_worst) { global_worst = mtmp->severity; m = &per_cpu(mces_seen, cpu); } } /* * Cannot recover? Panic here then. * This dumps all the mces in the log buffer and stops the * other CPUs. */ if (m && global_worst >= MCE_PANIC_SEVERITY) { /* call mce_severity() to get "msg" for panic */ mce_severity(m, NULL, &msg, true); mce_panic("Fatal machine check", m, msg); } /* * For UC somewhere we let the CPU who detects it handle it. * Also must let continue the others, otherwise the handling * CPU could deadlock on a lock. */ /* * No machine check event found. Must be some external * source or one CPU is hung. Panic. */ if (global_worst <= MCE_KEEP_SEVERITY) mce_panic("Fatal machine check from unknown source", NULL, NULL); /* * Now clear all the mces_seen so that they don't reappear on * the next mce. */ for_each_possible_cpu(cpu) memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce)); } static atomic_t global_nwo; /* * Start of Monarch synchronization. This waits until all CPUs have * entered the exception handler and then determines if any of them * saw a fatal event that requires panic. Then it executes them * in the entry order. * TBD double check parallel CPU hotunplug */ static noinstr int mce_start(int *no_way_out) { u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; int order, ret = -1; if (!timeout) return ret; raw_atomic_add(*no_way_out, &global_nwo); /* * Rely on the implied barrier below, such that global_nwo * is updated before mce_callin. */ order = raw_atomic_inc_return(&mce_callin); arch_cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus); /* Enable instrumentation around calls to external facilities */ instrumentation_begin(); /* * Wait for everyone. 
*/ while (raw_atomic_read(&mce_callin) != num_online_cpus()) { if (mce_timed_out(&timeout, "Timeout: Not all CPUs entered broadcast exception handler")) { raw_atomic_set(&global_nwo, 0); goto out; } ndelay(SPINUNIT); } /* * mce_callin should be read before global_nwo */ smp_rmb(); if (order == 1) { /* * Monarch: Starts executing now, the others wait. */ raw_atomic_set(&mce_executing, 1); } else { /* * Subject: Now start the scanning loop one by one in * the original callin order. * This way when there are any shared banks it will be * only seen by one CPU before cleared, avoiding duplicates. */ while (raw_atomic_read(&mce_executing) < order) { if (mce_timed_out(&timeout, "Timeout: Subject CPUs unable to finish machine check processing")) { raw_atomic_set(&global_nwo, 0); goto out; } ndelay(SPINUNIT); } } /* * Cache the global no_way_out state. */ *no_way_out = raw_atomic_read(&global_nwo); ret = order; out: instrumentation_end(); return ret; } /* * Synchronize between CPUs after main scanning loop. * This invokes the bulk of the Monarch processing. */ static noinstr int mce_end(int order) { u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; int ret = -1; /* Allow instrumentation around external facilities. */ instrumentation_begin(); if (!timeout) goto reset; if (order < 0) goto reset; /* * Allow others to run. */ atomic_inc(&mce_executing); if (order == 1) { /* * Monarch: Wait for everyone to go through their scanning * loops. */ while (atomic_read(&mce_executing) <= num_online_cpus()) { if (mce_timed_out(&timeout, "Timeout: Monarch CPU unable to finish machine check processing")) goto reset; ndelay(SPINUNIT); } mce_reign(); barrier(); ret = 0; } else { /* * Subject: Wait for Monarch to finish. */ while (atomic_read(&mce_executing) != 0) { if (mce_timed_out(&timeout, "Timeout: Monarch CPU did not finish machine check processing")) goto reset; ndelay(SPINUNIT); } /* * Don't reset anything. That's done by the Monarch. */ ret = 0; goto out; } /* * Reset all global state. */ reset: atomic_set(&global_nwo, 0); atomic_set(&mce_callin, 0); cpumask_setall(&mce_missing_cpus); barrier(); /* * Let others run again. */ atomic_set(&mce_executing, 0); out: instrumentation_end(); return ret; } static __always_inline void mce_clear_state(unsigned long *toclear) { int i; for (i = 0; i < this_cpu_read(mce_num_banks); i++) { if (arch_test_bit(i, toclear)) mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); } } /* * Cases where we avoid rendezvous handler timeout: * 1) If this CPU is offline. * * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to * skip those CPUs which remain looping in the 1st kernel - see * crash_nmi_callback(). * * Note: there still is a small window between kexec-ing and the new, * kdump kernel establishing a new #MC handler where a broadcasted MCE * might not get handled properly. 
*/ static noinstr bool mce_check_crashing_cpu(void) { unsigned int cpu = smp_processor_id(); if (arch_cpu_is_offline(cpu) || (crashing_cpu != -1 && crashing_cpu != cpu)) { u64 mcgstatus; mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS); if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { if (mcgstatus & MCG_STATUS_LMCES) return false; } if (mcgstatus & MCG_STATUS_RIPV) { __wrmsr(MSR_IA32_MCG_STATUS, 0, 0); return true; } } return false; } static __always_inline int __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final, unsigned long *toclear, unsigned long *valid_banks, int no_way_out, int *worst) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); struct mca_config *cfg = &mca_cfg; int severity, i, taint = 0; for (i = 0; i < this_cpu_read(mce_num_banks); i++) { arch___clear_bit(i, toclear); if (!arch_test_bit(i, valid_banks)) continue; if (!mce_banks[i].ctl) continue; m->misc = 0; m->addr = 0; m->bank = i; m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); if (!(m->status & MCI_STATUS_VAL)) continue; /* * Corrected or non-signaled errors are handled by * machine_check_poll(). Leave them alone, unless this panics. */ if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) && !no_way_out) continue; /* Set taint even when machine check was not enabled. */ taint++; severity = mce_severity(m, regs, NULL, true); /* * When machine check was for corrected/deferred handler don't * touch, unless we're panicking. */ if ((severity == MCE_KEEP_SEVERITY || severity == MCE_UCNA_SEVERITY) && !no_way_out) continue; arch___set_bit(i, toclear); /* Machine check event was not enabled. Clear, but ignore. */ if (severity == MCE_NO_SEVERITY) continue; mce_read_aux(m, i); /* assuming valid severity level != 0 */ m->severity = severity; /* * Enable instrumentation around the mce_log() call which is * done in #MC context, where instrumentation is disabled. */ instrumentation_begin(); mce_log(m); instrumentation_end(); if (severity > *worst) { *final = *m; *worst = severity; } } /* mce_clear_state will clear *final, save locally for use later */ *m = *final; return taint; } static void kill_me_now(struct callback_head *ch) { struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me); p->mce_count = 0; force_sig(SIGBUS); } static void kill_me_maybe(struct callback_head *cb) { struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me); int flags = MF_ACTION_REQUIRED; unsigned long pfn; int ret; p->mce_count = 0; pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr); if (!p->mce_ripv) flags |= MF_MUST_KILL; pfn = (p->mce_addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT; ret = memory_failure(pfn, flags); if (!ret) { set_mce_nospec(pfn); sync_core(); return; } /* * -EHWPOISON from memory_failure() means that it already sent SIGBUS * to the current process with the proper error info, * -EOPNOTSUPP means hwpoison_filter() filtered the error event, * * In both cases, no further processing is required. 
*/ if (ret == -EHWPOISON || ret == -EOPNOTSUPP) return; pr_err("Memory error not recovered"); kill_me_now(cb); } static void kill_me_never(struct callback_head *cb) { struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me); unsigned long pfn; p->mce_count = 0; pr_err("Kernel accessed poison in user space at %llx\n", p->mce_addr); pfn = (p->mce_addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT; if (!memory_failure(pfn, 0)) set_mce_nospec(pfn); } static void queue_task_work(struct mce *m, char *msg, void (*func)(struct callback_head *)) { int count = ++current->mce_count; /* First call, save all the details */ if (count == 1) { current->mce_addr = m->addr; current->mce_kflags = m->kflags; current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV); current->mce_whole_page = whole_page(m); current->mce_kill_me.func = func; } /* Ten is likely overkill. Don't expect more than two faults before task_work() */ if (count > 10) mce_panic("Too many consecutive machine checks while accessing user data", m, msg); /* Second or later call, make sure page address matches the one from first call */ if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT)) mce_panic("Consecutive machine checks to different user pages", m, msg); /* Do not call task_work_add() more than once */ if (count > 1) return; task_work_add(current, &current->mce_kill_me, TWA_RESUME); } /* Handle unconfigured int18 (should never happen) */ static noinstr void unexpected_machine_check(struct pt_regs *regs) { instrumentation_begin(); pr_err("CPU#%d: Unexpected int18 (Machine Check)\n", smp_processor_id()); instrumentation_end(); } /* * The actual machine check handler. This only handles real exceptions when * something got corrupted coming in through int 18. * * This is executed in #MC context not subject to normal locking rules. * This implies that most kernel services cannot be safely used. Don't even * think about putting a printk in there! * * On Intel systems this is entered on all CPUs in parallel through * MCE broadcast. However some CPUs might be broken beyond repair, * so be always careful when synchronizing with others. * * Tracing and kprobes are disabled: if we interrupted a kernel context * with IF=1, we need to minimize stack usage. There are also recursion * issues: if the machine check was due to a failure of the memory * backing the user stack, tracing that reads the user stack will cause * potentially infinite recursion. * * Currently, the #MC handler calls out to a number of external facilities * and, therefore, allows instrumentation around them. The optimal thing to * have would be to do the absolutely minimal work required in #MC context * and have instrumentation disabled only around that. Further processing can * then happen in process context where instrumentation is allowed. Achieving * that requires careful auditing and modifications. Until then, the code * allows instrumentation temporarily, where required. 
* */ noinstr void do_machine_check(struct pt_regs *regs) { int worst = 0, order, no_way_out, kill_current_task, lmce, taint = 0; DECLARE_BITMAP(valid_banks, MAX_NR_BANKS) = { 0 }; DECLARE_BITMAP(toclear, MAX_NR_BANKS) = { 0 }; struct mce m, *final; char *msg = NULL; if (unlikely(mce_flags.p5)) return pentium_machine_check(regs); else if (unlikely(mce_flags.winchip)) return winchip_machine_check(regs); else if (unlikely(!mca_cfg.initialized)) return unexpected_machine_check(regs); if (mce_flags.skx_repmov_quirk && quirk_skylake_repmov()) goto clear; /* * Establish sequential order between the CPUs entering the machine * check handler. */ order = -1; /* * If no_way_out gets set, there is no safe way to recover from this * MCE. */ no_way_out = 0; /* * If kill_current_task is not set, there might be a way to recover from this * error. */ kill_current_task = 0; /* * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES * on Intel. */ lmce = 1; this_cpu_inc(mce_exception_count); mce_gather_info(&m, regs); m.tsc = rdtsc(); final = this_cpu_ptr(&mces_seen); *final = m; no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs); barrier(); /* * When no restart IP might need to kill or panic. * Assume the worst for now, but if we find the * severity is MCE_AR_SEVERITY we have other options. */ if (!(m.mcgstatus & MCG_STATUS_RIPV)) kill_current_task = 1; /* * Check if this MCE is signaled to only this logical processor, * on Intel, Zhaoxin only. */ if (m.cpuvendor == X86_VENDOR_INTEL || m.cpuvendor == X86_VENDOR_ZHAOXIN) lmce = m.mcgstatus & MCG_STATUS_LMCES; /* * Local machine check may already know that we have to panic. * Broadcast machine check begins rendezvous in mce_start() * Go through all banks in exclusion of the other CPUs. This way we * don't report duplicated events on shared banks because the first one * to see it will clear it. */ if (lmce) { if (no_way_out) mce_panic("Fatal local machine check", &m, msg); } else { order = mce_start(&no_way_out); } taint = __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst); if (!no_way_out) mce_clear_state(toclear); /* * Do most of the synchronization with other CPUs. * When there's any problem use only local no_way_out state. */ if (!lmce) { if (mce_end(order) < 0) { if (!no_way_out) no_way_out = worst >= MCE_PANIC_SEVERITY; if (no_way_out) mce_panic("Fatal machine check on current CPU", &m, msg); } } else { /* * If there was a fatal machine check we should have * already called mce_panic earlier in this function. * Since we re-read the banks, we might have found * something new. Check again to see if we found a * fatal error. We call "mce_severity()" again to * make sure we have the right "msg". */ if (worst >= MCE_PANIC_SEVERITY) { mce_severity(&m, regs, &msg, true); mce_panic("Local fatal machine check!", &m, msg); } } /* * Enable instrumentation around the external facilities like task_work_add() * (via queue_task_work()), fixup_exception() etc. For now, that is. Fixing this * properly would need a lot more involved reorganization. */ instrumentation_begin(); if (taint) add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); if (worst != MCE_AR_SEVERITY && !kill_current_task) goto out; /* Fault was in user mode and we need to take some action */ if ((m.cs & 3) == 3) { /* If this triggers there is no way to recover. Die hard. 
*/ BUG_ON(!on_thread_stack() || !user_mode(regs)); if (!mce_usable_address(&m)) queue_task_work(&m, msg, kill_me_now); else queue_task_work(&m, msg, kill_me_maybe); } else { /* * Handle an MCE which has happened in kernel space but from * which the kernel can recover: ex_has_fault_handler() has * already verified that the rIP at which the error happened is * a rIP from which the kernel can recover (by jumping to * recovery code specified in _ASM_EXTABLE_FAULT()) and the * corresponding exception handler which would do that is the * proper one. */ if (m.kflags & MCE_IN_KERNEL_RECOV) { if (!fixup_exception(regs, X86_TRAP_MC, 0, 0)) mce_panic("Failed kernel mode recovery", &m, msg); } if (m.kflags & MCE_IN_KERNEL_COPYIN) queue_task_work(&m, msg, kill_me_never); } out: instrumentation_end(); clear: mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); } EXPORT_SYMBOL_GPL(do_machine_check); #ifndef CONFIG_MEMORY_FAILURE int memory_failure(unsigned long pfn, int flags) { /* mce_severity() should not hand us an ACTION_REQUIRED error */ BUG_ON(flags & MF_ACTION_REQUIRED); pr_err("Uncorrected memory error in page 0x%lx ignored\n" "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", pfn); return 0; } #endif /* * Periodic polling timer for "silent" machine check errors. If the * poller finds an MCE, poll 2x faster. When the poller finds no more * errors, poll 2x slower (up to check_interval seconds). */ static unsigned long check_interval = INITIAL_CHECK_INTERVAL; static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ static DEFINE_PER_CPU(struct timer_list, mce_timer); static unsigned long mce_adjust_timer_default(unsigned long interval) { return interval; } static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; static void __start_timer(struct timer_list *t, unsigned long interval) { unsigned long when = jiffies + interval; unsigned long flags; local_irq_save(flags); if (!timer_pending(t) || time_before(when, t->expires)) mod_timer(t, round_jiffies(when)); local_irq_restore(flags); } static void mc_poll_banks_default(void) { machine_check_poll(0, this_cpu_ptr(&mce_poll_banks)); } void (*mc_poll_banks)(void) = mc_poll_banks_default; static void mce_timer_fn(struct timer_list *t) { struct timer_list *cpu_t = this_cpu_ptr(&mce_timer); unsigned long iv; WARN_ON(cpu_t != t); iv = __this_cpu_read(mce_next_interval); if (mce_available(this_cpu_ptr(&cpu_info))) { mc_poll_banks(); if (mce_intel_cmci_poll()) { iv = mce_adjust_timer(iv); goto done; } } /* * Alert userspace if needed. If we logged an MCE, reduce the polling * interval, otherwise increase the polling interval. */ if (mce_notify_irq()) iv = max(iv / 2, (unsigned long) HZ/100); else iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); done: __this_cpu_write(mce_next_interval, iv); __start_timer(t, iv); } /* * Ensure that the timer is firing in @interval from now. */ void mce_timer_kick(unsigned long interval) { struct timer_list *t = this_cpu_ptr(&mce_timer); unsigned long iv = __this_cpu_read(mce_next_interval); __start_timer(t, interval); if (interval < iv) __this_cpu_write(mce_next_interval, interval); } /* Must not be called in IRQ context where del_timer_sync() can deadlock */ static void mce_timer_delete_all(void) { int cpu; for_each_online_cpu(cpu) del_timer_sync(&per_cpu(mce_timer, cpu)); } /* * Notify the user(s) about new machine check events. * Can be called from interrupt context, but not from machine check/NMI * context. 
*/ int mce_notify_irq(void) { /* Not more than two messages every minute */ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); if (test_and_clear_bit(0, &mce_need_notify)) { mce_work_trigger(); if (__ratelimit(&ratelimit)) pr_info(HW_ERR "Machine check events logged\n"); return 1; } return 0; } EXPORT_SYMBOL_GPL(mce_notify_irq); static void __mcheck_cpu_mce_banks_init(void) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); u8 n_banks = this_cpu_read(mce_num_banks); int i; for (i = 0; i < n_banks; i++) { struct mce_bank *b = &mce_banks[i]; /* * Init them all, __mcheck_cpu_apply_quirks() is going to apply * the required vendor quirks before * __mcheck_cpu_init_clear_banks() does the final bank setup. */ b->ctl = -1ULL; b->init = true; } } /* * Initialize Machine Checks for a CPU. */ static void __mcheck_cpu_cap_init(void) { u64 cap; u8 b; rdmsrl(MSR_IA32_MCG_CAP, cap); b = cap & MCG_BANKCNT_MASK; if (b > MAX_NR_BANKS) { pr_warn("CPU%d: Using only %u machine check banks out of %u\n", smp_processor_id(), MAX_NR_BANKS, b); b = MAX_NR_BANKS; } this_cpu_write(mce_num_banks, b); __mcheck_cpu_mce_banks_init(); /* Use accurate RIP reporting if available. */ if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9) mca_cfg.rip_msr = MSR_IA32_MCG_EIP; if (cap & MCG_SER_P) mca_cfg.ser = 1; } static void __mcheck_cpu_init_generic(void) { enum mcp_flags m_fl = 0; mce_banks_t all_banks; u64 cap; if (!mca_cfg.bootlog) m_fl = MCP_DONTLOG; /* * Log the machine checks left over from the previous reset. Log them * only, do not start processing them. That will happen in mcheck_late_init() * when all consumers have been registered on the notifier chain. */ bitmap_fill(all_banks, MAX_NR_BANKS); machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks); cr4_set_bits(X86_CR4_MCE); rdmsrl(MSR_IA32_MCG_CAP, cap); if (cap & MCG_CTL_P) wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); } static void __mcheck_cpu_init_clear_banks(void) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); int i; for (i = 0; i < this_cpu_read(mce_num_banks); i++) { struct mce_bank *b = &mce_banks[i]; if (!b->init) continue; wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); } } /* * Do a final check to see if there are any unused/RAZ banks. * * This must be done after the banks have been initialized and any quirks have * been applied. * * Do not call this from any user-initiated flows, e.g. CPU hotplug or sysfs. * Otherwise, a user who disables a bank will not be able to re-enable it * without a system reboot. 
*/ static void __mcheck_cpu_check_banks(void) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); u64 msrval; int i; for (i = 0; i < this_cpu_read(mce_num_banks); i++) { struct mce_bank *b = &mce_banks[i]; if (!b->init) continue; rdmsrl(mca_msr_reg(i, MCA_CTL), msrval); b->init = !!msrval; } } /* Add per CPU specific workarounds here */ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); struct mca_config *cfg = &mca_cfg; if (c->x86_vendor == X86_VENDOR_UNKNOWN) { pr_info("unknown CPU type - not enabling MCE support\n"); return -EOPNOTSUPP; } /* This should be disabled by the BIOS, but isn't always */ if (c->x86_vendor == X86_VENDOR_AMD) { if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) { /* * disable GART TBL walk error reporting, which * trips off incorrectly with the IOMMU & 3ware * & Cerberus: */ clear_bit(10, (unsigned long *)&mce_banks[4].ctl); } if (c->x86 < 0x11 && cfg->bootlog < 0) { /* * Lots of broken BIOS around that don't clear them * by default and leave crap in there. Don't log: */ cfg->bootlog = 0; } /* * Various K7s with broken bank 0 around. Always disable * by default. */ if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0) mce_banks[0].ctl = 0; /* * overflow_recov is supported for F15h Models 00h-0fh * even though we don't have a CPUID bit for it. */ if (c->x86 == 0x15 && c->x86_model <= 0xf) mce_flags.overflow_recov = 1; if (c->x86 >= 0x17 && c->x86 <= 0x1A) mce_flags.zen_ifu_quirk = 1; } if (c->x86_vendor == X86_VENDOR_INTEL) { /* * SDM documents that on family 6 bank 0 should not be written * because it aliases to another special BIOS controlled * register. * But it's not aliased anymore on model 0x1a+ * Don't ignore bank 0 completely because there could be a * valid event later, merely don't write CTL0. */ if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0) mce_banks[0].init = false; /* * All newer Intel systems support MCE broadcasting. Enable * synchronization with a one second timeout. */ if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) && cfg->monarch_timeout < 0) cfg->monarch_timeout = USEC_PER_SEC; /* * There are also broken BIOSes on some Pentium M and * earlier systems: */ if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0) cfg->bootlog = 0; if (c->x86 == 6 && c->x86_model == 45) mce_flags.snb_ifu_quirk = 1; /* * Skylake, Cascacde Lake and Cooper Lake require a quirk on * rep movs. */ if (c->x86 == 6 && c->x86_model == INTEL_FAM6_SKYLAKE_X) mce_flags.skx_repmov_quirk = 1; } if (c->x86_vendor == X86_VENDOR_ZHAOXIN) { /* * All newer Zhaoxin CPUs support MCE broadcasting. Enable * synchronization with a one second timeout. */ if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) { if (cfg->monarch_timeout < 0) cfg->monarch_timeout = USEC_PER_SEC; } } if (cfg->monarch_timeout < 0) cfg->monarch_timeout = 0; if (cfg->bootlog != 0) cfg->panic_timeout = 30; return 0; } static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) { if (c->x86 != 5) return 0; switch (c->x86_vendor) { case X86_VENDOR_INTEL: intel_p5_mcheck_init(c); mce_flags.p5 = 1; return 1; case X86_VENDOR_CENTAUR: winchip_mcheck_init(c); mce_flags.winchip = 1; return 1; default: return 0; } return 0; } /* * Init basic CPU features needed for early decoding of MCEs. 
*/ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) { if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) { mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV); mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); mce_flags.amd_threshold = 1; } } static void mce_centaur_feature_init(struct cpuinfo_x86 *c) { struct mca_config *cfg = &mca_cfg; /* * All newer Centaur CPUs support MCE broadcasting. Enable * synchronization with a one second timeout. */ if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || c->x86 > 6) { if (cfg->monarch_timeout < 0) cfg->monarch_timeout = USEC_PER_SEC; } } static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); /* * These CPUs have MCA bank 8 which reports only one error type called * SVAD (System View Address Decoder). The reporting of that error is * controlled by IA32_MC8.CTL.0. * * If enabled, prefetching on these CPUs will cause SVAD MCE when * virtual machines start and result in a system panic. Always disable * bank 8 SVAD error by default. */ if ((c->x86 == 7 && c->x86_model == 0x1b) || (c->x86_model == 0x19 || c->x86_model == 0x1f)) { if (this_cpu_read(mce_num_banks) > 8) mce_banks[8].ctl = 0; } intel_init_cmci(); intel_init_lmce(); mce_adjust_timer = cmci_intel_adjust_timer; } static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c) { intel_clear_lmce(); } static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) { switch (c->x86_vendor) { case X86_VENDOR_INTEL: mce_intel_feature_init(c); mce_adjust_timer = cmci_intel_adjust_timer; break; case X86_VENDOR_AMD: { mce_amd_feature_init(c); break; } case X86_VENDOR_HYGON: mce_hygon_feature_init(c); break; case X86_VENDOR_CENTAUR: mce_centaur_feature_init(c); break; case X86_VENDOR_ZHAOXIN: mce_zhaoxin_feature_init(c); break; default: break; } } static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) { switch (c->x86_vendor) { case X86_VENDOR_INTEL: mce_intel_feature_clear(c); break; case X86_VENDOR_ZHAOXIN: mce_zhaoxin_feature_clear(c); break; default: break; } } static void mce_start_timer(struct timer_list *t) { unsigned long iv = check_interval * HZ; if (mca_cfg.ignore_ce || !iv) return; this_cpu_write(mce_next_interval, iv); __start_timer(t, iv); } static void __mcheck_cpu_setup_timer(void) { struct timer_list *t = this_cpu_ptr(&mce_timer); timer_setup(t, mce_timer_fn, TIMER_PINNED); } static void __mcheck_cpu_init_timer(void) { struct timer_list *t = this_cpu_ptr(&mce_timer); timer_setup(t, mce_timer_fn, TIMER_PINNED); mce_start_timer(t); } bool filter_mce(struct mce *m) { if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) return amd_filter_mce(m); if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) return intel_filter_mce(m); return false; } static __always_inline void exc_machine_check_kernel(struct pt_regs *regs) { irqentry_state_t irq_state; WARN_ON_ONCE(user_mode(regs)); /* * Only required when from kernel mode. See * mce_check_crashing_cpu() for details. 
*/ if (mca_cfg.initialized && mce_check_crashing_cpu()) return; irq_state = irqentry_nmi_enter(regs); do_machine_check(regs); irqentry_nmi_exit(regs, irq_state); } static __always_inline void exc_machine_check_user(struct pt_regs *regs) { irqentry_enter_from_user_mode(regs); do_machine_check(regs); irqentry_exit_to_user_mode(regs); } #ifdef CONFIG_X86_64 /* MCE hit kernel mode */ DEFINE_IDTENTRY_MCE(exc_machine_check) { unsigned long dr7; dr7 = local_db_save(); exc_machine_check_kernel(regs); local_db_restore(dr7); } /* The user mode variant. */ DEFINE_IDTENTRY_MCE_USER(exc_machine_check) { unsigned long dr7; dr7 = local_db_save(); exc_machine_check_user(regs); local_db_restore(dr7); } #else /* 32bit unified entry point */ DEFINE_IDTENTRY_RAW(exc_machine_check) { unsigned long dr7; dr7 = local_db_save(); if (user_mode(regs)) exc_machine_check_user(regs); else exc_machine_check_kernel(regs); local_db_restore(dr7); } #endif /* * Called for each booted CPU to set up machine checks. * Must be called with preempt off: */ void mcheck_cpu_init(struct cpuinfo_x86 *c) { if (mca_cfg.disabled) return; if (__mcheck_cpu_ancient_init(c)) return; if (!mce_available(c)) return; __mcheck_cpu_cap_init(); if (__mcheck_cpu_apply_quirks(c) < 0) { mca_cfg.disabled = 1; return; } if (mce_gen_pool_init()) { mca_cfg.disabled = 1; pr_emerg("Couldn't allocate MCE records pool!\n"); return; } mca_cfg.initialized = 1; __mcheck_cpu_init_early(c); __mcheck_cpu_init_generic(); __mcheck_cpu_init_vendor(c); __mcheck_cpu_init_clear_banks(); __mcheck_cpu_check_banks(); __mcheck_cpu_setup_timer(); } /* * Called for each booted CPU to clear some machine checks opt-ins */ void mcheck_cpu_clear(struct cpuinfo_x86 *c) { if (mca_cfg.disabled) return; if (!mce_available(c)) return; /* * Possibly to clear general settings generic to x86 * __mcheck_cpu_clear_generic(c); */ __mcheck_cpu_clear_vendor(c); } static void __mce_disable_bank(void *arg) { int bank = *((int *)arg); __clear_bit(bank, this_cpu_ptr(mce_poll_banks)); cmci_disable_bank(bank); } void mce_disable_bank(int bank) { if (bank >= this_cpu_read(mce_num_banks)) { pr_warn(FW_BUG "Ignoring request to disable invalid MCA bank %d.\n", bank); return; } set_bit(bank, mce_banks_ce_disabled); on_each_cpu(__mce_disable_bank, &bank, 1); } /* * mce=off Disables machine check * mce=no_cmci Disables CMCI * mce=no_lmce Disables LMCE * mce=dont_log_ce Clears corrected events silently, no log created for CEs. * mce=print_all Print all machine check logs to console * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared. * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above) * monarchtimeout is how long to wait for other CPUs on machine * check, or 0 to not wait * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h and older. * mce=nobootlog Don't log MCEs from before booting. 
* mce=bios_cmci_threshold Don't program the CMCI threshold * mce=recovery force enable copy_mc_fragile() */ static int __init mcheck_enable(char *str) { struct mca_config *cfg = &mca_cfg; if (*str == 0) { enable_p5_mce(); return 1; } if (*str == '=') str++; if (!strcmp(str, "off")) cfg->disabled = 1; else if (!strcmp(str, "no_cmci")) cfg->cmci_disabled = true; else if (!strcmp(str, "no_lmce")) cfg->lmce_disabled = 1; else if (!strcmp(str, "dont_log_ce")) cfg->dont_log_ce = true; else if (!strcmp(str, "print_all")) cfg->print_all = true; else if (!strcmp(str, "ignore_ce")) cfg->ignore_ce = true; else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) cfg->bootlog = (str[0] == 'b'); else if (!strcmp(str, "bios_cmci_threshold")) cfg->bios_cmci_threshold = 1; else if (!strcmp(str, "recovery")) cfg->recovery = 1; else if (isdigit(str[0])) get_option(&str, &(cfg->monarch_timeout)); else { pr_info("mce argument %s ignored. Please use /sys\n", str); return 0; } return 1; } __setup("mce", mcheck_enable); int __init mcheck_init(void) { mce_register_decode_chain(&early_nb); mce_register_decode_chain(&mce_uc_nb); mce_register_decode_chain(&mce_default_nb); INIT_WORK(&mce_work, mce_gen_pool_process); init_irq_work(&mce_irq_work, mce_irq_work_cb); return 0; } /* * mce_syscore: PM support */ /* * Disable machine checks on suspend and shutdown. We can't really handle * them later. */ static void mce_disable_error_reporting(void) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); int i; for (i = 0; i < this_cpu_read(mce_num_banks); i++) { struct mce_bank *b = &mce_banks[i]; if (b->init) wrmsrl(mca_msr_reg(i, MCA_CTL), 0); } return; } static void vendor_disable_error_reporting(void) { /* * Don't clear on Intel or AMD or Hygon or Zhaoxin CPUs. Some of these * MSRs are socket-wide. Disabling them for just a single offlined CPU * is bad, since it will inhibit reporting for all shared resources on * the socket like the last level cache (LLC), the integrated memory * controller (iMC), etc. */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || boot_cpu_data.x86_vendor == X86_VENDOR_AMD || boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) return; mce_disable_error_reporting(); } static int mce_syscore_suspend(void) { vendor_disable_error_reporting(); return 0; } static void mce_syscore_shutdown(void) { vendor_disable_error_reporting(); } /* * On resume clear all MCE state. Don't want to see leftovers from the BIOS. 
* Only one CPU is active at this time, the others get re-added later using * CPU hotplug: */ static void mce_syscore_resume(void) { __mcheck_cpu_init_generic(); __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info)); __mcheck_cpu_init_clear_banks(); } static struct syscore_ops mce_syscore_ops = { .suspend = mce_syscore_suspend, .shutdown = mce_syscore_shutdown, .resume = mce_syscore_resume, }; /* * mce_device: Sysfs support */ static void mce_cpu_restart(void *data) { if (!mce_available(raw_cpu_ptr(&cpu_info))) return; __mcheck_cpu_init_generic(); __mcheck_cpu_init_clear_banks(); __mcheck_cpu_init_timer(); } /* Reinit MCEs after user configuration changes */ static void mce_restart(void) { mce_timer_delete_all(); on_each_cpu(mce_cpu_restart, NULL, 1); mce_schedule_work(); } /* Toggle features for corrected errors */ static void mce_disable_cmci(void *data) { if (!mce_available(raw_cpu_ptr(&cpu_info))) return; cmci_clear(); } static void mce_enable_ce(void *all) { if (!mce_available(raw_cpu_ptr(&cpu_info))) return; cmci_reenable(); cmci_recheck(); if (all) __mcheck_cpu_init_timer(); } static struct bus_type mce_subsys = { .name = "machinecheck", .dev_name = "machinecheck", }; DEFINE_PER_CPU(struct device *, mce_device); static inline struct mce_bank_dev *attr_to_bank(struct device_attribute *attr) { return container_of(attr, struct mce_bank_dev, attr); } static ssize_t show_bank(struct device *s, struct device_attribute *attr, char *buf) { u8 bank = attr_to_bank(attr)->bank; struct mce_bank *b; if (bank >= per_cpu(mce_num_banks, s->id)) return -EINVAL; b = &per_cpu(mce_banks_array, s->id)[bank]; if (!b->init) return -ENODEV; return sprintf(buf, "%llx\n", b->ctl); } static ssize_t set_bank(struct device *s, struct device_attribute *attr, const char *buf, size_t size) { u8 bank = attr_to_bank(attr)->bank; struct mce_bank *b; u64 new; if (kstrtou64(buf, 0, &new) < 0) return -EINVAL; if (bank >= per_cpu(mce_num_banks, s->id)) return -EINVAL; b = &per_cpu(mce_banks_array, s->id)[bank]; if (!b->init) return -ENODEV; b->ctl = new; mce_restart(); return size; } static ssize_t set_ignore_ce(struct device *s, struct device_attribute *attr, const char *buf, size_t size) { u64 new; if (kstrtou64(buf, 0, &new) < 0) return -EINVAL; mutex_lock(&mce_sysfs_mutex); if (mca_cfg.ignore_ce ^ !!new) { if (new) { /* disable ce features */ mce_timer_delete_all(); on_each_cpu(mce_disable_cmci, NULL, 1); mca_cfg.ignore_ce = true; } else { /* enable ce features */ mca_cfg.ignore_ce = false; on_each_cpu(mce_enable_ce, (void *)1, 1); } } mutex_unlock(&mce_sysfs_mutex); return size; } static ssize_t set_cmci_disabled(struct device *s, struct device_attribute *attr, const char *buf, size_t size) { u64 new; if (kstrtou64(buf, 0, &new) < 0) return -EINVAL; mutex_lock(&mce_sysfs_mutex); if (mca_cfg.cmci_disabled ^ !!new) { if (new) { /* disable cmci */ on_each_cpu(mce_disable_cmci, NULL, 1); mca_cfg.cmci_disabled = true; } else { /* enable cmci */ mca_cfg.cmci_disabled = false; on_each_cpu(mce_enable_ce, NULL, 1); } } mutex_unlock(&mce_sysfs_mutex); return size; } static ssize_t store_int_with_restart(struct device *s, struct device_attribute *attr, const char *buf, size_t size) { unsigned long old_check_interval = check_interval; ssize_t ret = device_store_ulong(s, attr, buf, size); if (check_interval == old_check_interval) return ret; mutex_lock(&mce_sysfs_mutex); mce_restart(); mutex_unlock(&mce_sysfs_mutex); return ret; } static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout); static 
DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce); static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all); static struct dev_ext_attribute dev_attr_check_interval = { __ATTR(check_interval, 0644, device_show_int, store_int_with_restart), &check_interval }; static struct dev_ext_attribute dev_attr_ignore_ce = { __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce), &mca_cfg.ignore_ce }; static struct dev_ext_attribute dev_attr_cmci_disabled = { __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled), &mca_cfg.cmci_disabled }; static struct device_attribute *mce_device_attrs[] = { &dev_attr_check_interval.attr, #ifdef CONFIG_X86_MCELOG_LEGACY &dev_attr_trigger, #endif &dev_attr_monarch_timeout.attr, &dev_attr_dont_log_ce.attr, &dev_attr_print_all.attr, &dev_attr_ignore_ce.attr, &dev_attr_cmci_disabled.attr, NULL }; static cpumask_var_t mce_device_initialized; static void mce_device_release(struct device *dev) { kfree(dev); } /* Per CPU device init. All of the CPUs still share the same bank device: */ static int mce_device_create(unsigned int cpu) { struct device *dev; int err; int i, j; if (!mce_available(&boot_cpu_data)) return -EIO; dev = per_cpu(mce_device, cpu); if (dev) return 0; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->id = cpu; dev->bus = &mce_subsys; dev->release = &mce_device_release; err = device_register(dev); if (err) { put_device(dev); return err; } for (i = 0; mce_device_attrs[i]; i++) { err = device_create_file(dev, mce_device_attrs[i]); if (err) goto error; } for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) { err = device_create_file(dev, &mce_bank_devs[j].attr); if (err) goto error2; } cpumask_set_cpu(cpu, mce_device_initialized); per_cpu(mce_device, cpu) = dev; return 0; error2: while (--j >= 0) device_remove_file(dev, &mce_bank_devs[j].attr); error: while (--i >= 0) device_remove_file(dev, mce_device_attrs[i]); device_unregister(dev); return err; } static void mce_device_remove(unsigned int cpu) { struct device *dev = per_cpu(mce_device, cpu); int i; if (!cpumask_test_cpu(cpu, mce_device_initialized)) return; for (i = 0; mce_device_attrs[i]; i++) device_remove_file(dev, mce_device_attrs[i]); for (i = 0; i < per_cpu(mce_num_banks, cpu); i++) device_remove_file(dev, &mce_bank_devs[i].attr); device_unregister(dev); cpumask_clear_cpu(cpu, mce_device_initialized); per_cpu(mce_device, cpu) = NULL; } /* Make sure there are no machine checks on offlined CPUs. 
*/ static void mce_disable_cpu(void) { if (!mce_available(raw_cpu_ptr(&cpu_info))) return; if (!cpuhp_tasks_frozen) cmci_clear(); vendor_disable_error_reporting(); } static void mce_reenable_cpu(void) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); int i; if (!mce_available(raw_cpu_ptr(&cpu_info))) return; if (!cpuhp_tasks_frozen) cmci_reenable(); for (i = 0; i < this_cpu_read(mce_num_banks); i++) { struct mce_bank *b = &mce_banks[i]; if (b->init) wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); } } static int mce_cpu_dead(unsigned int cpu) { mce_intel_hcpu_update(cpu); /* intentionally ignoring frozen here */ if (!cpuhp_tasks_frozen) cmci_rediscover(); return 0; } static int mce_cpu_online(unsigned int cpu) { struct timer_list *t = this_cpu_ptr(&mce_timer); int ret; mce_device_create(cpu); ret = mce_threshold_create_device(cpu); if (ret) { mce_device_remove(cpu); return ret; } mce_reenable_cpu(); mce_start_timer(t); return 0; } static int mce_cpu_pre_down(unsigned int cpu) { struct timer_list *t = this_cpu_ptr(&mce_timer); mce_disable_cpu(); del_timer_sync(t); mce_threshold_remove_device(cpu); mce_device_remove(cpu); return 0; } static __init void mce_init_banks(void) { int i; for (i = 0; i < MAX_NR_BANKS; i++) { struct mce_bank_dev *b = &mce_bank_devs[i]; struct device_attribute *a = &b->attr; b->bank = i; sysfs_attr_init(&a->attr); a->attr.name = b->attrname; snprintf(b->attrname, ATTR_LEN, "bank%d", i); a->attr.mode = 0644; a->show = show_bank; a->store = set_bank; } } /* * When running on XEN, this initcall is ordered against the XEN mcelog * initcall: * * device_initcall(xen_late_init_mcelog); * device_initcall_sync(mcheck_init_device); */ static __init int mcheck_init_device(void) { int err; /* * Check if we have a spare virtual bit. This will only become * a problem if/when we move beyond 5-level page tables. */ MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63); if (!mce_available(&boot_cpu_data)) { err = -EIO; goto err_out; } if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) { err = -ENOMEM; goto err_out; } mce_init_banks(); err = subsys_system_register(&mce_subsys, NULL); if (err) goto err_out_mem; err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL, mce_cpu_dead); if (err) goto err_out_mem; /* * Invokes mce_cpu_online() on all CPUs which are online when * the state is installed. */ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online", mce_cpu_online, mce_cpu_pre_down); if (err < 0) goto err_out_online; register_syscore_ops(&mce_syscore_ops); return 0; err_out_online: cpuhp_remove_state(CPUHP_X86_MCE_DEAD); err_out_mem: free_cpumask_var(mce_device_initialized); err_out: pr_err("Unable to init MCE device (rc: %d)\n", err); return err; } device_initcall_sync(mcheck_init_device); /* * Old style boot options parsing. Only for compatibility. 
*/ static int __init mcheck_disable(char *str) { mca_cfg.disabled = 1; return 1; } __setup("nomce", mcheck_disable); #ifdef CONFIG_DEBUG_FS struct dentry *mce_get_debugfs_dir(void) { static struct dentry *dmce; if (!dmce) dmce = debugfs_create_dir("mce", NULL); return dmce; } static void mce_reset(void) { atomic_set(&mce_fake_panicked, 0); atomic_set(&mce_executing, 0); atomic_set(&mce_callin, 0); atomic_set(&global_nwo, 0); cpumask_setall(&mce_missing_cpus); } static int fake_panic_get(void *data, u64 *val) { *val = fake_panic; return 0; } static int fake_panic_set(void *data, u64 val) { mce_reset(); fake_panic = val; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set, "%llu\n"); static void __init mcheck_debugfs_init(void) { struct dentry *dmce; dmce = mce_get_debugfs_dir(); debugfs_create_file_unsafe("fake_panic", 0444, dmce, NULL, &fake_panic_fops); } #else static void __init mcheck_debugfs_init(void) { } #endif static int __init mcheck_late_init(void) { if (mca_cfg.recovery) enable_copy_mc_fragile(); mcheck_debugfs_init(); /* * Flush out everything that has been logged during early boot, now that * everything has been initialized (workqueues, decoders, ...). */ mce_schedule_work(); return 0; } late_initcall(mcheck_late_init);
linux-master
arch/x86/kernel/cpu/mce/core.c
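The periodic poller in core.c above tunes its own cadence: mce_timer_fn() halves the interval (down to HZ/100) whenever mce_notify_irq() reports a logged event, and doubles it (up to check_interval seconds) when the banks stay quiet. Below is a minimal, self-contained userspace sketch of just that backoff policy, not kernel code; HZ, CHECK_INTERVAL and the event trace are assumptions for illustration, and the kernel's round_jiffies_relative() rounding is omitted.

/*
 * Illustrative standalone sketch of the adaptive MCE polling interval.
 * All constants and the event stream below are invented for the example.
 */
#include <stdio.h>

#define HZ             250UL        /* assumed tick rate */
#define CHECK_INTERVAL (5 * 60UL)   /* seconds, standing in for INITIAL_CHECK_INTERVAL */

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

/* One timer expiry: compute the next interval in jiffies. */
static unsigned long next_interval(unsigned long iv, int logged_event)
{
        if (logged_event)
                return max_ul(iv / 2, HZ / 100);            /* speed up, floor at ~10ms */
        return min_ul(iv * 2, CHECK_INTERVAL * HZ);         /* back off, cap at the maximum */
}

int main(void)
{
        /* Hypothetical trace: a burst of events followed by silence. */
        int events[] = { 1, 1, 1, 0, 0, 0, 0, 0 };
        unsigned long iv = CHECK_INTERVAL * HZ;
        unsigned int i;

        for (i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
                iv = next_interval(iv, events[i]);
                printf("tick %u: event=%d, next poll in %lu jiffies\n", i, events[i], iv);
        }
        return 0;
}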
// SPDX-License-Identifier: GPL-2.0-only /* * MCE grading rules. * Copyright 2008, 2009 Intel Corporation. * * Author: Andi Kleen */ #include <linux/kernel.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <asm/mce.h> #include <asm/intel-family.h> #include <asm/traps.h> #include <asm/insn.h> #include <asm/insn-eval.h> #include "internal.h" /* * Grade an mce by severity. In general the most severe ones are processed * first. Since there are quite a lot of combinations test the bits in a * table-driven way. The rules are simply processed in order, first * match wins. * * Note this is only used for machine check exceptions, the corrected * errors use much simpler rules. The exceptions still check for the corrected * errors, but only to leave them alone for the CMCI handler (except for * panic situations) */ enum context { IN_KERNEL = 1, IN_USER = 2, IN_KERNEL_RECOV = 3 }; enum ser { SER_REQUIRED = 1, NO_SER = 2 }; enum exception { EXCP_CONTEXT = 1, NO_EXCP = 2 }; static struct severity { u64 mask; u64 result; unsigned char sev; unsigned char mcgmask; unsigned char mcgres; unsigned char ser; unsigned char context; unsigned char excp; unsigned char covered; unsigned char cpu_model; unsigned char cpu_minstepping; unsigned char bank_lo, bank_hi; char *msg; } severities[] = { #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c } #define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h #define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s #define KERNEL .context = IN_KERNEL #define USER .context = IN_USER #define KERNEL_RECOV .context = IN_KERNEL_RECOV #define SER .ser = SER_REQUIRED #define NOSER .ser = NO_SER #define EXCP .excp = EXCP_CONTEXT #define NOEXCP .excp = NO_EXCP #define BITCLR(x) .mask = x, .result = 0 #define BITSET(x) .mask = x, .result = x #define MCGMASK(x, y) .mcgmask = x, .mcgres = y #define MASK(x, y) .mask = x, .result = y #define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S) #define MCI_UC_AR (MCI_STATUS_UC|MCI_STATUS_AR) #define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR) #define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV) MCESEV( NO, "Invalid", BITCLR(MCI_STATUS_VAL) ), MCESEV( NO, "Not enabled", EXCP, BITCLR(MCI_STATUS_EN) ), MCESEV( PANIC, "Processor context corrupt", BITSET(MCI_STATUS_PCC) ), /* When MCIP is not set something is very confused */ MCESEV( PANIC, "MCIP not set in MCA handler", EXCP, MCGMASK(MCG_STATUS_MCIP, 0) ), /* Neither return not error IP -- no chance to recover -> PANIC */ MCESEV( PANIC, "Neither restart nor error IP", EXCP, MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0) ), MCESEV( PANIC, "In kernel and no restart IP", EXCP, KERNEL, MCGMASK(MCG_STATUS_RIPV, 0) ), MCESEV( PANIC, "In kernel and no restart IP", EXCP, KERNEL_RECOV, MCGMASK(MCG_STATUS_RIPV, 0) ), MCESEV( KEEP, "Corrected error", NOSER, BITCLR(MCI_STATUS_UC) ), /* * known AO MCACODs reported via MCE or CMC: * * SRAO could be signaled either via a machine check exception or * CMCI with the corresponding bit S 1 or 0. So we don't need to * check bit S for SRAO. */ MCESEV( AO, "Action optional: memory scrubbing error", SER, MASK(MCI_UC_AR|MCACOD_SCRUBMSK, MCI_STATUS_UC|MCACOD_SCRUB) ), MCESEV( AO, "Action optional: last level cache writeback error", SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB) ), /* * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured * to report uncorrected errors using CMCI with a special signature. 
* UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported * in one of the memory controller banks. * Set severity to "AO" for same action as normal patrol scrub error. */ MCESEV( AO, "Uncorrected Patrol Scrub Error", SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0), MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18) ), /* ignore OVER for UCNA */ MCESEV( UCNA, "Uncorrected no action required", SER, MASK(MCI_UC_SAR, MCI_STATUS_UC) ), MCESEV( PANIC, "Illegal combination (UCNA with AR=1)", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR) ), MCESEV( KEEP, "Non signaled machine check", SER, BITCLR(MCI_STATUS_S) ), MCESEV( PANIC, "Action required with lost events", SER, BITSET(MCI_STATUS_OVER|MCI_UC_SAR) ), /* known AR MCACODs: */ #ifdef CONFIG_MEMORY_FAILURE MCESEV( KEEP, "Action required but unaffected thread is continuable", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR, MCI_UC_SAR|MCI_ADDR), MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, MCG_STATUS_RIPV) ), MCESEV( AR, "Action required: data load in error recoverable area of kernel", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), KERNEL_RECOV ), MCESEV( AR, "Action required: data load error in a user process", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), USER ), MCESEV( AR, "Action required: instruction fetch error in a user process", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), USER ), MCESEV( PANIC, "Data load in unrecoverable area of kernel", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), KERNEL ), MCESEV( PANIC, "Instruction fetch error in kernel", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), KERNEL ), #endif MCESEV( PANIC, "Action required: unknown MCACOD", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR) ), MCESEV( SOME, "Action optional: unknown MCACOD", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S) ), MCESEV( SOME, "Action optional with lost events", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_S) ), MCESEV( PANIC, "Overflowed uncorrected", BITSET(MCI_STATUS_OVER|MCI_STATUS_UC) ), MCESEV( PANIC, "Uncorrected in kernel", BITSET(MCI_STATUS_UC), KERNEL ), MCESEV( UC, "Uncorrected", BITSET(MCI_STATUS_UC) ), MCESEV( SOME, "No match", BITSET(0) ) /* always matches. keep at end */ }; #define mc_recoverable(mcg) (((mcg) & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) == \ (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) static bool is_copy_from_user(struct pt_regs *regs) { u8 insn_buf[MAX_INSN_SIZE]; unsigned long addr; struct insn insn; int ret; if (!regs) return false; if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip, MAX_INSN_SIZE)) return false; ret = insn_decode_kernel(&insn, insn_buf); if (ret < 0) return false; switch (insn.opcode.value) { /* MOV mem,reg */ case 0x8A: case 0x8B: /* MOVZ mem,reg */ case 0xB60F: case 0xB70F: addr = (unsigned long)insn_get_addr_ref(&insn, regs); break; /* REP MOVS */ case 0xA4: case 0xA5: addr = regs->si; break; default: return false; } if (fault_in_kernel_space(addr)) return false; current->mce_vaddr = (void __user *)addr; return true; } /* * If mcgstatus indicated that ip/cs on the stack were * no good, then "m->cs" will be zero and we will have * to assume the worst case (IN_KERNEL) as we actually * have no idea what we were executing when the machine * check hit. 
* If we do have a good "m->cs" (or a faked one in the * case we were executing in VM86 mode) we can use it to * distinguish an exception taken in user from from one * taken in the kernel. */ static noinstr int error_context(struct mce *m, struct pt_regs *regs) { int fixup_type; bool copy_user; if ((m->cs & 3) == 3) return IN_USER; if (!mc_recoverable(m->mcgstatus)) return IN_KERNEL; /* Allow instrumentation around external facilities usage. */ instrumentation_begin(); fixup_type = ex_get_fixup_type(m->ip); copy_user = is_copy_from_user(regs); instrumentation_end(); switch (fixup_type) { case EX_TYPE_UACCESS: case EX_TYPE_COPY: if (!copy_user) return IN_KERNEL; m->kflags |= MCE_IN_KERNEL_COPYIN; fallthrough; case EX_TYPE_FAULT_MCE_SAFE: case EX_TYPE_DEFAULT_MCE_SAFE: m->kflags |= MCE_IN_KERNEL_RECOV; return IN_KERNEL_RECOV; default: return IN_KERNEL; } } /* See AMD PPR(s) section Machine Check Error Handling. */ static noinstr int mce_severity_amd(struct mce *m, struct pt_regs *regs, char **msg, bool is_excp) { char *panic_msg = NULL; int ret; /* * Default return value: Action required, the error must be handled * immediately. */ ret = MCE_AR_SEVERITY; /* Processor Context Corrupt, no need to fumble too much, die! */ if (m->status & MCI_STATUS_PCC) { panic_msg = "Processor Context Corrupt"; ret = MCE_PANIC_SEVERITY; goto out; } if (m->status & MCI_STATUS_DEFERRED) { ret = MCE_DEFERRED_SEVERITY; goto out; } /* * If the UC bit is not set, the system either corrected or deferred * the error. No action will be required after logging the error. */ if (!(m->status & MCI_STATUS_UC)) { ret = MCE_KEEP_SEVERITY; goto out; } /* * On MCA overflow, without the MCA overflow recovery feature the * system will not be able to recover, panic. */ if ((m->status & MCI_STATUS_OVER) && !mce_flags.overflow_recov) { panic_msg = "Overflowed uncorrected error without MCA Overflow Recovery"; ret = MCE_PANIC_SEVERITY; goto out; } if (!mce_flags.succor) { panic_msg = "Uncorrected error without MCA Recovery"; ret = MCE_PANIC_SEVERITY; goto out; } if (error_context(m, regs) == IN_KERNEL) { panic_msg = "Uncorrected unrecoverable error in kernel context"; ret = MCE_PANIC_SEVERITY; } out: if (msg && panic_msg) *msg = panic_msg; return ret; } static noinstr int mce_severity_intel(struct mce *m, struct pt_regs *regs, char **msg, bool is_excp) { enum exception excp = (is_excp ? 
EXCP_CONTEXT : NO_EXCP); enum context ctx = error_context(m, regs); struct severity *s; for (s = severities;; s++) { if ((m->status & s->mask) != s->result) continue; if ((m->mcgstatus & s->mcgmask) != s->mcgres) continue; if (s->ser == SER_REQUIRED && !mca_cfg.ser) continue; if (s->ser == NO_SER && mca_cfg.ser) continue; if (s->context && ctx != s->context) continue; if (s->excp && excp != s->excp) continue; if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model) continue; if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping) continue; if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi)) continue; if (msg) *msg = s->msg; s->covered = 1; return s->sev; } } int noinstr mce_severity(struct mce *m, struct pt_regs *regs, char **msg, bool is_excp) { if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) return mce_severity_amd(m, regs, msg, is_excp); else return mce_severity_intel(m, regs, msg, is_excp); } #ifdef CONFIG_DEBUG_FS static void *s_start(struct seq_file *f, loff_t *pos) { if (*pos >= ARRAY_SIZE(severities)) return NULL; return &severities[*pos]; } static void *s_next(struct seq_file *f, void *data, loff_t *pos) { if (++(*pos) >= ARRAY_SIZE(severities)) return NULL; return &severities[*pos]; } static void s_stop(struct seq_file *f, void *data) { } static int s_show(struct seq_file *f, void *data) { struct severity *ser = data; seq_printf(f, "%d\t%s\n", ser->covered, ser->msg); return 0; } static const struct seq_operations severities_seq_ops = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; static int severities_coverage_open(struct inode *inode, struct file *file) { return seq_open(file, &severities_seq_ops); } static ssize_t severities_coverage_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { int i; for (i = 0; i < ARRAY_SIZE(severities); i++) severities[i].covered = 0; return count; } static const struct file_operations severities_coverage_fops = { .open = severities_coverage_open, .release = seq_release, .read = seq_read, .write = severities_coverage_write, .llseek = seq_lseek, }; static int __init severities_debugfs_init(void) { struct dentry *dmce; dmce = mce_get_debugfs_dir(); debugfs_create_file("severities-coverage", 0444, dmce, NULL, &severities_coverage_fops); return 0; } late_initcall(severities_debugfs_init); #endif /* CONFIG_DEBUG_FS */
linux-master
arch/x86/kernel/cpu/mce/severity.c
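mce_severity_intel() in severity.c above grades an error by walking a rule table in order and taking the first entry whose masked status, MCG status and context filters all match, relying on a catch-all final rule. The sketch below is a stripped-down, self-contained illustration of that first-match idea only; the bit names, severity values and the tiny rule set are invented and are nothing like the real table.

/*
 * Illustrative table-driven "first match wins" severity grader.
 * The status bits and rules are made up for this example.
 */
#include <stdio.h>
#include <stdint.h>

#define ST_VAL  (1u << 0)   /* record is valid */
#define ST_UC   (1u << 1)   /* uncorrected */
#define ST_PCC  (1u << 2)   /* processor context corrupt */
#define ST_OVER (1u << 3)   /* overflow */

enum sev { SEV_NO, SEV_SOME, SEV_UC, SEV_PANIC };

struct rule {
        uint32_t mask;
        uint32_t result;
        enum sev sev;
        const char *msg;
};

/* Ordered from most to least specific; the last rule always matches. */
static const struct rule rules[] = {
        { ST_VAL,          0,               SEV_NO,    "Invalid (VAL clear)" },
        { ST_PCC,          ST_PCC,          SEV_PANIC, "Processor context corrupt" },
        { ST_OVER | ST_UC, ST_OVER | ST_UC, SEV_PANIC, "Overflowed uncorrected" },
        { ST_UC,           ST_UC,           SEV_UC,    "Uncorrected" },
        { 0,               0,               SEV_SOME,  "No match" },
};

static enum sev grade(uint32_t status, const char **msg)
{
        const struct rule *r;

        /* Relies on the terminal catch-all rule, like the kernel table. */
        for (r = rules; ; r++) {
                if ((status & r->mask) != r->result)
                        continue;
                *msg = r->msg;
                return r->sev;
        }
}

int main(void)
{
        const char *msg;
        enum sev s = grade(ST_VAL | ST_UC, &msg);

        printf("severity=%d (%s)\n", s, msg);
        return 0;
}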
// SPDX-License-Identifier: GPL-2.0-only /* * /dev/mcelog driver * * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. * Rest from unknown author(s). * 2004 Andi Kleen. Rewrote most of it. * Copyright 2008 Intel Corporation * Author: Andi Kleen */ #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/kmod.h> #include <linux/poll.h> #include "internal.h" static BLOCKING_NOTIFIER_HEAD(mce_injector_chain); static DEFINE_MUTEX(mce_chrdev_read_mutex); static char mce_helper[128]; static char *mce_helper_argv[2] = { mce_helper, NULL }; /* * Lockless MCE logging infrastructure. * This avoids deadlocks on printk locks without having to break locks. Also * separate MCEs from kernel messages to avoid bogus bug reports. */ static struct mce_log_buffer *mcelog; static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait); static int dev_mce_log(struct notifier_block *nb, unsigned long val, void *data) { struct mce *mce = (struct mce *)data; unsigned int entry; if (mce->kflags & MCE_HANDLED_CEC) return NOTIFY_DONE; mutex_lock(&mce_chrdev_read_mutex); entry = mcelog->next; /* * When the buffer fills up discard new entries. Assume that the * earlier errors are the more interesting ones: */ if (entry >= mcelog->len) { set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog->flags); goto unlock; } mcelog->next = entry + 1; memcpy(mcelog->entry + entry, mce, sizeof(struct mce)); mcelog->entry[entry].finished = 1; mcelog->entry[entry].kflags = 0; /* wake processes polling /dev/mcelog */ wake_up_interruptible(&mce_chrdev_wait); unlock: mutex_unlock(&mce_chrdev_read_mutex); if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) mce->kflags |= MCE_HANDLED_MCELOG; return NOTIFY_OK; } static struct notifier_block dev_mcelog_nb = { .notifier_call = dev_mce_log, .priority = MCE_PRIO_MCELOG, }; static void mce_do_trigger(struct work_struct *work) { call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT); } static DECLARE_WORK(mce_trigger_work, mce_do_trigger); void mce_work_trigger(void) { if (mce_helper[0]) schedule_work(&mce_trigger_work); } static ssize_t show_trigger(struct device *s, struct device_attribute *attr, char *buf) { strcpy(buf, mce_helper); strcat(buf, "\n"); return strlen(mce_helper) + 1; } static ssize_t set_trigger(struct device *s, struct device_attribute *attr, const char *buf, size_t siz) { char *p; strscpy(mce_helper, buf, sizeof(mce_helper)); p = strchr(mce_helper, '\n'); if (p) *p = 0; return strlen(mce_helper) + !!p; } DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger); /* * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log. */ static DEFINE_SPINLOCK(mce_chrdev_state_lock); static int mce_chrdev_open_count; /* #times opened */ static int mce_chrdev_open_exclu; /* already open exclusive? */ static int mce_chrdev_open(struct inode *inode, struct file *file) { spin_lock(&mce_chrdev_state_lock); if (mce_chrdev_open_exclu || (mce_chrdev_open_count && (file->f_flags & O_EXCL))) { spin_unlock(&mce_chrdev_state_lock); return -EBUSY; } if (file->f_flags & O_EXCL) mce_chrdev_open_exclu = 1; mce_chrdev_open_count++; spin_unlock(&mce_chrdev_state_lock); return nonseekable_open(inode, file); } static int mce_chrdev_release(struct inode *inode, struct file *file) { spin_lock(&mce_chrdev_state_lock); mce_chrdev_open_count--; mce_chrdev_open_exclu = 0; spin_unlock(&mce_chrdev_state_lock); return 0; } static int mce_apei_read_done; /* Collect MCE record of previous boot in persistent storage via APEI ERST. 
*/ static int __mce_read_apei(char __user **ubuf, size_t usize) { int rc; u64 record_id; struct mce m; if (usize < sizeof(struct mce)) return -EINVAL; rc = apei_read_mce(&m, &record_id); /* Error or no more MCE record */ if (rc <= 0) { mce_apei_read_done = 1; /* * When ERST is disabled, mce_chrdev_read() should return * "no record" instead of "no device." */ if (rc == -ENODEV) return 0; return rc; } rc = -EFAULT; if (copy_to_user(*ubuf, &m, sizeof(struct mce))) return rc; /* * In fact, we should have cleared the record after that has * been flushed to the disk or sent to network in * /sbin/mcelog, but we have no interface to support that now, * so just clear it to avoid duplication. */ rc = apei_clear_mce(record_id); if (rc) { mce_apei_read_done = 1; return rc; } *ubuf += sizeof(struct mce); return 0; } static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off) { char __user *buf = ubuf; unsigned next; int i, err; mutex_lock(&mce_chrdev_read_mutex); if (!mce_apei_read_done) { err = __mce_read_apei(&buf, usize); if (err || buf != ubuf) goto out; } /* Only supports full reads right now */ err = -EINVAL; if (*off != 0 || usize < mcelog->len * sizeof(struct mce)) goto out; next = mcelog->next; err = 0; for (i = 0; i < next; i++) { struct mce *m = &mcelog->entry[i]; err |= copy_to_user(buf, m, sizeof(*m)); buf += sizeof(*m); } memset(mcelog->entry, 0, next * sizeof(struct mce)); mcelog->next = 0; if (err) err = -EFAULT; out: mutex_unlock(&mce_chrdev_read_mutex); return err ? err : buf - ubuf; } static __poll_t mce_chrdev_poll(struct file *file, poll_table *wait) { poll_wait(file, &mce_chrdev_wait, wait); if (READ_ONCE(mcelog->next)) return EPOLLIN | EPOLLRDNORM; if (!mce_apei_read_done && apei_check_mce()) return EPOLLIN | EPOLLRDNORM; return 0; } static long mce_chrdev_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { int __user *p = (int __user *)arg; if (!capable(CAP_SYS_ADMIN)) return -EPERM; switch (cmd) { case MCE_GET_RECORD_LEN: return put_user(sizeof(struct mce), p); case MCE_GET_LOG_LEN: return put_user(mcelog->len, p); case MCE_GETCLEAR_FLAGS: { unsigned flags; do { flags = mcelog->flags; } while (cmpxchg(&mcelog->flags, flags, 0) != flags); return put_user(flags, p); } default: return -ENOTTY; } } void mce_register_injector_chain(struct notifier_block *nb) { blocking_notifier_chain_register(&mce_injector_chain, nb); } EXPORT_SYMBOL_GPL(mce_register_injector_chain); void mce_unregister_injector_chain(struct notifier_block *nb) { blocking_notifier_chain_unregister(&mce_injector_chain, nb); } EXPORT_SYMBOL_GPL(mce_unregister_injector_chain); static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf, size_t usize, loff_t *off) { struct mce m; if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* * There are some cases where real MSR reads could slip * through. */ if (!boot_cpu_has(X86_FEATURE_MCE) || !boot_cpu_has(X86_FEATURE_MCA)) return -EIO; if ((unsigned long)usize > sizeof(struct mce)) usize = sizeof(struct mce); if (copy_from_user(&m, ubuf, usize)) return -EFAULT; if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu)) return -EINVAL; /* * Need to give user space some time to set everything up, * so do it a jiffie or two later everywhere. 
*/ schedule_timeout(2); blocking_notifier_call_chain(&mce_injector_chain, 0, &m); return usize; } static const struct file_operations mce_chrdev_ops = { .open = mce_chrdev_open, .release = mce_chrdev_release, .read = mce_chrdev_read, .write = mce_chrdev_write, .poll = mce_chrdev_poll, .unlocked_ioctl = mce_chrdev_ioctl, .compat_ioctl = compat_ptr_ioctl, .llseek = no_llseek, }; static struct miscdevice mce_chrdev_device = { MISC_MCELOG_MINOR, "mcelog", &mce_chrdev_ops, }; static __init int dev_mcelog_init_device(void) { int mce_log_len; int err; mce_log_len = max(MCE_LOG_MIN_LEN, num_online_cpus()); mcelog = kzalloc(struct_size(mcelog, entry, mce_log_len), GFP_KERNEL); if (!mcelog) return -ENOMEM; memcpy(mcelog->signature, MCE_LOG_SIGNATURE, sizeof(mcelog->signature)); mcelog->len = mce_log_len; mcelog->recordlen = sizeof(struct mce); /* register character device /dev/mcelog */ err = misc_register(&mce_chrdev_device); if (err) { if (err == -EBUSY) /* Xen dom0 might have registered the device already. */ pr_info("Unable to init device /dev/mcelog, already registered"); else pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err); kfree(mcelog); return err; } mce_register_decode_chain(&dev_mcelog_nb); return 0; } device_initcall_sync(dev_mcelog_init_device);
linux-master
arch/x86/kernel/cpu/mce/dev-mcelog.c
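dev_mce_log() above appends records into a fixed-size buffer and, once it is full, drops new entries and latches an overflow flag, on the theory that the earliest errors are the most interesting; a full read then drains and resets the log. Here is a small self-contained userspace sketch of that policy; the record type, LOG_LEN and the drain routine are invented stand-ins for struct mce, mcelog->len and mce_chrdev_read().

/*
 * Illustrative "keep the oldest, flag the overflow" log buffer.
 * Types and sizes are invented for the example.
 */
#include <stdio.h>
#include <string.h>

#define LOG_LEN 4

struct record { int id; };

struct log {
        struct record entry[LOG_LEN];
        unsigned int next;
        int overflowed;
};

/* Returns 0 if stored, -1 if dropped because the log was full. */
static int log_add(struct log *l, const struct record *r)
{
        if (l->next >= LOG_LEN) {
                l->overflowed = 1;          /* newest records are discarded */
                return -1;
        }
        l->entry[l->next++] = *r;
        return 0;
}

/* Drain everything, roughly like a full read of the log device. */
static void log_drain(struct log *l)
{
        unsigned int i;

        for (i = 0; i < l->next; i++)
                printf("record %d\n", l->entry[i].id);
        if (l->overflowed)
                printf("(overflow: some records were lost)\n");
        memset(l, 0, sizeof(*l));
}

int main(void)
{
        struct log l = { 0 };
        int i;

        for (i = 0; i < 6; i++) {
                struct record r = { .id = i };
                log_add(&l, &r);            /* records 4 and 5 are dropped */
        }
        log_drain(&l);
        return 0;
}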
// SPDX-License-Identifier: GPL-2.0 /* * Common corrected MCE threshold handler code: */ #include <linux/interrupt.h> #include <linux/kernel.h> #include <asm/irq_vectors.h> #include <asm/traps.h> #include <asm/apic.h> #include <asm/mce.h> #include <asm/trace/irq_vectors.h> #include "internal.h" static void default_threshold_interrupt(void) { pr_err("Unexpected threshold interrupt at vector %x\n", THRESHOLD_APIC_VECTOR); } void (*mce_threshold_vector)(void) = default_threshold_interrupt; DEFINE_IDTENTRY_SYSVEC(sysvec_threshold) { trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR); inc_irq_stat(irq_threshold_count); mce_threshold_vector(); trace_threshold_apic_exit(THRESHOLD_APIC_VECTOR); apic_eoi(); }
linux-master
arch/x86/kernel/cpu/mce/threshold.c
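threshold.c above is essentially one pattern: a function pointer, mce_threshold_vector, starts out pointing at a harmless default handler and is repointed by vendor setup code once the corrected-error threshold feature is known to exist. The sketch below reproduces only that shape in plain userspace C; the handler names and the vendor_init() step are made up.

/*
 * Illustrative "default handler behind a function pointer" pattern.
 * Names are invented; only the mechanism mirrors the file above.
 */
#include <stdio.h>

static void default_threshold_handler(void)
{
        printf("unexpected threshold interrupt (no vendor handler installed)\n");
}

/* The hook: always callable, never NULL. */
static void (*threshold_vector)(void) = default_threshold_handler;

static void vendor_threshold_handler(void)
{
        printf("vendor handler: scan threshold banks\n");
}

/* What vendor init code would do once the feature is detected. */
static void vendor_init(void)
{
        threshold_vector = vendor_threshold_handler;
}

int main(void)
{
        threshold_vector();   /* before init: falls back to the default */
        vendor_init();
        threshold_vector();   /* after init: vendor-specific handling */
        return 0;
}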
// SPDX-License-Identifier: GPL-2.0 #include <linux/init.h> #include <linux/io.h> #include <linux/mm.h> #include <asm/processor-cyrix.h> #include <asm/processor-flags.h> #include <asm/mtrr.h> #include <asm/msr.h> #include "mtrr.h" static void cyrix_get_arr(unsigned int reg, unsigned long *base, unsigned long *size, mtrr_type * type) { unsigned char arr, ccr3, rcr, shift; unsigned long flags; arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ local_irq_save(flags); ccr3 = getCx86(CX86_CCR3); setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ ((unsigned char *)base)[3] = getCx86(arr); ((unsigned char *)base)[2] = getCx86(arr + 1); ((unsigned char *)base)[1] = getCx86(arr + 2); rcr = getCx86(CX86_RCR_BASE + reg); setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ local_irq_restore(flags); shift = ((unsigned char *) base)[1] & 0x0f; *base >>= PAGE_SHIFT; /* * Power of two, at least 4K on ARR0-ARR6, 256K on ARR7 * Note: shift==0xf means 4G, this is unsupported. */ if (shift) *size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1); else *size = 0; /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */ if (reg < 7) { switch (rcr) { case 1: *type = MTRR_TYPE_UNCACHABLE; break; case 8: *type = MTRR_TYPE_WRBACK; break; case 9: *type = MTRR_TYPE_WRCOMB; break; case 24: default: *type = MTRR_TYPE_WRTHROUGH; break; } } else { switch (rcr) { case 0: *type = MTRR_TYPE_UNCACHABLE; break; case 8: *type = MTRR_TYPE_WRCOMB; break; case 9: *type = MTRR_TYPE_WRBACK; break; case 25: default: *type = MTRR_TYPE_WRTHROUGH; break; } } } /* * cyrix_get_free_region - get a free ARR. * * @base: the starting (base) address of the region. * @size: the size (in bytes) of the region. * * Returns: the index of the region on success, else -1 on error. */ static int cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) { unsigned long lbase, lsize; mtrr_type ltype; int i; switch (replace_reg) { case 7: if (size < 0x40) break; fallthrough; case 6: case 5: case 4: return replace_reg; case 3: case 2: case 1: case 0: return replace_reg; } /* If we are to set up a region >32M then look at ARR7 immediately */ if (size > 0x2000) { cyrix_get_arr(7, &lbase, &lsize, &ltype); if (lsize == 0) return 7; /* Else try ARR0-ARR6 first */ } else { for (i = 0; i < 7; i++) { cyrix_get_arr(i, &lbase, &lsize, &ltype); if (lsize == 0) return i; } /* * ARR0-ARR6 isn't free * try ARR7 but its size must be at least 256K */ cyrix_get_arr(i, &lbase, &lsize, &ltype); if ((lsize == 0) && (size >= 0x40)) return i; } return -ENOSPC; } static u32 cr4, ccr3; static void prepare_set(void) { u32 cr0; /* Save value of CR4 and clear Page Global Enable (bit 7) */ if (boot_cpu_has(X86_FEATURE_PGE)) { cr4 = __read_cr4(); __write_cr4(cr4 & ~X86_CR4_PGE); } /* * Disable and flush caches. 
* Note that wbinvd flushes the TLBs as a side-effect */ cr0 = read_cr0() | X86_CR0_CD; wbinvd(); write_cr0(cr0); wbinvd(); /* Cyrix ARRs - everything else was excluded at the top */ ccr3 = getCx86(CX86_CCR3); /* Cyrix ARRs - everything else was excluded at the top */ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); } static void post_set(void) { /* Flush caches and TLBs */ wbinvd(); /* Cyrix ARRs - everything else was excluded at the top */ setCx86(CX86_CCR3, ccr3); /* Enable caches */ write_cr0(read_cr0() & ~X86_CR0_CD); /* Restore value of CR4 */ if (boot_cpu_has(X86_FEATURE_PGE)) __write_cr4(cr4); } static void cyrix_set_arr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) { unsigned char arr, arr_type, arr_size; arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */ if (reg >= 7) size >>= 6; size &= 0x7fff; /* make sure arr_size <= 14 */ for (arr_size = 0; size; arr_size++, size >>= 1) ; if (reg < 7) { switch (type) { case MTRR_TYPE_UNCACHABLE: arr_type = 1; break; case MTRR_TYPE_WRCOMB: arr_type = 9; break; case MTRR_TYPE_WRTHROUGH: arr_type = 24; break; default: arr_type = 8; break; } } else { switch (type) { case MTRR_TYPE_UNCACHABLE: arr_type = 0; break; case MTRR_TYPE_WRCOMB: arr_type = 8; break; case MTRR_TYPE_WRTHROUGH: arr_type = 25; break; default: arr_type = 9; break; } } prepare_set(); base <<= PAGE_SHIFT; setCx86(arr + 0, ((unsigned char *)&base)[3]); setCx86(arr + 1, ((unsigned char *)&base)[2]); setCx86(arr + 2, (((unsigned char *)&base)[1]) | arr_size); setCx86(CX86_RCR_BASE + reg, arr_type); post_set(); } const struct mtrr_ops cyrix_mtrr_ops = { .var_regs = 8, .set = cyrix_set_arr, .get = cyrix_get_arr, .get_free_region = cyrix_get_free_region, .validate_add_page = generic_validate_add_page, .have_wrcomb = positive_have_wrcomb, };
linux-master
arch/x86/kernel/cpu/mtrr/cyrix.c
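cyrix_get_arr() and cyrix_set_arr() above store a region's size as a 4-bit shift count: ARR0-ARR6 count 4K pages starting at 4K, while ARR7 counts in 256K units, which is why the set path shifts the page count right by 6 first. The following self-contained sketch round-trips that encoding outside the kernel; it assumes power-of-two sizes in pages, and the MSR access, MAPEN toggling and cache flushing are deliberately left out.

/*
 * Illustrative encode/decode of the Cyrix ARR size field.
 * Sizes are in 4K pages and assumed to be powers of two.
 */
#include <stdio.h>
#include <assert.h>

/* Mirror of the encode loop: count how many shifts empty the size. */
static unsigned char encode_arr_size(unsigned int reg, unsigned long size_pages)
{
        unsigned char arr_size;

        if (reg >= 7)
                size_pages >>= 6;       /* ARR7 counts in 256K units */
        size_pages &= 0x7fff;
        for (arr_size = 0; size_pages; arr_size++, size_pages >>= 1)
                ;
        return arr_size;
}

/* Mirror of the decode: shift the base unit back up. */
static unsigned long decode_arr_size(unsigned int reg, unsigned char shift)
{
        if (!shift)
                return 0;               /* region disabled */
        return (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
}

int main(void)
{
        unsigned long sizes[] = { 1, 2, 64, 1024 };   /* pages: 4K, 8K, 256K, 4M */
        unsigned int i, reg;

        for (reg = 0; reg <= 7; reg += 7) {           /* exercise ARR0 and ARR7 */
                for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                        unsigned long pages = sizes[i];
                        unsigned char shift;

                        if (reg == 7 && pages < 64)
                                continue;             /* ARR7 minimum is 256K */
                        shift = encode_arr_size(reg, pages);
                        assert(decode_arr_size(reg, shift) == pages);
                        printf("ARR%u: %lu pages -> shift %u\n", reg, pages, shift);
                }
        }
        return 0;
}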
/* Generic MTRR (Memory Type Range Register) driver. Copyright (C) 1997-2000 Richard Gooch Copyright (c) 2002 Patrick Mochel This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with this library; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. Richard Gooch may be reached by email at [email protected] The postal address is: Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. Source: "Pentium Pro Family Developer's Manual, Volume 3: Operating System Writer's Guide" (Intel document number 242692), section 11.11.7 This was cleaned and made readable by Patrick Mochel <[email protected]> on 6-7 March 2002. Source: Intel Architecture Software Developers Manual, Volume 3: System Programming Guide; Section 9.11. (1997 edition - PPro). */ #include <linux/types.h> /* FIXME: kvm_para.h needs this */ #include <linux/stop_machine.h> #include <linux/kvm_para.h> #include <linux/uaccess.h> #include <linux/export.h> #include <linux/mutex.h> #include <linux/init.h> #include <linux/sort.h> #include <linux/cpu.h> #include <linux/pci.h> #include <linux/smp.h> #include <linux/syscore_ops.h> #include <linux/rcupdate.h> #include <asm/cacheinfo.h> #include <asm/cpufeature.h> #include <asm/e820/api.h> #include <asm/mtrr.h> #include <asm/msr.h> #include <asm/memtype.h> #include "mtrr.h" /* arch_phys_wc_add returns an MTRR register index plus this offset. */ #define MTRR_TO_PHYS_WC_OFFSET 1000 u32 num_var_ranges; unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; DEFINE_MUTEX(mtrr_mutex); const struct mtrr_ops *mtrr_if; /* Returns non-zero if we have the write-combining memory type */ static int have_wrcomb(void) { struct pci_dev *dev; dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL); if (dev != NULL) { /* * ServerWorks LE chipsets < rev 6 have problems with * write-combining. Don't allow it and leave room for other * chipsets to be tagged */ if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && dev->device == PCI_DEVICE_ID_SERVERWORKS_LE && dev->revision <= 5) { pr_info("Serverworks LE rev < 6 detected. Write-combining disabled.\n"); pci_dev_put(dev); return 0; } /* * Intel 450NX errata # 23. Non ascending cacheline evictions to * write combining memory may resulting in data corruption */ if (dev->vendor == PCI_VENDOR_ID_INTEL && dev->device == PCI_DEVICE_ID_INTEL_82451NX) { pr_info("Intel 450NX MMC detected. Write-combining disabled.\n"); pci_dev_put(dev); return 0; } pci_dev_put(dev); } return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0; } static void __init init_table(void) { int i, max; max = num_var_ranges; for (i = 0; i < max; i++) mtrr_usage_table[i] = 1; } struct set_mtrr_data { unsigned long smp_base; unsigned long smp_size; unsigned int smp_reg; mtrr_type smp_type; }; /** * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed * by all the CPUs. * @info: pointer to mtrr configuration data * * Returns nothing. 
*/ static int mtrr_rendezvous_handler(void *info) { struct set_mtrr_data *data = info; mtrr_if->set(data->smp_reg, data->smp_base, data->smp_size, data->smp_type); return 0; } static inline int types_compatible(mtrr_type type1, mtrr_type type2) { return type1 == MTRR_TYPE_UNCACHABLE || type2 == MTRR_TYPE_UNCACHABLE || (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH); } /** * set_mtrr - update mtrrs on all processors * @reg: mtrr in question * @base: mtrr base * @size: mtrr size * @type: mtrr type * * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: * * 1. Queue work to do the following on all processors: * 2. Disable Interrupts * 3. Wait for all procs to do so * 4. Enter no-fill cache mode * 5. Flush caches * 6. Clear PGE bit * 7. Flush all TLBs * 8. Disable all range registers * 9. Update the MTRRs * 10. Enable all range registers * 11. Flush all TLBs and caches again * 12. Enter normal cache mode and reenable caching * 13. Set PGE * 14. Wait for buddies to catch up * 15. Enable interrupts. * * What does that mean for us? Well, stop_machine() will ensure that * the rendezvous handler is started on each CPU. And in lockstep they * do the state transition of disabling interrupts, updating MTRR's * (the CPU vendors may each do it differently, so we call mtrr_if->set() * callback and let them take care of it.) and enabling interrupts. * * Note that the mechanism is the same for UP systems, too; all the SMP stuff * becomes nops. */ static void set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) { struct set_mtrr_data data = { .smp_reg = reg, .smp_base = base, .smp_size = size, .smp_type = type }; stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask); generic_rebuild_map(); } /** * mtrr_add_page - Add a memory type region * @base: Physical base address of region in pages (in units of 4 kB!) * @size: Physical size of region in pages (4 kB) * @type: Type of MTRR desired * @increment: If this is true do usage counting on the region * * Memory type region registers control the caching on newer Intel and * non Intel processors. This function allows drivers to request an * MTRR is added. The details and hardware specifics of each processor's * implementation are hidden from the caller, but nevertheless the * caller should expect to need to provide a power of two size on an * equivalent power of two boundary. * * If the region cannot be added either because all regions are in use * or the CPU cannot support it a negative value is returned. On success * the register number for this entry is returned, but should be treated * as a cookie only. * * On a multiprocessor machine the changes are made to all processors. * This is required on x86 by the Intel processors. * * The available types are * * %MTRR_TYPE_UNCACHABLE - No caching * * %MTRR_TYPE_WRBACK - Write data back in bursts whenever * * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts * * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes * * BUGS: Needs a quiet flag for the cases where drivers do not mind * failures and do not wish system log messages to be sent. 
*/ int mtrr_add_page(unsigned long base, unsigned long size, unsigned int type, bool increment) { unsigned long lbase, lsize; int i, replace, error; mtrr_type ltype; if (!mtrr_enabled()) return -ENXIO; error = mtrr_if->validate_add_page(base, size, type); if (error) return error; if (type >= MTRR_NUM_TYPES) { pr_warn("type: %u invalid\n", type); return -EINVAL; } /* If the type is WC, check that this processor supports it */ if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { pr_warn("your processor doesn't support write-combining\n"); return -ENOSYS; } if (!size) { pr_warn("zero sized request\n"); return -EINVAL; } if ((base | (base + size - 1)) >> (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) { pr_warn("base or size exceeds the MTRR width\n"); return -EINVAL; } error = -EINVAL; replace = -1; /* No CPU hotplug when we change MTRR entries */ cpus_read_lock(); /* Search for existing MTRR */ mutex_lock(&mtrr_mutex); for (i = 0; i < num_var_ranges; ++i) { mtrr_if->get(i, &lbase, &lsize, &ltype); if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase) continue; /* * At this point we know there is some kind of * overlap/enclosure */ if (base < lbase || base + size - 1 > lbase + lsize - 1) { if (base <= lbase && base + size - 1 >= lbase + lsize - 1) { /* New region encloses an existing region */ if (type == ltype) { replace = replace == -1 ? i : -2; continue; } else if (types_compatible(type, ltype)) continue; } pr_warn("0x%lx000,0x%lx000 overlaps existing 0x%lx000,0x%lx000\n", base, size, lbase, lsize); goto out; } /* New region is enclosed by an existing region */ if (ltype != type) { if (types_compatible(type, ltype)) continue; pr_warn("type mismatch for %lx000,%lx000 old: %s new: %s\n", base, size, mtrr_attrib_to_str(ltype), mtrr_attrib_to_str(type)); goto out; } if (increment) ++mtrr_usage_table[i]; error = i; goto out; } /* Search for an empty MTRR */ i = mtrr_if->get_free_region(base, size, replace); if (i >= 0) { set_mtrr(i, base, size, type); if (likely(replace < 0)) { mtrr_usage_table[i] = 1; } else { mtrr_usage_table[i] = mtrr_usage_table[replace]; if (increment) mtrr_usage_table[i]++; if (unlikely(replace != i)) { set_mtrr(replace, 0, 0, 0); mtrr_usage_table[replace] = 0; } } } else { pr_info("no more MTRRs available\n"); } error = i; out: mutex_unlock(&mtrr_mutex); cpus_read_unlock(); return error; } static int mtrr_check(unsigned long base, unsigned long size) { if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { pr_warn("size and base must be multiples of 4 kiB\n"); Dprintk("size: 0x%lx base: 0x%lx\n", size, base); dump_stack(); return -1; } return 0; } /** * mtrr_add - Add a memory type region * @base: Physical base address of region * @size: Physical size of region * @type: Type of MTRR desired * @increment: If this is true do usage counting on the region * * Memory type region registers control the caching on newer Intel and * non Intel processors. This function allows drivers to request an * MTRR is added. The details and hardware specifics of each processor's * implementation are hidden from the caller, but nevertheless the * caller should expect to need to provide a power of two size on an * equivalent power of two boundary. * * If the region cannot be added either because all regions are in use * or the CPU cannot support it a negative value is returned. On success * the register number for this entry is returned, but should be treated * as a cookie only. * * On a multiprocessor machine the changes are made to all processors. 
* This is required on x86 by the Intel processors. * * The available types are * * %MTRR_TYPE_UNCACHABLE - No caching * * %MTRR_TYPE_WRBACK - Write data back in bursts whenever * * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts * * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes * * BUGS: Needs a quiet flag for the cases where drivers do not mind * failures and do not wish system log messages to be sent. */ int mtrr_add(unsigned long base, unsigned long size, unsigned int type, bool increment) { if (!mtrr_enabled()) return -ENODEV; if (mtrr_check(base, size)) return -EINVAL; return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, increment); } /** * mtrr_del_page - delete a memory type region * @reg: Register returned by mtrr_add * @base: Physical base address * @size: Size of region * * If register is supplied then base and size are ignored. This is * how drivers should call it. * * Releases an MTRR region. If the usage count drops to zero the * register is freed and the region returns to default state. * On success the register is returned, on failure a negative error * code. */ int mtrr_del_page(int reg, unsigned long base, unsigned long size) { int i, max; mtrr_type ltype; unsigned long lbase, lsize; int error = -EINVAL; if (!mtrr_enabled()) return -ENODEV; max = num_var_ranges; /* No CPU hotplug when we change MTRR entries */ cpus_read_lock(); mutex_lock(&mtrr_mutex); if (reg < 0) { /* Search for existing MTRR */ for (i = 0; i < max; ++i) { mtrr_if->get(i, &lbase, &lsize, &ltype); if (lbase == base && lsize == size) { reg = i; break; } } if (reg < 0) { Dprintk("no MTRR for %lx000,%lx000 found\n", base, size); goto out; } } if (reg >= max) { pr_warn("register: %d too big\n", reg); goto out; } mtrr_if->get(reg, &lbase, &lsize, &ltype); if (lsize < 1) { pr_warn("MTRR %d not used\n", reg); goto out; } if (mtrr_usage_table[reg] < 1) { pr_warn("reg: %d has count=0\n", reg); goto out; } if (--mtrr_usage_table[reg] < 1) set_mtrr(reg, 0, 0, 0); error = reg; out: mutex_unlock(&mtrr_mutex); cpus_read_unlock(); return error; } /** * mtrr_del - delete a memory type region * @reg: Register returned by mtrr_add * @base: Physical base address * @size: Size of region * * If register is supplied then base and size are ignored. This is * how drivers should call it. * * Releases an MTRR region. If the usage count drops to zero the * register is freed and the region returns to default state. * On success the register is returned, on failure a negative error * code. */ int mtrr_del(int reg, unsigned long base, unsigned long size) { if (!mtrr_enabled()) return -ENODEV; if (mtrr_check(base, size)) return -EINVAL; return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); } /** * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable * @base: Physical base address * @size: Size of region * * If PAT is available, this does nothing. If PAT is unavailable, it * attempts to add a WC MTRR covering size bytes starting at base and * logs an error if this fails. * * The called should provide a power of two size on an equivalent * power of two boundary. * * Drivers must store the return value to pass to mtrr_del_wc_if_needed, * but drivers should not try to interpret that return value. */ int arch_phys_wc_add(unsigned long base, unsigned long size) { int ret; if (pat_enabled() || !mtrr_enabled()) return 0; /* Success! (We don't need to do anything.) 
*/ ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true); if (ret < 0) { pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.", (void *)base, (void *)(base + size - 1)); return ret; } return ret + MTRR_TO_PHYS_WC_OFFSET; } EXPORT_SYMBOL(arch_phys_wc_add); /* * arch_phys_wc_del - undoes arch_phys_wc_add * @handle: Return value from arch_phys_wc_add * * This cleans up after mtrr_add_wc_if_needed. * * The API guarantees that mtrr_del_wc_if_needed(error code) and * mtrr_del_wc_if_needed(0) do nothing. */ void arch_phys_wc_del(int handle) { if (handle >= 1) { WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET); mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0); } } EXPORT_SYMBOL(arch_phys_wc_del); /* * arch_phys_wc_index - translates arch_phys_wc_add's return value * @handle: Return value from arch_phys_wc_add * * This will turn the return value from arch_phys_wc_add into an mtrr * index suitable for debugging. * * Note: There is no legitimate use for this function, except possibly * in printk line. Alas there is an illegitimate use in some ancient * drm ioctls. */ int arch_phys_wc_index(int handle) { if (handle < MTRR_TO_PHYS_WC_OFFSET) return -1; else return handle - MTRR_TO_PHYS_WC_OFFSET; } EXPORT_SYMBOL_GPL(arch_phys_wc_index); int __initdata changed_by_mtrr_cleanup; /** * mtrr_bp_init - initialize MTRRs on the boot CPU * * This needs to be called early; before any of the other CPUs are * initialized (i.e. before smp_init()). */ void __init mtrr_bp_init(void) { bool generic_mtrrs = cpu_feature_enabled(X86_FEATURE_MTRR); const char *why = "(not available)"; unsigned long config, dummy; phys_hi_rsvd = GENMASK(31, boot_cpu_data.x86_phys_bits - 32); if (!generic_mtrrs && mtrr_state.enabled) { /* * Software overwrite of MTRR state, only for generic case. * Note that X86_FEATURE_MTRR has been reset in this case. */ init_table(); mtrr_build_map(); pr_info("MTRRs set to read-only\n"); return; } if (generic_mtrrs) mtrr_if = &generic_mtrr_ops; else mtrr_set_if(); if (mtrr_enabled()) { /* Get the number of variable MTRR ranges. */ if (mtrr_if == &generic_mtrr_ops) rdmsr(MSR_MTRRcap, config, dummy); else config = mtrr_if->var_regs; num_var_ranges = config & MTRR_CAP_VCNT; init_table(); if (mtrr_if == &generic_mtrr_ops) { /* BIOS may override */ if (get_mtrr_state()) { memory_caching_control |= CACHE_MTRR; changed_by_mtrr_cleanup = mtrr_cleanup(); mtrr_build_map(); } else { mtrr_if = NULL; why = "by BIOS"; } } } if (!mtrr_enabled()) pr_info("MTRRs disabled %s\n", why); } /** * mtrr_save_state - Save current fixed-range MTRR state of the first * cpu in cpu_online_mask. */ void mtrr_save_state(void) { int first_cpu; if (!mtrr_enabled()) return; first_cpu = cpumask_first(cpu_online_mask); smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1); } static int __init mtrr_init_finalize(void) { /* * Map might exist if mtrr_overwrite_state() has been called or if * mtrr_enabled() returns true. */ mtrr_copy_map(); if (!mtrr_enabled()) return 0; if (memory_caching_control & CACHE_MTRR) { if (!changed_by_mtrr_cleanup) mtrr_state_warn(); return 0; } mtrr_register_syscore(); return 0; } subsys_initcall(mtrr_init_finalize);
linux-master
arch/x86/kernel/cpu/mtrr/mtrr.c
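The mtrr.c kernel-doc above recommends that drivers request write-combining through arch_phys_wc_add()/arch_phys_wc_del() rather than raw mtrr_add()/mtrr_del(), since the helper degrades to a no-op when PAT is in use and its return value is only an opaque cookie. A minimal module sketch of that usage follows; it is illustrative only, the MMIO base address, size and module name are invented, and the declarations are assumed to be pulled in via <linux/io.h>.

#include <linux/module.h>
#include <linux/io.h>

static int wc_handle;

static int __init wc_demo_init(void)
{
	/* Hypothetical framebuffer aperture: 16 MiB at 0xd0000000. */
	wc_handle = arch_phys_wc_add(0xd0000000UL, 16 * 1024 * 1024);

	/*
	 * A zero or negative handle only means "no MTRR was added"
	 * (PAT in use, or the add failed); either value is still safe
	 * to hand back to arch_phys_wc_del() on teardown.
	 */
	return 0;
}

static void __exit wc_demo_exit(void)
{
	arch_phys_wc_del(wc_handle);
}

module_init(wc_demo_init);
module_exit(wc_demo_exit);
MODULE_DESCRIPTION("arch_phys_wc_add()/arch_phys_wc_del() usage sketch");
MODULE_LICENSE("GPL");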
// SPDX-License-Identifier: GPL-2.0 #include <linux/init.h> #include <linux/mm.h> #include <asm/mtrr.h> #include <asm/msr.h> #include "mtrr.h" static void amd_get_mtrr(unsigned int reg, unsigned long *base, unsigned long *size, mtrr_type *type) { unsigned long low, high; rdmsr(MSR_K6_UWCCR, low, high); /* Upper dword is region 1, lower is region 0 */ if (reg == 1) low = high; /* The base masks off on the right alignment */ *base = (low & 0xFFFE0000) >> PAGE_SHIFT; *type = 0; if (low & 1) *type = MTRR_TYPE_UNCACHABLE; if (low & 2) *type = MTRR_TYPE_WRCOMB; if (!(low & 3)) { *size = 0; return; } /* * This needs a little explaining. The size is stored as an * inverted mask of bits of 128K granularity 15 bits long offset * 2 bits. * * So to get a size we do invert the mask and add 1 to the lowest * mask bit (4 as its 2 bits in). This gives us a size we then shift * to turn into 128K blocks. * * eg 111 1111 1111 1100 is 512K * * invert 000 0000 0000 0011 * +1 000 0000 0000 0100 * *128K ... */ low = (~low) & 0x1FFFC; *size = (low + 4) << (15 - PAGE_SHIFT); } /** * amd_set_mtrr - Set variable MTRR register on the local CPU. * * @reg The register to set. * @base The base address of the region. * @size The size of the region. If this is 0 the region is disabled. * @type The type of the region. * * Returns nothing. */ static void amd_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) { u32 regs[2]; /* * Low is MTRR0, High MTRR 1 */ rdmsr(MSR_K6_UWCCR, regs[0], regs[1]); /* * Blank to disable */ if (size == 0) { regs[reg] = 0; } else { /* * Set the register to the base, the type (off by one) and an * inverted bitmask of the size The size is the only odd * bit. We are fed say 512K We invert this and we get 111 1111 * 1111 1011 but if you subtract one and invert you get the * desired 111 1111 1111 1100 mask * * But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */ regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC) | (base << PAGE_SHIFT) | (type + 1); } /* * The writeback rule is quite specific. See the manual. Its * disable local interrupts, write back the cache, set the mtrr */ wbinvd(); wrmsr(MSR_K6_UWCCR, regs[0], regs[1]); } static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type) { /* * Apply the K6 block alignment and size rules * In order * o Uncached or gathering only * o 128K or bigger block * o Power of 2 block * o base suitably aligned to the power */ if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT)) || (size & ~(size - 1)) - size || (base & (size - 1))) return -EINVAL; return 0; } const struct mtrr_ops amd_mtrr_ops = { .var_regs = 2, .set = amd_set_mtrr, .get = amd_get_mtrr, .get_free_region = generic_get_free_region, .validate_add_page = amd_validate_add_page, .have_wrcomb = positive_have_wrcomb, };
linux-master
arch/x86/kernel/cpu/mtrr/amd.c
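The size encoding walked through in the amd_get_mtrr()/amd_set_mtrr() comments above can be checked in isolation. The following stand-alone sketch (not kernel code; it assumes PAGE_SHIFT == 12 and 32-bit register words) round-trips the inverted-mask arithmetic for the 512 KiB example given in the comment.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Encode a region size (in pages) into the 15-bit inverted-mask field,
 * mirroring the expression used by amd_set_mtrr(). */
static uint32_t k6_encode_size(uint32_t size_pages)
{
	return (-size_pages >> (15 - PAGE_SHIFT)) & 0x0001FFFC;
}

/* Decode the field back into a size in pages, as amd_get_mtrr() does. */
static uint32_t k6_decode_size(uint32_t low)
{
	low = (~low) & 0x1FFFC;
	return (low + 4) << (15 - PAGE_SHIFT);
}

int main(void)
{
	uint32_t pages = (512 * 1024) >> PAGE_SHIFT;	/* 512 KiB */
	uint32_t field = k6_encode_size(pages);

	/* Prints field=0x1fff0, i.e. the 111 1111 1111 1100 pattern from the
	 * comment shifted into bits 16..2, decoded back to 128 pages. */
	printf("field=%#x decoded=%u pages\n", field, k6_decode_size(field));
	return 0;
}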
// SPDX-License-Identifier: GPL-2.0 #include <linux/capability.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/proc_fs.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/init.h> #define LINE_SIZE 80 #include <asm/mtrr.h> #include "mtrr.h" #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) static const char *const mtrr_strings[MTRR_NUM_TYPES] = { "uncachable", /* 0 */ "write-combining", /* 1 */ "?", /* 2 */ "?", /* 3 */ "write-through", /* 4 */ "write-protect", /* 5 */ "write-back", /* 6 */ }; const char *mtrr_attrib_to_str(int x) { return (x <= 6) ? mtrr_strings[x] : "?"; } #ifdef CONFIG_PROC_FS static int mtrr_file_add(unsigned long base, unsigned long size, unsigned int type, bool increment, struct file *file, int page) { unsigned int *fcount = FILE_FCOUNT(file); int reg, max; max = num_var_ranges; if (fcount == NULL) { fcount = kcalloc(max, sizeof(*fcount), GFP_KERNEL); if (!fcount) return -ENOMEM; FILE_FCOUNT(file) = fcount; } if (!page) { if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) return -EINVAL; base >>= PAGE_SHIFT; size >>= PAGE_SHIFT; } reg = mtrr_add_page(base, size, type, true); if (reg >= 0) ++fcount[reg]; return reg; } static int mtrr_file_del(unsigned long base, unsigned long size, struct file *file, int page) { unsigned int *fcount = FILE_FCOUNT(file); int reg; if (!page) { if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) return -EINVAL; base >>= PAGE_SHIFT; size >>= PAGE_SHIFT; } reg = mtrr_del_page(-1, base, size); if (reg < 0) return reg; if (fcount == NULL) return reg; if (fcount[reg] < 1) return -EINVAL; --fcount[reg]; return reg; } /* * seq_file can seek but we ignore it. * * Format of control line: * "base=%Lx size=%Lx type=%s" or "disable=%d" */ static ssize_t mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) { int i, err; unsigned long reg; unsigned long long base, size; char *ptr; char line[LINE_SIZE]; int length; size_t linelen; memset(line, 0, LINE_SIZE); len = min_t(size_t, len, LINE_SIZE - 1); length = strncpy_from_user(line, buf, len); if (length < 0) return length; linelen = strlen(line); ptr = line + linelen - 1; if (linelen && *ptr == '\n') *ptr = '\0'; if (!strncmp(line, "disable=", 8)) { reg = simple_strtoul(line + 8, &ptr, 0); err = mtrr_del_page(reg, 0, 0); if (err < 0) return err; return len; } if (strncmp(line, "base=", 5)) return -EINVAL; base = simple_strtoull(line + 5, &ptr, 0); ptr = skip_spaces(ptr); if (strncmp(ptr, "size=", 5)) return -EINVAL; size = simple_strtoull(ptr + 5, &ptr, 0); if ((base & 0xfff) || (size & 0xfff)) return -EINVAL; ptr = skip_spaces(ptr); if (strncmp(ptr, "type=", 5)) return -EINVAL; ptr = skip_spaces(ptr + 5); i = match_string(mtrr_strings, MTRR_NUM_TYPES, ptr); if (i < 0) return i; base >>= PAGE_SHIFT; size >>= PAGE_SHIFT; err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true); if (err < 0) return err; return len; } static long mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) { int err = 0; mtrr_type type; unsigned long base; unsigned long size; struct mtrr_sentry sentry; struct mtrr_gentry gentry; void __user *arg = (void __user *) __arg; memset(&gentry, 0, sizeof(gentry)); switch (cmd) { case MTRRIOC_ADD_ENTRY: case MTRRIOC_SET_ENTRY: case MTRRIOC_DEL_ENTRY: case MTRRIOC_KILL_ENTRY: case MTRRIOC_ADD_PAGE_ENTRY: case MTRRIOC_SET_PAGE_ENTRY: case MTRRIOC_DEL_PAGE_ENTRY: case MTRRIOC_KILL_PAGE_ENTRY: if (copy_from_user(&sentry, arg, 
sizeof(sentry))) return -EFAULT; break; case MTRRIOC_GET_ENTRY: case MTRRIOC_GET_PAGE_ENTRY: if (copy_from_user(&gentry, arg, sizeof(gentry))) return -EFAULT; break; #ifdef CONFIG_COMPAT case MTRRIOC32_ADD_ENTRY: case MTRRIOC32_SET_ENTRY: case MTRRIOC32_DEL_ENTRY: case MTRRIOC32_KILL_ENTRY: case MTRRIOC32_ADD_PAGE_ENTRY: case MTRRIOC32_SET_PAGE_ENTRY: case MTRRIOC32_DEL_PAGE_ENTRY: case MTRRIOC32_KILL_PAGE_ENTRY: { struct mtrr_sentry32 __user *s32; s32 = (struct mtrr_sentry32 __user *)__arg; err = get_user(sentry.base, &s32->base); err |= get_user(sentry.size, &s32->size); err |= get_user(sentry.type, &s32->type); if (err) return err; break; } case MTRRIOC32_GET_ENTRY: case MTRRIOC32_GET_PAGE_ENTRY: { struct mtrr_gentry32 __user *g32; g32 = (struct mtrr_gentry32 __user *)__arg; err = get_user(gentry.regnum, &g32->regnum); err |= get_user(gentry.base, &g32->base); err |= get_user(gentry.size, &g32->size); err |= get_user(gentry.type, &g32->type); if (err) return err; break; } #endif } switch (cmd) { default: return -ENOTTY; case MTRRIOC_ADD_ENTRY: #ifdef CONFIG_COMPAT case MTRRIOC32_ADD_ENTRY: #endif err = mtrr_file_add(sentry.base, sentry.size, sentry.type, true, file, 0); break; case MTRRIOC_SET_ENTRY: #ifdef CONFIG_COMPAT case MTRRIOC32_SET_ENTRY: #endif err = mtrr_add(sentry.base, sentry.size, sentry.type, false); break; case MTRRIOC_DEL_ENTRY: #ifdef CONFIG_COMPAT case MTRRIOC32_DEL_ENTRY: #endif err = mtrr_file_del(sentry.base, sentry.size, file, 0); break; case MTRRIOC_KILL_ENTRY: #ifdef CONFIG_COMPAT case MTRRIOC32_KILL_ENTRY: #endif err = mtrr_del(-1, sentry.base, sentry.size); break; case MTRRIOC_GET_ENTRY: #ifdef CONFIG_COMPAT case MTRRIOC32_GET_ENTRY: #endif if (gentry.regnum >= num_var_ranges) return -EINVAL; mtrr_if->get(gentry.regnum, &base, &size, &type); /* Hide entries that go above 4GB */ if (base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)) || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))) gentry.base = gentry.size = gentry.type = 0; else { gentry.base = base << PAGE_SHIFT; gentry.size = size << PAGE_SHIFT; gentry.type = type; } break; case MTRRIOC_ADD_PAGE_ENTRY: #ifdef CONFIG_COMPAT case MTRRIOC32_ADD_PAGE_ENTRY: #endif err = mtrr_file_add(sentry.base, sentry.size, sentry.type, true, file, 1); break; case MTRRIOC_SET_PAGE_ENTRY: #ifdef CONFIG_COMPAT case MTRRIOC32_SET_PAGE_ENTRY: #endif err = mtrr_add_page(sentry.base, sentry.size, sentry.type, false); break; case MTRRIOC_DEL_PAGE_ENTRY: #ifdef CONFIG_COMPAT case MTRRIOC32_DEL_PAGE_ENTRY: #endif err = mtrr_file_del(sentry.base, sentry.size, file, 1); break; case MTRRIOC_KILL_PAGE_ENTRY: #ifdef CONFIG_COMPAT case MTRRIOC32_KILL_PAGE_ENTRY: #endif err = mtrr_del_page(-1, sentry.base, sentry.size); break; case MTRRIOC_GET_PAGE_ENTRY: #ifdef CONFIG_COMPAT case MTRRIOC32_GET_PAGE_ENTRY: #endif if (gentry.regnum >= num_var_ranges) return -EINVAL; mtrr_if->get(gentry.regnum, &base, &size, &type); /* Hide entries that would overflow */ if (size != (__typeof__(gentry.size))size) gentry.base = gentry.size = gentry.type = 0; else { gentry.base = base; gentry.size = size; gentry.type = type; } break; } if (err) return err; switch (cmd) { case MTRRIOC_GET_ENTRY: case MTRRIOC_GET_PAGE_ENTRY: if (copy_to_user(arg, &gentry, sizeof(gentry))) err = -EFAULT; break; #ifdef CONFIG_COMPAT case MTRRIOC32_GET_ENTRY: case MTRRIOC32_GET_PAGE_ENTRY: { struct mtrr_gentry32 __user *g32; g32 = (struct mtrr_gentry32 __user *)__arg; err = put_user(gentry.base, &g32->base); err |= put_user(gentry.size, &g32->size); err |= 
put_user(gentry.regnum, &g32->regnum); err |= put_user(gentry.type, &g32->type); break; } #endif } return err; } static int mtrr_close(struct inode *ino, struct file *file) { unsigned int *fcount = FILE_FCOUNT(file); int i, max; if (fcount != NULL) { max = num_var_ranges; for (i = 0; i < max; ++i) { while (fcount[i] > 0) { mtrr_del(i, 0, 0); --fcount[i]; } } kfree(fcount); FILE_FCOUNT(file) = NULL; } return single_release(ino, file); } static int mtrr_seq_show(struct seq_file *seq, void *offset) { char factor; int i, max; mtrr_type type; unsigned long base, size; max = num_var_ranges; for (i = 0; i < max; i++) { mtrr_if->get(i, &base, &size, &type); if (size == 0) { mtrr_usage_table[i] = 0; continue; } if (size < (0x100000 >> PAGE_SHIFT)) { /* less than 1MB */ factor = 'K'; size <<= PAGE_SHIFT - 10; } else { factor = 'M'; size >>= 20 - PAGE_SHIFT; } /* Base can be > 32bit */ seq_printf(seq, "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n", i, base, base >> (20 - PAGE_SHIFT), size, factor, mtrr_usage_table[i], mtrr_attrib_to_str(type)); } return 0; } static int mtrr_open(struct inode *inode, struct file *file) { if (!mtrr_if) return -EIO; if (!mtrr_if->get) return -ENXIO; if (!capable(CAP_SYS_ADMIN)) return -EPERM; return single_open(file, mtrr_seq_show, NULL); } static const struct proc_ops mtrr_proc_ops = { .proc_open = mtrr_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_write = mtrr_write, .proc_ioctl = mtrr_ioctl, #ifdef CONFIG_COMPAT .proc_compat_ioctl = mtrr_ioctl, #endif .proc_release = mtrr_close, }; static int __init mtrr_if_init(void) { struct cpuinfo_x86 *c = &boot_cpu_data; if ((!cpu_has(c, X86_FEATURE_MTRR)) && (!cpu_has(c, X86_FEATURE_K6_MTRR)) && (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) && (!cpu_has(c, X86_FEATURE_CENTAUR_MCR))) return -ENODEV; proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_proc_ops); return 0; } arch_initcall(mtrr_if_init); #endif /* CONFIG_PROC_FS */
linux-master
arch/x86/kernel/cpu/mtrr/if.c
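mtrr_write() above parses a plain text command, "base=%Lx size=%Lx type=%s" or "disable=%d". A user-space sketch of driving that interface follows; it is illustrative only, requires CAP_SYS_ADMIN, and the base address and size are invented (both must be page aligned, and the type string must match one of the mtrr_strings[] names such as "write-combining").

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical 16 MiB aperture at 0xd0000000; "disable=N" would
	 * release register N again. */
	const char *cmd = "base=0xd0000000 size=0x1000000 type=write-combining\n";
	int fd = open("/proc/mtrr", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/mtrr");	/* needs CAP_SYS_ADMIN */
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");		/* e.g. EINVAL if misaligned */
	close(fd);
	return 0;
}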
// SPDX-License-Identifier: GPL-2.0 #include <linux/init.h> #include <linux/mm.h> #include <asm/mtrr.h> #include <asm/msr.h> #include "mtrr.h" static struct { unsigned long high; unsigned long low; } centaur_mcr[8]; static u8 centaur_mcr_reserved; static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */ /** * centaur_get_free_region - Get a free MTRR. * * @base: The starting (base) address of the region. * @size: The size (in bytes) of the region. * * Returns: the index of the region on success, else -1 on error. */ static int centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) { unsigned long lbase, lsize; mtrr_type ltype; int i, max; max = num_var_ranges; if (replace_reg >= 0 && replace_reg < max) return replace_reg; for (i = 0; i < max; ++i) { if (centaur_mcr_reserved & (1 << i)) continue; mtrr_if->get(i, &lbase, &lsize, &ltype); if (lsize == 0) return i; } return -ENOSPC; } static void centaur_get_mcr(unsigned int reg, unsigned long *base, unsigned long *size, mtrr_type * type) { *base = centaur_mcr[reg].high >> PAGE_SHIFT; *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; *type = MTRR_TYPE_WRCOMB; /* write-combining */ if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2)) *type = MTRR_TYPE_UNCACHABLE; if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25) *type = MTRR_TYPE_WRBACK; if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31) *type = MTRR_TYPE_WRBACK; } static void centaur_set_mcr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) { unsigned long low, high; if (size == 0) { /* Disable */ high = low = 0; } else { high = base << PAGE_SHIFT; if (centaur_mcr_type == 0) { /* Only support write-combining... */ low = -size << PAGE_SHIFT | 0x1f; } else { if (type == MTRR_TYPE_UNCACHABLE) low = -size << PAGE_SHIFT | 0x02; /* NC */ else low = -size << PAGE_SHIFT | 0x09; /* WWO, WC */ } } centaur_mcr[reg].high = high; centaur_mcr[reg].low = low; wrmsr(MSR_IDT_MCR0 + reg, low, high); } static int centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int type) { /* * FIXME: Winchip2 supports uncached */ if (type != MTRR_TYPE_WRCOMB && (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { pr_warn("mtrr: only write-combining%s supported\n", centaur_mcr_type ? " and uncacheable are" : " is"); return -EINVAL; } return 0; } const struct mtrr_ops centaur_mtrr_ops = { .var_regs = 8, .set = centaur_set_mcr, .get = centaur_get_mcr, .get_free_region = centaur_get_free_region, .validate_add_page = centaur_validate_add_page, .have_wrcomb = positive_have_wrcomb, };
linux-master
arch/x86/kernel/cpu/mtrr/centaur.c
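For comparison with the AMD example, the WinChip MCR layout used by centaur_set_mcr()/centaur_get_mcr() above keeps the byte base address in the high word and the negated byte size plus key bits in the low word. A stand-alone sketch (not kernel code; 32-bit words, PAGE_SHIFT assumed to be 12, example region invented) that round-trips one register pair:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t base = 0x100, size = 0x100;	/* 1 MiB at 1 MiB, in pages */

	/* Encode as centaur_set_mcr() does for the original WinChip
	 * (0x1f selects write-combining). */
	uint32_t high = base << PAGE_SHIFT;
	uint32_t low  = (-size << PAGE_SHIFT) | 0x1f;

	/* Decode exactly as centaur_get_mcr() does. */
	uint32_t dec_base = high >> PAGE_SHIFT;
	uint32_t dec_size = -(low & 0xfffff000) >> PAGE_SHIFT;

	/* Decodes back to base 0x100 and size 0x100 pages. */
	printf("low=%#x high=%#x -> base=%#x size=%#x pages\n",
	       low, high, dec_base, dec_size);
	return 0;
}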
// SPDX-License-Identifier: GPL-2.0-only /* * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong * because MTRRs can span up to 40 bits (36bits on most modern x86) */ #include <linux/export.h> #include <linux/init.h> #include <linux/io.h> #include <linux/mm.h> #include <linux/cc_platform.h> #include <asm/processor-flags.h> #include <asm/cacheinfo.h> #include <asm/cpufeature.h> #include <asm/hypervisor.h> #include <asm/mshyperv.h> #include <asm/tlbflush.h> #include <asm/mtrr.h> #include <asm/msr.h> #include <asm/memtype.h> #include "mtrr.h" struct fixed_range_block { int base_msr; /* start address of an MTRR block */ int ranges; /* number of MTRRs in this block */ }; static struct fixed_range_block fixed_range_blocks[] = { { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ {} }; struct cache_map { u64 start; u64 end; u64 flags; u64 type:8; u64 fixed:1; }; bool mtrr_debug; static int __init mtrr_param_setup(char *str) { int rc = 0; if (!str) return -EINVAL; if (!strcmp(str, "debug")) mtrr_debug = true; else rc = -EINVAL; return rc; } early_param("mtrr", mtrr_param_setup); /* * CACHE_MAP_MAX is the maximum number of memory ranges in cache_map, where * no 2 adjacent ranges have the same cache mode (those would be merged). * The number is based on the worst case: * - no two adjacent fixed MTRRs share the same cache mode * - one variable MTRR is spanning a huge area with mode WB * - 255 variable MTRRs with mode UC all overlap with the WB MTRR, creating 2 * additional ranges each (result like "ababababa...aba" with a = WB, b = UC), * accounting for MTRR_MAX_VAR_RANGES * 2 - 1 range entries * - a TOP_MEM2 area (even with overlapping an UC MTRR can't add 2 range entries * to the possible maximum, as it always starts at 4GB, thus it can't be in * the middle of that MTRR, unless that MTRR starts at 0, which would remove * the initial "a" from the "abababa" pattern above) * The map won't contain ranges with no matching MTRR (those fall back to the * default cache mode). */ #define CACHE_MAP_MAX (MTRR_NUM_FIXED_RANGES + MTRR_MAX_VAR_RANGES * 2) static struct cache_map init_cache_map[CACHE_MAP_MAX] __initdata; static struct cache_map *cache_map __refdata = init_cache_map; static unsigned int cache_map_size = CACHE_MAP_MAX; static unsigned int cache_map_n; static unsigned int cache_map_fixed; static unsigned long smp_changes_mask; static int mtrr_state_set; u64 mtrr_tom2; struct mtrr_state_type mtrr_state; EXPORT_SYMBOL_GPL(mtrr_state); /* Reserved bits in the high portion of the MTRRphysBaseN MSR. */ u32 phys_hi_rsvd; /* * BIOS is expected to clear MtrrFixDramModEn bit, see for example * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD * Opteron Processors" (26094 Rev. 3.30 February 2006), section * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set * to 1 during BIOS initialization of the fixed MTRRs, then cleared to * 0 for operation." 
*/ static inline void k8_check_syscfg_dram_mod_en(void) { u32 lo, hi; if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0x0f))) return; rdmsr(MSR_AMD64_SYSCFG, lo, hi); if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) { pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]" " not cleared by BIOS, clearing this bit\n", smp_processor_id()); lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY; mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi); } } /* Get the size of contiguous MTRR range */ static u64 get_mtrr_size(u64 mask) { u64 size; mask |= (u64)phys_hi_rsvd << 32; size = -mask; return size; } static u8 get_var_mtrr_state(unsigned int reg, u64 *start, u64 *size) { struct mtrr_var_range *mtrr = mtrr_state.var_ranges + reg; if (!(mtrr->mask_lo & MTRR_PHYSMASK_V)) return MTRR_TYPE_INVALID; *start = (((u64)mtrr->base_hi) << 32) + (mtrr->base_lo & PAGE_MASK); *size = get_mtrr_size((((u64)mtrr->mask_hi) << 32) + (mtrr->mask_lo & PAGE_MASK)); return mtrr->base_lo & MTRR_PHYSBASE_TYPE; } static u8 get_effective_type(u8 type1, u8 type2) { if (type1 == MTRR_TYPE_UNCACHABLE || type2 == MTRR_TYPE_UNCACHABLE) return MTRR_TYPE_UNCACHABLE; if ((type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH) || (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK)) return MTRR_TYPE_WRTHROUGH; if (type1 != type2) return MTRR_TYPE_UNCACHABLE; return type1; } static void rm_map_entry_at(int idx) { cache_map_n--; if (cache_map_n > idx) { memmove(cache_map + idx, cache_map + idx + 1, sizeof(*cache_map) * (cache_map_n - idx)); } } /* * Add an entry into cache_map at a specific index. Merges adjacent entries if * appropriate. Return the number of merges for correcting the scan index * (this is needed as merging will reduce the number of entries, which will * result in skipping entries in future iterations if the scan index isn't * corrected). * Note that the corrected index can never go below -1 (resulting in being 0 in * the next scan iteration), as "2" is returned only if the current index is * larger than zero. */ static int add_map_entry_at(u64 start, u64 end, u8 type, int idx) { bool merge_prev = false, merge_next = false; if (start >= end) return 0; if (idx > 0) { struct cache_map *prev = cache_map + idx - 1; if (!prev->fixed && start == prev->end && type == prev->type) merge_prev = true; } if (idx < cache_map_n) { struct cache_map *next = cache_map + idx; if (!next->fixed && end == next->start && type == next->type) merge_next = true; } if (merge_prev && merge_next) { cache_map[idx - 1].end = cache_map[idx].end; rm_map_entry_at(idx); return 2; } if (merge_prev) { cache_map[idx - 1].end = end; return 1; } if (merge_next) { cache_map[idx].start = start; return 1; } /* Sanity check: the array should NEVER be too small! */ if (cache_map_n == cache_map_size) { WARN(1, "MTRR cache mode memory map exhausted!\n"); cache_map_n = cache_map_fixed; return 0; } if (cache_map_n > idx) { memmove(cache_map + idx + 1, cache_map + idx, sizeof(*cache_map) * (cache_map_n - idx)); } cache_map[idx].start = start; cache_map[idx].end = end; cache_map[idx].type = type; cache_map[idx].fixed = 0; cache_map_n++; return 0; } /* Clear a part of an entry. Return 1 if start of entry is still valid. 
*/ static int clr_map_range_at(u64 start, u64 end, int idx) { int ret = start != cache_map[idx].start; u64 tmp; if (start == cache_map[idx].start && end == cache_map[idx].end) { rm_map_entry_at(idx); } else if (start == cache_map[idx].start) { cache_map[idx].start = end; } else if (end == cache_map[idx].end) { cache_map[idx].end = start; } else { tmp = cache_map[idx].end; cache_map[idx].end = start; add_map_entry_at(end, tmp, cache_map[idx].type, idx + 1); } return ret; } /* * Add MTRR to the map. The current map is scanned and each part of the MTRR * either overlapping with an existing entry or with a hole in the map is * handled separately. */ static void add_map_entry(u64 start, u64 end, u8 type) { u8 new_type, old_type; u64 tmp; int i; for (i = 0; i < cache_map_n && start < end; i++) { if (start >= cache_map[i].end) continue; if (start < cache_map[i].start) { /* Region start has no overlap. */ tmp = min(end, cache_map[i].start); i -= add_map_entry_at(start, tmp, type, i); start = tmp; continue; } new_type = get_effective_type(type, cache_map[i].type); old_type = cache_map[i].type; if (cache_map[i].fixed || new_type == old_type) { /* Cut off start of new entry. */ start = cache_map[i].end; continue; } /* Handle only overlapping part of region. */ tmp = min(end, cache_map[i].end); i += clr_map_range_at(start, tmp, i); i -= add_map_entry_at(start, tmp, new_type, i); start = tmp; } /* Add rest of region after last map entry (rest might be empty). */ add_map_entry_at(start, end, type, i); } /* Add variable MTRRs to cache map. */ static void map_add_var(void) { u64 start, size; unsigned int i; u8 type; /* * Add AMD TOP_MEM2 area. Can't be added in mtrr_build_map(), as it * needs to be added again when rebuilding the map due to potentially * having moved as a result of variable MTRRs for memory below 4GB. */ if (mtrr_tom2) { add_map_entry(BIT_ULL(32), mtrr_tom2, MTRR_TYPE_WRBACK); cache_map[cache_map_n - 1].fixed = 1; } for (i = 0; i < num_var_ranges; i++) { type = get_var_mtrr_state(i, &start, &size); if (type != MTRR_TYPE_INVALID) add_map_entry(start, start + size, type); } } /* * Rebuild map by replacing variable entries. Needs to be called when MTRR * registers are being changed after boot, as such changes could include * removals of registers, which are complicated to handle without rebuild of * the map. */ void generic_rebuild_map(void) { if (mtrr_if != &generic_mtrr_ops) return; cache_map_n = cache_map_fixed; map_add_var(); } static unsigned int __init get_cache_map_size(void) { return cache_map_fixed + 2 * num_var_ranges + (mtrr_tom2 != 0); } /* Build the cache_map containing the cache modes per memory range. */ void __init mtrr_build_map(void) { u64 start, end, size; unsigned int i; u8 type; /* Add fixed MTRRs, optimize for adjacent entries with same type. */ if (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED) { /* * Start with 64k size fixed entries, preset 1st one (hence the * loop below is starting with index 1). */ start = 0; end = size = 0x10000; type = mtrr_state.fixed_ranges[0]; for (i = 1; i < MTRR_NUM_FIXED_RANGES; i++) { /* 8 64k entries, then 16 16k ones, rest 4k. */ if (i == 8 || i == 24) size >>= 2; if (mtrr_state.fixed_ranges[i] != type) { add_map_entry(start, end, type); start = end; type = mtrr_state.fixed_ranges[i]; } end += size; } add_map_entry(start, end, type); } /* Mark fixed, they take precedence. 
*/ for (i = 0; i < cache_map_n; i++) cache_map[i].fixed = 1; cache_map_fixed = cache_map_n; map_add_var(); pr_info("MTRR map: %u entries (%u fixed + %u variable; max %u), built from %u variable MTRRs\n", cache_map_n, cache_map_fixed, cache_map_n - cache_map_fixed, get_cache_map_size(), num_var_ranges + (mtrr_tom2 != 0)); if (mtrr_debug) { for (i = 0; i < cache_map_n; i++) { pr_info("%3u: %016llx-%016llx %s\n", i, cache_map[i].start, cache_map[i].end - 1, mtrr_attrib_to_str(cache_map[i].type)); } } } /* Copy the cache_map from __initdata memory to dynamically allocated one. */ void __init mtrr_copy_map(void) { unsigned int new_size = get_cache_map_size(); if (!mtrr_state.enabled || !new_size) { cache_map = NULL; return; } mutex_lock(&mtrr_mutex); cache_map = kcalloc(new_size, sizeof(*cache_map), GFP_KERNEL); if (cache_map) { memmove(cache_map, init_cache_map, cache_map_n * sizeof(*cache_map)); cache_map_size = new_size; } else { mtrr_state.enabled = 0; pr_err("MTRRs disabled due to allocation failure for lookup map.\n"); } mutex_unlock(&mtrr_mutex); } /** * mtrr_overwrite_state - set static MTRR state * * Used to set MTRR state via different means (e.g. with data obtained from * a hypervisor). * Is allowed only for special cases when running virtualized. Must be called * from the x86_init.hyper.init_platform() hook. It can be called only once. * The MTRR state can't be changed afterwards. To ensure that, X86_FEATURE_MTRR * is cleared. */ void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var, mtrr_type def_type) { unsigned int i; /* Only allowed to be called once before mtrr_bp_init(). */ if (WARN_ON_ONCE(mtrr_state_set)) return; /* Only allowed when running virtualized. */ if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) return; /* * Only allowed for special virtualization cases: * - when running as Hyper-V, SEV-SNP guest using vTOM * - when running as Xen PV guest * - when running as SEV-SNP or TDX guest to avoid unnecessary * VMM communication/Virtualization exceptions (#VC, #VE) */ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !hv_is_isolation_supported() && !cpu_feature_enabled(X86_FEATURE_XENPV) && !cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) return; /* Disable MTRR in order to disable MTRR modifications. */ setup_clear_cpu_cap(X86_FEATURE_MTRR); if (var) { if (num_var > MTRR_MAX_VAR_RANGES) { pr_warn("Trying to overwrite MTRR state with %u variable entries\n", num_var); num_var = MTRR_MAX_VAR_RANGES; } for (i = 0; i < num_var; i++) mtrr_state.var_ranges[i] = var[i]; num_var_ranges = num_var; } mtrr_state.def_type = def_type; mtrr_state.enabled |= MTRR_STATE_MTRR_ENABLED; mtrr_state_set = 1; } static u8 type_merge(u8 type, u8 new_type, u8 *uniform) { u8 effective_type; if (type == MTRR_TYPE_INVALID) return new_type; effective_type = get_effective_type(type, new_type); if (type != effective_type) *uniform = 0; return effective_type; } /** * mtrr_type_lookup - look up memory type in MTRR * * Return Values: * MTRR_TYPE_(type) - The effective MTRR type for the region * MTRR_TYPE_INVALID - MTRR is disabled * * Output Argument: * uniform - Set to 1 when the returned MTRR type is valid for the whole * region, set to 0 else. */ u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform) { u8 type = MTRR_TYPE_INVALID; unsigned int i; if (!mtrr_state_set) { /* Uniformity is unknown. 
*/ *uniform = 0; return MTRR_TYPE_UNCACHABLE; } *uniform = 1; if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED)) return MTRR_TYPE_UNCACHABLE; for (i = 0; i < cache_map_n && start < end; i++) { /* Region after current map entry? -> continue with next one. */ if (start >= cache_map[i].end) continue; /* Start of region not covered by current map entry? */ if (start < cache_map[i].start) { /* At least some part of region has default type. */ type = type_merge(type, mtrr_state.def_type, uniform); /* End of region not covered, too? -> lookup done. */ if (end <= cache_map[i].start) return type; } /* At least part of region covered by map entry. */ type = type_merge(type, cache_map[i].type, uniform); start = cache_map[i].end; } /* End of region past last entry in map? -> use default type. */ if (start < end) type = type_merge(type, mtrr_state.def_type, uniform); return type; } /* Get the MSR pair relating to a var range */ static void get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) { rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); } /* Fill the MSR pair relating to a var range */ void fill_mtrr_var_range(unsigned int index, u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi) { struct mtrr_var_range *vr; vr = mtrr_state.var_ranges; vr[index].base_lo = base_lo; vr[index].base_hi = base_hi; vr[index].mask_lo = mask_lo; vr[index].mask_hi = mask_hi; } static void get_fixed_ranges(mtrr_type *frs) { unsigned int *p = (unsigned int *)frs; int i; k8_check_syscfg_dram_mod_en(); rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]); for (i = 0; i < 2; i++) rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]); for (i = 0; i < 8; i++) rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]); } void mtrr_save_fixed_ranges(void *info) { if (boot_cpu_has(X86_FEATURE_MTRR)) get_fixed_ranges(mtrr_state.fixed_ranges); } static unsigned __initdata last_fixed_start; static unsigned __initdata last_fixed_end; static mtrr_type __initdata last_fixed_type; static void __init print_fixed_last(void) { if (!last_fixed_end) return; pr_info(" %05X-%05X %s\n", last_fixed_start, last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); last_fixed_end = 0; } static void __init update_fixed_last(unsigned base, unsigned end, mtrr_type type) { last_fixed_start = base; last_fixed_end = end; last_fixed_type = type; } static void __init print_fixed(unsigned base, unsigned step, const mtrr_type *types) { unsigned i; for (i = 0; i < 8; ++i, ++types, base += step) { if (last_fixed_end == 0) { update_fixed_last(base, base + step, *types); continue; } if (last_fixed_end == base && last_fixed_type == *types) { last_fixed_end = base + step; continue; } /* new segments: gap or different type */ print_fixed_last(); update_fixed_last(base, base + step, *types); } } static void __init print_mtrr_state(void) { unsigned int i; int high_width; pr_info("MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type)); if (mtrr_state.have_fixed) { pr_info("MTRR fixed ranges %sabled:\n", ((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) && (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ? 
"en" : "dis"); print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); for (i = 0; i < 2; ++i) print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8); for (i = 0; i < 8; ++i) print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8); /* tail */ print_fixed_last(); } pr_info("MTRR variable ranges %sabled:\n", mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis"); high_width = (boot_cpu_data.x86_phys_bits - (32 - PAGE_SHIFT) + 3) / 4; for (i = 0; i < num_var_ranges; ++i) { if (mtrr_state.var_ranges[i].mask_lo & MTRR_PHYSMASK_V) pr_info(" %u base %0*X%05X000 mask %0*X%05X000 %s\n", i, high_width, mtrr_state.var_ranges[i].base_hi, mtrr_state.var_ranges[i].base_lo >> 12, high_width, mtrr_state.var_ranges[i].mask_hi, mtrr_state.var_ranges[i].mask_lo >> 12, mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & MTRR_PHYSBASE_TYPE)); else pr_info(" %u disabled\n", i); } if (mtrr_tom2) pr_info("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20); } /* Grab all of the MTRR state for this CPU into *state */ bool __init get_mtrr_state(void) { struct mtrr_var_range *vrs; unsigned lo, dummy; unsigned int i; vrs = mtrr_state.var_ranges; rdmsr(MSR_MTRRcap, lo, dummy); mtrr_state.have_fixed = lo & MTRR_CAP_FIX; for (i = 0; i < num_var_ranges; i++) get_mtrr_var_range(i, &vrs[i]); if (mtrr_state.have_fixed) get_fixed_ranges(mtrr_state.fixed_ranges); rdmsr(MSR_MTRRdefType, lo, dummy); mtrr_state.def_type = lo & MTRR_DEF_TYPE_TYPE; mtrr_state.enabled = (lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT; if (amd_special_default_mtrr()) { unsigned low, high; /* TOP_MEM2 */ rdmsr(MSR_K8_TOP_MEM2, low, high); mtrr_tom2 = high; mtrr_tom2 <<= 32; mtrr_tom2 |= low; mtrr_tom2 &= 0xffffff800000ULL; } if (mtrr_debug) print_mtrr_state(); mtrr_state_set = 1; return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED); } /* Some BIOS's are messed up and don't set all MTRRs the same! */ void __init mtrr_state_warn(void) { unsigned long mask = smp_changes_mask; if (!mask) return; if (mask & MTRR_CHANGE_MASK_FIXED) pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n"); if (mask & MTRR_CHANGE_MASK_VARIABLE) pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n"); if (mask & MTRR_CHANGE_MASK_DEFTYPE) pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n"); pr_info("mtrr: probably your BIOS does not setup all CPUs.\n"); pr_info("mtrr: corrected configuration.\n"); } /* * Doesn't attempt to pass an error out to MTRR users * because it's quite complicated in some cases and probably not * worth it because the best error handling is to ignore it. */ void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) { if (wrmsr_safe(msr, a, b) < 0) { pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", smp_processor_id(), msr, a, b); } } /** * set_fixed_range - checks & updates a fixed-range MTRR if it * differs from the value it should have * @msr: MSR address of the MTTR which should be checked and updated * @changed: pointer which indicates whether the MTRR needed to be changed * @msrwords: pointer to the MSR values which the MSR should have */ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords) { unsigned lo, hi; rdmsr(msr, lo, hi); if (lo != msrwords[0] || hi != msrwords[1]) { mtrr_wrmsr(msr, msrwords[0], msrwords[1]); *changed = true; } } /** * generic_get_free_region - Get a free MTRR. * @base: The starting (base) address of the region. * @size: The size (in bytes) of the region. 
* @replace_reg: mtrr index to be replaced; set to invalid value if none. * * Returns: The index of the region on success, else negative on error. */ int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) { unsigned long lbase, lsize; mtrr_type ltype; int i, max; max = num_var_ranges; if (replace_reg >= 0 && replace_reg < max) return replace_reg; for (i = 0; i < max; ++i) { mtrr_if->get(i, &lbase, &lsize, &ltype); if (lsize == 0) return i; } return -ENOSPC; } static void generic_get_mtrr(unsigned int reg, unsigned long *base, unsigned long *size, mtrr_type *type) { u32 mask_lo, mask_hi, base_lo, base_hi; unsigned int hi; u64 tmp, mask; /* * get_mtrr doesn't need to update mtrr_state, also it could be called * from any cpu, so try to print it out directly. */ get_cpu(); rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); if (!(mask_lo & MTRR_PHYSMASK_V)) { /* Invalid (i.e. free) range */ *base = 0; *size = 0; *type = 0; goto out_put_cpu; } rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); /* Work out the shifted address mask: */ tmp = (u64)mask_hi << 32 | (mask_lo & PAGE_MASK); mask = (u64)phys_hi_rsvd << 32 | tmp; /* Expand tmp with high bits to all 1s: */ hi = fls64(tmp); if (hi > 0) { tmp |= ~((1ULL<<(hi - 1)) - 1); if (tmp != mask) { pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n"); add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); mask = tmp; } } /* * This works correctly if size is a power of two, i.e. a * contiguous range: */ *size = -mask >> PAGE_SHIFT; *base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; *type = base_lo & MTRR_PHYSBASE_TYPE; out_put_cpu: put_cpu(); } /** * set_fixed_ranges - checks & updates the fixed-range MTRRs if they * differ from the saved set * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges() */ static int set_fixed_ranges(mtrr_type *frs) { unsigned long long *saved = (unsigned long long *)frs; bool changed = false; int block = -1, range; k8_check_syscfg_dram_mod_en(); while (fixed_range_blocks[++block].ranges) { for (range = 0; range < fixed_range_blocks[block].ranges; range++) set_fixed_range(fixed_range_blocks[block].base_msr + range, &changed, (unsigned int *)saved++); } return changed; } /* * Set the MSR pair relating to a var range. * Returns true if changes are made. */ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) { unsigned int lo, hi; bool changed = false; rdmsr(MTRRphysBase_MSR(index), lo, hi); if ((vr->base_lo & ~MTRR_PHYSBASE_RSVD) != (lo & ~MTRR_PHYSBASE_RSVD) || (vr->base_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) { mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); changed = true; } rdmsr(MTRRphysMask_MSR(index), lo, hi); if ((vr->mask_lo & ~MTRR_PHYSMASK_RSVD) != (lo & ~MTRR_PHYSMASK_RSVD) || (vr->mask_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) { mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); changed = true; } return changed; } static u32 deftype_lo, deftype_hi; /** * set_mtrr_state - Set the MTRR state for this CPU. * * NOTE: The CPU must already be in a safe state for MTRR changes, including * measures that only a single CPU can be active in set_mtrr_state() in * order to not be subject to races for usage of deftype_lo. This is * accomplished by taking cache_disable_lock. * RETURNS: 0 if no changes made, else a mask indicating what was changed. 
*/ static unsigned long set_mtrr_state(void) { unsigned long change_mask = 0; unsigned int i; for (i = 0; i < num_var_ranges; i++) { if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) change_mask |= MTRR_CHANGE_MASK_VARIABLE; } if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) change_mask |= MTRR_CHANGE_MASK_FIXED; /* * Set_mtrr_restore restores the old value of MTRRdefType, * so to set it we fiddle with the saved value: */ if ((deftype_lo & MTRR_DEF_TYPE_TYPE) != mtrr_state.def_type || ((deftype_lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT) != mtrr_state.enabled) { deftype_lo = (deftype_lo & MTRR_DEF_TYPE_DISABLE) | mtrr_state.def_type | (mtrr_state.enabled << MTRR_STATE_SHIFT); change_mask |= MTRR_CHANGE_MASK_DEFTYPE; } return change_mask; } void mtrr_disable(void) { /* Save MTRR state */ rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); /* Disable MTRRs, and set the default type to uncached */ mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & MTRR_DEF_TYPE_DISABLE, deftype_hi); } void mtrr_enable(void) { /* Intel (P6) standard MTRRs */ mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); } void mtrr_generic_set_state(void) { unsigned long mask, count; /* Actually set the state */ mask = set_mtrr_state(); /* Use the atomic bitops to update the global mask */ for (count = 0; count < sizeof(mask) * 8; ++count) { if (mask & 0x01) set_bit(count, &smp_changes_mask); mask >>= 1; } } /** * generic_set_mtrr - set variable MTRR register on the local CPU. * * @reg: The register to set. * @base: The base address of the region. * @size: The size of the region. If this is 0 the region is disabled. * @type: The type of the region. * * Returns nothing. */ static void generic_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) { unsigned long flags; struct mtrr_var_range *vr; vr = &mtrr_state.var_ranges[reg]; local_irq_save(flags); cache_disable(); if (size == 0) { /* * The invalid bit is kept in the mask, so we simply * clear the relevant mask register to disable a range. 
*/ mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); memset(vr, 0, sizeof(struct mtrr_var_range)); } else { vr->base_lo = base << PAGE_SHIFT | type; vr->base_hi = (base >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd; vr->mask_lo = -size << PAGE_SHIFT | MTRR_PHYSMASK_V; vr->mask_hi = (-size >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd; mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi); mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi); } cache_enable(); local_irq_restore(flags); } int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type) { unsigned long lbase, last; /* * For Intel PPro stepping <= 7 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF */ if (mtrr_if == &generic_mtrr_ops && boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 1 && boot_cpu_data.x86_stepping <= 7) { if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); return -EINVAL; } if (!(base + size < 0x70000 || base > 0x7003F) && (type == MTRR_TYPE_WRCOMB || type == MTRR_TYPE_WRBACK)) { pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); return -EINVAL; } } /* * Check upper bits of base and last are equal and lower bits are 0 * for base and 1 for last */ last = base + size - 1; for (lbase = base; !(lbase & 1) && (last & 1); lbase = lbase >> 1, last = last >> 1) ; if (lbase != last) { pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size); return -EINVAL; } return 0; } static int generic_have_wrcomb(void) { unsigned long config, dummy; rdmsr(MSR_MTRRcap, config, dummy); return config & MTRR_CAP_WC; } int positive_have_wrcomb(void) { return 1; } /* * Generic structure... */ const struct mtrr_ops generic_mtrr_ops = { .get = generic_get_mtrr, .get_free_region = generic_get_free_region, .set = generic_set_mtrr, .validate_add_page = generic_validate_add_page, .have_wrcomb = generic_have_wrcomb, };
linux-master
arch/x86/kernel/cpu/mtrr/generic.c
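get_mtrr_size() and get_var_mtrr_state() above recover a variable range from its MTRRphysBase/MTRRphysMask pair by filling in the reserved high bits and taking the two's complement of the mask. A stand-alone sketch (not kernel code) of that decode, assuming 36 physical address bits and invented MSR contents:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int phys_bits = 36;
	/* Bits above phys_bits are reserved, mirroring phys_hi_rsvd. */
	uint64_t rsvd = ~((1ULL << phys_bits) - 1);

	uint64_t physbase = 0x0000000000000006ULL;	/* base 0, type 6 = WB */
	uint64_t physmask = 0x0000000FF0000800ULL;	/* valid bit 11 set    */

	/* Strip the low 12 bits (PAGE_MASK), then size = -(mask | reserved). */
	uint64_t start = physbase & ~0xFFFULL;
	uint64_t size  = -((physmask & ~0xFFFULL) | rsvd);

	/* Decodes to start 0, size 0x10000000 (256 MiB), type 6 (write-back). */
	printf("start=%#llx size=%#llx type=%llu\n",
	       (unsigned long long)start, (unsigned long long)size,
	       (unsigned long long)(physbase & 0xFF));
	return 0;
}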
/* * MTRR (Memory Type Range Register) cleanup * * Copyright (C) 2009 Yinghai Lu * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/pci.h> #include <linux/smp.h> #include <linux/cpu.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/kvm_para.h> #include <linux/range.h> #include <asm/processor.h> #include <asm/e820/api.h> #include <asm/mtrr.h> #include <asm/msr.h> #include "mtrr.h" struct var_mtrr_range_state { unsigned long base_pfn; unsigned long size_pfn; mtrr_type type; }; struct var_mtrr_state { unsigned long range_startk; unsigned long range_sizek; unsigned long chunk_sizek; unsigned long gran_sizek; unsigned int reg; }; /* Should be related to MTRR_VAR_RANGES nums */ #define RANGE_NUM 256 static struct range __initdata range[RANGE_NUM]; static int __initdata nr_range; static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; #define BIOS_BUG_MSG \ "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n" static int __init x86_get_mtrr_mem_range(struct range *range, int nr_range, unsigned long extra_remove_base, unsigned long extra_remove_size) { unsigned long base, size; mtrr_type type; int i; for (i = 0; i < num_var_ranges; i++) { type = range_state[i].type; if (type != MTRR_TYPE_WRBACK) continue; base = range_state[i].base_pfn; size = range_state[i].size_pfn; nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, base, base + size); } Dprintk("After WB checking\n"); for (i = 0; i < nr_range; i++) Dprintk("MTRR MAP PFN: %016llx - %016llx\n", range[i].start, range[i].end); /* Take out UC ranges: */ for (i = 0; i < num_var_ranges; i++) { type = range_state[i].type; if (type != MTRR_TYPE_UNCACHABLE && type != MTRR_TYPE_WRPROT) continue; size = range_state[i].size_pfn; if (!size) continue; base = range_state[i].base_pfn; if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && (mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) && (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) { /* Var MTRR contains UC entry below 1M? 
Skip it: */ pr_warn(BIOS_BUG_MSG, i); if (base + size <= (1<<(20-PAGE_SHIFT))) continue; size -= (1<<(20-PAGE_SHIFT)) - base; base = 1<<(20-PAGE_SHIFT); } subtract_range(range, RANGE_NUM, base, base + size); } if (extra_remove_size) subtract_range(range, RANGE_NUM, extra_remove_base, extra_remove_base + extra_remove_size); Dprintk("After UC checking\n"); for (i = 0; i < RANGE_NUM; i++) { if (!range[i].end) continue; Dprintk("MTRR MAP PFN: %016llx - %016llx\n", range[i].start, range[i].end); } /* sort the ranges */ nr_range = clean_sort_range(range, RANGE_NUM); Dprintk("After sorting\n"); for (i = 0; i < nr_range; i++) Dprintk("MTRR MAP PFN: %016llx - %016llx\n", range[i].start, range[i].end); return nr_range; } #ifdef CONFIG_MTRR_SANITIZER static unsigned long __init sum_ranges(struct range *range, int nr_range) { unsigned long sum = 0; int i; for (i = 0; i < nr_range; i++) sum += range[i].end - range[i].start; return sum; } static int enable_mtrr_cleanup __initdata = CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT; static int __init disable_mtrr_cleanup_setup(char *str) { enable_mtrr_cleanup = 0; return 0; } early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup); static int __init enable_mtrr_cleanup_setup(char *str) { enable_mtrr_cleanup = 1; return 0; } early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); static void __init set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, unsigned char type) { u32 base_lo, base_hi, mask_lo, mask_hi; u64 base, mask; if (!sizek) { fill_mtrr_var_range(reg, 0, 0, 0, 0); return; } mask = (1ULL << boot_cpu_data.x86_phys_bits) - 1; mask &= ~((((u64)sizek) << 10) - 1); base = ((u64)basek) << 10; base |= type; mask |= 0x800; base_lo = base & ((1ULL<<32) - 1); base_hi = base >> 32; mask_lo = mask & ((1ULL<<32) - 1); mask_hi = mask >> 32; fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi); } static void __init save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, unsigned char type) { range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); range_state[reg].type = type; } static void __init set_var_mtrr_all(void) { unsigned long basek, sizek; unsigned char type; unsigned int reg; for (reg = 0; reg < num_var_ranges; reg++) { basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10); sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10); type = range_state[reg].type; set_var_mtrr(reg, basek, sizek, type); } } static unsigned long to_size_factor(unsigned long sizek, char *factorp) { unsigned long base = sizek; char factor; if (base & ((1<<10) - 1)) { /* Not MB-aligned: */ factor = 'K'; } else if (base & ((1<<20) - 1)) { factor = 'M'; base >>= 10; } else { factor = 'G'; base >>= 20; } *factorp = factor; return base; } static unsigned int __init range_to_mtrr(unsigned int reg, unsigned long range_startk, unsigned long range_sizek, unsigned char type) { if (!range_sizek || (reg >= num_var_ranges)) return reg; while (range_sizek) { unsigned long max_align, align; unsigned long sizek; /* Compute the maximum size with which we can make a range: */ if (range_startk) max_align = __ffs(range_startk); else max_align = BITS_PER_LONG - 1; align = __fls(range_sizek); if (align > max_align) align = max_align; sizek = 1UL << align; if (mtrr_debug) { char start_factor = 'K', size_factor = 'K'; unsigned long start_base, size_base; start_base = to_size_factor(range_startk, &start_factor); size_base = to_size_factor(sizek, &size_factor); Dprintk("Setting variable MTRR %d, 
" "base: %ld%cB, range: %ld%cB, type %s\n", reg, start_base, start_factor, size_base, size_factor, (type == MTRR_TYPE_UNCACHABLE) ? "UC" : ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other") ); } save_var_mtrr(reg++, range_startk, sizek, type); range_startk += sizek; range_sizek -= sizek; if (reg >= num_var_ranges) break; } return reg; } static unsigned __init range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, unsigned long sizek) { unsigned long hole_basek, hole_sizek; unsigned long second_sizek; unsigned long range0_basek, range0_sizek; unsigned long range_basek, range_sizek; unsigned long chunk_sizek; unsigned long gran_sizek; hole_basek = 0; hole_sizek = 0; second_sizek = 0; chunk_sizek = state->chunk_sizek; gran_sizek = state->gran_sizek; /* Align with gran size, prevent small block used up MTRRs: */ range_basek = ALIGN(state->range_startk, gran_sizek); if ((range_basek > basek) && basek) return second_sizek; state->range_sizek -= (range_basek - state->range_startk); range_sizek = ALIGN(state->range_sizek, gran_sizek); while (range_sizek > state->range_sizek) { range_sizek -= gran_sizek; if (!range_sizek) return 0; } state->range_sizek = range_sizek; /* Try to append some small hole: */ range0_basek = state->range_startk; range0_sizek = ALIGN(state->range_sizek, chunk_sizek); /* No increase: */ if (range0_sizek == state->range_sizek) { Dprintk("rangeX: %016lx - %016lx\n", range0_basek<<10, (range0_basek + state->range_sizek)<<10); state->reg = range_to_mtrr(state->reg, range0_basek, state->range_sizek, MTRR_TYPE_WRBACK); return 0; } /* Only cut back when it is not the last: */ if (sizek) { while (range0_basek + range0_sizek > (basek + sizek)) { if (range0_sizek >= chunk_sizek) range0_sizek -= chunk_sizek; else range0_sizek = 0; if (!range0_sizek) break; } } second_try: range_basek = range0_basek + range0_sizek; /* One hole in the middle: */ if (range_basek > basek && range_basek <= (basek + sizek)) second_sizek = range_basek - basek; if (range0_sizek > state->range_sizek) { /* One hole in middle or at the end: */ hole_sizek = range0_sizek - state->range_sizek - second_sizek; /* Hole size should be less than half of range0 size: */ if (hole_sizek >= (range0_sizek >> 1) && range0_sizek >= chunk_sizek) { range0_sizek -= chunk_sizek; second_sizek = 0; hole_sizek = 0; goto second_try; } } if (range0_sizek) { Dprintk("range0: %016lx - %016lx\n", range0_basek<<10, (range0_basek + range0_sizek)<<10); state->reg = range_to_mtrr(state->reg, range0_basek, range0_sizek, MTRR_TYPE_WRBACK); } if (range0_sizek < state->range_sizek) { /* Need to handle left over range: */ range_sizek = state->range_sizek - range0_sizek; Dprintk("range: %016lx - %016lx\n", range_basek<<10, (range_basek + range_sizek)<<10); state->reg = range_to_mtrr(state->reg, range_basek, range_sizek, MTRR_TYPE_WRBACK); } if (hole_sizek) { hole_basek = range_basek - hole_sizek - second_sizek; Dprintk("hole: %016lx - %016lx\n", hole_basek<<10, (hole_basek + hole_sizek)<<10); state->reg = range_to_mtrr(state->reg, hole_basek, hole_sizek, MTRR_TYPE_UNCACHABLE); } return second_sizek; } static void __init set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn, unsigned long size_pfn) { unsigned long basek, sizek; unsigned long second_sizek = 0; if (state->reg >= num_var_ranges) return; basek = base_pfn << (PAGE_SHIFT - 10); sizek = size_pfn << (PAGE_SHIFT - 10); /* See if I can merge with the last range: */ if ((basek <= 1024) || (state->range_startk + state->range_sizek == basek)) { unsigned long endk 
= basek + sizek; state->range_sizek = endk - state->range_startk; return; } /* Write the range mtrrs: */ if (state->range_sizek != 0) second_sizek = range_to_mtrr_with_hole(state, basek, sizek); /* Allocate an msr: */ state->range_startk = basek + second_sizek; state->range_sizek = sizek - second_sizek; } /* Minimum size of mtrr block that can take hole: */ static u64 mtrr_chunk_size __initdata = (256ULL<<20); static int __init parse_mtrr_chunk_size_opt(char *p) { if (!p) return -EINVAL; mtrr_chunk_size = memparse(p, &p); return 0; } early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); /* Granularity of mtrr of block: */ static u64 mtrr_gran_size __initdata; static int __init parse_mtrr_gran_size_opt(char *p) { if (!p) return -EINVAL; mtrr_gran_size = memparse(p, &p); return 0; } early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); static unsigned long nr_mtrr_spare_reg __initdata = CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT; static int __init parse_mtrr_spare_reg(char *arg) { if (arg) nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0); return 0; } early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); static int __init x86_setup_var_mtrrs(struct range *range, int nr_range, u64 chunk_size, u64 gran_size) { struct var_mtrr_state var_state; int num_reg; int i; var_state.range_startk = 0; var_state.range_sizek = 0; var_state.reg = 0; var_state.chunk_sizek = chunk_size >> 10; var_state.gran_sizek = gran_size >> 10; memset(range_state, 0, sizeof(range_state)); /* Write the range: */ for (i = 0; i < nr_range; i++) { set_var_mtrr_range(&var_state, range[i].start, range[i].end - range[i].start); } /* Write the last range: */ if (var_state.range_sizek != 0) range_to_mtrr_with_hole(&var_state, 0, 0); num_reg = var_state.reg; /* Clear out the extra MTRR's: */ while (var_state.reg < num_var_ranges) { save_var_mtrr(var_state.reg, 0, 0, 0); var_state.reg++; } return num_reg; } struct mtrr_cleanup_result { unsigned long gran_sizek; unsigned long chunk_sizek; unsigned long lose_cover_sizek; unsigned int num_reg; int bad; }; /* * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G * chunk size: gran_size, ..., 2G * so we need (1+16)*8 */ #define NUM_RESULT 136 #define PSHIFT (PAGE_SHIFT - 10) static struct mtrr_cleanup_result __initdata result[NUM_RESULT]; static unsigned long __initdata min_loss_pfn[RANGE_NUM]; static void __init print_out_mtrr_range_state(void) { char start_factor = 'K', size_factor = 'K'; unsigned long start_base, size_base; mtrr_type type; int i; for (i = 0; i < num_var_ranges; i++) { size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10); if (!size_base) continue; size_base = to_size_factor(size_base, &size_factor); start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10); start_base = to_size_factor(start_base, &start_factor); type = range_state[i].type; Dprintk("reg %d, base: %ld%cB, range: %ld%cB, type %s\n", i, start_base, start_factor, size_base, size_factor, (type == MTRR_TYPE_UNCACHABLE) ? "UC" : ((type == MTRR_TYPE_WRPROT) ? "WP" : ((type == MTRR_TYPE_WRBACK) ? 
"WB" : "Other")) ); } } static int __init mtrr_need_cleanup(void) { int i; mtrr_type type; unsigned long size; /* Extra one for all 0: */ int num[MTRR_NUM_TYPES + 1]; /* Check entries number: */ memset(num, 0, sizeof(num)); for (i = 0; i < num_var_ranges; i++) { type = range_state[i].type; size = range_state[i].size_pfn; if (type >= MTRR_NUM_TYPES) continue; if (!size) type = MTRR_NUM_TYPES; num[type]++; } /* Check if we got UC entries: */ if (!num[MTRR_TYPE_UNCACHABLE]) return 0; /* Check if we only had WB and UC */ if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != num_var_ranges - num[MTRR_NUM_TYPES]) return 0; return 1; } static unsigned long __initdata range_sums; static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size, unsigned long x_remove_base, unsigned long x_remove_size, int i) { /* * range_new should really be an automatic variable, but * putting 4096 bytes on the stack is frowned upon, to put it * mildly. It is safe to make it a static __initdata variable, * since mtrr_calc_range_state is only called during init and * there's no way it will call itself recursively. */ static struct range range_new[RANGE_NUM] __initdata; unsigned long range_sums_new; int nr_range_new; int num_reg; /* Convert ranges to var ranges state: */ num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); /* We got new setting in range_state, check it: */ memset(range_new, 0, sizeof(range_new)); nr_range_new = x86_get_mtrr_mem_range(range_new, 0, x_remove_base, x_remove_size); range_sums_new = sum_ranges(range_new, nr_range_new); result[i].chunk_sizek = chunk_size >> 10; result[i].gran_sizek = gran_size >> 10; result[i].num_reg = num_reg; if (range_sums < range_sums_new) { result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT; result[i].bad = 1; } else { result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT; } /* Double check it: */ if (!result[i].bad && !result[i].lose_cover_sizek) { if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range))) result[i].bad = 1; } if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg])) min_loss_pfn[num_reg] = range_sums - range_sums_new; } static void __init mtrr_print_out_one_result(int i) { unsigned long gran_base, chunk_base, lose_base; char gran_factor, chunk_factor, lose_factor; gran_base = to_size_factor(result[i].gran_sizek, &gran_factor); chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor); lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor); pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t", result[i].bad ? "*BAD*" : " ", gran_base, gran_factor, chunk_base, chunk_factor); pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n", result[i].num_reg, result[i].bad ? 
"-" : "", lose_base, lose_factor); } static int __init mtrr_search_optimal_index(void) { int num_reg_good; int index_good; int i; if (nr_mtrr_spare_reg >= num_var_ranges) nr_mtrr_spare_reg = num_var_ranges - 1; num_reg_good = -1; for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { if (!min_loss_pfn[i]) num_reg_good = i; } index_good = -1; if (num_reg_good != -1) { for (i = 0; i < NUM_RESULT; i++) { if (!result[i].bad && result[i].num_reg == num_reg_good && !result[i].lose_cover_sizek) { index_good = i; break; } } } return index_good; } int __init mtrr_cleanup(void) { unsigned long x_remove_base, x_remove_size; unsigned long base, size, def, dummy; u64 chunk_size, gran_size; mtrr_type type; int index_good; int i; if (!mtrr_enabled()) return 0; if (!cpu_feature_enabled(X86_FEATURE_MTRR) || enable_mtrr_cleanup < 1) return 0; rdmsr(MSR_MTRRdefType, def, dummy); def &= 0xff; if (def != MTRR_TYPE_UNCACHABLE) return 0; /* Get it and store it aside: */ memset(range_state, 0, sizeof(range_state)); for (i = 0; i < num_var_ranges; i++) { mtrr_if->get(i, &base, &size, &type); range_state[i].base_pfn = base; range_state[i].size_pfn = size; range_state[i].type = type; } /* Check if we need handle it and can handle it: */ if (!mtrr_need_cleanup()) return 0; /* Print original var MTRRs at first, for debugging: */ Dprintk("original variable MTRRs\n"); print_out_mtrr_range_state(); memset(range, 0, sizeof(range)); x_remove_size = 0; x_remove_base = 1 << (32 - PAGE_SHIFT); if (mtrr_tom2) x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; /* * [0, 1M) should always be covered by var mtrr with WB * and fixed mtrrs should take effect before var mtrr for it: */ nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0, 1ULL<<(20 - PAGE_SHIFT)); /* add from var mtrr at last */ nr_range = x86_get_mtrr_mem_range(range, nr_range, x_remove_base, x_remove_size); range_sums = sum_ranges(range, nr_range); pr_info("total RAM covered: %ldM\n", range_sums >> (20 - PAGE_SHIFT)); if (mtrr_chunk_size && mtrr_gran_size) { i = 0; mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, x_remove_base, x_remove_size, i); mtrr_print_out_one_result(i); if (!result[i].bad) { set_var_mtrr_all(); Dprintk("New variable MTRRs\n"); print_out_mtrr_range_state(); return 1; } pr_info("invalid mtrr_gran_size or mtrr_chunk_size, will find optimal one\n"); } i = 0; memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn)); memset(result, 0, sizeof(result)); for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) { for (chunk_size = gran_size; chunk_size < (1ULL<<32); chunk_size <<= 1) { if (i >= NUM_RESULT) continue; mtrr_calc_range_state(chunk_size, gran_size, x_remove_base, x_remove_size, i); if (mtrr_debug) { mtrr_print_out_one_result(i); pr_info("\n"); } i++; } } /* Try to find the optimal index: */ index_good = mtrr_search_optimal_index(); if (index_good != -1) { pr_info("Found optimal setting for mtrr clean up\n"); i = index_good; mtrr_print_out_one_result(i); /* Convert ranges to var ranges state: */ chunk_size = result[i].chunk_sizek; chunk_size <<= 10; gran_size = result[i].gran_sizek; gran_size <<= 10; x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); set_var_mtrr_all(); Dprintk("New variable MTRRs\n"); print_out_mtrr_range_state(); return 1; } else { /* print out all */ for (i = 0; i < NUM_RESULT; i++) mtrr_print_out_one_result(i); } pr_info("mtrr_cleanup: can not find optimal value\n"); pr_info("please specify mtrr_gran_size/mtrr_chunk_size\n"); return 0; } #else int __init mtrr_cleanup(void) { return 0; } 
#endif static int disable_mtrr_trim; static int __init disable_mtrr_trim_setup(char *str) { disable_mtrr_trim = 1; return 0; } early_param("disable_mtrr_trim", disable_mtrr_trim_setup); /* * Newer AMD K8s and later CPUs have a special magic MSR way to force WB * for memory >4GB. Check for that here. * Note this won't check if the MTRRs < 4GB where the magic bit doesn't * apply to are wrong, but so far we don't know of any such case in the wild. */ #define Tom2Enabled (1U << 21) #define Tom2ForceMemTypeWB (1U << 22) int __init amd_special_default_mtrr(void) { u32 l, h; if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return 0; if (boot_cpu_data.x86 < 0xf) return 0; /* In case some hypervisor doesn't pass SYSCFG through: */ if (rdmsr_safe(MSR_AMD64_SYSCFG, &l, &h) < 0) return 0; /* * Memory between 4GB and top of mem is forced WB by this magic bit. * Reserved before K8RevF, but should be zero there. */ if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) == (Tom2Enabled | Tom2ForceMemTypeWB)) return 1; return 0; } static u64 __init real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn) { u64 trim_start, trim_size; trim_start = start_pfn; trim_start <<= PAGE_SHIFT; trim_size = limit_pfn; trim_size <<= PAGE_SHIFT; trim_size -= trim_start; return e820__range_update(trim_start, trim_size, E820_TYPE_RAM, E820_TYPE_RESERVED); } /** * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs * @end_pfn: ending page frame number * * Some buggy BIOSes don't setup the MTRRs properly for systems with certain * memory configurations. This routine checks that the highest MTRR matches * the end of memory, to make sure the MTRRs having a write back type cover * all of the memory the kernel is intending to use. If not, it'll trim any * memory off the end by adjusting end_pfn, removing it from the kernel's * allocation pools, warning the user with an obnoxious message. */ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) { unsigned long i, base, size, highest_pfn = 0, def, dummy; mtrr_type type; u64 total_trim_size; /* extra one for all 0 */ int num[MTRR_NUM_TYPES + 1]; if (!mtrr_enabled()) return 0; /* * Make sure we only trim uncachable memory on machines that * support the Intel MTRR architecture: */ if (!cpu_feature_enabled(X86_FEATURE_MTRR) || disable_mtrr_trim) return 0; rdmsr(MSR_MTRRdefType, def, dummy); def &= MTRR_DEF_TYPE_TYPE; if (def != MTRR_TYPE_UNCACHABLE) return 0; /* Get it and store it aside: */ memset(range_state, 0, sizeof(range_state)); for (i = 0; i < num_var_ranges; i++) { mtrr_if->get(i, &base, &size, &type); range_state[i].base_pfn = base; range_state[i].size_pfn = size; range_state[i].type = type; } /* Find highest cached pfn: */ for (i = 0; i < num_var_ranges; i++) { type = range_state[i].type; if (type != MTRR_TYPE_WRBACK) continue; base = range_state[i].base_pfn; size = range_state[i].size_pfn; if (highest_pfn < base + size) highest_pfn = base + size; } /* kvm/qemu doesn't have mtrr set right, don't trim them all: */ if (!highest_pfn) { pr_info("CPU MTRRs all blank - virtualized system.\n"); return 0; } /* Check entries number: */ memset(num, 0, sizeof(num)); for (i = 0; i < num_var_ranges; i++) { type = range_state[i].type; if (type >= MTRR_NUM_TYPES) continue; size = range_state[i].size_pfn; if (!size) type = MTRR_NUM_TYPES; num[type]++; } /* No entry for WB? 
*/ if (!num[MTRR_TYPE_WRBACK]) return 0; /* Check if we only had WB and UC: */ if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != num_var_ranges - num[MTRR_NUM_TYPES]) return 0; memset(range, 0, sizeof(range)); nr_range = 0; if (mtrr_tom2) { range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT)); range[nr_range].end = mtrr_tom2 >> PAGE_SHIFT; if (highest_pfn < range[nr_range].end) highest_pfn = range[nr_range].end; nr_range++; } nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); /* Check the head: */ total_trim_size = 0; if (range[0].start) total_trim_size += real_trim_memory(0, range[0].start); /* Check the holes: */ for (i = 0; i < nr_range - 1; i++) { if (range[i].end < range[i+1].start) total_trim_size += real_trim_memory(range[i].end, range[i+1].start); } /* Check the top: */ i = nr_range - 1; if (range[i].end < end_pfn) total_trim_size += real_trim_memory(range[i].end, end_pfn); if (total_trim_size) { pr_warn("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20); if (!changed_by_mtrr_cleanup) WARN_ON(1); pr_info("update e820 for mtrr\n"); e820__update_table_print(); return 1; } return 0; }
linux-master
arch/x86/kernel/cpu/mtrr/cleanup.c
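The cleanup pass above rounds each WB range down to mtrr_gran_size granularity, then up to a whole mtrr_chunk_size chunk, and covers the overshoot with a single UC hole so fewer variable MTRRs are consumed. Below is a minimal user-space sketch of just that rounding step, with all sizes in KiB as in the *_sizek variables; round_down_k()/round_up_k() are hypothetical stand-ins for the kernel's ALIGN-style macros, and the real range_to_mtrr_with_hole() additionally cuts range0 back near the end of memory and handles leftover/second ranges.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's rounding macros; 'a' must be a power of two. */
static unsigned long round_down_k(unsigned long x, unsigned long a) { return x & ~(a - 1); }
static unsigned long round_up_k(unsigned long x, unsigned long a)   { return (x + a - 1) & ~(a - 1); }

int main(void)
{
	/* Example inputs: mtrr_gran_size=64M, mtrr_chunk_size=256M, a 3GiB+192MiB WB range. */
	unsigned long gran_sizek  = 64UL * 1024;
	unsigned long chunk_sizek = 256UL * 1024;
	unsigned long range_sizek = (3UL * 1024 + 192) * 1024;

	/* Drop anything smaller than the granularity ... */
	unsigned long trimmed = round_down_k(range_sizek, gran_sizek);
	/* ... then round the WB mapping up to a whole chunk and punch a UC hole for the overshoot. */
	unsigned long range0_sizek = round_up_k(trimmed, chunk_sizek);
	unsigned long hole_sizek   = range0_sizek - trimmed;

	printf("WB: %luK (%luM), UC hole: %luK (%luM)\n",
	       range0_sizek, range0_sizek >> 10, hole_sizek, hole_sizek >> 10);
	return 0;
}

The gran and chunk values in this sketch correspond to what the mtrr_gran_size= and mtrr_chunk_size= early parameters (parsed just after set_var_mtrr_range() above) feed into the state structure.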
// SPDX-License-Identifier: GPL-2.0-only #include <linux/types.h> #include <linux/slab.h> #include <linux/syscore_ops.h> #include <asm/cpufeature.h> #include <asm/mtrr.h> #include <asm/processor.h> #include "mtrr.h" void mtrr_set_if(void) { switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: /* Pre-Athlon (K6) AMD CPU MTRRs */ if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) mtrr_if = &amd_mtrr_ops; break; case X86_VENDOR_CENTAUR: if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) mtrr_if = &centaur_mtrr_ops; break; case X86_VENDOR_CYRIX: if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) mtrr_if = &cyrix_mtrr_ops; break; default: break; } } /* * The suspend/resume methods are only for CPUs without MTRR. CPUs using generic * MTRR driver don't require this. */ struct mtrr_value { mtrr_type ltype; unsigned long lbase; unsigned long lsize; }; static struct mtrr_value *mtrr_value; static int mtrr_save(void) { int i; if (!mtrr_value) return -ENOMEM; for (i = 0; i < num_var_ranges; i++) { mtrr_if->get(i, &mtrr_value[i].lbase, &mtrr_value[i].lsize, &mtrr_value[i].ltype); } return 0; } static void mtrr_restore(void) { int i; for (i = 0; i < num_var_ranges; i++) { if (mtrr_value[i].lsize) { mtrr_if->set(i, mtrr_value[i].lbase, mtrr_value[i].lsize, mtrr_value[i].ltype); } } } static struct syscore_ops mtrr_syscore_ops = { .suspend = mtrr_save, .resume = mtrr_restore, }; void mtrr_register_syscore(void) { mtrr_value = kcalloc(num_var_ranges, sizeof(*mtrr_value), GFP_KERNEL); /* * The CPU has no MTRR and seems to not support SMP. They have * specific drivers, we use a tricky method to support * suspend/resume for them. * * TBD: is there any system with such CPU which supports * suspend/resume? If no, we should remove the code. */ register_syscore_ops(&mtrr_syscore_ops); }
linux-master
arch/x86/kernel/cpu/mtrr/legacy.c
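legacy.c snapshots every variable range before suspend and rewrites only the non-empty ones on resume. The following stand-alone sketch restates that save/restore pattern under stated assumptions: mtrr_snapshot, hw_get()/hw_set() and NUM_RANGES are hypothetical stand-ins for struct mtrr_value, mtrr_if->get()/set() and num_var_ranges.

#include <stdio.h>
#include <stdlib.h>

#define NUM_RANGES 4	/* stands in for num_var_ranges */

struct mtrr_snapshot {	/* mirrors struct mtrr_value: type, base, size */
	unsigned char type;
	unsigned long base;
	unsigned long size;
};

/* Hypothetical hardware accessors standing in for mtrr_if->get()/set(). */
static struct mtrr_snapshot hw[NUM_RANGES] = {
	{ 6, 0x0,      0x100000 }, { 0, 0, 0 }, { 6, 0x100000, 0x40000 }, { 0, 0, 0 },
};
static void hw_get(int i, struct mtrr_snapshot *v)       { *v = hw[i]; }
static void hw_set(int i, const struct mtrr_snapshot *v) { hw[i] = *v; printf("restored reg %d\n", i); }

int main(void)
{
	struct mtrr_snapshot *saved = calloc(NUM_RANGES, sizeof(*saved));
	int i;

	if (!saved)
		return 1;			/* mtrr_save() likewise bails out with -ENOMEM */

	for (i = 0; i < NUM_RANGES; i++)	/* suspend path: record everything */
		hw_get(i, &saved[i]);

	for (i = 0; i < NUM_RANGES; i++)	/* resume path: rewrite only populated registers */
		if (saved[i].size)
			hw_set(i, &saved[i]);

	free(saved);
	return 0;
}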
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2016-20 Intel Corporation. */ #include <linux/lockdep.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/shmem_fs.h> #include <linux/suspend.h> #include <linux/sched/mm.h> #include <asm/sgx.h> #include "encl.h" #include "encls.h" #include "sgx.h" static int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index, struct sgx_backing *backing); #define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd)) /* * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to * determine the page index associated with the first PCMD entry * within a PCMD page. */ #define PCMD_FIRST_MASK GENMASK(4, 0) /** * reclaimer_writing_to_pcmd() - Query if any enclave page associated with * a PCMD page is in process of being reclaimed. * @encl: Enclave to which PCMD page belongs * @start_addr: Address of enclave page using first entry within the PCMD page * * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is * stored. The PCMD data of a reclaimed enclave page contains enough * information for the processor to verify the page at the time * it is loaded back into the Enclave Page Cache (EPC). * * The backing storage to which enclave pages are reclaimed is laid out as * follows: * Encrypted enclave pages:SECS page:PCMD pages * * Each PCMD page contains the PCMD metadata of * PAGE_SIZE/sizeof(struct sgx_pcmd) enclave pages. * * A PCMD page can only be truncated if it is (a) empty, and (b) not in the * process of getting data (and thus soon being non-empty). (b) is tested with * a check if an enclave page sharing the PCMD page is in the process of being * reclaimed. * * The reclaimer sets the SGX_ENCL_PAGE_BEING_RECLAIMED flag when it * intends to reclaim that enclave page - it means that the PCMD page * associated with that enclave page is about to get some data and thus * even if the PCMD page is empty, it should not be truncated. * * Context: Enclave mutex (&sgx_encl->lock) must be held. * Return: 1 if the reclaimer is about to write to the PCMD page * 0 if the reclaimer has no intention to write to the PCMD page */ static int reclaimer_writing_to_pcmd(struct sgx_encl *encl, unsigned long start_addr) { int reclaimed = 0; int i; /* * PCMD_FIRST_MASK is based on number of PCMD entries within * PCMD page being 32. */ BUILD_BUG_ON(PCMDS_PER_PAGE != 32); for (i = 0; i < PCMDS_PER_PAGE; i++) { struct sgx_encl_page *entry; unsigned long addr; addr = start_addr + i * PAGE_SIZE; /* * Stop when reaching the SECS page - it does not * have a page_array entry and its reclaim is * started and completed with enclave mutex held so * it does not use the SGX_ENCL_PAGE_BEING_RECLAIMED * flag. */ if (addr == encl->base + encl->size) break; entry = xa_load(&encl->page_array, PFN_DOWN(addr)); if (!entry) continue; /* * VA page slot ID uses same bit as the flag so it is important * to ensure that the page is not already in backing store. */ if (entry->epc_page && (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) { reclaimed = 1; break; } } return reclaimed; } /* * Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's * follow right after the EPC data in the backing storage. In addition to the * visible enclave pages, there's one extra page slot for SECS, before PCMD * structs. 
*/ static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl, unsigned long page_index) { pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs); return epc_end_off + page_index * sizeof(struct sgx_pcmd); } /* * Free a page from the backing storage in the given page index. */ static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index) { struct inode *inode = file_inode(encl->backing); shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1); } /* * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC * Pages" in the SDM. */ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page, struct sgx_epc_page *epc_page, struct sgx_epc_page *secs_page) { unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK; struct sgx_encl *encl = encl_page->encl; pgoff_t page_index, page_pcmd_off; unsigned long pcmd_first_page; struct sgx_pageinfo pginfo; struct sgx_backing b; bool pcmd_page_empty; u8 *pcmd_page; int ret; if (secs_page) page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base); else page_index = PFN_DOWN(encl->size); /* * Address of enclave page using the first entry within the PCMD page. */ pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base; page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index); ret = sgx_encl_lookup_backing(encl, page_index, &b); if (ret) return ret; pginfo.addr = encl_page->desc & PAGE_MASK; pginfo.contents = (unsigned long)kmap_local_page(b.contents); pcmd_page = kmap_local_page(b.pcmd); pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset; if (secs_page) pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page); else pginfo.secs = 0; ret = __eldu(&pginfo, sgx_get_epc_virt_addr(epc_page), sgx_get_epc_virt_addr(encl_page->va_page->epc_page) + va_offset); if (ret) { if (encls_failed(ret)) ENCLS_WARN(ret, "ELDU"); ret = -EFAULT; } memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd)); set_page_dirty(b.pcmd); /* * The area for the PCMD in the page was zeroed above. 
Check if the * whole page is now empty meaning that all PCMD's have been zeroed: */ pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE); kunmap_local(pcmd_page); kunmap_local((void *)(unsigned long)pginfo.contents); get_page(b.pcmd); sgx_encl_put_backing(&b); sgx_encl_truncate_backing_page(encl, page_index); if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) { sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off)); pcmd_page = kmap_local_page(b.pcmd); if (memchr_inv(pcmd_page, 0, PAGE_SIZE)) pr_warn("PCMD page not empty after truncate.\n"); kunmap_local(pcmd_page); } put_page(b.pcmd); return ret; } static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page, struct sgx_epc_page *secs_page) { unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK; struct sgx_encl *encl = encl_page->encl; struct sgx_epc_page *epc_page; int ret; epc_page = sgx_alloc_epc_page(encl_page, false); if (IS_ERR(epc_page)) return epc_page; ret = __sgx_encl_eldu(encl_page, epc_page, secs_page); if (ret) { sgx_encl_free_epc_page(epc_page); return ERR_PTR(ret); } sgx_free_va_slot(encl_page->va_page, va_offset); list_move(&encl_page->va_page->list, &encl->va_pages); encl_page->desc &= ~SGX_ENCL_PAGE_VA_OFFSET_MASK; encl_page->epc_page = epc_page; return epc_page; } static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl, struct sgx_encl_page *entry) { struct sgx_epc_page *epc_page; /* Entry successfully located. */ if (entry->epc_page) { if (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED) return ERR_PTR(-EBUSY); return entry; } if (!(encl->secs.epc_page)) { epc_page = sgx_encl_eldu(&encl->secs, NULL); if (IS_ERR(epc_page)) return ERR_CAST(epc_page); } epc_page = sgx_encl_eldu(entry, encl->secs.epc_page); if (IS_ERR(epc_page)) return ERR_CAST(epc_page); encl->secs_child_cnt++; sgx_mark_page_reclaimable(entry->epc_page); return entry; } static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl, unsigned long addr, unsigned long vm_flags) { unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS; struct sgx_encl_page *entry; entry = xa_load(&encl->page_array, PFN_DOWN(addr)); if (!entry) return ERR_PTR(-EFAULT); /* * Verify that the page has equal or higher build time * permissions than the VMA permissions (i.e. the subset of {VM_READ, * VM_WRITE, VM_EXECUTE} in vma->vm_flags). */ if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits) return ERR_PTR(-EFAULT); return __sgx_encl_load_page(encl, entry); } struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl, unsigned long addr) { struct sgx_encl_page *entry; entry = xa_load(&encl->page_array, PFN_DOWN(addr)); if (!entry) return ERR_PTR(-EFAULT); return __sgx_encl_load_page(encl, entry); } /** * sgx_encl_eaug_page() - Dynamically add page to initialized enclave * @vma: VMA obtained from fault info from where page is accessed * @encl: enclave accessing the page * @addr: address that triggered the page fault * * When an initialized enclave accesses a page with no backing EPC page * on a SGX2 system then the EPC can be added dynamically via the SGX2 * ENCLS[EAUG] instruction. * * Returns: Appropriate vm_fault_t: VM_FAULT_NOPAGE when PTE was installed * successfully, VM_FAULT_SIGBUS or VM_FAULT_OOM as error otherwise. 
*/ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma, struct sgx_encl *encl, unsigned long addr) { vm_fault_t vmret = VM_FAULT_SIGBUS; struct sgx_pageinfo pginfo = {0}; struct sgx_encl_page *encl_page; struct sgx_epc_page *epc_page; struct sgx_va_page *va_page; unsigned long phys_addr; u64 secinfo_flags; int ret; if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) return VM_FAULT_SIGBUS; /* * Ignore internal permission checking for dynamically added pages. * They matter only for data added during the pre-initialization * phase. The enclave decides the permissions by the means of * EACCEPT, EACCEPTCOPY and EMODPE. */ secinfo_flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X; encl_page = sgx_encl_page_alloc(encl, addr - encl->base, secinfo_flags); if (IS_ERR(encl_page)) return VM_FAULT_OOM; mutex_lock(&encl->lock); epc_page = sgx_alloc_epc_page(encl_page, false); if (IS_ERR(epc_page)) { if (PTR_ERR(epc_page) == -EBUSY) vmret = VM_FAULT_NOPAGE; goto err_out_unlock; } va_page = sgx_encl_grow(encl, false); if (IS_ERR(va_page)) { if (PTR_ERR(va_page) == -EBUSY) vmret = VM_FAULT_NOPAGE; goto err_out_epc; } if (va_page) list_add(&va_page->list, &encl->va_pages); ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc), encl_page, GFP_KERNEL); /* * If ret == -EBUSY then page was created in another flow while * running without encl->lock */ if (ret) goto err_out_shrink; pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page); pginfo.addr = encl_page->desc & PAGE_MASK; pginfo.metadata = 0; ret = __eaug(&pginfo, sgx_get_epc_virt_addr(epc_page)); if (ret) goto err_out; encl_page->encl = encl; encl_page->epc_page = epc_page; encl_page->type = SGX_PAGE_TYPE_REG; encl->secs_child_cnt++; sgx_mark_page_reclaimable(encl_page->epc_page); phys_addr = sgx_get_epc_phys_addr(epc_page); /* * Do not undo everything when creating PTE entry fails - next #PF * would find page ready for a PTE. */ vmret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr)); if (vmret != VM_FAULT_NOPAGE) { mutex_unlock(&encl->lock); return VM_FAULT_SIGBUS; } mutex_unlock(&encl->lock); return VM_FAULT_NOPAGE; err_out: xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc)); err_out_shrink: sgx_encl_shrink(encl, va_page); err_out_epc: sgx_encl_free_epc_page(epc_page); err_out_unlock: mutex_unlock(&encl->lock); kfree(encl_page); return vmret; } static vm_fault_t sgx_vma_fault(struct vm_fault *vmf) { unsigned long addr = (unsigned long)vmf->address; struct vm_area_struct *vma = vmf->vma; struct sgx_encl_page *entry; unsigned long phys_addr; struct sgx_encl *encl; vm_fault_t ret; encl = vma->vm_private_data; /* * It's very unlikely but possible that allocating memory for the * mm_list entry of a forked process failed in sgx_vma_open(). When * this happens, vm_private_data is set to NULL. */ if (unlikely(!encl)) return VM_FAULT_SIGBUS; /* * The page_array keeps track of all enclave pages, whether they * are swapped out or not. If there is no entry for this page and * the system supports SGX2 then it is possible to dynamically add * a new enclave page. This is only possible for an initialized * enclave that will be checked for right away. 
*/ if (cpu_feature_enabled(X86_FEATURE_SGX2) && (!xa_load(&encl->page_array, PFN_DOWN(addr)))) return sgx_encl_eaug_page(vma, encl, addr); mutex_lock(&encl->lock); entry = sgx_encl_load_page_in_vma(encl, addr, vma->vm_flags); if (IS_ERR(entry)) { mutex_unlock(&encl->lock); if (PTR_ERR(entry) == -EBUSY) return VM_FAULT_NOPAGE; return VM_FAULT_SIGBUS; } phys_addr = sgx_get_epc_phys_addr(entry->epc_page); ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr)); if (ret != VM_FAULT_NOPAGE) { mutex_unlock(&encl->lock); return VM_FAULT_SIGBUS; } sgx_encl_test_and_clear_young(vma->vm_mm, entry); mutex_unlock(&encl->lock); return VM_FAULT_NOPAGE; } static void sgx_vma_open(struct vm_area_struct *vma) { struct sgx_encl *encl = vma->vm_private_data; /* * It's possible but unlikely that vm_private_data is NULL. This can * happen in a grandchild of a process, when sgx_encl_mm_add() had * failed to allocate memory in this callback. */ if (unlikely(!encl)) return; if (sgx_encl_mm_add(encl, vma->vm_mm)) vma->vm_private_data = NULL; } /** * sgx_encl_may_map() - Check if a requested VMA mapping is allowed * @encl: an enclave pointer * @start: lower bound of the address range, inclusive * @end: upper bound of the address range, exclusive * @vm_flags: VMA flags * * Iterate through the enclave pages contained within [@start, @end) to verify * that the permissions requested by a subset of {VM_READ, VM_WRITE, VM_EXEC} * do not contain any permissions that are not contained in the build time * permissions of any of the enclave pages within the given address range. * * An enclave creator must declare the strongest permissions that will be * needed for each enclave page. This ensures that mappings have the identical * or weaker permissions than the earlier declared permissions. * * Return: 0 on success, -EACCES otherwise */ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start, unsigned long end, unsigned long vm_flags) { unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS; struct sgx_encl_page *page; unsigned long count = 0; int ret = 0; XA_STATE(xas, &encl->page_array, PFN_DOWN(start)); /* Disallow mapping outside enclave's address range. */ if (test_bit(SGX_ENCL_INITIALIZED, &encl->flags) && (start < encl->base || end > encl->base + encl->size)) return -EACCES; /* * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might * conflict with the enclave page permissions. */ if (current->personality & READ_IMPLIES_EXEC) return -EACCES; mutex_lock(&encl->lock); xas_lock(&xas); xas_for_each(&xas, page, PFN_DOWN(end - 1)) { if (~page->vm_max_prot_bits & vm_prot_bits) { ret = -EACCES; break; } /* Reschedule on every XA_CHECK_SCHED iteration. 
*/ if (!(++count % XA_CHECK_SCHED)) { xas_pause(&xas); xas_unlock(&xas); mutex_unlock(&encl->lock); cond_resched(); mutex_lock(&encl->lock); xas_lock(&xas); } } xas_unlock(&xas); mutex_unlock(&encl->lock); return ret; } static int sgx_vma_mprotect(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long newflags) { return sgx_encl_may_map(vma->vm_private_data, start, end, newflags); } static int sgx_encl_debug_read(struct sgx_encl *encl, struct sgx_encl_page *page, unsigned long addr, void *data) { unsigned long offset = addr & ~PAGE_MASK; int ret; ret = __edbgrd(sgx_get_epc_virt_addr(page->epc_page) + offset, data); if (ret) return -EIO; return 0; } static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *page, unsigned long addr, void *data) { unsigned long offset = addr & ~PAGE_MASK; int ret; ret = __edbgwr(sgx_get_epc_virt_addr(page->epc_page) + offset, data); if (ret) return -EIO; return 0; } /* * Load an enclave page to EPC if required, and take encl->lock. */ static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl, unsigned long addr, unsigned long vm_flags) { struct sgx_encl_page *entry; for ( ; ; ) { mutex_lock(&encl->lock); entry = sgx_encl_load_page_in_vma(encl, addr, vm_flags); if (PTR_ERR(entry) != -EBUSY) break; mutex_unlock(&encl->lock); } if (IS_ERR(entry)) mutex_unlock(&encl->lock); return entry; } static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { struct sgx_encl *encl = vma->vm_private_data; struct sgx_encl_page *entry = NULL; char data[sizeof(unsigned long)]; unsigned long align; int offset; int cnt; int ret = 0; int i; /* * If process was forked, VMA is still there but vm_private_data is set * to NULL. */ if (!encl) return -EFAULT; if (!test_bit(SGX_ENCL_DEBUG, &encl->flags)) return -EFAULT; for (i = 0; i < len; i += cnt) { entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK, vma->vm_flags); if (IS_ERR(entry)) { ret = PTR_ERR(entry); break; } align = ALIGN_DOWN(addr + i, sizeof(unsigned long)); offset = (addr + i) & (sizeof(unsigned long) - 1); cnt = sizeof(unsigned long) - offset; cnt = min(cnt, len - i); ret = sgx_encl_debug_read(encl, entry, align, data); if (ret) goto out; if (write) { memcpy(data + offset, buf + i, cnt); ret = sgx_encl_debug_write(encl, entry, align, data); if (ret) goto out; } else { memcpy(buf + i, data + offset, cnt); } out: mutex_unlock(&encl->lock); if (ret) break; } return ret < 0 ? ret : i; } const struct vm_operations_struct sgx_vm_ops = { .fault = sgx_vma_fault, .mprotect = sgx_vma_mprotect, .open = sgx_vma_open, .access = sgx_vma_access, }; /** * sgx_encl_release - Destroy an enclave instance * @ref: address of a kref inside &sgx_encl * * Used together with kref_put(). Frees all the resources associated with the * enclave and the instance itself. */ void sgx_encl_release(struct kref *ref) { struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount); unsigned long max_page_index = PFN_DOWN(encl->base + encl->size - 1); struct sgx_va_page *va_page; struct sgx_encl_page *entry; unsigned long count = 0; XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base)); xas_lock(&xas); xas_for_each(&xas, entry, max_page_index) { if (entry->epc_page) { /* * The page and its radix tree entry cannot be freed * if the page is being held by the reclaimer. 
*/ if (sgx_unmark_page_reclaimable(entry->epc_page)) continue; sgx_encl_free_epc_page(entry->epc_page); encl->secs_child_cnt--; entry->epc_page = NULL; } kfree(entry); /* * Invoke scheduler on every XA_CHECK_SCHED iteration * to prevent soft lockups. */ if (!(++count % XA_CHECK_SCHED)) { xas_pause(&xas); xas_unlock(&xas); cond_resched(); xas_lock(&xas); } } xas_unlock(&xas); xa_destroy(&encl->page_array); if (!encl->secs_child_cnt && encl->secs.epc_page) { sgx_encl_free_epc_page(encl->secs.epc_page); encl->secs.epc_page = NULL; } while (!list_empty(&encl->va_pages)) { va_page = list_first_entry(&encl->va_pages, struct sgx_va_page, list); list_del(&va_page->list); sgx_encl_free_epc_page(va_page->epc_page); kfree(va_page); } if (encl->backing) fput(encl->backing); cleanup_srcu_struct(&encl->srcu); WARN_ON_ONCE(!list_empty(&encl->mm_list)); /* Detect EPC page leak's. */ WARN_ON_ONCE(encl->secs_child_cnt); WARN_ON_ONCE(encl->secs.epc_page); kfree(encl); } /* * 'mm' is exiting and no longer needs mmu notifications. */ static void sgx_mmu_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm) { struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier); struct sgx_encl_mm *tmp = NULL; bool found = false; /* * The enclave itself can remove encl_mm. Note, objects can't be moved * off an RCU protected list, but deletion is ok. */ spin_lock(&encl_mm->encl->mm_lock); list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) { if (tmp == encl_mm) { list_del_rcu(&encl_mm->list); found = true; break; } } spin_unlock(&encl_mm->encl->mm_lock); if (found) { synchronize_srcu(&encl_mm->encl->srcu); mmu_notifier_put(mn); } } static void sgx_mmu_notifier_free(struct mmu_notifier *mn) { struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier); /* 'encl_mm' is going away, put encl_mm->encl reference: */ kref_put(&encl_mm->encl->refcount, sgx_encl_release); kfree(encl_mm); } static const struct mmu_notifier_ops sgx_mmu_notifier_ops = { .release = sgx_mmu_notifier_release, .free_notifier = sgx_mmu_notifier_free, }; static struct sgx_encl_mm *sgx_encl_find_mm(struct sgx_encl *encl, struct mm_struct *mm) { struct sgx_encl_mm *encl_mm = NULL; struct sgx_encl_mm *tmp; int idx; idx = srcu_read_lock(&encl->srcu); list_for_each_entry_rcu(tmp, &encl->mm_list, list) { if (tmp->mm == mm) { encl_mm = tmp; break; } } srcu_read_unlock(&encl->srcu, idx); return encl_mm; } int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm) { struct sgx_encl_mm *encl_mm; int ret; /* * Even though a single enclave may be mapped into an mm more than once, * each 'mm' only appears once on encl->mm_list. This is guaranteed by * holding the mm's mmap lock for write before an mm can be added or * remove to an encl->mm_list. */ mmap_assert_write_locked(mm); /* * It's possible that an entry already exists in the mm_list, because it * is removed only on VFS release or process exit. */ if (sgx_encl_find_mm(encl, mm)) return 0; encl_mm = kzalloc(sizeof(*encl_mm), GFP_KERNEL); if (!encl_mm) return -ENOMEM; /* Grab a refcount for the encl_mm->encl reference: */ kref_get(&encl->refcount); encl_mm->encl = encl; encl_mm->mm = mm; encl_mm->mmu_notifier.ops = &sgx_mmu_notifier_ops; ret = __mmu_notifier_register(&encl_mm->mmu_notifier, mm); if (ret) { kfree(encl_mm); return ret; } spin_lock(&encl->mm_lock); list_add_rcu(&encl_mm->list, &encl->mm_list); /* Pairs with smp_rmb() in sgx_zap_enclave_ptes(). 
*/ smp_wmb(); encl->mm_list_version++; spin_unlock(&encl->mm_lock); return 0; } /** * sgx_encl_cpumask() - Query which CPUs might be accessing the enclave * @encl: the enclave * * Some SGX functions require that no cached linear-to-physical address * mappings are present before they can succeed. For example, ENCLS[EWB] * copies a page from the enclave page cache to regular main memory but * it fails if it cannot ensure that there are no cached * linear-to-physical address mappings referring to the page. * * SGX hardware flushes all cached linear-to-physical mappings on a CPU * when an enclave is exited via ENCLU[EEXIT] or an Asynchronous Enclave * Exit (AEX). Exiting an enclave will thus ensure cached linear-to-physical * address mappings are cleared but coordination with the tracking done within * the SGX hardware is needed to support the SGX functions that depend on this * cache clearing. * * When the ENCLS[ETRACK] function is issued on an enclave the hardware * tracks threads operating inside the enclave at that time. The SGX * hardware tracking require that all the identified threads must have * exited the enclave in order to flush the mappings before a function such * as ENCLS[EWB] will be permitted * * The following flow is used to support SGX functions that require that * no cached linear-to-physical address mappings are present: * 1) Execute ENCLS[ETRACK] to initiate hardware tracking. * 2) Use this function (sgx_encl_cpumask()) to query which CPUs might be * accessing the enclave. * 3) Send IPI to identified CPUs, kicking them out of the enclave and * thus flushing all locally cached linear-to-physical address mappings. * 4) Execute SGX function. * * Context: It is required to call this function after ENCLS[ETRACK]. * This will ensure that if any new mm appears (racing with * sgx_encl_mm_add()) then the new mm will enter into the * enclave with fresh linear-to-physical address mappings. * * It is required that all IPIs are completed before a new * ENCLS[ETRACK] is issued so be sure to protect steps 1 to 3 * of the above flow with the enclave's mutex. * * Return: cpumask of CPUs that might be accessing @encl */ const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl) { cpumask_t *cpumask = &encl->cpumask; struct sgx_encl_mm *encl_mm; int idx; cpumask_clear(cpumask); idx = srcu_read_lock(&encl->srcu); list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) { if (!mmget_not_zero(encl_mm->mm)) continue; cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm)); mmput_async(encl_mm->mm); } srcu_read_unlock(&encl->srcu, idx); return cpumask; } static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl, pgoff_t index) { struct address_space *mapping = encl->backing->f_mapping; gfp_t gfpmask = mapping_gfp_mask(mapping); return shmem_read_mapping_page_gfp(mapping, index, gfpmask); } /** * __sgx_encl_get_backing() - Pin the backing storage * @encl: an enclave pointer * @page_index: enclave page index * @backing: data for accessing backing storage for the page * * Pin the backing storage pages for storing the encrypted contents and Paging * Crypto MetaData (PCMD) of an enclave page. * * Return: * 0 on success, * -errno otherwise. 
*/ static int __sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, struct sgx_backing *backing) { pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index); struct page *contents; struct page *pcmd; contents = sgx_encl_get_backing_page(encl, page_index); if (IS_ERR(contents)) return PTR_ERR(contents); pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off)); if (IS_ERR(pcmd)) { put_page(contents); return PTR_ERR(pcmd); } backing->contents = contents; backing->pcmd = pcmd; backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1); return 0; } /* * When called from ksgxd, returns the mem_cgroup of a struct mm stored * in the enclave's mm_list. When not called from ksgxd, just returns * the mem_cgroup of the current task. */ static struct mem_cgroup *sgx_encl_get_mem_cgroup(struct sgx_encl *encl) { struct mem_cgroup *memcg = NULL; struct sgx_encl_mm *encl_mm; int idx; /* * If called from normal task context, return the mem_cgroup * of the current task's mm. The remainder of the handling is for * ksgxd. */ if (!current_is_ksgxd()) return get_mem_cgroup_from_mm(current->mm); /* * Search the enclave's mm_list to find an mm associated with * this enclave to charge the allocation to. */ idx = srcu_read_lock(&encl->srcu); list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) { if (!mmget_not_zero(encl_mm->mm)) continue; memcg = get_mem_cgroup_from_mm(encl_mm->mm); mmput_async(encl_mm->mm); break; } srcu_read_unlock(&encl->srcu, idx); /* * In the rare case that there isn't an mm associated with * the enclave, set memcg to the current active mem_cgroup. * This will be the root mem_cgroup if there is no active * mem_cgroup. */ if (!memcg) return get_mem_cgroup_from_mm(NULL); return memcg; } /** * sgx_encl_alloc_backing() - create a new backing storage page * @encl: an enclave pointer * @page_index: enclave page index * @backing: data for accessing backing storage for the page * * When called from ksgxd, sets the active memcg from one of the * mms in the enclave's mm_list prior to any backing page allocation, * in order to ensure that shmem page allocations are charged to the * enclave. Create a backing page for loading data back into an EPC page with * ELDU. This function takes a reference on a new backing page which * must be dropped with a corresponding call to sgx_encl_put_backing(). * * Return: * 0 on success, * -errno otherwise. */ int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index, struct sgx_backing *backing) { struct mem_cgroup *encl_memcg = sgx_encl_get_mem_cgroup(encl); struct mem_cgroup *memcg = set_active_memcg(encl_memcg); int ret; ret = __sgx_encl_get_backing(encl, page_index, backing); set_active_memcg(memcg); mem_cgroup_put(encl_memcg); return ret; } /** * sgx_encl_lookup_backing() - retrieve an existing backing storage page * @encl: an enclave pointer * @page_index: enclave page index * @backing: data for accessing backing storage for the page * * Retrieve a backing page for loading data back into an EPC page with ELDU. * It is the caller's responsibility to ensure that it is appropriate to use * sgx_encl_lookup_backing() rather than sgx_encl_alloc_backing(). If lookup is * not used correctly, this will cause an allocation which is not accounted for. * This function takes a reference on an existing backing page which must be * dropped with a corresponding call to sgx_encl_put_backing(). * * Return: * 0 on success, * -errno otherwise. 
*/ static int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index, struct sgx_backing *backing) { return __sgx_encl_get_backing(encl, page_index, backing); } /** * sgx_encl_put_backing() - Unpin the backing storage * @backing: data for accessing backing storage for the page */ void sgx_encl_put_backing(struct sgx_backing *backing) { put_page(backing->pcmd); put_page(backing->contents); } static int sgx_encl_test_and_clear_young_cb(pte_t *ptep, unsigned long addr, void *data) { pte_t pte; int ret; ret = pte_young(*ptep); if (ret) { pte = pte_mkold(*ptep); set_pte_at((struct mm_struct *)data, addr, ptep, pte); } return ret; } /** * sgx_encl_test_and_clear_young() - Test and reset the accessed bit * @mm: mm_struct that is checked * @page: enclave page to be tested for recent access * * Checks the Access (A) bit from the PTE corresponding to the enclave page and * clears it. * * Return: 1 if the page has been recently accessed and 0 if not. */ int sgx_encl_test_and_clear_young(struct mm_struct *mm, struct sgx_encl_page *page) { unsigned long addr = page->desc & PAGE_MASK; struct sgx_encl *encl = page->encl; struct vm_area_struct *vma; int ret; ret = sgx_encl_find(mm, addr, &vma); if (ret) return 0; if (encl != vma->vm_private_data) return 0; ret = apply_to_page_range(vma->vm_mm, addr, PAGE_SIZE, sgx_encl_test_and_clear_young_cb, vma->vm_mm); if (ret < 0) return 0; return ret; } struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl, unsigned long offset, u64 secinfo_flags) { struct sgx_encl_page *encl_page; unsigned long prot; encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL); if (!encl_page) return ERR_PTR(-ENOMEM); encl_page->desc = encl->base + offset; encl_page->encl = encl; prot = _calc_vm_trans(secinfo_flags, SGX_SECINFO_R, PROT_READ) | _calc_vm_trans(secinfo_flags, SGX_SECINFO_W, PROT_WRITE) | _calc_vm_trans(secinfo_flags, SGX_SECINFO_X, PROT_EXEC); /* * TCS pages must always RW set for CPU access while the SECINFO * permissions are *always* zero - the CPU ignores the user provided * values and silently overwrites them with zero permissions. */ if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS) prot |= PROT_READ | PROT_WRITE; /* Calculate maximum of the VM flags for the page. */ encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0); return encl_page; } /** * sgx_zap_enclave_ptes() - remove PTEs mapping the address from enclave * @encl: the enclave * @addr: page aligned pointer to single page for which PTEs will be removed * * Multiple VMAs may have an enclave page mapped. Remove the PTE mapping * @addr from each VMA. Ensure that page fault handler is ready to handle * new mappings of @addr before calling this function. */ void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr) { unsigned long mm_list_version; struct sgx_encl_mm *encl_mm; struct vm_area_struct *vma; int idx, ret; do { mm_list_version = encl->mm_list_version; /* Pairs with smp_wmb() in sgx_encl_mm_add(). 
*/ smp_rmb(); idx = srcu_read_lock(&encl->srcu); list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) { if (!mmget_not_zero(encl_mm->mm)) continue; mmap_read_lock(encl_mm->mm); ret = sgx_encl_find(encl_mm->mm, addr, &vma); if (!ret && encl == vma->vm_private_data) zap_vma_ptes(vma, addr, PAGE_SIZE); mmap_read_unlock(encl_mm->mm); mmput_async(encl_mm->mm); } srcu_read_unlock(&encl->srcu, idx); } while (unlikely(encl->mm_list_version != mm_list_version)); } /** * sgx_alloc_va_page() - Allocate a Version Array (VA) page * @reclaim: Reclaim EPC pages directly if none available. Enclave * mutex should not be held if this is set. * * Allocate a free EPC page and convert it to a Version Array (VA) page. * * Return: * a VA page, * -errno otherwise */ struct sgx_epc_page *sgx_alloc_va_page(bool reclaim) { struct sgx_epc_page *epc_page; int ret; epc_page = sgx_alloc_epc_page(NULL, reclaim); if (IS_ERR(epc_page)) return ERR_CAST(epc_page); ret = __epa(sgx_get_epc_virt_addr(epc_page)); if (ret) { WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret); sgx_encl_free_epc_page(epc_page); return ERR_PTR(-EFAULT); } return epc_page; } /** * sgx_alloc_va_slot - allocate a VA slot * @va_page: a &struct sgx_va_page instance * * Allocates a slot from a &struct sgx_va_page instance. * * Return: offset of the slot inside the VA page */ unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page) { int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT); if (slot < SGX_VA_SLOT_COUNT) set_bit(slot, va_page->slots); return slot << 3; } /** * sgx_free_va_slot - free a VA slot * @va_page: a &struct sgx_va_page instance * @offset: offset of the slot inside the VA page * * Frees a slot from a &struct sgx_va_page instance. */ void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset) { clear_bit(offset >> 3, va_page->slots); } /** * sgx_va_page_full - is the VA page full? * @va_page: a &struct sgx_va_page instance * * Return: true if all slots have been taken */ bool sgx_va_page_full(struct sgx_va_page *va_page) { int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT); return slot == SGX_VA_SLOT_COUNT; } /** * sgx_encl_free_epc_page - free an EPC page assigned to an enclave * @page: EPC page to be freed * * Free an EPC page assigned to an enclave. It does EREMOVE for the page, and * only upon success, it puts the page back to free page list. Otherwise, it * gives a WARNING to indicate page is leaked. */ void sgx_encl_free_epc_page(struct sgx_epc_page *page) { int ret; WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED); ret = __eremove(sgx_get_epc_virt_addr(page)); if (WARN_ONCE(ret, EREMOVE_ERROR_MESSAGE, ret, ret)) return; sgx_free_epc_page(page); }
linux-master
arch/x86/kernel/cpu/sgx/encl.c
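The backing-store layout described in encl.c's comments above (encrypted enclave pages, then one page-sized SECS slot, then the PCMD array, 32 entries per PCMD page) reduces to a little offset arithmetic. Here is a stand-alone sketch of what sgx_encl_get_backing_page_pcmd_offset() computes, assuming a 4 KiB PAGE_SIZE and a 128-byte struct sgx_pcmd (consistent with the BUILD_BUG_ON above that pins 32 PCMD entries per page); the enclave size and page index are made-up example values.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PCMD_SIZE	128UL	/* assumed sizeof(struct sgx_pcmd): 32 entries per 4 KiB page */

int main(void)
{
	unsigned long encl_size  = 64 * PAGE_SIZE;	/* hypothetical 256 KiB enclave */
	unsigned long page_index = 37;			/* some enclave page */

	/* Encrypted pages occupy [0, size), then one SECS slot, then the PCMD array. */
	unsigned long pcmd_off  = encl_size + PAGE_SIZE + page_index * PCMD_SIZE;
	unsigned long pcmd_page = pcmd_off / PAGE_SIZE;		/* backing page holding the PCMD */
	unsigned long pcmd_inpg = pcmd_off & (PAGE_SIZE - 1);	/* becomes backing->pcmd_offset */

	printf("PCMD for enclave page %lu lives in backing page %lu at offset %lu\n",
	       page_index, pcmd_page, pcmd_inpg);
	return 0;
}

This is why a PCMD backing page can only be truncated once all 32 of its entries have been zeroed and no sharing enclave page is being reclaimed, as reclaimer_writing_to_pcmd() checks above.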
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2016-20 Intel Corporation. */ #include <asm/mman.h> #include <asm/sgx.h> #include <linux/mman.h> #include <linux/delay.h> #include <linux/file.h> #include <linux/hashtable.h> #include <linux/highmem.h> #include <linux/ratelimit.h> #include <linux/sched/signal.h> #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/suspend.h> #include "driver.h" #include "encl.h" #include "encls.h" struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl, bool reclaim) { struct sgx_va_page *va_page = NULL; void *err; BUILD_BUG_ON(SGX_VA_SLOT_COUNT != (SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3) + 1); if (!(encl->page_cnt % SGX_VA_SLOT_COUNT)) { va_page = kzalloc(sizeof(*va_page), GFP_KERNEL); if (!va_page) return ERR_PTR(-ENOMEM); va_page->epc_page = sgx_alloc_va_page(reclaim); if (IS_ERR(va_page->epc_page)) { err = ERR_CAST(va_page->epc_page); kfree(va_page); return err; } WARN_ON_ONCE(encl->page_cnt % SGX_VA_SLOT_COUNT); } encl->page_cnt++; return va_page; } void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page) { encl->page_cnt--; if (va_page) { sgx_encl_free_epc_page(va_page->epc_page); list_del(&va_page->list); kfree(va_page); } } static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs) { struct sgx_epc_page *secs_epc; struct sgx_va_page *va_page; struct sgx_pageinfo pginfo; struct sgx_secinfo secinfo; unsigned long encl_size; struct file *backing; long ret; va_page = sgx_encl_grow(encl, true); if (IS_ERR(va_page)) return PTR_ERR(va_page); else if (va_page) list_add(&va_page->list, &encl->va_pages); /* else the tail page of the VA page list had free slots. */ /* The extra page goes to SECS. */ encl_size = secs->size + PAGE_SIZE; backing = shmem_file_setup("SGX backing", encl_size + (encl_size >> 5), VM_NORESERVE); if (IS_ERR(backing)) { ret = PTR_ERR(backing); goto err_out_shrink; } encl->backing = backing; secs_epc = sgx_alloc_epc_page(&encl->secs, true); if (IS_ERR(secs_epc)) { ret = PTR_ERR(secs_epc); goto err_out_backing; } encl->secs.epc_page = secs_epc; pginfo.addr = 0; pginfo.contents = (unsigned long)secs; pginfo.metadata = (unsigned long)&secinfo; pginfo.secs = 0; memset(&secinfo, 0, sizeof(secinfo)); ret = __ecreate((void *)&pginfo, sgx_get_epc_virt_addr(secs_epc)); if (ret) { ret = -EIO; goto err_out; } if (secs->attributes & SGX_ATTR_DEBUG) set_bit(SGX_ENCL_DEBUG, &encl->flags); encl->secs.encl = encl; encl->secs.type = SGX_PAGE_TYPE_SECS; encl->base = secs->base; encl->size = secs->size; encl->attributes = secs->attributes; encl->attributes_mask = SGX_ATTR_UNPRIV_MASK; /* Set only after completion, as encl->lock has not been taken. */ set_bit(SGX_ENCL_CREATED, &encl->flags); return 0; err_out: sgx_encl_free_epc_page(encl->secs.epc_page); encl->secs.epc_page = NULL; err_out_backing: fput(encl->backing); encl->backing = NULL; err_out_shrink: sgx_encl_shrink(encl, va_page); return ret; } /** * sgx_ioc_enclave_create() - handler for %SGX_IOC_ENCLAVE_CREATE * @encl: An enclave pointer. * @arg: The ioctl argument. * * Allocate kernel data structures for the enclave and invoke ECREATE. * * Return: * - 0: Success. * - -EIO: ECREATE failed. * - -errno: POSIX error. 
*/ static long sgx_ioc_enclave_create(struct sgx_encl *encl, void __user *arg) { struct sgx_enclave_create create_arg; void *secs; int ret; if (test_bit(SGX_ENCL_CREATED, &encl->flags)) return -EINVAL; if (copy_from_user(&create_arg, arg, sizeof(create_arg))) return -EFAULT; secs = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!secs) return -ENOMEM; if (copy_from_user(secs, (void __user *)create_arg.src, PAGE_SIZE)) ret = -EFAULT; else ret = sgx_encl_create(encl, secs); kfree(secs); return ret; } static int sgx_validate_secinfo(struct sgx_secinfo *secinfo) { u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK; u64 pt = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK; if (pt != SGX_SECINFO_REG && pt != SGX_SECINFO_TCS) return -EINVAL; if ((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R)) return -EINVAL; /* * CPU will silently overwrite the permissions as zero, which means * that we need to validate it ourselves. */ if (pt == SGX_SECINFO_TCS && perm) return -EINVAL; if (secinfo->flags & SGX_SECINFO_RESERVED_MASK) return -EINVAL; if (memchr_inv(secinfo->reserved, 0, sizeof(secinfo->reserved))) return -EINVAL; return 0; } static int __sgx_encl_add_page(struct sgx_encl *encl, struct sgx_encl_page *encl_page, struct sgx_epc_page *epc_page, struct sgx_secinfo *secinfo, unsigned long src) { struct sgx_pageinfo pginfo; struct vm_area_struct *vma; struct page *src_page; int ret; /* Deny noexec. */ vma = find_vma(current->mm, src); if (!vma) return -EFAULT; if (!(vma->vm_flags & VM_MAYEXEC)) return -EACCES; ret = get_user_pages(src, 1, 0, &src_page); if (ret < 1) return -EFAULT; pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page); pginfo.addr = encl_page->desc & PAGE_MASK; pginfo.metadata = (unsigned long)secinfo; pginfo.contents = (unsigned long)kmap_local_page(src_page); ret = __eadd(&pginfo, sgx_get_epc_virt_addr(epc_page)); kunmap_local((void *)pginfo.contents); put_page(src_page); return ret ? -EIO : 0; } /* * If the caller requires measurement of the page as a proof for the content, * use EEXTEND to add a measurement for 256 bytes of the page. Repeat this * operation until the entire page is measured." */ static int __sgx_encl_extend(struct sgx_encl *encl, struct sgx_epc_page *epc_page) { unsigned long offset; int ret; for (offset = 0; offset < PAGE_SIZE; offset += SGX_EEXTEND_BLOCK_SIZE) { ret = __eextend(sgx_get_epc_virt_addr(encl->secs.epc_page), sgx_get_epc_virt_addr(epc_page) + offset); if (ret) { if (encls_failed(ret)) ENCLS_WARN(ret, "EEXTEND"); return -EIO; } } return 0; } static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src, unsigned long offset, struct sgx_secinfo *secinfo, unsigned long flags) { struct sgx_encl_page *encl_page; struct sgx_epc_page *epc_page; struct sgx_va_page *va_page; int ret; encl_page = sgx_encl_page_alloc(encl, offset, secinfo->flags); if (IS_ERR(encl_page)) return PTR_ERR(encl_page); epc_page = sgx_alloc_epc_page(encl_page, true); if (IS_ERR(epc_page)) { kfree(encl_page); return PTR_ERR(epc_page); } va_page = sgx_encl_grow(encl, true); if (IS_ERR(va_page)) { ret = PTR_ERR(va_page); goto err_out_free; } mmap_read_lock(current->mm); mutex_lock(&encl->lock); /* * Adding to encl->va_pages must be done under encl->lock. Ditto for * deleting (via sgx_encl_shrink()) in the error path. */ if (va_page) list_add(&va_page->list, &encl->va_pages); /* * Insert prior to EADD in case of OOM. EADD modifies MRENCLAVE, i.e. * can't be gracefully unwound, while failure on EADD/EXTEND is limited * to userspace errors (or kernel/hardware bugs). 
*/ ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc), encl_page, GFP_KERNEL); if (ret) goto err_out_unlock; ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo, src); if (ret) goto err_out; /* * Complete the "add" before doing the "extend" so that the "add" * isn't in a half-baked state in the extremely unlikely scenario * the enclave will be destroyed in response to EEXTEND failure. */ encl_page->encl = encl; encl_page->epc_page = epc_page; encl_page->type = (secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK) >> 8; encl->secs_child_cnt++; if (flags & SGX_PAGE_MEASURE) { ret = __sgx_encl_extend(encl, epc_page); if (ret) goto err_out; } sgx_mark_page_reclaimable(encl_page->epc_page); mutex_unlock(&encl->lock); mmap_read_unlock(current->mm); return ret; err_out: xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc)); err_out_unlock: sgx_encl_shrink(encl, va_page); mutex_unlock(&encl->lock); mmap_read_unlock(current->mm); err_out_free: sgx_encl_free_epc_page(epc_page); kfree(encl_page); return ret; } /* * Ensure user provided offset and length values are valid for * an enclave. */ static int sgx_validate_offset_length(struct sgx_encl *encl, unsigned long offset, unsigned long length) { if (!IS_ALIGNED(offset, PAGE_SIZE)) return -EINVAL; if (!length || !IS_ALIGNED(length, PAGE_SIZE)) return -EINVAL; if (offset + length < offset) return -EINVAL; if (offset + length - PAGE_SIZE >= encl->size) return -EINVAL; return 0; } /** * sgx_ioc_enclave_add_pages() - The handler for %SGX_IOC_ENCLAVE_ADD_PAGES * @encl: an enclave pointer * @arg: a user pointer to a struct sgx_enclave_add_pages instance * * Add one or more pages to an uninitialized enclave, and optionally extend the * measurement with the contents of the page. The SECINFO and measurement mask * are applied to all pages. * * A SECINFO for a TCS is required to always contain zero permissions because * CPU silently zeros them. Allowing anything else would cause a mismatch in * the measurement. * * mmap()'s protection bits are capped by the page permissions. For each page * address, the maximum protection bits are computed with the following * heuristics: * * 1. A regular page: PROT_R, PROT_W and PROT_X match the SECINFO permissions. * 2. A TCS page: PROT_R | PROT_W. * * mmap() is not allowed to surpass the minimum of the maximum protection bits * within the given address range. * * The function deinitializes kernel data structures for enclave and returns * -EIO in any of the following conditions: * * - Enclave Page Cache (EPC), the physical memory holding enclaves, has * been invalidated. This will cause EADD and EEXTEND to fail. * - If the source address is corrupted somehow when executing EADD. * * Return: * - 0: Success. * - -EACCES: The source page is located in a noexec partition. * - -ENOMEM: Out of EPC pages. * - -EINTR: The call was interrupted before data was processed. * - -EIO: Either EADD or EEXTEND failed because invalid source address * or power cycle. * - -errno: POSIX error. 
*/ static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg) { struct sgx_enclave_add_pages add_arg; struct sgx_secinfo secinfo; unsigned long c; int ret; if (!test_bit(SGX_ENCL_CREATED, &encl->flags) || test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) return -EINVAL; if (copy_from_user(&add_arg, arg, sizeof(add_arg))) return -EFAULT; if (!IS_ALIGNED(add_arg.src, PAGE_SIZE)) return -EINVAL; if (sgx_validate_offset_length(encl, add_arg.offset, add_arg.length)) return -EINVAL; if (copy_from_user(&secinfo, (void __user *)add_arg.secinfo, sizeof(secinfo))) return -EFAULT; if (sgx_validate_secinfo(&secinfo)) return -EINVAL; for (c = 0 ; c < add_arg.length; c += PAGE_SIZE) { if (signal_pending(current)) { if (!c) ret = -ERESTARTSYS; break; } if (need_resched()) cond_resched(); ret = sgx_encl_add_page(encl, add_arg.src + c, add_arg.offset + c, &secinfo, add_arg.flags); if (ret) break; } add_arg.count = c; if (copy_to_user(arg, &add_arg, sizeof(add_arg))) return -EFAULT; return ret; } static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus, void *hash) { SHASH_DESC_ON_STACK(shash, tfm); shash->tfm = tfm; return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash); } static int sgx_get_key_hash(const void *modulus, void *hash) { struct crypto_shash *tfm; int ret; tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) return PTR_ERR(tfm); ret = __sgx_get_key_hash(tfm, modulus, hash); crypto_free_shash(tfm); return ret; } static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct, void *token) { u64 mrsigner[4]; int i, j; void *addr; int ret; /* * Deny initializing enclaves with attributes (namely provisioning) * that have not been explicitly allowed. */ if (encl->attributes & ~encl->attributes_mask) return -EACCES; /* * Attributes should not be enforced *only* against what's available on * platform (done in sgx_encl_create) but checked and enforced against * the mask for enforcement in sigstruct. For example an enclave could * opt to sign with AVX bit in xfrm, but still be loadable on a platform * without it if the sigstruct->body.attributes_mask does not turn that * bit on. */ if (sigstruct->body.attributes & sigstruct->body.attributes_mask & sgx_attributes_reserved_mask) return -EINVAL; if (sigstruct->body.miscselect & sigstruct->body.misc_mask & sgx_misc_reserved_mask) return -EINVAL; if (sigstruct->body.xfrm & sigstruct->body.xfrm_mask & sgx_xfrm_reserved_mask) return -EINVAL; ret = sgx_get_key_hash(sigstruct->modulus, mrsigner); if (ret) return ret; mutex_lock(&encl->lock); /* * ENCLS[EINIT] is interruptible because it has such a high latency, * e.g. 50k+ cycles on success. If an IRQ/NMI/SMI becomes pending, * EINIT may fail with SGX_UNMASKED_EVENT so that the event can be * serviced. 
*/ for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) { for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) { addr = sgx_get_epc_virt_addr(encl->secs.epc_page); preempt_disable(); sgx_update_lepubkeyhash(mrsigner); ret = __einit(sigstruct, token, addr); preempt_enable(); if (ret == SGX_UNMASKED_EVENT) continue; else break; } if (ret != SGX_UNMASKED_EVENT) break; msleep_interruptible(SGX_EINIT_SLEEP_TIME); if (signal_pending(current)) { ret = -ERESTARTSYS; goto err_out; } } if (encls_faulted(ret)) { if (encls_failed(ret)) ENCLS_WARN(ret, "EINIT"); ret = -EIO; } else if (ret) { pr_debug("EINIT returned %d\n", ret); ret = -EPERM; } else { set_bit(SGX_ENCL_INITIALIZED, &encl->flags); } err_out: mutex_unlock(&encl->lock); return ret; } /** * sgx_ioc_enclave_init() - handler for %SGX_IOC_ENCLAVE_INIT * @encl: an enclave pointer * @arg: userspace pointer to a struct sgx_enclave_init instance * * Flush any outstanding enqueued EADD operations and perform EINIT. The * Launch Enclave Public Key Hash MSRs are rewritten as necessary to match * the enclave's MRSIGNER, which is caculated from the provided sigstruct. * * Return: * - 0: Success. * - -EPERM: Invalid SIGSTRUCT. * - -EIO: EINIT failed because of a power cycle. * - -errno: POSIX error. */ static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg) { struct sgx_sigstruct *sigstruct; struct sgx_enclave_init init_arg; void *token; int ret; if (!test_bit(SGX_ENCL_CREATED, &encl->flags) || test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) return -EINVAL; if (copy_from_user(&init_arg, arg, sizeof(init_arg))) return -EFAULT; /* * 'sigstruct' must be on a page boundary and 'token' on a 512 byte * boundary. kmalloc() will give this alignment when allocating * PAGE_SIZE bytes. */ sigstruct = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!sigstruct) return -ENOMEM; token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2); memset(token, 0, SGX_LAUNCH_TOKEN_SIZE); if (copy_from_user(sigstruct, (void __user *)init_arg.sigstruct, sizeof(*sigstruct))) { ret = -EFAULT; goto out; } /* * A legacy field used with Intel signed enclaves. These used to mean * regular and architectural enclaves. The CPU only accepts these values * but they do not have any other meaning. * * Thus, reject any other values. */ if (sigstruct->header.vendor != 0x0000 && sigstruct->header.vendor != 0x8086) { ret = -EINVAL; goto out; } ret = sgx_encl_init(encl, sigstruct, token); out: kfree(sigstruct); return ret; } /** * sgx_ioc_enclave_provision() - handler for %SGX_IOC_ENCLAVE_PROVISION * @encl: an enclave pointer * @arg: userspace pointer to a struct sgx_enclave_provision instance * * Allow ATTRIBUTE.PROVISION_KEY for an enclave by providing a file handle to * /dev/sgx_provision. * * Return: * - 0: Success. * - -errno: Otherwise. */ static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg) { struct sgx_enclave_provision params; if (copy_from_user(&params, arg, sizeof(params))) return -EFAULT; return sgx_set_attribute(&encl->attributes_mask, params.fd); } /* * Ensure enclave is ready for SGX2 functions. Readiness is checked * by ensuring the hardware supports SGX2 and the enclave is initialized * and thus able to handle requests to modify pages within it. */ static int sgx_ioc_sgx2_ready(struct sgx_encl *encl) { if (!(cpu_feature_enabled(X86_FEATURE_SGX2))) return -ENODEV; if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) return -EINVAL; return 0; } /* * Some SGX functions require that no cached linear-to-physical address * mappings are present before they can succeed. 
Collaborate with * hardware via ENCLS[ETRACK] to ensure that all cached * linear-to-physical address mappings belonging to all threads of * the enclave are cleared. See sgx_encl_cpumask() for details. * * Must be called with enclave's mutex held from the time the * SGX function requiring that no cached linear-to-physical mappings * are present is executed until this ETRACK flow is complete. */ static int sgx_enclave_etrack(struct sgx_encl *encl) { void *epc_virt; int ret; epc_virt = sgx_get_epc_virt_addr(encl->secs.epc_page); ret = __etrack(epc_virt); if (ret) { /* * ETRACK only fails when there is an OS issue. For * example, two consecutive ETRACK was sent without * completed IPI between. */ pr_err_once("ETRACK returned %d (0x%x)", ret, ret); /* * Send IPIs to kick CPUs out of the enclave and * try ETRACK again. */ on_each_cpu_mask(sgx_encl_cpumask(encl), sgx_ipi_cb, NULL, 1); ret = __etrack(epc_virt); if (ret) { pr_err_once("ETRACK repeat returned %d (0x%x)", ret, ret); return -EFAULT; } } on_each_cpu_mask(sgx_encl_cpumask(encl), sgx_ipi_cb, NULL, 1); return 0; } /** * sgx_enclave_restrict_permissions() - Restrict EPCM permissions * @encl: Enclave to which the pages belong. * @modp: Checked parameters from user on which pages need modifying and * their new permissions. * * Return: * - 0: Success. * - -errno: Otherwise. */ static long sgx_enclave_restrict_permissions(struct sgx_encl *encl, struct sgx_enclave_restrict_permissions *modp) { struct sgx_encl_page *entry; struct sgx_secinfo secinfo; unsigned long addr; unsigned long c; void *epc_virt; int ret; memset(&secinfo, 0, sizeof(secinfo)); secinfo.flags = modp->permissions & SGX_SECINFO_PERMISSION_MASK; for (c = 0 ; c < modp->length; c += PAGE_SIZE) { addr = encl->base + modp->offset + c; sgx_reclaim_direct(); mutex_lock(&encl->lock); entry = sgx_encl_load_page(encl, addr); if (IS_ERR(entry)) { ret = PTR_ERR(entry) == -EBUSY ? -EAGAIN : -EFAULT; goto out_unlock; } /* * Changing EPCM permissions is only supported on regular * SGX pages. Attempting this change on other pages will * result in #PF. */ if (entry->type != SGX_PAGE_TYPE_REG) { ret = -EINVAL; goto out_unlock; } /* * Apart from ensuring that read-access remains, do not verify * the permission bits requested. Kernel has no control over * how EPCM permissions can be relaxed from within the enclave. * ENCLS[EMODPR] can only remove existing EPCM permissions, * attempting to set new permissions will be ignored by the * hardware. */ /* Change EPCM permissions. */ epc_virt = sgx_get_epc_virt_addr(entry->epc_page); ret = __emodpr(&secinfo, epc_virt); if (encls_faulted(ret)) { /* * All possible faults should be avoidable: * parameters have been checked, will only change * permissions of a regular page, and no concurrent * SGX1/SGX2 ENCLS instructions since these * are protected with mutex. 
*/ pr_err_once("EMODPR encountered exception %d\n", ENCLS_TRAPNR(ret)); ret = -EFAULT; goto out_unlock; } if (encls_failed(ret)) { modp->result = ret; ret = -EFAULT; goto out_unlock; } ret = sgx_enclave_etrack(encl); if (ret) { ret = -EFAULT; goto out_unlock; } mutex_unlock(&encl->lock); } ret = 0; goto out; out_unlock: mutex_unlock(&encl->lock); out: modp->count = c; return ret; } /** * sgx_ioc_enclave_restrict_permissions() - handler for * %SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS * @encl: an enclave pointer * @arg: userspace pointer to a &struct sgx_enclave_restrict_permissions * instance * * SGX2 distinguishes between relaxing and restricting the enclave page * permissions maintained by the hardware (EPCM permissions) of pages * belonging to an initialized enclave (after SGX_IOC_ENCLAVE_INIT). * * EPCM permissions cannot be restricted from within the enclave, the enclave * requires the kernel to run the privileged level 0 instructions ENCLS[EMODPR] * and ENCLS[ETRACK]. An attempt to relax EPCM permissions with this call * will be ignored by the hardware. * * Return: * - 0: Success * - -errno: Otherwise */ static long sgx_ioc_enclave_restrict_permissions(struct sgx_encl *encl, void __user *arg) { struct sgx_enclave_restrict_permissions params; long ret; ret = sgx_ioc_sgx2_ready(encl); if (ret) return ret; if (copy_from_user(&params, arg, sizeof(params))) return -EFAULT; if (sgx_validate_offset_length(encl, params.offset, params.length)) return -EINVAL; if (params.permissions & ~SGX_SECINFO_PERMISSION_MASK) return -EINVAL; /* * Fail early if invalid permissions requested to prevent ENCLS[EMODPR] * from faulting later when the CPU does the same check. */ if ((params.permissions & SGX_SECINFO_W) && !(params.permissions & SGX_SECINFO_R)) return -EINVAL; if (params.result || params.count) return -EINVAL; ret = sgx_enclave_restrict_permissions(encl, &params); if (copy_to_user(arg, &params, sizeof(params))) return -EFAULT; return ret; } /** * sgx_enclave_modify_types() - Modify type of SGX enclave pages * @encl: Enclave to which the pages belong. * @modt: Checked parameters from user about which pages need modifying * and their new page type. * * Return: * - 0: Success * - -errno: Otherwise */ static long sgx_enclave_modify_types(struct sgx_encl *encl, struct sgx_enclave_modify_types *modt) { unsigned long max_prot_restore; enum sgx_page_type page_type; struct sgx_encl_page *entry; struct sgx_secinfo secinfo; unsigned long prot; unsigned long addr; unsigned long c; void *epc_virt; int ret; page_type = modt->page_type & SGX_PAGE_TYPE_MASK; /* * The only new page types allowed by hardware are PT_TCS and PT_TRIM. */ if (page_type != SGX_PAGE_TYPE_TCS && page_type != SGX_PAGE_TYPE_TRIM) return -EINVAL; memset(&secinfo, 0, sizeof(secinfo)); secinfo.flags = page_type << 8; for (c = 0 ; c < modt->length; c += PAGE_SIZE) { addr = encl->base + modt->offset + c; sgx_reclaim_direct(); mutex_lock(&encl->lock); entry = sgx_encl_load_page(encl, addr); if (IS_ERR(entry)) { ret = PTR_ERR(entry) == -EBUSY ? -EAGAIN : -EFAULT; goto out_unlock; } /* * Borrow the logic from the Intel SDM. Regular pages * (SGX_PAGE_TYPE_REG) can change type to SGX_PAGE_TYPE_TCS * or SGX_PAGE_TYPE_TRIM but TCS pages can only be trimmed. * CET pages not supported yet. */ if (!(entry->type == SGX_PAGE_TYPE_REG || (entry->type == SGX_PAGE_TYPE_TCS && page_type == SGX_PAGE_TYPE_TRIM))) { ret = -EINVAL; goto out_unlock; } max_prot_restore = entry->vm_max_prot_bits; /* * Once a regular page becomes a TCS page it cannot be * changed back. 
So the maximum allowed protection reflects * the TCS page that is always RW from kernel perspective but * will be inaccessible from within enclave. Before doing * so, do make sure that the new page type continues to * respect the originally vetted page permissions. */ if (entry->type == SGX_PAGE_TYPE_REG && page_type == SGX_PAGE_TYPE_TCS) { if (~entry->vm_max_prot_bits & (VM_READ | VM_WRITE)) { ret = -EPERM; goto out_unlock; } prot = PROT_READ | PROT_WRITE; entry->vm_max_prot_bits = calc_vm_prot_bits(prot, 0); /* * Prevent page from being reclaimed while mutex * is released. */ if (sgx_unmark_page_reclaimable(entry->epc_page)) { ret = -EAGAIN; goto out_entry_changed; } /* * Do not keep encl->lock because of dependency on * mmap_lock acquired in sgx_zap_enclave_ptes(). */ mutex_unlock(&encl->lock); sgx_zap_enclave_ptes(encl, addr); mutex_lock(&encl->lock); sgx_mark_page_reclaimable(entry->epc_page); } /* Change EPC type */ epc_virt = sgx_get_epc_virt_addr(entry->epc_page); ret = __emodt(&secinfo, epc_virt); if (encls_faulted(ret)) { /* * All possible faults should be avoidable: * parameters have been checked, will only change * valid page types, and no concurrent * SGX1/SGX2 ENCLS instructions since these are * protected with mutex. */ pr_err_once("EMODT encountered exception %d\n", ENCLS_TRAPNR(ret)); ret = -EFAULT; goto out_entry_changed; } if (encls_failed(ret)) { modt->result = ret; ret = -EFAULT; goto out_entry_changed; } ret = sgx_enclave_etrack(encl); if (ret) { ret = -EFAULT; goto out_unlock; } entry->type = page_type; mutex_unlock(&encl->lock); } ret = 0; goto out; out_entry_changed: entry->vm_max_prot_bits = max_prot_restore; out_unlock: mutex_unlock(&encl->lock); out: modt->count = c; return ret; } /** * sgx_ioc_enclave_modify_types() - handler for %SGX_IOC_ENCLAVE_MODIFY_TYPES * @encl: an enclave pointer * @arg: userspace pointer to a &struct sgx_enclave_modify_types instance * * Ability to change the enclave page type supports the following use cases: * * * It is possible to add TCS pages to an enclave by changing the type of * regular pages (%SGX_PAGE_TYPE_REG) to TCS (%SGX_PAGE_TYPE_TCS) pages. * With this support the number of threads supported by an initialized * enclave can be increased dynamically. * * * Regular or TCS pages can dynamically be removed from an initialized * enclave by changing the page type to %SGX_PAGE_TYPE_TRIM. Changing the * page type to %SGX_PAGE_TYPE_TRIM marks the page for removal with actual * removal done by handler of %SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl() called * after ENCLU[EACCEPT] is run on %SGX_PAGE_TYPE_TRIM page from within the * enclave. * * Return: * - 0: Success * - -errno: Otherwise */ static long sgx_ioc_enclave_modify_types(struct sgx_encl *encl, void __user *arg) { struct sgx_enclave_modify_types params; long ret; ret = sgx_ioc_sgx2_ready(encl); if (ret) return ret; if (copy_from_user(&params, arg, sizeof(params))) return -EFAULT; if (sgx_validate_offset_length(encl, params.offset, params.length)) return -EINVAL; if (params.page_type & ~SGX_PAGE_TYPE_MASK) return -EINVAL; if (params.result || params.count) return -EINVAL; ret = sgx_enclave_modify_types(encl, &params); if (copy_to_user(arg, &params, sizeof(params))) return -EFAULT; return ret; } /** * sgx_encl_remove_pages() - Remove trimmed pages from SGX enclave * @encl: Enclave to which the pages belong * @params: Checked parameters from user on which pages need to be removed * * Return: * - 0: Success. * - -errno: Otherwise. 
*/ static long sgx_encl_remove_pages(struct sgx_encl *encl, struct sgx_enclave_remove_pages *params) { struct sgx_encl_page *entry; struct sgx_secinfo secinfo; unsigned long addr; unsigned long c; void *epc_virt; int ret; memset(&secinfo, 0, sizeof(secinfo)); secinfo.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X; for (c = 0 ; c < params->length; c += PAGE_SIZE) { addr = encl->base + params->offset + c; sgx_reclaim_direct(); mutex_lock(&encl->lock); entry = sgx_encl_load_page(encl, addr); if (IS_ERR(entry)) { ret = PTR_ERR(entry) == -EBUSY ? -EAGAIN : -EFAULT; goto out_unlock; } if (entry->type != SGX_PAGE_TYPE_TRIM) { ret = -EPERM; goto out_unlock; } /* * ENCLS[EMODPR] is a no-op instruction used to inform if * ENCLU[EACCEPT] was run from within the enclave. If * ENCLS[EMODPR] is run with RWX on a trimmed page that is * not yet accepted then it will return * %SGX_PAGE_NOT_MODIFIABLE, after the trimmed page is * accepted the instruction will encounter a page fault. */ epc_virt = sgx_get_epc_virt_addr(entry->epc_page); ret = __emodpr(&secinfo, epc_virt); if (!encls_faulted(ret) || ENCLS_TRAPNR(ret) != X86_TRAP_PF) { ret = -EPERM; goto out_unlock; } if (sgx_unmark_page_reclaimable(entry->epc_page)) { ret = -EBUSY; goto out_unlock; } /* * Do not keep encl->lock because of dependency on * mmap_lock acquired in sgx_zap_enclave_ptes(). */ mutex_unlock(&encl->lock); sgx_zap_enclave_ptes(encl, addr); mutex_lock(&encl->lock); sgx_encl_free_epc_page(entry->epc_page); encl->secs_child_cnt--; entry->epc_page = NULL; xa_erase(&encl->page_array, PFN_DOWN(entry->desc)); sgx_encl_shrink(encl, NULL); kfree(entry); mutex_unlock(&encl->lock); } ret = 0; goto out; out_unlock: mutex_unlock(&encl->lock); out: params->count = c; return ret; } /** * sgx_ioc_enclave_remove_pages() - handler for %SGX_IOC_ENCLAVE_REMOVE_PAGES * @encl: an enclave pointer * @arg: userspace pointer to &struct sgx_enclave_remove_pages instance * * Final step of the flow removing pages from an initialized enclave. The * complete flow is: * * 1) User changes the type of the pages to be removed to %SGX_PAGE_TYPE_TRIM * using the %SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl(). * 2) User approves the page removal by running ENCLU[EACCEPT] from within * the enclave. * 3) User initiates actual page removal using the * %SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl() that is handled here. * * First remove any page table entries pointing to the page and then proceed * with the actual removal of the enclave page and data in support of it. * * VA pages are not affected by this removal. It is thus possible that the * enclave may end up with more VA pages than needed to support all its * pages. 
* * Return: * - 0: Success * - -errno: Otherwise */ static long sgx_ioc_enclave_remove_pages(struct sgx_encl *encl, void __user *arg) { struct sgx_enclave_remove_pages params; long ret; ret = sgx_ioc_sgx2_ready(encl); if (ret) return ret; if (copy_from_user(&params, arg, sizeof(params))) return -EFAULT; if (sgx_validate_offset_length(encl, params.offset, params.length)) return -EINVAL; if (params.count) return -EINVAL; ret = sgx_encl_remove_pages(encl, &params); if (copy_to_user(arg, &params, sizeof(params))) return -EFAULT; return ret; } long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { struct sgx_encl *encl = filep->private_data; int ret; if (test_and_set_bit(SGX_ENCL_IOCTL, &encl->flags)) return -EBUSY; switch (cmd) { case SGX_IOC_ENCLAVE_CREATE: ret = sgx_ioc_enclave_create(encl, (void __user *)arg); break; case SGX_IOC_ENCLAVE_ADD_PAGES: ret = sgx_ioc_enclave_add_pages(encl, (void __user *)arg); break; case SGX_IOC_ENCLAVE_INIT: ret = sgx_ioc_enclave_init(encl, (void __user *)arg); break; case SGX_IOC_ENCLAVE_PROVISION: ret = sgx_ioc_enclave_provision(encl, (void __user *)arg); break; case SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS: ret = sgx_ioc_enclave_restrict_permissions(encl, (void __user *)arg); break; case SGX_IOC_ENCLAVE_MODIFY_TYPES: ret = sgx_ioc_enclave_modify_types(encl, (void __user *)arg); break; case SGX_IOC_ENCLAVE_REMOVE_PAGES: ret = sgx_ioc_enclave_remove_pages(encl, (void __user *)arg); break; default: ret = -ENOIOCTLCMD; break; } clear_bit(SGX_ENCL_IOCTL, &encl->flags); return ret; }
linux-master
arch/x86/kernel/cpu/sgx/ioctl.c
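The file above (arch/x86/kernel/cpu/sgx/ioctl.c) implements the enclave build flow as three ioctls: SGX_IOC_ENCLAVE_CREATE, SGX_IOC_ENCLAVE_ADD_PAGES and SGX_IOC_ENCLAVE_INIT. What follows is a heavily abbreviated userspace sketch of that flow, not a complete loader: it assumes the caller has already prepared a SECS page, the enclave image and a SIGSTRUCT, it adds the whole image as RWX regular pages in a single ADD_PAGES call, and it omits the TCS page and the later mmap() of the enclave range. The ioctl numbers and struct sgx_enclave_* definitions come from the uapi header; the local secinfo struct, the SECINFO_* constants and the build_enclave() helper name are defined here purely for illustration.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/sgx.h>                    /* SGX_IOC_ENCLAVE_*, struct sgx_enclave_* */

/* SECINFO is architectural and not part of the uapi header; local copy. */
struct secinfo {
        uint64_t flags;
        uint8_t  reserved[56];
} __attribute__((aligned(64)));

#define SECINFO_R       0x01UL
#define SECINFO_W       0x02UL
#define SECINFO_X       0x04UL
#define SECINFO_REG     (2UL << 8)      /* architectural PT_REG page type */

/* Returns the enclave fd on success, -1 on error. */
static int build_enclave(void *secs, void *image, size_t image_size,
                         void *sigstruct)
{
        struct secinfo secinfo = {
                .flags = SECINFO_REG | SECINFO_R | SECINFO_W | SECINFO_X,
        };
        struct sgx_enclave_create create = { .src = (uint64_t)secs };
        struct sgx_enclave_add_pages add = {
                .src     = (uint64_t)image,
                .offset  = 0,                   /* page-aligned, below SECS size */
                .length  = image_size,          /* multiple of PAGE_SIZE */
                .secinfo = (uint64_t)&secinfo,
                .flags   = SGX_PAGE_MEASURE,    /* extend MRENCLAVE for these pages */
        };
        struct sgx_enclave_init init = { .sigstruct = (uint64_t)sigstruct };
        int fd;

        fd = open("/dev/sgx_enclave", O_RDWR);
        if (fd < 0)
                return -1;

        if (ioctl(fd, SGX_IOC_ENCLAVE_CREATE, &create) ||
            ioctl(fd, SGX_IOC_ENCLAVE_ADD_PAGES, &add) ||
            ioctl(fd, SGX_IOC_ENCLAVE_INIT, &init)) {
                close(fd);
                return -1;
        }

        /* The enclave range is subsequently mapped by mmap()'ing this fd. */
        return fd;
}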
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2016-20 Intel Corporation. */ #include <linux/file.h> #include <linux/freezer.h> #include <linux/highmem.h> #include <linux/kthread.h> #include <linux/miscdevice.h> #include <linux/node.h> #include <linux/pagemap.h> #include <linux/ratelimit.h> #include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <asm/sgx.h> #include "driver.h" #include "encl.h" #include "encls.h" struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS]; static int sgx_nr_epc_sections; static struct task_struct *ksgxd_tsk; static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq); static DEFINE_XARRAY(sgx_epc_address_space); /* * These variables are part of the state of the reclaimer, and must be accessed * with sgx_reclaimer_lock acquired. */ static LIST_HEAD(sgx_active_page_list); static DEFINE_SPINLOCK(sgx_reclaimer_lock); static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0); /* Nodes with one or more EPC sections. */ static nodemask_t sgx_numa_mask; /* * Array with one list_head for each possible NUMA node. Each * list contains all the sgx_epc_section's which are on that * node. */ static struct sgx_numa_node *sgx_numa_nodes; static LIST_HEAD(sgx_dirty_page_list); /* * Reset post-kexec EPC pages to the uninitialized state. The pages are removed * from the input list, and made available for the page allocator. SECS pages * prepending their children in the input list are left intact. * * Return 0 when sanitization was successful or kthread was stopped, and the * number of unsanitized pages otherwise. */ static unsigned long __sgx_sanitize_pages(struct list_head *dirty_page_list) { unsigned long left_dirty = 0; struct sgx_epc_page *page; LIST_HEAD(dirty); int ret; /* dirty_page_list is thread-local, no need for a lock: */ while (!list_empty(dirty_page_list)) { if (kthread_should_stop()) return 0; page = list_first_entry(dirty_page_list, struct sgx_epc_page, list); /* * Checking page->poison without holding the node->lock * is racy, but losing the race (i.e. poison is set just * after the check) just means __eremove() will be uselessly * called for a page that sgx_free_epc_page() will put onto * the node->sgx_poison_page_list later. */ if (page->poison) { struct sgx_epc_section *section = &sgx_epc_sections[page->section]; struct sgx_numa_node *node = section->node; spin_lock(&node->lock); list_move(&page->list, &node->sgx_poison_page_list); spin_unlock(&node->lock); continue; } ret = __eremove(sgx_get_epc_virt_addr(page)); if (!ret) { /* * page is now sanitized. Make it available via the SGX * page allocator: */ list_del(&page->list); sgx_free_epc_page(page); } else { /* The page is not yet clean - move to the dirty list. 
*/ list_move_tail(&page->list, &dirty); left_dirty++; } cond_resched(); } list_splice(&dirty, dirty_page_list); return left_dirty; } static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page) { struct sgx_encl_page *page = epc_page->owner; struct sgx_encl *encl = page->encl; struct sgx_encl_mm *encl_mm; bool ret = true; int idx; idx = srcu_read_lock(&encl->srcu); list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) { if (!mmget_not_zero(encl_mm->mm)) continue; mmap_read_lock(encl_mm->mm); ret = !sgx_encl_test_and_clear_young(encl_mm->mm, page); mmap_read_unlock(encl_mm->mm); mmput_async(encl_mm->mm); if (!ret) break; } srcu_read_unlock(&encl->srcu, idx); if (!ret) return false; return true; } static void sgx_reclaimer_block(struct sgx_epc_page *epc_page) { struct sgx_encl_page *page = epc_page->owner; unsigned long addr = page->desc & PAGE_MASK; struct sgx_encl *encl = page->encl; int ret; sgx_zap_enclave_ptes(encl, addr); mutex_lock(&encl->lock); ret = __eblock(sgx_get_epc_virt_addr(epc_page)); if (encls_failed(ret)) ENCLS_WARN(ret, "EBLOCK"); mutex_unlock(&encl->lock); } static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot, struct sgx_backing *backing) { struct sgx_pageinfo pginfo; int ret; pginfo.addr = 0; pginfo.secs = 0; pginfo.contents = (unsigned long)kmap_local_page(backing->contents); pginfo.metadata = (unsigned long)kmap_local_page(backing->pcmd) + backing->pcmd_offset; ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot); set_page_dirty(backing->pcmd); set_page_dirty(backing->contents); kunmap_local((void *)(unsigned long)(pginfo.metadata - backing->pcmd_offset)); kunmap_local((void *)(unsigned long)pginfo.contents); return ret; } void sgx_ipi_cb(void *info) { } /* * Swap page to the regular memory transformed to the blocked state by using * EBLOCK, which means that it can no longer be referenced (no new TLB entries). * * The first trial just tries to write the page assuming that some other thread * has reset the count for threads inside the enclave by using ETRACK, and * previous thread count has been zeroed out. The second trial calls ETRACK * before EWB. If that fails we kick all the HW threads out, and then do EWB, * which should be guaranteed the succeed. */ static void sgx_encl_ewb(struct sgx_epc_page *epc_page, struct sgx_backing *backing) { struct sgx_encl_page *encl_page = epc_page->owner; struct sgx_encl *encl = encl_page->encl; struct sgx_va_page *va_page; unsigned int va_offset; void *va_slot; int ret; encl_page->desc &= ~SGX_ENCL_PAGE_BEING_RECLAIMED; va_page = list_first_entry(&encl->va_pages, struct sgx_va_page, list); va_offset = sgx_alloc_va_slot(va_page); va_slot = sgx_get_epc_virt_addr(va_page->epc_page) + va_offset; if (sgx_va_page_full(va_page)) list_move_tail(&va_page->list, &encl->va_pages); ret = __sgx_encl_ewb(epc_page, va_slot, backing); if (ret == SGX_NOT_TRACKED) { ret = __etrack(sgx_get_epc_virt_addr(encl->secs.epc_page)); if (ret) { if (encls_failed(ret)) ENCLS_WARN(ret, "ETRACK"); } ret = __sgx_encl_ewb(epc_page, va_slot, backing); if (ret == SGX_NOT_TRACKED) { /* * Slow path, send IPIs to kick cpus out of the * enclave. Note, it's imperative that the cpu * mask is generated *after* ETRACK, else we'll * miss cpus that entered the enclave between * generating the mask and incrementing epoch. 
*/ on_each_cpu_mask(sgx_encl_cpumask(encl), sgx_ipi_cb, NULL, 1); ret = __sgx_encl_ewb(epc_page, va_slot, backing); } } if (ret) { if (encls_failed(ret)) ENCLS_WARN(ret, "EWB"); sgx_free_va_slot(va_page, va_offset); } else { encl_page->desc |= va_offset; encl_page->va_page = va_page; } } static void sgx_reclaimer_write(struct sgx_epc_page *epc_page, struct sgx_backing *backing) { struct sgx_encl_page *encl_page = epc_page->owner; struct sgx_encl *encl = encl_page->encl; struct sgx_backing secs_backing; int ret; mutex_lock(&encl->lock); sgx_encl_ewb(epc_page, backing); encl_page->epc_page = NULL; encl->secs_child_cnt--; sgx_encl_put_backing(backing); if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) { ret = sgx_encl_alloc_backing(encl, PFN_DOWN(encl->size), &secs_backing); if (ret) goto out; sgx_encl_ewb(encl->secs.epc_page, &secs_backing); sgx_encl_free_epc_page(encl->secs.epc_page); encl->secs.epc_page = NULL; sgx_encl_put_backing(&secs_backing); } out: mutex_unlock(&encl->lock); } /* * Take a fixed number of pages from the head of the active page pool and * reclaim them to the enclave's private shmem files. Skip the pages, which have * been accessed since the last scan. Move those pages to the tail of active * page pool so that the pages get scanned in LRU like fashion. * * Batch process a chunk of pages (at the moment 16) in order to degrade amount * of IPI's and ETRACK's potentially required. sgx_encl_ewb() does degrade a bit * among the HW threads with three stage EWB pipeline (EWB, ETRACK + EWB and IPI * + EWB) but not sufficiently. Reclaiming one page at a time would also be * problematic as it would increase the lock contention too much, which would * halt forward progress. */ static void sgx_reclaim_pages(void) { struct sgx_epc_page *chunk[SGX_NR_TO_SCAN]; struct sgx_backing backing[SGX_NR_TO_SCAN]; struct sgx_encl_page *encl_page; struct sgx_epc_page *epc_page; pgoff_t page_index; int cnt = 0; int ret; int i; spin_lock(&sgx_reclaimer_lock); for (i = 0; i < SGX_NR_TO_SCAN; i++) { if (list_empty(&sgx_active_page_list)) break; epc_page = list_first_entry(&sgx_active_page_list, struct sgx_epc_page, list); list_del_init(&epc_page->list); encl_page = epc_page->owner; if (kref_get_unless_zero(&encl_page->encl->refcount) != 0) chunk[cnt++] = epc_page; else /* The owner is freeing the page. No need to add the * page back to the list of reclaimable pages. 
*/ epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED; } spin_unlock(&sgx_reclaimer_lock); for (i = 0; i < cnt; i++) { epc_page = chunk[i]; encl_page = epc_page->owner; if (!sgx_reclaimer_age(epc_page)) goto skip; page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base); mutex_lock(&encl_page->encl->lock); ret = sgx_encl_alloc_backing(encl_page->encl, page_index, &backing[i]); if (ret) { mutex_unlock(&encl_page->encl->lock); goto skip; } encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED; mutex_unlock(&encl_page->encl->lock); continue; skip: spin_lock(&sgx_reclaimer_lock); list_add_tail(&epc_page->list, &sgx_active_page_list); spin_unlock(&sgx_reclaimer_lock); kref_put(&encl_page->encl->refcount, sgx_encl_release); chunk[i] = NULL; } for (i = 0; i < cnt; i++) { epc_page = chunk[i]; if (epc_page) sgx_reclaimer_block(epc_page); } for (i = 0; i < cnt; i++) { epc_page = chunk[i]; if (!epc_page) continue; encl_page = epc_page->owner; sgx_reclaimer_write(epc_page, &backing[i]); kref_put(&encl_page->encl->refcount, sgx_encl_release); epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED; sgx_free_epc_page(epc_page); } } static bool sgx_should_reclaim(unsigned long watermark) { return atomic_long_read(&sgx_nr_free_pages) < watermark && !list_empty(&sgx_active_page_list); } /* * sgx_reclaim_direct() should be called (without enclave's mutex held) * in locations where SGX memory resources might be low and might be * needed in order to make forward progress. */ void sgx_reclaim_direct(void) { if (sgx_should_reclaim(SGX_NR_LOW_PAGES)) sgx_reclaim_pages(); } static int ksgxd(void *p) { set_freezable(); /* * Sanitize pages in order to recover from kexec(). The 2nd pass is * required for SECS pages, whose child pages blocked EREMOVE. */ __sgx_sanitize_pages(&sgx_dirty_page_list); WARN_ON(__sgx_sanitize_pages(&sgx_dirty_page_list)); while (!kthread_should_stop()) { if (try_to_freeze()) continue; wait_event_freezable(ksgxd_waitq, kthread_should_stop() || sgx_should_reclaim(SGX_NR_HIGH_PAGES)); if (sgx_should_reclaim(SGX_NR_HIGH_PAGES)) sgx_reclaim_pages(); cond_resched(); } return 0; } static bool __init sgx_page_reclaimer_init(void) { struct task_struct *tsk; tsk = kthread_run(ksgxd, NULL, "ksgxd"); if (IS_ERR(tsk)) return false; ksgxd_tsk = tsk; return true; } bool current_is_ksgxd(void) { return current == ksgxd_tsk; } static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid) { struct sgx_numa_node *node = &sgx_numa_nodes[nid]; struct sgx_epc_page *page = NULL; spin_lock(&node->lock); if (list_empty(&node->free_page_list)) { spin_unlock(&node->lock); return NULL; } page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list); list_del_init(&page->list); page->flags = 0; spin_unlock(&node->lock); atomic_long_dec(&sgx_nr_free_pages); return page; } /** * __sgx_alloc_epc_page() - Allocate an EPC page * * Iterate through NUMA nodes and reserve ia free EPC page to the caller. Start * from the NUMA node, where the caller is executing. * * Return: * - an EPC page: A borrowed EPC pages were available. * - NULL: Out of EPC pages. 
*/ struct sgx_epc_page *__sgx_alloc_epc_page(void) { struct sgx_epc_page *page; int nid_of_current = numa_node_id(); int nid = nid_of_current; if (node_isset(nid_of_current, sgx_numa_mask)) { page = __sgx_alloc_epc_page_from_node(nid_of_current); if (page) return page; } /* Fall back to the non-local NUMA nodes: */ while (true) { nid = next_node_in(nid, sgx_numa_mask); if (nid == nid_of_current) break; page = __sgx_alloc_epc_page_from_node(nid); if (page) return page; } return ERR_PTR(-ENOMEM); } /** * sgx_mark_page_reclaimable() - Mark a page as reclaimable * @page: EPC page * * Mark a page as reclaimable and add it to the active page list. Pages * are automatically removed from the active list when freed. */ void sgx_mark_page_reclaimable(struct sgx_epc_page *page) { spin_lock(&sgx_reclaimer_lock); page->flags |= SGX_EPC_PAGE_RECLAIMER_TRACKED; list_add_tail(&page->list, &sgx_active_page_list); spin_unlock(&sgx_reclaimer_lock); } /** * sgx_unmark_page_reclaimable() - Remove a page from the reclaim list * @page: EPC page * * Clear the reclaimable flag and remove the page from the active page list. * * Return: * 0 on success, * -EBUSY if the page is in the process of being reclaimed */ int sgx_unmark_page_reclaimable(struct sgx_epc_page *page) { spin_lock(&sgx_reclaimer_lock); if (page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED) { /* The page is being reclaimed. */ if (list_empty(&page->list)) { spin_unlock(&sgx_reclaimer_lock); return -EBUSY; } list_del(&page->list); page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED; } spin_unlock(&sgx_reclaimer_lock); return 0; } /** * sgx_alloc_epc_page() - Allocate an EPC page * @owner: the owner of the EPC page * @reclaim: reclaim pages if necessary * * Iterate through EPC sections and borrow a free EPC page to the caller. When a * page is no longer needed it must be released with sgx_free_epc_page(). If * @reclaim is set to true, directly reclaim pages when we are out of pages. No * mm's can be locked when @reclaim is set to true. * * Finally, wake up ksgxd when the number of pages goes below the watermark * before returning back to the caller. * * Return: * an EPC page, * -errno on error */ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim) { struct sgx_epc_page *page; for ( ; ; ) { page = __sgx_alloc_epc_page(); if (!IS_ERR(page)) { page->owner = owner; break; } if (list_empty(&sgx_active_page_list)) return ERR_PTR(-ENOMEM); if (!reclaim) { page = ERR_PTR(-EBUSY); break; } if (signal_pending(current)) { page = ERR_PTR(-ERESTARTSYS); break; } sgx_reclaim_pages(); cond_resched(); } if (sgx_should_reclaim(SGX_NR_LOW_PAGES)) wake_up(&ksgxd_waitq); return page; } /** * sgx_free_epc_page() - Free an EPC page * @page: an EPC page * * Put the EPC page back to the list of free pages. It's the caller's * responsibility to make sure that the page is in uninitialized state. In other * words, do EREMOVE, EWB or whatever operation is necessary before calling * this function. 
*/ void sgx_free_epc_page(struct sgx_epc_page *page) { struct sgx_epc_section *section = &sgx_epc_sections[page->section]; struct sgx_numa_node *node = section->node; spin_lock(&node->lock); page->owner = NULL; if (page->poison) list_add(&page->list, &node->sgx_poison_page_list); else list_add_tail(&page->list, &node->free_page_list); page->flags = SGX_EPC_PAGE_IS_FREE; spin_unlock(&node->lock); atomic_long_inc(&sgx_nr_free_pages); } static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size, unsigned long index, struct sgx_epc_section *section) { unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long i; section->virt_addr = memremap(phys_addr, size, MEMREMAP_WB); if (!section->virt_addr) return false; section->pages = vmalloc(nr_pages * sizeof(struct sgx_epc_page)); if (!section->pages) { memunmap(section->virt_addr); return false; } section->phys_addr = phys_addr; xa_store_range(&sgx_epc_address_space, section->phys_addr, phys_addr + size - 1, section, GFP_KERNEL); for (i = 0; i < nr_pages; i++) { section->pages[i].section = index; section->pages[i].flags = 0; section->pages[i].owner = NULL; section->pages[i].poison = 0; list_add_tail(&section->pages[i].list, &sgx_dirty_page_list); } return true; } bool arch_is_platform_page(u64 paddr) { return !!xa_load(&sgx_epc_address_space, paddr); } EXPORT_SYMBOL_GPL(arch_is_platform_page); static struct sgx_epc_page *sgx_paddr_to_page(u64 paddr) { struct sgx_epc_section *section; section = xa_load(&sgx_epc_address_space, paddr); if (!section) return NULL; return &section->pages[PFN_DOWN(paddr - section->phys_addr)]; } /* * Called in process context to handle a hardware reported * error in an SGX EPC page. * If the MF_ACTION_REQUIRED bit is set in flags, then the * context is the task that consumed the poison data. Otherwise * this is called from a kernel thread unrelated to the page. */ int arch_memory_failure(unsigned long pfn, int flags) { struct sgx_epc_page *page = sgx_paddr_to_page(pfn << PAGE_SHIFT); struct sgx_epc_section *section; struct sgx_numa_node *node; /* * mm/memory-failure.c calls this routine for all errors * where there isn't a "struct page" for the address. But that * includes other address ranges besides SGX. */ if (!page) return -ENXIO; /* * If poison was consumed synchronously. Send a SIGBUS to * the task. Hardware has already exited the SGX enclave and * will not allow re-entry to an enclave that has a memory * error. The signal may help the task understand why the * enclave is broken. */ if (flags & MF_ACTION_REQUIRED) force_sig(SIGBUS); section = &sgx_epc_sections[page->section]; node = section->node; spin_lock(&node->lock); /* Already poisoned? Nothing more to do */ if (page->poison) goto out; page->poison = 1; /* * If the page is on a free list, move it to the per-node * poison page list. */ if (page->flags & SGX_EPC_PAGE_IS_FREE) { list_move(&page->list, &node->sgx_poison_page_list); goto out; } /* * TBD: Add additional plumbing to enable pre-emptive * action for asynchronous poison notification. Until * then just hope that the poison: * a) is not accessed - sgx_free_epc_page() will deal with it * when the user gives it back * b) results in a recoverable machine check rather than * a fatal one */ out: spin_unlock(&node->lock); return 0; } /** * A section metric is concatenated in a way that @low bits 12-31 define the * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the * metric. 
*/ static inline u64 __init sgx_calc_section_metric(u64 low, u64 high) { return (low & GENMASK_ULL(31, 12)) + ((high & GENMASK_ULL(19, 0)) << 32); } #ifdef CONFIG_NUMA static ssize_t sgx_total_bytes_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", sgx_numa_nodes[dev->id].size); } static DEVICE_ATTR_RO(sgx_total_bytes); static umode_t arch_node_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { /* Make all x86/ attributes invisible when SGX is not initialized: */ if (nodes_empty(sgx_numa_mask)) return 0; return attr->mode; } static struct attribute *arch_node_dev_attrs[] = { &dev_attr_sgx_total_bytes.attr, NULL, }; const struct attribute_group arch_node_dev_group = { .name = "x86", .attrs = arch_node_dev_attrs, .is_visible = arch_node_attr_is_visible, }; static void __init arch_update_sysfs_visibility(int nid) { struct node *node = node_devices[nid]; int ret; ret = sysfs_update_group(&node->dev.kobj, &arch_node_dev_group); if (ret) pr_err("sysfs update failed (%d), files may be invisible", ret); } #else /* !CONFIG_NUMA */ static void __init arch_update_sysfs_visibility(int nid) {} #endif static bool __init sgx_page_cache_init(void) { u32 eax, ebx, ecx, edx, type; u64 pa, size; int nid; int i; sgx_numa_nodes = kmalloc_array(num_possible_nodes(), sizeof(*sgx_numa_nodes), GFP_KERNEL); if (!sgx_numa_nodes) return false; for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) { cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx); type = eax & SGX_CPUID_EPC_MASK; if (type == SGX_CPUID_EPC_INVALID) break; if (type != SGX_CPUID_EPC_SECTION) { pr_err_once("Unknown EPC section type: %u\n", type); break; } pa = sgx_calc_section_metric(eax, ebx); size = sgx_calc_section_metric(ecx, edx); pr_info("EPC section 0x%llx-0x%llx\n", pa, pa + size - 1); if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i])) { pr_err("No free memory for an EPC section\n"); break; } nid = numa_map_to_online_node(phys_to_target_node(pa)); if (nid == NUMA_NO_NODE) { /* The physical address is already printed above. */ pr_warn(FW_BUG "Unable to map EPC section to online node. Fallback to the NUMA node 0.\n"); nid = 0; } if (!node_isset(nid, sgx_numa_mask)) { spin_lock_init(&sgx_numa_nodes[nid].lock); INIT_LIST_HEAD(&sgx_numa_nodes[nid].free_page_list); INIT_LIST_HEAD(&sgx_numa_nodes[nid].sgx_poison_page_list); node_set(nid, sgx_numa_mask); sgx_numa_nodes[nid].size = 0; /* Make SGX-specific node sysfs files visible: */ arch_update_sysfs_visibility(nid); } sgx_epc_sections[i].node = &sgx_numa_nodes[nid]; sgx_numa_nodes[nid].size += size; sgx_nr_epc_sections++; } if (!sgx_nr_epc_sections) { pr_err("There are zero EPC sections.\n"); return false; } return true; } /* * Update the SGX_LEPUBKEYHASH MSRs to the values specified by caller. * Bare-metal driver requires to update them to hash of enclave's signer * before EINIT. KVM needs to update them to guest's virtual MSR values * before doing EINIT from guest. 
*/ void sgx_update_lepubkeyhash(u64 *lepubkeyhash) { int i; WARN_ON_ONCE(preemptible()); for (i = 0; i < 4; i++) wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]); } const struct file_operations sgx_provision_fops = { .owner = THIS_MODULE, }; static struct miscdevice sgx_dev_provision = { .minor = MISC_DYNAMIC_MINOR, .name = "sgx_provision", .nodename = "sgx_provision", .fops = &sgx_provision_fops, }; /** * sgx_set_attribute() - Update allowed attributes given file descriptor * @allowed_attributes: Pointer to allowed enclave attributes * @attribute_fd: File descriptor for specific attribute * * Append enclave attribute indicated by file descriptor to allowed * attributes. Currently only SGX_ATTR_PROVISIONKEY indicated by * /dev/sgx_provision is supported. * * Return: * -0: SGX_ATTR_PROVISIONKEY is appended to allowed_attributes * -EINVAL: Invalid, or not supported file descriptor */ int sgx_set_attribute(unsigned long *allowed_attributes, unsigned int attribute_fd) { struct fd f = fdget(attribute_fd); if (!f.file) return -EINVAL; if (f.file->f_op != &sgx_provision_fops) { fdput(f); return -EINVAL; } *allowed_attributes |= SGX_ATTR_PROVISIONKEY; fdput(f); return 0; } EXPORT_SYMBOL_GPL(sgx_set_attribute); static int __init sgx_init(void) { int ret; int i; if (!cpu_feature_enabled(X86_FEATURE_SGX)) return -ENODEV; if (!sgx_page_cache_init()) return -ENOMEM; if (!sgx_page_reclaimer_init()) { ret = -ENOMEM; goto err_page_cache; } ret = misc_register(&sgx_dev_provision); if (ret) goto err_kthread; /* * Always try to initialize the native *and* KVM drivers. * The KVM driver is less picky than the native one and * can function if the native one is not supported on the * current system or fails to initialize. * * Error out only if both fail to initialize. */ ret = sgx_drv_init(); if (sgx_vepc_init() && ret) goto err_provision; return 0; err_provision: misc_deregister(&sgx_dev_provision); err_kthread: kthread_stop(ksgxd_tsk); err_page_cache: for (i = 0; i < sgx_nr_epc_sections; i++) { vfree(sgx_epc_sections[i].pages); memunmap(sgx_epc_sections[i].virt_addr); } return ret; } device_initcall(sgx_init);
linux-master
arch/x86/kernel/cpu/sgx/main.c
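sgx_page_cache_init() above discovers EPC sections from CPUID leaf 0x12, subleaves 2 and up, and sgx_calc_section_metric() stitches the 52-bit base and size out of two registers each. The same enumeration can be reproduced from userspace; the sketch below is illustrative only, with the SGX_CPUID_* constant names copied from the kernel for readability and the GENMASK masks written out as literals.

#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

#define SGX_CPUID               0x12
#define SGX_CPUID_EPC           2       /* first EPC subleaf */
#define SGX_CPUID_EPC_MASK      0xf
#define SGX_CPUID_EPC_SECTION   0x1

/* low bits 12-31 -> metric bits 12-31, high bits 0-19 -> metric bits 32-51 */
static uint64_t section_metric(unsigned int low, unsigned int high)
{
        return (low & 0xfffff000u) + ((uint64_t)(high & 0xfffffu) << 32);
}

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        unsigned int i;

        for (i = 0; ; i++) {
                if (!__get_cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC,
                                       &eax, &ebx, &ecx, &edx))
                        break;
                if ((eax & SGX_CPUID_EPC_MASK) != SGX_CPUID_EPC_SECTION)
                        break;  /* invalid/unknown type ends the enumeration */

                printf("EPC section %u: base 0x%llx size 0x%llx\n", i,
                       (unsigned long long)section_metric(eax, ebx),
                       (unsigned long long)section_metric(ecx, edx));
        }
        return 0;
}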
// SPDX-License-Identifier: GPL-2.0 /* * Device driver to expose SGX enclave memory to KVM guests. * * Copyright(c) 2021 Intel Corporation. */ #include <linux/miscdevice.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/xarray.h> #include <asm/sgx.h> #include <uapi/asm/sgx.h> #include "encls.h" #include "sgx.h" struct sgx_vepc { struct xarray page_array; struct mutex lock; }; /* * Temporary SECS pages that cannot be EREMOVE'd due to having child in other * virtual EPC instances, and the lock to protect it. */ static struct mutex zombie_secs_pages_lock; static struct list_head zombie_secs_pages; static int __sgx_vepc_fault(struct sgx_vepc *vepc, struct vm_area_struct *vma, unsigned long addr) { struct sgx_epc_page *epc_page; unsigned long index, pfn; int ret; WARN_ON(!mutex_is_locked(&vepc->lock)); /* Calculate index of EPC page in virtual EPC's page_array */ index = vma->vm_pgoff + PFN_DOWN(addr - vma->vm_start); epc_page = xa_load(&vepc->page_array, index); if (epc_page) return 0; epc_page = sgx_alloc_epc_page(vepc, false); if (IS_ERR(epc_page)) return PTR_ERR(epc_page); ret = xa_err(xa_store(&vepc->page_array, index, epc_page, GFP_KERNEL)); if (ret) goto err_free; pfn = PFN_DOWN(sgx_get_epc_phys_addr(epc_page)); ret = vmf_insert_pfn(vma, addr, pfn); if (ret != VM_FAULT_NOPAGE) { ret = -EFAULT; goto err_delete; } return 0; err_delete: xa_erase(&vepc->page_array, index); err_free: sgx_free_epc_page(epc_page); return ret; } static vm_fault_t sgx_vepc_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct sgx_vepc *vepc = vma->vm_private_data; int ret; mutex_lock(&vepc->lock); ret = __sgx_vepc_fault(vepc, vma, vmf->address); mutex_unlock(&vepc->lock); if (!ret) return VM_FAULT_NOPAGE; if (ret == -EBUSY && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) { mmap_read_unlock(vma->vm_mm); return VM_FAULT_RETRY; } return VM_FAULT_SIGBUS; } static const struct vm_operations_struct sgx_vepc_vm_ops = { .fault = sgx_vepc_fault, }; static int sgx_vepc_mmap(struct file *file, struct vm_area_struct *vma) { struct sgx_vepc *vepc = file->private_data; if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vma->vm_ops = &sgx_vepc_vm_ops; /* Don't copy VMA in fork() */ vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY); vma->vm_private_data = vepc; return 0; } static int sgx_vepc_remove_page(struct sgx_epc_page *epc_page) { /* * Take a previously guest-owned EPC page and return it to the * general EPC page pool. * * Guests can not be trusted to have left this page in a good * state, so run EREMOVE on the page unconditionally. In the * case that a guest properly EREMOVE'd this page, a superfluous * EREMOVE is harmless. */ return __eremove(sgx_get_epc_virt_addr(epc_page)); } static int sgx_vepc_free_page(struct sgx_epc_page *epc_page) { int ret = sgx_vepc_remove_page(epc_page); if (ret) { /* * Only SGX_CHILD_PRESENT is expected, which is because of * EREMOVE'ing an SECS still with child, in which case it can * be handled by EREMOVE'ing the SECS again after all pages in * virtual EPC have been EREMOVE'd. See comments in below in * sgx_vepc_release(). * * The user of virtual EPC (KVM) needs to guarantee there's no * logical processor is still running in the enclave in guest, * otherwise EREMOVE will get SGX_ENCLAVE_ACT which cannot be * handled here. 
*/ WARN_ONCE(ret != SGX_CHILD_PRESENT, EREMOVE_ERROR_MESSAGE, ret, ret); return ret; } sgx_free_epc_page(epc_page); return 0; } static long sgx_vepc_remove_all(struct sgx_vepc *vepc) { struct sgx_epc_page *entry; unsigned long index; long failures = 0; xa_for_each(&vepc->page_array, index, entry) { int ret = sgx_vepc_remove_page(entry); if (ret) { if (ret == SGX_CHILD_PRESENT) { /* The page is a SECS, userspace will retry. */ failures++; } else { /* * Report errors due to #GP or SGX_ENCLAVE_ACT; do not * WARN, as userspace can induce said failures by * calling the ioctl concurrently on multiple vEPCs or * while one or more CPUs is running the enclave. Only * a #PF on EREMOVE indicates a kernel/hardware issue. */ WARN_ON_ONCE(encls_faulted(ret) && ENCLS_TRAPNR(ret) != X86_TRAP_GP); return -EBUSY; } } cond_resched(); } /* * Return the number of SECS pages that failed to be removed, so * userspace knows that it has to retry. */ return failures; } static int sgx_vepc_release(struct inode *inode, struct file *file) { struct sgx_vepc *vepc = file->private_data; struct sgx_epc_page *epc_page, *tmp, *entry; unsigned long index; LIST_HEAD(secs_pages); xa_for_each(&vepc->page_array, index, entry) { /* * Remove all normal, child pages. sgx_vepc_free_page() * will fail if EREMOVE fails, but this is OK and expected on * SECS pages. Those can only be EREMOVE'd *after* all their * child pages. Retries below will clean them up. */ if (sgx_vepc_free_page(entry)) continue; xa_erase(&vepc->page_array, index); cond_resched(); } /* * Retry EREMOVE'ing pages. This will clean up any SECS pages that * only had children in this 'epc' area. */ xa_for_each(&vepc->page_array, index, entry) { epc_page = entry; /* * An EREMOVE failure here means that the SECS page still * has children. But, since all children in this 'sgx_vepc' * have been removed, the SECS page must have a child on * another instance. */ if (sgx_vepc_free_page(epc_page)) list_add_tail(&epc_page->list, &secs_pages); xa_erase(&vepc->page_array, index); cond_resched(); } /* * SECS pages are "pinned" by child pages, and "unpinned" once all * children have been EREMOVE'd. A child page in this instance * may have pinned an SECS page encountered in an earlier release(), * creating a zombie. Since some children were EREMOVE'd above, * try to EREMOVE all zombies in the hopes that one was unpinned. */ mutex_lock(&zombie_secs_pages_lock); list_for_each_entry_safe(epc_page, tmp, &zombie_secs_pages, list) { /* * Speculatively remove the page from the list of zombies, * if the page is successfully EREMOVE'd it will be added to * the list of free pages. If EREMOVE fails, throw the page * on the local list, which will be spliced on at the end. 
*/ list_del(&epc_page->list); if (sgx_vepc_free_page(epc_page)) list_add_tail(&epc_page->list, &secs_pages); cond_resched(); } if (!list_empty(&secs_pages)) list_splice_tail(&secs_pages, &zombie_secs_pages); mutex_unlock(&zombie_secs_pages_lock); xa_destroy(&vepc->page_array); kfree(vepc); return 0; } static int sgx_vepc_open(struct inode *inode, struct file *file) { struct sgx_vepc *vepc; vepc = kzalloc(sizeof(struct sgx_vepc), GFP_KERNEL); if (!vepc) return -ENOMEM; mutex_init(&vepc->lock); xa_init(&vepc->page_array); file->private_data = vepc; return 0; } static long sgx_vepc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct sgx_vepc *vepc = file->private_data; switch (cmd) { case SGX_IOC_VEPC_REMOVE_ALL: if (arg) return -EINVAL; return sgx_vepc_remove_all(vepc); default: return -ENOTTY; } } static const struct file_operations sgx_vepc_fops = { .owner = THIS_MODULE, .open = sgx_vepc_open, .unlocked_ioctl = sgx_vepc_ioctl, .compat_ioctl = sgx_vepc_ioctl, .release = sgx_vepc_release, .mmap = sgx_vepc_mmap, }; static struct miscdevice sgx_vepc_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "sgx_vepc", .nodename = "sgx_vepc", .fops = &sgx_vepc_fops, }; int __init sgx_vepc_init(void) { /* SGX virtualization requires KVM to work */ if (!cpu_feature_enabled(X86_FEATURE_VMX)) return -ENODEV; INIT_LIST_HEAD(&zombie_secs_pages); mutex_init(&zombie_secs_pages_lock); return misc_register(&sgx_vepc_dev); } /** * sgx_virt_ecreate() - Run ECREATE on behalf of guest * @pageinfo: Pointer to PAGEINFO structure * @secs: Userspace pointer to SECS page * @trapnr: trap number injected to guest in case of ECREATE error * * Run ECREATE on behalf of guest after KVM traps ECREATE for the purpose * of enforcing policies of guest's enclaves, and return the trap number * which should be injected to guest in case of any ECREATE error. * * Return: * - 0: ECREATE was successful. * - <0: on error. */ int sgx_virt_ecreate(struct sgx_pageinfo *pageinfo, void __user *secs, int *trapnr) { int ret; /* * @secs is an untrusted, userspace-provided address. It comes from * KVM and is assumed to be a valid pointer which points somewhere in * userspace. This can fault and call SGX or other fault handlers when * userspace mapping @secs doesn't exist. * * Add a WARN() to make sure @secs is already valid userspace pointer * from caller (KVM), who should already have handled invalid pointer * case (for instance, made by malicious guest). All other checks, * such as alignment of @secs, are deferred to ENCLS itself. */ if (WARN_ON_ONCE(!access_ok(secs, PAGE_SIZE))) return -EINVAL; __uaccess_begin(); ret = __ecreate(pageinfo, (void *)secs); __uaccess_end(); if (encls_faulted(ret)) { *trapnr = ENCLS_TRAPNR(ret); return -EFAULT; } /* ECREATE doesn't return an error code, it faults or succeeds. */ WARN_ON_ONCE(ret); return 0; } EXPORT_SYMBOL_GPL(sgx_virt_ecreate); static int __sgx_virt_einit(void __user *sigstruct, void __user *token, void __user *secs) { int ret; /* * Make sure all userspace pointers from caller (KVM) are valid. * All other checks deferred to ENCLS itself. Also see comment * for @secs in sgx_virt_ecreate(). 
*/ #define SGX_EINITTOKEN_SIZE 304 if (WARN_ON_ONCE(!access_ok(sigstruct, sizeof(struct sgx_sigstruct)) || !access_ok(token, SGX_EINITTOKEN_SIZE) || !access_ok(secs, PAGE_SIZE))) return -EINVAL; __uaccess_begin(); ret = __einit((void *)sigstruct, (void *)token, (void *)secs); __uaccess_end(); return ret; } /** * sgx_virt_einit() - Run EINIT on behalf of guest * @sigstruct: Userspace pointer to SIGSTRUCT structure * @token: Userspace pointer to EINITTOKEN structure * @secs: Userspace pointer to SECS page * @lepubkeyhash: Pointer to guest's *virtual* SGX_LEPUBKEYHASH MSR values * @trapnr: trap number injected to guest in case of EINIT error * * Run EINIT on behalf of guest after KVM traps EINIT. If SGX_LC is available * in host, SGX driver may rewrite the hardware values at wish, therefore KVM * needs to update hardware values to guest's virtual MSR values in order to * ensure EINIT is executed with expected hardware values. * * Return: * - 0: EINIT was successful. * - <0: on error. */ int sgx_virt_einit(void __user *sigstruct, void __user *token, void __user *secs, u64 *lepubkeyhash, int *trapnr) { int ret; if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) { ret = __sgx_virt_einit(sigstruct, token, secs); } else { preempt_disable(); sgx_update_lepubkeyhash(lepubkeyhash); ret = __sgx_virt_einit(sigstruct, token, secs); preempt_enable(); } /* Propagate up the error from the WARN_ON_ONCE in __sgx_virt_einit() */ if (ret == -EINVAL) return ret; if (encls_faulted(ret)) { *trapnr = ENCLS_TRAPNR(ret); return -EFAULT; } return ret; } EXPORT_SYMBOL_GPL(sgx_virt_einit);
linux-master
arch/x86/kernel/cpu/sgx/virt.c
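arch/x86/kernel/cpu/sgx/virt.c above backs guest EPC with /dev/sgx_vepc: pages are allocated lazily in sgx_vepc_fault(), and SGX_IOC_VEPC_REMOVE_ALL lets the VMM force EREMOVE on every page, for example across a guest reboot. A rough sketch of the userspace side follows; the helper names and the choice of protection bits are illustrative, and error handling is minimal.

#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <asm/sgx.h>            /* SGX_IOC_VEPC_REMOVE_ALL */

/* Map one virtual EPC section of @size bytes; returns MAP_FAILED on error. */
static void *map_guest_epc(size_t size, int *fd_out)
{
        void *epc;
        int fd;

        fd = open("/dev/sgx_vepc", O_RDWR);
        if (fd < 0)
                return MAP_FAILED;

        /* Must be MAP_SHARED; sgx_vepc_mmap() rejects private mappings. */
        epc = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_SHARED, fd, 0);
        if (epc == MAP_FAILED) {
                close(fd);
                return MAP_FAILED;
        }

        *fd_out = fd;
        return epc;
}

/* Remove all EPC pages, retrying while SECS pages with children remain. */
static void reset_guest_epc(int fd)
{
        while (ioctl(fd, SGX_IOC_VEPC_REMOVE_ALL, 0) > 0)
                ;
}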
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2016-20 Intel Corporation. */ #include <linux/acpi.h> #include <linux/miscdevice.h> #include <linux/mman.h> #include <linux/security.h> #include <linux/suspend.h> #include <asm/traps.h> #include "driver.h" #include "encl.h" u64 sgx_attributes_reserved_mask; u64 sgx_xfrm_reserved_mask = ~0x3; u32 sgx_misc_reserved_mask; static int sgx_open(struct inode *inode, struct file *file) { struct sgx_encl *encl; int ret; encl = kzalloc(sizeof(*encl), GFP_KERNEL); if (!encl) return -ENOMEM; kref_init(&encl->refcount); xa_init(&encl->page_array); mutex_init(&encl->lock); INIT_LIST_HEAD(&encl->va_pages); INIT_LIST_HEAD(&encl->mm_list); spin_lock_init(&encl->mm_lock); ret = init_srcu_struct(&encl->srcu); if (ret) { kfree(encl); return ret; } file->private_data = encl; return 0; } static int sgx_release(struct inode *inode, struct file *file) { struct sgx_encl *encl = file->private_data; struct sgx_encl_mm *encl_mm; /* * Drain the remaining mm_list entries. At this point the list contains * entries for processes, which have closed the enclave file but have * not exited yet. The processes, which have exited, are gone from the * list by sgx_mmu_notifier_release(). */ for ( ; ; ) { spin_lock(&encl->mm_lock); if (list_empty(&encl->mm_list)) { encl_mm = NULL; } else { encl_mm = list_first_entry(&encl->mm_list, struct sgx_encl_mm, list); list_del_rcu(&encl_mm->list); } spin_unlock(&encl->mm_lock); /* The enclave is no longer mapped by any mm. */ if (!encl_mm) break; synchronize_srcu(&encl->srcu); mmu_notifier_unregister(&encl_mm->mmu_notifier, encl_mm->mm); kfree(encl_mm); /* 'encl_mm' is gone, put encl_mm->encl reference: */ kref_put(&encl->refcount, sgx_encl_release); } kref_put(&encl->refcount, sgx_encl_release); return 0; } static int sgx_mmap(struct file *file, struct vm_area_struct *vma) { struct sgx_encl *encl = file->private_data; int ret; ret = sgx_encl_may_map(encl, vma->vm_start, vma->vm_end, vma->vm_flags); if (ret) return ret; ret = sgx_encl_mm_add(encl, vma->vm_mm); if (ret) return ret; vma->vm_ops = &sgx_vm_ops; vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO); vma->vm_private_data = encl; return 0; } static unsigned long sgx_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { if ((flags & MAP_TYPE) == MAP_PRIVATE) return -EINVAL; if (flags & MAP_FIXED) return addr; return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); } #ifdef CONFIG_COMPAT static long sgx_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { return sgx_ioctl(filep, cmd, arg); } #endif static const struct file_operations sgx_encl_fops = { .owner = THIS_MODULE, .open = sgx_open, .release = sgx_release, .unlocked_ioctl = sgx_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = sgx_compat_ioctl, #endif .mmap = sgx_mmap, .get_unmapped_area = sgx_get_unmapped_area, }; static struct miscdevice sgx_dev_enclave = { .minor = MISC_DYNAMIC_MINOR, .name = "sgx_enclave", .nodename = "sgx_enclave", .fops = &sgx_encl_fops, }; int __init sgx_drv_init(void) { unsigned int eax, ebx, ecx, edx; u64 attr_mask; u64 xfrm_mask; int ret; if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) return -ENODEV; cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx); if (!(eax & 1)) { pr_err("SGX disabled: SGX1 instruction support not available.\n"); return -ENODEV; } sgx_misc_reserved_mask = ~ebx | SGX_MISC_RESERVED_MASK; cpuid_count(SGX_CPUID, 1, &eax, &ebx, &ecx, &edx); attr_mask = (((u64)ebx) << 32) + 
(u64)eax; sgx_attributes_reserved_mask = ~attr_mask | SGX_ATTR_RESERVED_MASK; if (cpu_feature_enabled(X86_FEATURE_OSXSAVE)) { xfrm_mask = (((u64)edx) << 32) + (u64)ecx; sgx_xfrm_reserved_mask = ~xfrm_mask; } ret = misc_register(&sgx_dev_enclave); if (ret) return ret; return 0; }
linux-master
arch/x86/kernel/cpu/sgx/driver.c
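sgx_drv_init() above only registers /dev/sgx_enclave when launch control (SGX_LC) is available and CPUID leaf 0x12 reports SGX1 support. A userspace probe can perform roughly the same checks before attempting to open the device; the sketch below is only an approximation (it cannot see the IA32_FEATURE_CONTROL MSR state the kernel also depends on), with bit positions taken from the SDM and the helper name chosen for illustration.

#include <cpuid.h>
#include <stdbool.h>

static bool sgx_driver_likely_usable(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* CPUID.(EAX=07H, ECX=0): EBX[2] = SGX, ECX[30] = SGX_LC */
        if (!__get_cpuid_count(0x07, 0, &eax, &ebx, &ecx, &edx))
                return false;
        if (!(ebx & (1u << 2)) || !(ecx & (1u << 30)))
                return false;

        /* CPUID.(EAX=12H, ECX=0): EAX[0] = SGX1 instruction set */
        if (!__get_cpuid_count(0x12, 0, &eax, &ebx, &ecx, &edx))
                return false;

        return eax & 1;
}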